Skip to content

Commit

Permalink
Simple leaf page (#261)
Browse files Browse the repository at this point in the history
* Simple LeafPage introduced

* smarter cow for TrySet

* cleanup

* counting right

* extension caching

* reporting

* bigger fanout

* format

* do not nest parent responses to make caches smaller

* cache extension when it is touched on delete
  • Loading branch information
Scooletz authored Feb 27, 2024
1 parent cda38d3 commit 8b91e46
Show file tree
Hide file tree
Showing 18 changed files with 883 additions and 714 deletions.
2 changes: 1 addition & 1 deletion src/Paprika.Tests/Chain/BlockchainTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ public async Task Account_destruction_database_flushed()
blockchain.Finalize(hash);

// Poor man's await on finalization flushed
await Task.Delay(500);
await blockchain.WaitTillFlush(hash);

using var block2 = blockchain.StartNew(hash);

Expand Down
25 changes: 25 additions & 0 deletions src/Paprika.Tests/Data/SlottedArrayTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,25 @@ public void Update_in_situ()
map.GetAssert(key1, Data2);
}

[Test]
public void Report_has_space_properly()
{
    // Buffer sized to hold exactly one slot: an empty key plus a single value byte.
    const int valueLength = 1;
    const int keyLength = 0;

    var key = NibblePath.Empty;
    Span<byte> fittingValue = stackalloc byte[valueLength] { 13 };
    Span<byte> oversizedValue = stackalloc byte[valueLength + 1];

    Span<byte> buffer = stackalloc byte[SlottedArray.OneSlotArrayMinimalSize + valueLength + keyLength];
    var map = new SlottedArray(buffer);

    map.SetAssert(key, fittingValue);

    // Updating to a value of equal or smaller size must be reported as possible...
    map.HasSpaceToUpdateExisting(key, ReadOnlySpan<byte>.Empty).Should().BeTrue();
    map.HasSpaceToUpdateExisting(key, fittingValue).Should().BeTrue();

    // ...while growing the value beyond the remaining capacity must not.
    map.HasSpaceToUpdateExisting(key, oversizedValue).Should().BeFalse();
}

[Test]
public void Update_in_resize()
{
Expand Down Expand Up @@ -195,6 +214,12 @@ void Unique(in ReadOnlySpan<byte> key)

file static class FixedMapTestExtensions
{
/// <summary>
/// Sets the given key/value pair on the map and asserts that the write succeeded.
/// </summary>
/// <param name="because">Optional reason reported on assertion failure.</param>
public static void SetAssert(this SlottedArray map, in NibblePath key, ReadOnlySpan<byte> data,
    string? because = null)
{
    var written = map.TrySet(key, data);
    written.Should().BeTrue(because ?? "TrySet should succeed");
}

public static void SetAssert(this SlottedArray map, in ReadOnlySpan<byte> key, ReadOnlySpan<byte> data,
string? because = null)
{
Expand Down
2 changes: 1 addition & 1 deletion src/Paprika.Tests/Merkle/AdditionalTests.cs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ public async Task Account_destruction_same_block()
const int seed = 17;
const int storageCount = 32 * 1024;

using var db = PagedDb.NativeMemoryDb(8 * 1024 * 1024, 2);
using var db = PagedDb.NativeMemoryDb(16 * 1024 * 1024, 2);
var merkle = new ComputeMerkleBehavior(2, 2);

await using var blockchain = new Blockchain(db, merkle);
Expand Down
43 changes: 27 additions & 16 deletions src/Paprika.Tests/Store/BasePageTests.cs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
using System.Runtime.InteropServices;
using FluentAssertions;
using Paprika.Crypto;
using Paprika.Store;

Expand All @@ -8,59 +9,67 @@ public abstract class BasePageTests
{
protected static unsafe Page AllocPage()
{
var memory = (byte*)NativeMemory.AlignedAlloc((UIntPtr)Page.PageSize, (UIntPtr)sizeof(long));
var memory = (byte*)NativeMemory.AlignedAlloc(Page.PageSize, sizeof(long));
new Span<byte>(memory, Page.PageSize).Clear();
return new Page(memory);
}

internal class TestBatchContext : BatchContextBase
internal class TestBatchContext(uint batchId, Stack<DbAddress>? reusable = null) : BatchContextBase(batchId)
{
private readonly Dictionary<DbAddress, Page> _address2Page = new();
private readonly Dictionary<UIntPtr, DbAddress> _page2Address = new();
private readonly Stack<DbAddress> _reusable = reusable ?? new Stack<DbAddress>();
private readonly HashSet<DbAddress> _toReuse = new();

// data pages should start at non-null addresses
// 0-N is take by metadata pages
private uint _pageCount = 1U;

public TestBatchContext(uint batchId) : base(batchId)
{
IdCache = new Dictionary<Keccak, uint>();
}

public override Page GetAt(DbAddress address) => _address2Page[address];

public override DbAddress GetAddress(Page page) => _page2Address[page.Raw];

public override Page GetNewPage(out DbAddress addr, bool clear)
{
var page = AllocPage();
Page page;
if (_reusable.TryPop(out addr))
{
page = GetAt(addr);
}
else
{
page = AllocPage();
addr = DbAddress.Page(_pageCount++);

_address2Page[addr] = page;
_page2Address[page.Raw] = addr;
}

if (clear)
page.Clear();

page.Header.BatchId = BatchId;

addr = DbAddress.Page(_pageCount++);

_address2Page[addr] = page;
_page2Address[page.Raw] = addr;

return page;
}

// for now
public override bool WasWritten(DbAddress addr) => true;

public override void RegisterForFutureReuse(Page page)
{
// NOOP
_toReuse.Add(GetAddress(page))
.Should()
.BeTrue("Page should not be registered as reusable before");
}

public override Dictionary<Keccak, uint> IdCache { get; }
public override Dictionary<Keccak, uint> IdCache { get; } = new();

public override string ToString() => $"Batch context used {_pageCount} pages to write the data";

public TestBatchContext Next()
{
var next = new TestBatchContext(BatchId + 1);
var next = new TestBatchContext(BatchId + 1, new Stack<DbAddress>(_toReuse));

// remember the mapping
foreach (var (addr, page) in _address2Page)
Expand All @@ -78,6 +87,8 @@ public TestBatchContext Next()

return next;
}

public uint PageCount => _pageCount;
}

internal static TestBatchContext NewBatch(uint batchId) => new(batchId);
Expand Down
Loading

0 comments on commit 8b91e46

Please sign in to comment.