Skip to content

Commit

Permalink
Merged main
Browse files Browse the repository at this point in the history
  • Loading branch information
damian-orzechowski committed Nov 28, 2024
2 parents 47e38db + 3eff92a commit c7232ef
Show file tree
Hide file tree
Showing 31 changed files with 1,568 additions and 506 deletions.
16 changes: 16 additions & 0 deletions .github/workflows/dependency_review.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Runs GitHub's dependency-review action on every pull request to flag
# newly introduced dependencies that carry known security advisories.
# NOTE(review): the original scrape lost all YAML indentation; structure
# restored here according to the standard workflow schema.
name: 'Dependency Review'
on: [pull_request]

# Least-privilege token: the action only needs read access to repo contents.
permissions:
  contents: read

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      # Actions are pinned to full commit SHAs (supply-chain hardening);
      # the trailing comment records the tag each SHA corresponds to.
      - name: 'Checkout Repository'
        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 #v4.2.0
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@4081bf99e2866ebe428fc0477b69eb4fcda7220a #v4.4.0
        with:
          # Fail the check for high-severity (and above) advisories only.
          fail-on-severity: high
4 changes: 2 additions & 2 deletions docs/design.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ blockchain.Finalize(Block2A);

### PagedDb

The `PagedDb` component is responsible for storing the left-fold of the blocks that are beyond the cut-off point. This database uses [memory-mapped files](https://en.wikipedia.org/wiki/Memory-mapped_file) to provide storing capabilities. To handle concurrency, [Copy on Write](https://en.wikipedia.org/wiki/Copy-on-write) is used. This allows multiple concurrent readers to cooperate in a full lock-free manner and a single writer that runs the current transaction. In that manner, it's heavily inspired by [LMBD](https://github.com/LMDB/lmdb).
The `PagedDb` component is responsible for storing the left-fold of the blocks that are beyond the cut-off point. This database uses [memory-mapped files](https://en.wikipedia.org/wiki/Memory-mapped_file) to provide storing capabilities. To handle concurrency, [Copy on Write](https://en.wikipedia.org/wiki/Copy-on-write) is used. This allows multiple concurrent readers to cooperate in a full lock-free manner and a single writer that runs the current transaction. In that manner, it's heavily inspired by [LMDB](https://github.com/LMDB/lmdb).

It's worth mentioning that due to the design of the `Blockchain` component, having a single writer available is sufficient. At the same time, having multiple readers allows the creation of readonly transactions that are later used by blocks from `Blockchain`.

Expand Down Expand Up @@ -365,6 +365,6 @@ A few remarks:
1. Database Storage lectures by Andy Pavlo from CMU Intro to Database Systems / Fall 2022:
1. Database Storage, pt. 1 https://www.youtube.com/watch?v=df-l2PxUidI
1. Database Storage, pt. 2 https://www.youtube.com/watch?v=2HtfGdsrwqA
1. LMBD
1. LMDB
1. Howard Chu - LMDB [The Databaseology Lectures - CMU Fall 2015](https://www.youtube.com/watch?v=tEa5sAh-kVk)
1. The main file of LMDB [mdb.c](https://github.com/LMDB/lmdb/blob/mdb.master/libraries/liblmdb/mdb.c)
19 changes: 19 additions & 0 deletions src/Paprika.Benchmarks/Allocator.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
using System.Runtime.InteropServices;
using Paprika.Store;

namespace Paprika.Benchmarks;

public static class Allocator
{
    /// <summary>
    /// Allocates a single page-sized block of native memory, aligned to its own size.
    /// </summary>
    /// <param name="clean">When true (the default), the block is zeroed before being returned.</param>
    /// <returns>
    /// Pointer to the aligned block; the caller owns it and should release it with
    /// <see cref="NativeMemory.AlignedFree"/>.
    /// </returns>
    public static unsafe void* AllocAlignedPage(bool clean = true)
    {
        const UIntPtr pageSize = Page.PageSize;

        // Alignment equals the allocation size, matching the original behavior.
        void* block = NativeMemory.AlignedAlloc(pageSize, pageSize);

        if (!clean)
        {
            return block;
        }

        NativeMemory.Clear(block, pageSize);
        return block;
    }
}
35 changes: 29 additions & 6 deletions src/Paprika.Benchmarks/BitMapFilterBenchmarks.cs
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
using System.Runtime.InteropServices;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Diagnostics.dotMemory;
using Paprika.Data;
using Paprika.Store;

namespace Paprika.Benchmarks;

[DisassemblyDiagnoser]
[DisassemblyDiagnoser(maxDepth: 2)]
[MemoryDiagnoser]
//[DotMemoryDiagnoser]

public class BitMapFilterBenchmarks
{
private readonly Page[] _pages1A = AlignedAlloc(1);
Expand All @@ -15,8 +18,17 @@ public class BitMapFilterBenchmarks
private readonly Page[] _pages2A = AlignedAlloc(2);
private readonly Page[] _pages2B = AlignedAlloc(2);

private readonly Page[] _pages16A = AlignedAlloc(128);
private readonly Page[] _pages16B = AlignedAlloc(128);
private readonly Page[] _pages16A = AlignedAlloc(BitMapFilter.OfNSize128.Count);
private readonly Page[] _pages16B = AlignedAlloc(BitMapFilter.OfNSize128.Count);

private readonly BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>[] _filters = Enumerable
.Range(0, MaxFilterCount)
.Select(i =>
new BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>(
new BitMapFilter.OfN<BitMapFilter.OfNSize128>(AlignedAlloc(BitMapFilter.OfNSize128.Count))))
.ToArray();

private const int MaxFilterCount = 64;

[Benchmark(OperationsPerInvoke = 4)]
public void Or_BitMapFilter_Of1()
Expand Down Expand Up @@ -45,19 +57,30 @@ public void Or_BitMapFilter_Of2()
[Benchmark(OperationsPerInvoke = 4)]
public void Or_BitMapFilter_OfN_128()
{
var a = new BitMapFilter<BitMapFilter.OfN>(new BitMapFilter.OfN(_pages16A));
var b = new BitMapFilter<BitMapFilter.OfN>(new BitMapFilter.OfN(_pages16B));
var a = new BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>(new BitMapFilter.OfN<BitMapFilter.OfNSize128>(_pages16A));
var b = new BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>(new BitMapFilter.OfN<BitMapFilter.OfNSize128>(_pages16B));

a.OrWith(b);
a.OrWith(b);
a.OrWith(b);
a.OrWith(b);
}

[Benchmark]
[Arguments(16)]
[Arguments(32)]
[Arguments(MaxFilterCount)]
public void Or_BitMapFilter_OfN_128_Multiple(int count)
{
var a = new BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>(new BitMapFilter.OfN<BitMapFilter.OfNSize128>(_pages16A));
var filters = _filters.AsSpan(0, count).ToArray();
a.OrWith(filters);
}

[Benchmark(OperationsPerInvoke = 4)]
public int MayContainAny_BitMapFilter_OfN_128()
{
var a = new BitMapFilter<BitMapFilter.OfN>(new BitMapFilter.OfN(_pages16A));
var a = new BitMapFilter<BitMapFilter.OfN<BitMapFilter.OfNSize128>>(new BitMapFilter.OfN<BitMapFilter.OfNSize128>(_pages16A));

return (a.MayContainAny(13, 17) ? 1 : 0) +
(a.MayContainAny(2342, 2345) ? 1 : 0) +
Expand Down
24 changes: 24 additions & 0 deletions src/Paprika.Benchmarks/PageExtensionsBenchmarks.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
using BenchmarkDotNet.Attributes;
using Paprika.Data;
using Paprika.Store;

namespace Paprika.Benchmarks;

[DisassemblyDiagnoser]
public class PageExtensionsBenchmarks
{
    // Two independently allocated, page-aligned native buffers wrapped as Pages.
    // Allocated once for the lifetime of the benchmark instance.
    private readonly Page _destination;
    private readonly Page _source;

    public unsafe PageExtensionsBenchmarks()
    {
        _destination = new Page((byte*)Allocator.AllocAlignedPage());
        _source = new Page((byte*)Allocator.AllocAlignedPage());
    }

    /// <summary>
    /// Measures <c>OrWith</c> combining one full page into another.
    /// </summary>
    [Benchmark]
    public void OrWith() => _destination.OrWith(_source);
}
5 changes: 3 additions & 2 deletions src/Paprika.Benchmarks/Paprika.Benchmarks.csproj
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,9 @@
</PropertyGroup>

<ItemGroup>
<PackageReference Include="BenchmarkDotNet" Version="0.13.12" />
<PackageReference Include="BenchmarkDotNet.Diagnostics.dotTrace" Version="0.13.12" />
<PackageReference Include="BenchmarkDotNet" Version="0.14.0" />
<PackageReference Include="BenchmarkDotNet.Diagnostics.dotMemory" Version="0.14.0" />
<PackageReference Include="BenchmarkDotNet.Diagnostics.dotTrace" Version="0.14.0" />
</ItemGroup>

<ItemGroup>
Expand Down
19 changes: 4 additions & 15 deletions src/Paprika.Benchmarks/SlottedArrayBenchmarks.cs
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
using System.Runtime.InteropServices;
using BenchmarkDotNet.Attributes;
using Paprika.Crypto;
using Paprika.Data;
Expand Down Expand Up @@ -29,7 +28,7 @@ private const int
public SlottedArrayBenchmarks()
{
// Create keys
_keys = AllocAlignedPage();
_keys = Allocator.AllocAlignedPage();

var span = new Span<byte>(_keys, Page.PageSize);
for (byte i = 0; i < KeyCount; i++)
Expand All @@ -41,7 +40,7 @@ public SlottedArrayBenchmarks()
}

// Map
_map = AllocAlignedPage();
_map = Allocator.AllocAlignedPage();
Span<byte> value = stackalloc byte[1];

var map = new SlottedArray(new Span<byte>(_map, Page.PageSize));
Expand All @@ -55,7 +54,7 @@ public SlottedArrayBenchmarks()
}

// Hash colliding
_hashCollidingKeys = AllocAlignedPage();
_hashCollidingKeys = Allocator.AllocAlignedPage();

// Create keys so that two consecutive ones share the hash.
// This should make it somewhat realistic where there are some collisions but not a lot of them.
Expand All @@ -72,7 +71,7 @@ public SlottedArrayBenchmarks()
hashCollidingKeys[i * BytesPerKeyHashColliding + 2] = (byte)(i / 2);
}

_hashCollidingMap = AllocAlignedPage();
_hashCollidingMap = Allocator.AllocAlignedPage();

var hashColliding = new SlottedArray(new Span<byte>(_hashCollidingMap, Page.PageSize));
for (byte i = 0; i < HashCollidingKeyCount; i++)
Expand All @@ -83,16 +82,6 @@ public SlottedArrayBenchmarks()
throw new Exception("Not enough memory");
}
}

return;

static void* AllocAlignedPage()
{
const UIntPtr size = Page.PageSize;
var memory = NativeMemory.AlignedAlloc(size, size);
NativeMemory.Clear(memory, size);
return memory;
}
}

[Benchmark(OperationsPerInvoke = 4)]
Expand Down
13 changes: 13 additions & 0 deletions src/Paprika.Importer/Readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
## Paprika Importer

Use the following steps to import from Nethermind’s DB

- Switch to the `importer` branch
- `git checkout remotes/origin/importer`
- Fetch the submodules
- `git submodule update --init --recursive`
- Build Paprika.Importer
- `dotnet build .\\src\\Paprika.Importer\\Paprika.Importer.sln`
- Run
- `cd .\\src\\Paprika.Importer`
- `dotnet run -- path_to_nethermind_db_mainnet`
Loading

0 comments on commit c7232ef

Please sign in to comment.