Skip to content

Commit

Permalink
Merge branch 'main' into importer
Browse files Browse the repository at this point in the history
  • Loading branch information
Scooletz committed Feb 22, 2024
2 parents 710cda1 + 5d59443 commit 0f85fec
Show file tree
Hide file tree
Showing 6 changed files with 330 additions and 38 deletions.
67 changes: 67 additions & 0 deletions src/Paprika.Tests/Utils/MetricsTests.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
using FluentAssertions;
using NUnit.Framework;
using Paprika.Chain;
using Paprika.Crypto;
using Paprika.Merkle;
using Paprika.Store;
using Paprika.Utils;

namespace Paprika.Tests.Utils;

/// <summary>
/// Smoke test verifying that the metrics pipeline observes non-zero values
/// for the Merkle histograms and the db size gauge after a realistic workload.
/// </summary>
public class MetricsTests
{
    private const int Mb = 1024 * 1024;

    [Test]
    public async Task Metrics_should_report()
    {
        using var metrics = new Metrics();

        using var db = PagedDb.NativeMemoryDb(16 * Mb, 2);

        await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior(1, 1, Memoization.None));

        // Fixed seed keeps the generated account keys deterministic across runs.
        var rng = new Random(13);
        var parentHash = Keccak.EmptyTreeHash;
        var pendingFinality = new Queue<Keccak>();
        var storageValue = new byte[13];

        const uint spins = 100;
        const int accountsPerSpin = 100;

        for (uint at = 1; at < spins; at++)
        {
            using var block = blockchain.StartNew(parentHash);

            Keccak key = default;

            for (var i = 0; i < accountsPerSpin; i++)
            {
                rng.NextBytes(key.BytesAsSpan);
                block.SetAccount(key, new Account(at, at));
            }

            // Only the last generated account additionally gets a storage slot.
            block.SetStorage(key, key, storageValue);

            parentHash = block.Commit(at + 1);
            pendingFinality.Enqueue(parentHash);

            // Keep a sliding window of at most 64 non-finalized blocks.
            if (pendingFinality.Count > 64)
            {
                blockchain.Finalize(pendingFinality.Dequeue());
            }

            metrics.Observe();
        }

        // Drain whatever is still awaiting finality before asserting.
        while (pendingFinality.TryDequeue(out var finalized))
        {
            blockchain.Finalize(finalized);
        }

        metrics.Merkle.TotalMerkle.Value.Should().BeGreaterThan(0);
        metrics.Merkle.StateProcessing.Value.Should().BeGreaterThan(0);
        metrics.Merkle.StorageProcessing.Value.Should().BeGreaterThan(0);
        metrics.Db.DbSize.Value.Should().BeGreaterThan(0);
    }
}
16 changes: 11 additions & 5 deletions src/Paprika/Merkle/ComputeMerkleBehavior.cs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
using System.Buffers;
using System.Collections.Concurrent;
using System.ComponentModel;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Diagnostics.Metrics;
Expand Down Expand Up @@ -28,6 +27,12 @@ namespace Paprika.Merkle;
/// </remarks>
public class ComputeMerkleBehavior : IPreCommitBehavior, IDisposable
{
internal const string MeterName = "Paprika.Merkle";

internal const string HistogramStateProcessing = "State processing";
internal const string HistogramStorageProcessing = "Storage processing";
internal const string TotalMerkle = "Total Merkle";

/// <summary>
/// The upper boundary of memory needed to write RLP of any Merkle node.
/// </summary>
Expand Down Expand Up @@ -60,12 +65,13 @@ public ComputeMerkleBehavior(int minimumTreeLevelToMemoizeKeccak = DefaultMinimu
_memoizeKeccakEvery = memoizeKeccakEvery;
_memoization = memoization;

_meter = new Meter("Paprika.Merkle");
_storageProcessing = _meter.CreateHistogram<long>("State processing", "ms",
_meter = new Meter(MeterName);

_storageProcessing = _meter.CreateHistogram<long>(HistogramStateProcessing, "ms",
"How long it takes to process state");
_stateProcessing = _meter.CreateHistogram<long>("Storage processing", "ms",
_stateProcessing = _meter.CreateHistogram<long>(HistogramStorageProcessing, "ms",
"How long it takes to process storage");
_totalMerkle = _meter.CreateHistogram<long>("Total Merkle", "ms",
_totalMerkle = _meter.CreateHistogram<long>(TotalMerkle, "ms",
"How long it takes to process Merkle total");
}

Expand Down
12 changes: 11 additions & 1 deletion src/Paprika/Store/BatchMetrics.cs
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,22 @@
/// <summary>
/// Collects per-batch counters. The counters are plain ints because a batch
/// is accessed from a single thread (see <see cref="Writes"/> / <see cref="Reads"/>).
/// </summary>
class BatchMetrics : IBatchMetrics
{
/// <summary>The number of pages reused from the pool, incremented via <see cref="ReportPageReused"/>.</summary>
public int PagesReused { get; private set; }

/// <summary>The number of freshly allocated pages, incremented via <see cref="ReportNewPageAllocation"/>.</summary>
public int PagesAllocated { get; private set; }

// NOTE(review): no writer for this counter is visible in this chunk — confirm where it is incremented.
public int UnusedPoolFetch { get; private set; }

/// <summary>Records a single page reuse.</summary>
public void ReportPageReused() => PagesReused++;

/// <summary>Records a single new page allocation.</summary>
public void ReportNewPageAllocation() => PagesAllocated++;

/// <summary>
/// The batch is accessed from a single thread, no need to use atomic.
/// </summary>
public int Writes { get; set; }

/// <summary>
/// The batch is accessed from a single thread, no need to use atomic.
/// </summary>
public int Reads { get; set; }
}

public interface IBatchMetrics
Expand Down
39 changes: 18 additions & 21 deletions src/Paprika/Store/PagedDb.cs
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
using System.Buffers.Binary;
using System.Diagnostics;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Runtime.InteropServices;
using Paprika.Chain;
Expand Down Expand Up @@ -29,6 +28,9 @@ public class PagedDb : IPageResolver, IDb, IDisposable
/// </remarks>
private const int MinHistoryDepth = 2;

public const string MeterName = "Paprika.Store.PagedDb";
public const string DbSize = "DB Size";

private readonly IPageManager _manager;
private readonly byte _historyDepth;
private long _lastRoot;
Expand Down Expand Up @@ -74,8 +76,8 @@ private PagedDb(IPageManager manager, byte historyDepth)

RootInit();

_meter = new Meter("Paprika.Store.PagedDb");
_dbSize = _meter.CreateAtomicObservableGauge("DB Size", "MB", "The size of the database in MB");
_meter = new Meter(MeterName);
_dbSize = _meter.CreateAtomicObservableGauge(DbSize, "MB", "The size of the database in MB");

_reads = _meter.CreateCounter<long>("Reads", "Reads", "The number of reads db handles");
_writes = _meter.CreateCounter<long>("Writes", "Writes", "The number of writes db handles");
Expand All @@ -98,8 +100,9 @@ public static PagedDb MemoryMappedDb(long size, byte historyDepth, string direct
new MemoryMappedPageManager(size, historyDepth, directory,
flushToDisk ? PersistenceOptions.FlushFile : PersistenceOptions.MMapOnly), historyDepth);

private void ReportRead(long number = 1) => _reads.Add(number);
private void ReportWrite() => _writes.Add(1);
private void ReportReads(long number) => _reads.Add(number);

private void ReportWrites(long number) => _writes.Add(number);

private void ReportCommit(TimeSpan elapsed)
{
Expand Down Expand Up @@ -360,7 +363,7 @@ private class ReadOnlyBatch(PagedDb db, RootPage root, string name) : IReporting

public void Dispose()
{
db.ReportRead(Volatile.Read(ref _reads));
db.ReportReads(Volatile.Read(ref _reads));
_disposed = true;
db.DisposeReadOnlyBatch(this);
}
Expand All @@ -372,6 +375,7 @@ public bool TryGet(scoped in Key key, out ReadOnlySpan<byte> result)
if (_disposed)
throw new ObjectDisposedException("The readonly batch has already been disposed");

// Need to use interlocked as read batches can be used concurrently
Interlocked.Increment(ref _reads);

return root.TryGet(key, this, out result);
Expand Down Expand Up @@ -438,7 +442,7 @@ public bool TryGet(scoped in Key key, out ReadOnlySpan<byte> result)
if (_disposed)
throw new ObjectDisposedException("The readonly batch has already been disposed");

_db.ReportRead();
_metrics.Reads++;

return _root.TryGet(key, this, out result);
}
Expand All @@ -450,22 +454,14 @@ public void SetMetadata(uint blockNumber, in Keccak blockHash)

public void SetRaw(in Key key, ReadOnlySpan<byte> rawData)
{
_db.ReportWrite();
_metrics.Writes++;

_root.SetRaw(key, this, rawData);
}

private void SetAtRoot<TPage>(in NibblePath path, in ReadOnlySpan<byte> rawData, ref DbAddress root)
where TPage : struct, IPageWithData<TPage>
{
var data = TryGetPageAlloc(ref root, PageType.Standard);
var updated = TPage.Wrap(data).Set(path, rawData, this);
root = _db.GetAddress(updated);
}

public void Destroy(in NibblePath account)
{
_db.ReportWrite();
_metrics.Writes++;
_root.Destroy(this, account);
}

Expand All @@ -491,8 +487,12 @@ public async ValueTask Commit(CommitOptions options)
// memoize the abandoned so that it's preserved for future uses
MemoizeAbandoned();

// report metrics
_db.ReportPageCountPerCommit(_written.Count, _metrics.PagesReused, _metrics.PagesAllocated);

_db.ReportReads(_metrics.Reads);
_db.ReportWrites(_metrics.Writes);

await _db._manager.FlushPages(_written, options);

var newRootPage = _db.SetNewRoot(_root);
Expand Down Expand Up @@ -597,9 +597,6 @@ public void Dispose()
}
}

private static unsafe Page AllocateOnePage() =>
new((byte*)NativeMemory.AlignedAlloc(Page.PageSize, (UIntPtr)UIntPtr.Size));

/// <summary>
/// A reusable context for the write batch.
/// </summary>
Expand Down
Loading

0 comments on commit 0f85fec

Please sign in to comment.