Refactor cleanup logic (#228)
alrevuelta authored Oct 11, 2024
1 parent 87d7b98 commit da6bcee
Showing 3 changed files with 10 additions and 13 deletions.
8 changes: 0 additions & 8 deletions main.go
@@ -312,14 +312,6 @@ func mainLoop(oracleInstance *oracle.Oracle, onchain *oracle.Onchain, cfg *oracl
 	// Every X slots we update the onchain validators and cleanup any stranded oracle validators
 	if oracleInstance.State().LatestProcessedSlot%UpdateValidatorsIntervalSlots == 0 {
 		onchain.RefreshBeaconValidators()
-
-		// Do the validator cleanup: redistribute the pending rewards of validators subscribed to the pool
-		// that are not in the beacon chain anymore (e.g. slashed)
-		err := oracleInstance.ValidatorCleanup(oracleInstance.State().LatestProcessedSlot)
-		// As a precaution, we stop the oracle if anything went wrong during the cleanup
-		if err != nil {
-			log.Fatal("Could not cleanup validators: ", err)
-		}
 	}
 
 	// Every CheckPointSizeInSlots we commit the state given some conditions, starting from
12 changes: 10 additions & 2 deletions oracle/oracle.go
@@ -206,6 +206,16 @@ func (or *Oracle) AdvanceStateToNextSlot(fullBlock *FullBlock) (uint64, error) {
 	// Handle the donations from this block
 	or.handleDonations(blockDonations)
 
+	// Handle validator cleanup: redistribute the pending rewards of validators subscribed to the pool
+	// that are not in the beacon chain anymore (exited/slashed). We don't run this on every slot because
+	// it's expensive. Runs every 4 hours.
+	if or.state.NextSlotToProcess%uint64(1200) == 0 {
+		err = or.ValidatorCleanup(or.state.NextSlotToProcess)
+		if err != nil {
+			return 0, errors.Wrap(err, "could not cleanup validators")
+		}
+	}
+
 	processedSlot := or.state.NextSlotToProcess
 	or.state.LatestProcessedSlot = processedSlot
 	or.state.NextSlotToProcess++
@@ -217,8 +227,6 @@ func (or *Oracle) AdvanceStateToNextSlot(fullBlock *FullBlock) (uint64, error) {

 // Unsubscribes validators that are not active. Shares their pending rewards to the pool
 func (or *Oracle) ValidatorCleanup(slot uint64) error {
-	or.mutex.Lock()
-	defer or.mutex.Unlock()
 
 	// Only cleanup if we're past the cleanup slot fork
 	if slot >= SlotFork1[or.cfg.Network] {
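
Taken together, the hunks above move the validator cleanup out of mainLoop, where a failure was fatal (log.Fatal), and into the slot-processing path, where it is gated on NextSlotToProcess%1200 == 0 and any failure is returned as a wrapped error. Below is a minimal, self-contained sketch of that gating pattern, not the repository's actual types: CleanupIntervalSlots and processSlot are illustrative names, and the "every 4 hours" figure assumes mainnet's 12-second slot time (1200 × 12 s = 14,400 s = 4 h).

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// CleanupIntervalSlots mirrors the hardcoded 1200 in the diff:
// 1200 slots * 12 s/slot = 14,400 s = 4 hours on mainnet (assumption).
const CleanupIntervalSlots uint64 = 1200

// processSlot is a stand-in for AdvanceStateToNextSlot: the expensive cleanup
// runs only on interval boundaries, and failures are wrapped and returned to
// the caller instead of terminating the process with log.Fatal.
func processSlot(slot uint64, cleanup func(uint64) error) (uint64, error) {
	if slot%CleanupIntervalSlots == 0 {
		if err := cleanup(slot); err != nil {
			return 0, errors.Wrap(err, "could not cleanup validators")
		}
	}
	// ...normal per-slot processing would happen here...
	return slot, nil
}

func main() {
	cleanup := func(slot uint64) error {
		fmt.Println("running validator cleanup at slot", slot)
		return nil
	}
	// Only slot 2400 (a multiple of 1200) triggers the cleanup.
	for slot := uint64(2399); slot <= 2401; slot++ {
		if _, err := processSlot(slot, cleanup); err != nil {
			fmt.Println("stopping oracle:", err)
			return
		}
	}
}
```

On the same reading, dropping the Lock/Unlock pair from ValidatorCleanup suggests the caller now serializes access to the oracle state itself; Go's sync.Mutex is not reentrant, so re-acquiring it from the same goroutine would deadlock. The commit does not state this explicitly, so treat it as an inference from the diff.
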
3 changes: 0 additions & 3 deletions oracle/oracle_test.go
@@ -2534,7 +2534,6 @@ func Test_ValidatorCleanup_1(t *testing.T) {
 	require.Equal(t, big.NewInt(99), oracle.state.Validators[22].AccumulatedRewardsWei)
 	require.Equal(t, Active, oracle.state.Validators[22].ValidatorStatus)
 	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
-	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
 
 	// Test5:
 	log.Info("Test5: Exited validator rewards are reset and go to the rest (including yellow)")
@@ -2581,7 +2580,6 @@ func Test_ValidatorCleanup_1(t *testing.T) {
 	require.Equal(t, big.NewInt(99), oracle.state.Validators[22].AccumulatedRewardsWei)
 	require.Equal(t, Active, oracle.state.Validators[22].ValidatorStatus)
 	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
-	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
 
 	// Test6:
 	log.Info("Test6: Slashed validator in the beacon chain. Pending goes to the rest")
@@ -2628,7 +2626,6 @@ func Test_ValidatorCleanup_1(t *testing.T) {
 	require.Equal(t, big.NewInt(99), oracle.state.Validators[22].AccumulatedRewardsWei)
 	require.Equal(t, Active, oracle.state.Validators[22].ValidatorStatus)
 	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
-	require.Equal(t, big.NewInt(76548235), oracle.state.PoolAccumulatedFees)
 
 	// Test7:
 	log.Info("Test7: Exited validator rewards are reset and go to the rest (not red)")
