Skip to content

Commit 5a5a11d

Browse files
authored
Merge pull request #7457 from onflow/jan/update-v.0.42-merge-master
Jan/update v.0.42 merge master
2 parents d287cb9 + 91a98d9 commit 5a5a11d

File tree

16 files changed

+932
-174
lines changed

16 files changed

+932
-174
lines changed

cmd/consensus/main.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -400,10 +400,13 @@ func main() {
400400
// the chain of seals
401401
rawMempool := stdmap.NewIncorporatedResultSeals(sealLimit)
402402
multipleReceiptsFilterMempool := consensusMempools.NewIncorporatedResultSeals(rawMempool, node.Storage.Receipts)
403+
404+
dbStore := cmd.GetStorageMultiDBStoreIfNeeded(node)
405+
403406
seals, err = consensusMempools.NewExecStateForkSuppressor(
404407
multipleReceiptsFilterMempool,
405408
consensusMempools.LogForkAndCrash(node.Logger),
406-
node.DB,
409+
dbStore,
407410
node.Logger,
408411
)
409412
if err != nil {

cmd/execution_builder.go

Lines changed: 4 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,6 @@ import (
5454
"github.com/onflow/flow-go/engine/execution/ingestion/fetcher"
5555
"github.com/onflow/flow-go/engine/execution/ingestion/stop"
5656
"github.com/onflow/flow-go/engine/execution/ingestion/uploader"
57-
"github.com/onflow/flow-go/engine/execution/migration"
5857
exeprovider "github.com/onflow/flow-go/engine/execution/provider"
5958
exepruner "github.com/onflow/flow-go/engine/execution/pruner"
6059
"github.com/onflow/flow-go/engine/execution/rpc"
@@ -93,13 +92,11 @@ import (
9392
"github.com/onflow/flow-go/state/protocol/blocktimer"
9493
storageerr "github.com/onflow/flow-go/storage"
9594
storage "github.com/onflow/flow-go/storage/badger"
96-
"github.com/onflow/flow-go/storage/dbops"
9795
"github.com/onflow/flow-go/storage/operation"
9896
"github.com/onflow/flow-go/storage/operation/badgerimpl"
9997
"github.com/onflow/flow-go/storage/operation/pebbleimpl"
10098
storagepebble "github.com/onflow/flow-go/storage/pebble"
10199
"github.com/onflow/flow-go/storage/store"
102-
"github.com/onflow/flow-go/storage/store/chained"
103100
)
104101

105102
const (
@@ -224,7 +221,6 @@ func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() {
224221
Module("blobservice peer manager dependencies", exeNode.LoadBlobservicePeerManagerDependencies).
225222
Module("bootstrap", exeNode.LoadBootstrapper).
226223
Module("register store", exeNode.LoadRegisterStore).
227-
Module("migrate last executed block", exeNode.MigrateLastSealedExecutedResultToPebble).
228224
AdminCommand("get-transactions", func(conf *NodeConfig) commands.AdminCommand {
229225
return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, exeNode.collections)
230226
}).
@@ -347,24 +343,10 @@ func (exeNode *ExecutionNode) LoadExecutionStorage(
347343
exeNode.receipts = store.NewExecutionReceipts(node.Metrics.Cache, db, exeNode.results, storage.DefaultCacheSize)
348344
exeNode.myReceipts = store.NewMyExecutionReceipts(node.Metrics.Cache, db, exeNode.receipts)
349345
exeNode.txResults = store.NewTransactionResults(node.Metrics.Cache, db, exeNode.exeConf.transactionResultsCacheSize)
350-
351-
if dbops.IsBadgerBased(node.DBOps) {
352-
// if data are stored in badger, we can use the same storage for all data
353-
exeNode.eventsReader = exeNode.events
354-
exeNode.commitsReader = exeNode.commits
355-
exeNode.resultsReader = exeNode.results
356-
exeNode.txResultsReader = exeNode.txResults
357-
} else if dbops.IsPebbleBatch(node.DBOps) {
358-
// when data are stored in pebble, we need to use chained storage to query data from
359-
// both pebble and badger
360-
// note the pebble storage is the first argument, and badger storage is the second, so
361-
// the data will be queried from pebble first, then badger
362-
badgerDB := badgerimpl.ToDB(node.DB)
363-
exeNode.eventsReader = chained.NewEvents(exeNode.events, store.NewEvents(node.Metrics.Cache, badgerDB))
364-
exeNode.commitsReader = chained.NewCommits(exeNode.commits, store.NewCommits(node.Metrics.Cache, badgerDB))
365-
exeNode.resultsReader = chained.NewExecutionResults(exeNode.results, store.NewExecutionResults(node.Metrics.Cache, badgerDB))
366-
exeNode.txResultsReader = chained.NewTransactionResults(exeNode.txResults, store.NewTransactionResults(node.Metrics.Cache, badgerDB, exeNode.exeConf.transactionResultsCacheSize))
367-
}
346+
exeNode.eventsReader = exeNode.events
347+
exeNode.commitsReader = exeNode.commits
348+
exeNode.resultsReader = exeNode.results
349+
exeNode.txResultsReader = exeNode.txResults
368350
return nil
369351
}
370352

@@ -756,16 +738,6 @@ func (exeNode *ExecutionNode) LoadExecutionDataGetter(node *NodeConfig) error {
756738
return nil
757739
}
758740

759-
func (exeNode *ExecutionNode) MigrateLastSealedExecutedResultToPebble(node *NodeConfig) error {
760-
// Migrate the last sealed executed result to Pebble
761-
err := migration.MigrateLastSealedExecutedResultToPebble(node.Logger, node.DB, node.PebbleDB, node.State, node.RootSeal)
762-
if err != nil {
763-
return fmt.Errorf("could not migrate last sealed executed result to pebble: %w", err)
764-
}
765-
766-
return nil
767-
}
768-
769741
func (exeNode *ExecutionNode) LoadExecutionState(
770742
node *NodeConfig,
771743
) (

fvm/environment/minimum_required_version.go

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
package environment
22

33
import (
4+
"sync"
5+
46
"github.com/coreos/go-semver/semver"
57

68
"github.com/onflow/flow-go/fvm/errors"
@@ -15,15 +17,34 @@ type GetVersionBeaconFunc func() (*flow.SealedVersionBeacon, error)
1517

1618
type VersionBeaconExecutionVersionProvider struct {
1719
getVersionBeacon GetVersionBeaconFunc
20+
21+
once sync.Once
22+
cachedVersion semver.Version
23+
cachedErr error
1824
}
1925

20-
func NewVersionBeaconExecutionVersionProvider(getVersionBeacon GetVersionBeaconFunc) VersionBeaconExecutionVersionProvider {
21-
return VersionBeaconExecutionVersionProvider{
26+
// NewVersionBeaconExecutionVersionProvider creates a new VersionBeaconExecutionVersionProvider
27+
// It caches the result of the getVersionBeacon function
28+
// The assumption here is that the GetVersionBeaconFunc will not return a different result for the lifetime of the provider
29+
// This is safe to make because version beacons change in between blocks and VersionBeaconExecutionVersionProvider are created
30+
// on every block.
31+
//
32+
// This logic will go away once we switch to the cadence component version from the dynamic protocol state
33+
func NewVersionBeaconExecutionVersionProvider(getVersionBeacon GetVersionBeaconFunc) *VersionBeaconExecutionVersionProvider {
34+
return &VersionBeaconExecutionVersionProvider{
2235
getVersionBeacon: getVersionBeacon,
2336
}
2437
}
2538

26-
func (v VersionBeaconExecutionVersionProvider) ExecutionVersion() (semver.Version, error) {
39+
func (v *VersionBeaconExecutionVersionProvider) ExecutionVersion() (semver.Version, error) {
40+
v.once.Do(func() {
41+
v.cachedVersion, v.cachedErr = v.queryExecutionVersion()
42+
})
43+
44+
return v.cachedVersion, v.cachedErr
45+
}
46+
47+
func (v *VersionBeaconExecutionVersionProvider) queryExecutionVersion() (semver.Version, error) {
2748
vb, err := v.getVersionBeacon()
2849
if err != nil {
2950
return semver.Version{}, err
@@ -39,7 +60,6 @@ func (v VersionBeaconExecutionVersionProvider) ExecutionVersion() (semver.Versio
3960
if err != nil {
4061
return semver.Version{}, err
4162
}
42-
4363
return *sv, nil
4464
}
4565

module/executiondatasync/execution_data/cache/cache.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@ func NewExecutionDataCache(
3030
) *ExecutionDataCache {
3131
return &ExecutionDataCache{
3232
backend: backend,
33-
3433
headers: headers,
3534
seals: seals,
3635
results: results,

module/mempool/consensus/exec_fork_suppressor.go

Lines changed: 29 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,9 @@ package consensus
22

33
import (
44
"encoding/json"
5-
"errors"
65
"fmt"
76
"sync"
87

9-
"github.com/dgraph-io/badger/v2"
108
"github.com/rs/zerolog"
119
"github.com/rs/zerolog/log"
1210
"go.uber.org/atomic"
@@ -15,7 +13,7 @@ import (
1513
"github.com/onflow/flow-go/model/flow"
1614
"github.com/onflow/flow-go/module/mempool"
1715
"github.com/onflow/flow-go/storage"
18-
"github.com/onflow/flow-go/storage/badger/operation"
16+
"github.com/onflow/flow-go/storage/store"
1917
)
2018

2119
// ExecForkSuppressor is a wrapper around a conventional mempool.IncorporatedResultSeals
@@ -40,15 +38,15 @@ import (
4038
//
4139
// Implementation is concurrency safe.
4240
type ExecForkSuppressor struct {
43-
mutex sync.RWMutex
44-
seals mempool.IncorporatedResultSeals
45-
sealsForBlock map[flow.Identifier]sealSet // map BlockID -> set of IncorporatedResultSeal
46-
byHeight map[uint64]map[flow.Identifier]struct{} // map height -> set of executed block IDs at height
47-
lowestHeight uint64
48-
execForkDetected atomic.Bool
49-
onExecFork ExecForkActor
50-
db *badger.DB
51-
log zerolog.Logger
41+
mutex sync.RWMutex
42+
seals mempool.IncorporatedResultSeals
43+
sealsForBlock map[flow.Identifier]sealSet // map BlockID -> set of IncorporatedResultSeal
44+
byHeight map[uint64]map[flow.Identifier]struct{} // map height -> set of executed block IDs at height
45+
lowestHeight uint64
46+
execForkDetected atomic.Bool
47+
onExecFork ExecForkActor
48+
execForkEvidenceStore storage.ExecutionForkEvidence
49+
log zerolog.Logger
5250
}
5351

5452
var _ mempool.IncorporatedResultSeals = (*ExecForkSuppressor)(nil)
@@ -59,25 +57,33 @@ type sealSet map[flow.Identifier]*flow.IncorporatedResultSeal
5957
// sealsList is a list of seals
6058
type sealsList []*flow.IncorporatedResultSeal
6159

62-
func NewExecStateForkSuppressor(seals mempool.IncorporatedResultSeals, onExecFork ExecForkActor, db *badger.DB, log zerolog.Logger) (*ExecForkSuppressor, error) {
63-
conflictingSeals, err := checkExecutionForkEvidence(db)
60+
func NewExecStateForkSuppressor(
61+
seals mempool.IncorporatedResultSeals,
62+
onExecFork ExecForkActor,
63+
db storage.DB,
64+
log zerolog.Logger,
65+
) (*ExecForkSuppressor, error) {
66+
executionForkEvidenceStore := store.NewExecutionForkEvidence(db)
67+
68+
conflictingSeals, err := executionForkEvidenceStore.Retrieve()
6469
if err != nil {
6570
return nil, fmt.Errorf("failed to interface with storage: %w", err)
6671
}
72+
6773
execForkDetectedFlag := len(conflictingSeals) != 0
6874
if execForkDetectedFlag {
6975
onExecFork(conflictingSeals)
7076
}
7177

7278
wrapper := ExecForkSuppressor{
73-
mutex: sync.RWMutex{},
74-
seals: seals,
75-
sealsForBlock: make(map[flow.Identifier]sealSet),
76-
byHeight: make(map[uint64]map[flow.Identifier]struct{}),
77-
execForkDetected: *atomic.NewBool(execForkDetectedFlag),
78-
onExecFork: onExecFork,
79-
db: db,
80-
log: log.With().Str("mempool", "ExecForkSuppressor").Logger(),
79+
mutex: sync.RWMutex{},
80+
seals: seals,
81+
sealsForBlock: make(map[flow.Identifier]sealSet),
82+
byHeight: make(map[uint64]map[flow.Identifier]struct{}),
83+
execForkDetected: *atomic.NewBool(execForkDetectedFlag),
84+
onExecFork: onExecFork,
85+
execForkEvidenceStore: executionForkEvidenceStore,
86+
log: log.With().Str("mempool", "ExecForkSuppressor").Logger(),
8187
}
8288

8389
return &wrapper, nil
@@ -337,41 +343,6 @@ func hasConsistentStateTransitions(irSeal, irSeal2 *flow.IncorporatedResultSeal)
337343
return true
338344
}
339345

340-
// checkExecutionForkEvidence checks the database whether evidence
341-
// about an execution fork is stored. Returns the stored evidence.
342-
func checkExecutionForkEvidence(db *badger.DB) ([]*flow.IncorporatedResultSeal, error) {
343-
var conflictingSeals []*flow.IncorporatedResultSeal
344-
err := db.View(func(tx *badger.Txn) error {
345-
err := operation.RetrieveExecutionForkEvidence(&conflictingSeals)(tx)
346-
if errors.Is(err, storage.ErrNotFound) {
347-
return nil // no evidence in data base; conflictingSeals is still nil slice
348-
}
349-
if err != nil {
350-
return fmt.Errorf("failed to load evidence whether or not an execution fork occurred: %w", err)
351-
}
352-
return nil
353-
})
354-
return conflictingSeals, err
355-
}
356-
357-
// storeExecutionForkEvidence stores the provided seals in the database
358-
// as evidence for an execution fork.
359-
func storeExecutionForkEvidence(conflictingSeals []*flow.IncorporatedResultSeal, db *badger.DB) error {
360-
err := operation.RetryOnConflict(db.Update, func(tx *badger.Txn) error {
361-
err := operation.InsertExecutionForkEvidence(conflictingSeals)(tx)
362-
if errors.Is(err, storage.ErrAlreadyExists) {
363-
// some evidence about execution fork already stored;
364-
// we only keep the first evidence => noting more to do
365-
return nil
366-
}
367-
if err != nil {
368-
return fmt.Errorf("failed to store evidence about execution fork: %w", err)
369-
}
370-
return nil
371-
})
372-
return err
373-
}
374-
375346
// filterConflictingSeals performs filtering of provided seals by checking if there are conflicting seals for same block.
376347
// For every block we check if first seal has same state transitions as others. Multiple seals for same block are allowed
377348
// but their state transitions should be the same. Upon detecting seal with inconsistent state transition we will clear our mempool,
@@ -395,7 +366,7 @@ func (s *ExecForkSuppressor) filterConflictingSeals(sealsByBlockID map[flow.Iden
395366
s.execForkDetected.Store(true)
396367
s.Clear()
397368
conflictingSeals = append(sealsList{candidateSeal}, conflictingSeals...)
398-
err := storeExecutionForkEvidence(conflictingSeals, s.db)
369+
err := s.execForkEvidenceStore.StoreIfNotExists(conflictingSeals)
399370
if err != nil {
400371
panic("failed to store execution fork evidence")
401372
}

0 commit comments

Comments
 (0)