diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index aac230ce19e..f6f463941b3 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" ) const ( @@ -117,12 +118,12 @@ func getFlowClient() *client.Client { insecureClient = false } - config, err := common.NewFlowClientConfig(flagANAddress, strings.TrimPrefix(flagANNetworkKey, "0x"), flow.ZeroID, insecureClient) + config, err := grpcclient.NewFlowClientConfig(flagANAddress, strings.TrimPrefix(flagANNetworkKey, "0x"), flow.ZeroID, insecureClient) if err != nil { log.Fatal().Err(err).Msgf("could not get flow client config with address (%s) and network key (%s)", flagANAddress, flagANNetworkKey) } - flowClient, err := common.FlowClient(config) + flowClient, err := grpcclient.FlowClient(config) if err != nil { log.Fatal().Err(err).Msgf("could not get flow client with address (%s) and network key (%s)", flagANAddress, flagANNetworkKey) } diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 3c8da22457c..46ab61276ed 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -43,6 +43,7 @@ import ( modulecompliance "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/epochs" confinalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/module/mempool" epochpool "github.com/onflow/flow-go/module/mempool/epochs" "github.com/onflow/flow-go/module/mempool/herocache" @@ -98,7 +99,7 @@ func main() { // epoch qc contract client machineAccountInfo *bootstrap.NodeMachineAccountInfo - flowClientConfigs []*common.FlowClientConfig + flowClientConfigs []*grpcclient.FlowClientConfig insecureAccessAPI bool accessNodeIDS []string apiRatelimits map[string]int @@ -284,7 +285,7 @@ func main() { return fmt.Errorf("failed to validate flag --access-node-ids %w", err) } - flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) + flowClientConfigs, err = grpcclient.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err) } @@ -293,7 +294,7 @@ func main() { }). Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // @TODO use fallback logic for flowClient similar to DKG/QC contract clients - flowClient, err := common.FlowClient(flowClientConfigs[0]) + flowClient, err := grpcclient.FlowClient(flowClientConfigs[0]) if err != nil { return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err) } @@ -682,11 +683,11 @@ func createQCContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap. 
} // createQCContractClients creates priority ordered array of QCContractClient -func createQCContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.QCContractClient, error) { +func createQCContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*grpcclient.FlowClientConfig) ([]module.QCContractClient, error) { qcClients := make([]module.QCContractClient, 0) for _, opt := range flowClientOpts { - flowClient, err := common.FlowClient(opt) + flowClient, err := grpcclient.FlowClient(opt) if err != nil { return nil, fmt.Errorf("failed to create flow client for qc contract client with options: %s %w", flowClientOpts, err) } diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index 4d92e7b3480..69fba46895d 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -50,6 +50,7 @@ import ( dkgmodule "github.com/onflow/flow-go/module/dkg" "github.com/onflow/flow-go/module/epochs" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/module/mempool" consensusMempools "github.com/onflow/flow-go/module/mempool/consensus" "github.com/onflow/flow-go/module/mempool/stdmap" @@ -99,7 +100,7 @@ func main() { // DKG contract client machineAccountInfo *bootstrap.NodeMachineAccountInfo - flowClientConfigs []*common.FlowClientConfig + flowClientConfigs []*grpcclient.FlowClientConfig insecureAccessAPI bool accessNodeIDS []string @@ -408,7 +409,7 @@ func main() { return fmt.Errorf("failed to validate flag --access-node-ids %w", err) } - flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) + flowClientConfigs, err = grpcclient.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err) } @@ -417,7 +418,7 @@ func main() { }). 
Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // @TODO use fallback logic for flowClient similar to DKG/QC contract clients - flowClient, err := common.FlowClient(flowClientConfigs[0]) + flowClient, err := grpcclient.FlowClient(flowClientConfigs[0]) if err != nil { return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err) } @@ -981,11 +982,11 @@ func createDKGContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap } // createDKGContractClients creates an array dkgContractClient that is sorted by retry fallback priority -func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.DKGContractClient, error) { +func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*grpcclient.FlowClientConfig) ([]module.DKGContractClient, error) { dkgClients := make([]module.DKGContractClient, 0) for _, opt := range flowClientOpts { - flowClient, err := common.FlowClient(opt) + flowClient, err := grpcclient.FlowClient(opt) if err != nil { return nil, fmt.Errorf("failed to create flow client for dkg contract client with options: %s %w", flowClientOpts, err) } diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index 97cb118eacc..a108aa4a58c 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/state/protocol" badgerstate "github.com/onflow/flow-go/state/protocol/badger" utilsio "github.com/onflow/flow-go/utils/io" @@ -92,11 +93,11 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { // CASE 2.2: Use Dynamic Startup to bootstrap. 
// get flow client with secure client connection to download protocol snapshot from access node - config, err := common.NewFlowClientConfig(nodeConfig.DynamicStartupANAddress, nodeConfig.DynamicStartupANPubkey, flow.ZeroID, false) + config, err := grpcclient.NewFlowClientConfig(nodeConfig.DynamicStartupANAddress, nodeConfig.DynamicStartupANPubkey, flow.ZeroID, false) if err != nil { return fmt.Errorf("failed to create flow client config for node dynamic startup pre-init: %w", err) } - flowClient, err := common.FlowClient(config) + flowClient, err := grpcclient.FlowClient(config) if err != nil { return fmt.Errorf("failed to create flow client for node dynamic startup pre-init: %w", err) } diff --git a/cmd/util/cmd/common/validation.go b/cmd/util/cmd/common/validation.go index 6e974b7d1d1..8e86a34be80 100644 --- a/cmd/util/cmd/common/validation.go +++ b/cmd/util/cmd/common/validation.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/state/protocol" ) @@ -31,7 +32,7 @@ func validateFlags(accessNodeIDS []string, snapshot protocol.Snapshot) ([]flow.I } if accessNodeIDS[0] == "*" { - anIDS, err := DefaultAccessNodeIDS(snapshot) + anIDS, err := grpcclient.DefaultAccessNodeIDS(snapshot) if err != nil { return nil, fmt.Errorf("failed to get default access node ids %w", err) } @@ -59,7 +60,7 @@ func validateFlagsMainNet(accessNodeIDS []string) ([]flow.Identifier, error) { // convertIDS converts a list of access node id hex strings to flow.Identifier func convertIDS(accessNodeIDS []string) ([]flow.Identifier, error) { - anIDS, err := FlowIDFromHexString(accessNodeIDS...) + anIDS, err := grpcclient.FlowIDFromHexString(accessNodeIDS...) if err != nil { return nil, fmt.Errorf("failed to convert access node ID(s) into flow identifier(s) %w", err) } diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 049a8657910..1e24d3a5460 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -13,6 +13,7 @@ import ( epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/state/protocol/inmem" ) @@ -90,12 +91,12 @@ func addGenerateRecoverEpochTxArgsCmdFlags() error { func getSnapshot() *inmem.Snapshot { // get flow client with secure client connection to download protocol snapshot from access node - config, err := common.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false) + config, err := grpcclient.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false) if err != nil { log.Fatal().Err(err).Msg("failed to create flow client config") } - flowClient, err := common.FlowClient(config) + flowClient, err := grpcclient.FlowClient(config) if err != nil { log.Fatal().Err(err).Msg("failed to create flow client") } diff --git a/cmd/util/ledger/migrations/account_based_migration.go b/cmd/util/ledger/migrations/account_based_migration.go index 04cb9a97744..23d0452795f 100644 --- a/cmd/util/ledger/migrations/account_based_migration.go +++ b/cmd/util/ledger/migrations/account_based_migration.go @@ -182,7 +182,7 @@ func MigrateGroupConcurrently( continue } - for m, migration := range migrations { + for migrationIndex, migration := range migrations { select { case <-ctx.Done(): @@ -194,7 +194,7 @@ func MigrateGroupConcurrently( if err != nil { log.Error(). Err(err). 
- Int("migration_index", m). + Int("migration_index", migrationIndex). Type("migration", migration). Hex("address", address[:]). Msg("could not migrate account") diff --git a/cmd/util/ledger/migrations/cadence_values_migration_test.go b/cmd/util/ledger/migrations/cadence_values_migration_test.go index 2019d9de657..6b397f24bdf 100644 --- a/cmd/util/ledger/migrations/cadence_values_migration_test.go +++ b/cmd/util/ledger/migrations/cadence_values_migration_test.go @@ -4,10 +4,12 @@ import ( _ "embed" "fmt" "io" + "sort" "sync" "testing" _ "github.com/glebarez/go-sqlite" + migrations2 "github.com/onflow/cadence/migrations" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -112,6 +114,352 @@ func TestCadenceValuesMigration(t *testing.T) { // Check error logs. require.Empty(t, logWriter.logs) + + checkMigratedState(t, address, registersByAccount, chainID) +} + +type migrationVisit struct { + storageKey interpreter.StorageKey + storageMapKey interpreter.StorageMapKey + value string +} + +type visitMigration struct { + visits []migrationVisit +} + +var _ migrations2.ValueMigration = &visitMigration{} + +func (*visitMigration) Name() string { + return "visit" +} + +func (m *visitMigration) Migrate( + storageKey interpreter.StorageKey, + storageMapKey interpreter.StorageMapKey, + value interpreter.Value, + _ *interpreter.Interpreter, +) (newValue interpreter.Value, err error) { + + m.visits = append( + m.visits, + migrationVisit{ + storageKey: storageKey, + storageMapKey: storageMapKey, + value: value.String(), + }, + ) + + return nil, nil +} + +func (*visitMigration) CanSkip(_ interpreter.StaticType) bool { + return false +} + +func (*visitMigration) Domains() map[string]struct{} { + return nil +} + +func checkMigratedState( + t *testing.T, + address common.Address, + registersByAccount *registers.ByAccount, + chainID flow.ChainID, +) { + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount, + chainID, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + validationMigration, err := migrations2.NewStorageMigration( + mr.Interpreter, + mr.Storage, + "validation", + address, + ) + require.NoError(t, err) + + visitMigration := &visitMigration{} + + validationMigration.Migrate( + validationMigration.NewValueMigrationsPathMigrator(nil, visitMigration), + ) + + require.Equal(t, + []migrationVisit{ + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + value: `"H\u{e9}llo"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + value: `"Caf\u{e9}"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + value: `2`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + value: `1`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + value: `{"H\u{e9}llo": 2, "Caf\u{e9}": 1}`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("flowTokenVault"), + value: `0.00100000`, + }, + { + storageKey: 
interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("flowTokenVault"), + value: `11240984669916758018`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("flowTokenVault"), + value: `A.0ae53cb6e3f42a79.FlowToken.Vault(balance: 0.00100000, uuid: 11240984669916758018)`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_auth_reference_typed_key"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_auth_reference_typed_key"), + value: `"auth_ref"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_auth_reference_typed_key"), + value: `{Type(): "auth_ref"}`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_reference_typed_key"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_reference_typed_key"), + value: `"non_auth_ref"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_reference_typed_key"), + value: `{Type(): "non_auth_ref"}`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("type_value"), + value: "Type()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "Type()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "Type()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "Type()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "Type()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "Type<&Account>()", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: `Type()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: 
interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "4", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "6", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "5", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "7", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "8", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "2", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "3", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "9", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: "1", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + value: `{Type(): 4, Type(): 6, Type(): 5, Type(): 7, Type<&Account>(): 8, Type(): 2, Type(): 3, Type(): 9, Type(): 1}`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("capability"), + value: `Capability(address: 0x01cf0e2f2f715450, id: 2)`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("capability"), + value: `Capability(address: 0x01cf0e2f2f715450, id: 2)`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("string_value_1"), + value: `"Caf\u{e9}"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("untyped_capability"), + value: `Capability(address: 0x01cf0e2f2f715450, id: 2)`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("r"), + value: `360287970189639680`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("r"), + value: "A.01cf0e2f2f715450.Test.R(uuid: 360287970189639680)", + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("string_value_2"), + value: `"Caf\u{e9}"`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + value: `Type<{A.01cf0e2f2f715450.Test.Bar, A.01cf0e2f2f715450.Test.Foo}>()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: 
interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + value: `Type<{A.01cf0e2f2f715450.Test.Foo, A.01cf0e2f2f715450.Test.Bar, A.01cf0e2f2f715450.Test.Baz}>()`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + value: `1`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + value: `2`, + }, + { + storageKey: interpreter.StorageKey{Key: "storage", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + value: `{Type<{A.01cf0e2f2f715450.Test.Bar, A.01cf0e2f2f715450.Test.Foo}>(): 1, Type<{A.01cf0e2f2f715450.Test.Foo, A.01cf0e2f2f715450.Test.Bar, A.01cf0e2f2f715450.Test.Baz}>(): 2}`, + }, + { + storageKey: interpreter.StorageKey{Key: "public", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("flowTokenReceiver"), + value: `Capability<&A.0ae53cb6e3f42a79.FlowToken.Vault>(address: 0x01cf0e2f2f715450, id: 1)`, + }, + { + storageKey: interpreter.StorageKey{Key: "public", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("linkR"), + value: `Capability(address: 0x01cf0e2f2f715450, id: 2)`, + }, + { + storageKey: interpreter.StorageKey{Key: "public", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("flowTokenBalance"), + value: `Capability<&A.0ae53cb6e3f42a79.FlowToken.Vault>(address: 0x01cf0e2f2f715450, id: 3)`, + }, + { + storageKey: interpreter.StorageKey{Key: "contract", Address: address}, + storageMapKey: interpreter.StringStorageMapKey("Test"), + value: `A.01cf0e2f2f715450.Test()`, + }, + { + storageKey: interpreter.StorageKey{Key: "cap_con", Address: address}, + storageMapKey: interpreter.Uint64StorageMapKey(0x2), + value: "StorageCapabilityController(borrowType: auth(A.01cf0e2f2f715450.Test.E) &A.01cf0e2f2f715450.Test.R, capabilityID: /storage/r, target: 2)", + }, + { + storageKey: interpreter.StorageKey{Key: "cap_con", Address: address}, + storageMapKey: interpreter.Uint64StorageMapKey(0x1), + value: "StorageCapabilityController(borrowType: &A.0ae53cb6e3f42a79.FlowToken.Vault, capabilityID: /storage/flowTokenVault, target: 1)", + }, + { + storageKey: interpreter.StorageKey{Key: "cap_con", Address: address}, + storageMapKey: interpreter.Uint64StorageMapKey(0x3), + value: "StorageCapabilityController(borrowType: &A.0ae53cb6e3f42a79.FlowToken.Vault, capabilityID: /storage/flowTokenVault, target: 3)", + }, + }, + visitMigration.visits, + ) + } var flowTokenAddress = func() common.Address { @@ -199,12 +547,6 @@ func checkMigratedPayloads( } expectedValues := []interpreter.Value{ - // Both string values should be in the normalized form. 
- interpreter.NewUnmeteredStringValue("Caf\u00E9"), - interpreter.NewUnmeteredStringValue("Caf\u00E9"), - - interpreter.NewUnmeteredTypeValue(fullyEntitledAccountReferenceType), - interpreter.NewDictionaryValue( mr.Interpreter, interpreter.EmptyLocationRange, @@ -219,94 +561,41 @@ func checkMigratedPayloads( interpreter.NewUnmeteredIntValueFromInt64(2), ), - interpreter.NewDictionaryValue( - mr.Interpreter, - interpreter.EmptyLocationRange, - interpreter.NewDictionaryStaticType( - nil, - interpreter.PrimitiveStaticTypeMetaType, - interpreter.PrimitiveStaticTypeInt, - ), - interpreter.NewUnmeteredTypeValue( - &interpreter.IntersectionStaticType{ - Types: []*interpreter.InterfaceStaticType{ - fooInterfaceType, - barInterfaceType, - }, - LegacyType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - ), - interpreter.NewUnmeteredIntValueFromInt64(1), - interpreter.NewUnmeteredTypeValue( - &interpreter.IntersectionStaticType{ - Types: []*interpreter.InterfaceStaticType{ - fooInterfaceType, - barInterfaceType, - bazInterfaceType, - }, - LegacyType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - ), - interpreter.NewUnmeteredIntValueFromInt64(2), - ), - interpreter.NewCompositeValue( mr.Interpreter, interpreter.EmptyLocationRange, - testContractLocation, - "Test.R", + flowTokenLocation, + "FlowToken.Vault", common.CompositeKindResource, []interpreter.CompositeField{ { - Value: interpreter.NewUnmeteredUInt64Value(360287970189639680), + Value: interpreter.NewUnmeteredUFix64Value(0.001 * sema.Fix64Factor), + Name: "balance", + }, + { + Value: interpreter.NewUnmeteredUInt64Value(11240984669916758018), Name: "uuid", }, }, address, ), - - interpreter.NewUnmeteredSomeValueNonCopying( - interpreter.NewUnmeteredCapabilityValue( - 2, - interpreter.NewAddressValue(nil, address), - interpreter.NewReferenceStaticType(nil, entitlementAuthorization(), rResourceType), - ), - ), - - interpreter.NewUnmeteredCapabilityValue( - 2, - interpreter.NewAddressValue(nil, address), - interpreter.NewReferenceStaticType(nil, entitlementAuthorization(), rResourceType), - ), - interpreter.NewDictionaryValue( mr.Interpreter, interpreter.EmptyLocationRange, interpreter.NewDictionaryStaticType( nil, interpreter.PrimitiveStaticTypeMetaType, - interpreter.PrimitiveStaticTypeInt, + interpreter.PrimitiveStaticTypeString, ), - interpreter.NewUnmeteredTypeValue(fullyEntitledAccountReferenceType), - interpreter.NewUnmeteredIntValueFromInt64(1), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Capabilities), - interpreter.NewUnmeteredIntValueFromInt64(2), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_AccountCapabilities), - interpreter.NewUnmeteredIntValueFromInt64(3), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_StorageCapabilities), - interpreter.NewUnmeteredIntValueFromInt64(4), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Contracts), - interpreter.NewUnmeteredIntValueFromInt64(5), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Keys), - interpreter.NewUnmeteredIntValueFromInt64(6), - interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Inbox), - interpreter.NewUnmeteredIntValueFromInt64(7), - interpreter.NewUnmeteredTypeValue(accountReferenceType), - interpreter.NewUnmeteredIntValueFromInt64(8), - interpreter.NewUnmeteredTypeValue(interpreter.AccountKeyStaticType), - interpreter.NewUnmeteredIntValueFromInt64(9), + interpreter.NewUnmeteredTypeValue( + 
interpreter.NewReferenceStaticType( + nil, + entitlementAuthorization(), + rResourceType, + ), + ), + interpreter.NewUnmeteredStringValue("auth_ref"), ), - interpreter.NewDictionaryValue( mr.Interpreter, interpreter.EmptyLocationRange, @@ -324,65 +613,114 @@ func checkMigratedPayloads( ), interpreter.NewUnmeteredStringValue("non_auth_ref"), ), + interpreter.NewUnmeteredTypeValue(fullyEntitledAccountReferenceType), interpreter.NewDictionaryValue( mr.Interpreter, interpreter.EmptyLocationRange, interpreter.NewDictionaryStaticType( nil, interpreter.PrimitiveStaticTypeMetaType, - interpreter.PrimitiveStaticTypeString, + interpreter.PrimitiveStaticTypeInt, ), - interpreter.NewUnmeteredTypeValue( - interpreter.NewReferenceStaticType( - nil, - entitlementAuthorization(), - rResourceType, - ), + interpreter.NewUnmeteredTypeValue(fullyEntitledAccountReferenceType), + interpreter.NewUnmeteredIntValueFromInt64(1), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Capabilities), + interpreter.NewUnmeteredIntValueFromInt64(2), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_AccountCapabilities), + interpreter.NewUnmeteredIntValueFromInt64(3), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_StorageCapabilities), + interpreter.NewUnmeteredIntValueFromInt64(4), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Contracts), + interpreter.NewUnmeteredIntValueFromInt64(5), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Keys), + interpreter.NewUnmeteredIntValueFromInt64(6), + interpreter.NewUnmeteredTypeValue(interpreter.PrimitiveStaticTypeAccount_Inbox), + interpreter.NewUnmeteredIntValueFromInt64(7), + interpreter.NewUnmeteredTypeValue(accountReferenceType), + interpreter.NewUnmeteredIntValueFromInt64(8), + interpreter.NewUnmeteredTypeValue(interpreter.AccountKeyStaticType), + interpreter.NewUnmeteredIntValueFromInt64(9), + ), + interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredCapabilityValue( + 2, + interpreter.NewAddressValue(nil, address), + interpreter.NewReferenceStaticType(nil, entitlementAuthorization(), rResourceType), ), - interpreter.NewUnmeteredStringValue("auth_ref"), + ), + + // String value should be in the normalized form. + interpreter.NewUnmeteredStringValue("Caf\u00E9"), + + interpreter.NewUnmeteredCapabilityValue( + 2, + interpreter.NewAddressValue(nil, address), + interpreter.NewReferenceStaticType(nil, entitlementAuthorization(), rResourceType), ), interpreter.NewCompositeValue( mr.Interpreter, interpreter.EmptyLocationRange, - flowTokenLocation, - "FlowToken.Vault", + testContractLocation, + "Test.R", common.CompositeKindResource, []interpreter.CompositeField{ { - Value: interpreter.NewUnmeteredUFix64Value(0.001 * sema.Fix64Factor), - Name: "balance", - }, - { - Value: interpreter.NewUnmeteredUInt64Value(11240984669916758018), + Value: interpreter.NewUnmeteredUInt64Value(360287970189639680), Name: "uuid", }, }, address, ), + + // String value should be in the normalized form. 
+ interpreter.NewUnmeteredStringValue("Caf\u00E9"), + + interpreter.NewDictionaryValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeMetaType, + interpreter.PrimitiveStaticTypeInt, + ), + interpreter.NewUnmeteredTypeValue( + &interpreter.IntersectionStaticType{ + Types: []*interpreter.InterfaceStaticType{ + fooInterfaceType, + barInterfaceType, + }, + LegacyType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + ), + interpreter.NewUnmeteredIntValueFromInt64(1), + interpreter.NewUnmeteredTypeValue( + &interpreter.IntersectionStaticType{ + Types: []*interpreter.InterfaceStaticType{ + fooInterfaceType, + barInterfaceType, + bazInterfaceType, + }, + LegacyType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + ), + interpreter.NewUnmeteredIntValueFromInt64(2), + ), } require.Equal(t, len(expectedValues), len(values)) - // Order is non-deterministic, so do a greedy compare. - for _, value := range values { - found := false + for index, value := range values { actualValue := value.(interpreter.EquatableValue) - for i, expectedValue := range expectedValues { - if actualValue.Equal(mr.Interpreter, interpreter.EmptyLocationRange, expectedValue) { - expectedValues = append(expectedValues[:i], expectedValues[i+1:]...) - found = true - break - } - - } - if !found { - assert.Fail(t, fmt.Sprintf("extra item in actual values: %s", actualValue)) - } - } - - if len(expectedValues) != 0 { - assert.Fail(t, fmt.Sprintf("%d extra item(s) in expected values: %s", len(expectedValues), expectedValues)) + expectedValue := expectedValues[index] + + assert.True(t, + actualValue.Equal(mr.Interpreter, interpreter.EmptyLocationRange, expectedValue), + "values at index %d are not equal: %s != %s", + index, + actualValue, + expectedValue, + ) } } @@ -428,24 +766,17 @@ func checkReporters( ) } - newCadenceValueMigrationEntry := func( - migration, key string, - domain common.PathDomain, - ) cadenceValueMigrationEntry { - return cadenceValueMigrationEntry{ - StorageMapKey: interpreter.StringStorageMapKey(key), - StorageKey: interpreter.NewStorageKey( - nil, - address, - domain.Identifier(), - ), - Migration: migration, - } + var reporterNames []string + for reporterName := range rwf.reportWriters { + reporterNames = append(reporterNames, reporterName) } + sort.Strings(reporterNames) var accountReportEntries []valueMigrationReportEntry - for _, reportWriter := range rwf.reportWriters { + for _, reporterName := range reporterNames { + reportWriter := rwf.reportWriters[reporterName] + for _, entry := range reportWriter.entries { e, ok := entry.(valueMigrationReportEntry) @@ -457,63 +788,14 @@ func checkReporters( } } - acctTypedDictKeyMigrationReportEntry := newCadenceValueMigrationEntry( - "StaticTypeMigration", - "dictionary_with_account_type_keys", - common.PathDomainStorage) - - // Order is non-deterministic, so use 'ElementsMatch'. 
- assert.ElementsMatch( + assert.Equal( t, []valueMigrationReportEntry{ - newCadenceValueMigrationEntry( - "StringNormalizingMigration", - "string_value_1", - common.PathDomainStorage, - ), - newCadenceValueMigrationEntry( - "StaticTypeMigration", - "type_value", - common.PathDomainStorage, - ), - - // String keys in dictionary - newCadenceValueMigrationEntry( - "StringNormalizingMigration", - "dictionary_with_string_keys", - common.PathDomainStorage, - ), - - // Restricted typed keys in dictionary - newCadenceValueMigrationEntry( - "StaticTypeMigration", - "dictionary_with_restricted_typed_keys", - common.PathDomainStorage, - ), - newCadenceValueMigrationEntry( - "StaticTypeMigration", - "dictionary_with_restricted_typed_keys", - common.PathDomainStorage, - ), - - // Capabilities and links - cadenceValueMigrationEntry{ - StorageMapKey: interpreter.StringStorageMapKey("capability"), - StorageKey: interpreter.NewStorageKey( - nil, - address, - common.PathDomainStorage.Identifier(), - ), - Migration: "CapabilityValueMigration", - }, capabilityMigrationEntry{ AccountAddress: address, AddressPath: interpreter.AddressPath{ Address: address, - Path: interpreter.NewUnmeteredPathValue( - common.PathDomainPublic, - "linkR", - ), + Path: interpreter.PathValue{Identifier: "linkR", Domain: 0x3}, }, BorrowType: interpreter.NewReferenceStaticType( nil, @@ -521,26 +803,15 @@ func checkReporters( rResourceType, ), }, - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "capability", - common.PathDomainStorage, - ), - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "linkR", - common.PathDomainPublic, - ), - - // untyped capability + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("capability"), + Migration: "CapabilityValueMigration", + }, capabilityMigrationEntry{ AccountAddress: address, AddressPath: interpreter.AddressPath{ - Address: address, - Path: interpreter.NewUnmeteredPathValue( - common.PathDomainPublic, - "linkR", - ), + Address: address, Path: interpreter.PathValue{Identifier: "linkR", Domain: 0x3}, }, BorrowType: interpreter.NewReferenceStaticType( nil, @@ -548,97 +819,149 @@ func checkReporters( rResourceType, ), }, - newCadenceValueMigrationEntry( - "CapabilityValueMigration", - "untyped_capability", - common.PathDomainStorage, - ), - - // Account-typed keys in dictionary - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - acctTypedDictKeyMigrationReportEntry, - - // Entitled typed keys in dictionary - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "dictionary_with_auth_reference_typed_key", - common.PathDomainStorage, - ), - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "dictionary_with_reference_typed_key", - common.PathDomainStorage, - ), - - // Entitlements in links - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "flowTokenReceiver", - common.PathDomainPublic, - ), - newCadenceValueMigrationEntry( - "EntitlementsMigration", - "flowTokenBalance", - common.PathDomainPublic, - ), - - // Cap cons - + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: 
interpreter.StringStorageMapKey("untyped_capability"), + Migration: "CapabilityValueMigration", + }, linkMigrationEntry{ AccountAddressPath: interpreter.AddressPath{ Address: address, - Path: interpreter.PathValue{ - Identifier: "flowTokenReceiver", - Domain: common.PathDomainPublic, - }, + Path: interpreter.PathValue{Identifier: "flowTokenReceiver", Domain: 0x3}, }, - CapabilityID: 1, + CapabilityID: 0x1, + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("flowTokenReceiver"), + Migration: "LinkValueMigration", }, - newCadenceValueMigrationEntry( - "LinkValueMigration", - "flowTokenReceiver", - common.PathDomainPublic, - ), - linkMigrationEntry{ AccountAddressPath: interpreter.AddressPath{ Address: address, - Path: interpreter.PathValue{ - Identifier: "linkR", - Domain: common.PathDomainPublic, - }, + Path: interpreter.PathValue{Identifier: "linkR", Domain: 0x3}, }, - CapabilityID: 2, + CapabilityID: 0x2, + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("linkR"), + Migration: "LinkValueMigration", }, - newCadenceValueMigrationEntry( - "LinkValueMigration", - "linkR", - common.PathDomainPublic, - ), - linkMigrationEntry{ AccountAddressPath: interpreter.AddressPath{ Address: address, - Path: interpreter.PathValue{ - Identifier: "flowTokenBalance", - Domain: common.PathDomainPublic, - }, + Path: interpreter.PathValue{Identifier: "flowTokenBalance", Domain: 0x3}, }, - CapabilityID: 3, + CapabilityID: 0x3, + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("flowTokenBalance"), + Migration: "LinkValueMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_string_keys"), + Migration: "StringNormalizingMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_auth_reference_typed_key"), + Migration: "EntitlementsMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_reference_typed_key"), + Migration: "EntitlementsMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("type_value"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, 
+ StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_account_type_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("capability"), + Migration: "EntitlementsMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("string_value_1"), + Migration: "StringNormalizingMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "storage", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("dictionary_with_restricted_typed_keys"), + Migration: "StaticTypeMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("flowTokenReceiver"), + Migration: "EntitlementsMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("linkR"), + Migration: "EntitlementsMigration", + }, + cadenceValueMigrationEntry{ + StorageKey: interpreter.StorageKey{Key: "public", Address: address}, + StorageMapKey: interpreter.StringStorageMapKey("flowTokenBalance"), + Migration: "EntitlementsMigration", }, - newCadenceValueMigrationEntry( - "LinkValueMigration", - "flowTokenBalance", - common.PathDomainPublic, - ), }, + accountReportEntries, ) } diff --git a/cmd/util/ledger/migrations/change_contract_code_migration.go b/cmd/util/ledger/migrations/change_contract_code_migration.go index 3b58154b6ba..5b22b3fd454 100644 --- a/cmd/util/ledger/migrations/change_contract_code_migration.go +++ b/cmd/util/ledger/migrations/change_contract_code_migration.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/cadence/runtime/common" coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" + nftStorefrontContracts "github.com/onflow/nft-storefront/lib/go/contracts" "github.com/rs/zerolog" "github.com/onflow/flow-go/cmd/util/ledger/reporters" @@ 
-243,6 +244,22 @@ func SystemContractChanges(chainID flow.ChainID, options SystemContractsMigratio ) } + if chainID == flow.Testnet { + contractChanges = append( + contractChanges, + StagedContract{ + Address: common.Address(flow.HexToAddress("0x2d55b98eb200daef")), + Contract: Contract{ + Name: "NFTStorefrontV2", + Code: nftStorefrontContracts.NFTStorefrontV2( + systemContracts.FungibleToken.Address.Hex(), + systemContracts.NonFungibleToken.Address.Hex(), + ), + }, + }, + ) + } + return contractChanges } diff --git a/cmd/util/ledger/migrations/contract_checking_migration.go b/cmd/util/ledger/migrations/contract_checking_migration.go index 432f1681a5d..d891ed5c966 100644 --- a/cmd/util/ledger/migrations/contract_checking_migration.go +++ b/cmd/util/ledger/migrations/contract_checking_migration.go @@ -2,6 +2,7 @@ package migrations import ( "fmt" + "sort" "strings" "github.com/onflow/cadence/runtime/common" @@ -41,7 +42,13 @@ func NewContractCheckingMigration( // Gather all contracts - contractsByLocation := make(map[common.Location][]byte, contractCountEstimate) + contractsForPrettyPrinting := make(map[common.Location][]byte, contractCountEstimate) + + type contract struct { + location common.AddressLocation + code []byte + } + contracts := make([]contract, 0, contractCountEstimate) err = registersByAccount.ForEach(func(owner string, key string, value []byte) error { @@ -58,7 +65,15 @@ func NewContractCheckingMigration( Name: contractName, } - contractsByLocation[location] = code + contracts = append( + contracts, + contract{ + location: location, + code: code, + }, + ) + + contractsForPrettyPrinting[location] = code return nil }) @@ -66,9 +81,18 @@ func NewContractCheckingMigration( return fmt.Errorf("failed to iterate over registers: %w", err) } + sort.Slice(contracts, func(i, j int) bool { + a := contracts[i] + b := contracts[j] + return a.location.ID() < b.location.ID() + }) + // Check all contracts - for location, code := range contractsByLocation { + for _, contract := range contracts { + location := contract.location + code := contract.code + log.Info().Msgf("checking contract %s ...", location) // Check contract code @@ -80,7 +104,7 @@ func NewContractCheckingMigration( var builder strings.Builder errorPrinter := pretty.NewErrorPrettyPrinter(&builder, false) - printErr := errorPrinter.PrettyPrintError(err, location, contractsByLocation) + printErr := errorPrinter.PrettyPrintError(err, location, contractsForPrettyPrinting) var errorDetails string if printErr == nil { @@ -89,8 +113,6 @@ func NewContractCheckingMigration( errorDetails = err.Error() } - addressLocation := location.(common.AddressLocation) - if verboseErrorOutput { log.Error().Msgf( "error checking contract %s: %s", @@ -100,8 +122,8 @@ func NewContractCheckingMigration( } reporter.Write(contractCheckingFailure{ - AccountAddressHex: addressLocation.Address.HexWithPrefix(), - ContractName: addressLocation.Name, + AccountAddressHex: location.Address.HexWithPrefix(), + ContractName: location.Name, Error: errorDetails, }) diff --git a/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go index 38ef02e10cb..6826c15896c 100644 --- a/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go +++ b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "path" + "sort" "strings" "sync" "time" @@ -128,7 +129,20 @@ func (m *FilterUnreferencedSlabsMigration) MigrateAccount( Str("account", 
address.HexWithPrefix()). Msgf("filtering %d unreferenced slabs", len(unreferencedSlabIDs)) + var storageIDs []atree.StorageID for storageID := range unreferencedSlabIDs { + storageIDs = append(storageIDs, storageID) + } + sort.Slice( + storageIDs, + func(i, j int) bool { + a := storageIDs[i] + b := storageIDs[j] + return a.Compare(b) < 0 + }, + ) + + for _, storageID := range storageIDs { owner, key := registerFromStorageID(storageID) value, err := accountRegisters.Get(owner, key) diff --git a/cmd/util/ledger/migrations/fix_broken_data_migration.go b/cmd/util/ledger/migrations/fix_broken_data_migration.go index e3938dd3ae6..03ed27cdb7b 100644 --- a/cmd/util/ledger/migrations/fix_broken_data_migration.go +++ b/cmd/util/ledger/migrations/fix_broken_data_migration.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path" + "sort" "sync" "time" @@ -215,7 +216,10 @@ func getAtreePayloadsByID( ) { outputPayloads := make([]*ledger.Payload, 0, len(ids)) - err := registers.ForEach(func(owner string, key string, value []byte) error { + owner := registers.Owner() + + keys := make([]string, 0, len(ids)) + err := registers.ForEachKey(func(key string) error { if !flow.IsSlabIndexKey(key) { return nil @@ -231,17 +235,28 @@ func getAtreePayloadsByID( return nil } + keys = append(keys, key) + + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(keys) + + for _, key := range keys { + value, err := registers.Get(owner, key) + if err != nil { + return nil, err + } + ledgerKey := convert.RegisterIDToLedgerKey(flow.RegisterID{ Owner: owner, Key: key, }) payload := ledger.NewPayload(ledgerKey, value) outputPayloads = append(outputPayloads, payload) - - return nil - }) - if err != nil { - return nil, err } return outputPayloads, nil diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go index 3ec22388ddd..3185bdbd2f5 100644 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ b/cmd/util/ledger/migrations/migrator_runtime.go @@ -148,12 +148,7 @@ func NewInterpreterMigrationRuntime( return nil, fmt.Errorf("failed to create runtime interface: %w", err) } - evmContractAccountAddress, err := evm.ContractAccountAddress(chainID) - if err != nil { - return nil, fmt.Errorf("failed to get EVM contract account address for chain %s: %w", chainID, err) - } - - evmStdlib.SetupEnvironment(env, nil, evmContractAccountAddress) + evmStdlib.SetupEnvironment(env, nil, evm.ContractAccountAddress(chainID)) env.Configure( runtimeInterface, diff --git a/cmd/util/ledger/migrations/staged_contracts_migration.go b/cmd/util/ledger/migrations/staged_contracts_migration.go index 80b8646d477..ad5ed0f6fed 100644 --- a/cmd/util/ledger/migrations/staged_contracts_migration.go +++ b/cmd/util/ledger/migrations/staged_contracts_migration.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "sort" "strings" "sync" @@ -425,7 +426,14 @@ func (m *StagedContractsMigration) MigrateAccount( return nil } - for name, contract := range contractUpdates { + var contractNames []string + for name := range contractUpdates { + contractNames = append(contractNames, name) + } + sort.Strings(contractNames) + + for _, name := range contractNames { + contract := contractUpdates[name] owner := string(address[:]) key := flow.ContractKey(name) diff --git a/cmd/util/ledger/util/registers/registers.go b/cmd/util/ledger/util/registers/registers.go index eb421a57bc1..0ac77096753 100644 --- a/cmd/util/ledger/util/registers/registers.go +++ b/cmd/util/ledger/util/registers/registers.go @@ -277,6 +277,16 @@ func 
(a *AccountRegisters) Merge(other *AccountRegisters) error { return nil } +func (a *AccountRegisters) ForEachKey(f func(key string) error) error { + for key := range a.registers { + err := f(key) + if err != nil { + return err + } + } + return nil +} + func NewAccountRegistersFromPayloads(owner string, payloads []*ledger.Payload) (*AccountRegisters, error) { accountRegisters := NewAccountRegisters(owner) diff --git a/consensus/integration/epoch_test.go b/consensus/integration/epoch_test.go index 2a96a1c9d72..ae56e01befd 100644 --- a/consensus/integration/epoch_test.go +++ b/consensus/integration/epoch_test.go @@ -188,6 +188,7 @@ func TestEpochTransition_IdentitiesDisjoint(t *testing.T) { // withNextEpoch adds a valid next epoch with the given identities to the input // snapshot. Also sets the length of the first (current) epoch to curEpochViews. +// NOTE: the input initial snapshot must be a spork root snapshot. // // We make the first (current) epoch start in committed phase so we can transition // to the next epoch upon reaching the appropriate view without any further changes @@ -205,6 +206,9 @@ func withNextEpoch( // convert to encodable representation for simple modification encodableSnapshot := snapshot.Encodable() + rootResult, rootSeal, err := snapshot.SealedResult() + require.NoError(t, err) + require.Len(t, encodableSnapshot.SealingSegment.Blocks, 1, "function `withNextEpoch` only works for spork-root/genesis snapshots") rootProtocolState := encodableSnapshot.SealingSegment.LatestProtocolStateEntry() epochProtocolState := rootProtocolState.EpochEntry @@ -236,7 +240,7 @@ func withNextEpoch( ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(nextEpochIdentities), } // Re-construct epoch protocol state with modified events (constructs ActiveIdentity fields) - epochProtocolState, err := flow.NewRichProtocolStateEntry( + epochProtocolState, err = flow.NewRichProtocolStateEntry( epochProtocolState.ProtocolStateEntry, epochProtocolState.PreviousEpochSetup, epochProtocolState.PreviousEpochCommit, currEpochSetup, currEpochCommit, @@ -258,16 +262,18 @@ func withNextEpoch( } // Since we modified the root protocol state, we need to update the root block's ProtocolStateID field. + // rootBlock is a pointer, so mutations apply to Snapshot rootBlock := encodableSnapshot.SealingSegment.Blocks[0] rootBlockPayload := rootBlock.Payload rootBlockPayload.ProtocolStateID = rootKVStore.ID() rootBlock.SetPayload(*rootBlockPayload) // Since we changed the root block, we need to update the QC, root result, and root seal. 
- encodableSnapshot.LatestResult.BlockID = rootBlock.ID() - encodableSnapshot.LatestSeal.ResultID = encodableSnapshot.LatestResult.ID() - encodableSnapshot.LatestSeal.BlockID = rootBlock.ID() + // rootResult and rootSeal are pointers, so mutations apply to Snapshot + rootResult.BlockID = rootBlock.ID() + rootSeal.ResultID = rootResult.ID() + rootSeal.BlockID = rootBlock.ID() encodableSnapshot.SealingSegment.LatestSeals = map[flow.Identifier]flow.Identifier{ - rootBlock.ID(): encodableSnapshot.LatestSeal.ID(), + rootBlock.ID(): rootSeal.ID(), } encodableSnapshot.QuorumCertificate = createQC(rootBlock) diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index 5f869821ea1..b1e35bcb642 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -312,24 +312,11 @@ func (e *Engine) dispatchRequest() (bool, error) { return false, fmt.Errorf("could not get providers: %w", err) } - // randomize order of items, so that they can be requested in different order each time - rndItems := make([]flow.Identifier, 0, len(e.items)) - for k := range e.items { - rndItems = append(rndItems, e.items[k].EntityID) - } - err = rand.Shuffle(uint(len(rndItems)), func(i, j uint) { - rndItems[i], rndItems[j] = rndItems[j], rndItems[i] - }) - if err != nil { - return false, fmt.Errorf("shuffle failed: %w", err) - } - // go through each item and decide if it should be requested again now := time.Now().UTC() var providerID flow.Identifier var entityIDs []flow.Identifier - for _, entityID := range rndItems { - item := e.items[entityID] + for entityID, item := range e.items { // if the item should not be requested yet, ignore cutoff := item.LastRequested.Add(item.RetryAfter) @@ -363,15 +350,18 @@ func (e *Engine) dispatchRequest() (bool, error) { // order is random and will skip the item most of the times // when other items are available if providerID == flow.ZeroID { - providers = providers.Filter(item.ExtraSelector) - if len(providers) == 0 { - return false, fmt.Errorf("no valid providers available") + filteredProviders := providers.Filter(item.ExtraSelector) + if len(filteredProviders) == 0 { + return false, fmt.Errorf("no valid providers available for item %s, total providers: %v", entityID.String(), len(providers)) } - id, err := providers.Sample(1) + // randomly select a provider from the filtered set + // to send as many item requests as possible. + id, err := filteredProviders.Sample(1) if err != nil { return false, fmt.Errorf("sampling failed: %w", err) } providerID = id[0].NodeID + providers = filteredProviders } // add item to list and set retry parameters diff --git a/engine/consensus/matching/core.go b/engine/consensus/matching/core.go index ceadfa71254..910803d1094 100644 --- a/engine/consensus/matching/core.go +++ b/engine/consensus/matching/core.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" @@ -180,12 +181,18 @@ func (c *Core) processReceipt(receipt *flow.ExecutionReceipt) (bool, error) { Hex("initial_state", initialState[:]). Hex("final_state", finalState[:]).Logger() - // if the receipt is for an unknown block, skip it. It will be re-requested - later by `requestPending` function. + // If the receipt is for an unknown block, skip it. 
+ // Reasoning: If this is an honest receipt, this replica is behind. Chances are high that other leaders will + // already have included the receipt by the time this replica has caught up. If we still need the receipt by + // the time this replica has caught up, it will be re-requested later by `requestPending` function. If it is + // a malicious receipt, discarding it is advantageous for mitigating spamming and resource exhaustion attacks. executedBlock, err := c.headersDB.ByBlockID(receipt.ExecutionResult.BlockID) if err != nil { - log.Debug().Msg("discarding receipt for unknown block") - return false, nil + if errors.Is(err, storage.ErrNotFound) { + log.Debug().Msg("dropping execution receipt for unknown block") + return false, nil + } + return false, irrecoverable.NewExceptionf("encountered unexpected storage error attempting to retrieve block %v: %w", receipt.ExecutionResult.BlockID, err) } log = log.With(). @@ -208,31 +215,28 @@ func (c *Core) processReceipt(receipt *flow.ExecutionReceipt) (bool, error) { childSpan := c.tracer.StartSpanFromParent(receiptSpan, trace.CONMatchProcessReceiptVal) err = c.receiptValidator.Validate(receipt) childSpan.End() - - if engine.IsUnverifiableInputError(err) { - // If previous result is missing, we can't validate this receipt. - // Although we will request its previous receipt(s), - // we don't want to drop it now, because when the missing previous arrive - // in a wrong order, they will still be dropped, and causing the catch up - // to be inefficient. - // Instead, we cache the receipt in case it arrives earlier than its - // previous receipt. - // For instance, given blocks A <- B <- C <- D <- E, if we receive their receipts - // in the order of [E,C,D,B,A], then: - // if we drop the missing previous receipts, then only A will be processed; - // if we cache the missing previous receipts, then all of them will be processed, because - // once A is processed, we will check if there is a child receipt pending, - // if yes, then process it. - c.pendingReceipts.Add(receipt) - log.Info().Msg("receipt is cached because its previous result is missing") - return false, nil - } - if err != nil { + if module.IsUnknownResultError(err) { + // Previous result is missing. Hence, we can't validate this receipt. + // We want to efficiently handle receipts arriving out of order. Therefore, we cache the + // receipt in `c.pendingReceipts`. On finalization of new blocks, we request receipts + // for all unsealed but finalized blocks. For instance, given blocks + // A <- B <- C <- D <- E, if we receive their receipts in the order of [E,C,D,B,A], then: + // - If we drop the missing previous receipts, then only A will be processed. + // - If we cache the missing previous receipts, then all of them will be processed, because once + // A is processed, we will check if there is a child receipt pending, if yes, then process it. + c.pendingReceipts.Add(receipt) + log.Debug().Msg("receipt is cached because its previous result is missing") + return false, nil + } if engine.IsInvalidInputError(err) { - log.Err(err).Msg("invalid execution receipt") + log.Err(err).Bool(logging.KeyProtocolViolation, true).Msg("invalid execution receipt") return false, nil } + if module.IsUnknownBlockError(err) { // This should never happen + // Above, we successfully retrieved the `executedBlock`. Hence, `UnknownBlockError` here means our state is corrupted! 
+ return false, irrecoverable.NewExceptionf("internal state corruption detected when validating receipt %v for block %v: %w", receipt.ID(), receipt.BlockID, err) + } return false, fmt.Errorf("failed to validate execution receipt: %w", err) } diff --git a/engine/consensus/matching/core_test.go b/engine/consensus/matching/core_test.go index 6097c4422ac..18af22bde40 100644 --- a/engine/consensus/matching/core_test.go +++ b/engine/consensus/matching/core_test.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" @@ -154,7 +155,7 @@ func (ms *MatchingSuite) TestOnReceiptValid() { ms.ReceiptsDB.AssertExpectations(ms.T()) } -// TestOnReceiptInvalid tests that we reject receipts that don't pass the ReceiptValidator +// TestOnReceiptInvalid tests handling of receipts that the ReceiptValidator detects as violating the protocol func (ms *MatchingSuite) TestOnReceiptInvalid() { // we use the same Receipt as in TestOnReceiptValid to ensure that the sealing Core is not // rejecting the receipt for any other reason @@ -166,13 +167,38 @@ func (ms *MatchingSuite) TestOnReceiptInvalid() { // check that _expected_ failure case of invalid receipt is handled without error ms.receiptValidator.On("Validate", receipt).Return(engine.NewInvalidInputError("")).Once() - _, err := ms.core.processReceipt(receipt) + wasAdded, err := ms.core.processReceipt(receipt) ms.Require().NoError(err, "invalid receipt should be dropped but not error") + ms.Require().False(wasAdded, "invalid receipt should not be added") + ms.receiptValidator.AssertExpectations(ms.T()) + ms.ReceiptsDB.AssertNumberOfCalls(ms.T(), "Store", 0) +} - // check that _unexpected_ failure case causes the error to be escalated +// TestOnReceiptValidatorExceptions tests that matching.Core escalates unexpected errors and exceptions. +// We expect that such errors are *not* interpreted as the receipt being invalid. +func (ms *MatchingSuite) TestOnReceiptValidatorExceptions() { + // we use the same Receipt as in TestOnReceiptValid to ensure that the sealing Core is not rejecting the receipt for any other reason + originID := ms.ExeID + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.UnfinalizedBlock))), + ) + + // Check that _unexpected_ failure causes the error to be escalated and is *not* interpreted as an invalid receipt. + ms.receiptValidator.On("Validate", receipt).Return(fmt.Errorf("")).Once() + _, err := ms.core.processReceipt(receipt) + ms.Require().Error(err, "unexpected errors should be escalated") + ms.Require().False(engine.IsInvalidInputError(err), "exceptions should not be misinterpreted as an invalid receipt") + + // Check that an `UnknownBlockError` causes the error to be escalated and is *not* interpreted as an invalid receipt. + // Reasoning: For attack resilience, we should discard outdated receipts based on the height of the executed block, _before_ we + // run the expensive receipt validation. Therefore, matching.Core should retrieve the executed block before calling into the + // ReceiptValidator. Hence, if matching.Core finds the executed block, but `ReceiptValidator.Validate(..)` errors saying that + // the executed block is unknown, our state is corrupted or we have a severe internal bug.
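Taken together, `processReceipt` now sorts validator errors into three buckets: a missing parent result caches the receipt, a protocol violation logs and drops it, and an unknown-block error or any other failure is escalated as irrecoverable. Why caching beats dropping for out-of-order arrival can be seen in the following standalone sketch (hypothetical types and IDs, not the engine's actual mempool), which replays the A <- B <- C <- D <- E scenario described in matching/core.go above:

package main

import "fmt"

// receipt is a stand-in for an execution receipt: it carries its own result ID and
// the ID of the parent result it builds on ("" marks the already-known root result).
type receipt struct {
	resultID string
	parentID string
}

func main() {
	// Receipts for blocks A <- B <- C <- D <- E, delivered out of order as [E, C, D, B, A].
	delivered := []receipt{{"E", "D"}, {"C", "B"}, {"D", "C"}, {"B", "A"}, {"A", ""}}

	validated := map[string]bool{"": true} // the parent of A counts as already known
	pending := map[string][]receipt{}      // receipts cached under their missing parent result

	var process func(r receipt)
	process = func(r receipt) {
		if !validated[r.parentID] {
			// analogous to c.pendingReceipts.Add(receipt): cache instead of dropping
			pending[r.parentID] = append(pending[r.parentID], r)
			return
		}
		validated[r.resultID] = true
		fmt.Println("processed", r.resultID)
		// analogous to re-checking child receipts once their parent result became known
		for _, child := range pending[r.resultID] {
			process(child)
		}
		delete(pending, r.resultID)
	}

	for _, r := range delivered {
		process(r)
	}
	// Prints A, B, C, D, E: with caching, all five receipts are processed without re-requests;
	// with dropping, only A would survive this delivery order.
}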
+ ms.receiptValidator.On("Validate", receipt).Return(module.NewUnknownBlockError("")).Once() _, err = ms.core.processReceipt(receipt) ms.Require().Error(err, "unexpected errors should be escalated") + ms.Require().False(engine.IsInvalidInputError(err), "exceptions should not be misinterpreted as an invalid receipt") ms.receiptValidator.AssertExpectations(ms.T()) ms.ReceiptsDB.AssertNumberOfCalls(ms.T(), "Store", 0) @@ -192,7 +218,7 @@ func (ms *MatchingSuite) TestOnUnverifiableReceipt() { ms.PendingReceipts.On("Add", receipt).Return(false).Once() // check that _expected_ failure case of invalid receipt is handled without error - ms.receiptValidator.On("Validate", receipt).Return(engine.NewUnverifiableInputError("missing parent result")).Once() + ms.receiptValidator.On("Validate", receipt).Return(module.NewUnknownResultError("missing parent result")).Once() wasAdded, err := ms.core.processReceipt(receipt) ms.Require().NoError(err, "unverifiable receipt should be cached but not error") ms.Require().False(wasAdded, "unverifiable receipt should be cached but not added to the node's validated information") diff --git a/engine/execution/ingestion/fetcher/fetcher.go b/engine/execution/ingestion/fetcher/fetcher.go index 4c525570d14..af5fb9bfdbc 100644 --- a/engine/execution/ingestion/fetcher/fetcher.go +++ b/engine/execution/ingestion/fetcher/fetcher.go @@ -12,7 +12,7 @@ import ( "github.com/onflow/flow-go/state/protocol" ) -var onlyOnflowRegex = regexp.MustCompile(`.*\.onflow\.org:3569$`) +var onlyOnflowRegex = regexp.MustCompile(`.*\.(onflow\.org|dapper-flow\.com):3569$`) type CollectionFetcher struct { log zerolog.Logger @@ -71,6 +71,10 @@ func (e *CollectionFetcher) FetchCollection(blockID flow.Identifier, height uint }) } + e.log.Debug().Bool("onflowOnlyLNs", e.onflowOnlyLNs). 
+ Msgf("queued collection %v for block %v, height %v from guarantors: %v", + guarantee.ID(), blockID, height, guarantors) + // queue the collection to be requested from one of the guarantors e.request.EntityByID(guarantee.ID(), filter.And( filters..., diff --git a/fvm/evm/evm.go b/fvm/evm/evm.go index b0db385e751..7f8956285fd 100644 --- a/fvm/evm/evm.go +++ b/fvm/evm/evm.go @@ -13,50 +13,32 @@ import ( "github.com/onflow/flow-go/model/flow" ) -func ContractAccountAddress(chainID flow.ChainID) (flow.Address, error) { - sc := systemcontracts.SystemContractsForChain(chainID) - return sc.EVMContract.Address, nil +func ContractAccountAddress(chainID flow.ChainID) flow.Address { + return systemcontracts.SystemContractsForChain(chainID).EVMContract.Address } -func StorageAccountAddress(chainID flow.ChainID) (flow.Address, error) { - sc := systemcontracts.SystemContractsForChain(chainID) - return sc.EVMStorage.Address, nil -} - -func RandomBeaconAddress(chainID flow.ChainID) flow.Address { - return systemcontracts.SystemContractsForChain(chainID).RandomBeaconHistory.Address +func StorageAccountAddress(chainID flow.ChainID) flow.Address { + return systemcontracts.SystemContractsForChain(chainID).EVMStorage.Address } func SetupEnvironment( chainID flow.ChainID, fvmEnv environment.Environment, runtimeEnv runtime.Environment, - flowToken flow.Address, ) error { - evmStorageAccountAddress, err := StorageAccountAddress(chainID) - if err != nil { - return err - } - - evmContractAccountAddress, err := ContractAccountAddress(chainID) - if err != nil { - return err - } - - randomBeaconAddress := RandomBeaconAddress(chainID) + sc := systemcontracts.SystemContractsForChain(chainID) + randomBeaconAddress := sc.RandomBeaconHistory.Address + flowTokenAddress := sc.FlowToken.Address backend := backends.NewWrappedEnvironment(fvmEnv) - - emulator := evm.NewEmulator(backend, evmStorageAccountAddress) - - blockStore := handler.NewBlockStore(backend, evmStorageAccountAddress) - + emulator := evm.NewEmulator(backend, StorageAccountAddress(chainID)) + blockStore := handler.NewBlockStore(backend, StorageAccountAddress(chainID)) addressAllocator := handler.NewAddressAllocator() contractHandler := handler.NewContractHandler( chainID, - evmContractAccountAddress, - common.Address(flowToken), + ContractAccountAddress(chainID), + common.Address(flowTokenAddress), randomBeaconAddress, blockStore, addressAllocator, @@ -67,7 +49,7 @@ func SetupEnvironment( stdlib.SetupEnvironment( runtimeEnv, contractHandler, - evmContractAccountAddress, + ContractAccountAddress(chainID), ) return nil diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index 0e5dc04eb94..9983faa4efc 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -2196,8 +2196,7 @@ func RunWithNewEnvironment( *EOATestAccount, ), ) { - rootAddr, err := evm.StorageAccountAddress(chain.ChainID()) - require.NoError(t, err) + rootAddr := evm.StorageAccountAddress(chain.ChainID()) RunWithTestBackend(t, func(backend *TestBackend) { RunWithDeployedContract(t, GetStorageTestContract(t), backend, rootAddr, func(testContract *TestContract) { diff --git a/fvm/script.go b/fvm/script.go index 28067cfc1bd..aef8b391e19 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -13,7 +13,6 @@ import ( "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/logical" - "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/hash" ) @@ -202,12 +201,10 @@ func (executor 
*scriptExecutor) executeScript() error { if executor.ctx.EVMEnabled { chain := executor.ctx.Chain - sc := systemcontracts.SystemContractsForChain(chain.ChainID()) err := evm.SetupEnvironment( chain.ChainID(), executor.env, rt.ScriptRuntimeEnv, - sc.FlowToken.Address, ) if err != nil { return err diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 5e05b9016d3..0958eefe5b9 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -18,7 +18,6 @@ import ( "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" - "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/module/trace" ) @@ -185,12 +184,10 @@ func (executor *transactionExecutor) preprocessTransactionBody() error { // setup evm if executor.ctx.EVMEnabled { chain := executor.ctx.Chain - sc := systemcontracts.SystemContractsForChain(chain.ChainID()) err := evm.SetupEnvironment( chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - sc.FlowToken.Address, ) if err != nil { return err @@ -244,12 +241,10 @@ func (executor *transactionExecutor) ExecuteTransactionBody() error { // setup evm if executor.ctx.EVMEnabled { chain := executor.ctx.Chain - sc := systemcontracts.SystemContractsForChain(chain.ChainID()) err := evm.SetupEnvironment( chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - sc.FlowToken.Address, ) if err != nil { return err diff --git a/go.mod b/go.mod index 7760d594b1f..c7efb0d8a96 100644 --- a/go.mod +++ b/go.mod @@ -105,6 +105,7 @@ require ( github.com/ipfs/boxo v0.17.1-0.20240131173518-89bceff34bf1 github.com/mitchellh/mapstructure v1.5.0 github.com/onflow/go-ethereum v1.13.4 + github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 github.com/onflow/wal v1.0.2 github.com/slok/go-http-metrics v0.10.0 github.com/sony/gobreaker v0.5.0 diff --git a/go.sum b/go.sum index 9ce155d8cc4..709611df6ef 100644 --- a/go.sum +++ b/go.sum @@ -2184,6 +2184,8 @@ github.com/onflow/flow/protobuf/go/flow v0.4.3 h1:gdY7Ftto8dtU+0wI+6ZgW4oE+z0DSD github.com/onflow/flow/protobuf/go/flow v0.4.3/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= github.com/onflow/sdks v0.5.1-0.20230912225508-b35402f12bba h1:rIehuhO6bj4FkwE4VzwEjX7MoAlOhUJENBJLqDqVxAo= github.com/onflow/sdks v0.5.1-0.20230912225508-b35402f12bba/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= github.com/onflow/wal v1.0.2 h1:5bgsJVf2O3cfMNK12fiiTyYZ8cOrUiELt3heBJfHOhc= diff --git a/integration/go.mod b/integration/go.mod index adebd67c13a..eb533a1e8b4 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -11,7 +11,7 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.4 github.com/docker/docker v24.0.6+incompatible github.com/docker/go-connections v0.4.0 - github.com/go-git/go-git/v5 v5.5.2 + github.com/go-git/go-git/v5 v5.11.0 github.com/go-yaml/yaml v2.1.0+incompatible github.com/gorilla/websocket v1.5.0 github.com/ipfs/boxo v0.17.1-0.20240131173518-89bceff34bf1 @@ -50,14 +50,14 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect 
cloud.google.com/go/storage v1.37.0 // indirect + dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect github.com/apache/arrow/go/v14 v14.0.2 // indirect github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect @@ -82,7 +82,7 @@ require ( github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cloudflare/circl v1.1.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect @@ -95,6 +95,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect @@ -125,8 +126,8 @@ require ( github.com/gammazero/workerpool v1.1.2 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.4.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -169,7 +170,6 @@ require ( github.com/huandu/go-clone v1.7.2 // indirect github.com/huandu/go-clone/generic v1.7.2 // indirect github.com/huin/goupnp v1.3.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-block-format v0.2.0 // indirect @@ -257,7 +257,7 @@ require ( github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect - github.com/pjbgf/sha1cd v0.2.3 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect @@ -272,7 +272,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rootless-containers/rootlesskit v1.1.1 // indirect github.com/schollz/progressbar/v3 v3.13.1 // indirect github.com/sergi/go-diff v1.2.0 // indirect @@ -280,7 +280,7 @@ require ( github.com/shirou/gopsutil 
v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/shirou/gopsutil/v3 v3.22.2 // indirect github.com/sirupsen/logrus v1.9.2 // indirect - github.com/skeema/knownhosts v1.1.0 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect github.com/slok/go-http-metrics v0.10.0 // indirect github.com/sony/gobreaker v0.5.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/integration/go.sum b/integration/go.sum index 31056296efa..b36c47e15fa 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -935,6 +935,8 @@ cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvo cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= @@ -986,8 +988,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= -github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I= -github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc h1:DCHzPQOcU/7gwDTWbFQZc5qHMPS1g0xTO56k8NXsv9M= github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc/go.mod h1:LJM5a3zcIJ/8TmZwlUczvROEJT8ntOdhdG9jjcR1B0I= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= @@ -1001,8 +1003,6 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes= github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -1020,7 
+1020,6 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= @@ -1031,7 +1030,6 @@ github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybF github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= @@ -1135,7 +1133,7 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytecodealliance/wasmtime-go/v7 v7.0.0/go.mod h1:bu6fic7trDt20w+LMooX7j3fsOwv4/ln6j8gAdP6vmA= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY= @@ -1159,8 +1157,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= -github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cloudflare/cloudflare-go v0.79.0/go.mod h1:gkHQf9xEubaQPEuerBuoinR9P8bf8a05Lq0X6WKy1Oc= github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -1240,6 +1238,8 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724 h1:zOOpPLu5VvH8ixyoDWHnQHWoEHtryT1ne31vwz0G7Fo= github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724/go.mod h1:U0cEcbf9hAwPSuuoPVqXKhcWV+IU4CStK75cJ52f2/A= @@ -1323,6 +1323,7 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZi github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -1404,7 +1405,6 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -1418,15 +1418,13 @@ github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= github.com/go-fonts/liberation v0.3.0/go.mod h1:jdJ+cqF+F4SUL2V+qxBth8fvBpBDS7yloUL5Fi8GTGY= github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE= -github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= -github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ= -github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod 
h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= -github.com/go-git/go-git/v5 v5.5.2 h1:v8lgZa5k9ylUw+OR/roJHTxR4QItsNFI5nKtAXFuynw= -github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -1738,8 +1736,6 @@ github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -1816,7 +1812,6 @@ github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZl github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -1981,8 +1976,6 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/matryer/is v1.2.0 
h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= @@ -2214,8 +2207,8 @@ github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI= -github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -2305,8 +2298,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rootless-containers/rootlesskit v1.1.1 h1:F5psKWoWY9/VjZ3ifVcaosjvFZJOagX85U22M0/EQZE= github.com/rootless-containers/rootlesskit v1.1.1/go.mod h1:UD5GoA3dqKCJrnvnhVgQQnweMF2qZnf9KLw8EewcMZI= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -2365,8 +2358,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= -github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/slok/go-http-metrics v0.10.0 h1:rh0LaYEKza5eaYRGDXujKrOln57nHBi4TtVhmNEpbgM= github.com/slok/go-http-metrics v0.10.0/go.mod h1:lFqdaS4kWMfUKCSukjC47PdCeTk+hXDUVm8kLHRqJ38= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -2621,12 +2614,10 @@ 
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= @@ -2779,7 +2770,6 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= @@ -2985,7 +2975,6 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3008,7 +2997,6 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term 
v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= diff --git a/model/flow/execution_result.go b/model/flow/execution_result.go index 8bf94afd7bc..a24af0be53c 100644 --- a/model/flow/execution_result.go +++ b/model/flow/execution_result.go @@ -54,7 +54,8 @@ func (er ExecutionResult) ValidateChunksLength() bool { // FinalStateCommitment returns the Execution Result's commitment to the final // execution state of the block, i.e. the last chunk's output state. -// Error returns: +// +// This function is side-effect free. The only possible error it returns is of type: // - ErrNoChunks: if there are no chunks (ExecutionResult is malformed) func (er ExecutionResult) FinalStateCommitment() (StateCommitment, error) { if !er.ValidateChunksLength() { @@ -65,7 +66,8 @@ func (er ExecutionResult) FinalStateCommitment() (StateCommitment, error) { // InitialStateCommit returns a commitment to the execution state used as input // for computing the block, i.e. the leading chunk's input state. -// Error returns: +// +// This function is side-effect free. The only possible error it returns is of type // - ErrNoChunks: if there are no chunks (ExecutionResult is malformed) func (er ExecutionResult) InitialStateCommit() (StateCommitment, error) { if !er.ValidateChunksLength() { diff --git a/module/errors.go b/module/errors.go new file mode 100644 index 00000000000..5d91dafa8f6 --- /dev/null +++ b/module/errors.go @@ -0,0 +1,54 @@ +package module + +import ( + "errors" + "fmt" +) + +// UnknownBlockError indicates that a referenced block is missing +type UnknownBlockError struct { + err error +} + +func NewUnknownBlockError(msg string, args ...interface{}) error { + return UnknownBlockError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownBlockError) Unwrap() error { + return e.err +} + +func (e UnknownBlockError) Error() string { + return e.err.Error() +} + +func IsUnknownBlockError(err error) bool { + var unknownExecutedBlockError UnknownBlockError + return errors.As(err, &unknownExecutedBlockError) +} + +// UnknownResultError indicates that a referenced result is missing +type UnknownResultError struct { + err error +} + +func NewUnknownResultError(msg string, args ...interface{}) error { + return UnknownResultError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownResultError) Unwrap() error { + return e.err +} + +func (e UnknownResultError) Error() string { + return e.err.Error() +} + +func IsUnknownResultError(err error) bool { + var unknownParentResultError UnknownResultError + return errors.As(err, &unknownParentResultError) +} diff --git a/cmd/util/cmd/common/flow_client.go b/module/grpcclient/flow_client.go similarity index 99% rename from cmd/util/cmd/common/flow_client.go rename to module/grpcclient/flow_client.go index 11fa53a96a4..8b1da8ef117 100644 --- a/cmd/util/cmd/common/flow_client.go +++ b/module/grpcclient/flow_client.go @@ -1,4 +1,4 @@ -package common +package grpcclient import ( "fmt" diff --git a/module/mock/receipt_validator.go b/module/mock/receipt_validator.go index f6f0545666d..a219f78ae84 100644 --- a/module/mock/receipt_validator.go +++ b/module/mock/receipt_validator.go @@ -12,13 +12,13 @@ type ReceiptValidator struct { mock.Mock } -// Validate provides a mock function with given fields: 
receipts -func (_m *ReceiptValidator) Validate(receipts *flow.ExecutionReceipt) error { - ret := _m.Called(receipts) +// Validate provides a mock function with given fields: receipt +func (_m *ReceiptValidator) Validate(receipt *flow.ExecutionReceipt) error { + ret := _m.Called(receipt) var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) error); ok { - r0 = rf(receipts) + r0 = rf(receipt) } else { r0 = ret.Error(0) } diff --git a/module/receipt_validator.go b/module/receipt_validator.go index 6a9d98840f1..f50fa00b98f 100644 --- a/module/receipt_validator.go +++ b/module/receipt_validator.go @@ -1,41 +1,51 @@ package module -import "github.com/onflow/flow-go/model/flow" +import ( + "github.com/onflow/flow-go/model/flow" +) // ReceiptValidator is an interface which is used for validating // receipts with respect to current protocol state. type ReceiptValidator interface { - // Validate verifies that the ExecutionReceipt satisfies - // the following conditions: - // * is from Execution node with positive weight - // * has valid signature - // * chunks are in correct format - // * execution result has a valid parent and satisfies the subgraph check - // Returns nil if all checks passed successfully. + // Validate verifies that the ExecutionReceipt satisfies the following conditions: + // - is from Execution node with positive weight + // - has valid signature + // - chunks are in correct format + // - execution result has a valid parent and satisfies the subgraph check + // + // In order to validate a receipt, both the executed block and the parent result + // referenced in `receipt.ExecutionResult` must be known. We return nil if all checks + // pass successfully. + // + // Expected errors during normal operations: - // * engine.InvalidInputError - // if receipt violates protocol condition - // * engine.UnverifiableInputError - // if receipt's parent result is unknown - Validate(receipts *flow.ExecutionReceipt) error + // - engine.InvalidInputError if receipt violates protocol condition + // - module.UnknownResultError if the receipt's parent result is unknown + // - module.UnknownBlockError if the executed block is unknown + // + // All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
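Because `NewUnknownBlockError` and `NewUnknownResultError` (module/errors.go above) return types with an `Unwrap` method, and the `Is...` helpers use `errors.As`, callers such as matching.Core can still classify these sentinels after intermediate layers have added context via `fmt.Errorf` and `%w`. A self-contained sketch, using a trimmed copy of the new type purely for illustration:

package main

import (
	"errors"
	"fmt"
)

// Trimmed copy of the sentinel from module/errors.go, reproduced here only to keep the example standalone.
type UnknownBlockError struct{ err error }

func NewUnknownBlockError(msg string, args ...interface{}) error {
	return UnknownBlockError{err: fmt.Errorf(msg, args...)}
}
func (e UnknownBlockError) Error() string { return e.err.Error() }
func (e UnknownBlockError) Unwrap() error { return e.err }

func IsUnknownBlockError(err error) bool {
	var target UnknownBlockError
	return errors.As(err, &target)
}

func main() {
	// A low-level lookup fails because the block is unknown ...
	base := NewUnknownBlockError("block %v is unknown", "0xabc")
	// ... and a higher layer wraps it while adding context, as the validator does.
	wrapped := fmt.Errorf("could not validate result: %w", base)

	fmt.Println(IsUnknownBlockError(wrapped))                      // true: errors.As unwraps the chain
	fmt.Println(IsUnknownBlockError(errors.New("disk corrupted"))) // false: treated as an exception
}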
+ Validate(receipt *flow.ExecutionReceipt) error // ValidatePayload verifies the ExecutionReceipts and ExecutionResults // in the payload for compliance with the protocol: // Receipts: - // * are from Execution node with positive weight - // * have valid signature - // * chunks are in correct format - // * no duplicates in fork + // - are from Execution node with positive weight + // - have valid signature + // - chunks are in correct format + // - no duplicates in fork + // // Results: - // * have valid parents and satisfy the subgraph check - // * extend the execution tree, where the tree root is the latest - // finalized block and only results from this fork are included - // * no duplicates in fork + // - have valid parents and satisfy the subgraph check + // - extend the execution tree, where the tree root is the latest + // finalized block and only results from this fork are included + // - no duplicates in fork + // // Expected errors during normal operations: - // * engine.InvalidInputError - // if some receipts in the candidate block violate protocol condition - // * engine.UnverifiableInputError - // if for some of the receipts, their respective parent result is unknown + // - engine.InvalidInputError if some receipts in the candidate block violate protocol condition + // - module.UnknownBlockError if the candidate block's _parent_ is unknown + // + // All other error are potential symptoms critical internal failures, such as bugs or state corruption. + // Note that module.UnknownResultError is not possible; we have either an invalid candidate block + // (yields engine.InvalidInputError) or a missing parent block (yields module.UnknownBlockError). ValidatePayload(candidate *flow.Block) error } diff --git a/module/validation/common.go b/module/validation/common.go index 01e46e1328e..135c0b6efbb 100644 --- a/module/validation/common.go +++ b/module/validation/common.go @@ -1,19 +1,24 @@ package validation import ( + "errors" "fmt" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + protocolstate "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" ) // identityForNode ensures that `nodeID` is an authorized member of the network // at the given block and returns the corresponding node's full identity. // Error returns: -// - sentinel engine.InvalidInputError is nodeID is NOT an authorized member of the network -// - generic error indicating a fatal internal problem +// - engine.InvalidInputError if nodeID is NOT an authorized member of the network at the given block +// - module.UnknownBlockError if blockID is not known to the protocol state +// +// All other error are potential symptoms critical internal failures, such as bugs or state corruption. func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow.Identifier) (*flow.Identity, error) { // get the identity of the origin node identity, err := state.AtBlockID(blockID).Identity(nodeID) @@ -21,8 +26,10 @@ func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow. 
if protocol.IsIdentityNotFound(err) { return nil, engine.NewInvalidInputErrorf("unknown node identity: %w", err) } - // unexpected exception - return nil, fmt.Errorf("failed to retrieve node identity: %w", err) + if errors.Is(err, protocolstate.ErrUnknownSnapshotReference) { + return nil, module.NewUnknownBlockError("block %v is unknown: %w", blockID, err) + } + return nil, fmt.Errorf("unexpected exception retrieving node identity: %w", err) } return identity, nil @@ -33,8 +40,8 @@ func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow. // - and has the expected role // - is an active participant of the current epoch and not ejected (i.e. has `EpochParticipationStatusActive`) // -// Returns the following errors: -// - sentinel engine.InvalidInputError if any of the above-listed conditions are violated. +// This function is side-effect free. The only possible error it returns is of type +// - engine.InvalidInputError if any of the above-listed conditions are violated. // // Note: the method receives the identity as proof of its existence. // Therefore, we consider the case where the respective identity is unknown to the @@ -50,9 +57,7 @@ func ensureNodeHasWeightAndRole(identity *flow.Identity, expectedRole flow.Role) } // check if the identity is a valid epoch participant(is active in the current epoch + not ejected) if !filter.IsValidCurrentEpochParticipant(identity) { - return engine.NewInvalidInputErrorf("node (%x) is not an active participant, instead has status: %s", identity.NodeID, - identity.EpochParticipationStatus.String()) + return engine.NewInvalidInputErrorf("node %x is not an active participant, instead has status: %s", identity.NodeID, identity.EpochParticipationStatus.String()) } - return nil } diff --git a/module/validation/receipt_validator.go b/module/validation/receipt_validator.go index 95a0cb7bb03..9fb2569e21c 100644 --- a/module/validation/receipt_validator.go +++ b/module/validation/receipt_validator.go @@ -9,8 +9,8 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" @@ -46,76 +46,73 @@ func NewReceiptValidator(state protocol.State, return rv } +// verifySignature ensures that the given receipt has a valid signature from nodeIdentity. +// Expected errors during normal operations: +// - engine.InvalidInputError if the signature is invalid func (v *receiptValidator) verifySignature(receipt *flow.ExecutionReceiptMeta, nodeIdentity *flow.Identity) error { id := receipt.ID() valid, err := nodeIdentity.StakingPubKey.Verify(receipt.ExecutorSignature, id[:], v.signatureHasher) - if err != nil { - return fmt.Errorf("failed to verify signature: %w", err) + if err != nil { // Verify(..) returns (false,nil) for invalid signature. Any error indicates unexpected internal failure. 
+ return irrecoverable.NewExceptionf("failed to verify signature: %w", err) } - if !valid { return engine.NewInvalidInputErrorf("invalid signature for (%x)", nodeIdentity.NodeID) } - return nil } +// verifyChunksFormat enforces that: +// - chunks are indexed without any gaps starting from zero +// - each chunk references the same blockID as the top-level execution result +// - the execution result has the correct number of chunks in accordance with the number of collections in the executed block +// +// Expected errors during normal operations: +// - engine.InvalidInputError if the result has malformed chunks +// - module.UnknownBlockError when the executed block is unknown func (v *receiptValidator) verifyChunksFormat(result *flow.ExecutionResult) error { for index, chunk := range result.Chunks.Items() { if uint(index) != chunk.CollectionIndex { return engine.NewInvalidInputErrorf("invalid CollectionIndex, expected %d got %d", index, chunk.CollectionIndex) } - + if uint64(index) != chunk.Index { + return engine.NewInvalidInputErrorf("invalid Chunk.Index, expected %d got %d", index, chunk.CollectionIndex) + } if chunk.BlockID != result.BlockID { return engine.NewInvalidInputErrorf("invalid blockID, expected %v got %v", result.BlockID, chunk.BlockID) } } - // we create one chunk per collection, plus the - // system chunk. so we can check if the chunk number matches with the - // number of guarantees plus one; this will ensure the execution receipt - // cannot lie about having less chunks and having the remaining ones - // approved - requiredChunks := 1 // system chunk: must exist for block's ExecutionResult, even if block payload itself is empty - - index, err := v.index.ByBlockID(result.BlockID) + // For a block containing k collections, the Flow protocol prescribes that a valid execution result + // must contain k+1 chunks. Specifically, we have one chunk per collection plus the system chunk. + // The system chunk must exist, even if block payload itself is empty. + index, err := v.index.ByBlockID(result.BlockID) // returns `storage.ErrNotFound` for unknown BlockID if err != nil { - // the mutator will always create payload index for a valid block - return fmt.Errorf("could not find payload index for executed block %v: %w", result.BlockID, err) + if errors.Is(err, storage.ErrNotFound) { + return module.NewUnknownBlockError("could not find payload index for executed block %v: %w", result.BlockID, err) + } + return irrecoverable.NewExceptionf("unexpected failure retrieving index for executed block %v: %w", result.BlockID, err) } - - requiredChunks += len(index.CollectionIDs) - + requiredChunks := 1 + len(index.CollectionIDs) // one chunk per collection + 1 system chunk if result.Chunks.Len() != requiredChunks { - return engine.NewInvalidInputErrorf("invalid number of chunks, expected %d got %d", - requiredChunks, result.Chunks.Len()) + return engine.NewInvalidInputErrorf("invalid number of chunks, expected %d got %d", requiredChunks, result.Chunks.Len()) } - return nil } -func (v *receiptValidator) fetchResult(resultID flow.Identifier) (*flow.ExecutionResult, error) { - prevResult, err := v.results.ByID(resultID) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return nil, engine.NewUnverifiableInputError("cannot retrieve result: %v", resultID) - } - return nil, err - } - return prevResult, nil -} - // subgraphCheck enforces that result forms a valid sub-graph: -// Let R1 be a result that references block A, and R2 be R1's parent result. 
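The chunk-count rule above (k collections yield k+1 chunks, system chunk included) and the index/block consistency checks can be restated as a compact standalone checker; the stub type below is an illustration only, not the flow.Chunk type:

package main

import "fmt"

// chunkStub models only the fields verifyChunksFormat inspects.
type chunkStub struct {
	CollectionIndex uint
	Index           uint64
	BlockID         string
}

// checkChunksFormat mirrors the three rules: gap-free indices starting at zero, every chunk
// referencing the executed block, and exactly numCollections+1 chunks (the extra one is the system chunk).
func checkChunksFormat(chunks []chunkStub, executedBlockID string, numCollections int) error {
	for i, c := range chunks {
		if uint(i) != c.CollectionIndex || uint64(i) != c.Index {
			return fmt.Errorf("chunk at position %d has inconsistent indices", i)
		}
		if c.BlockID != executedBlockID {
			return fmt.Errorf("chunk %d references block %s, expected %s", i, c.BlockID, executedBlockID)
		}
	}
	if required := numCollections + 1; len(chunks) != required {
		return fmt.Errorf("expected %d chunks (collections + system chunk), got %d", required, len(chunks))
	}
	return nil
}

func main() {
	chunks := []chunkStub{{0, 0, "blockA"}, {1, 1, "blockA"}, {2, 2, "blockA"}}
	fmt.Println(checkChunksFormat(chunks, "blockA", 2)) // <nil>: 2 collections plus the system chunk
	fmt.Println(checkChunksFormat(chunks, "blockA", 3)) // error: one chunk is missing
}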
-// The execution results form a valid subgraph if and only if R2 references -// A's parent. +// Let R1 be a result that references block A, and R2 be R1's parent result. The +// execution results form a valid subgraph if and only if R2 references A's parent. +// +// Expected errors during normal operations: +// - engine.InvalidInputError if result does not form a valid sub-graph +// - module.UnknownBlockError when the executed block is unknown func (v *receiptValidator) subgraphCheck(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error { - block, err := v.state.AtBlockID(result.BlockID).Head() + block, err := v.state.AtBlockID(result.BlockID).Head() // returns `storage.ErrNotFound` for unknown BlockID if err != nil { - if errors.Is(err, state.ErrUnknownSnapshotReference) { - return engine.NewInvalidInputErrorf("no block found %v %w", result.BlockID, err) + if errors.Is(err, storage.ErrNotFound) { + return module.NewUnknownBlockError("executed block %v unknown: %w", result.BlockID, err) } - return err + return irrecoverable.NewExceptionf("unexpected failure retrieving executed block %v: %w", result.BlockID, err) } // validating the PreviousResultID field @@ -127,12 +124,13 @@ func (v *receiptValidator) subgraphCheck(result *flow.ExecutionResult, prevResul if prevResult.BlockID != block.ParentID { return engine.NewInvalidInputErrorf("invalid block for previous result %v", prevResult.BlockID) } - return nil } // resultChainCheck enforces that the end state of the parent result -// matches the current result's start state +// matches the current result's start state. +// This function is side effect free. The only possible error it returns is of type +// - engine.InvalidInputError if starting state of result is inconsistent with previous result's end state func (v *receiptValidator) resultChainCheck(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error { finalState, err := prevResult.FinalStateCommitment() if err != nil { @@ -149,38 +147,40 @@ func (v *receiptValidator) resultChainCheck(result *flow.ExecutionResult, prevRe return nil } -// Validate verifies that the ExecutionReceipt satisfies -// the following conditions: +// Validate verifies that the ExecutionReceipt satisfies the following conditions: // - is from Execution node with positive weight // - has valid signature // - chunks are in correct format // - execution result has a valid parent and satisfies the subgraph check // -// Returns nil if all checks passed successfully. +// In order to validate a receipt, both the executed block and the parent result +// referenced in `receipt.ExecutionResult` must be known. We return nil if all checks +// pass successfully. +// // Expected errors during normal operations: -// - engine.InvalidInputError -// if receipt violates protocol condition -// - engine.UnverifiableInputError -// if receipt's parent result is unknown +// - engine.InvalidInputError if receipt violates protocol rules +// - module.UnknownBlockError if the executed block is unknown +// - module.UnknownResultError if the receipt's parent result is unknown +// +// All other error are potential symptoms critical internal failures, such as bugs or state corruption. func (v *receiptValidator) Validate(receipt *flow.ExecutionReceipt) error { - // TODO: this can be optimized by checking if result was already stored and validated. - // This needs to be addressed later since many tests depend on this behavior. 
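The sub-graph rule can also be stated as a one-line predicate over a toy model (stub types, not the protocol's flow.ExecutionResult): a result R1 for block A and its claimed parent result R2 are consistent only if R1 actually points at R2 and R2 executed A's parent block.

package main

import "fmt"

// resultStub models just the fields the sub-graph check reads.
type resultStub struct {
	ID               string
	BlockID          string // executed block
	PreviousResultID string // claimed parent result
}

// subgraphOK reports whether r1 and its parent result r2 form a valid sub-graph.
func subgraphOK(r1, r2 resultStub, parentOf map[string]string) bool {
	return r1.PreviousResultID == r2.ID && r2.BlockID == parentOf[r1.BlockID]
}

func main() {
	parentOf := map[string]string{"A": "G", "B": "A"} // block chain G <- A <- B
	r2 := resultStub{ID: "R2", BlockID: "G"}
	r1 := resultStub{ID: "R1", BlockID: "A", PreviousResultID: "R2"}
	fmt.Println(subgraphOK(r1, r2, parentOf)) // true: R2 executed A's parent G

	// A result for block B that still names R2 (a result for G) as its parent skips block A entirely.
	skipping := resultStub{ID: "R3", BlockID: "B", PreviousResultID: "R2"}
	fmt.Println(subgraphOK(skipping, r2, parentOf)) // false
}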
- prevResult, err := v.fetchResult(receipt.ExecutionResult.PreviousResultID)
- if err != nil {
- return fmt.Errorf("error fetching parent result of receipt %v: %w", receipt.ID(), err)
+ parentResult, err := v.results.ByID(receipt.ExecutionResult.PreviousResultID)
+ if err != nil { // we expect `storage.ErrNotFound` in case parent result is unknown; any other error is unexpected, critical failure
+ if errors.Is(err, storage.ErrNotFound) {
+ return module.NewUnknownResultError("parent result %v unknown: %w", receipt.ExecutionResult.PreviousResultID, err)
+ }
+ return irrecoverable.NewExceptionf("unexpected exception fetching parent result %v: %w", receipt.ExecutionResult.PreviousResultID, err)
 }
- // first validate result to avoid signature check in in `validateReceipt` in case result is invalid.
- err = v.validateResult(&receipt.ExecutionResult, prevResult)
+ // first validate result to avoid expensive signature check in `validateReceipt` in case result is invalid.
+ err = v.validateResult(&receipt.ExecutionResult, parentResult)
 if err != nil {
 return fmt.Errorf("could not validate single result %v at index: %w", receipt.ExecutionResult.ID(), err)
 }
 err = v.validateReceipt(receipt.Meta(), receipt.ExecutionResult.BlockID)
 if err != nil {
- // It's very important that we fail the whole validation if one of the receipts is invalid.
- // It allows us to make assumptions as stated in previous comment.
- return fmt.Errorf("could not validate single receipt %v: %w", receipt.ID(), err)
+ return fmt.Errorf("could not validate receipt %v: %w", receipt.ID(), err)
 }

 return nil
@@ -201,14 +201,28 @@ func (v *receiptValidator) Validate(receipt *flow.ExecutionReceipt) error {
 // - no duplicates in fork
 //
 // Expected errors during normal operations:
-// - engine.InvalidInputError
-// if some receipts in the candidate block violate protocol condition
-// - engine.UnverifiableInputError
-// if for some of the receipts, their respective parent result is unknown
+// - engine.InvalidInputError if some receipts in the candidate block violate protocol condition
+// - module.UnknownBlockError if the candidate block's _parent_ is unknown
+//
+// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
+// Note that module.UnknownResultError is not possible; we have either an invalid candidate block
+// (yields engine.InvalidInputError) or a missing parent block (yields module.UnknownBlockError).
 func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {
 header := candidate.Header
 payload := candidate.Payload

+ // As a prerequisite, we check that candidate's parent block is known. Otherwise, we cannot validate it.
+ // This check is important to distinguish expected error cases from unexpected exceptions. By confirming
+ // that the protocol state knows the parent block, we guarantee that we can successfully traverse the
+ // candidate's ancestry below.
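+ // Specifically, the check below reports an unknown parent header as a benign module.UnknownBlockError,
+ // while any failure of the headers storage itself is escalated as an irrecoverable exception.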
+ exists, err := v.headers.Exists(header.ParentID) + if err != nil { + return irrecoverable.NewExceptionf("unexpected exception retrieving the candidate block's parent %v: %w", header.ParentID, err) + } + if !exists { + return module.NewUnknownBlockError("cannot validate receipts in block, as its parent block is unknown %v", header.ParentID) + } + // return if nothing to validate if len(payload.Receipts) == 0 && len(payload.Results) == 0 { return nil @@ -272,7 +286,9 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error { } err = fork.TraverseForward(v.headers, header.ParentID, bookKeeper, fork.ExcludingBlock(lastSeal.BlockID)) if err != nil { - return fmt.Errorf("internal error while traversing the ancestor fork of unsealed blocks: %w", err) + // At the beginning, we checked that candidate's parent exists in the protocol state, i.e. its + // ancestry is known and valid. Hence, any error here is a symptom of internal state corruption. + return irrecoverable.NewExceptionf("internal error while traversing the ancestor fork of unsealed blocks: %w", err) } // tracks the number of receipts committing to each result. @@ -280,13 +296,13 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error { // all needed checks after we have validated all results. receiptsByResult := payload.Receipts.GroupByResultID() - // validate all results that are incorporated into the payload. If one is malformed, the entire block is invalid. + // Validate all results that are incorporated into the payload. If one is malformed, the entire block is invalid. for i, result := range payload.Results { resultID := result.ID() // Every included result must be accompanied by a receipt with a corresponding `ResultID`, in the same block. // If a result is included without a corresponding receipt, it cannot be attributed to any executor. - receiptsForResult := uint(len(receiptsByResult.GetGroup(resultID))) + receiptsForResult := len(receiptsByResult.GetGroup(resultID)) if receiptsForResult == 0 { return engine.NewInvalidInputErrorf("no receipts for result %v at index %d", resultID, i) } @@ -306,11 +322,19 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error { if _, forBlockOnFork := forkBlocks[result.BlockID]; !forBlockOnFork { return engine.NewInvalidInputErrorf("results %v at index %d is for block not on fork (%x)", resultID, i, result.BlockID) } + // Reaching the following code implies that the executed block with ID `result.BlockID` is known to the protocol state, i.e. well formed. // validate result err = v.validateResult(result, prevResult) if err != nil { - return fmt.Errorf("could not validate result %v at index %d: %w", resultID, i, err) + if engine.IsInvalidInputError(err) { + return fmt.Errorf("result %v at index %d is invalid: %w", resultID, i, err) + } + if module.IsUnknownBlockError(err) { + // Above, we checked that the result is for an ancestor of the candidate block. 
If this block or parts of it are not found, our state is corrupted + return irrecoverable.NewExceptionf("the executed block or some of its parts were not found despite the block being already incorporated: %w", err) + } + return fmt.Errorf("unexpected exception while validating result %v at index %d: %w", resultID, i, err) } executionTree[resultID] = result } @@ -338,13 +362,25 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error { err = v.validateReceipt(receipt, result.BlockID) if err != nil { - return fmt.Errorf("receipt %v at index %d failed validation: %w", receiptID, i, err) + if engine.IsInvalidInputError(err) { + return fmt.Errorf("receipt %v at index %d failed validation: %w", receiptID, i, err) + } + if module.IsUnknownBlockError(err) { + // Above, we checked that the result is for an ancestor of the candidate block. If this block or parts of it are not found, our state is corrupted + return irrecoverable.NewExceptionf("the executed block or some of its parts were not found despite the block being already incorporated: %w", err) + } + return fmt.Errorf("unexpected exception validating receipt %v at index %d: %w", receiptID, i, err) } } return nil } +// validateResult validates that the given result is well-formed. +// We do not check the validity of the resulting state commitment. +// Expected errors during normal operations: +// - engine.InvalidInputError if the result has malformed chunks +// - module.UnknownBlockError if blockID does not correspond to a block known by the protocol state func (v *receiptValidator) validateResult(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error { err := v.verifyChunksFormat(result) if err != nil { @@ -364,14 +400,15 @@ func (v *receiptValidator) validateResult(result *flow.ExecutionResult, prevResu return nil } -func (v *receiptValidator) validateReceipt(receipt *flow.ExecutionReceiptMeta, blockID flow.Identifier) error { - identity, err := identityForNode(v.state, blockID, receipt.ExecutorID) +// validateReceipt validates that the given `receipt` is a valid commitment from an Execution Node +// to some result. 
+// Error returns:
+// - engine.InvalidInputError if `receipt` is invalid
+// - module.UnknownBlockError if executedBlockID is unknown
+func (v *receiptValidator) validateReceipt(receipt *flow.ExecutionReceiptMeta, executedBlockID flow.Identifier) error {
+ identity, err := identityForNode(v.state, executedBlockID, receipt.ExecutorID)
 if err != nil {
- return fmt.Errorf(
- "failed to get executor identity %v at block %v: %w",
- receipt.ExecutorID,
- blockID,
- err)
+ return fmt.Errorf("retrieving identity of node %v at block %v failed: %w", receipt.ExecutorID, executedBlockID, err)
 }

 err = ensureNodeHasWeightAndRole(identity, flow.RoleExecution)
diff --git a/module/validation/receipt_validator_test.go b/module/validation/receipt_validator_test.go
index a7ca7ddc976..8d953978d50 100644
--- a/module/validation/receipt_validator_test.go
+++ b/module/validation/receipt_validator_test.go
@@ -1,6 +1,7 @@
 package validation

 import (
+ "errors"
 "testing"

 "github.com/stretchr/testify/mock"
@@ -10,7 +11,9 @@ import (
 "github.com/onflow/flow-go/engine"
 "github.com/onflow/flow-go/model/flow"
 "github.com/onflow/flow-go/module"
- fmock "github.com/onflow/flow-go/module/mock"
+ mock_module "github.com/onflow/flow-go/module/mock"
+ mock_protocol "github.com/onflow/flow-go/state/protocol/mock"
+ mock_storage "github.com/onflow/flow-go/storage/mock"
 "github.com/onflow/flow-go/utils/unittest"
 )
@@ -22,12 +25,12 @@ type ReceiptValidationSuite struct {
 unittest.BaseChainSuite

 receiptValidator module.ReceiptValidator
- publicKey *fmock.PublicKey
+ publicKey *mock_module.PublicKey
 }

 func (s *ReceiptValidationSuite) SetupTest() {
 s.SetupChain()
- s.publicKey = &fmock.PublicKey{}
+ s.publicKey = mock_module.NewPublicKey(s.T())
 s.Identities[s.ExeID].StakingPubKey = s.publicKey
 s.receiptValidator = NewReceiptValidator(
 s.State,
@@ -58,22 +61,17 @@ func (s *ReceiptValidationSuite) TestReceiptValid() {
 }

 // TestReceiptNoIdentity tests that we reject receipt with invalid `ExecutionResult.ExecutorID`
+// Note: for a receipt with a bad `ExecutorID`, we should never get to validating the signature,
+// because there is no valid identity, where we can retrieve a staking signature from.
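+// Consequently, the test sets no expectation on the staking key's Verify method and asserts that
+// validation fails with an engine.InvalidInputError.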
func (s *ReceiptValidationSuite) TestReceiptNoIdentity() { valSubgrph := s.ValidSubgraphFixture() - node := unittest.IdentityFixture() - mockPk := &fmock.PublicKey{} + node := unittest.IdentityFixture() // unknown Node + mockPk := mock_module.NewPublicKey(s.T()) node.StakingPubKey = mockPk - receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(node.NodeID), - unittest.WithResult(valSubgrph.Result)) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(node.NodeID), unittest.WithResult(valSubgrph.Result)) s.AddSubgraphFixtureToMempools(valSubgrph) - receiptID := receipt.ID() - mockPk.On("Verify", - receiptID[:], - receipt.ExecutorSignature, - mock.Anything, - ).Return(true, nil).Once() err := s.receiptValidator.Validate(receipt) s.Require().Error(err, "should reject invalid identity") s.Assert().True(engine.IsInvalidInputError(err)) @@ -180,6 +178,52 @@ func (s *ReceiptValidationSuite) TestReceiptTooFewChunks() { s.Assert().True(engine.IsInvalidInputError(err)) } +// TestReceiptForBlockWith0Collections tests handling of the edge case of a block that contains no +// collection guarantees: +// - A receipt must contain one chunk (system chunk) +// - receipts with zero or 2 chunks are rejected +func (s *ReceiptValidationSuite) TestReceiptForBlockWith0Collections() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + valSubgrph := s.ValidSubgraphFixture() + valSubgrph.Block.SetPayload(unittest.PayloadFixture()) + s.Assert().Equal(0, len(valSubgrph.Block.Payload.Guarantees)) // sanity check that no collections in block + s.AddSubgraphFixtureToMempools(valSubgrph) + + // happy path receipt + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(s.ExeID), + unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithBlock(valSubgrph.Block), + unittest.WithPreviousResult(*valSubgrph.PreviousResult), + ))) + s.Assert().Equal(1, len(receipt.Chunks)) // sanity check that one chunk in result + + s.T().Run("valid case: 1 chunk", func(t *testing.T) { // confirm happy path receipt valid + err := s.receiptValidator.Validate(receipt) + s.Require().NoError(err) + }) + + s.T().Run("invalid: zero chunks", func(t *testing.T) { // missing system chunk + var r flow.ExecutionReceipt = *receipt // copy + r.Chunks = r.Chunks[0:0] + err := s.receiptValidator.Validate(&r) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) + + s.T().Run("invalid: 2 chunks", func(t *testing.T) { // one too many chunks + var r flow.ExecutionReceipt = *receipt // copy + var extraChunk flow.Chunk = *r.Chunks[0] + extraChunk.Index = 1 + extraChunk.CollectionIndex = 1 + r.Chunks = append(r.Chunks, &extraChunk) + err := s.receiptValidator.Validate(&r) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) +} + // TestReceiptTooManyChunks tests that we reject receipt with more chunks than expected func (s *ReceiptValidationSuite) TestReceiptTooManyChunks() { valSubgrph := s.ValidSubgraphFixture() @@ -235,7 +279,9 @@ func (s *ReceiptValidationSuite) TestReceiptInvalidCollectionIndex() { s.Assert().True(engine.IsInvalidInputError(err)) } -// TestReceiptNoPreviousResult tests that we reject receipt with missing previous result +// TestReceiptNoPreviousResult tests that `Validate` rejects a receipt, whose parent result is unknown: +// - per API contract it should return a `module.UnknownResultError` +// - should 
_not_ be misinterpreted as an invalid receipt, i.e. should not receive an `engine.InvalidInputError` func (s *ReceiptValidationSuite) TestReceiptNoPreviousResult() { valSubgrph := s.ValidSubgraphFixture() // invalidate prev execution result, it will result in failing to lookup @@ -252,24 +298,43 @@ func (s *ReceiptValidationSuite) TestReceiptNoPreviousResult() { err := s.receiptValidator.Validate(receipt) s.Require().Error(err, "should reject invalid receipt") - s.Assert().True(engine.IsUnverifiableInputError(err), err) + s.Assert().True(module.IsUnknownResultError(err), err) + s.Assert().False(engine.IsInvalidInputError(err), err) } -// TestReceiptInvalidPreviousResult tests that we reject receipt with invalid previous result -func (s *ReceiptValidationSuite) TestReceiptInvalidPreviousResult() { - valSubgrph := s.ValidSubgraphFixture() - receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), - unittest.WithResult(valSubgrph.Result)) - s.AddSubgraphFixtureToMempools(valSubgrph) - - // invalidate prev execution result blockID, this should fail because - // prev result points to wrong block - valSubgrph.PreviousResult.BlockID = unittest.IdentifierFixture() - - s.publicKey.On("Verify", - mock.Anything, - mock.Anything, - mock.Anything).Return(true, nil).Maybe() +// TestInvalidSubgraph is part of verifying that we reject a receipt, whose result +// does not form a valid 'subgraph'. Formally, a subgraph is defined as +// +// Result -----------------------------------> Block +// | | +// | v +// | ParentBlock +// v +// PreviousResult ---> PreviousResult.BlockID +// +// with the validity requirement that PreviousResult.BlockID == ParentBlock.ID(). +// +// In our test case, we assume that `ParentResult` and `Block` are known, but +// ParentResult.BlockID ≠ ParentBlock.ID(). The compliance layer guarantees that new elements are added +// to the blockchain graph if and only if they are protocol compliant. In other words, we are testing +// a byzantine receipt that references known and valid entities, but they do not form a valid subgraph. +// For example, it could be a result for a block in a different fork or an ancestor further in the past. +func (s *ReceiptValidationSuite) TestInvalidSubgraph() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + // add two independent sub-graphs, which is essentially two different forks + fork1 := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(fork1) + fork2 := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(fork2) + + // Receipt is for block in fork1 but references a result in fork2 as parent + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(s.ExeID), // valid executor + unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithBlock(fork1.Block), // known executed block on fork 1 + unittest.WithPreviousResult(*fork2.Result)), // known parent result + )) err := s.receiptValidator.Validate(receipt) s.Require().Error(err, "should reject invalid previous result") @@ -300,11 +365,12 @@ func (s *ReceiptValidationSuite) TestReceiptInvalidResultChain() { // TestMultiReceiptValidResultChain tests that multiple receipts and results // within one block payload are accepted, where the receipts are building on -// top of each other (i.e. their results form a chain). -// Say B(A) means block B has receipt for A: -// - we have such chain in storage: G <- A <- B(A) <- C +// top of each other (i.e. their results form a chain). 
Test case: +// - we have the chain in storage: G <- A <- B(A) <- C // - if a child block of C payload contains receipts and results for (B,C) // it should be accepted as valid +// +// Notation: B(A) means block B has receipt for A. func (s *ReceiptValidationSuite) TestMultiReceiptValidResultChain() { // assuming signatures are all good s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) @@ -343,12 +409,13 @@ func (s *ReceiptValidationSuite) TestMultiReceiptValidResultChain() { s.Require().NoError(err) } -// we have such chain in storage: G <- A <- B(A) <- C -// if a block payload contains (C,B_bad), they should be invalid +// TestMultiReceiptInvalidParent performs the following test: +// - we have the chain in storage: G <- A <- B(A) <- C +// and are receiving `candidate`, which is a child block of C +// - candidate should be invalid, if its payload contains (C,B_bad). +// +// Notation: B(A) means block B has receipt for A. func (s *ReceiptValidationSuite) TestMultiReceiptInvalidParent() { - // assuming signatures are all good - s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - // G <- A <- B <- C blocks, result0, seal := unittest.ChainFixture(4) s.SealsIndex[blocks[0].ID()] = seal @@ -374,7 +441,9 @@ func (s *ReceiptValidationSuite) TestMultiReceiptInvalidParent() { } s.PersistedResults[result0.ID()] = result0 - // make receipt B as bad + // receipt B is from an invalid node + // Note: for a receipt with a bad `ExecutorID`, we should never get to validating the signature, + // because there is no valid identity, where we can retrieve a staking signature from. receiptBInvalid.ExecutorID = unittest.IdentifierFixture() candidate := unittest.BlockWithParentFixture(blockC.Header) @@ -516,8 +585,8 @@ func (s *ReceiptValidationSuite) TestValidationReceiptForIncorporatedResult() { // Block X must be considered invalid, because confirming validity of // ReceiptMeta[A] requires information _not_ included in the fork. func (s *ReceiptValidationSuite) TestValidationReceiptWithoutIncorporatedResult() { - // assuming signatures are all good - s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + // assuming signatures are all good (if we get to checking signatures) + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() // create block A blockA := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header) // for block G, we use the LatestSealedBlock @@ -757,16 +826,11 @@ func (s *ReceiptValidationSuite) TestExtendReceiptsDuplicate() { // `TestValidateReceiptAfterBootstrap` tests a special case when we try to produce a new block // after genesis with empty payload. func (s *ReceiptValidationSuite) TestValidateReceiptAfterBootstrap() { - // assuming signatures are all good - s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - - // G + // Genesis block blocks, result0, seal := unittest.ChainFixture(0) + require.Equal(s.T(), len(blocks), 1, "expected only creation of genesis block") s.SealsIndex[blocks[0].ID()] = seal - - for _, b := range blocks { - s.Extend(b) - } + s.Extend(blocks[0]) s.PersistedResults[result0.ID()] = result0 candidate := unittest.BlockWithParentFixture(blocks[0].Header) @@ -778,8 +842,8 @@ func (s *ReceiptValidationSuite) TestValidateReceiptAfterBootstrap() { // into their proposal. 
ReceiptValidator must ensure that for each result included in the block, there must be // at least one receipt included in that block as well. func (s *ReceiptValidationSuite) TestValidateReceiptResultWithoutReceipt() { - // assuming signatures are all good - s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + // assuming signatures are all good (if we get to checking signatures) + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() // G <- A <- B blocks, result0, seal := unittest.ChainFixture(2) @@ -872,3 +936,247 @@ func (s *ReceiptValidationSuite) TestValidateReceiptResultHasEnoughReceipts() { err := s.receiptValidator.ValidatePayload(candidate) s.Require().NoError(err) } + +// TestReceiptNoBlock tests that the validator rejects a receipt, whose executed block is unknown: +// - per API contract it should return a `module.UnknownBlockError` +// - should _not_ be misinterpreted as an invalid receipt, i.e. should not receive an `engine.InvalidInputError` +func (s *ReceiptValidationSuite) TestReceiptNoBlock() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + // Initially, s.LatestExecutionResult points to the result for s.LatestSealedBlock. We construct the chain: + // LatestSealedBlock <-- unknownExecutedBlock <-- candidate(r) + // where `r` denotes an execution receipt for block `unknownExecutedBlock` + unknownExecutedBlock := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header) + r := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(s.ExeID), // valid executor + unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithBlock(unknownExecutedBlock), + unittest.WithPreviousResult(*s.LatestExecutionResult)), // known parent result + )) // but the ID of the executed block is randomly chosen, i.e. 
unknown + + // attempting to validate receipt `r` should fail with an `module.UnknownBlockError` + err := s.receiptValidator.Validate(r) + s.Require().Error(err, "should reject invalid receipt") + s.Assert().True(module.IsUnknownBlockError(err), err) + s.Assert().False(engine.IsInvalidInputError(err), err) + + // attempting to validate a block, whose payload contains receipt `r` should fail with an `module.UnknownBlockError` + candidate := unittest.BlockWithParentFixture(unknownExecutedBlock.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(r))) + err = s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "should reject invalid receipt") + s.Assert().True(module.IsUnknownBlockError(err), err) + s.Assert().False(engine.IsInvalidInputError(err), err) +} + +// TestException_HeadersExists tests that unexpected exceptions raised by the dependency +// `receiptValidator.headers.Exists(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_HeadersExists() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + + // receiptValidator.headers yields exception on retrieving any block header + *s.HeadersDB = *mock_storage.NewHeaders(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("headers.ByBlockID() exception") + s.HeadersDB.On("Exists", mock.Anything).Return(false, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_HeadersByBlockID tests that unexpected exceptions raised by the dependency +// `receiptValidator.headers.ByBlockID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_HeadersByBlockID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + + // receiptValidator.headers yields exception on retrieving any block header + exception := errors.New("headers.ByBlockID() exception") + *s.HeadersDB = *mock_storage.NewHeaders(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + s.HeadersDB.On("Exists", mock.Anything).Return(true, nil) + s.HeadersDB.On("ByBlockID", mock.Anything).Return(nil, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + 
s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_SealsHighestInFork tests that unexpected exceptions raised by the dependency +// `receiptValidator.seals.HighestInFork(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_SealsHighestInFork() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + + // receiptValidator.seals yields exception on retrieving highest sealed block in fork up to candidate's parent + *s.SealsDB = *mock_storage.NewSeals(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("seals.HighestInFork(..) exception") + s.SealsDB.On("HighestInFork", candidate.Header.ParentID).Return(nil, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_ProtocolStateHead tests that unexpected exceptions raised by the dependency +// `receiptValidator.state.AtBlockID() -> Snapshot.Head(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ProtocolStateHead() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.state yields exception on Block Header retrieval + *s.State = *mock_protocol.NewState(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + snapshot := mock_protocol.NewSnapshot(s.T()) + exception := errors.New("state.Head() exception") + snapshot.On("Head").Return(nil, exception) + s.State.On("AtBlockID", valSubgrph.Block.ID()).Return(snapshot) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), 
err) + }) +} + +// TestException_ProtocolStateIdentity tests that unexpected exceptions raised by the dependency +// `receiptValidator.state.AtBlockID() -> Snapshot.Identity(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ProtocolStateIdentity() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.state yields exception on Identity retrieval + *s.State = *mock_protocol.NewState(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + snapshot := *mock_protocol.NewSnapshot(s.T()) + exception := errors.New("state.Identity() exception") + snapshot.On("Head").Return(valSubgrph.Block.Header, nil) + snapshot.On("Identity", mock.Anything).Return(nil, exception) + s.State.On("AtBlockID", valSubgrph.Block.ID()).Return(&snapshot) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} + +// TestException_IndexByBlockID tests that unexpected exceptions raised by the dependency +// `receiptValidator.index.ByBlockID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_IndexByBlockID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.index yields exception on Identity retrieval + *s.IndexDB = *mock_storage.NewIndex(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("index.ByBlockID(..) 
exception") + s.IndexDB.On("ByBlockID", valSubgrph.Block.ID()).Return(nil, exception) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} + +// TestException_ResultsByID tests that unexpected exceptions raised by the dependency +// `receiptValidator.results.ByID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ResultsByID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.results yields exception on ExecutionResult retrieval + *s.ResultsDB = *mock_storage.NewExecutionResults(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("results.ByID(..) exception") + s.ResultsDB.On("ByID", valSubgrph.Result.PreviousResultID).Return(nil, exception) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentFixture(valSubgrph.Block.Header) + candidate.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} diff --git a/state/protocol/badger/mutator.go b/state/protocol/badger/mutator.go index 3f2c9878906..da47fa3a415 100644 --- a/state/protocol/badger/mutator.go +++ b/state/protocol/badger/mutator.go @@ -120,9 +120,20 @@ func NewFullConsensusState( // // candidate.View == certifyingQC.View && candidate.ID() == certifyingQC.BlockID // -// Caution: -// - This function expects that `certifyingQC` has been validated. -// - The parent block must already be stored. +// CAUTION: +// - This function expects that `certifyingQC ` has been validated. (otherwise, the state will be corrupted) +// - The parent block must already have been ingested. +// +// Per convention, the protocol state requires that the candidate's parent has already been ingested. 
+// Other than that, all valid extensions are accepted. Even if we have enough information to determine that
+// a candidate block is already orphaned (e.g. its view is below the latest finalized view), it is important
+// to accept it nevertheless to avoid spamming vulnerabilities. If a block is orphaned, consensus rules
+// guarantee that there exists only a limited number of descendants which cannot increase anymore. So there
+// is only a finite (generally small) amount of work to do accepting orphaned blocks and all their descendants.
+// However, if we were to drop orphaned blocks, e.g. block X of the orphaned fork X <- Y <- Z, we might not
+// have enough information to reject blocks Y, Z later if we receive them. We would re-request X, then
+// determine it is orphaned and drop it, attempt to ingest Y, re-request the unknown parent X, and repeat,
+// potentially very often.
 //
 // No errors are expected during normal operations.
 func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate) error {
@@ -177,6 +188,21 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo
 // Extend extends the protocol state of a CONSENSUS PARTICIPANT. It checks
 // the validity of the _entire block_ (header and full payload).
+//
+// CAUTION: per convention, the protocol state requires that the candidate's
+// parent has already been ingested. Otherwise, an exception is returned.
+//
+// Per convention, the protocol state requires that the candidate's parent has already been ingested.
+// Other than that, all valid extensions are accepted. Even if we have enough information to determine that
+// a candidate block is already orphaned (e.g. its view is below the latest finalized view), it is important
+// to accept it nevertheless to avoid spamming vulnerabilities. If a block is orphaned, consensus rules
+// guarantee that there exists only a limited number of descendants which cannot increase anymore. So there
+// is only a finite (generally small) amount of work to do accepting orphaned blocks and all their descendants.
+// However, if we were to drop orphaned blocks, e.g. block X of the orphaned fork X <- Y <- Z, we might not
+// have enough information to reject blocks Y, Z later if we receive them. We would re-request X, then
+// determine it is orphaned and drop it, attempt to ingest Y, re-request the unknown parent X, and repeat,
+// potentially very often.
+//
 // Expected errors during normal operations:
 // - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned)
 // - state.InvalidExtensionError if the candidate block is invalid
@@ -273,11 +299,14 @@ func (m *FollowerState) headerExtend(ctx context.Context, candidate *flow.Block,
 return state.NewInvalidExtensionError("payload integrity check failed")
 }

- // STEP 2: Next, we can check whether the block is a valid descendant of the
- // parent. It should have the same chain ID and a height that is one bigger.
- parent, err := m.headers.ByBlockID(header.ParentID)
+ // STEP 2: check whether the candidate (i) connects to the known block tree and
+ // (ii) has the same chain ID as its parent and a height incremented by 1.
+ parent, err := m.headers.ByBlockID(header.ParentID) // (i) connects to the known block tree
 if err != nil {
- return state.NewInvalidExtensionErrorf("could not retrieve parent: %s", err)
+ // The only sentinel error that can happen here is `storage.ErrNotFound`.
However, by convention the + // protocol state must be extended in a parent-first order. This block's parent being unknown breaks + // with this API contract and results in an exception. + return irrecoverable.NewExceptionf("could not retrieve the candidate's parent block %v: %w", header.ParentID, err) } if header.ChainID != parent.ChainID { return state.NewInvalidExtensionErrorf("candidate built for invalid chain (candidate: %s, parent: %s)", @@ -531,16 +560,16 @@ func (m *ParticipantState) receiptExtend(ctx context.Context, candidate *flow.Bl err := m.receiptValidator.ValidatePayload(candidate) if err != nil { - // TODO: this might be not an error, potentially it can be solved by requesting more data and processing this receipt again - if errors.Is(err, storage.ErrNotFound) { - return state.NewInvalidExtensionErrorf("some entities referenced by receipts are missing: %w", err) - } if engine.IsInvalidInputError(err) { return state.NewInvalidExtensionErrorf("payload includes invalid receipts: %w", err) } + if module.IsUnknownBlockError(err) { + // By convention, the protocol state must be extended in a parent-first order. This block's parent + // being unknown breaks with this API contract and results in an exception. + return irrecoverable.NewExceptionf("internal state corruption detected when validating receipts in candidate block %v: %w", candidate.ID(), err) + } return fmt.Errorf("unexpected payload validation error %w", err) } - return nil } diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index 1c23fe3a68e..890594c20bd 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -3,6 +3,7 @@ package badger_test import ( "context" "errors" + "fmt" "math/rand" "sync" "testing" @@ -18,6 +19,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" @@ -511,6 +513,9 @@ func TestExtendSealedBoundary(t *testing.T) { }) } +// TestExtendMissingParent tests the behaviour when attempting to extend the protocol state by a block +// whose parent is unknown. Per convention, the protocol state requires that the candidate's +// parent has already been ingested. Otherwise, an exception is returned. 
func TestExtendMissingParent(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { @@ -524,9 +529,10 @@ func TestExtendMissingParent(t *testing.T) { err := state.Extend(context.Background(), &extend) require.Error(t, err) - require.True(t, st.IsInvalidExtensionError(err), err) + require.False(t, st.IsInvalidExtensionError(err), err) + require.False(t, st.IsOutdatedExtensionError(err), err) - // verify seal not indexed + // verify seal that was contained in candidate block is not indexed var sealID flow.Identifier err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) require.Error(t, err) @@ -702,17 +708,10 @@ func TestExtendReceiptsInvalid(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - validator.On("ValidatePayload", mock.Anything).Return(nil).Once() - // create block2 and block3 block2 := unittest.BlockWithParentFixture(head) block2.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) - err = state.Extend(context.Background(), block2) - require.NoError(t, err) - - // Add a receipt for block 2 - receipt := unittest.ExecutionReceiptFixture() - + receipt := unittest.ReceiptForBlockFixture(block2) // receipt for block 2 block3 := unittest.BlockWithParentFixture(block2.Header) block3.SetPayload(flow.Payload{ Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, @@ -720,15 +719,47 @@ func TestExtendReceiptsInvalid(t *testing.T) { ProtocolStateID: rootProtocolStateID, }) - // force the receipt validator to refuse this payload - validator.On("ValidatePayload", block3).Return(engine.NewInvalidInputError("")).Once() + // validator accepts block 2 + validator.On("ValidatePayload", block2).Return(nil).Once() + err = state.Extend(context.Background(), block2) + require.NoError(t, err) + // but receipt for block 2 is invalid, which the ParticipantState should reject with an InvalidExtensionError + validator.On("ValidatePayload", block3).Return(engine.NewInvalidInputError("")).Once() err = state.Extend(context.Background(), block3) require.Error(t, err) require.True(t, st.IsInvalidExtensionError(err), err) }) } +// TestOnReceiptValidatorExceptions tests that ParticipantState escalates unexpected errors and exceptions +// returned by the ReceiptValidator. We expect that such errors are *not* interpreted as the block being invalid. +func TestOnReceiptValidatorExceptions(t *testing.T) { + validator := mockmodule.NewReceiptValidator(t) + + rootSnapshot := unittest.RootSnapshotFixture(participants) + util.RunWithFullProtocolStateAndValidator(t, rootSnapshot, validator, func(db *badger.DB, state *protocol.ParticipantState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + block := unittest.BlockWithParentFixture(head) + + // Check that _unexpected_ failure causes the error to be escalated and is *not* interpreted as an invalid block. + validator.On("ValidatePayload", block).Return(fmt.Errorf("")).Once() + err = state.Extend(context.Background(), block) + require.Error(t, err) + require.False(t, st.IsInvalidExtensionError(err), err) + + // Check that an `UnknownBlockError` causes the error to be escalated and is *not* interpreted as an invalid receipt. + // Reasoning: per convention, the ParticipantState requires that the candidate's parent has already been ingested. + // Otherwise, an exception is returned. 
The `ReceiptValidator.ValidatePayload(..)` returning an `UnknownBlockError` + // indicates exactly this situation, where the parent block is unknown. + validator.On("ValidatePayload", block).Return(module.NewUnknownBlockError("")).Once() + err = state.Extend(context.Background(), block) + require.Error(t, err) + require.False(t, st.IsInvalidExtensionError(err), err) + }) +} + func TestExtendReceiptsValid(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) diff --git a/state/protocol/badger/snapshot_test.go b/state/protocol/badger/snapshot_test.go index 9ed7d369249..f7dd16ac0a6 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -40,7 +40,7 @@ func TestUnknownReferenceBlock(t *testing.T) { util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { // build some finalized non-root blocks (heights 101-110) - head := unittest.BlockWithParentFixture(rootSnapshot.Encodable().Head) + head := unittest.BlockWithParentFixture(rootSnapshot.Encodable().Head()) head.SetPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) buildFinalizedBlock(t, state, head) @@ -808,7 +808,7 @@ func TestSealingSegment_FailureCases(t *testing.T) { // Here, we want to specifically test correct handling of the edge case, where a block exists in storage // that has _lower height_ than the node's local root block. Such blocks are typically contained in the // bootstrapping data, such that all entities referenced in the local root block can be resolved. - // Is is possible to retrieve blocks that are lower than the local root block from storage, directly + // It is possible to retrieve blocks that are lower than the local root block from storage, directly // via their ID. Despite these blocks existing in storage, SealingSegment construction should be // because the known history is potentially insufficient when going below the root block. t.Run("sealing segment from block below local state root", func(t *testing.T) { diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index 7721ffad3fb..832dcb5b99a 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -180,7 +180,7 @@ func TestBootstrap_EpochHeightBoundaries(t *testing.T) { t.Parallel() // start with a regular post-spork root snapshot rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) - epoch1FirstHeight := rootSnapshot.Encodable().Head.Height + epoch1FirstHeight := rootSnapshot.Encodable().Head().Height // For the spork root snapshot, only the first height of the root epoch should be indexed. 
// [x] @@ -609,7 +609,9 @@ func TestBootstrap_SealMismatch(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) // convert to encodable to easily modify snapshot encodable := rootSnapshot.Encodable() - encodable.LatestSeal.BlockID = unittest.IdentifierFixture() + latestSeal, err := encodable.LatestSeal() + require.NoError(t, err) + latestSeal.BlockID = unittest.IdentifierFixture() bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) { assert.Error(t, err) @@ -620,7 +622,9 @@ func TestBootstrap_SealMismatch(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) // convert to encodable to easily modify snapshot encodable := rootSnapshot.Encodable() - encodable.LatestResult.BlockID = unittest.IdentifierFixture() + latestSealedResult, err := encodable.LatestSealedResult() + require.NoError(t, err) + latestSealedResult.BlockID = unittest.IdentifierFixture() bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) { assert.Error(t, err) @@ -631,7 +635,9 @@ func TestBootstrap_SealMismatch(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) // convert to encodable to easily modify snapshot encodable := rootSnapshot.Encodable() - encodable.LatestSeal.ResultID = unittest.IdentifierFixture() + latestSeal, err := encodable.LatestSeal() + require.NoError(t, err) + latestSeal.ResultID = unittest.IdentifierFixture() bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) { assert.Error(t, err) diff --git a/state/protocol/badger/validity.go b/state/protocol/badger/validity.go index e6db77d8974..120947528ef 100644 --- a/state/protocol/badger/validity.go +++ b/state/protocol/badger/validity.go @@ -20,14 +20,14 @@ func IsValidRootSnapshot(snap protocol.Snapshot, verifyResultID bool) error { if err != nil { return fmt.Errorf("could not get sealing segment: %w", err) } - result, seal, err := snap.SealedResult() + err = segment.Validate() if err != nil { - return fmt.Errorf("could not latest sealed result: %w", err) + return fmt.Errorf("invalid root sealing segment: %w", err) } - err = segment.Validate() + result, seal, err := snap.SealedResult() if err != nil { - return fmt.Errorf("invalid root sealing segment: %w", err) + return fmt.Errorf("could not latest sealed result: %w", err) } highest := segment.Highest() // reference block of the snapshot diff --git a/state/protocol/badger/validity_test.go b/state/protocol/badger/validity_test.go index e1bfa95bb62..3a9bb04d9c1 100644 --- a/state/protocol/badger/validity_test.go +++ b/state/protocol/badger/validity_test.go @@ -24,14 +24,14 @@ func TestEntityExpirySnapshotValidation(t *testing.T) { }) t.Run("not-enough-history", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - rootSnapshot.Encodable().Head.Height += 10 // advance height to be not spork root snapshot + rootSnapshot.Encodable().Head().Height += 10 // advance height to be not spork root snapshot err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) require.Error(t, err) }) t.Run("enough-history-spork-just-started", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) // advance height to be not spork root snapshot, but still lower than transaction expiry - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry / 2 + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry / 2 // add blocks to sealing segment 
rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(int(flow.DefaultTransactionExpiry / 2)) err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) @@ -40,7 +40,7 @@ func TestEntityExpirySnapshotValidation(t *testing.T) { t.Run("enough-history-long-spork", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) // advance height to be not spork root snapshot - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry * 2 + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry * 2 // add blocks to sealing segment rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(int(flow.DefaultTransactionExpiry) - 1) err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) @@ -49,7 +49,7 @@ func TestEntityExpirySnapshotValidation(t *testing.T) { t.Run("more-history-than-needed", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) // advance height to be not spork root snapshot - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry * 2 + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry * 2 // add blocks to sealing segment rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(flow.DefaultTransactionExpiry * 2) err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) diff --git a/state/protocol/chain_state.go b/state/protocol/chain_state.go index 2b143091f6f..699026d8380 100644 --- a/state/protocol/chain_state.go +++ b/state/protocol/chain_state.go @@ -49,9 +49,12 @@ type FollowerState interface { // been certified, and it's safe to add it to the protocol state. The QC // cannot be nil and must certify candidate block: // candidate.View == qc.View && candidate.BlockID == qc.BlockID - // The `candidate` block and its QC _must be valid_ (otherwise, the state will - // be corrupted). ExtendCertified inserts any given block, as long as its - // parent is already in the protocol state. Also orphaned blocks are excepted. + // + // CAUTION: + // - This function expects that `qc` has been validated. (otherwise, the state will be corrupted) + // - The parent block must already be stored. + // Orphaned blocks are excepted. + // // No errors are expected during normal operations. ExtendCertified(ctx context.Context, candidate *flow.Block, qc *flow.QuorumCertificate) error @@ -75,6 +78,10 @@ type ParticipantState interface { // us to execute fork-aware queries against ambiguous protocol state, while // still checking that the given block is a valid extension of the protocol state. // The candidate block must have passed HotStuff validation before being passed to Extend. + // + // CAUTION: per convention, the protocol state requires that the candidate's + // parent has already been ingested. Otherwise, an exception is returned. + // // Expected errors during normal operations: // * state.OutdatedExtensionError if the candidate block is outdated (e.g. 
orphaned) // * state.InvalidExtensionError if the candidate block is invalid diff --git a/state/protocol/inmem/convert.go b/state/protocol/inmem/convert.go index fbbc3ea9078..fa7bb88e24a 100644 --- a/state/protocol/inmem/convert.go +++ b/state/protocol/inmem/convert.go @@ -23,15 +23,6 @@ func FromSnapshot(from protocol.Snapshot) (*Snapshot, error) { ) // convert top-level fields - snap.Head, err = from.Head() - if err != nil { - return nil, fmt.Errorf("could not get head: %w", err) - } - snap.LatestResult, snap.LatestSeal, err = from.SealedResult() - if err != nil { - return nil, fmt.Errorf("could not get seal: %w", err) - } - snap.SealingSegment, err = from.SealingSegment() if err != nil { return nil, fmt.Errorf("could not get sealing segment: %w", err) @@ -218,9 +209,6 @@ func SnapshotFromBootstrapStateWithParams( } snap := SnapshotFromEncodable(EncodableSnapshot{ - Head: root.Header, - LatestSeal: seal, - LatestResult: result, SealingSegment: &flow.SealingSegment{ Blocks: []*flow.Block{root}, ExecutionResults: flow.ExecutionResultList{result}, diff --git a/state/protocol/inmem/encodable.go b/state/protocol/inmem/encodable.go index 7890f335ce8..e2bfce567df 100644 --- a/state/protocol/inmem/encodable.go +++ b/state/protocol/inmem/encodable.go @@ -1,6 +1,8 @@ package inmem import ( + "fmt" + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" @@ -8,15 +10,82 @@ import ( // EncodableSnapshot is the encoding format for protocol.Snapshot type EncodableSnapshot struct { - Head *flow.Header - LatestSeal *flow.Seal // TODO replace with same info from sealing segment - LatestResult *flow.ExecutionResult // TODO replace with same info from sealing segment SealingSegment *flow.SealingSegment QuorumCertificate *flow.QuorumCertificate Params EncodableParams SealedVersionBeacon *flow.SealedVersionBeacon } +// Head returns the latest finalized header of the Snapshot, which is the block +// in the sealing segment with the greatest Height. +// The EncodableSnapshot receiver must be correctly formed. +func (snap EncodableSnapshot) Head() *flow.Header { + return snap.SealingSegment.Highest().Header +} + +// LatestSeal returns the latest seal of the Snapshot. This is the seal +// for the block with the greatest height, of all seals in the Snapshot. +// The EncodableSnapshot receiver must be correctly formed. +// No errors are expected during normal operation. +func (snap EncodableSnapshot) LatestSeal() (*flow.Seal, error) { + head := snap.Head() + latestSealID := snap.SealingSegment.LatestSeals[head.ID()] + + // Genesis/Spork-Root Case: The spork root block is the latest sealed block. + // By protocol definition, FirstSeal seals the spork root block. + if snap.SealingSegment.FirstSeal != nil && snap.SealingSegment.FirstSeal.ID() == latestSealID { + return snap.SealingSegment.FirstSeal, nil + } + + // Common Case: The highest seal within the payload of any block in the sealing segment. + // Since seals are included in increasing height order, the latest seal must be in the + // first block (by height descending) which contains any seals. + for i := len(snap.SealingSegment.Blocks) - 1; i >= 0; i-- { + block := snap.SealingSegment.Blocks[i] + for _, seal := range block.Payload.Seals { + if seal.ID() == latestSealID { + return seal, nil + } + } + if len(block.Payload.Seals) > 0 { + // We encountered a block with some seals, but not the latest seal. + // This can only occur in a structurally invalid SealingSegment. 
+ return nil, fmt.Errorf("LatestSeal: sanity check failed: no latest seal") + } + } + // Correctly formatted sealing segments must contain latest seal. + return nil, fmt.Errorf("LatestSeal: unreachable for correctly formatted sealing segments") +} + +// LatestSealedResult returns the latest sealed result of the Snapshot. +// This is the result which is sealed by LatestSeal. +// The EncodableSnapshot receiver must be correctly formed. +// No errors are expected during normal operation. +func (snap EncodableSnapshot) LatestSealedResult() (*flow.ExecutionResult, error) { + latestSeal, err := snap.LatestSeal() + if err != nil { + return nil, fmt.Errorf("LatestSealedResult: could not get latest seal: %w", err) + } + + // For both spork root and mid-spork snapshots, the latest sealing result must + // either appear in a block payload or in the ExecutionResults field. + for i := len(snap.SealingSegment.Blocks) - 1; i >= 0; i-- { + block := snap.SealingSegment.Blocks[i] + for _, result := range block.Payload.Results { + if latestSeal.ResultID == result.ID() { + return result, nil + } + } + } + for _, result := range snap.SealingSegment.ExecutionResults { + if latestSeal.ResultID == result.ID() { + return result, nil + } + } + // Correctly formatted sealing segments must contain latest result. + return nil, fmt.Errorf("LatestSealedResult: unreachable for correctly formatted sealing segments") +} + // EncodableDKG is the encoding format for protocol.DKG type EncodableDKG struct { GroupKey encodable.RandomBeaconPubKey diff --git a/state/protocol/inmem/encodable_test.go b/state/protocol/inmem/encodable_test.go index bc9aba73383..7fe2e2f898d 100644 --- a/state/protocol/inmem/encodable_test.go +++ b/state/protocol/inmem/encodable_test.go @@ -34,6 +34,9 @@ func TestEncodeDecode(t *testing.T) { require.NoError(t, err) // check that the computed and stored result IDs are consistent - decodedResult, decodedSeal := decodedSnapshot.LatestResult, decodedSnapshot.LatestSeal + decodedSeal, err := decodedSnapshot.LatestSeal() + require.NoError(t, err) + decodedResult, err := decodedSnapshot.LatestSealedResult() + require.NoError(t, err) assert.Equal(t, decodedResult.ID(), decodedSeal.ResultID) } diff --git a/state/protocol/inmem/snapshot.go b/state/protocol/inmem/snapshot.go index 955b796368c..3559912b13c 100644 --- a/state/protocol/inmem/snapshot.go +++ b/state/protocol/inmem/snapshot.go @@ -20,7 +20,7 @@ type Snapshot struct { var _ protocol.Snapshot = (*Snapshot)(nil) func (s Snapshot) Head() (*flow.Header, error) { - return s.enc.Head, nil + return s.enc.Head(), nil } func (s Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { @@ -50,11 +50,23 @@ func (s Snapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { } func (s Snapshot) Commit() (flow.StateCommitment, error) { - return s.enc.LatestSeal.FinalState, nil + latestSeal, err := s.enc.LatestSeal() + if err != nil { + return flow.StateCommitment{}, nil + } + return latestSeal.FinalState, nil } func (s Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { - return s.enc.LatestResult, s.enc.LatestSeal, nil + latestSeal, err := s.enc.LatestSeal() + if err != nil { + return nil, nil, err + } + latestSealedResult, err := s.enc.LatestSealedResult() + if err != nil { + return nil, nil, err + } + return latestSealedResult, latestSeal, nil } func (s Snapshot) SealingSegment() (*flow.SealingSegment, error) { diff --git a/state/protocol/invalid/snapshot.go b/state/protocol/invalid/snapshot.go index dd2666192f9..7f453a763f2 
diff --git a/state/protocol/invalid/snapshot.go b/state/protocol/invalid/snapshot.go
index dd2666192f9..7f453a763f2 100644
--- a/state/protocol/invalid/snapshot.go
+++ b/state/protocol/invalid/snapshot.go
@@ -27,6 +27,8 @@ func NewSnapshot(err error) *Snapshot {
 	return &Snapshot{fmt.Errorf("critical unexpected error querying snapshot: %w", err)}
 }

+var _ protocol.Snapshot = (*Snapshot)(nil)
+
 // NewSnapshotf is NewSnapshot with ergonomic error formatting.
 func NewSnapshotf(msg string, args ...interface{}) *Snapshot {
 	return NewSnapshot(fmt.Errorf(msg, args...))
diff --git a/state/protocol/snapshot.go b/state/protocol/snapshot.go
index 9c1bf3aee5e..6db3603c442 100644
--- a/state/protocol/snapshot.go
+++ b/state/protocol/snapshot.go
@@ -31,7 +31,10 @@
 	// history. It can represent either a finalized or ambiguous block,
 	// depending on our selection criteria. Either way, it's the block on which
 	// we should build the next block in the context of the selected state.
-	// TODO document error returns
+	// Expected error returns:
+	//  - state.ErrUnknownSnapshotReference if the reference point for the snapshot
+	//    (height or block ID) does not resolve to a queryable block in the state.
+	// All other errors should be treated as exceptions.
 	Head() (*flow.Header, error)

 	// QuorumCertificate returns a valid quorum certificate for the header at
@@ -52,12 +55,19 @@
 	//
 	// It allows us to provide optional upfront filters which can be used by the
 	// implementation to speed up database lookups.
-	// TODO document error returns
+	// Expected error returns:
+	//  - state.ErrUnknownSnapshotReference if the reference point for the snapshot
+	//    (height or block ID) does not resolve to a queryable block in the state.
+	// All other errors should be treated as exceptions.
 	Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error)

 	// Identity attempts to retrieve the node with the given identifier at the
 	// selected point of the protocol state history. It will error if it doesn't exist.
-	// TODO document error returns
+	// Expected error returns:
+	//  - state.ErrUnknownSnapshotReference if the reference point for the snapshot
+	//    (height or block ID) does not resolve to a queryable block in the state.
+	//  - protocol.IdentityNotFoundError if nodeID does not correspond to a valid node.
+	// All other errors should be treated as exceptions.
 	Identity(nodeID flow.Identifier) (*flow.Identity, error)

 	// SealedResult returns the most recent included seal as of this block and
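
Reviewer note (illustrative, not part of the patch): a sketch of how a caller might branch on the errors now documented for protocol.Snapshot. It assumes state.ErrUnknownSnapshotReference is a sentinel checked with errors.Is and that protocol.IsIdentityNotFound recognizes IdentityNotFoundError, matching the conventions referenced above; all other identifiers are placeholders.

    package sketch

    import (
    	"errors"
    	"fmt"

    	"github.com/onflow/flow-go/model/flow"
    	"github.com/onflow/flow-go/state"
    	"github.com/onflow/flow-go/state/protocol"
    )

    // lookupIdentity distinguishes the documented expected errors from exceptions.
    func lookupIdentity(snap protocol.Snapshot, nodeID flow.Identifier) (*flow.Identity, error) {
    	identity, err := snap.Identity(nodeID)
    	switch {
    	case err == nil:
    		return identity, nil
    	case errors.Is(err, state.ErrUnknownSnapshotReference):
    		// expected: the snapshot's reference block is not yet known locally; caller may retry later
    		return nil, err
    	case protocol.IsIdentityNotFound(err):
    		// expected: nodeID is not a valid participant at this snapshot
    		return nil, err
    	default:
    		// exception: treat as irrecoverable
    		return nil, fmt.Errorf("unexpected error querying identity %x: %w", nodeID, err)
    	}
    }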
diff --git a/state/protocol/util_test.go b/state/protocol/util_test.go
index 81e9489815c..cf601183fe2 100644
--- a/state/protocol/util_test.go
+++ b/state/protocol/util_test.go
@@ -26,7 +26,7 @@ func TestIsSporkRootSnapshot(t *testing.T) {

 	t.Run("other snapshot", func(t *testing.T) {
 		snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles()))
-		snapshot.Encodable().Head.Height += 1 // modify head height to break equivalence with spork root block height
+		snapshot.Encodable().Head().Height += 1 // modify head height to break equivalence with spork root block height
 		isSporkRoot, err := protocol.IsSporkRootSnapshot(snapshot)
 		require.NoError(t, err)
 		assert.False(t, isSporkRoot)
diff --git a/storage/headers.go b/storage/headers.go
index ee3c57289d4..45e2f7b4a22 100644
--- a/storage/headers.go
+++ b/storage/headers.go
@@ -24,7 +24,7 @@
 	// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized
 	// version of `ByHeight` that skips retrieving the block. Expected errors during normal operations:
-	//  * `storage.ErrNotFound` if no finalized block is known at given height
+	//  - storage.ErrNotFound if no finalized block is known at given height
 	BlockIDByHeight(height uint64) (flow.Identifier, error)

 	// ByParentID finds all children for the given parent block. The returned headers
diff --git a/storage/index.go b/storage/index.go
index f1f76e8df5b..a6e815c6c1f 100644
--- a/storage/index.go
+++ b/storage/index.go
@@ -10,5 +10,7 @@ type Index interface {
 	Store(blockID flow.Identifier, index *flow.Index) error

 	// ByBlockID retrieves the index for a block payload.
+	// Error returns:
+	//  - ErrNotFound if no index for a block with the given ID exists
 	ByBlockID(blockID flow.Identifier) (*flow.Index, error)
 }
diff --git a/utils/logging/consts.go b/utils/logging/consts.go
index 5bf0f0d8b6c..a4af341c684 100644
--- a/utils/logging/consts.go
+++ b/utils/logging/consts.go
@@ -9,6 +9,10 @@ const (
 	// This is used to add an easily searchable label to the log events.
 	KeyNetworkingSecurity = "networking-security"

+	// KeyProtocolViolation is a logging label that is used to flag the log event as a byzantine protocol violation.
+	// This is used to add an easily searchable label to the log events.
+	KeyProtocolViolation = "byzantine-protocol-violation"
+
 	// KeyLoad is a logging label that is used to flag the log event as a load issue.
 	KeyLoad = "load"
 )
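
Reviewer note (illustrative, not part of the patch): one way the new KeyProtocolViolation label might be attached to a log event so violations are easy to search for. The boolean-field style mirrors how comparable labels in this package are commonly applied, but that convention and the wiring below are assumptions, not part of this change.

    package sketch

    import (
    	"github.com/rs/zerolog"

    	"github.com/onflow/flow-go/utils/logging"
    )

    // reportProtocolViolation tags a warning so dashboards can filter on the label key.
    func reportProtocolViolation(log zerolog.Logger, originID string) {
    	log.Warn().
    		Bool(logging.KeyProtocolViolation, true).
    		Str("origin_id", originID).
    		Msg("dropping message that violates protocol rules")
    }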
diff --git a/utils/unittest/chain_suite.go b/utils/unittest/chain_suite.go
index 5d232c50aa2..3ff1ec4e483 100644
--- a/utils/unittest/chain_suite.go
+++ b/utils/unittest/chain_suite.go
@@ -258,6 +258,13 @@ func (bc *BaseChainSuite) SetupChain() {
 			return nil
 		},
 	)
+	bc.HeadersDB.On("Exists", mock.Anything).Return(
+		func(blockID flow.Identifier) bool {
+			_, found := bc.Blocks[blockID]
+			return found
+		},
+		func(blockID flow.Identifier) error { return nil },
+	)
 	bc.HeadersDB.On("ByHeight", mock.Anything).Return(
 		func(blockHeight uint64) *flow.Header {
 			for _, b := range bc.Blocks {
@@ -475,7 +482,7 @@ type subgraphFixture struct {
 	Approvals map[uint64]map[flow.Identifier]*flow.ResultApproval // chunkIndex -> Verifier Node ID -> Approval
 }

-// Generates a valid subgraph:
+// ValidSubgraphFixture generates a valid subgraph:
 // let
 //  - R1 be a result which pertains to blockA
 //  - R2 be R1's previous result,
@@ -566,7 +573,7 @@ func (bc *BaseChainSuite) Extend(block *flow.Block) {
 	}
 }

-// addSubgraphFixtureToMempools adds add entities in subgraph to mempools and persistent storage mocks
+// AddSubgraphFixtureToMempools adds entities in subgraph to mempools and persistent storage mocks
 func (bc *BaseChainSuite) AddSubgraphFixtureToMempools(subgraph subgraphFixture) {
 	bc.Blocks[subgraph.ParentBlock.ID()] = subgraph.ParentBlock
 	bc.Blocks[subgraph.Block.ID()] = subgraph.Block
diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go
index ca48c8aca29..cfde1db2b01 100644
--- a/utils/unittest/fixtures.go
+++ b/utils/unittest/fixtures.go
@@ -959,11 +959,11 @@ func ServiceEventsFixture(n int) flow.ServiceEventList {
 }

 func ExecutionResultFixture(opts ...func(*flow.ExecutionResult)) *flow.ExecutionResult {
-	blockID := IdentifierFixture()
+	executedBlockID := IdentifierFixture()
 	result := &flow.ExecutionResult{
 		PreviousResultID: IdentifierFixture(),
-		BlockID:          IdentifierFixture(),
-		Chunks:           ChunkListFixture(2, blockID),
+		BlockID:          executedBlockID,
+		Chunks:           ChunkListFixture(2, executedBlockID),
 		ExecutionDataID:  IdentifierFixture(),
 	}
diff --git a/utils/unittest/staker.go b/utils/unittest/staker.go
deleted file mode 100644
index 6305bc224a2..00000000000
--- a/utils/unittest/staker.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package unittest
-
-import (
-	"github.com/onflow/flow-go/model/flow"
-)
-
-type FixedStaker struct {
-	Staked bool
-}
-
-func NewFixedStaker(initial bool) *FixedStaker {
-	return &FixedStaker{
-		Staked: initial,
-	}
-}
-
-func (f *FixedStaker) AmIStakedAt(_ flow.Identifier) bool {
-	return f.Staked
-}
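
Reviewer note (illustrative, not part of the patch): the Headers.Exists stub added to BaseChainSuite above relies on testify/mockery's support for computing return values via functions. A condensed, reusable form of that pattern; the mock package path and types are assumed to match flow-go's generated storage mocks.

    package sketch

    import (
    	"github.com/stretchr/testify/mock"

    	"github.com/onflow/flow-go/model/flow"
    	storagemock "github.com/onflow/flow-go/storage/mock"
    )

    // stubHeaderExists makes the mocked Headers store report existence based on the
    // supplied fixture map, mirroring the BaseChainSuite setup above.
    func stubHeaderExists(headersDB *storagemock.Headers, blocks map[flow.Identifier]*flow.Block) {
    	headersDB.On("Exists", mock.Anything).Return(
    		func(blockID flow.Identifier) bool { // first return value, computed per call
    			_, found := blocks[blockID]
    			return found
    		},
    		func(blockID flow.Identifier) error { return nil }, // second return value: no error
    	)
    }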