diff --git a/cmd/cmdtest/test_cmd.go b/cmd/cmdtest/test_cmd.go
index bfbf08da7..e093188f1 100644
--- a/cmd/cmdtest/test_cmd.go
+++ b/cmd/cmdtest/test_cmd.go
@@ -80,7 +80,9 @@ func (tt *TestCmd) Run(name string, args ...string) {
 //
 //	cli.expect(`Passphrase: {{.InputLine "password"}}`)
 func (tt *TestCmd) InputLine(s string) string {
-	io.WriteString(tt.stdin, s+"\n")
+	if _, err := io.WriteString(tt.stdin, s+"\n"); err != nil {
+		tt.Fatalf("Failed to write to stdin: %v", err)
+	}
 	return ""
 }

@@ -121,7 +123,10 @@ func (tt *TestCmd) matchExactOutput(want []byte) error {
 		// Grab any additional buffered output in case of mismatch
 		// because it might help with debugging.
 		buf = append(buf, make([]byte, tt.stdout.Buffered())...)
-		tt.stdout.Read(buf[n:])
+		if _, err := tt.stdout.Read(buf[n:]); err != nil {
+			tt.Fatalf("Failed to read buffered output: %v", err)
+		}
+
 		// Find the mismatch position.
 		for i := 0; i < n; i++ {
 			if want[i] != buf[i] {
diff --git a/cmd/sonicd/app/run_test.go b/cmd/sonicd/app/run_test.go
index 348d66995..aabadc1f6 100644
--- a/cmd/sonicd/app/run_test.go
+++ b/cmd/sonicd/app/run_test.go
@@ -2,11 +2,12 @@ package app

 import (
 	"fmt"
-	"github.com/Fantom-foundation/lachesis-base/utils/cachescale"
 	"os"
 	"strings"
 	"testing"

+	"github.com/Fantom-foundation/lachesis-base/utils/cachescale"
+
 	"github.com/Fantom-foundation/go-opera/cmd/sonictool/genesis"
 	"github.com/Fantom-foundation/go-opera/config"
 	"github.com/Fantom-foundation/go-opera/integration/makefakegenesis"
@@ -28,7 +29,11 @@ func initFakenetDatadir(dataDir string, validatorsNum idx.Validator) {
 		futils.ToFtm(1000000000),
 		futils.ToFtm(5000000),
 	)
-	defer genesisStore.Close()
+	defer func() {
+		if err := genesisStore.Close(); err != nil {
+			panic(fmt.Errorf("failed to close genesis store: %v", err))
+		}
+	}()

 	if err := genesis.ImportGenesisStore(genesis.ImportParams{
 		GenesisStore: genesisStore,
@@ -37,7 +42,7 @@ func initFakenetDatadir(dataDir string, validatorsNum idx.Validator) {
 		LiveDbCache:  1, // Set lowest cache
 		ArchiveCache: 1, // Set lowest cache
 	}); err != nil {
-		panic(err)
+		panic(fmt.Errorf("failed to import genesis store: %v", err))
 	}
 }

@@ -99,7 +104,11 @@ func exec(t *testing.T, args ...string) *testcli {
 	}

 	// Remove the temporary datadir.
-	tt.Cleanup = func() { os.RemoveAll(tt.Datadir) }
+	tt.Cleanup = func() {
+		if err := os.RemoveAll(tt.Datadir); err != nil {
+			t.Fatalf("failed to remove temporary datadir: %v", err)
+		}
+	}
 	defer func() {
 		if t.Failed() {
 			tt.Cleanup()
diff --git a/evmcore/tx_pool_test.go b/evmcore/tx_pool_test.go
index 9520af69c..2b165b80c 100644
--- a/evmcore/tx_pool_test.go
+++ b/evmcore/tx_pool_test.go
@@ -497,7 +497,9 @@ func TestTransactionQueue(t *testing.T) {
 	testAddBalance(pool, from, big.NewInt(1000))
 	<-pool.requestReset(nil, nil)

-	pool.enqueueTx(tx.Hash(), tx, false, true)
+	if _, err := pool.enqueueTx(tx.Hash(), tx, false, true); err != nil {
+		t.Error("failed to enqueue tx", err)
+	}
 	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
 	if len(pool.pending) != 1 {
 		t.Error("expected valid txs to be 1 is", len(pool.pending))
@@ -506,7 +508,9 @@ func TestTransactionQueue(t *testing.T) {
 	tx = transaction(1, 100, key)
 	from, _ = deriveSender(tx)
 	testSetNonce(pool, from, 2)
-	pool.enqueueTx(tx.Hash(), tx, false, true)
+	if _, err := pool.enqueueTx(tx.Hash(), tx, false, true); err != nil {
+		t.Error("failed to enqueue tx", err)
+	}

 	<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
 	if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
@@ -530,9 +534,15 @@ func TestTransactionQueue2(t *testing.T) {
 	testAddBalance(pool, from, big.NewInt(1000))
 	pool.reset(nil, nil)

-	pool.enqueueTx(tx1.Hash(), tx1, false, true)
-	pool.enqueueTx(tx2.Hash(), tx2, false, true)
-	pool.enqueueTx(tx3.Hash(), tx3, false, true)
+	if _, err := pool.enqueueTx(tx1.Hash(), tx1, false, true); err != nil {
+		t.Error("failed to enqueue tx1", err)
+	}
+	if _, err := pool.enqueueTx(tx2.Hash(), tx2, false, true); err != nil {
+		t.Error("failed to enqueue tx2", err)
+	}
+	if _, err := pool.enqueueTx(tx3.Hash(), tx3, false, true); err != nil {
+		t.Error("failed to enqueue tx3", err)
+	}
 	pool.promoteExecutables([]common.Address{from})

 	if len(pool.pending) != 1 {
@@ -653,7 +663,10 @@ func TestTransactionDoubleNonce(t *testing.T) {
 	}

 	// Add the third transaction and ensure it's not saved (smaller price)
-	pool.add(tx3, false)
+	if _, err := pool.add(tx3, false); err == nil {
+		t.Error("expected transaction to be rejected, it was not")
+	}
+
 	<-pool.requestPromoteExecutables(newAccountSet(signer, addr))
 	if pool.pending[addr].Len() != 1 {
 		t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
@@ -747,9 +760,15 @@ func TestTransactionDropping(t *testing.T) {

 	pool.priced.Put(tx2, false)
 	pool.promoteTx(account, tx2.Hash(), tx2)
-	pool.enqueueTx(tx10.Hash(), tx10, false, true)
-	pool.enqueueTx(tx11.Hash(), tx11, false, true)
-	pool.enqueueTx(tx12.Hash(), tx12, false, true)
+	if _, err := pool.enqueueTx(tx10.Hash(), tx10, false, true); err != nil {
+		t.Fatalf("failed to enqueue tx10: %v", err)
+	}
+	if _, err := pool.enqueueTx(tx11.Hash(), tx11, false, true); err != nil {
+		t.Fatalf("failed to enqueue tx11: %v", err)
+	}
+	if _, err := pool.enqueueTx(tx12.Hash(), tx12, false, true); err != nil {
+		t.Fatalf("failed to enqueue tx12: %v", err)
+	}

 	// Check that pre and post validations leave the pool as is
 	if pool.pending[account].Len() != 3 {
@@ -1589,8 +1608,12 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])

 	// Import the batch and that both pending and queued transactions match up
-	pool.AddRemotesSync(txs)
-	pool.AddLocal(ltx)
+	if errs := pool.AddRemotesSync(txs); errors.Join(errs...) != nil {
+		t.Fatalf("failed to add remote transactions: %v", errors.Join(errs...))
+	}
+	if err := pool.AddLocal(ltx); err != nil {
+		t.Fatalf("failed to add local transaction: %v", err)
+	}

 	pending, queued := pool.Stats()
 	if pending != 7 {
@@ -1710,8 +1733,12 @@ func TestTransactionPoolRepricingDynamicFee(t *testing.T) {
 	ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])

 	// Import the batch and that both pending and queued transactions match up
-	pool.AddRemotesSync(txs)
-	pool.AddLocal(ltx)
+	if errs := pool.AddRemotesSync(txs); errors.Join(errs...) != nil {
+		t.Fatalf("failed to add transactions: %v", errs)
+	}
+	if err := pool.AddLocal(ltx); err != nil {
+		t.Fatalf("failed to add local transaction: %v", err)
+	}

 	pending, queued := pool.Stats()
 	if pending != 7 {
@@ -1907,8 +1934,12 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 	ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])

 	// Import the batch and that both pending and queued transactions match up
-	pool.AddRemotes(txs)
-	pool.AddLocal(ltx)
+	if errs := pool.AddRemotes(txs); errors.Join(errs...) != nil {
+		t.Fatalf("failed to add remote transactions: %v", errs)
+	}
+	if err := pool.AddLocal(ltx); err != nil {
+		t.Fatalf("failed to add local transaction: %v", err)
+	}

 	pending, queued := pool.Stats()
 	if pending != 3 {
@@ -2077,8 +2108,14 @@ func TestTransactionPoolUnderpricingDynamicFee(t *testing.T) {
 	ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])

 	// Import the batch and that both pending and queued transactions match up
-	pool.AddRemotes(txs) // Pend K0:0, K0:1; Que K1:1
-	pool.AddLocal(ltx)   // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
+	// Pend K0:0, K0:1; Que K1:1
+	if errs := pool.AddRemotes(txs); errors.Join(errs...) != nil {
+		t.Fatalf("failed to add remote transactions: %v", errs)
+	}
+	// +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
+	if err := pool.AddLocal(ltx); err != nil {
+		t.Fatalf("failed to add local transaction: %v", err)
+	}

 	pending, queued := pool.Stats()
 	if pending != 3 {
@@ -2456,11 +2493,14 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
 		t.Fatalf("failed to create temporary journal: %v", err)
 	}
 	journal := file.Name()
-	defer os.Remove(journal)

 	// Clean up the temporary file, we only need the path for now
-	file.Close()
-	os.Remove(journal)
+	if err := file.Close(); err != nil {
+		t.Fatalf("failed to close temporary journal: %v", err)
+	}
+	if err := os.Remove(journal); err != nil {
+		t.Fatalf("failed to remove temporary journal: %v", err)
+	}

 	// Create the original pool to inject transaction into the journal
 	statedb := newTestTxPoolStateDb()
@@ -2805,7 +2845,10 @@ func benchmarkFuturePromotion(b *testing.B, size int) {

 	for i := 0; i < size; i++ {
 		tx := transaction(uint64(1+i), 100000, key)
-		pool.enqueueTx(tx.Hash(), tx, false, true)
+		_, err := pool.enqueueTx(tx.Hash(), tx, false, true)
+		if err != nil {
+			b.Fatalf("failed to enqueue transaction: %v", err)
+		}
 	}
 	// Benchmark the speed of pool validation
 	b.ResetTimer()
@@ -2872,8 +2915,12 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
 		pool, _ := setupTxPool()
 		testAddBalance(pool, account, big.NewInt(100000000))
 		for _, local := range locals {
-			pool.AddLocal(local)
+			err := pool.AddLocal(local)
+			if err != nil {
+				b.Fatalf("failed to add local transaction: %v", err)
+			}
 		}
+
 		b.StartTimer()
 		// Assign a high enough balance for testing
 		testAddBalance(pool, remoteAddr, big.NewInt(100000000))
diff --git a/gossip/common_test.go b/gossip/common_test.go
index 89309b0bb..eceb9b712 100644
--- a/gossip/common_test.go
+++ b/gossip/common_test.go
@@ -219,7 +219,9 @@ func newTestEnv(firstEpoch idx.Epoch, validatorsNum idx.Validator, tb testing.TB

 func (env *testEnv) Close() {
 	env.verWatcher.Stop()
-	env.store.Close()
+	if err := env.store.Close(); err != nil {
+		panic(fmt.Errorf("store.Close failed; %w", err))
+	}
 	env.tflusher.Stop()
 }

diff --git a/gossip/evm_test.go b/gossip/evm_test.go
index c58641711..2c00df1ba 100644
--- a/gossip/evm_test.go
+++ b/gossip/evm_test.go
@@ -50,7 +50,8 @@ func BenchmarkBallotTxsProcessing(b *testing.B) {
 	txs := make([]*types.Transaction, 0, count-1)
 	flushTxs := func() {
 		if len(txs) != 0 {
-			env.ApplyTxs(nextEpoch, txs...)
+			_, err := env.ApplyTxs(nextEpoch, txs...)
+			require.NoError(err, "failed to apply txs")
 		}
 		txs = txs[:0]
 	}
diff --git a/opera/genesisstore/fileshash/filehash_test.go b/opera/genesisstore/fileshash/filehash_test.go
index bb609a0df..30a8c4dc8 100644
--- a/opera/genesisstore/fileshash/filehash_test.go
+++ b/opera/genesisstore/fileshash/filehash_test.go
@@ -90,25 +90,27 @@ func testFileHash_ReadWrite(t *testing.T, content []byte, expRoot hash.Hash, pie
 	root, err := writer.Flush()
 	require.NoError(err)
 	require.Equal(expRoot.Hex(), root.Hex())
-	file.Close()
+	err = file.Close()
+	require.NoError(err)

 	maxMemUsage := memUsageOf(pieceSize, getPiecesNum(uint64(len(content)), pieceSize))

 	// normal case: correct root hash and content after reading file partially
 	if len(content) > 0 {
-		file, err = os.OpenFile(filePath, os.O_RDONLY, 0600)
+		file, err := os.OpenFile(filePath, os.O_RDONLY, 0600)
 		require.NoError(err)
 		reader := WrapReader(file, maxMemUsage, root)
 		readB := make([]byte, rand.Int64N(int64(len(content))))
 		err = ioread.ReadAll(reader, readB)
 		require.NoError(err)
 		require.Equal(content[:len(readB)], readB)
-		reader.Close()
+		err = reader.Close()
+		require.NoError(err)
 	}

 	// normal case: correct root hash and content after reading the whole file
 	{
-		file, err = os.OpenFile(filePath, os.O_RDONLY, 0600)
+		file, err := os.OpenFile(filePath, os.O_RDONLY, 0600)
 		require.NoError(err)
 		reader := WrapReader(file, maxMemUsage, root)
 		readB := make([]byte, len(content))
@@ -117,28 +119,31 @@ func testFileHash_ReadWrite(t *testing.T, content []byte, expRoot hash.Hash, pie
 		require.Equal(content, readB)
 		// try to read one more byte
 		require.Error(ioread.ReadAll(reader, make([]byte, 1)), io.EOF)
-		reader.Close()
+		err = reader.Close()
+		require.NoError(err)
 	}

 	// correct root hash and reading too much content
 	{
-		file, err = os.OpenFile(filePath, os.O_RDONLY, 0600)
+		file, err := os.OpenFile(filePath, os.O_RDONLY, 0600)
 		require.NoError(err)
 		reader := WrapReader(file, maxMemUsage, root)
 		readB := make([]byte, len(content)+1)
 		require.Error(ioread.ReadAll(reader, readB), io.EOF)
-		reader.Close()
+		err = reader.Close()
+		require.NoError(err)
 	}

 	// passing the wrong root hash to reader
 	{
-		file, err = os.OpenFile(filePath, os.O_RDONLY, 0600)
+		file, err := os.OpenFile(filePath, os.O_RDONLY, 0600)
 		require.NoError(err)
 		maliciousReader := WrapReader(file, maxMemUsage, hash.HexToHash("0x00"))
 		data := make([]byte, 1)
 		err = ioread.ReadAll(maliciousReader, data)
 		require.Contains(err.Error(), ErrInit.Error())
-		maliciousReader.Close()
+		err = maliciousReader.Close()
+		require.NoError(err)
 	}

 	// modify piece data to make the mismatch piece hash
diff --git a/tests/block_hash_test.go b/tests/block_hash_test.go
index dfc3fbbd5..1cf5a385e 100644
--- a/tests/block_hash_test.go
+++ b/tests/block_hash_test.go
@@ -36,9 +36,11 @@ func TestBlockHash_CorrectBlockHashesAreAccessibleInContracts(t *testing.T) {
 	}

 	t.Run("fresh network", runTest)
-	net.Restart()
+	err = net.Restart()
+	require.NoError(err, "failed to restart network; %v", err)
 	t.Run("restarted network", runTest)
-	net.RestartWithExportImport()
+	err = net.RestartWithExportImport()
+	require.NoError(err, "failed to restart network with export/import; %v", err)
 	t.Run("reinitialized network", runTest)
 }

diff --git a/tests/integration_test_net.go b/tests/integration_test_net.go
index e050e3a63..d0eeee002 100644
--- a/tests/integration_test_net.go
+++ b/tests/integration_test_net.go
@@ -71,8 +71,12 @@ func isPortFree(host string, port int) bool {
 	if err != nil {
 		return false
 	}
-	listener.Close()
-	return true
+	err = listener.Close()
+	if err != nil {
+		fmt.Printf("failed to close port %d: %v\n", port, err)
+	}
+	// if the port reports errors while closing, do not consider it free
+	return err == nil
 }

 func getFreePort() (int, error) {
@@ -315,7 +319,8 @@ func (n *IntegrationTestNet) start() error {

 // Stop shuts the underlying network down.
 func (n *IntegrationTestNet) Stop() {
-	syscall.Kill(syscall.Getpid(), syscall.SIGINT)
+	// best effort to stop the test environment, ignore error
+	_ = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
 	<-n.done
 	n.done = nil
 }
diff --git a/tests/selfdestruct_test.go b/tests/selfdestruct_test.go
index fdd6b9f21..e1e9a5ffb 100644
--- a/tests/selfdestruct_test.go
+++ b/tests/selfdestruct_test.go
@@ -131,7 +131,8 @@ func testSelfDestruct_Constructor(t *testing.T, net *IntegrationTestNet) {

 			// New beneficiary address for each test
 			beneficiaryAddress := common.Address{}
-			rand.Read(beneficiaryAddress[:])
+			_, err := rand.Read(beneficiaryAddress[:])
+			require.NoError(err)

 			// First transaction deploys contract
 			contract, deployReceipt, err := DeployContract(net,
@@ -280,7 +281,8 @@ func testSelfDestruct_NestedCall(t *testing.T, net *IntegrationTestNet) {

 			// generate a new beneficiary address for each test
 			beneficiaryAddress := common.Address{}
-			rand.Read(beneficiaryAddress[:])
+			_, err := rand.Read(beneficiaryAddress[:])
+			require.NoError(err)

 			// deploy factory contract
 			factory, receipt, err := DeployContract(net, selfdestruct.DeploySelfDestructFactory)
diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go
index b8c61d7cd..487712388 100644
--- a/topicsdb/topicsdb_test.go
+++ b/topicsdb/topicsdb_test.go
@@ -418,22 +418,22 @@ func TestPatternLimit(t *testing.T) {
 			err:     ErrEmptyTopics,
 		},
 		{
-			pattern: [][]common.Hash{[]common.Hash{}, []common.Hash{}, []common.Hash{}},
-			exp:     [][]common.Hash{[]common.Hash{}, []common.Hash{}, []common.Hash{}},
+			pattern: [][]common.Hash{{}, {}, {}},
+			exp:     [][]common.Hash{{}, {}, {}},
 			err:     ErrEmptyTopics,
 		},
 		{
 			pattern: [][]common.Hash{
-				[]common.Hash{hash.FakeHash(1), hash.FakeHash(1)}, []common.Hash{hash.FakeHash(2), hash.FakeHash(2)}, []common.Hash{hash.FakeHash(3), hash.FakeHash(4)}},
+				{hash.FakeHash(1), hash.FakeHash(1)}, {hash.FakeHash(2), hash.FakeHash(2)}, {hash.FakeHash(3), hash.FakeHash(4)}},
 			exp: [][]common.Hash{
-				[]common.Hash{hash.FakeHash(1)}, []common.Hash{hash.FakeHash(2)}, []common.Hash{hash.FakeHash(3), hash.FakeHash(4)}},
+				{hash.FakeHash(1)}, {hash.FakeHash(2)}, {hash.FakeHash(3), hash.FakeHash(4)}},
 			err: nil,
 		},
 		{
 			pattern: [][]common.Hash{
-				[]common.Hash{hash.FakeHash(1), hash.FakeHash(2)}, []common.Hash{hash.FakeHash(3), hash.FakeHash(4)}, []common.Hash{hash.FakeHash(5), hash.FakeHash(6)}},
+				{hash.FakeHash(1), hash.FakeHash(2)}, {hash.FakeHash(3), hash.FakeHash(4)}, {hash.FakeHash(5), hash.FakeHash(6)}},
 			exp: [][]common.Hash{
-				[]common.Hash{hash.FakeHash(1), hash.FakeHash(2)}, []common.Hash{hash.FakeHash(3), hash.FakeHash(4)}, []common.Hash{hash.FakeHash(5), hash.FakeHash(6)}},
+				{hash.FakeHash(1), hash.FakeHash(2)}, {hash.FakeHash(3), hash.FakeHash(4)}, {hash.FakeHash(5), hash.FakeHash(6)}},
 			err: nil,
 		},
 		{
diff --git a/utils/dbutil/compactdb/keys_test.go b/utils/dbutil/compactdb/keys_test.go
index 9b4749a7d..40293fd36 100644
--- a/utils/dbutil/compactdb/keys_test.go
+++ b/utils/dbutil/compactdb/keys_test.go
@@ -17,45 +17,56 @@ func TestLastKey(t *testing.T) {
 	dir := t.TempDir()
 	ldb, err := leveldb.New(path.Join(dir, "ldb"), 16*opt.MiB, 64, nil, nil)
 	require.NoError(t, err)
-	defer ldb.Close()
+	defer func() { require.NoError(t, ldb.Close()) }()
 	testLastKey(t, ldb)

 	pbl, err := pebble.New(path.Join(dir, "pbl"), 16*opt.MiB, 64, nil, nil)
 	require.NoError(t, err)
-	defer pbl.Close()
+	defer func() { require.NoError(t, pbl.Close()) }()
 	testLastKey(t, pbl)
 }

 func testLastKey(t *testing.T, db kvdb.Store) {
+	var err error
 	require.Nil(t, lastKey(db))

-	db.Put([]byte{0}, []byte{0})
+	err = db.Put([]byte{0}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{0}, lastKey(db))

-	db.Put([]byte{1}, []byte{0})
+	err = db.Put([]byte{1}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{1}, lastKey(db))

-	db.Put([]byte{2}, []byte{0})
+	err = db.Put([]byte{2}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{2}, lastKey(db))

-	db.Put([]byte{1, 0}, []byte{0})
+	err = db.Put([]byte{1, 0}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{2}, lastKey(db))

-	db.Put([]byte{3}, []byte{0})
+	err = db.Put([]byte{3}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{3}, lastKey(db))

-	db.Put([]byte{3, 0}, []byte{0})
+	err = db.Put([]byte{3, 0}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{3, 0}, lastKey(db))

-	db.Put([]byte{3, 1}, []byte{0})
+	err = db.Put([]byte{3, 1}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{3, 1}, lastKey(db))

-	db.Put([]byte{4}, []byte{0})
+	err = db.Put([]byte{4}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{4}, lastKey(db))

-	db.Put([]byte{4, 0, 0, 0}, []byte{0})
+	err = db.Put([]byte{4, 0, 0, 0}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{4, 0, 0, 0}, lastKey(db))

-	db.Put([]byte{4, 0, 1, 0}, []byte{0})
+	err = db.Put([]byte{4, 0, 1, 0}, []byte{0})
+	require.NoError(t, err)
 	require.Equal(t, []byte{4, 0, 1, 0}, lastKey(db))
 }
diff --git a/utils/fast/buffer_test.go b/utils/fast/buffer_test.go
index 12d847607..e59691dfc 100644
--- a/utils/fast/buffer_test.go
+++ b/utils/fast/buffer_test.go
@@ -66,7 +66,7 @@ func Benchmark(b *testing.B) {

 	b.Run("Read", func(b *testing.B) {
 		src := make([]byte, 1000)
-		rand.Read(src)
+		_, _ = rand.Read(src)
 		b.Run("Std", func(b *testing.B) {
 			for i := 0; i < b.N; i++ {