diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index e6c117fe5f..c5326d5700 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -78,7 +78,7 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format.
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 {
- if len(arguments) != 0 {
+ if len(arguments.NonIndexed()) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
return make([]interface{}, 0), nil
@@ -93,7 +93,7 @@ func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte)
return fmt.Errorf("abi: cannot unpack into a nil map")
}
if len(data) == 0 {
- if len(arguments) != 0 {
+ if len(arguments.NonIndexed()) != 0 {
return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
return nil // Nothing to unmarshal, return
@@ -115,8 +115,8 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
if len(values) == 0 {
- if len(arguments) != 0 {
- return fmt.Errorf("abi: attempting to copy no values while %d arguments are expected", len(arguments))
+ if len(arguments.NonIndexed()) != 0 {
+ return fmt.Errorf("abi: attempting to copy no values while arguments are expected")
}
return nil // Nothing to copy, return
}
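
Note (illustration only, not part of the patch): with the switch from len(arguments) to len(arguments.NonIndexed()), argument lists whose entries are all indexed can now be unpacked from an empty data payload instead of erroring out. A minimal sketch of the new behaviour, assuming abi.Arguments can be unmarshalled straight from the JSON definitions used in the unpack tests later in this diff:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/PlatONnetwork/PlatON-Go/accounts/abi"
    )

    func main() {
        // Two indexed event inputs and no non-indexed ones.
        def := `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`
        var args abi.Arguments
        if err := json.Unmarshal([]byte(def), &args); err != nil {
            panic(err)
        }
        // Nothing is carried in the data payload, so unpacking empty data
        // now yields an empty result instead of an error.
        out, err := args.Unpack(nil)
        fmt.Println(len(out), err) // 0 <nil>
    }
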
diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go
index f3fe398150..8fcfd5119b 100644
--- a/accounts/abi/bind/auth.go
+++ b/accounts/abi/bind/auth.go
@@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"errors"
"io"
- "io/ioutil"
"math/big"
"github.com/PlatONnetwork/PlatON-Go/accounts"
@@ -44,7 +43,7 @@ var ErrNotAuthorized = errors.New("not authorized to sign this account")
// Deprecated: Use NewTransactorWithChainID instead.
func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID")
- json, err := ioutil.ReadAll(keyin)
+ json, err := io.ReadAll(keyin)
if err != nil {
return nil, err
}
@@ -103,7 +102,7 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts {
// NewTransactorWithChainID is a utility method to easily create a transaction signer from
// an encrypted json key stream and the associated passphrase.
func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) {
- json, err := ioutil.ReadAll(keyin)
+ json, err := io.ReadAll(keyin)
if err != nil {
return nil, err
}
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index 8ca792da01..bfe4596659 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -325,3 +325,11 @@ func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
Removed: false,
}
}
+
+// TestCrashers contains some strings which previously caused the abi codec to crash.
+func TestCrashers(t *testing.T) {
+ abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"_1"}]}]}]`))
+ abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"&"}]}]}]`))
+ abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"----"}]}]}]`))
+ abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"foo.Bar"}]}]}]`))
+}
diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go
index 5dc3bd3e30..8adfc31adf 100644
--- a/accounts/abi/bind/bind.go
+++ b/accounts/abi/bind/bind.go
@@ -179,7 +179,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
contracts[types[i]] = &tmplContract{
Type: capitalise(types[i]),
- InputABI: strings.Replace(strippedABI, "\"", "\\\"", -1),
+ InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""),
InputBin: strings.TrimPrefix(strings.TrimSpace(bytecodes[i]), "0x"),
Constructor: evmABI.Constructor,
Calls: calls,
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 47fbaacbf2..8d890aef87 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -1825,7 +1825,7 @@ var bindTests = []struct {
t.Skip("go sdk not found for testing")
}
// Create a temporary workspace for the test suite
- ws, err := ioutil.TempDir("", "binding-test")
+ ws, err := os.MkdirTemp("", "binding-test")
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
@@ -1848,7 +1848,7 @@ var bindTests = []struct {
if err != nil {
t.Fatalf("test %d: failed to generate binding: %v", i, err)
}
- if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
+ if err = os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
t.Fatalf("test %d: failed to write binding: %v", i, err)
}
// Generate the test file with the injected test code
@@ -1864,7 +1864,7 @@ var bindTests = []struct {
%s
}
`, tt.imports, tt.name, tt.tester)
- if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
+ if err := os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
t.Fatalf("test %d: failed to write tests: %v", i, err)
}
}
diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go
index 357a3c294b..c0c7b59926 100644
--- a/accounts/abi/bind/template.go
+++ b/accounts/abi/bind/template.go
@@ -161,7 +161,7 @@ var (
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
- {{$contract.Type}}Bin = strings.Replace({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:], -1)
+ {{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index 54cd36d387..a72c777120 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -23,6 +23,8 @@ import (
"regexp"
"strconv"
"strings"
+ "unicode"
+ "unicode/utf8"
"github.com/PlatONnetwork/PlatON-Go/common"
)
@@ -173,6 +175,9 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
if err != nil {
return Type{}, err
}
+ if !isValidFieldName(fieldName) {
+ return Type{}, fmt.Errorf("field %d has invalid name", idx)
+ }
overloadedNames[fieldName] = fieldName
fields = append(fields, reflect.StructField{
Name: fieldName, // reflect.StructOf will panic for any exported field.
@@ -201,7 +206,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
// Foo.Bar type definition is not allowed in golang,
// convert the format to FooBar
- typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1)
+ typ.TupleRawName = strings.ReplaceAll(internalType[len(structPrefix):], ".", "")
}
case "function":
@@ -399,3 +404,30 @@ func getTypeSize(t Type) int {
}
return 32
}
+
+// isLetter reports whether a given 'rune' is classified as a Letter.
+// This method is copied from reflect/type.go
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName checks if a string is a valid (struct) field name or not.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+// This method is copied from reflect/type.go
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
+}
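
For illustration (not part of the patch): the new check turns what used to be a reflect.StructOf panic into a regular error. A sketch using the NewType signature from the hunk above; the ArgumentMarshaling field names are assumed from the abi package:

    // assumes: import "github.com/PlatONnetwork/PlatON-Go/accounts/abi"
    func rejectsBadFieldName() error {
        // A tuple component whose name cannot become a valid Go struct
        // field now yields "field 0 has invalid name" instead of a
        // reflect.StructOf panic.
        _, err := abi.NewType("tuple", "", []abi.ArgumentMarshaling{
            {Name: "----", Type: "bool"},
        })
        return err // non-nil
    }

The TestCrashers cases added in bind/base_test.go exercise the same kind of input through abi.JSON.
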
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index 1288e4d173..3e76f82f68 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -202,6 +202,23 @@ var unpackTests = []unpackTest{
IntOne *big.Int
}{big.NewInt(1)},
},
+ {
+ def: `[{"type":"bool"}]`,
+ enc: "",
+ want: false,
+ err: "abi: attempting to unmarshall an empty string while arguments are expected",
+ },
+ {
+ def: `[{"type":"bytes32","indexed":true},{"type":"uint256","indexed":false}]`,
+ enc: "",
+ want: false,
+ err: "abi: attempting to unmarshall an empty string while arguments are expected",
+ },
+ {
+ def: `[{"type":"bool","indexed":true},{"type":"uint64","indexed":true}]`,
+ enc: "",
+ want: false,
+ },
}
// TestLocalUnpackTests runs test specially designed only for unpacking.
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index c474ec6882..edad7b8b1c 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -18,7 +18,6 @@ package keystore
import (
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -382,11 +381,11 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
- // needed so that modTime of `file` is different to its current value after ioutil.WriteFile
+ // needed so that modTime of `file` is different to its current value after os.WriteFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents with crap
- if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
+ if err := os.WriteFile(file, []byte("foo"), 0644); err != nil {
t.Fatal(err)
return
}
@@ -399,9 +398,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
func forceCopyFile(dst, src string) error {
- data, err := ioutil.ReadFile(src)
+ data, err := os.ReadFile(src)
if err != nil {
return err
}
- return ioutil.WriteFile(dst, data, 0644)
+ return os.WriteFile(dst, data, 0644)
}
diff --git a/accounts/keystore/file_cache.go b/accounts/keystore/file_cache.go
index c86a08a812..b97818fd94 100644
--- a/accounts/keystore/file_cache.go
+++ b/accounts/keystore/file_cache.go
@@ -17,15 +17,14 @@
package keystore
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
- mapset "github.com/deckarep/golang-set"
"github.com/PlatONnetwork/PlatON-Go/log"
+ mapset "github.com/deckarep/golang-set"
)
// fileCache is a cache of files seen during scan of keystore.
@@ -41,7 +40,7 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
t0 := time.Now()
// List all the files from the keystore folder
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return nil, nil, nil, err
}
@@ -65,7 +64,11 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
// Gather the set of all and freshly modified files
all.Add(path)
- modified := fi.ModTime()
+ info, err := fi.Info()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ modified := info.ModTime()
if modified.After(fc.lastMod) {
mods.Add(path)
}
@@ -89,13 +92,13 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
}
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
-func nonKeyFile(fi os.FileInfo) bool {
+func nonKeyFile(fi os.DirEntry) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
- if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
+ if fi.IsDir() || !fi.Type().IsRegular() {
return true
}
return false
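
Aside (not part of the patch): os.ReadDir returns fs.DirEntry values rather than os.FileInfo, so the modification time has to be fetched lazily via Info(), as the hunk above does. A standard-library-only sketch of the pattern:

    import (
        "os"
        "time"
    )

    // newestModTime stats each directory entry on demand to find the
    // most recent modification time, mirroring the scan loop above.
    func newestModTime(dir string) (time.Time, error) {
        var newest time.Time
        entries, err := os.ReadDir(dir)
        if err != nil {
            return newest, err
        }
        for _, entry := range entries {
            info, err := entry.Info() // lazy stat; may fail if the file vanished
            if err != nil {
                return newest, err
            }
            if info.ModTime().After(newest) {
                newest = info.ModTime()
            }
        }
        return newest, nil
    }
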
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 5f8aa17930..b25a91ede5 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -23,7 +23,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -195,7 +194,7 @@ func writeTemporaryKeyFile(file string, content []byte) (string, error) {
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
- f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
+ f, err := os.CreateTemp(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return "", err
}
diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go
index e0e6603faa..f4c33be2c8 100644
--- a/accounts/keystore/keystore_test.go
+++ b/accounts/keystore/keystore_test.go
@@ -18,7 +18,6 @@ package keystore
import (
"github.com/PlatONnetwork/PlatON-Go/crypto"
- "io/ioutil"
"math/rand"
"os"
"runtime"
@@ -462,7 +461,7 @@ func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
}
func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) {
- d, err := ioutil.TempDir("", "eth-keystore-test")
+ d, err := os.MkdirTemp("", "eth-keystore-test")
if err != nil {
t.Fatal(err)
}
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go
index a077f4b389..37675c6633 100644
--- a/accounts/keystore/passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -34,7 +34,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -82,7 +81,7 @@ type keyStorePassphrase struct {
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
// Load the key from the keystore and decrypt its contents
- keyjson, err := ioutil.ReadFile(filename)
+ keyjson, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
diff --git a/accounts/keystore/passphrase_test.go b/accounts/keystore/passphrase_test.go
index e36ad8c76c..7783a41aa1 100644
--- a/accounts/keystore/passphrase_test.go
+++ b/accounts/keystore/passphrase_test.go
@@ -17,7 +17,7 @@
package keystore
import (
- "io/ioutil"
+ "os"
"testing"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -30,7 +30,7 @@ const (
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
func TestKeyEncryptDecrypt(t *testing.T) {
- keyjson, err := ioutil.ReadFile("testdata/very-light-scrypt.json")
+ keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
if err != nil {
t.Fatal(err)
}
diff --git a/accounts/keystore/plain_test.go b/accounts/keystore/plain_test.go
index 7c61efe202..10f2604a35 100644
--- a/accounts/keystore/plain_test.go
+++ b/accounts/keystore/plain_test.go
@@ -20,7 +20,6 @@ import (
"crypto/rand"
"encoding/hex"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -32,7 +31,7 @@ import (
)
func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
- d, err := ioutil.TempDir("", "platon-keystore-test")
+ d, err := os.MkdirTemp("", "platon-keystore-test")
if err != nil {
t.Fatal(err)
}
diff --git a/accounts/scwallet/hud.go b/accounts/scwallet/hud.go
index 0ed05327f0..53c7211a81 100644
--- a/accounts/scwallet/hud.go
+++ b/accounts/scwallet/hud.go
@@ -34,7 +34,7 @@ package scwallet
import (
"encoding/json"
- "io/ioutil"
+ "io"
"os"
"path/filepath"
"sort"
@@ -97,7 +97,7 @@ func (hub *Hub) readPairings() error {
return err
}
- pairingData, err := ioutil.ReadAll(pairingFile)
+ pairingData, err := io.ReadAll(pairingFile)
if err != nil {
return err
}
diff --git a/build/ci.go b/build/ci.go
index f27ca90489..6440e0a962 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -45,7 +45,6 @@ import (
"fmt"
"go/parser"
"go/token"
- "io/ioutil"
"log"
"os"
"os/exec"
@@ -123,6 +122,7 @@ var (
"xenial": "golang-go",
"bionic": "golang-go",
"focal": "golang-go",
+ "jammy": "golang-go", // EOL: 04/2032
}
debGoBootPaths = map[string]string{
@@ -265,7 +265,7 @@ func doInstall(cmdline []string) {
goinstall.Args = append(goinstall.Args, packages...)
build.MustRun(goinstall)
- if cmds, err := ioutil.ReadDir("cmd"); err == nil {
+ if cmds, err := os.ReadDir("cmd"); err == nil {
for _, cmd := range cmds {
pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly)
if err != nil {
@@ -616,7 +616,7 @@ func ppaUpload(workdir, ppa, sshUser string, files []string) {
if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
idfile = filepath.Join(workdir, "sshkey")
if _, err := os.Stat(idfile); os.IsNotExist(err) {
- ioutil.WriteFile(idfile, sshkey, 0600)
+ os.WriteFile(idfile, sshkey, 0600)
}
}
// Upload
@@ -639,7 +639,7 @@ func makeWorkdir(wdflag string) string {
if wdflag != "" {
err = os.MkdirAll(wdflag, 0744)
} else {
- wdflag, err = ioutil.TempDir("", "platon-build-")
+ wdflag, err = os.MkdirTemp("", "platon-build-")
}
if err != nil {
log.Fatal(err)
diff --git a/build/update-license.go b/build/update-license.go
index f19656e806..1ccc83ac29 100644
--- a/build/update-license.go
+++ b/build/update-license.go
@@ -24,7 +24,6 @@ import (
"bufio"
"bytes"
"fmt"
- "io/ioutil"
"log"
"os"
"os/exec"
@@ -212,7 +211,7 @@ func gitAuthors(files []string) []string {
}
func readAuthors() []string {
- content, err := ioutil.ReadFile("AUTHORS")
+ content, err := os.ReadFile("AUTHORS")
if err != nil && !os.IsNotExist(err) {
log.Fatalln("error reading AUTHORS:", err)
}
@@ -271,7 +270,7 @@ func writeAuthors(files []string) {
content.WriteString("\n")
}
fmt.Println("writing AUTHORS")
- if err := ioutil.WriteFile("AUTHORS", content.Bytes(), 0644); err != nil {
+ if err := os.WriteFile("AUTHORS", content.Bytes(), 0644); err != nil {
log.Fatalln(err)
}
}
@@ -347,7 +346,7 @@ func writeLicense(info *info) {
if err != nil {
log.Fatalf("error stat'ing %s: %v\n", info.file, err)
}
- content, err := ioutil.ReadFile(info.file)
+ content, err := os.ReadFile(info.file)
if err != nil {
log.Fatalf("error reading %s: %v\n", info.file, err)
}
@@ -366,7 +365,7 @@ func writeLicense(info *info) {
return
}
fmt.Println("writing", info.ShortLicense(), info.file)
- if err := ioutil.WriteFile(info.file, buf.Bytes(), fi.Mode()); err != nil {
+ if err := os.WriteFile(info.file, buf.Bytes(), fi.Mode()); err != nil {
log.Fatalf("error writing %s: %v", info.file, err)
}
}
diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go
index d22344595c..4428f85ea0 100644
--- a/cmd/abigen/main.go
+++ b/cmd/abigen/main.go
@@ -19,14 +19,11 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"os"
- "path/filepath"
"regexp"
"strings"
- "github.com/PlatONnetwork/PlatON-Go/accounts/abi"
-
"gopkg.in/urfave/cli.v1"
"github.com/PlatONnetwork/PlatON-Go/accounts/abi/bind"
@@ -61,24 +58,6 @@ var (
Name: "combined-json",
Usage: "Path to the combined-json file generated by compiler",
}
- solFlag = cli.StringFlag{
- Name: "sol",
- Usage: "Path to the Ethereum contract Solidity source to build and bind",
- }
- solcFlag = cli.StringFlag{
- Name: "solc",
- Usage: "Solidity compiler to use if source builds are requested",
- Value: "solc",
- }
- vyFlag = cli.StringFlag{
- Name: "vy",
- Usage: "Path to the Ethereum contract Vyper source to build and bind",
- }
- vyperFlag = cli.StringFlag{
- Name: "vyper",
- Usage: "Vyper compiler to use if source builds are requested",
- Value: "vyper",
- }
excFlag = cli.StringFlag{
Name: "exc",
Usage: "Comma separated types to exclude from binding",
@@ -109,10 +88,6 @@ func init() {
binFlag,
typeFlag,
jsonFlag,
- solFlag,
- solcFlag,
- vyFlag,
- vyperFlag,
excFlag,
pkgFlag,
outFlag,
@@ -124,7 +99,7 @@ func init() {
}
func abigen(c *cli.Context) error {
- utils.CheckExclusive(c, abiFlag, jsonFlag, solFlag, vyFlag) // Only one source can be selected.
+ utils.CheckExclusive(c, abiFlag, jsonFlag) // Only one source can be selected.
if c.GlobalString(pkgFlag.Name) == "" {
utils.Fatalf("No destination package specified (--pkg)")
}
@@ -157,9 +132,9 @@ func abigen(c *cli.Context) error {
)
input := c.GlobalString(abiFlag.Name)
if input == "-" {
- abi, err = ioutil.ReadAll(os.Stdin)
+ abi, err = io.ReadAll(os.Stdin)
} else {
- abi, err = ioutil.ReadFile(input)
+ abi, err = os.ReadFile(input)
}
if err != nil {
utils.Fatalf("Failed to read input ABI: %v", err)
@@ -168,7 +143,7 @@ func abigen(c *cli.Context) error {
var bin []byte
if binFile := c.GlobalString(binFlag.Name); binFile != "" {
- if bin, err = ioutil.ReadFile(binFile); err != nil {
+ if bin, err = os.ReadFile(binFile); err != nil {
utils.Fatalf("Failed to read input bytecode: %v", err)
}
if strings.Contains(string(bin), "//") {
@@ -188,34 +163,10 @@ func abigen(c *cli.Context) error {
for _, kind := range strings.Split(c.GlobalString(excFlag.Name), ",") {
exclude[strings.ToLower(kind)] = true
}
- var err error
var contracts map[string]*compiler.Contract
- switch {
- case c.GlobalIsSet(solFlag.Name):
- contracts, err = compiler.CompileSolidity(c.GlobalString(solcFlag.Name), c.GlobalString(solFlag.Name))
- if err != nil {
- utils.Fatalf("Failed to build Solidity contract: %v", err)
- }
- case c.GlobalIsSet(vyFlag.Name):
- output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
- if err != nil {
- utils.Fatalf("Failed to build Vyper contract: %v", err)
- }
- contracts = make(map[string]*compiler.Contract)
- for n, contract := range output {
- name := n
- // Sanitize the combined json names to match the
- // format expected by solidity.
- if !strings.Contains(n, ":") {
- // Remove extra path components
- name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy"))
- }
- contracts[name] = contract
- }
-
- case c.GlobalIsSet(jsonFlag.Name):
- jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
+ if c.GlobalIsSet(jsonFlag.Name) {
+ jsonOutput, err := os.ReadFile(c.GlobalString(jsonFlag.Name))
if err != nil {
utils.Fatalf("Failed to read combined-json from compiler: %v", err)
}
@@ -265,7 +216,7 @@ func abigen(c *cli.Context) error {
fmt.Printf("%s\n", code)
return nil
}
- if err := ioutil.WriteFile(c.GlobalString(outFlag.Name), []byte(code), 0600); err != nil {
+ if err := os.WriteFile(c.GlobalString(outFlag.Name), []byte(code), 0600); err != nil {
utils.Fatalf("Failed to write ABI binding: %v", err)
}
return nil
diff --git a/cmd/ctool/core/contractcmd.go b/cmd/ctool/core/contractcmd.go
index 3ab2c26eab..b9ca79e413 100644
--- a/cmd/ctool/core/contractcmd.go
+++ b/cmd/ctool/core/contractcmd.go
@@ -14,13 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with PlatON-Go. If not, see <http://www.gnu.org/licenses/>.
-
package core
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/PlatONnetwork/PlatON-Go/common/hexutil"
@@ -114,7 +113,7 @@ func DeployContract(abiFilePath string, codeFilePath string) error {
}
func parseFileToBytes(file string) []byte {
- bytes, err := ioutil.ReadFile(file)
+ bytes, err := os.ReadFile(file)
if err != nil {
panic(fmt.Sprintf("parse file %s error,%s", file, err.Error()))
}
diff --git a/cmd/ctool/core/http_util.go b/cmd/ctool/core/http_util.go
index 815e34aca5..715c3c360a 100644
--- a/cmd/ctool/core/http_util.go
+++ b/cmd/ctool/core/http_util.go
@@ -14,14 +14,13 @@
// You should have received a copy of the GNU General Public License
// along with PlatON-Go. If not, see <http://www.gnu.org/licenses/>.
-
package core
import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
)
@@ -53,7 +52,7 @@ func HttpPost(param JsonParam) (string, error) {
panic(fmt.Sprintf("no response from node,%s", err.Error()))
}
if err == nil && response.StatusCode == 200 {
- body, _ := ioutil.ReadAll(response.Body)
+ body, _ := io.ReadAll(response.Body)
return string(body), nil
} else {
panic(fmt.Sprintf("http response status :%s", response.Status))
diff --git a/cmd/ctool/core/nodeUtil.go b/cmd/ctool/core/nodeUtil.go
index 3aa81805ae..f51328a0e4 100644
--- a/cmd/ctool/core/nodeUtil.go
+++ b/cmd/ctool/core/nodeUtil.go
@@ -14,13 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with PlatON-Go. If not, see <http://www.gnu.org/licenses/>.
-
package core
import (
"crypto/rand"
"fmt"
- "io/ioutil"
"math/big"
"os"
"path/filepath"
@@ -145,7 +143,7 @@ func prepare(t *testing.T) (*testPlatON, string) {
parseConfig(t)
datadir := tmpdir(t)
json := filepath.Join(datadir, "genesis.json")
- err := ioutil.WriteFile(json, []byte(genesis), 0600)
+ err := os.WriteFile(json, []byte(genesis), 0600)
assert.Nil(t, err, fmt.Sprintf("failed to write genesis file: %v", err))
@@ -188,7 +186,7 @@ func trulyRandInt(lo, hi int) int {
}
func tmpdir(t *testing.T) string {
- dir, err := ioutil.TempDir("", "platon-test")
+ dir, err := os.MkdirTemp("", "platon-test")
if err != nil {
t.Fatal(err)
}
diff --git a/cmd/ctool/core/tx_stability.go b/cmd/ctool/core/tx_stability.go
index a0b655d949..d9f30b61b4 100644
--- a/cmd/ctool/core/tx_stability.go
+++ b/cmd/ctool/core/tx_stability.go
@@ -14,7 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with PlatON-Go. If not, see <http://www.gnu.org/licenses/>.
-
package core
import (
@@ -22,7 +21,6 @@ import (
"encoding/gob"
"encoding/json"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -135,7 +133,7 @@ func saveAddrs(addrs []string, pkFile string) {
if err != nil {
panic(fmt.Errorf("create addr.json error%s \n", err.Error()))
}
- err = ioutil.WriteFile(addrsPath, byts, 0644)
+ err = os.WriteFile(addrsPath, byts, 0644)
if err != nil {
panic(fmt.Errorf("write to addr.json error%s \n", err.Error()))
}
@@ -264,7 +262,7 @@ func getAllAddress(pkFile string) []string {
addrsPath = dir + DefaultAccountAddrFilePath
}
- bytes, err := ioutil.ReadFile(addrsPath)
+ bytes, err := os.ReadFile(addrsPath)
if err != nil {
panic(fmt.Errorf("get all address array error,%s \n", err.Error()))
}
diff --git a/cmd/ctool/core/utils.go b/cmd/ctool/core/utils.go
index fb78433e82..c6c7e6f1dc 100644
--- a/cmd/ctool/core/utils.go
+++ b/cmd/ctool/core/utils.go
@@ -14,13 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with PlatON-Go. If not, see <http://www.gnu.org/licenses/>.
-
package core
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -126,7 +124,7 @@ func parseConfigJson(configPath string) error {
configPath, _ = filepath.Abs(configPath)
}
- bytes, err := ioutil.ReadFile(configPath)
+ bytes, err := os.ReadFile(configPath)
if err != nil {
panic(fmt.Errorf("parse config file error,%s", err.Error()))
}
@@ -138,7 +136,7 @@ func parseConfigJson(configPath string) error {
}
func parseAbiFromJson(fileName string) ([]FuncDesc, error) {
- bytes, err := ioutil.ReadFile(fileName)
+ bytes, err := os.ReadFile(fileName)
if err != nil {
return nil, fmt.Errorf("parse abi file error: %s", err.Error())
}
diff --git a/cmd/keytool/changepassword.go b/cmd/keytool/changepassword.go
index 5d50b3b1e1..233a275172 100644
--- a/cmd/keytool/changepassword.go
+++ b/cmd/keytool/changepassword.go
@@ -2,7 +2,7 @@ package main
import (
"fmt"
- "io/ioutil"
+ "os"
"strings"
"gopkg.in/urfave/cli.v1"
@@ -30,7 +30,7 @@ Change the password of a keyfile.`,
keyfilepath := ctx.Args().First()
// Read key from file.
- keyjson, err := ioutil.ReadFile(keyfilepath)
+ keyjson, err := os.ReadFile(keyfilepath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
}
@@ -46,7 +46,7 @@ Change the password of a keyfile.`,
fmt.Println("Please provide a new password")
var newPhrase string
if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" {
- content, err := ioutil.ReadFile(passFile)
+ content, err := os.ReadFile(passFile)
if err != nil {
utils.Fatalf("Failed to read new password file '%s': %v", passFile, err)
}
@@ -62,7 +62,7 @@ Change the password of a keyfile.`,
}
// Then write the new keyfile in place of the old one.
- if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil {
+ if err := os.WriteFile(keyfilepath, newJson, 0600); err != nil {
utils.Fatalf("Error writing new keyfile to disk: %v", err)
}
diff --git a/cmd/keytool/generate.go b/cmd/keytool/generate.go
index 9a1228ac15..07c4818b32 100644
--- a/cmd/keytool/generate.go
+++ b/cmd/keytool/generate.go
@@ -19,7 +19,6 @@ package main
import (
"crypto/ecdsa"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -120,7 +119,7 @@ If you want to encrypt an existing private key, it can be specified by setting
if err := os.MkdirAll(filepath.Dir(keyfilepath), 0700); err != nil {
utils.Fatalf("Could not create directory %s", filepath.Dir(keyfilepath))
}
- if err := ioutil.WriteFile(keyfilepath, keyjson, 0600); err != nil {
+ if err := os.WriteFile(keyfilepath, keyjson, 0600); err != nil {
utils.Fatalf("Failed to write keyfile to %s: %v", keyfilepath, err)
}
diff --git a/cmd/keytool/hex_to_bech32.go b/cmd/keytool/hex_to_bech32.go
index ab66834b57..fbd990226d 100644
--- a/cmd/keytool/hex_to_bech32.go
+++ b/cmd/keytool/hex_to_bech32.go
@@ -3,7 +3,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"github.com/btcsuite/btcd/btcutil/bech32"
@@ -47,7 +47,7 @@ update hex/bech32 address to bech32 address.
var accounts []string
if ctx.IsSet(HexAccountFileFlag.Name) {
accountPath := ctx.String(HexAccountFileFlag.Name)
- accountjson, err := ioutil.ReadFile(accountPath)
+ accountjson, err := os.ReadFile(accountPath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", accountPath, err)
}
diff --git a/cmd/keytool/inspect.go b/cmd/keytool/inspect.go
index b3bc9190d3..11ee05f87c 100644
--- a/cmd/keytool/inspect.go
+++ b/cmd/keytool/inspect.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/hex"
"fmt"
- "io/ioutil"
+ "os"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -63,7 +63,7 @@ make sure to use this feature with great caution!`,
keyfilepath := ctx.Args().First()
// Read key from file.
- keyjson, err := ioutil.ReadFile(keyfilepath)
+ keyjson, err := os.ReadFile(keyfilepath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
}
diff --git a/cmd/keytool/message.go b/cmd/keytool/message.go
index b6eb24cb2a..5a11b03bb6 100644
--- a/cmd/keytool/message.go
+++ b/cmd/keytool/message.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/hex"
"fmt"
- "io/ioutil"
+ "os"
"gopkg.in/urfave/cli.v1"
@@ -57,7 +57,7 @@ To sign a message contained in a file, use the --msgfile flag.
// Load the keyfile.
keyfilepath := ctx.Args().First()
- keyjson, err := ioutil.ReadFile(keyfilepath)
+ keyjson, err := os.ReadFile(keyfilepath)
if err != nil {
utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err)
}
@@ -155,7 +155,7 @@ func getMessage(ctx *cli.Context, msgarg int) []byte {
if len(ctx.Args()) > msgarg {
utils.Fatalf("Can't use --msgfile and message argument at the same time.")
}
- msg, err := ioutil.ReadFile(file)
+ msg, err := os.ReadFile(file)
if err != nil {
utils.Fatalf("Can't read message file: %v", err)
}
diff --git a/cmd/keytool/message_test.go b/cmd/keytool/message_test.go
index 8b1b177784..b88b0dfe9a 100644
--- a/cmd/keytool/message_test.go
+++ b/cmd/keytool/message_test.go
@@ -17,14 +17,13 @@
package main
import (
- "io/ioutil"
"os"
"path/filepath"
"testing"
)
func TestMessageSignVerify(t *testing.T) {
- tmpdir, err := ioutil.TempDir("", "platonkey-test")
+ tmpdir, err := os.MkdirTemp("", "platonkey-test")
if err != nil {
t.Fatal("Can't create temporary directory:", err)
}
diff --git a/cmd/keytool/utils.go b/cmd/keytool/utils.go
index ba63452183..fe629540ea 100644
--- a/cmd/keytool/utils.go
+++ b/cmd/keytool/utils.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"strings"
"gopkg.in/urfave/cli.v1"
@@ -35,7 +35,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
// Look for the --passwordfile flag.
passphraseFile := ctx.String(passphraseFlag.Name)
if passphraseFile != "" {
- content, err := ioutil.ReadFile(passphraseFile)
+ content, err := os.ReadFile(passphraseFile)
if err != nil {
utils.Fatalf("Failed to read password file '%s': %v",
passphraseFile, err)
diff --git a/cmd/platon/accountcmd_test.go b/cmd/platon/accountcmd_test.go
index ea7c7a15f0..421c4f3218 100644
--- a/cmd/platon/accountcmd_test.go
+++ b/cmd/platon/accountcmd_test.go
@@ -17,13 +17,13 @@
package main
import (
- "io/ioutil"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/cespare/cp"
+ "os"
)
// These tests are 'smoke tests' for the account related
@@ -108,11 +108,11 @@ func TestAccountImport(t *testing.T) {
func importAccountWithExpect(t *testing.T, key string, expected string) {
dir := tmpdir(t)
keyfile := filepath.Join(dir, "key.prv")
- if err := ioutil.WriteFile(keyfile, []byte(key), 0600); err != nil {
+ if err := os.WriteFile(keyfile, []byte(key), 0600); err != nil {
t.Error(err)
}
passwordFile := filepath.Join(dir, "password.txt")
- if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
+ if err := os.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
t.Error(err)
}
platon := runPlatON(t, "account", "import", keyfile, "-password", passwordFile)
diff --git a/cmd/platon/dbcmd.go b/cmd/platon/dbcmd.go
index 8724613b7f..fc46c2559d 100644
--- a/cmd/platon/dbcmd.go
+++ b/cmd/platon/dbcmd.go
@@ -364,7 +364,7 @@ func inspect(ctx *cli.Context) error {
return rawdb.InspectDatabase(db, prefix, start)
}
-func showLeveldbStats(db ethdb.Stater) {
+func showLeveldbStats(db ethdb.KeyValueStater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
diff --git a/cmd/platon/genesis_test.go b/cmd/platon/genesis_test.go
index cae3399ca3..7337f70310 100644
--- a/cmd/platon/genesis_test.go
+++ b/cmd/platon/genesis_test.go
@@ -17,7 +17,6 @@
package main
import (
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -313,7 +312,7 @@ func TestCustomGenesis(t *testing.T) {
// Initialize the data directory with the custom genesis block
json := filepath.Join(datadir, "genesis.json")
- if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
+ if err := os.WriteFile(json, []byte(tt.genesis), 0600); err != nil {
t.Fatalf("test %d: failed to write genesis file: %v", i, err)
}
runPlatON(t, "--datadir", datadir, "init", json).WaitExit()
diff --git a/cmd/platon/run_test.go b/cmd/platon/run_test.go
index fb90d040f8..83e3f64682 100644
--- a/cmd/platon/run_test.go
+++ b/cmd/platon/run_test.go
@@ -19,7 +19,6 @@ package main
import (
"context"
"fmt"
- "io/ioutil"
"os"
"testing"
"time"
@@ -31,7 +30,7 @@ import (
)
func tmpdir(t *testing.T) string {
- dir, err := ioutil.TempDir("", "platon-test")
+ dir, err := os.MkdirTemp("", "platon-test")
if err != nil {
t.Fatal(err)
}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 570da5c262..43dcc7f86e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -21,10 +21,11 @@ import (
"crypto/ecdsa"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/eth/tracers"
+ "github.com/PlatONnetwork/PlatON-Go/ethdb/remotedb"
"io"
- "io/ioutil"
"math"
"math/big"
+ "os"
"path/filepath"
godebug "runtime/debug"
"strconv"
@@ -117,6 +118,10 @@ var (
Usage: "Data directory for the databases and keystore",
Value: DirectoryString(node.DefaultDataDir()),
}
+ RemoteDBFlag = cli.StringFlag{
+ Name: "remotedb",
+ Usage: "URL for remote database",
+ }
AncientFlag = DirectoryFlag{
Name: "datadir.ancient",
Usage: "Data directory for ancient chain segments (default = inside chaindata)",
@@ -725,6 +730,24 @@ var (
}
)
+var (
+ // DatabasePathFlags is the flag group of all database path flags.
+ DatabasePathFlags = []cli.Flag{
+ DataDirFlag,
+ AncientFlag,
+ RemoteDBFlag,
+ }
+)
+
+// GroupFlags combines the given flag slices together and returns the merged one.
+func GroupFlags(groups ...[]cli.Flag) []cli.Flag {
+ var ret []cli.Flag
+ for _, group := range groups {
+ ret = append(ret, group...)
+ }
+ return ret
+}
+
// MakeDataDir retrieves the currently requested data directory, terminating
// if none (or the empty string) is specified. If the node is starting a testnet,
// then a subdirectory of the specified datadir will be used.
@@ -1028,7 +1051,7 @@ func MakePasswordList(ctx *cli.Context) []string {
if path == "" {
return nil
}
- text, err := ioutil.ReadFile(path)
+ text, err := os.ReadFile(path)
if err != nil {
Fatalf("Failed to read password file: %v", err)
}
@@ -1508,12 +1531,19 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
var (
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
+
+ err error
+ chainDb ethdb.Database
)
- name := "chaindata"
- if ctx.GlobalString(SyncModeFlag.Name) == "light" {
- name = "lightchaindata"
+ switch {
+ case ctx.GlobalIsSet(RemoteDBFlag.Name):
+ log.Info("Using remote db", "url", ctx.GlobalString(RemoteDBFlag.Name))
+ chainDb, err = remotedb.New(ctx.GlobalString(RemoteDBFlag.Name))
+ case ctx.GlobalString(SyncModeFlag.Name) == "light":
+ chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
+ default:
+ chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly)
}
- chainDb, err := stack.OpenDatabaseWithFreezer(name, cache, handles, ctx.GlobalString(AncientFlag.Name), "", readonly)
if err != nil {
Fatalf("Could not open database: %v", err)
}
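
For context (not part of the patch), GroupFlags simply concatenates flag slices. A hedged usage sketch in which nodeFlags and rpcFlags are hypothetical flag groups defined by the caller; the cli and utils import paths are assumed from this file:

    // assumes: import "gopkg.in/urfave/cli.v1" and "github.com/PlatONnetwork/PlatON-Go/cmd/utils"
    func newApp() *cli.App {
        var nodeFlags, rpcFlags []cli.Flag // hypothetical groups defined elsewhere
        app := cli.NewApp()
        // The merged slice keeps the groups in order: node flags, RPC flags,
        // then the database path flags (--datadir, --datadir.ancient, --remotedb).
        app.Flags = utils.GroupFlags(nodeFlags, rpcFlags, utils.DatabasePathFlags)
        return app
    }
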
diff --git a/common/bytes.go b/common/bytes.go
index 3b59f133c9..e25500bcf5 100644
--- a/common/bytes.go
+++ b/common/bytes.go
@@ -21,6 +21,8 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
+ "errors"
+ "github.com/PlatONnetwork/PlatON-Go/common/hexutil"
"math"
)
@@ -81,6 +83,15 @@ func Hex2Bytes(str string) []byte {
return h
}
+// ParseHexOrString tries to hexdecode str, but if the prefix is missing, it instead just returns the raw bytes
+func ParseHexOrString(str string) ([]byte, error) {
+ b, err := hexutil.Decode(str)
+ if errors.Is(err, hexutil.ErrMissingPrefix) {
+ return []byte(str), nil
+ }
+ return b, err
+}
+
// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
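
A usage sketch for the new helper (not part of the patch); the exact decode error comes from hexutil and is assumed here rather than shown:

    // assumes: import "github.com/PlatONnetwork/PlatON-Go/common"
    func parseExamples() {
        decoded, _ := common.ParseHexOrString("0x64617461") // hex-decoded -> []byte("data")
        raw, _ := common.ParseHexOrString("data")           // no prefix   -> []byte("data") unchanged
        _, err := common.ParseHexOrString("0xzz")           // bad hex after 0x -> non-nil err
        _, _, _ = decoded, raw, err
    }
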
diff --git a/common/compiler/helpers.go b/common/compiler/helpers.go
index 5ed640de8f..063fc10811 100644
--- a/common/compiler/helpers.go
+++ b/common/compiler/helpers.go
@@ -17,14 +17,6 @@
// Package compiler wraps the Solidity and Vyper compiler executables (solc; vyper).
package compiler
-import (
- "bytes"
- "io/ioutil"
- "regexp"
-)
-
-var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)
-
// Contract contains information about a compiled contract, alongside its code and runtime code.
type Contract struct {
Code string `json:"code"`
@@ -51,15 +43,3 @@ type ContractInfo struct {
DeveloperDoc interface{} `json:"developerDoc"`
Metadata string `json:"metadata"`
}
-
-func slurpFiles(files []string) (string, error) {
- var concat bytes.Buffer
- for _, file := range files {
- content, err := ioutil.ReadFile(file)
- if err != nil {
- return "", err
- }
- concat.Write(content)
- }
- return concat.String(), nil
-}
diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go
index 210d0cbe10..ad8a44aa04 100644
--- a/common/compiler/solidity.go
+++ b/common/compiler/solidity.go
@@ -14,25 +14,14 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Package compiler wraps the Solidity and Vyper compiler executables (solc; vyper).
+// Package compiler wraps the ABI compilation outputs.
package compiler
import (
- "bytes"
"encoding/json"
- "errors"
"fmt"
- "os/exec"
- "strconv"
- "strings"
)
-// Solidity contains information about the solidity compiler.
-type Solidity struct {
- Path, Version, FullVersion string
- Major, Minor, Patch int
-}
-
// --combined-output format
type solcOutput struct {
Contracts map[string]struct {
@@ -58,91 +47,6 @@ type solcOutputV8 struct {
Version string
}
-func (s *Solidity) makeArgs() []string {
- p := []string{
- "--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc",
- "--optimize", // code optimizer switched on
- "--allow-paths", "., ./, ../", // default to support relative paths
- }
- if s.Major > 0 || s.Minor > 4 || s.Patch > 6 {
- p[1] += ",metadata,hashes"
- }
- return p
-}
-
-// SolidityVersion runs solc and parses its version output.
-func SolidityVersion(solc string) (*Solidity, error) {
- if solc == "" {
- solc = "solc"
- }
- var out bytes.Buffer
- cmd := exec.Command(solc, "--version")
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return nil, err
- }
- matches := versionRegexp.FindStringSubmatch(out.String())
- if len(matches) != 4 {
- return nil, fmt.Errorf("can't parse solc version %q", out.String())
- }
- s := &Solidity{Path: cmd.Path, FullVersion: out.String(), Version: matches[0]}
- if s.Major, err = strconv.Atoi(matches[1]); err != nil {
- return nil, err
- }
- if s.Minor, err = strconv.Atoi(matches[2]); err != nil {
- return nil, err
- }
- if s.Patch, err = strconv.Atoi(matches[3]); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-// CompileSolidityString builds and returns all the contracts contained within a source string.
-func CompileSolidityString(solc, source string) (map[string]*Contract, error) {
- if len(source) == 0 {
- return nil, errors.New("solc: empty source string")
- }
- s, err := SolidityVersion(solc)
- if err != nil {
- return nil, err
- }
- args := append(s.makeArgs(), "--")
- cmd := exec.Command(s.Path, append(args, "-")...)
- cmd.Stdin = strings.NewReader(source)
- return s.run(cmd, source)
-}
-
-// CompileSolidity compiles all given Solidity source files.
-func CompileSolidity(solc string, sourcefiles ...string) (map[string]*Contract, error) {
- if len(sourcefiles) == 0 {
- return nil, errors.New("solc: no source files")
- }
- source, err := slurpFiles(sourcefiles)
- if err != nil {
- return nil, err
- }
- s, err := SolidityVersion(solc)
- if err != nil {
- return nil, err
- }
- args := append(s.makeArgs(), "--")
- cmd := exec.Command(s.Path, append(args, sourcefiles...)...)
- return s.run(cmd, source)
-}
-
-func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, error) {
- var stderr, stdout bytes.Buffer
- cmd.Stderr = &stderr
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("solc: %v\n%s", err, stderr.Bytes())
- }
-
- return ParseCombinedJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " "))
-}
-
// ParseCombinedJSON takes the direct output of a solc --combined-output run and
// parses it into a map of string contract name to Contract structs. The
// provided source, language and compiler version, and compiler options are all
@@ -158,7 +62,6 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin
// Try to parse the output with the new solidity v.0.8.0 rules
return parseCombinedJSONV8(combinedJSON, source, languageVersion, compilerVersion, compilerOptions)
}
-
// Compilation succeeded, assemble and return the contracts.
contracts := make(map[string]*Contract)
for name, info := range output.Contracts {
diff --git a/common/compiler/vyper.go b/common/compiler/vyper.go
deleted file mode 100644
index a9bca95e59..0000000000
--- a/common/compiler/vyper.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package compiler wraps the Solidity and Vyper compiler executables (solc; vyper).
-package compiler
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "os/exec"
- "strconv"
- "strings"
-)
-
-// Vyper contains information about the vyper compiler.
-type Vyper struct {
- Path, Version, FullVersion string
- Major, Minor, Patch int
-}
-
-func (s *Vyper) makeArgs() []string {
- p := []string{
- "-f", "combined_json",
- }
- return p
-}
-
-// VyperVersion runs vyper and parses its version output.
-func VyperVersion(vyper string) (*Vyper, error) {
- if vyper == "" {
- vyper = "vyper"
- }
- var out bytes.Buffer
- cmd := exec.Command(vyper, "--version")
- cmd.Stdout = &out
- err := cmd.Run()
- if err != nil {
- return nil, err
- }
- matches := versionRegexp.FindStringSubmatch(out.String())
- if len(matches) != 4 {
- return nil, fmt.Errorf("can't parse vyper version %q", out.String())
- }
- s := &Vyper{Path: cmd.Path, FullVersion: out.String(), Version: matches[0]}
- if s.Major, err = strconv.Atoi(matches[1]); err != nil {
- return nil, err
- }
- if s.Minor, err = strconv.Atoi(matches[2]); err != nil {
- return nil, err
- }
- if s.Patch, err = strconv.Atoi(matches[3]); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-// CompileVyper compiles all given Vyper source files.
-func CompileVyper(vyper string, sourcefiles ...string) (map[string]*Contract, error) {
- if len(sourcefiles) == 0 {
- return nil, errors.New("vyper: no source files")
- }
- source, err := slurpFiles(sourcefiles)
- if err != nil {
- return nil, err
- }
- s, err := VyperVersion(vyper)
- if err != nil {
- return nil, err
- }
- args := s.makeArgs()
- cmd := exec.Command(s.Path, append(args, sourcefiles...)...)
- return s.run(cmd, source)
-}
-
-func (s *Vyper) run(cmd *exec.Cmd, source string) (map[string]*Contract, error) {
- var stderr, stdout bytes.Buffer
- cmd.Stderr = &stderr
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("vyper: %v\n%s", err, stderr.Bytes())
- }
-
- return ParseVyperJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " "))
-}
-
-// ParseVyperJSON takes the direct output of a vyper --f combined_json run and
-// parses it into a map of string contract name to Contract structs. The
-// provided source, language and compiler version, and compiler options are all
-// passed through into the Contract structs.
-//
-// The vyper output is expected to contain ABI and source mapping.
-//
-// Returns an error if the JSON is malformed or missing data, or if the JSON
-// embedded within the JSON is malformed.
-func ParseVyperJSON(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) {
- var output map[string]interface{}
- if err := json.Unmarshal(combinedJSON, &output); err != nil {
- return nil, err
- }
-
- // Compilation succeeded, assemble and return the contracts.
- contracts := make(map[string]*Contract)
- for name, info := range output {
- // Parse the individual compilation results.
- if name == "version" {
- continue
- }
- c := info.(map[string]interface{})
-
- contracts[name] = &Contract{
- Code: c["bytecode"].(string),
- RuntimeCode: c["bytecode_runtime"].(string),
- Info: ContractInfo{
- Source: source,
- Language: "Vyper",
- LanguageVersion: languageVersion,
- CompilerVersion: compilerVersion,
- CompilerOptions: compilerOptions,
- SrcMap: c["source_map"],
- SrcMapRuntime: "",
- AbiDefinition: c["abi"],
- UserDoc: "",
- DeveloperDoc: "",
- Metadata: "",
- },
- }
- }
- return contracts, nil
-}
diff --git a/common/test_utils.go b/common/test_utils.go
index 33d2bc17da..7767f206f6 100644
--- a/common/test_utils.go
+++ b/common/test_utils.go
@@ -19,13 +19,13 @@ package common
import (
"encoding/json"
"fmt"
- "io/ioutil"
"net"
+ "os"
)
// LoadJSON reads the given file and unmarshals its content.
func LoadJSON(file string, val interface{}) error {
- content, err := ioutil.ReadFile(file)
+ content, err := os.ReadFile(file)
if err != nil {
return err
}
diff --git a/consensus/cbft/cbft_byzantine_test.go b/consensus/cbft/cbft_byzantine_test.go
index 8ce4c3f460..b713d43558 100644
--- a/consensus/cbft/cbft_byzantine_test.go
+++ b/consensus/cbft/cbft_byzantine_test.go
@@ -18,7 +18,6 @@ package cbft
import (
"fmt"
- "io/ioutil"
"math/big"
"os"
"strings"
@@ -252,7 +251,7 @@ func TestPB01(t *testing.T) {
}
func TestPB03(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "evidence")
+ tempDir, _ := os.MkdirTemp("", "evidence")
defer os.RemoveAll(tempDir)
nodes := MockNodes(t, 2)
@@ -476,7 +475,7 @@ func TestVT01(t *testing.T) {
}
func TestVT02(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "evidence")
+ tempDir, _ := os.MkdirTemp("", "evidence")
defer os.RemoveAll(tempDir)
nodes := MockNodes(t, 2)
@@ -588,7 +587,7 @@ func TestVC02(t *testing.T) {
}
func TestVC03(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "evidence")
+ tempDir, _ := os.MkdirTemp("", "evidence")
defer os.RemoveAll(tempDir)
nodes := MockNodes(t, 2)
@@ -621,7 +620,7 @@ func TestVC03(t *testing.T) {
}
func TestVC04(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "evidence")
+ tempDir, _ := os.MkdirTemp("", "evidence")
defer os.RemoveAll(tempDir)
nodes := MockNodes(t, 2)
diff --git a/consensus/cbft/cbft_test_util.go b/consensus/cbft/cbft_test_util.go
index 9504c28586..8b6d404c07 100644
--- a/consensus/cbft/cbft_test_util.go
+++ b/consensus/cbft/cbft_test_util.go
@@ -17,7 +17,6 @@
package cbft
import (
- "io/ioutil"
"os"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -37,7 +36,7 @@ const (
)
func path() string {
- name, err := ioutil.TempDir(os.TempDir(), "evidence")
+ name, err := os.MkdirTemp(os.TempDir(), "evidence")
if err != nil {
panic(err)
diff --git a/consensus/cbft/consensus_process_test.go b/consensus/cbft/consensus_process_test.go
index 05d4c17ca1..d6cdd0e819 100644
--- a/consensus/cbft/consensus_process_test.go
+++ b/consensus/cbft/consensus_process_test.go
@@ -14,13 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the PlatON-Go library. If not, see <http://www.gnu.org/licenses/>.
-
package cbft
import (
"fmt"
"github.com/PlatONnetwork/PlatON-Go/consensus/cbft/validator"
- "io/ioutil"
"os"
"testing"
"time"
@@ -50,7 +48,7 @@ func TestViewChange(t *testing.T) {
}
func testTryViewChange(t *testing.T, nodes []*TestCBFT) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
result := make(chan *types.Block, 1)
diff --git a/consensus/cbft/evidence/evidence_test.go b/consensus/cbft/evidence/evidence_test.go
index 0c13de8751..32a7769fd7 100644
--- a/consensus/cbft/evidence/evidence_test.go
+++ b/consensus/cbft/evidence/evidence_test.go
@@ -14,13 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the PlatON-Go library. If not, see <http://www.gnu.org/licenses/>.
-
package evidence
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"sort"
"testing"
@@ -42,7 +40,7 @@ func init() {
}
func path() string {
- name, err := ioutil.TempDir(os.TempDir(), "evidence")
+ name, err := os.MkdirTemp(os.TempDir(), "evidence")
if err != nil {
panic(err)
diff --git a/consensus/cbft/validator/validator_test.go b/consensus/cbft/validator/validator_test.go
index 21f081c0ae..1d44309e0e 100644
--- a/consensus/cbft/validator/validator_test.go
+++ b/consensus/cbft/validator/validator_test.go
@@ -22,8 +22,8 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
"math/big"
+ "os"
"testing"
"time"
@@ -204,7 +204,7 @@ func TestStaticAgency(t *testing.T) {
func genesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) (*types.Block, *params.ChainConfig) {
common.SetAddressHRP("")
- buf, err := ioutil.ReadFile("../../../eth/downloader/testdata/platon.json")
+ buf, err := os.ReadFile("../../../eth/downloader/testdata/platon.json")
if err != nil {
return nil, nil
}
diff --git a/consensus/cbft/wal/wal_database_test.go b/consensus/cbft/wal/wal_database_test.go
index 56b5cf9fd1..da4cf91efd 100644
--- a/consensus/cbft/wal/wal_database_test.go
+++ b/consensus/cbft/wal/wal_database_test.go
@@ -14,11 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the PlatON-Go library. If not, see <http://www.gnu.org/licenses/>.
-
package wal
import (
- "io/ioutil"
"os"
"testing"
@@ -34,7 +32,7 @@ var (
func TestWALDatabase(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
// empty path
diff --git a/consensus/cbft/wal/wal_test.go b/consensus/cbft/wal/wal_test.go
index e28bbe7b3d..ffc2bcfd9a 100644
--- a/consensus/cbft/wal/wal_test.go
+++ b/consensus/cbft/wal/wal_test.go
@@ -14,11 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the PlatON-Go library. If not, see <http://www.gnu.org/licenses/>.
-
package wal
import (
- "io/ioutil"
"os"
"testing"
"time"
@@ -35,7 +33,7 @@ var (
)
func TestUpdateChainState(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
wal, _ := NewWal(nil, tempDir)
@@ -53,7 +51,7 @@ func TestUpdateChainState(t *testing.T) {
}
func TestWriteMsg(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
wal, _ := NewWal(nil, tempDir)
@@ -129,7 +127,7 @@ func testWalUpdateViewChange(wal Wal) error {
}
func TestUpdateViewChangeQC(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
wal, _ := NewWal(nil, tempDir)
diff --git a/consensus/cbft/wal_bridge_test.go b/consensus/cbft/wal_bridge_test.go
index 81ab5412f1..2a3902486a 100644
--- a/consensus/cbft/wal_bridge_test.go
+++ b/consensus/cbft/wal_bridge_test.go
@@ -14,11 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the PlatON-Go library. If not, see <http://www.gnu.org/licenses/>.
-
package cbft
import (
- "io/ioutil"
"math/big"
"os"
"testing"
@@ -39,7 +37,7 @@ import (
)
func TestUpdateChainState(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
pk, sk, cbftnodes := GenerateCbftNode(1)
@@ -159,7 +157,7 @@ func testAddQCState(t *testing.T, lock, qc *types.Block, node *TestCBFT) {
}
func TestRecordCbftMsg(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
pk, sk, cbftnodes := GenerateCbftNode(1)
@@ -209,7 +207,7 @@ func TestRecordCbftMsg(t *testing.T) {
}
func TestInsertQCBlock_fork_priority(t *testing.T) {
- tempDir, _ := ioutil.TempDir("", "wal")
+ tempDir, _ := os.MkdirTemp("", "wal")
defer os.RemoveAll(tempDir)
pk, sk, cbftnodes := GenerateCbftNode(1)
diff --git a/console/console.go b/console/console.go
index 83de62fff1..c213b59b89 100644
--- a/console/console.go
+++ b/console/console.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"os/signal"
"path/filepath"
@@ -160,7 +159,7 @@ func (c *Console) init(preload []string) error {
// Configure the input prompter for history and tab completion.
if c.prompter != nil {
- if content, err := ioutil.ReadFile(c.histPath); err != nil {
+ if content, err := os.ReadFile(c.histPath); err != nil {
c.prompter.SetHistory(nil)
} else {
c.history = strings.Split(string(content), "\n")
@@ -560,7 +559,7 @@ func (c *Console) Stop(graceful bool) error {
}
func (c *Console) writeHistory() error {
- if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
+ if err := os.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
return err
}
return os.Chmod(c.histPath, 0600) // Force 0600, even if it was different previously
diff --git a/console/console_test.go b/console/console_test.go
index f4917194d6..7794ddc01b 100644
--- a/console/console_test.go
+++ b/console/console_test.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/eth/ethconfig"
- "io/ioutil"
"os"
"strings"
"testing"
@@ -97,7 +96,7 @@ type tester struct {
// Please ensure you call Close() on the returned tester to avoid leaks.
func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
// Create a temporary storage for the node keys and initialize it
- workspace, err := ioutil.TempDir("", "console-tester-")
+ workspace, err := os.MkdirTemp("", "console-tester-")
if err != nil {
t.Fatalf("failed to create temporary keystore: %v", err)
}
diff --git a/console/prompt/prompter.go b/console/prompt/prompter.go
index 810b6c3e14..2a20b6906a 100644
--- a/console/prompt/prompter.go
+++ b/console/prompt/prompter.go
@@ -143,7 +143,7 @@ func (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err err
// choice to be made, returning that choice.
func (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {
input, err := p.Prompt(prompt + " [y/n] ")
- if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
+ if len(input) > 0 && strings.EqualFold(input[:1], "y") {
return true, nil
}
return false, err
diff --git a/core/asm/asm.go b/core/asm/asm.go
index 6dd0fd0ac8..e227b4a57f 100644
--- a/core/asm/asm.go
+++ b/core/asm/asm.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Provides support for dealing with EVM assembly instructions (e.g., disassembling them).
+// Package asm provides support for dealing with EVM assembly instructions (e.g., disassembling them).
package asm
import (
@@ -34,14 +34,14 @@ type instructionIterator struct {
started bool
}
-// Create a new instruction iterator.
+// NewInstructionIterator creates a new instruction iterator.
func NewInstructionIterator(code []byte) *instructionIterator {
it := new(instructionIterator)
it.code = code
return it
}
-// Returns true if there is a next instruction and moves on.
+// Next returns true if there is a next instruction and moves on.
func (it *instructionIterator) Next() bool {
if it.error != nil || uint64(len(it.code)) <= it.pc {
// We previously reached an error or the end.
@@ -79,27 +79,27 @@ func (it *instructionIterator) Next() bool {
return true
}
-// Returns any error that may have been encountered.
+// Error returns any error that may have been encountered.
func (it *instructionIterator) Error() error {
return it.error
}
-// Returns the PC of the current instruction.
+// PC returns the PC of the current instruction.
func (it *instructionIterator) PC() uint64 {
return it.pc
}
-// Returns the opcode of the current instruction.
+// Op returns the opcode of the current instruction.
func (it *instructionIterator) Op() vm.OpCode {
return it.op
}
-// Returns the argument of the current instruction.
+// Arg returns the argument of the current instruction.
func (it *instructionIterator) Arg() []byte {
return it.arg
}
-// Pretty-print all disassembled EVM instructions to stdout.
+// PrintDisassembled pretty-prints all disassembled EVM instructions to stdout.
func PrintDisassembled(code string) error {
script, err := hex.DecodeString(code)
if err != nil {
@@ -117,7 +117,7 @@ func PrintDisassembled(code string) error {
return it.Error()
}
-// Return all disassembled EVM instructions in human-readable format.
+// Disassemble returns all disassembled EVM instructions in human-readable format.
func Disassemble(script []byte) ([]string, error) {
instrs := make([]string, 0)
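
With the exported methods now documented, typical use of the iterator looks roughly like the sketch below; it is written against the asm package as it appears in these hunks, with made-up bytecode:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/core/asm"
)

func main() {
	// PUSH1 0x60, PUSH1 0x40, MSTORE
	code, _ := hex.DecodeString("6060604052")

	it := asm.NewInstructionIterator(code)
	for it.Next() {
		if arg := it.Arg(); len(arg) > 0 {
			fmt.Printf("%05x: %v %#x\n", it.PC(), it.Op(), arg)
		} else {
			fmt.Printf("%05x: %v\n", it.PC(), it.Op())
		}
	}
	if err := it.Error(); err != nil {
		fmt.Println("disassembly failed:", err)
	}
}
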
diff --git a/core/asm/compiler.go b/core/asm/compiler.go
index 9df3a3d7f9..9b7d9b9aac 100644
--- a/core/asm/compiler.go
+++ b/core/asm/compiler.go
@@ -39,7 +39,7 @@ type Compiler struct {
debug bool
}
-// newCompiler returns a new allocated compiler.
+// NewCompiler returns a new allocated compiler.
func NewCompiler(debug bool) *Compiler {
return &Compiler{
labels: make(map[string]int),
@@ -105,16 +105,16 @@ func (c *Compiler) Compile() (string, []error) {
}
// turn the binary to hex
- var bin string
+ var bin strings.Builder
for _, v := range c.binary {
switch v := v.(type) {
case vm.OpCode:
- bin += fmt.Sprintf("%x", []byte{byte(v)})
+ bin.WriteString(fmt.Sprintf("%x", []byte{byte(v)}))
case []byte:
- bin += fmt.Sprintf("%x", v)
+ bin.WriteString(fmt.Sprintf("%x", v))
}
}
- return bin, errors
+ return bin.String(), errors
}
// next returns the next token and increments the
@@ -243,12 +243,12 @@ func (c *Compiler) pushBin(v interface{}) {
// isPush returns whether the string op is either any of
// push(N).
func isPush(op string) bool {
- return strings.ToUpper(op) == "PUSH"
+ return strings.EqualFold(op, "PUSH")
}
// isJump returns whether the string op is jump(i)
func isJump(op string) bool {
- return strings.ToUpper(op) == "JUMPI" || strings.ToUpper(op) == "JUMP"
+ return strings.EqualFold(op, "JUMPI") || strings.EqualFold(op, "JUMP")
}
// toBinary converts text to a vm.OpCode
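
Switching from string concatenation to strings.Builder lets the hex output grow in a single buffer instead of reallocating a new string for every appended instruction. The pattern in isolation (the byte chunks below are illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	chunks := [][]byte{{0x60, 0x01}, {0x60, 0x02}, {0x01}} // PUSH1 1, PUSH1 2, ADD

	var bin strings.Builder
	for _, chunk := range chunks {
		bin.WriteString(fmt.Sprintf("%x", chunk)) // appends into one growing buffer
	}
	fmt.Println(bin.String()) // "6001600201"
}
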
diff --git a/core/asm/lexer.go b/core/asm/lexer.go
index ed367939d7..d1b79a1fb9 100644
--- a/core/asm/lexer.go
+++ b/core/asm/lexer.go
@@ -93,7 +93,7 @@ type lexer struct {
debug bool // flag for triggering debug output
}
-// lex lexes the program by name with the given source. It returns a
+// Lex lexes the program by name with the given source. It returns a
// channel on which the tokens are delivered.
func Lex(source []byte, debug bool) <-chan token {
ch := make(chan token)
diff --git a/core/bench_test.go b/core/bench_test.go
index af30cc279f..89798dbf7e 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -18,7 +18,6 @@ package core
import (
"crypto/ecdsa"
- "io/ioutil"
"math/big"
"os"
"testing"
@@ -167,7 +166,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
if !disk {
db = rawdb.NewMemoryDatabase()
} else {
- dir, err := ioutil.TempDir("", "eth-core-bench")
+ dir, err := os.MkdirTemp("", "eth-core-bench")
if err != nil {
b.Fatalf("cannot create temporary directory: %v", err)
}
@@ -264,7 +263,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
func benchWriteChain(b *testing.B, full bool, count uint64) {
for i := 0; i < b.N; i++ {
- dir, err := ioutil.TempDir("", "eth-chain-bench")
+ dir, err := os.MkdirTemp("", "eth-chain-bench")
if err != nil {
b.Fatalf("cannot create temporary directory: %v", err)
}
@@ -279,7 +278,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) {
}
func benchReadChain(b *testing.B, full bool, count uint64) {
- dir, err := ioutil.TempDir("", "eth-chain-bench")
+ dir, err := os.MkdirTemp("", "eth-chain-bench")
if err != nil {
b.Fatalf("cannot create temporary directory: %v", err)
}
diff --git a/core/blockchain_clean_test.go b/core/blockchain_clean_test.go
index 5f016bbe86..dbc48dc1e5 100644
--- a/core/blockchain_clean_test.go
+++ b/core/blockchain_clean_test.go
@@ -19,7 +19,6 @@ package core
import (
"crypto/rand"
"fmt"
- "io/ioutil"
"math/big"
"os"
"testing"
@@ -51,7 +50,7 @@ func randBytes(n int) []byte {
}
func newBlockChainForTesting(db ethdb.Database) (*BlockChain, error) {
- buf, err := ioutil.ReadFile("../eth/downloader/testdata/platon.json")
+ buf, err := os.ReadFile("../eth/downloader/testdata/platon.json")
if err != nil {
return nil, err
}
@@ -76,7 +75,7 @@ func newBlockChainForTesting(db ethdb.Database) (*BlockChain, error) {
}
func TestCleaner(t *testing.T) {
- frdir, err := ioutil.TempDir("", "platon")
+ frdir, err := os.MkdirTemp("", "platon")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
@@ -134,7 +133,7 @@ func TestCleaner(t *testing.T) {
}
func TestStopCleaner(t *testing.T) {
- frdir, err := ioutil.TempDir("", "platon")
+ frdir, err := os.MkdirTemp("", "platon")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 34dafed051..20e9f5b991 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -39,7 +39,7 @@ import (
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerHashTable, number)
if len(data) == 0 {
// Get it by hash from leveldb
@@ -303,7 +303,7 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
@@ -382,7 +382,7 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
-func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
+func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
h, err := reader.Ancient(freezerHashTable, number)
if err != nil {
return false
@@ -396,7 +396,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
// comparison is necessary since ancient database only maintains
// the canonical data.
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerBodiesTable, number)
@@ -413,7 +413,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerBodiesTable, number)
if len(data) > 0 {
return nil
@@ -491,7 +491,7 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerReceiptTable, number)
@@ -527,7 +527,7 @@ func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Rec
}
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
-// its correspoinding metadata fields. If it is unable to populate these metadata
+// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
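
Only the callback parameter type changes in these accessors (ethdb.AncientReader becomes ethdb.AncientReaderOp); the access pattern stays freezer-first with a key-value fallback. A hedged sketch of a caller, using a memory database so the callback still runs even without a backing freezer; the "hashes" table name mirrors the package-internal freezerHashTable constant:

package main

import (
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
	"github.com/PlatONnetwork/PlatON-Go/ethdb"
)

func main() {
	db := rawdb.NewMemoryDatabase() // no freezer behind it, but ReadAncients still invokes the callback

	var data []byte
	_ = db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Try the immutable store first...
		if blob, err := reader.Ancient("hashes", 0); err == nil && len(blob) > 0 {
			data = blob
			return nil
		}
		// ...and fall back to the key-value store here if needed.
		return nil
	})
	fmt.Printf("ancient lookup returned %d bytes\n", len(data))
}
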
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 7ffcaa991a..0c7c5bf8ee 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -20,7 +20,6 @@ import (
"bytes"
"encoding/hex"
"fmt"
- "io/ioutil"
"math/big"
"math/rand"
"os"
@@ -409,7 +408,7 @@ func checkReceiptsRLP(have, want types.Receipts) error {
func TestAncientStorage(t *testing.T) {
// Freezer style fast import the chain.
- frdir, err := ioutil.TempDir("", "")
+ frdir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
@@ -723,7 +722,7 @@ func TestHashesInRange(t *testing.T) {
// This measures the write speed of the WriteAncientBlocks operation.
func BenchmarkWriteAncientBlocks(b *testing.B) {
// Open freezer database.
- frdir, err := ioutil.TempDir("", "")
+ frdir, err := os.MkdirTemp("", "")
if err != nil {
b.Fatalf("failed to create temp freezer dir: %v", err)
}
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
new file mode 100644
index 0000000000..96971d6691
--- /dev/null
+++ b/core/rawdb/chain_freezer.go
@@ -0,0 +1,296 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/PlatONnetwork/PlatON-Go/common"
+ "github.com/PlatONnetwork/PlatON-Go/ethdb"
+ "github.com/PlatONnetwork/PlatON-Go/log"
+ "github.com/PlatONnetwork/PlatON-Go/params"
+)
+
+const (
+ // freezerRecheckInterval is the frequency to check the key-value database for
+ // chain progression that might permit new blocks to be frozen into immutable
+ // storage.
+ freezerRecheckInterval = time.Minute
+
+ // freezerBatchLimit is the maximum number of blocks to freeze in one batch
+ // before doing an fsync and deleting it from the key-value store.
+ freezerBatchLimit = 30000
+)
+
+// chainFreezer is a wrapper of freezer with additional chain freezing feature.
+// The background thread will keep moving ancient chain segments from key-value
+// database to flat files for saving space on live database.
+type chainFreezer struct {
+ // WARNING: The `threshold` field is accessed atomically. On 32 bit platforms, only
+ // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
+ // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
+ threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+
+ *Freezer
+ quit chan struct{}
+ wg sync.WaitGroup
+ trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
+}
+
+// newChainFreezer initializes the freezer for ancient chain data.
+func newChainFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*chainFreezer, error) {
+ freezer, err := NewFreezer(datadir, namespace, readonly, maxTableSize, tables)
+ if err != nil {
+ return nil, err
+ }
+ return &chainFreezer{
+ Freezer: freezer,
+ threshold: params.FullImmutabilityThreshold,
+ quit: make(chan struct{}),
+ trigger: make(chan chan struct{}),
+ }, nil
+}
+
+// Close closes the chain freezer instance and terminates the background thread.
+func (f *chainFreezer) Close() error {
+ err := f.Freezer.Close()
+ select {
+ case <-f.quit:
+ default:
+ close(f.quit)
+ }
+ f.wg.Wait()
+ return err
+}
+
+// freeze is a background thread that periodically checks the blockchain for any
+// import progress and moves ancient data from the fast database into the freezer.
+//
+// This functionality is deliberately broken off from block importing to avoid
+// incurring additional data shuffling delays on block propagation.
+func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
+ nfdb := &nofreezedb{KeyValueStore: db}
+
+ var (
+ backoff bool
+ triggered chan struct{} // Used in tests
+ )
+ for {
+ select {
+ case <-f.quit:
+ log.Info("Freezer shutting down")
+ return
+ default:
+ }
+ if backoff {
+ // If we were doing a manual trigger, notify it
+ if triggered != nil {
+ triggered <- struct{}{}
+ triggered = nil
+ }
+ select {
+ case <-time.NewTimer(freezerRecheckInterval).C:
+ backoff = false
+ case triggered = <-f.trigger:
+ backoff = false
+ case <-f.quit:
+ return
+ }
+ }
+ // Retrieve the freezing threshold.
+ hash := ReadHeadBlockHash(nfdb)
+ if hash == (common.Hash{}) {
+ log.Debug("Current full block hash unavailable") // new chain, empty database
+ backoff = true
+ continue
+ }
+ number := ReadHeaderNumber(nfdb, hash)
+ threshold := atomic.LoadUint64(&f.threshold)
+ frozen := atomic.LoadUint64(&f.frozen)
+ switch {
+ case number == nil:
+ log.Error("Current full block number unavailable", "hash", hash)
+ backoff = true
+ continue
+
+ case *number < threshold:
+ log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
+ backoff = true
+ continue
+
+ case *number-threshold <= frozen:
+ log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", frozen)
+ backoff = true
+ continue
+ }
+ head := ReadHeader(nfdb, hash, *number)
+ if head == nil {
+ log.Error("Current full block unavailable", "number", *number, "hash", hash)
+ backoff = true
+ continue
+ }
+
+ // Seems we have data ready to be frozen, process in usable batches
+ var (
+ start = time.Now()
+ first, _ = f.Ancients()
+ limit = *number - threshold
+ )
+ if limit-first > freezerBatchLimit {
+ limit = first + freezerBatchLimit
+ }
+ ancients, err := f.freezeRange(nfdb, first, limit)
+ if err != nil {
+ log.Error("Error in block freeze operation", "err", err)
+ backoff = true
+ continue
+ }
+
+ // Batch of blocks have been frozen, flush them before wiping from leveldb
+ if err := f.Sync(); err != nil {
+ log.Crit("Failed to flush frozen tables", "err", err)
+ }
+
+ // Wipe out all data from the active database
+ batch := db.NewBatch()
+ for i := 0; i < len(ancients); i++ {
+ // Always keep the genesis block in active database
+ if first+uint64(i) != 0 {
+ DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
+ DeleteCanonicalHash(batch, first+uint64(i))
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen canonical blocks", "err", err)
+ }
+ batch.Reset()
+
+ // Wipe out side chains also and track dangling side chains
+ var dangling []common.Hash
+ frozen = atomic.LoadUint64(&f.frozen) // Needs reload after freezeRange has advanced it
+ for number := first; number < frozen; number++ {
+ // Always keep the genesis block in active database
+ if number != 0 {
+ dangling = ReadAllHashes(db, number)
+ for _, hash := range dangling {
+ log.Trace("Deleting side chain", "number", number, "hash", hash)
+ DeleteBlock(batch, hash, number)
+ }
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen side blocks", "err", err)
+ }
+ batch.Reset()
+
+ // Step into the future and delete any dangling side chains
+ if frozen > 0 {
+ tip := frozen
+ for len(dangling) > 0 {
+ drop := make(map[common.Hash]struct{})
+ for _, hash := range dangling {
+ log.Debug("Dangling parent from Freezer", "number", tip-1, "hash", hash)
+ drop[hash] = struct{}{}
+ }
+ children := ReadAllHashes(db, tip)
+ for i := 0; i < len(children); i++ {
+ // Dig up the child and ensure it's dangling
+ child := ReadHeader(nfdb, children[i], tip)
+ if child == nil {
+ log.Error("Missing dangling header", "number", tip, "hash", children[i])
+ continue
+ }
+ if _, ok := drop[child.ParentHash]; !ok {
+ children = append(children[:i], children[i+1:]...)
+ i--
+ continue
+ }
+ // Delete all block data associated with the child
+ log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+ DeleteBlock(batch, children[i], tip)
+ }
+ dangling = children
+ tip++
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete dangling side blocks", "err", err)
+ }
+ }
+
+ // Log something friendly for the user
+ context := []interface{}{
+ "blocks", frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", frozen - 1,
+ }
+ if n := len(ancients); n > 0 {
+ context = append(context, []interface{}{"hash", ancients[n-1]}...)
+ }
+ log.Info("Deep froze chain segment", context...)
+
+ // Avoid database thrashing with tiny writes
+ if frozen-first < freezerBatchLimit {
+ backoff = true
+ }
+ }
+}
+
+func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
+ hashes = make([]common.Hash, 0, limit-number)
+
+ _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for ; number <= limit; number++ {
+ // Retrieve all the components of the canonical block.
+ hash := ReadCanonicalHash(nfdb, number)
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
+ }
+ header := ReadHeaderRLP(nfdb, hash, number)
+ if len(header) == 0 {
+ return fmt.Errorf("block header missing, can't freeze block %d", number)
+ }
+ body := ReadBodyRLP(nfdb, hash, number)
+ if len(body) == 0 {
+ return fmt.Errorf("block body missing, can't freeze block %d", number)
+ }
+ receipts := ReadReceiptsRLP(nfdb, hash, number)
+ // Receipts are pruned by default, so they may be unavailable here
+ //if len(receipts) == 0 {
+ // return fmt.Errorf("block receipts missing, can't freeze block %d", number)
+ //}
+
+ // Write to the batch.
+ if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
+ return fmt.Errorf("can't write hash to Freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
+ return fmt.Errorf("can't write header to Freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
+ return fmt.Errorf("can't write body to Freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
+ return fmt.Errorf("can't write receipts to Freezer: %v", err)
+ }
+ hashes = append(hashes, hash)
+ }
+ return nil
+ })
+
+ return hashes, err
+}
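
The batching logic above reduces to simple window arithmetic: with head number N, threshold T (params.FullImmutabilityThreshold outside tests) and `frozen` items already in the freezer, the next pass freezes from `frozen` up to min(N-T, frozen+freezerBatchLimit). A tiny standalone sketch of that calculation with made-up numbers:

package main

import "fmt"

// freezeWindow mirrors the selection logic in chainFreezer.freeze (not the real function).
func freezeWindow(head, threshold, frozen, batchLimit uint64) (first, limit uint64, ok bool) {
	if head < threshold || head-threshold <= frozen {
		return 0, 0, false // head not old enough, or everything eligible is already frozen
	}
	first = frozen
	limit = head - threshold
	if limit-first > batchLimit {
		limit = first + batchLimit // cap one pass at freezerBatchLimit blocks
	}
	return first, limit, true
}

func main() {
	first, limit, ok := freezeWindow(1_000_000, 90_000, 850_000, 30_000)
	fmt.Println(first, limit, ok) // 850000 880000 true
}
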
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 5176e4ad0f..5313e753f4 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -58,18 +58,18 @@ func (frdb *freezerdb) Close() error {
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze(threshold uint64) error {
- if frdb.AncientStore.(*freezer).readonly {
+ if frdb.AncientStore.(*chainFreezer).readonly {
return errReadOnly
}
// Set the freezer threshold to a temporary value
defer func(old uint64) {
- atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old)
- }(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold))
- atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold)
+ atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
+ }(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
+ atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)
// Trigger a freeze cycle and block until it's done
trigger := make(chan struct{}, 1)
- frdb.AncientStore.(*freezer).trigger <- trigger
+ frdb.AncientStore.(*chainFreezer).trigger <- trigger
<-trigger
return nil
}
@@ -129,7 +129,7 @@ func (db *nofreezedb) Sync() error {
return errNotSupported
}
-func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
// The reason for this is that the caller might want to do several things:
@@ -151,6 +151,11 @@ func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
return errNotSupported
}
+// AncientDatadir returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientDatadir() (string, error) {
+ return "", errNotSupported
+}
+
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -162,7 +167,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
// storage.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
// Create the idle freezer instance
- frdb, err := newFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
+ frdb, err := newChainFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
if err != nil {
return nil, err
}
@@ -179,7 +184,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// this point care, the key-value/freezer combo is valid).
// - If neither the key-value store nor the freezer is empty, cross validate
// the genesis hashes to make sure they are compatible. If they are, also
- // ensure that there's no gap between the freezer and sunsequently leveldb.
+ // ensure that there's no gap between the freezer and subsequently leveldb.
// - If the key-value store is not empty, but the freezer is we might just be
// upgrading to the freezer release, or we might have had a small chain and
// not frozen anything yet. Ensure that no blocks are missing yet from the
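
Since chainFreezer's trigger channel stays unexported, callers outside the package reach a synchronous freeze through the Freeze(threshold) method above. A hedged sketch of invoking it via an anonymous interface assertion; the assertion target is an assumption, because the concrete freezerdb type is not exported, and the paths are illustrative only:

package main

import (
	"log"

	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
)

func main() {
	db, err := rawdb.NewLevelDBDatabaseWithFreezer("/tmp/chaindata", 0, 0, "/tmp/chaindata/ancient", "platon", false)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// freezerdb is unexported, so assert just the method we need.
	if fdb, ok := db.(interface{ Freeze(threshold uint64) error }); ok {
		// Temporarily lowers the threshold and blocks until one freeze cycle completes.
		if err := fdb.Freeze(16); err != nil {
			log.Fatal(err)
		}
	}
}
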
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 7ce696f41e..57c05dd45d 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -19,7 +19,6 @@ package rawdb
import (
"errors"
"fmt"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -33,7 +32,6 @@ import (
"github.com/PlatONnetwork/PlatON-Go/ethdb"
"github.com/PlatONnetwork/PlatON-Go/log"
"github.com/PlatONnetwork/PlatON-Go/metrics"
- "github.com/PlatONnetwork/PlatON-Go/params"
)
var (
@@ -54,34 +52,24 @@ var (
errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
)
-const (
- // freezerRecheckInterval is the frequency to check the key-value database for
- // chain progression that might permit new blocks to be frozen into immutable
- // storage.
- freezerRecheckInterval = time.Minute
+// freezerTableSize defines the maximum size of freezer data files.
+const freezerTableSize = 2 * 1000 * 1000 * 1000
- // freezerBatchLimit is the maximum number of blocks to freeze in one batch
- // before doing an fsync and deleting it from the key-value store.
- freezerBatchLimit = 30000
-
- // freezerTableSize defines the maximum size of freezer data files.
- freezerTableSize = 2 * 1000 * 1000 * 1000
-)
-
-// freezer is a memory mapped append-only database to store immutable chain data
-// into flat files:
+// Freezer is a memory mapped append-only database to store immutable ordered
+// data into flat files:
//
-// - The append only nature ensures that disk writes are minimized.
-// - The memory mapping ensures we can max out system memory for caching without
-// reserving it for go-ethereum. This would also reduce the memory requirements
-// of Geth, and thus also GC overhead.
-type freezer struct {
- // WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
+// - The append-only nature ensures that disk writes are minimized.
+// - The memory mapping ensures we can max out system memory for caching without
+// reserving it for go-ethereum. This would also reduce the memory requirements
+// of Geth, and thus also GC overhead.
+type Freezer struct {
+ // WARNING: The `frozen` and `tail` fields are accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- frozen uint64 // Number of blocks already frozen
- tail uint64 // Number of the first stored item in the freezer
- threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+ frozen uint64 // Number of blocks already frozen
+ tail uint64 // Number of the first stored item in the freezer
+
+ datadir string // Path of root directory of ancient store
// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
@@ -91,20 +79,15 @@ type freezer struct {
readonly bool
tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens
-
- trigger chan chan struct{} // Manual blocking freeze trigger, test determinism
-
- quit chan struct{}
- wg sync.WaitGroup
- closeOnce sync.Once
+ closeOnce sync.Once
}
-// newFreezer creates a chain freezer that moves ancient chain data into
-// append-only flat file containers.
+// NewFreezer creates a freezer instance for maintaining immutable ordered
+// data according to the given parameters.
//
// The 'tables' argument defines the data tables. If the value of a map
// entry is true, snappy compression is disabled for the table.
-func newFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*freezer, error) {
+func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*Freezer, error) {
// Create the initial freezer object
var (
readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
@@ -125,13 +108,11 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
return nil, err
}
// Open all the supported data tables
- freezer := &freezer{
+ freezer := &Freezer{
readonly: readonly,
- threshold: params.FullImmutabilityThreshold,
tables: make(map[string]*freezerTable),
instanceLock: lock,
- trigger: make(chan chan struct{}),
- quit: make(chan struct{}),
+ datadir: datadir,
}
// Create the tables.
@@ -171,15 +152,12 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
}
// Close terminates the chain freezer, unmapping all the data files.
-func (f *freezer) Close() error {
+func (f *Freezer) Close() error {
f.writeLock.Lock()
defer f.writeLock.Unlock()
var errs []error
f.closeOnce.Do(func() {
- close(f.quit)
- // Wait for any background freezing to stop
- f.wg.Wait()
for _, table := range f.tables {
if err := table.Close(); err != nil {
errs = append(errs, err)
@@ -197,7 +175,7 @@ func (f *freezer) Close() error {
// HasAncient returns an indicator whether the specified ancient data exists
// in the freezer.
-func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
+func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
if table := f.tables[kind]; table != nil {
return table.has(number), nil
}
@@ -205,7 +183,7 @@ func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
}
// Ancient retrieves an ancient binary blob from the append-only immutable files.
-func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
+func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
if table := f.tables[kind]; table != nil {
return table.Retrieve(number)
}
@@ -214,10 +192,10 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return
-// - at most 'max' items,
-// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
-// return as many items as fit into maxByteSize.
-func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+// - at most 'max' items,
+// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
+// return as many items as fit into maxByteSize.
+func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes)
}
@@ -225,17 +203,17 @@ func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]
}
// Ancients returns the length of the frozen items.
-func (f *freezer) Ancients() (uint64, error) {
+func (f *Freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
}
// Tail returns the number of first stored item in the freezer.
-func (f *freezer) Tail() (uint64, error) {
+func (f *Freezer) Tail() (uint64, error) {
return atomic.LoadUint64(&f.tail), nil
}
// AncientSize returns the ancient size of the specified category.
-func (f *freezer) AncientSize(kind string) (uint64, error) {
+func (f *Freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields.
// Speed doesn't matter here, AncientSize is for debugging.
f.writeLock.RLock()
@@ -249,14 +227,15 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
-func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
+func (f *Freezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
+
return fn(f)
}
// ModifyAncients runs the given write operation.
-func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
+func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
if f.readonly {
return 0, errReadOnly
}
@@ -264,7 +243,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
defer f.writeLock.Unlock()
// Roll back all tables to the starting position in case of error.
- prevItem := f.frozen
+ prevItem := atomic.LoadUint64(&f.frozen)
defer func() {
if err != nil {
// The write operation has failed. Go back to the previous item position.
@@ -290,7 +269,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
}
// TruncateHead discards any recent data above the provided threshold number.
-func (f *freezer) TruncateHead(items uint64) error {
+func (f *Freezer) TruncateHead(items uint64) error {
if f.readonly {
return errReadOnly
}
@@ -310,7 +289,7 @@ func (f *freezer) TruncateHead(items uint64) error {
}
// TruncateTail discards any recent data below the provided threshold number.
-func (f *freezer) TruncateTail(tail uint64) error {
+func (f *Freezer) TruncateTail(tail uint64) error {
if f.readonly {
return errReadOnly
}
@@ -330,7 +309,7 @@ func (f *freezer) TruncateTail(tail uint64) error {
}
// Sync flushes all data tables to disk.
-func (f *freezer) Sync() error {
+func (f *Freezer) Sync() error {
var errs []error
for _, table := range f.tables {
if err := table.Sync(); err != nil {
@@ -345,7 +324,7 @@ func (f *freezer) Sync() error {
// validate checks that every table has the same length.
// Used instead of `repair` in readonly mode.
-func (f *freezer) validate() error {
+func (f *Freezer) validate() error {
if len(f.tables) == 0 {
return nil
}
@@ -371,7 +350,7 @@ func (f *freezer) validate() error {
}
// repair truncates all data tables to the same length.
-func (f *freezer) repair() error {
+func (f *Freezer) repair() error {
var (
head = uint64(math.MaxUint64)
tail = uint64(0)
@@ -399,228 +378,13 @@ func (f *freezer) repair() error {
return nil
}
-// freeze is a background thread that periodically checks the blockchain for any
-// import progress and moves ancient data from the fast database into the freezer.
-//
-// This functionality is deliberately broken off from block importing to avoid
-// incurring additional data shuffling delays on block propagation.
-func (f *freezer) freeze(db ethdb.KeyValueStore) {
- nfdb := &nofreezedb{KeyValueStore: db}
-
- var (
- backoff bool
- triggered chan struct{} // Used in tests
- )
- for {
- select {
- case <-f.quit:
- log.Info("Freezer shutting down")
- return
- default:
- }
- if backoff {
- // If we were doing a manual trigger, notify it
- if triggered != nil {
- triggered <- struct{}{}
- triggered = nil
- }
- select {
- case <-time.NewTimer(freezerRecheckInterval).C:
- backoff = false
- case triggered = <-f.trigger:
- backoff = false
- case <-f.quit:
- return
- }
- }
- // Retrieve the freezing threshold.
- hash := ReadHeadBlockHash(nfdb)
- if hash == (common.Hash{}) {
- log.Debug("Current full block hash unavailable") // new chain, empty database
- backoff = true
- continue
- }
- number := ReadHeaderNumber(nfdb, hash)
- threshold := atomic.LoadUint64(&f.threshold)
-
- switch {
- case number == nil:
- log.Error("Current full block number unavailable", "hash", hash)
- backoff = true
- continue
-
- case *number < threshold:
- log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
- backoff = true
- continue
-
- case *number-threshold <= f.frozen:
- log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
- backoff = true
- continue
- }
- head := ReadHeader(nfdb, hash, *number)
- if head == nil {
- log.Error("Current full block unavailable", "number", *number, "hash", hash)
- backoff = true
- continue
- }
-
- // Seems we have data ready to be frozen, process in usable batches
- var (
- start = time.Now()
- first, _ = f.Ancients()
- limit = *number - threshold
- )
- if limit-first > freezerBatchLimit {
- limit = first + freezerBatchLimit
- }
- ancients, err := f.freezeRange(nfdb, first, limit)
- if err != nil {
- log.Error("Error in block freeze operation", "err", err)
- backoff = true
- continue
- }
-
- // Batch of blocks have been frozen, flush them before wiping from leveldb
- if err := f.Sync(); err != nil {
- log.Crit("Failed to flush frozen tables", "err", err)
- }
-
- // Wipe out all data from the active database
- batch := db.NewBatch()
- for i := 0; i < len(ancients); i++ {
- // Always keep the genesis block in active database
- if first+uint64(i) != 0 {
- DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
- DeleteCanonicalHash(batch, first+uint64(i))
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen canonical blocks", "err", err)
- }
- batch.Reset()
-
- // Wipe out side chains also and track dangling side chains
- var dangling []common.Hash
- for number := first; number < f.frozen; number++ {
- // Always keep the genesis block in active database
- if number != 0 {
- dangling = ReadAllHashes(db, number)
- for _, hash := range dangling {
- log.Trace("Deleting side chain", "number", number, "hash", hash)
- DeleteBlock(batch, hash, number)
- }
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen side blocks", "err", err)
- }
- batch.Reset()
-
- // Step into the future and delete and dangling side chains
- if f.frozen > 0 {
- tip := f.frozen
- for len(dangling) > 0 {
- drop := make(map[common.Hash]struct{})
- for _, hash := range dangling {
- log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
- drop[hash] = struct{}{}
- }
- children := ReadAllHashes(db, tip)
- for i := 0; i < len(children); i++ {
- // Dig up the child and ensure it's dangling
- child := ReadHeader(nfdb, children[i], tip)
- if child == nil {
- log.Error("Missing dangling header", "number", tip, "hash", children[i])
- continue
- }
- if _, ok := drop[child.ParentHash]; !ok {
- children = append(children[:i], children[i+1:]...)
- i--
- continue
- }
- // Delete all block data associated with the child
- log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
- DeleteBlock(batch, children[i], tip)
- }
- dangling = children
- tip++
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete dangling side blocks", "err", err)
- }
- }
-
- // Log something friendly for the user
- context := []interface{}{
- "blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
- }
- if n := len(ancients); n > 0 {
- context = append(context, []interface{}{"hash", ancients[n-1]}...)
- }
- log.Info("Deep froze chain segment", context...)
-
- // Avoid database thrashing with tiny writes
- if f.frozen-first < freezerBatchLimit {
- backoff = true
- }
- }
-}
-
-func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
- hashes = make([]common.Hash, 0, limit-number)
-
- _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for ; number <= limit; number++ {
- // Retrieve all the components of the canonical block.
- hash := ReadCanonicalHash(nfdb, number)
- if hash == (common.Hash{}) {
- return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
- }
- header := ReadHeaderRLP(nfdb, hash, number)
- if len(header) == 0 {
- return fmt.Errorf("block header missing, can't freeze block %d", number)
- }
- body := ReadBodyRLP(nfdb, hash, number)
- if len(body) == 0 {
- return fmt.Errorf("block body missing, can't freeze block %d", number)
- }
- receipts := ReadReceiptsRLP(nfdb, hash, number)
- // Receipts are pruned by default, so they may be unavailable here
- //if len(receipts) == 0 {
- // return fmt.Errorf("block receipts missing, can't freeze block %d", number)
- //}
-
- // Write to the batch.
- if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
- return fmt.Errorf("can't write hash to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
- return fmt.Errorf("can't write header to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
- return fmt.Errorf("can't write body to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
- return fmt.Errorf("can't write receipts to freezer: %v", err)
- }
-
- hashes = append(hashes, hash)
- }
- return nil
- })
-
- return hashes, err
-}
-
// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
-func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
+func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
if f.readonly {
return errReadOnly
}
@@ -666,7 +430,7 @@ func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
// Set up new dir for the migrated table, the content of which
// we'll at the end move over to the ancients dir.
migrationPath := filepath.Join(ancientsPath, "migration")
- newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
+ newTable, err := NewFreezerTable(migrationPath, kind, table.noCompression, false)
if err != nil {
return err
}
@@ -708,7 +472,7 @@ func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
if err := newTable.Close(); err != nil {
return err
}
- files, err := ioutil.ReadDir(migrationPath)
+ files, err := os.ReadDir(migrationPath)
if err != nil {
return err
}
@@ -726,3 +490,8 @@ func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
return nil
}
+
+// AncientDatadir returns the root directory path of the ancient store.
+func (f *Freezer) AncientDatadir() (string, error) {
+ return f.datadir, nil
+}
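
With the type exported as Freezer/NewFreezer, a standalone instance can now be opened outside the chain code path. A minimal sketch using the same tiny table layout as the tests (one "test" table, snappy disabled, low max table size):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/PlatONnetwork/PlatON-Go/core/rawdb"
	"github.com/PlatONnetwork/PlatON-Go/ethdb"
)

func main() {
	dir, err := os.MkdirTemp("", "freezer-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	f, err := rawdb.NewFreezer(dir, "", false, 2049, map[string]bool{"test": true})
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// All writes go through ModifyAncients; item numbers must be appended in order from 0.
	if _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		if err := op.AppendRaw("test", 0, []byte("item-0")); err != nil {
			return err
		}
		return op.AppendRaw("test", 1, []byte("item-1"))
	}); err != nil {
		log.Fatal(err)
	}

	blob, _ := f.Ancient("test", 1)
	count, _ := f.Ancients()
	fmt.Printf("%d items frozen, item 1 = %q\n", count, blob)
}
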
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index ed483e39e6..39d0106ab9 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -34,7 +34,7 @@ type freezerBatch struct {
tables map[string]*freezerTableBatch
}
-func newFreezerBatch(f *freezer) *freezerBatch {
+func newFreezerBatch(f *Freezer) *freezerBatch {
batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
for kind, table := range f.tables {
batch.tables[kind] = table.newBatch()
diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go
index 191744a754..3e20aad690 100644
--- a/core/rawdb/freezer_meta_test.go
+++ b/core/rawdb/freezer_meta_test.go
@@ -17,13 +17,12 @@
package rawdb
import (
- "io/ioutil"
"os"
"testing"
)
func TestReadWriteFreezerTableMeta(t *testing.T) {
- f, err := ioutil.TempFile(os.TempDir(), "*")
+ f, err := os.CreateTemp(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
@@ -44,7 +43,7 @@ func TestReadWriteFreezerTableMeta(t *testing.T) {
}
func TestInitializeFreezerTableMeta(t *testing.T) {
- f, err := ioutil.TempFile(os.TempDir(), "*")
+ f, err := os.CreateTemp(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index 7a09873ae4..aa7ae2e1b9 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -20,7 +20,6 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
"math/big"
"math/rand"
"os"
@@ -50,8 +49,7 @@ func TestFreezerModify(t *testing.T) {
}
tables := map[string]bool{"raw": true, "rlp": false}
- f, dir := newFreezerForTesting(t, tables)
- defer os.RemoveAll(dir)
+ f, _ := newFreezerForTesting(t, tables)
defer f.Close()
// Commit test data.
@@ -97,7 +95,6 @@ func TestFreezerModifyRollback(t *testing.T) {
t.Parallel()
f, dir := newFreezerForTesting(t, freezerTestTableDef)
- defer os.RemoveAll(dir)
theError := errors.New("oops")
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -116,7 +113,7 @@ func TestFreezerModifyRollback(t *testing.T) {
// Reopen and check that the rolled-back data doesn't reappear.
tables := map[string]bool{"test": true}
- f2, err := newFreezer(dir, "", false, 2049, tables)
+ f2, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatalf("can't reopen freezer after failed ModifyAncients: %v", err)
}
@@ -128,8 +125,7 @@ func TestFreezerModifyRollback(t *testing.T) {
func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
t.Parallel()
- f, dir := newFreezerForTesting(t, freezerTestTableDef)
- defer os.RemoveAll(dir)
+ f, _ := newFreezerForTesting(t, freezerTestTableDef)
defer f.Close()
var (
@@ -189,8 +185,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
// This test runs ModifyAncients and TruncateHead concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
- f, dir := newFreezerForTesting(t, freezerTestTableDef)
- defer os.RemoveAll(dir)
+ f, _ := newFreezerForTesting(t, freezerTestTableDef)
defer f.Close()
var item = make([]byte, 256)
@@ -256,14 +251,10 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
func TestFreezerReadonlyValidate(t *testing.T) {
tables := map[string]bool{"a": true, "b": true}
- dir, err := ioutil.TempDir("", "freezer")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+ dir := t.TempDir()
// Open non-readonly freezer and fill individual tables
// with different amount of data.
- f, err := newFreezer(dir, "", false, 2049, tables)
+ f, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatal("can't open freezer", err)
}
@@ -286,22 +277,19 @@ func TestFreezerReadonlyValidate(t *testing.T) {
// Re-opening as readonly should fail when validating
// table lengths.
- f, err = newFreezer(dir, "", true, 2049, tables)
+ f, err = NewFreezer(dir, "", true, 2049, tables)
if err == nil {
t.Fatal("readonly freezer should fail with differing table lengths")
}
}
-func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
+func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) {
t.Helper()
- dir, err := ioutil.TempDir("", "freezer")
- if err != nil {
- t.Fatal(err)
- }
+ dir := t.TempDir()
// note: using low max table size here to ensure the tests actually
// switch between multiple files.
- f, err := newFreezer(dir, "", false, 2049, tables)
+ f, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatal("can't open freezer", err)
}
@@ -309,7 +297,7 @@ func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, strin
}
// checkAncientCount verifies that the freezer contains n items.
-func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
+func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
t.Helper()
if frozen, _ := f.Ancients(); frozen != n {
@@ -350,16 +338,8 @@ func TestRenameWindows(t *testing.T) {
)
// Create 2 temp dirs
- dir1, err := os.MkdirTemp("", "rename-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(dir1)
- dir2, err := os.MkdirTemp("", "rename-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(dir2)
+ dir1 := t.TempDir()
+ dir2 := t.TempDir()
// Create file in dir1 and fill with data
f, err := os.Create(path.Join(dir1, fname))
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
index 5695fc0fa8..e7cce2920d 100644
--- a/core/rawdb/freezer_utils.go
+++ b/core/rawdb/freezer_utils.go
@@ -18,7 +18,6 @@ package rawdb
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
)
@@ -30,7 +29,7 @@ import (
// It is perfectly valid to have destPath == srcPath.
func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
// Create a temp file in the same dir where we want it to wind up
- f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
+ f, err := os.CreateTemp(filepath.Dir(destPath), "*")
if err != nil {
return err
}
diff --git a/core/rawdb/freezer_utils_test.go b/core/rawdb/freezer_utils_test.go
index de8087f9b9..cc300cb614 100644
--- a/core/rawdb/freezer_utils_test.go
+++ b/core/rawdb/freezer_utils_test.go
@@ -18,7 +18,6 @@ package rawdb
import (
"bytes"
- "io/ioutil"
"os"
"testing"
)
@@ -44,7 +43,7 @@ func TestCopyFrom(t *testing.T) {
{"foo", "bar", 8, true},
}
for _, c := range cases {
- ioutil.WriteFile(c.src, content, 0644)
+ os.WriteFile(c.src, content, 0644)
if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
if !c.writePrefix {
@@ -57,7 +56,7 @@ func TestCopyFrom(t *testing.T) {
t.Fatalf("Failed to copy %v", err)
}
- blob, err := ioutil.ReadFile(c.dest)
+ blob, err := os.ReadFile(c.dest)
if err != nil {
os.Remove(c.src)
os.Remove(c.dest)
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 4a43ea8727..00235f0129 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -91,7 +91,7 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
return t.db.ModifyAncients(fn)
}
-func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
return t.db.ReadAncients(fn)
}
@@ -102,6 +102,7 @@ func (t *table) TruncateHead(items uint64) error {
}
// TruncateTail is a noop passthrough that just forwards the request to the underlying
+// database.
func (t *table) TruncateTail(items uint64) error {
return t.db.TruncateTail(items)
}
@@ -118,6 +119,11 @@ func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
return t.db.MigrateTable(kind, convert)
}
+// AncientDatadir returns the ancient datadir of the underlying database.
+func (t *table) AncientDatadir() (string, error) {
+ return t.db.AncientDatadir()
+}
+
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 50c070778c..56bca20afe 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -20,7 +20,6 @@ import (
"bytes"
"encoding/hex"
"fmt"
- "io/ioutil"
"math/big"
"os"
"testing"
@@ -268,7 +267,7 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) {
}
func TestEmptyByte(t *testing.T) {
- frdir, err := ioutil.TempDir("", "platon")
+ frdir, err := os.MkdirTemp("", "platon")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
@@ -331,7 +330,7 @@ func TestEmptyByte(t *testing.T) {
}
func TestForEachStorage(t *testing.T) {
- tmpDir, _ := ioutil.TempDir("", "platon")
+ tmpDir, _ := os.MkdirTemp("", "platon")
defer os.Remove(tmpDir)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(tmpDir, 0, 0, "freezer", "platon", false)
if err != nil {
@@ -366,7 +365,7 @@ func TestForEachStorage(t *testing.T) {
func TestMigrateStorage(t *testing.T) {
- tmpDir, _ := ioutil.TempDir("", "platon")
+ tmpDir, _ := os.MkdirTemp("", "platon")
defer os.Remove(tmpDir)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(tmpDir, 0, 0, "freezer", "platon", false)
if err != nil {
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 7a9c70190f..cc2d86540c 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -54,7 +54,7 @@ func randString(n int) string {
func TestUpdateLeaks(t *testing.T) {
// Create an empty state database
db := rawdb.NewMemoryDatabase()
- //dir, _ := ioutil.TempDir("", "eth-core-bench")
+ //dir, _ := os.MkdirTemp("", "eth-core-bench")
//ethdb,err:= ethdb.NewLDBDatabase(dir,128,128)
state, _ := New(common.Hash{}, NewDatabase(db), nil)
vm.PrecompiledContractCheckInstance = &TestPrecompiledContractCheck{}
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 527ad33cee..41dd86f3f4 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -1728,7 +1728,7 @@ func (pool *TxPool) truncateQueue() {
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
}
}
- sort.Sort(addresses)
+ sort.Sort(sort.Reverse(addresses))
// Drop transactions until the total is below the limit or only locals remain
for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
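
truncateQueue pops victims from the tail of the sorted slice, so wrapping the heartbeat ordering in sort.Reverse makes the tail the account with the oldest activity, which is the one that should be evicted first. A small standalone illustration of sort.Reverse over a sort.Interface (the types here are stand-ins, not the pool's own):

package main

import (
	"fmt"
	"sort"
	"time"
)

type beat struct {
	addr string
	seen time.Time
}

// byHeartbeat mirrors the idea of the pool's addressesByHeartbeat: ascending by last activity.
type byHeartbeat []beat

func (b byHeartbeat) Len() int           { return len(b) }
func (b byHeartbeat) Less(i, j int) bool { return b[i].seen.Before(b[j].seen) }
func (b byHeartbeat) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }

func main() {
	now := time.Now()
	beats := byHeartbeat{
		{"0xaa", now.Add(-1 * time.Minute)},
		{"0xbb", now.Add(-10 * time.Minute)},
		{"0xcc", now.Add(-5 * time.Minute)},
	}
	// Descending by heartbeat: popping from the tail now yields the stalest account first.
	sort.Sort(sort.Reverse(beats))
	for _, b := range beats {
		fmt.Println(b.addr, b.seen.Format(time.RFC3339))
	}
}
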
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 436506e2d4..be59574285 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
- "io/ioutil"
"math/big"
"math/rand"
"os"
@@ -2130,7 +2129,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
t.Parallel()
// Create a temporary file for the journal
- file, err := ioutil.TempFile("", "")
+ file, err := os.CreateTemp("", "")
if err != nil {
t.Fatalf("failed to create temporary journal: %v", err)
}
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 62dfe44433..e8293ea709 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -673,7 +673,7 @@ func (c *blake2F) Run(input []byte) ([]byte, error) {
// Parse the input into the Blake2b call parameters
var (
rounds = binary.BigEndian.Uint32(input[0:4])
- final = (input[212] == blake2FFinalBlockBytes)
+ final = input[212] == blake2FFinalBlockBytes
h [8]uint64
m [16]uint64
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index b0ad728a9f..50f7b1e3cf 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -18,8 +18,8 @@ package vm
import (
"fmt"
- "io/ioutil"
"math/big"
+ "os"
"reflect"
"testing"
@@ -513,7 +513,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
return
}
if common.Bytes2Hex(res) != test.Expected {
- bench.Error(fmt.Sprintf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)))
+ bench.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res))
return
}
})
@@ -769,7 +769,7 @@ func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG
func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) }
func loadJson(name string) ([]precompiledTest, error) {
- data, err := ioutil.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name))
+ data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name))
if err != nil {
return nil, err
}
@@ -779,7 +779,7 @@ func loadJson(name string) ([]precompiledTest, error) {
}
func loadJsonFail(name string) ([]precompiledFailureTest, error) {
- data, err := ioutil.ReadFile(fmt.Sprintf("testdata/precompiles/fail-%v.json", name))
+ data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/fail-%v.json", name))
if err != nil {
return nil, err
}
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 0e6f57179b..ad28bfde20 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -24,6 +24,7 @@ import (
)
var activators = map[int]func(*JumpTable){
+ 3855: enable3855,
3529: enable3529,
3198: enable3198,
2929: enable2929,
@@ -203,3 +204,20 @@ func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
scope.Stack.push(baseFee)
return nil, nil
}
+
+// enable3855 applies EIP-3855 (PUSH0 opcode)
+func enable3855(jt *JumpTable) {
+ // New opcode
+ jt[PUSH0] = &operation{
+ execute: opPush0,
+ constantGas: GasQuickStep,
+ minStack: minStack(0, 1),
+ maxStack: maxStack(0, 1),
+ }
+}
+
+// opPush0 implements the PUSH0 opcode
+func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int))
+ return nil, nil
+}
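
EIP-3855's PUSH0 pushes a zero word for GasQuickStep, so contracts no longer pay an immediate data byte for `PUSH1 0x00`. A hedged sketch that leans on the opcode and asm changes in this patch; vm.StringToOp is assumed to be the exported wrapper around the stringToOp table shown below:

package main

import (
	"fmt"

	"github.com/PlatONnetwork/PlatON-Go/core/asm"
	"github.com/PlatONnetwork/PlatON-Go/core/vm"
)

func main() {
	// PUSH0 PUSH0 RETURN — returns empty data without spending any immediate bytes.
	code := []byte{byte(vm.PUSH0), byte(vm.PUSH0), byte(vm.RETURN)}

	instrs, err := asm.Disassemble(code)
	if err != nil {
		fmt.Println("disassembly failed:", err)
		return
	}
	for _, ins := range instrs {
		fmt.Println(ins) // prints PUSH0 twice thanks to the extended opCodeToString table
	}
	fmt.Println(vm.StringToOp("PUSH0") == vm.PUSH0)
}
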
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index ef5d344a5d..93f3c673f9 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -484,7 +484,7 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v := new(uint256.Int).SetBytes((interpreter.evm.Context.Random.Bytes()))
+ v := new(uint256.Int).SetBytes(interpreter.evm.Context.Random.Bytes())
scope.Stack.push(v)
return nil, nil
}
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 1825b9bb7f..9fb074ed4e 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -22,8 +22,8 @@ import (
"fmt"
"github.com/PlatONnetwork/PlatON-Go/crypto"
"github.com/holiman/uint256"
- "io/ioutil"
"math/big"
+ "os"
"testing"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -260,7 +260,7 @@ func TestWriteExpectedValues(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- _ = ioutil.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0644)
+ _ = os.WriteFile(fmt.Sprintf("testdata/testcases_%v.json", name), data, 0644)
if err != nil {
t.Fatal(err)
}
@@ -270,7 +270,7 @@ func TestWriteExpectedValues(t *testing.T) {
// TestJsonTestcases runs through all the testcases defined as json-files
func TestJsonTestcases(t *testing.T) {
for name := range twoOpMethods {
- data, err := ioutil.ReadFile(fmt.Sprintf("testdata/testcases_%v.json", name))
+ data, err := os.ReadFile(fmt.Sprintf("testdata/testcases_%v.json", name))
if err != nil {
t.Fatal("Failed to read file", err)
}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index bad3eb0fde..a9e3865d31 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -238,11 +238,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err != nil || !contract.UseGas(dynamicCost) {
return nil, ErrOutOfGas
}
+ // Do tracing before memory expansion
+ if in.cfg.Debug {
+ in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
+ logged = true
+ }
if memorySize > 0 {
mem.Resize(memorySize)
}
- }
- if in.cfg.Debug {
+ } else if in.cfg.Debug {
in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
logged = true
}
diff --git a/core/vm/memory.go b/core/vm/memory.go
index ba5f8485dc..73483c7714 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -55,10 +55,9 @@ func (m *Memory) Set32(offset uint64, val *uint256.Int) {
if offset+32 > uint64(len(m.store)) {
panic("invalid memory: store empty")
}
- // Zero the memory area
- copy(m.store[offset:offset+32], []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
// Fill in relevant bits
- val.WriteToSlice(m.store[offset:])
+ b32 := val.Bytes32()
+ copy(m.store[offset:], b32[:])
}
// Resize resizes the memory to size
@@ -68,7 +67,7 @@ func (m *Memory) Resize(size uint64) {
}
}
-// Get returns offset + size as a new slice
+// GetCopy returns offset + size as a new slice
func (m *Memory) GetCopy(offset, size int64) (cpy []byte) {
if size == 0 {
return nil
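// A small sketch (depending only on holiman/uint256 and the standard library) of why
// the explicit zeroing copy removed above is redundant: Bytes32 returns the full 32-byte
// big-endian encoding, already left-padded with zeros, so a single copy both clears the
// word and writes the value.
package main

import (
	"bytes"
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	store := bytes.Repeat([]byte{0xff}, 32) // stands in for m.store[offset : offset+32]
	val := uint256.NewInt(0xabcd)

	b32 := val.Bytes32()
	copy(store, b32[:])

	fmt.Printf("%x\n", store) // 0000...abcd — the high bytes were zeroed by the copy itself
}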
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index cc9230a02f..b986d2c680 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -69,7 +69,10 @@ const (
SHL OpCode = 0x1b
SHR OpCode = 0x1c
SAR OpCode = 0x1d
+)
+// 0x20 range - crypto.
+const (
KECCAK256 OpCode = 0x20
)
@@ -124,6 +127,7 @@ const (
BEGINSUB OpCode = 0x5c
RETURNSUB OpCode = 0x5d
JUMPSUB OpCode = 0x5e
+ PUSH0 OpCode = 0x5f
)
// 0x60 range - pushes.
@@ -310,6 +314,7 @@ var opCodeToString = map[OpCode]string{
JUMPSUB: "JUMPSUB",
RETURNSUB: "RETURNSUB",
+ PUSH0: "PUSH0",
// 0x60 range - push.
PUSH1: "PUSH1",
PUSH2: "PUSH2",
@@ -475,6 +480,7 @@ var stringToOp = map[string]OpCode{
"BEGINSUB": BEGINSUB,
"RETURNSUB": RETURNSUB,
"JUMPSUB": JUMPSUB,
+ "PUSH0": PUSH0,
"PUSH1": PUSH1,
"PUSH2": PUSH2,
"PUSH3": PUSH3,
diff --git a/core/vm/stack.go b/core/vm/stack.go
index bcfc954e1c..cbcdbb3109 100644
--- a/core/vm/stack.go
+++ b/core/vm/stack.go
@@ -17,7 +17,6 @@
package vm
import (
- "fmt"
"sync"
"github.com/holiman/uint256"
@@ -82,19 +81,6 @@ func (st *Stack) Back(n int) *uint256.Int {
return &st.data[st.len()-n-1]
}
-// Print dumps the content of the stack
-func (st *Stack) Print() {
- fmt.Println("### stack ###")
- if len(st.data) > 0 {
- for i, val := range st.data {
- fmt.Printf("%-3d %s\n", i, val.String())
- }
- } else {
- fmt.Println("-- empty --")
- }
- fmt.Println("#############")
-}
-
var rStackPool = sync.Pool{
New: func() interface{} {
return &ReturnStack{data: make([]uint32, 0, 10)}
diff --git a/core/vm/wagon_runtime_test.go b/core/vm/wagon_runtime_test.go
index 847abcc905..780e18c6ae 100644
--- a/core/vm/wagon_runtime_test.go
+++ b/core/vm/wagon_runtime_test.go
@@ -7,8 +7,8 @@ import (
"encoding/binary"
"encoding/json"
"hash/fnv"
- "io/ioutil"
"math/big"
+ "os"
"strings"
"testing"
@@ -1403,7 +1403,7 @@ func newTestVM(evm *EVM) *exec.VM {
}
func TestExternalFunction(t *testing.T) {
- buf, err := ioutil.ReadFile("./testdata/external.wasm")
+ buf, err := os.ReadFile("./testdata/external.wasm")
assert.Nil(t, err)
module, err := ReadWasmModule(buf, false)
assert.Nil(t, err)
@@ -1485,7 +1485,7 @@ func ExecCase(t *testing.T, module *exec.CompiledModule, c *Case, i int) {
}
func readContractCode() []byte {
- buf, err := ioutil.ReadFile("./testdata/contract_hello.wasm")
+ buf, err := os.ReadFile("./testdata/contract_hello.wasm")
if nil != err {
panic(err)
}
diff --git a/core/vm/wagon_utils_test.go b/core/vm/wagon_utils_test.go
index a1488e6a6f..6dc6089fb1 100644
--- a/core/vm/wagon_utils_test.go
+++ b/core/vm/wagon_utils_test.go
@@ -2,7 +2,7 @@ package vm
import (
"hash/fnv"
- "io/ioutil"
+ "os"
"testing"
"github.com/PlatONnetwork/PlatON-Go/rlp"
@@ -10,13 +10,13 @@ import (
)
func TestReadWasmModule(t *testing.T) {
- buf, err := ioutil.ReadFile("./testdata/contract1.wasm")
+ buf, err := os.ReadFile("./testdata/contract1.wasm")
assert.Nil(t, err)
module, err := ReadWasmModule(buf, true)
assert.Nil(t, err)
assert.NotNil(t, module)
- buf, err = ioutil.ReadFile("./testdata/bad.wasm")
+ buf, err = os.ReadFile("./testdata/bad.wasm")
assert.Nil(t, err)
module, err = ReadWasmModule(buf, true)
assert.NotNil(t, err)
@@ -41,7 +41,6 @@ func TestDecodeFuncAndParams(t *testing.T) {
name1, _, err := decodeFuncAndParams(b1)
assert.Nil(t, err)
-
assert.Equal(t, initUint64, name1)
type m struct {
diff --git a/core/vm/wasm_engine_test.go b/core/vm/wasm_engine_test.go
index b185909e73..a24e0731fc 100644
--- a/core/vm/wasm_engine_test.go
+++ b/core/vm/wasm_engine_test.go
@@ -3,8 +3,8 @@ package vm
import (
"context"
"hash/fnv"
- "io/ioutil"
"math/big"
+ "os"
"testing"
"github.com/PlatONnetwork/PlatON-Go/rlp"
@@ -193,7 +193,7 @@ func TestWasmRun(t *testing.T) {
func deployData(t *testing.T, funcName, filePath string) []byte {
- buf, err := ioutil.ReadFile(filePath)
+ buf, err := os.ReadFile(filePath)
assert.Nil(t, err)
hash := fnv.New64()
diff --git a/crypto/bls/bls.go b/crypto/bls/bls.go
index cb078f85a3..1b1276935f 100644
--- a/crypto/bls/bls.go
+++ b/crypto/bls/bls.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"strings"
"unsafe"
@@ -107,7 +106,7 @@ func LoadBLS(file string) (*SecretKey, error) {
func SaveBLS(file string, key *SecretKey) error {
k := hex.EncodeToString(key.GetLittleEndian())
- return ioutil.WriteFile(file, []byte(k), 0600)
+ return os.WriteFile(file, []byte(k), 0600)
}
func GenerateKey() *SecretKey {
diff --git a/crypto/crypto.go b/crypto/crypto.go
index cb814d42fa..5ffdb23abe 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -26,7 +26,6 @@ import (
"fmt"
"hash"
"io"
- "io/ioutil"
"math/big"
"os"
@@ -253,7 +252,7 @@ func checkKeyFileEnd(r *bufio.Reader) error {
// restrictive permissions. The key data is saved hex-encoded.
func SaveECDSA(file string, key *ecdsa.PrivateKey) error {
k := hex.EncodeToString(FromECDSA(key))
- return ioutil.WriteFile(file, []byte(k), 0600)
+ return os.WriteFile(file, []byte(k), 0600)
}
// GenerateKey generates a new private key.
diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go
index 85b3db1494..5f162c01e9 100644
--- a/crypto/crypto_test.go
+++ b/crypto/crypto_test.go
@@ -20,7 +20,6 @@ import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
- "io/ioutil"
"math/big"
"os"
"reflect"
@@ -175,7 +174,7 @@ func TestLoadECDSA(t *testing.T) {
}
for _, test := range tests {
- f, err := ioutil.TempFile("", "loadecdsa_test.*.txt")
+ f, err := os.CreateTemp("", "loadecdsa_test.*.txt")
if err != nil {
t.Fatal(err)
}
@@ -196,7 +195,7 @@ func TestLoadECDSA(t *testing.T) {
}
func TestSaveECDSA(t *testing.T) {
- f, err := ioutil.TempFile("", "saveecdsa_test.*.txt")
+ f, err := os.CreateTemp("", "saveecdsa_test.*.txt")
if err != nil {
t.Fatal(err)
}
diff --git a/crypto/signify/signify.go b/crypto/signify/signify.go
index 4f46b1f0d8..bc1edf6e53 100644
--- a/crypto/signify/signify.go
+++ b/crypto/signify/signify.go
@@ -23,7 +23,7 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"strings"
"time"
@@ -77,7 +77,7 @@ func SignifySignFile(input string, output string, key string, unTrustedComment s
return err
}
- filedata, err := ioutil.ReadAll(in)
+ filedata, err := io.ReadAll(in)
if err != nil {
return err
}
diff --git a/crypto/signify/signify_fuzz.go b/crypto/signify/signify_fuzz.go
index bbf49a8f58..88a62767d4 100644
--- a/crypto/signify/signify_fuzz.go
+++ b/crypto/signify/signify_fuzz.go
@@ -23,7 +23,6 @@ import (
"bufio"
"fmt"
fuzz "github.com/google/gofuzz"
- "io/ioutil"
"log"
"os"
"os/exec"
@@ -33,7 +32,7 @@ func Fuzz(data []byte) int {
if len(data) < 32 {
return -1
}
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
panic(err)
}
@@ -74,7 +73,7 @@ func Fuzz(data []byte) int {
// Write the public key into the file to pass it as
// an argument to signify-openbsd
- pubKeyFile, err := ioutil.TempFile("", "")
+ pubKeyFile, err := os.CreateTemp("", "")
if err != nil {
panic(err)
}
@@ -126,7 +125,7 @@ func getKey(fileS string) (string, error) {
func createKeyPair() (string, string) {
// Create key and put it in correct format
- tmpKey, err := ioutil.TempFile("", "")
+ tmpKey, err := os.CreateTemp("", "")
defer os.Remove(tmpKey.Name())
defer os.Remove(tmpKey.Name() + ".pub")
defer os.Remove(tmpKey.Name() + ".sec")
diff --git a/crypto/signify/signify_test.go b/crypto/signify/signify_test.go
index af77eaf227..0915ac248d 100644
--- a/crypto/signify/signify_test.go
+++ b/crypto/signify/signify_test.go
@@ -20,7 +20,6 @@
package signify
import (
- "io/ioutil"
"math/rand"
"os"
"testing"
@@ -35,7 +34,7 @@ var (
)
func TestSignify(t *testing.T) {
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
@@ -79,7 +78,7 @@ func TestSignify(t *testing.T) {
}
func TestSignifyTrustedCommentTooManyLines(t *testing.T) {
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
@@ -104,7 +103,7 @@ func TestSignifyTrustedCommentTooManyLines(t *testing.T) {
}
func TestSignifyTrustedCommentTooManyLinesLF(t *testing.T) {
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
@@ -129,7 +128,7 @@ func TestSignifyTrustedCommentTooManyLinesLF(t *testing.T) {
}
func TestSignifyTrustedCommentEmpty(t *testing.T) {
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
diff --git a/eth/api.go b/eth/api.go
index f228c1522d..1a2d41c6dc 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -199,7 +199,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error
return stateDb.RawDump(opts), nil
}
var block *types.Block
- if blockNr == rpc.LatestBlockNumber {
+ if blockNr == rpc.LatestBlockNumber || blockNr == rpc.FinalizedBlockNumber {
block = api.eth.blockchain.CurrentBlock()
} else {
block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
@@ -306,7 +306,7 @@ func (api *PrivateDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, st
_, stateDb = api.eth.miner.Pending()
} else {
var block *types.Block
- if number == rpc.LatestBlockNumber {
+ if number == rpc.LatestBlockNumber || number == rpc.FinalizedBlockNumber {
block = api.eth.blockchain.CurrentBlock()
} else {
block = api.eth.blockchain.GetBlockByNumber(uint64(number))
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 6c652c971e..be3f4ad913 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -77,7 +77,7 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb
return block.Header(), nil
}
// Otherwise resolve and return the block
- if number == rpc.LatestBlockNumber {
+ if number == rpc.LatestBlockNumber || number == rpc.FinalizedBlockNumber {
return b.eth.blockchain.CurrentBlock().Header(), nil
}
return b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil
@@ -111,7 +111,7 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe
return block, nil
}
// Otherwise resolve and return the block
- if number == rpc.LatestBlockNumber {
+ if number == rpc.LatestBlockNumber || number == rpc.FinalizedBlockNumber {
return b.eth.blockchain.CurrentBlock(), nil
}
return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
index 0f77a17dfe..bd21360975 100644
--- a/eth/fetcher/tx_fetcher_test.go
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -304,7 +304,6 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
// Create a channel to control when tx requests can fail
proceed := make(chan struct{})
-
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
@@ -1263,6 +1262,16 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
fetcher.Start()
defer fetcher.Stop()
+ defer func() { // drain the wait chan on exit
+ for {
+ select {
+ case <-wait:
+ default:
+ return
+ }
+ }
+ }()
+
// Crunch through all the test steps and execute them
for i, step := range tt.steps {
switch step := step.(type) {
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 6adc0c71d7..0ed4b5bf35 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -299,7 +299,7 @@ Logs:
}
// If the to filtered topics is greater than the amount of topics in logs, skip.
if len(topics) > len(log.Topics) {
- continue Logs
+ continue
}
for i, sub := range topics {
match := len(sub) == 0 // empty rule set == wildcard
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index a4902363eb..79b72a8652 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -306,12 +306,15 @@ func TestLogFilterCreation(t *testing.T) {
)
for i, test := range testCases {
- _, err := api.NewFilter(test.crit)
- if test.success && err != nil {
+ id, err := api.NewFilter(test.crit)
+ if err != nil && test.success {
t.Errorf("expected filter creation for case %d to success, got %v", i, err)
}
- if !test.success && err == nil {
- t.Errorf("expected testcase %d to fail with an error", i)
+ if err == nil {
+ api.UninstallFilter(id)
+ if !test.success {
+ t.Errorf("expected testcase %d to fail with an error", i)
+ }
}
}
}
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index 99c7e1a3cc..a2674ec913 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -18,7 +18,6 @@ package filters
import (
"context"
- "io/ioutil"
"math/big"
"os"
"testing"
@@ -45,7 +44,7 @@ func makeReceipt(addr common.Address) *types.Receipt {
}
func BenchmarkFilters(b *testing.B) {
- dir, err := ioutil.TempDir("", "filtertest")
+ dir, err := os.MkdirTemp("", "filtertest")
if err != nil {
b.Fatal(err)
}
@@ -104,7 +103,7 @@ func BenchmarkFilters(b *testing.B) {
}
func TestFilters(t *testing.T) {
- dir, err := ioutil.TempDir("", "filtertest")
+ dir, err := os.MkdirTemp("", "filtertest")
if err != nil {
t.Fatal(err)
}
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index 6f1e1adfab..523db5386e 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -321,7 +321,7 @@ func (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
return p2p.Send(p.rw, BlockBodiesMsg, bodies) // Not packed into BlockBodiesPacket to avoid RLP decoding
}
-// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.
+// ReplyBlockBodiesRLP is the eth/66 response to GetBlockBodies.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// Not packed into BlockBodiesPacket to avoid RLP decoding
return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 754b128d83..1516c83ba2 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -181,12 +181,22 @@ func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
// BlockHeadersPacket represents a block header response.
type BlockHeadersPacket []*types.Header
-// BlockHeadersPacket represents a block header response over eth/66.
+// BlockHeadersPacket66 represents a block header response over eth/66.
type BlockHeadersPacket66 struct {
RequestId uint64
BlockHeadersPacket
}
+// BlockHeadersRLPPacket represents a block header response, to use when we already
+// have the headers rlp encoded.
+type BlockHeadersRLPPacket []rlp.RawValue
+
+// BlockHeadersRLPPacket66 represents a block header response over eth/66.
+type BlockHeadersRLPPacket66 struct {
+ RequestId uint64
+ BlockHeadersRLPPacket
+}
+
// NewBlockPacket is the network packet for the block propagation message.
type NewBlockPacket struct {
Block *types.Block
@@ -203,7 +213,7 @@ func (request *NewBlockPacket) sanityCheck() error {
// GetBlockBodiesPacket represents a block body query.
type GetBlockBodiesPacket []common.Hash
-// GetBlockBodiesPacket represents a block body query over eth/66.
+// GetBlockBodiesPacket66 represents a block body query over eth/66.
type GetBlockBodiesPacket66 struct {
RequestId uint64
GetBlockBodiesPacket
@@ -212,7 +222,7 @@ type GetBlockBodiesPacket66 struct {
// BlockBodiesPacket is the network packet for block content distribution.
type BlockBodiesPacket []*BlockBody
-// BlockBodiesPacket is the network packet for block content distribution over eth/66.
+// BlockBodiesPacket66 is the network packet for block content distribution over eth/66.
type BlockBodiesPacket66 struct {
RequestId uint64
BlockBodiesPacket
@@ -251,7 +261,7 @@ func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]byte) {
// GetNodeDataPacket represents a trie node data query.
type GetNodeDataPacket []common.Hash
-// GetNodeDataPacket represents a trie node data query over eth/66.
+// GetNodeDataPacket66 represents a trie node data query over eth/66.
type GetNodeDataPacket66 struct {
RequestId uint64
GetNodeDataPacket
@@ -260,7 +270,7 @@ type GetNodeDataPacket66 struct {
// NodeDataPacket is the network packet for trie node data distribution.
type NodeDataPacket [][]byte
-// NodeDataPacket is the network packet for trie node data distribution over eth/66.
+// NodeDataPacket66 is the network packet for trie node data distribution over eth/66.
type NodeDataPacket66 struct {
RequestId uint64
NodeDataPacket
@@ -269,7 +279,7 @@ type NodeDataPacket66 struct {
// GetReceiptsPacket represents a block receipts query.
type GetReceiptsPacket []common.Hash
-// GetReceiptsPacket represents a block receipts query over eth/66.
+// GetReceiptsPacket66 represents a block receipts query over eth/66.
type GetReceiptsPacket66 struct {
RequestId uint64
GetReceiptsPacket
@@ -278,7 +288,7 @@ type GetReceiptsPacket66 struct {
// ReceiptsPacket is the network packet for block receipts distribution.
type ReceiptsPacket [][]*types.Receipt
-// ReceiptsPacket is the network packet for block receipts distribution over eth/66.
+// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.
type ReceiptsPacket66 struct {
RequestId uint64
ReceiptsPacket
@@ -287,7 +297,7 @@ type ReceiptsPacket66 struct {
// ReceiptsRLPPacket is used for receipts, when we already have it encoded
type ReceiptsRLPPacket []rlp.RawValue
-// ReceiptsPacket66 is the eth-66 version of ReceiptsRLPPacket
+// ReceiptsRLPPacket66 is the eth-66 version of ReceiptsRLPPacket
type ReceiptsRLPPacket66 struct {
RequestId uint64
ReceiptsRLPPacket
@@ -307,13 +317,13 @@ type GetPooledTransactionsPacket66 struct {
// PooledTransactionsPacket is the network packet for transaction distribution.
type PooledTransactionsPacket []*types.Transaction
-// PooledTransactionsPacket is the network packet for transaction distribution over eth/66.
+// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66.
type PooledTransactionsPacket66 struct {
RequestId uint64
PooledTransactionsPacket
}
-// PooledTransactionsPacket is the network packet for transaction distribution, used
+// PooledTransactionsRLPPacket is the network packet for transaction distribution, used
// in the cases we already have them in rlp-encoded form
type PooledTransactionsRLPPacket []rlp.RawValue
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index 8278ea0674..268d44b98a 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -309,13 +309,15 @@ func HandleMessage(backend Backend, peer *Peer) error {
break
}
}
- slots = append(slots, storage)
+ if len(storage) > 0 {
+ slots = append(slots, storage)
+ }
it.Release()
// Generate the Merkle proofs for the first and last storage slot, but
// only if the response was capped. If the entire storage trie included
// in the response, no need for any proofs.
- if origin != (common.Hash{}) || abort {
+ if origin != (common.Hash{}) || (abort && len(storage) > 0) {
// Request started at a non-zero hash or was capped prematurely, add
// the endpoint Merkle proofs
accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
diff --git a/eth/protocols/snap/sort_test.go b/eth/protocols/snap/sort_test.go
new file mode 100644
index 0000000000..eb74f86ee2
--- /dev/null
+++ b/eth/protocols/snap/sort_test.go
@@ -0,0 +1,109 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package snap
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/PlatONnetwork/PlatON-Go/common"
+ "github.com/PlatONnetwork/PlatON-Go/trie"
+)
+
+func hexToNibbles(s string) []byte {
+ if len(s) >= 2 && s[0] == '0' && s[1] == 'x' {
+ s = s[2:]
+ }
+ var s2 []byte
+ for _, ch := range []byte(s) {
+ s2 = append(s2, '0')
+ s2 = append(s2, ch)
+ }
+ return common.Hex2Bytes(string(s2))
+}
+
+func TestRequestSorting(t *testing.T) {
+
+ // - Path 0x9 -> {0x19}
+ // - Path 0x99 -> {0x0099}
+ // - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
+ // - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
+ var f = func(path string) (trie.SyncPath, TrieNodePathSet, common.Hash) {
+ data := hexToNibbles(path)
+ sp := trie.NewSyncPath(data)
+ tnps := TrieNodePathSet([][]byte(sp))
+ hash := common.Hash{}
+ return sp, tnps, hash
+ }
+ var (
+ hashes []common.Hash
+ paths []trie.SyncPath
+ pathsets []TrieNodePathSet
+ )
+ for _, x := range []string{
+ "0x9",
+ "0x012345678901234567890123456789010123456789012345678901234567890195",
+ "0x012345678901234567890123456789010123456789012345678901234567890197",
+ "0x012345678901234567890123456789010123456789012345678901234567890196",
+ "0x99",
+ "0x012345678901234567890123456789010123456789012345678901234567890199",
+ "0x01234567890123456789012345678901012345678901234567890123456789019",
+ "0x0123456789012345678901234567890101234567890123456789012345678901",
+ "0x01234567890123456789012345678901012345678901234567890123456789010",
+ "0x01234567890123456789012345678901012345678901234567890123456789011",
+ } {
+ sp, tnps, hash := f(x)
+ hashes = append(hashes, hash)
+ paths = append(paths, sp)
+ pathsets = append(pathsets, tnps)
+ }
+ _, paths, pathsets = sortByAccountPath(hashes, paths)
+ {
+ var b = new(bytes.Buffer)
+ for i := 0; i < len(paths); i++ {
+ fmt.Fprintf(b, "\n%d. paths %x", i, paths[i])
+ }
+ want := `
+0. paths [0099]
+1. paths [0123456789012345678901234567890101234567890123456789012345678901 00]
+2. paths [0123456789012345678901234567890101234567890123456789012345678901 0095]
+3. paths [0123456789012345678901234567890101234567890123456789012345678901 0096]
+4. paths [0123456789012345678901234567890101234567890123456789012345678901 0097]
+5. paths [0123456789012345678901234567890101234567890123456789012345678901 0099]
+6. paths [0123456789012345678901234567890101234567890123456789012345678901 10]
+7. paths [0123456789012345678901234567890101234567890123456789012345678901 11]
+8. paths [0123456789012345678901234567890101234567890123456789012345678901 19]
+9. paths [19]`
+ if have := b.String(); have != want {
+ t.Errorf("have:%v\nwant:%v\n", have, want)
+ }
+ }
+ {
+ var b = new(bytes.Buffer)
+ for i := 0; i < len(pathsets); i++ {
+ fmt.Fprintf(b, "\n%d. pathset %x", i, pathsets[i])
+ }
+ want := `
+0. pathset [0099]
+1. pathset [0123456789012345678901234567890101234567890123456789012345678901 00 0095 0096 0097 0099 10 11 19]
+2. pathset [19]`
+ if have := b.String(); have != want {
+ t.Errorf("have:%v\nwant:%v\n", have, want)
+ }
+ }
+}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 9ba1baad2d..7f124d5155 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -170,7 +170,7 @@ type bytecodeResponse struct {
// to actual requests and to validate any security constraints.
//
// Concurrency note: storage requests and responses are handled concurrently from
-// the main runloop to allow Merkel proof verifications on the peer's thread and
+// the main runloop to allow Merkle proof verifications on the peer's thread and
// to drop on invalid response. The request struct must contain all the data to
// construct the response without accessing runloop internals (i.e. tasks). That
// is only included to allow the runloop to match a response to the task being
@@ -327,10 +327,10 @@ type healTask struct {
codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval
}
-// syncProgress is a database entry to allow suspending and resuming a snapshot state
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
// sync. Opposed to full and fast sync, there is no way to restart a suspended
// snap sync without prior knowledge of the suspension point.
-type syncProgress struct {
+type SyncProgress struct {
Tasks []*accountTask // The suspended account tasks (contract tasks within)
// Status report during syncing phase
@@ -344,12 +344,15 @@ type syncProgress struct {
// Status report during healing phase
TrienodeHealSynced uint64 // Number of state trie nodes downloaded
TrienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk
- TrienodeHealDups uint64 // Number of state trie nodes already processed
- TrienodeHealNops uint64 // Number of state trie nodes not requested
BytecodeHealSynced uint64 // Number of bytecodes downloaded
BytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
- BytecodeHealDups uint64 // Number of bytecodes already processed
- BytecodeHealNops uint64 // Number of bytecodes not requested
+}
+
+// SyncPending is analogous to SyncProgress, but it's used to report on pending
+// ephemeral sync progress that doesn't get persisted into the database.
+type SyncPending struct {
+ TrienodeHeal uint64 // Number of state trie nodes pending
+ BytecodeHeal uint64 // Number of bytecodes pending
}
// SyncPeer abstracts out the methods required for a peer to be synced against
@@ -421,6 +424,8 @@ type Syncer struct {
storageSynced uint64 // Number of storage slots downloaded
storageBytes common.StorageSize // Number of storage trie bytes persisted to disk
+ extProgress *SyncProgress // progress that can be exposed to external caller.
+
// Request tracking during healing phase
trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests
bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests
@@ -476,6 +481,8 @@ func NewSyncer(db ethdb.KeyValueStore) *Syncer {
trienodeHealReqs: make(map[uint64]*trienodeHealRequest),
bytecodeHealReqs: make(map[uint64]*bytecodeHealRequest),
stateWriter: db.NewBatch(),
+
+ extProgress: new(SyncProgress),
}
}
@@ -632,6 +639,21 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
}
+ // Update sync progress
+ s.lock.Lock()
+ s.extProgress = &SyncProgress{
+ AccountSynced: s.accountSynced,
+ AccountBytes: s.accountBytes,
+ BytecodeSynced: s.bytecodeSynced,
+ BytecodeBytes: s.bytecodeBytes,
+ StorageSynced: s.storageSynced,
+ StorageBytes: s.storageBytes,
+ TrienodeHealSynced: s.trienodeHealSynced,
+ TrienodeHealBytes: s.trienodeHealBytes,
+ BytecodeHealSynced: s.bytecodeHealSynced,
+ BytecodeHealBytes: s.bytecodeHealBytes,
+ }
+ s.lock.Unlock()
// Wait for something to happen
select {
case <-s.update:
@@ -673,7 +695,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
// loadSyncStatus retrieves a previously aborted sync status from the database,
// or generates a fresh one if none is available.
func (s *Syncer) loadSyncStatus() {
- var progress syncProgress
+ var progress SyncProgress
if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
if err := json.Unmarshal(status, &progress); err != nil {
@@ -704,6 +726,9 @@ func (s *Syncer) loadSyncStatus() {
}
}
}
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
s.snapped = len(s.tasks) == 0
s.accountSynced = progress.AccountSynced
@@ -777,7 +802,7 @@ func (s *Syncer) saveSyncStatus() {
}
}
// Store the actual progress markers
- progress := &syncProgress{
+ progress := &SyncProgress{
Tasks: s.tasks,
AccountSynced: s.accountSynced,
AccountBytes: s.accountBytes,
@@ -797,6 +822,18 @@ func (s *Syncer) saveSyncStatus() {
rawdb.WriteSnapshotSyncStatus(s.db, status)
}
+// Progress returns the snap sync status statistics.
+func (s *Syncer) Progress() (*SyncProgress, *SyncPending) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+ pending := new(SyncPending)
+ if s.healer != nil {
+ pending.TrienodeHeal = uint64(len(s.healer.trieTasks))
+ pending.BytecodeHeal = uint64(len(s.healer.codeTasks))
+ }
+ return s.extProgress, pending
+}
+
// cleanAccountTasks removes account range retrieval tasks that have already been
// completed.
func (s *Syncer) cleanAccountTasks() {
@@ -1296,12 +1333,13 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
hashes = append(hashes, hash)
paths = append(paths, pathset)
- pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash
if len(hashes) >= cap {
break
}
}
+ // Group requests by account hash
+ hashes, paths, pathsets = sortByAccountPath(hashes, paths)
req := &trienodeHealRequest{
peer: idle,
id: reqid,
@@ -2802,12 +2840,10 @@ func (s *Syncer) reportSyncProgress(force bool) {
new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
accountFills,
).Uint64())
-
// Don't report anything until we have a meaningful progress
if estBytes < 1.0 {
return
}
-
elapsed := time.Since(s.startTime)
estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
@@ -2877,3 +2913,76 @@ func (s *capacitySort) Swap(i, j int) {
s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
s.caps[i], s.caps[j] = s.caps[j], s.caps[i]
}
+
+// healRequestSort implements the Sort interface, allowing sorting trienode
+// heal requests, which is a prerequisite for merging storage-requests.
+type healRequestSort struct {
+ hashes []common.Hash
+ paths []trie.SyncPath
+}
+
+func (t *healRequestSort) Len() int {
+ return len(t.hashes)
+}
+
+func (t *healRequestSort) Less(i, j int) bool {
+ a := t.paths[i]
+ b := t.paths[j]
+ switch bytes.Compare(a[0], b[0]) {
+ case -1:
+ return true
+ case 1:
+ return false
+ }
+ // identical first part
+ if len(a) < len(b) {
+ return true
+ }
+ if len(b) < len(a) {
+ return false
+ }
+ if len(a) == 2 {
+ return bytes.Compare(a[1], b[1]) < 0
+ }
+ return false
+}
+
+func (t *healRequestSort) Swap(i, j int) {
+ t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
+ t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
+}
+
+// Merge merges the pathsets, so that several storage requests concerning the
+// same account are merged into one, to reduce bandwidth.
+// OBS: This operation is moot if t has not first been sorted.
+func (t *healRequestSort) Merge() []TrieNodePathSet {
+ var result []TrieNodePathSet
+ for _, path := range t.paths {
+ pathset := TrieNodePathSet([][]byte(path))
+ if len(path) == 1 {
+ // It's an account reference.
+ result = append(result, pathset)
+ } else {
+ // It's a storage reference.
+ end := len(result) - 1
+ if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
+ // The account doesn't match the last one, create a new entry.
+ result = append(result, pathset)
+ } else {
+ // It's the same account as the previous one, add to the storage
+ // paths of that request.
+ result[end] = append(result[end], pathset[1])
+ }
+ }
+ }
+ return result
+}
+
+// sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
+// the TrieNodePaths and merges paths which belong to the same account path.
+func sortByAccountPath(hashes []common.Hash, paths []trie.SyncPath) ([]common.Hash, []trie.SyncPath, []TrieNodePathSet) {
+ n := &healRequestSort{hashes, paths}
+ sort.Sort(n)
+ pathsets := n.Merge()
+ return n.hashes, n.paths, pathsets
+}
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index e60478cd07..8341d01a6f 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -335,13 +335,14 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
break
}
}
- hashes = append(hashes, keys)
- slots = append(slots, vals)
-
+ if len(keys) > 0 {
+ hashes = append(hashes, keys)
+ slots = append(slots, vals)
+ }
// Generate the Merkle proofs for the first and last storage slot, but
// only if the response was capped. If the entire storage trie included
// in the response, no need for any proofs.
- if originHash != (common.Hash{}) || abort {
+ if originHash != (common.Hash{}) || (abort && len(keys) > 0) {
// If we're aborting, we need to prove the first and last item
// This terminates the response (and thus the loop)
proof := light.NewNodeSet()
@@ -368,8 +369,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
return hashes, slots, proofs
}
-// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
-//
+// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
// supplies the proof for the last account, even if it is 'complete'.h
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
var size uint64
@@ -1098,13 +1098,15 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
+
// There are only 8 unique hashes, and 3K accounts. However, the code
// deduplication is per request batch. If it were a perfect global dedup,
// we would expect only 8 requests. If there were no dedup, there would be
// 3k requests.
- // We expect somewhere below 100 requests for these 8 unique hashes.
+ // We expect somewhere below 100 requests for these 8 unique hashes. But
+ // the number can be flaky, so don't limit it so strictly.
if threshold := 100; counter > threshold {
- t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
+ t.Logf("Error, expected < %d invocations, got %d", threshold, counter)
}
verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
}
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index f786683ea4..20b02cff70 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/eth/tracers/logger"
- "io/ioutil"
"os"
"runtime"
"sync"
@@ -465,7 +464,7 @@ func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig
// TraceBlockFromFile returns the structured logs created during the execution of
// EVM and returns them as a JSON object.
func (api *API) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) {
- blob, err := ioutil.ReadFile(file)
+ blob, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("could not read file: %v", err)
}
@@ -722,7 +721,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
if !canon {
prefix = fmt.Sprintf("%valt-", prefix)
}
- dump, err = ioutil.TempFile(os.TempDir(), prefix)
+ dump, err = os.CreateTemp(os.TempDir(), prefix)
if err != nil {
return nil, err
}
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 3518c21f3f..0c93e97e9c 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -19,8 +19,8 @@ package tracetest
import (
"encoding/json"
"github.com/PlatONnetwork/PlatON-Go/params"
- "io/ioutil"
"math/big"
+ "os"
"path/filepath"
"reflect"
"strings"
@@ -135,7 +135,7 @@ func TestCallTracerNative(t *testing.T) {
}
func testCallTracer(tracerName string, dirPath string, t *testing.T) {
- files, err := ioutil.ReadDir(filepath.Join("testdata", dirPath))
+ files, err := os.ReadDir(filepath.Join("testdata", dirPath))
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
}
@@ -152,7 +152,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
tx = new(types.Transaction)
)
// Call tracer test found, read if from disk
- if blob, err := ioutil.ReadFile(filepath.Join("testdata", dirPath, file.Name())); err != nil {
+ if blob, err := os.ReadFile(filepath.Join("testdata", dirPath, file.Name())); err != nil {
t.Fatalf("failed to read testcase: %v", err)
} else if err := json.Unmarshal(blob, test); err != nil {
t.Fatalf("failed to parse testcase: %v", err)
@@ -240,7 +240,7 @@ func camel(str string) string {
return strings.Join(pieces, "")
}
func BenchmarkTracers(b *testing.B) {
- files, err := ioutil.ReadDir(filepath.Join("testdata", "call_tracer"))
+ files, err := os.ReadDir(filepath.Join("testdata", "call_tracer"))
if err != nil {
b.Fatalf("failed to retrieve tracer test suite: %v", err)
}
@@ -250,7 +250,7 @@ func BenchmarkTracers(b *testing.B) {
}
file := file // capture range variable
b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
- blob, err := ioutil.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
+ blob, err := os.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
if err != nil {
b.Fatalf("failed to read testcase: %v", err)
}
diff --git a/eth/tracers/js/internal/tracers/assets.go b/eth/tracers/js/internal/tracers/assets.go
index c6df3348d7..8fa1f95707 100644
--- a/eth/tracers/js/internal/tracers/assets.go
+++ b/eth/tracers/js/internal/tracers/assets.go
@@ -16,7 +16,6 @@ import (
"compress/gzip"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -269,7 +268,7 @@ func unigram_tracerJs() (*asset, error) {
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
@@ -295,7 +294,7 @@ func MustAsset(name string) []byte {
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
@@ -346,7 +345,7 @@ var _bindata = map[string]func() (*asset, error){
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
@@ -396,7 +395,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
- err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
@@ -425,6 +424,6 @@ func RestoreAssets(dir, name string) error {
}
func _filePath(dir, name string) string {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 7131e492be..ffed08c85b 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -96,6 +96,13 @@ func (ec *Client) BlockNumber(ctx context.Context) (uint64, error) {
return uint64(result), err
}
+// PeerCount returns the number of p2p peers as reported by the net_peerCount method.
+func (ec *Client) PeerCount(ctx context.Context) (uint64, error) {
+ var result hexutil.Uint64
+ err := ec.c.CallContext(ctx, &result, "net_peerCount")
+ return uint64(result), err
+}
+
type rpcBlock struct {
Hash common.Hash `json:"hash"`
Transactions []rpcTransaction `json:"transactions"`
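// A usage sketch for the PeerCount helper added above; the endpoint is hypothetical,
// the rest follows the ethclient API (Dial, Close) already exposed by this package.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/PlatONnetwork/PlatON-Go/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://127.0.0.1:6789") // hypothetical local RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// PeerCount is a thin wrapper around the net_peerCount RPC method.
	peers, err := client.PeerCount(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected peers:", peers)
}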
diff --git a/ethdb/database.go b/ethdb/database.go
index 7865b4535e..3ce5aa35b2 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -37,8 +37,8 @@ type KeyValueWriter interface {
Delete(key []byte) error
}
-// Stater wraps the Stat method of a backing data store.
-type Stater interface {
+// KeyValueStater wraps the Stat method of a backing data store.
+type KeyValueStater interface {
// Stat returns a particular internal stat of the database.
Stat(property string) (string, error)
}
@@ -60,16 +60,16 @@ type Compacter interface {
type KeyValueStore interface {
KeyValueReader
KeyValueWriter
+ KeyValueStater
Batcher
Iteratee
- Stater
Compacter
Snapshotter
io.Closer
}
-// AncientReader contains the methods required to read from immutable ancient data.
-type AncientReader interface {
+// AncientReaderOp contains the methods required to read from immutable ancient data.
+type AncientReaderOp interface {
// HasAncient returns an indicator whether the specified data exists in the
// ancient store.
HasAncient(kind string, number uint64) (bool, error)
@@ -95,13 +95,13 @@ type AncientReader interface {
AncientSize(kind string) (uint64, error)
}
-// AncientBatchReader is the interface for 'batched' or 'atomic' reading.
-type AncientBatchReader interface {
- AncientReader
+// AncientReader is the extended ancient reader interface including 'batched' or 'atomic' reading.
+type AncientReader interface {
+ AncientReaderOp
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
- ReadAncients(fn func(AncientReader) error) (err error)
+ ReadAncients(fn func(AncientReaderOp) error) (err error)
}
// AncientWriter contains the methods required to write to immutable ancient data.
@@ -140,11 +140,17 @@ type AncientWriteOp interface {
AppendRaw(kind string, number uint64, item []byte) error
}
+// AncientStater wraps the Stat method of a backing data store.
+type AncientStater interface {
+ // AncientDatadir returns the root directory path of the ancient store.
+ AncientDatadir() (string, error)
+}
+
// Reader contains the methods required to read data from both key-value as well as
// immutable ancient data.
type Reader interface {
KeyValueReader
- AncientBatchReader
+ AncientReader
}
// Writer contains the methods required to write data to both key-value as well as
@@ -154,11 +160,19 @@ type Writer interface {
AncientWriter
}
+// Stater contains the methods required to retrieve states from both key-value as well as
+// immutable ancient data.
+type Stater interface {
+ KeyValueStater
+ AncientStater
+}
+
// AncientStore contains all the methods required to allow handling different
// ancient data stores backing immutable chain data store.
type AncientStore interface {
- AncientBatchReader
+ AncientReader
AncientWriter
+ AncientStater
io.Closer
}
diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go
new file mode 100644
index 0000000000..2d10bbc3e2
--- /dev/null
+++ b/ethdb/remotedb/remotedb.go
@@ -0,0 +1,173 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package remotedb implements the key-value database layer based on a remote geth
+// node. Under the hood, it utilises the `debug_dbGet` method to implement a
+// read-only database.
+// There really are no guarantees in this database, since the local geth does not have
+// exclusive access, but it can be used for basic diagnostics of a remote node.
+package remotedb
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/PlatONnetwork/PlatON-Go/common/hexutil"
+ "github.com/PlatONnetwork/PlatON-Go/ethdb"
+ "github.com/PlatONnetwork/PlatON-Go/rpc"
+)
+
+// Database is a key-value lookup for a remote database via debug_dbGet.
+type Database struct {
+ remote *rpc.Client
+}
+
+func (db *Database) Has(key []byte) (bool, error) {
+ if _, err := db.Get(key); err != nil {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (db *Database) Get(key []byte) ([]byte, error) {
+ var resp hexutil.Bytes
+ err := db.remote.Call(&resp, "debug_dbGet", hexutil.Bytes(key))
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (db *Database) HasAncient(kind string, number uint64) (bool, error) {
+ if _, err := db.Ancient(kind, number); err != nil {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (db *Database) Ancient(kind string, number uint64) ([]byte, error) {
+ var resp hexutil.Bytes
+ err := db.remote.Call(&resp, "debug_dbAncient", kind, number)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (db *Database) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+ panic("not supported")
+}
+
+func (db *Database) Ancients() (uint64, error) {
+ var resp uint64
+ err := db.remote.Call(&resp, "debug_dbAncients")
+ return resp, err
+}
+
+func (db *Database) Tail() (uint64, error) {
+ panic("not supported")
+}
+
+func (db *Database) AncientSize(kind string) (uint64, error) {
+ panic("not supported")
+}
+
+func (db *Database) ReadAncients(fn func(op ethdb.AncientReaderOp) error) (err error) {
+ return fn(db)
+}
+
+func (db *Database) Put(key []byte, value []byte) error {
+ panic("not supported")
+}
+
+func (db *Database) Delete(key []byte) error {
+ panic("not supported")
+}
+
+func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, error) {
+ panic("not supported")
+}
+
+func (db *Database) TruncateHead(n uint64) error {
+ panic("not supported")
+}
+
+func (db *Database) TruncateTail(n uint64) error {
+ panic("not supported")
+}
+
+func (db *Database) Sync() error {
+ return nil
+}
+
+func (db *Database) MigrateTable(s string, f func([]byte) ([]byte, error)) error {
+ panic("not supported")
+}
+
+func (db *Database) NewBatch() ethdb.Batch {
+ panic("not supported")
+}
+
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+ panic("not supported")
+}
+
+func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
+ panic("not supported")
+}
+
+func (db *Database) Stat(property string) (string, error) {
+ panic("not supported")
+}
+
+func (db *Database) AncientDatadir() (string, error) {
+ panic("not supported")
+}
+
+func (db *Database) Compact(start []byte, limit []byte) error {
+ return nil
+}
+
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+ panic("not supported")
+}
+
+func (db *Database) Close() error {
+ db.remote.Close()
+ return nil
+}
+
+func dialRPC(endpoint string) (*rpc.Client, error) {
+ if endpoint == "" {
+ return nil, errors.New("endpoint must be specified")
+ }
+ if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ // Backwards compatibility with geth < 1.5 which required
+ // these prefixes.
+ endpoint = endpoint[4:]
+ }
+ return rpc.Dial(endpoint)
+}
+
+func New(endpoint string) (ethdb.Database, error) {
+ client, err := dialRPC(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ return &Database{
+ remote: client,
+ }, nil
+}
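// A usage sketch for the read-only remote database above. The endpoint is hypothetical
// and the key is illustrative; reads are served by debug_dbGet on the remote node, while
// writes, batches and iteration panic with "not supported".
package main

import (
	"fmt"
	"log"

	"github.com/PlatONnetwork/PlatON-Go/ethdb/remotedb"
)

func main() {
	db, err := remotedb.New("http://127.0.0.1:6789") // remote node must expose the debug API
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if val, err := db.Get([]byte("LastBlock")); err == nil {
		fmt.Printf("LastBlock -> %x\n", val)
	}
}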
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index a04321a398..6911ead6d1 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -365,7 +365,7 @@ func (s *Service) readLoop(conn *connWrapper) {
// If the network packet is a system ping, respond to it directly
var ping string
if err := json.Unmarshal(blob, &ping); err == nil && strings.HasPrefix(ping, "primus::ping::") {
- if err := conn.WriteJSON(strings.Replace(ping, "ping", "pong", -1)); err != nil {
+ if err := conn.WriteJSON(strings.ReplaceAll(ping, "ping", "pong")); err != nil {
log.Warn("Failed to respond to system ping message", "err", err)
return
}
diff --git a/graphql/graphql.go b/graphql/graphql.go
index d815a1cd93..cfbaf1573a 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -33,6 +33,7 @@ import (
"github.com/PlatONnetwork/PlatON-Go/core/types"
"github.com/PlatONnetwork/PlatON-Go/eth/filters"
"github.com/PlatONnetwork/PlatON-Go/internal/ethapi"
+ "github.com/PlatONnetwork/PlatON-Go/rlp"
"github.com/PlatONnetwork/PlatON-Go/rpc"
)
@@ -65,6 +66,8 @@ func (b *Long) UnmarshalGraphQL(input interface{}) error {
*b = Long(input)
case int64:
*b = Long(input)
+ case float64:
+ *b = Long(input)
default:
err = fmt.Errorf("unexpected type %T for Long", input)
}
@@ -514,6 +517,22 @@ func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {
return hexutil.Big(*v), nil
}
+func (t *Transaction) Raw(ctx context.Context) (hexutil.Bytes, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return hexutil.Bytes{}, err
+ }
+ return tx.MarshalBinary()
+}
+
+func (t *Transaction) RawReceipt(ctx context.Context) (hexutil.Bytes, error) {
+ receipt, err := t.getReceipt(ctx)
+ if err != nil || receipt == nil {
+ return hexutil.Bytes{}, err
+ }
+ return receipt.MarshalBinary()
+}
+
type BlockType int
// Block represents an PlatON block.
@@ -746,6 +765,22 @@ func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {
return hexutil.Big{}, nil
}
+func (b *Block) RawHeader(ctx context.Context) (hexutil.Bytes, error) {
+ header, err := b.resolveHeader(ctx)
+ if err != nil {
+ return hexutil.Bytes{}, err
+ }
+ return rlp.EncodeToBytes(header)
+}
+
+func (b *Block) Raw(ctx context.Context) (hexutil.Bytes, error) {
+ block, err := b.resolve(ctx)
+ if err != nil {
+ return hexutil.Bytes{}, err
+ }
+ return rlp.EncodeToBytes(block)
+}
+
// BlockNumberArgs encapsulates arguments to accessors that specify a block number.
type BlockNumberArgs struct {
// TODO: Ideally we could use input unions to allow the query to specify the
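// A self-contained sketch of why Long.UnmarshalGraphQL gains a float64 case above:
// when a numeric GraphQL variable is decoded from JSON into an interface{}, the
// standard decoder produces float64, not int32/int64, so that input previously fell
// through to the "unexpected type" error.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v interface{}
	if err := json.Unmarshal([]byte(`12345`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // float64 12345
}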
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 8752aebb18..42e33daa53 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -29,9 +29,10 @@ import (
"github.com/PlatONnetwork/PlatON-Go/x/gov"
"github.com/PlatONnetwork/PlatON-Go/x/xcom"
"github.com/stretchr/testify/assert"
- "io/ioutil"
+ "io"
"math/big"
"net/http"
+ "os"
"strings"
"testing"
"time"
@@ -43,7 +44,7 @@ import (
)
func TestBuildSchema(t *testing.T) {
- ddir, err := ioutil.TempDir("", "graphql-buildschema")
+ ddir, err := os.MkdirTemp("", "graphql-buildschema")
if err != nil {
t.Fatalf("failed to create temporary datadir: %v", err)
}
@@ -152,7 +153,7 @@ func TestGraphQLBlockSerialization(t *testing.T) {
if err != nil {
t.Fatalf("could not post: %v", err)
}
- bodyBytes, err := ioutil.ReadAll(resp.Body)
+ bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("could not read from response body: %v", err)
}
@@ -188,7 +189,7 @@ func TestGraphQLBlockSerializationEIP2718(t *testing.T) {
if err != nil {
t.Fatalf("could not post: %v", err)
}
- bodyBytes, err := ioutil.ReadAll(resp.Body)
+ bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("could not read from response body: %v", err)
}
diff --git a/graphql/schema.go b/graphql/schema.go
index f031c5bc5f..cfe0fb34c3 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -69,7 +69,7 @@ const schema string = `
transaction: Transaction!
}
- #EIP-2718
+ #EIP-2718
type AccessTuple{
address: Address!
storageKeys : [Bytes32!]!
@@ -137,9 +137,16 @@ const schema string = `
r: BigInt!
s: BigInt!
v: BigInt!
- #Envelope transaction support
+ # Envelope transaction support
type: Int
accessList: [AccessTuple!]
+ # Raw is the canonical encoding of the transaction.
+ # For legacy transactions, it returns the RLP encoding.
+ # For EIP-2718 typed transactions, it returns the type and payload.
+ raw: Bytes!
+ # RawReceipt is the canonical encoding of the receipt. For post EIP-2718 typed transactions
+ # this is equivalent to TxType || ReceiptEncoding.
+ rawReceipt: Bytes!
}
# BlockFilterCriteria encapsulates log filter criteria for a filter applied
@@ -235,6 +242,10 @@ const schema string = `
# EstimateGas estimates the amount of gas that will be required for
# successful execution of a transaction at the current block's state.
estimateGas(data: CallData!): Long!
+ # RawHeader is the RLP encoding of the block's header.
+ rawHeader: Bytes!
+ # Raw is the RLP encoding of the block.
+ raw: Bytes!
}
# CallData represents the data associated with a local contract call.
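// A sketch of exercising the new raw fields over HTTP; the URL is hypothetical and error
// handling is trimmed. The query only touches fields declared in the schema additions
// above (rawHeader and raw on Block, raw and rawReceipt on Transaction).
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	body := `{"query":"{ block(number: 1) { rawHeader raw transactions { hash raw rawReceipt } } }"}`
	resp, err := http.Post("http://127.0.0.1:6790/graphql", "application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}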
diff --git a/internal/build/download.go b/internal/build/download.go
index 0ed0b5e130..c00bc46a41 100644
--- a/internal/build/download.go
+++ b/internal/build/download.go
@@ -22,7 +22,6 @@ import (
"encoding/hex"
"fmt"
"io"
- "io/ioutil"
"log"
"net/http"
"os"
@@ -37,7 +36,7 @@ type ChecksumDB struct {
// MustLoadChecksums loads a file containing checksums.
func MustLoadChecksums(file string) *ChecksumDB {
- content, err := ioutil.ReadFile(file)
+ content, err := os.ReadFile(file)
if err != nil {
log.Fatal("can't load checksum file: " + err.Error())
}
diff --git a/internal/build/gosrc.go b/internal/build/gosrc.go
index c85e469680..52aae6c811 100644
--- a/internal/build/gosrc.go
+++ b/internal/build/gosrc.go
@@ -20,7 +20,7 @@ import (
"bytes"
"crypto/sha256"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"os"
"path/filepath"
@@ -36,7 +36,7 @@ func EnsureGoSources(version string, hash []byte, path string) error {
return fmt.Errorf("destination path (%s) must end with .tar.gz", path)
}
// If the file exists, validate it's hash
- if archive, err := ioutil.ReadFile(path); err == nil { // Go sources are ~20MB, it's fine to read all
+ if archive, err := os.ReadFile(path); err == nil { // Go sources are ~20MB, it's fine to read all
hasher := sha256.New()
hasher.Write(archive)
have := hasher.Sum(nil)
@@ -59,7 +59,7 @@ func EnsureGoSources(version string, hash []byte, path string) error {
}
defer res.Body.Close()
- archive, err := ioutil.ReadAll(res.Body)
+ archive, err := io.ReadAll(res.Body)
if err != nil {
return err
}
@@ -73,7 +73,7 @@ func EnsureGoSources(version string, hash []byte, path string) error {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
- if err := ioutil.WriteFile(path, archive, 0644); err != nil {
+ if err := os.WriteFile(path, archive, 0644); err != nil {
return err
}
fmt.Printf("Downloaded Go %s [%x] into %s\n", version, hash, path)
diff --git a/internal/build/util.go b/internal/build/util.go
index 9dfba6b87e..f9a4409dcc 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -24,7 +24,6 @@ import (
"go/parser"
"go/token"
"io"
- "io/ioutil"
"log"
"os"
"os/exec"
@@ -87,7 +86,7 @@ func RunGit(args ...string) string {
// readGitFile returns content of file in .git directory.
func readGitFile(file string) string {
- content, err := ioutil.ReadFile(path.Join(".git", file))
+ content, err := os.ReadFile(path.Join(".git", file))
if err != nil {
return ""
}
@@ -252,7 +251,7 @@ func UploadSFTP(identityFile, host, dir string, files []string) error {
// package paths.
func FindMainPackages(dir string) []string {
var commands []string
- cmds, err := ioutil.ReadDir(dir)
+ cmds, err := os.ReadDir(dir)
if err != nil {
log.Fatal(err)
}
diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go
index 3a77d79c4c..1c66ba9468 100644
--- a/internal/cmdtest/test_cmd.go
+++ b/internal/cmdtest/test_cmd.go
@@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"regexp"
@@ -184,7 +183,7 @@ func (tt *TestCmd) ExpectRegexp(regex string) (*regexp.Regexp, []string) {
func (tt *TestCmd) ExpectExit() {
var output []byte
tt.withKillTimeout(func() {
- output, _ = ioutil.ReadAll(tt.stdout)
+ output, _ = io.ReadAll(tt.stdout)
})
tt.WaitExit()
if tt.Cleanup != nil {
diff --git a/internal/debug/api.go b/internal/debug/api.go
index 24b48e2491..7b1e627d8d 100644
--- a/internal/debug/api.go
+++ b/internal/debug/api.go
@@ -215,8 +215,8 @@ func (*HandlerT) Stacks(filter *string) string {
// E.g. (eth || snap) && !p2p -> (eth in Value || snap in Value) && p2p not in Value
expanded = regexp.MustCompile(`[:/\.A-Za-z0-9_-]+`).ReplaceAllString(expanded, "`$0` in Value")
expanded = regexp.MustCompile("!(`[:/\\.A-Za-z0-9_-]+`)").ReplaceAllString(expanded, "$1 not")
- expanded = strings.Replace(expanded, "||", "or", -1)
- expanded = strings.Replace(expanded, "&&", "and", -1)
+ expanded = strings.ReplaceAll(expanded, "||", "or")
+ expanded = strings.ReplaceAll(expanded, "&&", "and")
log.Info("Expanded filter expression", "filter", *filter, "expanded", expanded)
expr, err := bexpr.CreateEvaluator(expanded)
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 2ab6e45a3c..bb5ce13038 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -1925,6 +1925,33 @@ func (api *PublicDebugAPI) GetBlockRlp(ctx context.Context, number uint64) (hexu
return rlp.EncodeToBytes(block)
}
+// GetRawReceipts retrieves the binary-encoded raw receipts of a single block.
+func (api *PublicDebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) {
+ var hash common.Hash
+ if h, ok := blockNrOrHash.Hash(); ok {
+ hash = h
+ } else {
+ block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash)
+ if err != nil {
+ return nil, err
+ }
+ hash = block.Hash()
+ }
+ receipts, err := api.b.GetReceipts(ctx, hash)
+ if err != nil {
+ return nil, err
+ }
+ result := make([]hexutil.Bytes, len(receipts))
+ for i, receipt := range receipts {
+ b, err := receipt.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ result[i] = b
+ }
+ return result, nil
+}
+
// PrintBlock retrieves a block and returns its pretty printed form.
func (api *PublicDebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) {
block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number))
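
GetRawReceipts resolves the block by hash or number and returns each receipt's consensus (binary) encoding. A minimal client-side sketch, assuming a node exposing the debug namespace over HTTP on localhost:6789 (adjust the endpoint for your configuration):

    // Sketch: calling debug_getRawReceipts over JSON-RPC.
    package main

    import (
    	"context"
    	"fmt"

    	"github.com/PlatONnetwork/PlatON-Go/common/hexutil"
    	"github.com/PlatONnetwork/PlatON-Go/rpc"
    )

    func main() {
    	client, err := rpc.Dial("http://localhost:6789")
    	if err != nil {
    		panic(err)
    	}
    	defer client.Close()

    	var receipts []hexutil.Bytes
    	// The block parameter accepts a number, a hash, or a tag such as "latest".
    	if err := client.CallContext(context.Background(), &receipts, "debug_getRawReceipts", "latest"); err != nil {
    		panic(err)
    	}
    	fmt.Printf("got %d raw receipts\n", len(receipts))
    }
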
diff --git a/internal/ethapi/dbapi.go b/internal/ethapi/dbapi.go
new file mode 100644
index 0000000000..9438ae2843
--- /dev/null
+++ b/internal/ethapi/dbapi.go
@@ -0,0 +1,43 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethapi
+
+import (
+ "github.com/PlatONnetwork/PlatON-Go/common"
+ "github.com/PlatONnetwork/PlatON-Go/common/hexutil"
+)
+
+// DbGet returns the raw value of a key stored in the database.
+func (api *PrivateDebugAPI) DbGet(key string) (hexutil.Bytes, error) {
+ blob, err := common.ParseHexOrString(key)
+ if err != nil {
+ return nil, err
+ }
+ return api.b.ChainDb().Get(blob)
+}
+
+// DbAncient retrieves an ancient binary blob from the append-only immutable files.
+// It is a mapping to the `AncientReaderOp.Ancient` method
+func (api *PrivateDebugAPI) DbAncient(kind string, number uint64) (hexutil.Bytes, error) {
+ return api.b.ChainDb().Ancient(kind, number)
+}
+
+// DbAncients returns the ancient item numbers in the ancient store.
+// It is a mapping to the `AncientReaderOp.Ancients` method
+func (api *PrivateDebugAPI) DbAncients() (uint64, error) {
+ return api.b.ChainDb().Ancients()
+}
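
DbGet accepts its key either as 0x-prefixed hex or as a plain string, because it routes the argument through common.ParseHexOrString (see the hunk above). A small sketch of that behaviour, with illustrative key values only:

    // Sketch: how the key argument to debug_dbGet is interpreted.
    package main

    import (
    	"fmt"

    	"github.com/PlatONnetwork/PlatON-Go/common"
    )

    func main() {
    	for _, key := range []string{"0x48656164657248617368", "secure-key-"} {
    		blob, err := common.ParseHexOrString(key)
    		if err != nil {
    			panic(err)
    		}
    		// Hex input is decoded; non-hex input is passed through as raw bytes.
    		fmt.Printf("%q -> database key %x\n", key, blob)
    	}
    }
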
diff --git a/internal/guide/guide_test.go b/internal/guide/guide_test.go
index b9d8f6e32a..163386eb11 100644
--- a/internal/guide/guide_test.go
+++ b/internal/guide/guide_test.go
@@ -23,7 +23,6 @@
package guide
import (
- "io/ioutil"
"math/big"
"os"
"path/filepath"
@@ -38,7 +37,7 @@ import (
// Tests that the account management snippets work correctly.
func TestAccountManagement(t *testing.T) {
// Create a temporary folder to work with
- workdir, err := ioutil.TempDir("", "")
+ workdir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("Failed to create temporary work dir: %v", err)
}
diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go
index e266002152..71c987dbae 100644
--- a/internal/jsre/deps/bindata.go
+++ b/internal/jsre/deps/bindata.go
@@ -11,7 +11,6 @@ import (
"crypto/sha256"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -114,7 +113,7 @@ func web3Js() (*asset, error) {
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
+ canonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
@@ -152,7 +151,7 @@ func MustAssetString(name string) string {
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
+ canonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
@@ -166,7 +165,7 @@ func AssetInfo(name string) (os.FileInfo, error) {
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
- canonicalName := strings.Replace(name, "\\", "/", -1)
+ canonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
@@ -226,7 +225,7 @@ const AssetDebug = false
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
- canonicalName := strings.Replace(name, "\\", "/", -1)
+ canonicalName := strings.ReplaceAll(name, "\\", "/")
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
@@ -269,7 +268,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
- err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
@@ -294,6 +293,6 @@ func RestoreAssets(dir, name string) error {
}
func _filePath(dir, name string) string {
- canonicalName := strings.Replace(name, "\\", "/", -1)
+ canonicalName := strings.ReplaceAll(name, "\\", "/")
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js
index 38682c3feb..d990f1e7f4 100644
--- a/internal/jsre/deps/web3.js
+++ b/internal/jsre/deps/web3.js
@@ -4012,7 +4012,7 @@ require=(function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c=
};
var isPredefinedBlockNumber = function (blockNumber) {
- return blockNumber === 'latest' || blockNumber === 'pending' || blockNumber === 'earliest';
+ return blockNumber === 'latest' || blockNumber === 'pending' || blockNumber === 'earliest' || blockNumber === 'finalized';
};
var inputDefaultBlockNumberFormatter = function (blockNumber) {
diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go
index b414d369df..e2983963f7 100644
--- a/internal/jsre/jsre.go
+++ b/internal/jsre/jsre.go
@@ -23,8 +23,8 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"math/rand"
+ "os"
"time"
"github.com/dop251/goja"
@@ -255,7 +255,7 @@ func (re *JSRE) Stop(waitForCallbacks bool) {
// Exec(file) loads and runs the contents of a file
// if a relative path is given, the jsre's assetPath is used
func (re *JSRE) Exec(file string) error {
- code, err := ioutil.ReadFile(common.AbsolutePath(re.assetPath, file))
+ code, err := os.ReadFile(common.AbsolutePath(re.assetPath, file))
if err != nil {
return err
}
@@ -321,7 +321,7 @@ func (re *JSRE) Compile(filename string, src string) (err error) {
func (re *JSRE) loadScript(call Call) (goja.Value, error) {
file := call.Argument(0).ToString().String()
file = common.AbsolutePath(re.assetPath, file)
- source, err := ioutil.ReadFile(file)
+ source, err := os.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("Could not read file %s: %v", file, err)
}
diff --git a/internal/jsre/jsre_test.go b/internal/jsre/jsre_test.go
index 57acdaed90..1645cfe583 100644
--- a/internal/jsre/jsre_test.go
+++ b/internal/jsre/jsre_test.go
@@ -17,7 +17,6 @@
package jsre
import (
- "io/ioutil"
"os"
"path"
"reflect"
@@ -41,12 +40,12 @@ func (no *testNativeObjectBinding) TestMethod(call goja.FunctionCall) goja.Value
}
func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) {
- dir, err := ioutil.TempDir("", "jsre-test")
+ dir, err := os.MkdirTemp("", "jsre-test")
if err != nil {
t.Fatal("cannot create temporary directory:", err)
}
if testjs != "" {
- if err := ioutil.WriteFile(path.Join(dir, "test.js"), []byte(testjs), os.ModePerm); err != nil {
+ if err := os.WriteFile(path.Join(dir, "test.js"), []byte(testjs), os.ModePerm); err != nil {
t.Fatal("cannot create test.js:", err)
}
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index a5be9eb30c..8a47a0d7d2 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -152,6 +152,11 @@ web3._extend({
call: 'debug_getBlockRlp',
params: 1
}),
+ new web3._extend.Method({
+ name: 'getRawReceipts',
+ call: 'debug_getRawReceipts',
+ params: 1
+ }),
//new web3._extend.Method({
// name: 'setHead',
// call: 'debug_setHead',
@@ -379,6 +384,21 @@ web3._extend({
params: 2,
inputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],
}),
+ new web3._extend.Method({
+ name: 'dbGet',
+ call: 'debug_dbGet',
+ params: 1
+ }),
+ new web3._extend.Method({
+ name: 'dbAncient',
+ call: 'debug_dbAncient',
+ params: 2
+ }),
+ new web3._extend.Method({
+ name: 'dbAncients',
+ call: 'debug_dbAncients',
+ params: 0
+ }),
new web3._extend.Method({
name: 'consensusStatus',
call: 'debug_consensusStatus',
diff --git a/metrics/librato/client.go b/metrics/librato/client.go
index f7aed3e4ef..eebe20521b 100644
--- a/metrics/librato/client.go
+++ b/metrics/librato/client.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
)
@@ -93,7 +93,7 @@ func (c *LibratoClient) PostMetrics(batch Batch) (err error) {
if resp.StatusCode != http.StatusOK {
var body []byte
- if body, err = ioutil.ReadAll(resp.Body); err != nil {
+ if body, err = io.ReadAll(resp.Body); err != nil {
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
}
err = fmt.Errorf("unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
index df36da0ade..029c99870e 100644
--- a/metrics/metrics_test.go
+++ b/metrics/metrics_test.go
@@ -2,7 +2,7 @@ package metrics
import (
"fmt"
- "io/ioutil"
+ "io"
"log"
"sync"
"testing"
@@ -13,7 +13,7 @@ const FANOUT = 128
// Stop the compiler from complaining during debugging.
var (
- _ = ioutil.Discard
+ _ = io.Discard
_ = log.LstdFlags
)
@@ -78,7 +78,7 @@ func BenchmarkMetrics(b *testing.B) {
//log.Println("done Write")
return
default:
- WriteOnce(r, ioutil.Discard)
+ WriteOnce(r, io.Discard)
}
}
}()
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
index 85cf04e1f8..0b5933f4f2 100644
--- a/metrics/prometheus/collector.go
+++ b/metrics/prometheus/collector.go
@@ -116,5 +116,5 @@ func (c *collector) writeSummaryPercentile(name, p string, value interface{}) {
}
func mutateKey(key string) string {
- return strings.Replace(key, "/", "_", -1)
+ return strings.ReplaceAll(key, "/", "_")
}
diff --git a/miner/stress_cbft.go b/miner/stress_cbft.go
index 14b0bf330a..56183e52d3 100644
--- a/miner/stress_cbft.go
+++ b/miner/stress_cbft.go
@@ -25,7 +25,6 @@ import (
"crypto/ecdsa"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/eth/ethconfig"
- "io/ioutil"
"math/big"
"math/rand"
"os"
@@ -178,7 +177,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
func makeSealer(genesis *core.Genesis, nodes []string) (*node.Node, error) {
// Define the basic configurations for the Ethereum node
- datadir, _ := ioutil.TempDir("", "")
+ datadir, _ := os.MkdirTemp("", "")
config := &node.Config{
Name: "platon",
diff --git a/node/config.go b/node/config.go
index 3e98479ebc..c69045c8c2 100644
--- a/node/config.go
+++ b/node/config.go
@@ -19,7 +19,6 @@ package node
import (
"crypto/ecdsa"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -503,7 +502,7 @@ func getKeyStoreDir(conf *Config) (string, bool, error) {
isEphemeral := false
if keydir == "" {
// There is no datadir.
- keydir, err = ioutil.TempDir("", "go-ethereum-keystore")
+ keydir, err = os.MkdirTemp("", "go-ethereum-keystore")
isEphemeral = true
}
diff --git a/node/config_test.go b/node/config_test.go
index cc56834e9b..42ff0ba310 100644
--- a/node/config_test.go
+++ b/node/config_test.go
@@ -18,7 +18,6 @@ package node
import (
"bytes"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -32,7 +31,7 @@ import (
// ones or automatically generated temporary ones.
func TestDatadirCreation(t *testing.T) {
// Create a temporary data dir and check that it can be used by a node
- dir, err := ioutil.TempDir("", "")
+ dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("failed to create manual data dir: %v", err)
}
@@ -58,7 +57,7 @@ func TestDatadirCreation(t *testing.T) {
t.Fatalf("freshly created datadir not accessible: %v", err)
}
// Verify that an impossible datadir fails creation
- file, err := ioutil.TempFile("", "")
+ file, err := os.CreateTemp("", "")
if err != nil {
t.Fatalf("failed to create temporary file: %v", err)
}
@@ -109,7 +108,7 @@ func TestIPCPathResolution(t *testing.T) {
// ephemeral.
func TestNodeKeyPersistency(t *testing.T) {
// Create a temporary folder and make sure no key is present
- dir, err := ioutil.TempDir("", "node-test")
+ dir, err := os.MkdirTemp("", "node-test")
if err != nil {
t.Fatalf("failed to create temporary data directory: %v", err)
}
@@ -137,7 +136,7 @@ func TestNodeKeyPersistency(t *testing.T) {
if _, err = crypto.LoadECDSA(keyfile); err != nil {
t.Fatalf("failed to load freshly persisted node key: %v", err)
}
- blob1, err := ioutil.ReadFile(keyfile)
+ blob1, err := os.ReadFile(keyfile)
if err != nil {
t.Fatalf("failed to read freshly persisted node key: %v", err)
}
@@ -145,7 +144,7 @@ func TestNodeKeyPersistency(t *testing.T) {
// Configure a new node and ensure the previously persisted key is loaded
config = &Config{Name: "unit-test", DataDir: dir}
config.NodeKey()
- blob2, err := ioutil.ReadFile(filepath.Join(keyfile))
+ blob2, err := os.ReadFile(filepath.Join(keyfile))
if err != nil {
t.Fatalf("failed to read previously persisted node key: %v", err)
}
diff --git a/node/node_test.go b/node/node_test.go
index 292040e414..ef9b41b540 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"os"
@@ -87,7 +86,7 @@ func TestNodeStartMultipleTimes(t *testing.T) {
// Tests that if the data dir is already in use, an appropriate error is returned.
func TestNodeUsedDataDir(t *testing.T) {
// Create a temporary folder to use as the data directory
- dir, err := ioutil.TempDir("", "")
+ dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("failed to create temporary data directory: %v", err)
}
diff --git a/node/rpcstack.go b/node/rpcstack.go
index d82129af00..f2542718b5 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -21,7 +21,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"sort"
@@ -360,7 +359,7 @@ func (h *httpServer) wsAllowed() bool {
// isWebsocket checks the header of an http request for a websocket upgrade request.
func isWebsocket(r *http.Request) bool {
- return strings.ToLower(r.Header.Get("Upgrade")) == "websocket" &&
+ return strings.EqualFold(r.Header.Get("Upgrade"), "websocket") &&
strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade")
}
@@ -446,7 +445,7 @@ func (h *virtualHostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var gzPool = sync.Pool{
New: func() interface{} {
- w := gzip.NewWriter(ioutil.Discard)
+ w := gzip.NewWriter(io.Discard)
return w
},
}
diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go
index 62cfba0766..f31fa0c307 100644
--- a/node/rpcstack_test.go
+++ b/node/rpcstack_test.go
@@ -284,7 +284,7 @@ func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response
}
for i := 0; i < len(extraHeaders); i += 2 {
key, value := extraHeaders[i], extraHeaders[i+1]
- if strings.ToLower(key) == "host" {
+ if strings.EqualFold(key, "host") {
req.Host = value
} else {
req.Header.Set(key, value)
diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go
index ce99cf13d0..d6cb996b52 100644
--- a/p2p/discover/v5wire/encoding_test.go
+++ b/p2p/discover/v5wire/encoding_test.go
@@ -23,7 +23,6 @@ import (
"errors"
"flag"
"fmt"
- "io/ioutil"
"net"
"os"
"path/filepath"
@@ -580,7 +579,7 @@ func (n *handshakeTestNode) id() enode.ID {
// hexFile reads the given file and decodes the hex data contained in it.
// Whitespace and any lines beginning with the # character are ignored.
func hexFile(file string) []byte {
- fileContent, err := ioutil.ReadFile(file)
+ fileContent, err := os.ReadFile(file)
if err != nil {
panic(err)
}
diff --git a/p2p/enode/nodedb_test.go b/p2p/enode/nodedb_test.go
index dbb26a21fa..5c07612a27 100644
--- a/p2p/enode/nodedb_test.go
+++ b/p2p/enode/nodedb_test.go
@@ -19,7 +19,6 @@ package enode
import (
"bytes"
"fmt"
- "io/ioutil"
"net"
"os"
"path/filepath"
@@ -300,7 +299,7 @@ func testSeedQuery() error {
}
func TestDBPersistency(t *testing.T) {
- root, err := ioutil.TempDir("", "nodedb-")
+ root, err := os.MkdirTemp("", "nodedb-")
if err != nil {
t.Fatalf("failed to create temporary data folder: %v", err)
}
diff --git a/p2p/nat/natupnp_test.go b/p2p/nat/natupnp_test.go
index 17483a7036..9072451d50 100644
--- a/p2p/nat/natupnp_test.go
+++ b/p2p/nat/natupnp_test.go
@@ -223,7 +223,7 @@ func (dev *fakeIGD) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func (dev *fakeIGD) replaceListenAddr(resp string) string {
- return strings.Replace(resp, "{{listenAddr}}", dev.listener.Addr().String(), -1)
+ return strings.ReplaceAll(resp, "{{listenAddr}}", dev.listener.Addr().String())
}
func (dev *fakeIGD) listen() (err error) {
diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go
index cfdcc9a659..fc03311c43 100644
--- a/p2p/simulations/examples/ping-pong.go
+++ b/p2p/simulations/examples/ping-pong.go
@@ -19,7 +19,7 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"os"
"sync/atomic"
@@ -64,7 +64,7 @@ func main() {
adapter = adapters.NewSimAdapter(services)
case "exec":
- tmpdir, err := ioutil.TempDir("", "p2p-example")
+ tmpdir, err := os.MkdirTemp("", "p2p-example")
if err != nil {
log.Crit("error creating temp dir", "err", err)
}
@@ -157,7 +157,7 @@ func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
errC <- err
return
}
- payload, err := ioutil.ReadAll(msg.Payload)
+ payload, err := io.ReadAll(msg.Payload)
if err != nil {
errC <- err
return
diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go
index 9cdf1a7d36..ddf11030a7 100644
--- a/p2p/simulations/http.go
+++ b/p2p/simulations/http.go
@@ -22,8 +22,8 @@ import (
"context"
"encoding/json"
"fmt"
+ "html"
"io"
- "io/ioutil"
"net/http"
"strconv"
"strings"
@@ -113,7 +113,7 @@ func (c *Client) SubscribeNetwork(events chan *Event, opts SubscribeOpts) (event
return nil, err
}
if res.StatusCode != http.StatusOK {
- response, _ := ioutil.ReadAll(res.Body)
+ response, _ := io.ReadAll(res.Body)
res.Body.Close()
return nil, fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
}
@@ -253,7 +253,7 @@ func (c *Client) Send(method, path string, in, out interface{}) error {
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
- response, _ := ioutil.ReadAll(res.Body)
+ response, _ := io.ReadAll(res.Body)
return fmt.Errorf("unexpected HTTP status: %s: %s", res.Status, response)
}
if out != nil {
@@ -338,7 +338,7 @@ func (s *Server) StartMocker(w http.ResponseWriter, req *http.Request) {
mockerType := req.FormValue("mocker-type")
mockerFn := LookupMocker(mockerType)
if mockerFn == nil {
- http.Error(w, fmt.Sprintf("unknown mocker type %q", mockerType), http.StatusBadRequest)
+ http.Error(w, fmt.Sprintf("unknown mocker type %q", html.EscapeString(mockerType)), http.StatusBadRequest)
return
}
nodeCount, err := strconv.Atoi(req.FormValue("node-count"))
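
The mocker-type value comes straight from the request form and is echoed back in the error message, so it is now HTML-escaped before being written to the response. A tiny sketch of what html.EscapeString does to such input:

    // Sketch: escaping a user-supplied value before echoing it in an HTTP error.
    package main

    import (
    	"fmt"
    	"html"
    )

    func main() {
    	in := `<script>alert(1)</script>`
    	fmt.Println(html.EscapeString(in)) // &lt;script&gt;alert(1)&lt;/script&gt;
    }
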
diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go
index 166ff58668..d0c3ee69e0 100644
--- a/p2p/simulations/network_test.go
+++ b/p2p/simulations/network_test.go
@@ -448,7 +448,7 @@ func TestGetNodeIDs(t *testing.T) {
numNodes := 5
nodes, err := createTestNodes(numNodes, network)
if err != nil {
- t.Fatalf("Could not creat test nodes %v", err)
+ t.Fatalf("Could not create test nodes %v", err)
}
gotNodeIDs := network.GetNodeIDs()
@@ -497,7 +497,7 @@ func TestGetNodes(t *testing.T) {
numNodes := 5
nodes, err := createTestNodes(numNodes, network)
if err != nil {
- t.Fatalf("Could not creat test nodes %v", err)
+ t.Fatalf("Could not create test nodes %v", err)
}
gotNodes := network.GetNodes()
diff --git a/rlp/decode_test.go b/rlp/decode_test.go
index 4ac8e3e0eb..7929642da1 100644
--- a/rlp/decode_test.go
+++ b/rlp/decode_test.go
@@ -1203,7 +1203,7 @@ func encodeTestSlice(n uint) []byte {
}
func unhex(str string) []byte {
- b, err := hex.DecodeString(strings.Replace(str, " ", "", -1))
+ b, err := hex.DecodeString(strings.ReplaceAll(str, " ", ""))
if err != nil {
panic(fmt.Sprintf("invalid hex string: %q", str))
}
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index f8779ee2d4..9e40d57806 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"math/big"
"runtime"
"sync"
@@ -420,7 +419,7 @@ func TestEncodeToReader(t *testing.T) {
if err != nil {
return nil, err
}
- return ioutil.ReadAll(r)
+ return io.ReadAll(r)
})
}
@@ -461,7 +460,7 @@ func TestEncodeToReaderReturnToPool(t *testing.T) {
go func() {
for i := 0; i < 1000; i++ {
_, r, _ := EncodeToReader("foo")
- ioutil.ReadAll(r)
+ io.ReadAll(r)
r.Read(buf)
r.Read(buf)
r.Read(buf)
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
index c871875383..c03081ab11 100644
--- a/rlp/rlpgen/gen_test.go
+++ b/rlp/rlpgen/gen_test.go
@@ -8,7 +8,6 @@ import (
"go/parser"
"go/token"
"go/types"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -51,11 +50,11 @@ func TestOutput(t *testing.T) {
// Set this environment variable to regenerate the test outputs.
if os.Getenv("WRITE_TEST_FILES") != "" {
- ioutil.WriteFile(outputFile, output, 0644)
+ os.WriteFile(outputFile, output, 0644)
}
// Check if output matches.
- wantOutput, err := ioutil.ReadFile(outputFile)
+ wantOutput, err := os.ReadFile(outputFile)
if err != nil {
t.Fatal("error loading expected test output:", err)
}
@@ -71,7 +70,7 @@ func TestOutput(t *testing.T) {
func loadTestSource(file string, typeName string) (*buildContext, *types.Named, error) {
// Load the test input.
- content, err := ioutil.ReadFile(file)
+ content, err := os.ReadFile(file)
if err != nil {
return nil, nil, err
}
diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go
index 1a47525971..ae85dc057a 100644
--- a/rlp/rlpgen/main.go
+++ b/rlp/rlpgen/main.go
@@ -22,7 +22,6 @@ import (
"flag"
"fmt"
"go/types"
- "io/ioutil"
"os"
"golang.org/x/tools/go/packages"
@@ -52,7 +51,7 @@ func main() {
}
if *output == "-" {
os.Stdout.Write(code)
- } else if err := ioutil.WriteFile(*output, code, 0644); err != nil {
+ } else if err := os.WriteFile(*output, code, 0644); err != nil {
fatal(err)
}
}
diff --git a/rpc/http.go b/rpc/http.go
index 18404c060a..9f44649573 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"mime"
"net/http"
"net/url"
@@ -176,12 +175,12 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos
if err != nil {
return nil, err
}
- req, err := http.NewRequestWithContext(ctx, "POST", hc.url, ioutil.NopCloser(bytes.NewReader(body)))
+ req, err := http.NewRequestWithContext(ctx, "POST", hc.url, io.NopCloser(bytes.NewReader(body)))
if err != nil {
return nil, err
}
req.ContentLength = int64(len(body))
- req.GetBody = func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewReader(body)), nil }
+ req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(body)), nil }
// set headers
hc.mu.Lock()
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 9c8a3f1383..60973e926c 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -20,8 +20,8 @@ import (
"bufio"
"bytes"
"io"
- "io/ioutil"
"net"
+ "os"
"path/filepath"
"strings"
"testing"
@@ -52,7 +52,7 @@ func TestServerRegisterName(t *testing.T) {
}
func TestServer(t *testing.T) {
- files, err := ioutil.ReadDir("testdata")
+ files, err := os.ReadDir("testdata")
if err != nil {
t.Fatal("where'd my testdata go?")
}
@@ -70,7 +70,7 @@ func TestServer(t *testing.T) {
func runTestScript(t *testing.T, file string) {
server := newTestServer()
- content, err := ioutil.ReadFile(file)
+ content, err := os.ReadFile(file)
if err != nil {
t.Fatal(err)
}
diff --git a/rpc/types.go b/rpc/types.go
index 10528b6d34..9b076c79d6 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -62,9 +62,10 @@ type jsonWriter interface {
type BlockNumber int64
const (
- PendingBlockNumber = BlockNumber(-2)
- LatestBlockNumber = BlockNumber(-1)
- EarliestBlockNumber = BlockNumber(0)
+ FinalizedBlockNumber = BlockNumber(-3)
+ PendingBlockNumber = BlockNumber(-2)
+ LatestBlockNumber = BlockNumber(-1)
+ EarliestBlockNumber = BlockNumber(0)
)
// UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports:
@@ -89,6 +90,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
case "pending":
*bn = PendingBlockNumber
return nil
+ case "finalized":
+ *bn = FinalizedBlockNumber
+ return nil
}
blckNum, err := hexutil.DecodeUint64(input)
@@ -113,6 +117,8 @@ func (bn BlockNumber) MarshalText() ([]byte, error) {
return []byte("latest"), nil
case PendingBlockNumber:
return []byte("pending"), nil
+ case FinalizedBlockNumber:
+ return []byte("finalized"), nil
default:
return hexutil.Uint64(bn).MarshalText()
}
@@ -159,6 +165,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error {
bn := PendingBlockNumber
bnh.BlockNumber = &bn
return nil
+ case "finalized":
+ bn := FinalizedBlockNumber
+ bnh.BlockNumber = &bn
+ return nil
default:
if len(input) == 66 {
hash := common.Hash{}
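
With the hunks above, BlockNumber gains a FinalizedBlockNumber sentinel (-3) that round-trips through both JSON unmarshalling and text marshalling as the "finalized" tag, and BlockNumberOrHash accepts the same tag. A minimal round-trip sketch using this repository's rpc package:

    // Sketch: round-tripping the new "finalized" block tag.
    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/PlatONnetwork/PlatON-Go/rpc"
    )

    func main() {
    	var bn rpc.BlockNumber
    	if err := json.Unmarshal([]byte(`"finalized"`), &bn); err != nil {
    		panic(err)
    	}
    	fmt.Println(int64(bn)) // -3, i.e. rpc.FinalizedBlockNumber

    	text, _ := bn.MarshalText()
    	fmt.Println(string(text)) // "finalized"
    }
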
diff --git a/signer/core/api_test.go b/signer/core/api_test.go
index 1ac84e5b9e..f5bb872abd 100644
--- a/signer/core/api_test.go
+++ b/signer/core/api_test.go
@@ -24,7 +24,6 @@ import (
"github.com/PlatONnetwork/PlatON-Go/signer/core/apitypes"
"github.com/PlatONnetwork/PlatON-Go/signer/fourbyte"
"github.com/PlatONnetwork/PlatON-Go/signer/storage"
- "io/ioutil"
"math/big"
"os"
"path/filepath"
@@ -108,7 +107,7 @@ func (ui *headlessUi) ShowInfo(message string) {
}
func tmpDirName(t *testing.T) string {
- d, err := ioutil.TempDir("", "eth-keystore-test")
+ d, err := os.MkdirTemp("", "eth-keystore-test")
if err != nil {
t.Fatal(err)
}
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index 5c3bc18690..ef01cb2915 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -144,13 +144,11 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType
if err := rlp.DecodeBytes(cliqueData, header); err != nil {
return nil, useEthereumV, err
}
- // The incoming clique header is already truncated, sent to us with a extradata already shortened
- if len(header.Extra) < 65 {
- // Need to add it back, to get a suitable length for hashing
- newExtra := make([]byte, len(header.Extra)+65)
- copy(newExtra, header.Extra)
- header.Extra = newExtra
- }
+ // Add space in the extradata to put the signature
+ newExtra := make([]byte, len(header.Extra)+65)
+ copy(newExtra, header.Extra)
+ header.Extra = newExtra
+
// Get back the rlp data, encoded by us
sighash, cliqueRlp, err := cliqueHeaderHashAndRlp(header)
if err != nil {
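
The signer now unconditionally reserves 65 extra bytes at the end of the clique header's extra-data before hashing, leaving room for the secp256k1 signature (r || s || v). A minimal illustration of that padding step, with a placeholder vanity prefix:

    // Sketch: extending header extra-data to reserve space for a 65-byte clique seal.
    package main

    import "fmt"

    func main() {
    	extra := []byte("vanity") // whatever extra-data the header arrived with
    	newExtra := make([]byte, len(extra)+65)
    	copy(newExtra, extra)
    	fmt.Printf("extra grew from %d to %d bytes\n", len(extra), len(newExtra))
    }
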
diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go
index 7c4440cc26..632c648e86 100644
--- a/signer/core/signed_data_test.go
+++ b/signer/core/signed_data_test.go
@@ -24,7 +24,7 @@ import (
"github.com/PlatONnetwork/PlatON-Go/crypto"
"github.com/PlatONnetwork/PlatON-Go/signer/core"
"github.com/PlatONnetwork/PlatON-Go/signer/core/apitypes"
- "io/ioutil"
+ "os"
"path"
"strings"
"testing"
@@ -352,7 +352,7 @@ func sign(typedData apitypes.TypedData) ([]byte, []byte, error) {
}
func TestJsonFiles(t *testing.T) {
- testfiles, err := ioutil.ReadDir("testdata/")
+ testfiles, err := os.ReadDir("testdata/")
if err != nil {
t.Fatalf("failed reading files: %v", err)
}
@@ -361,7 +361,7 @@ func TestJsonFiles(t *testing.T) {
continue
}
expectedFailure := strings.HasPrefix(fInfo.Name(), "expfail")
- data, err := ioutil.ReadFile(path.Join("testdata", fInfo.Name()))
+ data, err := os.ReadFile(path.Join("testdata", fInfo.Name()))
if err != nil {
t.Errorf("Failed to read file %v: %v", fInfo.Name(), err)
continue
@@ -387,13 +387,13 @@ func TestJsonFiles(t *testing.T) {
// crashes or hangs.
func TestFuzzerFiles(t *testing.T) {
corpusdir := path.Join("testdata", "fuzzing")
- testfiles, err := ioutil.ReadDir(corpusdir)
+ testfiles, err := os.ReadDir(corpusdir)
if err != nil {
t.Fatalf("failed reading files: %v", err)
}
verbose := false
for i, fInfo := range testfiles {
- data, err := ioutil.ReadFile(path.Join(corpusdir, fInfo.Name()))
+ data, err := os.ReadFile(path.Join(corpusdir, fInfo.Name()))
if err != nil {
t.Errorf("Failed to read file %v: %v", fInfo.Name(), err)
continue
diff --git a/signer/core/uiapi.go b/signer/core/uiapi.go
index bd239d70c2..cff598a313 100644
--- a/signer/core/uiapi.go
+++ b/signer/core/uiapi.go
@@ -22,8 +22,8 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
"math/big"
+ "os"
"github.com/PlatONnetwork/PlatON-Go/accounts"
"github.com/PlatONnetwork/PlatON-Go/accounts/keystore"
@@ -176,7 +176,7 @@ func (s *UIServerAPI) Export(ctx context.Context, addr common.Address) (json.Raw
if wallet.URL().Scheme != keystore.KeyStoreScheme {
return nil, fmt.Errorf("account is not a keystore-account")
}
- return ioutil.ReadFile(wallet.URL().Path)
+ return os.ReadFile(wallet.URL().Path)
}
// Import tries to import the given keyJSON in the local keystore. The keyJSON data is expected to be
diff --git a/signer/fourbyte/4byte.go b/signer/fourbyte/4byte.go
index ff09f0ba78..e3871d1ae3 100644
--- a/signer/fourbyte/4byte.go
+++ b/signer/fourbyte/4byte.go
@@ -5,7 +5,6 @@ package fourbyte
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -146918,7 +146917,7 @@ func _4byteJson() (*asset, error) {
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
@@ -146944,7 +146943,7 @@ func MustAsset(name string) []byte {
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
@@ -146985,7 +146984,7 @@ var _bindata = map[string]func() (*asset, error){
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
@@ -147027,7 +147026,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
- err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
@@ -147056,6 +147055,6 @@ func RestoreAssets(dir, name string) error {
}
func _filePath(dir, name string) string {
- cannonicalName := strings.Replace(name, "\\", "/", -1)
+ cannonicalName := strings.ReplaceAll(name, "\\", "/")
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
diff --git a/signer/fourbyte/fourbyte.go b/signer/fourbyte/fourbyte.go
index 01988dcab0..672498277a 100644
--- a/signer/fourbyte/fourbyte.go
+++ b/signer/fourbyte/fourbyte.go
@@ -25,7 +25,6 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
- "io/ioutil"
"os"
)
@@ -86,7 +85,7 @@ func NewWithFile(path string) (*Database, error) {
}
// Custom file may not exist. Will be created during save, if needed.
if _, err := os.Stat(path); err == nil {
- if blob, err = ioutil.ReadFile(path); err != nil {
+ if blob, err = os.ReadFile(path); err != nil {
return nil, err
}
if err := json.Unmarshal(blob, &db.custom); err != nil {
@@ -140,5 +139,5 @@ func (db *Database) AddSelector(selector string, data []byte) error {
if err != nil {
return err
}
- return ioutil.WriteFile(db.customPath, blob, 0600)
+ return os.WriteFile(db.customPath, blob, 0600)
}
diff --git a/signer/fourbyte/fourbyte_test.go b/signer/fourbyte/fourbyte_test.go
index 7aaff846b1..5d62d26dae 100644
--- a/signer/fourbyte/fourbyte_test.go
+++ b/signer/fourbyte/fourbyte_test.go
@@ -18,7 +18,7 @@ package fourbyte
import (
"fmt"
- "io/ioutil"
+ "os"
"strings"
"testing"
@@ -57,7 +57,7 @@ func TestEmbeddedDatabase(t *testing.T) {
// Tests that custom 4byte datasets can be handled too.
func TestCustomDatabase(t *testing.T) {
// Create a new custom 4byte database with no embedded component
- tmpdir, err := ioutil.TempDir("", "signer-4byte-test")
+ tmpdir, err := os.MkdirTemp("", "signer-4byte-test")
if err != nil {
t.Fatal(err)
}
diff --git a/signer/storage/aes_gcm_storage.go b/signer/storage/aes_gcm_storage.go
index 8a84586090..7ab7b1414f 100644
--- a/signer/storage/aes_gcm_storage.go
+++ b/signer/storage/aes_gcm_storage.go
@@ -23,7 +23,6 @@ import (
"crypto/rand"
"encoding/json"
"io"
- "io/ioutil"
"os"
"github.com/PlatONnetwork/PlatON-Go/log"
@@ -115,7 +114,7 @@ func (s *AESEncryptedStorage) Del(key string) {
// readEncryptedStorage reads the file with encrypted creds
func (s *AESEncryptedStorage) readEncryptedStorage() (map[string]storedCredential, error) {
creds := make(map[string]storedCredential)
- raw, err := ioutil.ReadFile(s.filename)
+ raw, err := os.ReadFile(s.filename)
if err != nil {
if os.IsNotExist(err) {
@@ -137,7 +136,7 @@ func (s *AESEncryptedStorage) writeEncryptedStorage(creds map[string]storedCrede
if err != nil {
return err
}
- if err = ioutil.WriteFile(s.filename, raw, 0600); err != nil {
+ if err = os.WriteFile(s.filename, raw, 0600); err != nil {
return err
}
return nil
diff --git a/signer/storage/aes_gcm_storage_test.go b/signer/storage/aes_gcm_storage_test.go
index 0fd1b905ab..aafab17b52 100644
--- a/signer/storage/aes_gcm_storage_test.go
+++ b/signer/storage/aes_gcm_storage_test.go
@@ -20,7 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"testing"
"github.com/PlatONnetwork/PlatON-Go/common"
@@ -62,7 +62,7 @@ func TestFileStorage(t *testing.T) {
CipherText: common.Hex2Bytes("2df87baf86b5073ef1f03e3cc738de75b511400f5465bb0ddeacf47ae4dc267d"),
},
}
- d, err := ioutil.TempDir("", "eth-encrypted-storage-test")
+ d, err := os.MkdirTemp("", "eth-encrypted-storage-test")
if err != nil {
t.Fatal(err)
}
@@ -95,7 +95,7 @@ func TestFileStorage(t *testing.T) {
func TestEnd2End(t *testing.T) {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(3), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
- d, err := ioutil.TempDir("", "eth-encrypted-storage-test")
+ d, err := os.MkdirTemp("", "eth-encrypted-storage-test")
if err != nil {
t.Fatal(err)
}
@@ -120,7 +120,7 @@ func TestSwappedKeys(t *testing.T) {
// K1:V1, K2:V2 can be swapped into K1:V2, K2:V1
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(3), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
- d, err := ioutil.TempDir("", "eth-encrypted-storage-test")
+ d, err := os.MkdirTemp("", "eth-encrypted-storage-test")
if err != nil {
t.Fatal(err)
}
@@ -134,7 +134,7 @@ func TestSwappedKeys(t *testing.T) {
// Now make a modified copy
creds := make(map[string]storedCredential)
- raw, err := ioutil.ReadFile(s1.filename)
+ raw, err := os.ReadFile(s1.filename)
if err != nil {
t.Fatal(err)
}
@@ -149,7 +149,7 @@ func TestSwappedKeys(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err = ioutil.WriteFile(s1.filename, raw, 0600); err != nil {
+ if err = os.WriteFile(s1.filename, raw, 0600); err != nil {
t.Fatal(err)
}
}
diff --git a/tests/init_test.go b/tests/init_test.go
index a3a7675d20..3086c6e547 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -21,7 +21,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -55,7 +54,7 @@ var (
)
func readJSON(reader io.Reader, value interface{}) error {
- data, err := ioutil.ReadAll(reader)
+ data, err := io.ReadAll(reader)
if err != nil {
return fmt.Errorf("error reading JSON file: %v", err)
}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 0b41dc61df..e2a9e1dfe3 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -207,15 +207,14 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
if _, err := core.ApplyMessage(evm, msg, gaspool); err != nil {
statedb.RevertToSnapshot(snapshot)
}
-
- // Commit block
- statedb.Commit(true)
// Add 0-value mining reward. This only makes a difference in the cases
// where
// - the coinbase suicided, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
statedb.AddBalance(block.Coinbase(), new(big.Int))
+ // Commit block
+ statedb.Commit(true)
// And _now_ get the state root
root := statedb.IntermediateRoot(true)
return snaps, statedb, root, nil
diff --git a/trie/sync.go b/trie/sync.go
index f075575934..f758c011b7 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -72,9 +72,9 @@ type request struct {
// - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
type SyncPath [][]byte
-// newSyncPath converts an expanded trie path from nibble form into a compact
+// NewSyncPath converts an expanded trie path from nibble form into a compact
// version that can be sent over the network.
-func newSyncPath(path []byte) SyncPath {
+func NewSyncPath(path []byte) SyncPath {
// If the hash is from the account trie, append a single item, if it
// is from the a storage trie, append a tuple. Note, the length 64 is
// clashing between account leaf and storage root. It's fine though
@@ -240,7 +240,7 @@ func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []
hash := item.(common.Hash)
if req, ok := s.nodeReqs[hash]; ok {
nodeHashes = append(nodeHashes, hash)
- nodePaths = append(nodePaths, newSyncPath(req.path))
+ nodePaths = append(nodePaths, NewSyncPath(req.path))
} else {
codeHashes = append(codeHashes, hash)
}
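
Exporting NewSyncPath lets code outside the trie package build the compact, network-ready path representation from a nibble-encoded trie path. A minimal usage sketch with an illustrative short account-trie path:

    // Sketch: converting a nibble-form path into the compact SyncPath encoding.
    package main

    import (
    	"fmt"

    	"github.com/PlatONnetwork/PlatON-Go/trie"
    )

    func main() {
    	// A short account-trie path in nibble form (each byte holds one nibble).
    	nibbles := []byte{0x0, 0x1, 0x2, 0x3}
    	path := trie.NewSyncPath(nibbles)
    	// Paths shorter than 64 nibbles compact into a single element.
    	fmt.Printf("compact path has %d element(s): %x\n", len(path), path[0])
    }
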
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 57b011373d..90b1f91600 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -21,7 +21,6 @@ import (
"encoding/binary"
"fmt"
"github.com/PlatONnetwork/PlatON-Go/core/types"
- "io/ioutil"
"math/big"
"math/rand"
"os"
@@ -566,7 +565,7 @@ func BenchmarkHash(b *testing.B) {
}
func tempDB() (string, *Database) {
- dir, err := ioutil.TempDir("", "trie-bench")
+ dir, err := os.MkdirTemp("", "trie-bench")
if err != nil {
panic(fmt.Sprintf("can't create temporary directory: %v", err))
}