diff --git a/internal/third_party/pkgsite/README.md b/internal/third_party/pkgsite/README.md
index 82ad47a4..6285eda2 100644
--- a/internal/third_party/pkgsite/README.md
+++ b/internal/third_party/pkgsite/README.md
@@ -1,12 +1,12 @@
 # go/pkgsite/source
 
-Vendored from ,
+Vendored from ,
 because the source package is internal, and there's no plan to move it out
 anytime soon: .
 
 The entire source folder can be downloaded via:
 ```bash
-curl -LO https://go.googlesource.com/pkgsite/+archive/beceacdece62d95d6dc41a9b5f09da7b2a021020/internal.tar.gz
+curl -LO https://go.googlesource.com/pkgsite/+archive/8d5d35d6a95412a4720d586912c1a15f7da019f9/internal.tar.gz
 ```
 
 Local modifications:
diff --git a/internal/third_party/pkgsite/derrors/derrors.go b/internal/third_party/pkgsite/derrors/derrors.go
index 2c75fd14..94f0b9ae 100644
--- a/internal/third_party/pkgsite/derrors/derrors.go
+++ b/internal/third_party/pkgsite/derrors/derrors.go
@@ -9,19 +9,225 @@ package derrors
 import (
 	"errors"
 	"fmt"
+	"net/http"
+	"runtime"
 )
 
 //lint:file-ignore ST1012 prefixing error values with Err would stutter
 
 var (
+	// Unsupported operation indicates that a requested operation cannot be performed, because it
+	// is unsupported. It is used here instead of errors.ErrUnsupported until we are able to depend
+	// on Go 1.21 in the pkgsite repo.
+	Unsupported = errors.New("unsupported operation")
+
+	// HasIncompletePackages indicates a module containing packages that
+	// were processed with a 60x error code.
+	HasIncompletePackages = errors.New("has incomplete packages")
+
 	// NotFound indicates that a requested entity was not found (HTTP 404).
 	NotFound = errors.New("not found")
 
+	// NotFetched means that the proxy returned "not found" with the
+	// Disable-Module-Fetch header set. We don't know if the module really
+	// doesn't exist, or the proxy just didn't fetch it.
+	NotFetched = errors.New("not fetched by proxy")
+
 	// InvalidArgument indicates that the input into the request is invalid in
 	// some way (HTTP 400).
 	InvalidArgument = errors.New("invalid argument")
+	// BadModule indicates a problem with a module.
+	BadModule = errors.New("bad module")
+	// Excluded indicates that the module is excluded. (See internal/postgres/excluded.go.)
+	Excluded = errors.New("excluded")
+
+	// AlternativeModule indicates that the path of the module zip file differs
+	// from the path specified in the go.mod file.
+	AlternativeModule = errors.New("alternative module")
+
+	// ModuleTooLarge indicates that the module is too large for us to process.
+	// This should be temporary: we should obtain sufficient resources to process
+	// any module, up to the max size allowed by the proxy.
+	ModuleTooLarge = errors.New("module too large")
+
+	// SheddingLoad indicates that the server is overloaded and cannot process the
+	// module at this time.
+	SheddingLoad = errors.New("shedding load")
+
+	// Cleaned indicates that the module version was cleaned from the DB and
+	// shouldn't be reprocessed.
+	Cleaned = errors.New("cleaned")
+
+	// Unknown indicates that the error has unknown semantics.
+	Unknown = errors.New("unknown")
+
+	// ProxyTimedOut indicates that a request timed out when fetching from the Module Mirror.
+	ProxyTimedOut = errors.New("proxy timed out")
+
+	// ProxyError is used to capture non-actionable server errors returned from the proxy.
+	ProxyError = errors.New("proxy error")
+
+	// VulnDBError is used to capture non-actionable server errors returned from vulndb.
+ VulnDBError = errors.New("vulndb error") + + // PackageBuildContextNotSupported indicates that the build context for the + // package is not supported. + PackageBuildContextNotSupported = errors.New("package build context not supported") + // PackageMaxImportsLimitExceeded indicates that the package has too many + // imports. + PackageMaxImportsLimitExceeded = errors.New("package max imports limit exceeded") + // PackageMaxFileSizeLimitExceeded indicates that the package contains a file + // that exceeds fetch.MaxFileSize. + PackageMaxFileSizeLimitExceeded = errors.New("package max file size limit exceeded") + // PackageDocumentationHTMLTooLarge indicates that the rendered documentation + // HTML size exceeded the specified limit for dochtml.RenderOptions. + PackageDocumentationHTMLTooLarge = errors.New("package documentation HTML is too large") + // PackageBadImportPath represents an error loading a package because its + // contents do not make up a valid package. This can happen, for + // example, if the .go files fail to parse or declare different package + // names. + // Go files were found in a directory, but the resulting import path is invalid. + PackageBadImportPath = errors.New("package bad import path") + // PackageInvalidContents represents an error loading a package because + // its contents do not make up a valid package. This can happen, for + // example, if the .go files fail to parse or declare different package + // names. + PackageInvalidContents = errors.New("package invalid contents") + + // DBModuleInsertInvalid represents a module that was successfully + // fetched but could not be inserted due to invalid arguments to + // postgres.InsertModule. + DBModuleInsertInvalid = errors.New("db module insert invalid") + + // ReprocessStatusOK indicates that the module to be reprocessed + // previously had a status of http.StatusOK. + ReprocessStatusOK = errors.New("reprocess status ok") + // ReprocessHasIncompletePackages indicates that the module to be reprocessed + // previously had a status of 290. + ReprocessHasIncompletePackages = errors.New("reprocess has incomplete packages") + // ReprocessBadModule indicates that the module to be reprocessed + // previously had a status of derrors.BadModule. + ReprocessBadModule = errors.New("reprocess bad module") + // ReprocessAlternativeModule indicates that the module to be reprocessed + // previously had a status of derrors.AlternativeModule. + ReprocessAlternative = errors.New("reprocess alternative module") + // ReprocessDBModuleInsertInvalid represents a module to be reprocessed + // that was successfully fetched but could not be inserted due to invalid + // arguments to postgres.InsertModule. + ReprocessDBModuleInsertInvalid = errors.New("reprocess db module insert invalid") ) +var codes = []struct { + err error + code int +}{ + {NotFound, http.StatusNotFound}, + {InvalidArgument, http.StatusBadRequest}, + {Excluded, http.StatusForbidden}, + {SheddingLoad, http.StatusServiceUnavailable}, + + // Since the following aren't HTTP statuses, pick unused codes. + {HasIncompletePackages, 290}, + {DBModuleInsertInvalid, 480}, + {NotFetched, 481}, + {BadModule, 490}, + {AlternativeModule, 491}, + {ModuleTooLarge, 492}, + {Cleaned, 493}, + + {ProxyTimedOut, 550}, // not a real code + {ProxyError, 551}, // not a real code + {VulnDBError, 552}, // not a real code + // 52x and 54x errors represents modules that need to be reprocessed, and the + // previous status code the module had. 
Note that the status code + // matters for determining reprocessing order. + {ReprocessStatusOK, 520}, + {ReprocessHasIncompletePackages, 521}, + {ReprocessBadModule, 540}, + {ReprocessAlternative, 541}, + {ReprocessDBModuleInsertInvalid, 542}, + + // 60x errors represents errors that occurred when processing a + // package. + {PackageBuildContextNotSupported, 600}, + {PackageMaxImportsLimitExceeded, 601}, + {PackageMaxFileSizeLimitExceeded, 602}, + {PackageDocumentationHTMLTooLarge, 603}, + {PackageInvalidContents, 604}, + {PackageBadImportPath, 605}, +} + +// FromStatus generates an error according for the given status code. It uses +// the given format string and arguments to create the error string according +// to the fmt package. If format is the empty string, then the error +// corresponding to the code is returned unwrapped. +// +// If code is http.StatusOK, it returns nil. +func FromStatus(code int, format string, args ...any) error { + if code == http.StatusOK { + return nil + } + var innerErr = Unknown + for _, e := range codes { + if e.code == code { + innerErr = e.err + break + } + } + if format == "" { + return innerErr + } + return fmt.Errorf(format+": %w", append(args, innerErr)...) +} + +// ToStatus returns a status code corresponding to err. +func ToStatus(err error) int { + if err == nil { + return http.StatusOK + } + for _, e := range codes { + if errors.Is(err, e.err) { + return e.code + } + } + return http.StatusInternalServerError +} + +// ToReprocessStatus returns the reprocess status code corresponding to the +// provided status. +func ToReprocessStatus(status int) int { + switch status { + case http.StatusOK: + return ToStatus(ReprocessStatusOK) + case ToStatus(HasIncompletePackages): + return ToStatus(ReprocessHasIncompletePackages) + case ToStatus(BadModule): + return ToStatus(ReprocessBadModule) + case ToStatus(AlternativeModule): + return ToStatus(ReprocessAlternative) + case ToStatus(DBModuleInsertInvalid): + return ToStatus(ReprocessDBModuleInsertInvalid) + default: + return status + } +} + +// Add adds context to the error. +// The result cannot be unwrapped to recover the original error. +// It does nothing when *errp == nil. +// +// Example: +// +// defer derrors.Add(&err, "copy(%s, %s)", src, dst) +// +// See Wrap for an equivalent function that allows +// the result to be unwrapped. +func Add(errp *error, format string, args ...any) { + if *errp != nil { + *errp = fmt.Errorf("%s: %v", fmt.Sprintf(format, args...), *errp) + } +} + // Wrap adds context to the error and allows // unwrapping the result to recover the original error. // @@ -36,3 +242,66 @@ func Wrap(errp *error, format string, args ...any) { *errp = fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), *errp) } } + +// WrapStack is like Wrap, but adds a stack trace if there isn't one already. +func WrapStack(errp *error, format string, args ...any) { + if *errp != nil { + if se := (*StackError)(nil); !errors.As(*errp, &se) { + *errp = NewStackError(*errp) + } + Wrap(errp, format, args...) + } +} + +// StackError wraps an error and adds a stack trace. +type StackError struct { + Stack []byte + err error +} + +// NewStackError returns a StackError, capturing a stack trace. +func NewStackError(err error) *StackError { + // Limit the stack trace to 16K. Same value used in the errorreporting client, + // cloud.google.com/go@v0.66.0/errorreporting/errors.go. 
+ var buf [16 * 1024]byte + n := runtime.Stack(buf[:], false) + return &StackError{ + err: err, + Stack: buf[:n], + } +} + +func (e *StackError) Error() string { + return e.err.Error() // ignore the stack +} + +func (e *StackError) Unwrap() error { + return e.err +} + +// WrapAndReport calls Wrap followed by Report. +func WrapAndReport(errp *error, format string, args ...any) { + Wrap(errp, format, args...) + if *errp != nil { + Report(*errp) + } +} + +var reporter Reporter + +// SetReporter the Reporter to use, for use by Report. +func SetReporter(r Reporter) { + reporter = r +} + +// Reporter is an interface used for reporting errors. +type Reporter interface { + Report(err error, req *http.Request, stack []byte) +} + +// Report uses the Reporter to report an error. +func Report(err error) { + if reporter != nil { + reporter.Report(err, nil, nil) + } +} diff --git a/internal/third_party/pkgsite/derrors/derrors_test.go b/internal/third_party/pkgsite/derrors/derrors_test.go index 277e5008..ab2db26c 100644 --- a/internal/third_party/pkgsite/derrors/derrors_test.go +++ b/internal/third_party/pkgsite/derrors/derrors_test.go @@ -6,9 +6,82 @@ package derrors import ( "errors" + "fmt" + "io" + "net/http" + "strings" "testing" ) +func TestFromHTTPStatus(t *testing.T) { + tests := []struct { + label string + status int + want error + }{ + { + label: "OK translates to nil error", + status: 200, + }, + { + label: "400 translates to invalid argument", + status: 400, + want: InvalidArgument, + }, + // Testing other specific HTTP status codes is intentionally omitted to + // avoid writing a change detector. + } + + for _, test := range tests { + test := test + t.Run(test.label, func(t *testing.T) { + err := FromStatus(test.status, "error") + if !errors.Is(err, test.want) { + t.Errorf("FromHTTPStatus(%d, ...) 
= %v, want %v", test.status, err, test.want) + } + }) + } +} + +func TestToHTTPStatus(t *testing.T) { + for _, test := range []struct { + in error + want int + }{ + {nil, http.StatusOK}, + {InvalidArgument, http.StatusBadRequest}, + {NotFound, http.StatusNotFound}, + {BadModule, 490}, + {AlternativeModule, 491}, + {Unknown, http.StatusInternalServerError}, + {fmt.Errorf("wrapping: %w", NotFound), http.StatusNotFound}, + {io.ErrUnexpectedEOF, http.StatusInternalServerError}, + } { + got := ToStatus(test.in) + if got != test.want { + t.Errorf("ToHTTPStatus(%v) = %d, want %d", test.in, got, test.want) + } + } +} + +func TestAdd(t *testing.T) { + var err error + Add(&err, "whatever") + if err != nil { + t.Errorf("got %v, want nil", err) + } + + err = errors.New("bad stuff") + Add(&err, "Frob(%d)", 3) + want := "Frob(3): bad stuff" + if got := err.Error(); got != want { + t.Errorf("got %s, want %s", got, want) + } + if got := errors.Unwrap(err); got != nil { + t.Errorf("Unwrap: got %v, want nil", got) + } +} + func TestWrap(t *testing.T) { var err error Wrap(&err, "whatever") @@ -27,3 +100,18 @@ func TestWrap(t *testing.T) { t.Errorf("Unwrap: got %#v, want %#v", got, orig) } } + +func TestWrapStack(t *testing.T) { + var err error = io.ErrShortWrite + WrapStack(&err, "while frobbing") + if !errors.Is(err, io.ErrShortWrite) { + t.Error("is not io.ErrShortWrite") + } + var se *StackError + if !errors.As(err, &se) { + t.Fatal("not as StackError") + } + if !strings.Contains(string(se.Stack), "WrapStack") { + t.Fatal("bad stack trace") + } +} diff --git a/internal/third_party/pkgsite/source/source.go b/internal/third_party/pkgsite/source/source.go index 62b5f093..8ef9a2c4 100644 --- a/internal/third_party/pkgsite/source/source.go +++ b/internal/third_party/pkgsite/source/source.go @@ -22,20 +22,20 @@ package source import ( "context" + "encoding/json" "fmt" - "log" // We cannot use glog instead, because its "v" flag conflicts with other libraries we use. + "log" // We cannot use glog instead, because its "v" flag conflicts with other libraries we use "net/http" "path" + "path/filepath" "regexp" "strconv" "strings" - "time" "github.com/pulumi/go-licenses/internal/third_party/pkgsite/derrors" "github.com/pulumi/go-licenses/internal/third_party/pkgsite/stdlib" + "github.com/pulumi/go-licenses/internal/third_party/pkgsite/trace" "github.com/pulumi/go-licenses/internal/third_party/pkgsite/version" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/trace" "golang.org/x/net/context/ctxhttp" ) @@ -48,6 +48,38 @@ type Info struct { templates urlTemplates // for building URLs } +// RepoURL returns a URL for the home page of the repository. +func (i *Info) RepoURL() string { + if i == nil { + return "" + } + if i.templates.Repo == "" { + // The default repo template is just "{repo}". + return i.repoURL + } + return expand(i.templates.Repo, map[string]string{ + "repo": i.repoURL, + }) +} + +// ModuleURL returns a URL for the home page of the module. +func (i *Info) ModuleURL() string { + return i.DirectoryURL("") +} + +// DirectoryURL returns a URL for a directory relative to the module's home directory. 
+func (i *Info) DirectoryURL(dir string) string { + if i == nil { + return "" + } + return strings.TrimSuffix(expand(i.templates.Directory, map[string]string{ + "repo": i.repoURL, + "importPath": path.Join(strings.TrimPrefix(i.repoURL, "https://"), dir), + "commit": i.commit, + "dir": path.Join(i.moduleDir, dir), + }), "/") +} + // FileURL returns a URL for a file whose pathname is relative to the module's home directory. func (i *Info) FileURL(pathname string) string { if i == nil { @@ -64,20 +96,117 @@ func (i *Info) FileURL(pathname string) string { }) } +// LineURL returns a URL referring to a line in a file relative to the module's home directory. +func (i *Info) LineURL(pathname string, line int) string { + if i == nil { + return "" + } + dir, base := path.Split(pathname) + return expand(i.templates.Line, map[string]string{ + "repo": i.repoURL, + "importPath": path.Join(strings.TrimPrefix(i.repoURL, "https://"), dir), + "commit": i.commit, + "file": path.Join(i.moduleDir, pathname), + "dir": dir, + "base": base, + "line": strconv.Itoa(line), + }) +} + +// RawURL returns a URL referring to the raw contents of a file relative to the +// module's home directory. +func (i *Info) RawURL(pathname string) string { + if i == nil { + return "" + } + // Some templates don't support raw content serving. + if i.templates.Raw == "" { + return "" + } + moduleDir := i.moduleDir + // Special case: the standard library's source module path is set to "src", + // which is correct for source file links. But the README is at the repo + // root, not in the src directory. In other words, + // Module.Units[0].Readme.FilePath is not relative to + // Module.Units[0].SourceInfo.moduleDir, as it is for every other module. + // Correct for that here. + if i.repoURL == stdlib.GoSourceRepoURL { + moduleDir = "" + } + return expand(i.templates.Raw, map[string]string{ + "repo": i.repoURL, + "commit": i.commit, + "file": path.Join(moduleDir, pathname), + }) +} + +// map of common urlTemplates +var urlTemplatesByKind = map[string]urlTemplates{ + "github": githubURLTemplates, + "gitlab": gitlabURLTemplates, + "bitbucket": bitbucketURLTemplates, +} + +// jsonInfo is a Go struct describing the JSON structure of an INFO. +type jsonInfo struct { + RepoURL string + ModuleDir string + Commit string + // Store common templates efficiently by setting this to a short string + // we look up in a map. If Kind != "", then Templates == nil. + Kind string `json:",omitempty"` + Templates *urlTemplates `json:",omitempty"` +} + +// MarshalJSON returns the Info encoded for storage in the database. +func (i *Info) MarshalJSON() (_ []byte, err error) { + defer derrors.Wrap(&err, "MarshalJSON") + + ji := &jsonInfo{ + RepoURL: i.repoURL, + ModuleDir: i.moduleDir, + Commit: i.commit, + } + // Store common templates efficiently, by name. 
+ for kind, templs := range urlTemplatesByKind { + if i.templates == templs { + ji.Kind = kind + break + } + } + if ji.Kind == "" && i.templates != (urlTemplates{}) { + ji.Templates = &i.templates + } + return json.Marshal(ji) +} + +func (i *Info) UnmarshalJSON(data []byte) (err error) { + defer derrors.Wrap(&err, "UnmarshalJSON(data)") + + var ji jsonInfo + if err := json.Unmarshal(data, &ji); err != nil { + return err + } + i.repoURL = trimVCSSuffix(ji.RepoURL) + i.moduleDir = ji.ModuleDir + i.commit = ji.Commit + if ji.Kind != "" { + i.templates = urlTemplatesByKind[ji.Kind] + } else if ji.Templates != nil { + i.templates = *ji.Templates + } + return nil +} + type Client struct { // client used for HTTP requests. It is mutable for testing purposes. // If nil, then moduleInfoDynamic will return nil, nil; also for testing. httpClient *http.Client } -// New constructs a *Client using the provided timeout. -func NewClient(timeout time.Duration) *Client { - return &Client{ - httpClient: &http.Client{ - Transport: &ochttp.Transport{}, - Timeout: timeout, - }, - } +// New constructs a *Client using the provided *http.Client. +func NewClient(httpClient *http.Client) *Client { + return &Client{httpClient: httpClient} } // NewClientForTesting returns a Client suitable for testing. It returns the @@ -128,10 +257,6 @@ func ModuleInfo(ctx context.Context, client *Client, modulePath, v string) (info return NewGitHubInfo("https://"+modulePath, "", v), nil } - if modulePath == stdlib.ModulePath { - return newStdlibInfo(v) - } - repo, relativeModulePath, templates, transformCommit, err := matchStatic(modulePath) if err != nil { info, err = moduleInfoDynamic(ctx, client, modulePath, v) @@ -161,8 +286,8 @@ func ModuleInfo(ctx context.Context, client *Client, modulePath, v string) (info // in cmd/go/internal/get/vcs.go. } -func newStdlibInfo(version string) (_ *Info, err error) { - defer derrors.Wrap(&err, "newStdlibInfo(%q)", version) +func NewStdlibInfo(version string) (_ *Info, err error) { + defer derrors.Wrap(&err, "NewStdlibInfo(%q)", version) commit, err := stdlib.TagForVersion(version) if err != nil { @@ -783,3 +908,32 @@ func NewGitHubInfo(repoURL, moduleDir, commit string) *Info { templates: githubURLTemplates, } } + +// NewStdlibInfoForTest returns a source.Info for the standard library at the given +// semantic version. It panics if the version does not correspond to a Go release +// tag. It is for testing only. +func NewStdlibInfoForTest(version string) *Info { + info, err := NewStdlibInfo(version) + if err != nil { + panic(err) + } + return info +} + +// FilesInfo returns an Info that links to a path in the server's /files +// namespace. The same path needs to be installed via frontend.Server.InstallFS. +func FilesInfo(dir string) *Info { + // The repo and directory patterns need a final slash. Without it, + // http.FileServer redirects instead of serving the directory contents, with + // confusing results. + return &Info{ + repoURL: path.Join("/files", filepath.ToSlash(dir)), + templates: urlTemplates{ + Repo: "{repo}/", + Directory: "{repo}/{dir}/", + File: "{repo}/{file}", + Line: "{repo}/{file}#L{line}", // not supported now, but maybe someday + Raw: "{repo}/{file}", + }, + } +} diff --git a/internal/third_party/pkgsite/source/source_test.go b/internal/third_party/pkgsite/source/source_test.go index 05d1d8ec..abd9011a 100644 --- a/internal/third_party/pkgsite/source/source_test.go +++ b/internal/third_party/pkgsite/source/source_test.go @@ -1,458 +1,21 @@ // Copyright 2019 The Go Authors. 
All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. + package source import ( "context" - "flag" + "encoding/json" "fmt" "io" "net/http" - "os" - "path/filepath" "strings" "testing" - "time" "github.com/google/go-cmp/cmp" - "github.com/google/go-replayers/httpreplay" -) - -var ( - testTimeout = 2 * time.Second - record = flag.Bool("record", false, "record interactions with other systems, for replay") ) -func TestModuleInfo(t *testing.T) { - client, done := newReplayClient(t, *record) - defer done() - - // Test names where we don't replay/record actual URLs. - skipReplayTests := map[string]bool{ - // On 5-Jan-2022, gitee.com took too long to respond, so it wasn't possible - // to record the results. - "gitee.com": true, - } - - check := func(t *testing.T, msg, got, want string, skipReplay bool) { - t.Helper() - if got != want { - t.Fatalf("%s:\ngot %s\nwant %s", msg, got, want) - } - if !skipReplay { - res, err := client.Head(got) - if err != nil { - t.Fatalf("%s: %v", got, err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - t.Fatalf("%s: %v", got, res.Status) - } - } - } - - for _, test := range []struct { - desc string - modulePath, version, file string - wantRepo, wantModule, wantFile, wantLine, wantRaw string - }{ - { - "standard library", - "std", "v1.12.0", "bytes/buffer.go", - "https://cs.opensource.google/go/go", - "https://cs.opensource.google/go/go/+/go1.12:src", - "https://cs.opensource.google/go/go/+/go1.12:src/bytes/buffer.go", - "https://cs.opensource.google/go/go/+/go1.12:src/bytes/buffer.go;l=1", - // The raw URLs for the standard library are relative to the repo root, not - // the module directory. - "", - }, - { - "old standard library", - "std", "v1.3.0", "bytes/buffer.go", - "https://cs.opensource.google/go/go", - "https://cs.opensource.google/go/go/+/go1.3:src/pkg", - "https://cs.opensource.google/go/go/+/go1.3:src/pkg/bytes/buffer.go", - "https://cs.opensource.google/go/go/+/go1.3:src/pkg/bytes/buffer.go;l=1", - // The raw URLs for the standard library are relative to the repo root, not - // the module directory. 
- "", - }, - { - "github module at repo root", - "github.com/pkg/errors", "v0.8.1", "errors.go", - - "https://github.com/pkg/errors", - "https://github.com/pkg/errors/tree/v0.8.1", - "https://github.com/pkg/errors/blob/v0.8.1/errors.go", - "https://github.com/pkg/errors/blob/v0.8.1/errors.go#L1", - "https://github.com/pkg/errors/raw/v0.8.1/errors.go", - }, - { - "github module not at repo root", - "github.com/hashicorp/consul/sdk", "v0.2.0", "freeport/freeport.go", - - "https://github.com/hashicorp/consul", - "https://github.com/hashicorp/consul/tree/sdk/v0.2.0/sdk", - "https://github.com/hashicorp/consul/blob/sdk/v0.2.0/sdk/freeport/freeport.go", - "https://github.com/hashicorp/consul/blob/sdk/v0.2.0/sdk/freeport/freeport.go#L1", - "https://github.com/hashicorp/consul/raw/sdk/v0.2.0/sdk/freeport/freeport.go", - }, - { - "github module with VCS suffix", - "github.com/pkg/errors.git", "v0.8.1", "errors.go", - - "https://github.com/pkg/errors", - "https://github.com/pkg/errors/tree/v0.8.1", - "https://github.com/pkg/errors/blob/v0.8.1/errors.go", - "https://github.com/pkg/errors/blob/v0.8.1/errors.go#L1", - "https://github.com/pkg/errors/raw/v0.8.1/errors.go", - }, - { - "bitbucket", - "bitbucket.org/plazzaro/kami", "v1.2.1", "defaults.go", - - "https://bitbucket.org/plazzaro/kami", - "https://bitbucket.org/plazzaro/kami/src/v1.2.1", - "https://bitbucket.org/plazzaro/kami/src/v1.2.1/defaults.go", - "https://bitbucket.org/plazzaro/kami/src/v1.2.1/defaults.go#lines-1", - "https://bitbucket.org/plazzaro/kami/raw/v1.2.1/defaults.go", - }, - { - "incompatible", - "github.com/airbrake/gobrake", "v3.5.1+incompatible", "gobrake.go", - - "https://github.com/airbrake/gobrake", - "https://github.com/airbrake/gobrake/tree/v3.5.1", - "https://github.com/airbrake/gobrake/blob/v3.5.1/gobrake.go", - "https://github.com/airbrake/gobrake/blob/v3.5.1/gobrake.go#L1", - "https://github.com/airbrake/gobrake/raw/v3.5.1/gobrake.go", - }, - { - "golang x-tools", - "golang.org/x/tools", "v0.0.0-20190927191325-030b2cf1153e", "README.md", - - "https://cs.opensource.google/go/x/tools", - "https://cs.opensource.google/go/x/tools/+/030b2cf1:", - "https://cs.opensource.google/go/x/tools/+/030b2cf1:README.md", - "https://cs.opensource.google/go/x/tools/+/030b2cf1:README.md;l=1", - "https://github.com/golang/tools/raw/030b2cf1/README.md", - }, - { - "golang x-tools-gopls", - "golang.org/x/tools/gopls", "v0.4.0", "main.go", - - "https://cs.opensource.google/go/x/tools", - "https://cs.opensource.google/go/x/tools/+/gopls/v0.4.0:gopls", - "https://cs.opensource.google/go/x/tools/+/gopls/v0.4.0:gopls/main.go", - "https://cs.opensource.google/go/x/tools/+/gopls/v0.4.0:gopls/main.go;l=1", - "https://github.com/golang/tools/raw/gopls/v0.4.0/gopls/main.go", - }, - { - "golang dl", - "golang.org/dl", "c5c89f6c", "go1.16/main.go", - - "https://cs.opensource.google/go/dl", - "https://cs.opensource.google/go/dl/+/c5c89f6c:", - "https://cs.opensource.google/go/dl/+/c5c89f6c:go1.16/main.go", - "https://cs.opensource.google/go/dl/+/c5c89f6c:go1.16/main.go;l=1", - "https://github.com/golang/dl/raw/c5c89f6c/go1.16/main.go", - }, - { - "golang x-image", - "golang.org/x/image", "v0.0.0-20190910094157-69e4b8554b2a", "math/fixed/fixed.go", - - "https://cs.opensource.google/go/x/image", - "https://cs.opensource.google/go/x/image/+/69e4b855:", - "https://cs.opensource.google/go/x/image/+/69e4b855:math/fixed/fixed.go", - "https://cs.opensource.google/go/x/image/+/69e4b855:math/fixed/fixed.go;l=1", - 
"https://github.com/golang/image/raw/69e4b855/math/fixed/fixed.go", - }, - { - "git.apache.org", - "git.apache.org/thrift.git", "v0.12.0", "lib/go/thrift/client.go", - - "https://github.com/apache/thrift", - "https://github.com/apache/thrift/tree/v0.12.0", - "https://github.com/apache/thrift/blob/v0.12.0/lib/go/thrift/client.go", - "https://github.com/apache/thrift/blob/v0.12.0/lib/go/thrift/client.go#L1", - "https://github.com/apache/thrift/raw/v0.12.0/lib/go/thrift/client.go", - }, - { - "vanity for github", - "cloud.google.com/go/spanner", "v1.0.0", "doc.go", - - "https://github.com/googleapis/google-cloud-go", - "https://github.com/googleapis/google-cloud-go/tree/spanner/v1.0.0/spanner", - "https://github.com/googleapis/google-cloud-go/blob/spanner/v1.0.0/spanner/doc.go", - "https://github.com/googleapis/google-cloud-go/blob/spanner/v1.0.0/spanner/doc.go#L1", - "https://github.com/googleapis/google-cloud-go/raw/spanner/v1.0.0/spanner/doc.go", - }, - { - "vanity for bitbucket", - "go.niquid.tech/civic-sip-api", "v0.2.0", "client.go", - - "https://bitbucket.org/niquid/civic-sip-api.git", - "https://bitbucket.org/niquid/civic-sip-api.git/src/v0.2.0", - "https://bitbucket.org/niquid/civic-sip-api.git/src/v0.2.0/client.go", - "https://bitbucket.org/niquid/civic-sip-api.git/src/v0.2.0/client.go#lines-1", - "https://bitbucket.org/niquid/civic-sip-api.git/raw/v0.2.0/client.go", - }, - { - "vanity for googlesource.com", - "go.chromium.org/goma/server", "v0.0.23", "log/log.go", - - "https://chromium.googlesource.com/infra/goma/server", - "https://chromium.googlesource.com/infra/goma/server/+/v0.0.23", - "https://chromium.googlesource.com/infra/goma/server/+/v0.0.23/log/log.go", - "https://chromium.googlesource.com/infra/goma/server/+/v0.0.23/log/log.go#1", - "", - }, - { - "gitlab.com", - "gitlab.com/tozd/go/errors", "v0.3.0", "errors.go", - - "https://gitlab.com/tozd/go/errors", - "https://gitlab.com/tozd/go/errors/-/tree/v0.3.0", - "https://gitlab.com/tozd/go/errors/-/blob/v0.3.0/errors.go", - "https://gitlab.com/tozd/go/errors/-/blob/v0.3.0/errors.go#L1", - "https://gitlab.com/tozd/go/errors/-/raw/v0.3.0/errors.go", - }, - { - "other gitlab", - "gitlab.void-ptr.org/go/nu40c16", "v0.1.2", "nu40c16.go", - - "https://gitlab.void-ptr.org/go/nu40c16", - "https://gitlab.void-ptr.org/go/nu40c16/-/tree/v0.1.2", - "https://gitlab.void-ptr.org/go/nu40c16/-/blob/v0.1.2/nu40c16.go", - "https://gitlab.void-ptr.org/go/nu40c16/-/blob/v0.1.2/nu40c16.go#L1", - "https://gitlab.void-ptr.org/go/nu40c16/-/raw/v0.1.2/nu40c16.go", - }, - { - "gitee.com", - "gitee.com/eden-framework/plugins", "v0.0.7", "file.go", - - "https://gitee.com/eden-framework/plugins", - "https://gitee.com/eden-framework/plugins/tree/v0.0.7", - "https://gitee.com/eden-framework/plugins/blob/v0.0.7/file.go", - "https://gitee.com/eden-framework/plugins/blob/v0.0.7/file.go#L1", - "https://gitee.com/eden-framework/plugins/raw/v0.0.7/file.go", - }, - { - "sourcehut", - "gioui.org", "v0.0.0-20200726090130-3b95e2918359", "op/op.go", - - "https://git.sr.ht/~eliasnaur/gio", - "https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359", - "https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359/op/op.go", - "https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359/op/op.go#L1", - "https://git.sr.ht/~eliasnaur/gio/blob/3b95e2918359/op/op.go", - }, - { - "sourcehut nested", - "gioui.org/app", "v0.0.0-20200726090130-3b95e2918359", "app.go", - - "https://git.sr.ht/~eliasnaur/gio", - "https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359/app", - 
"https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359/app/app.go", - "https://git.sr.ht/~eliasnaur/gio/tree/3b95e2918359/app/app.go#L1", - "https://git.sr.ht/~eliasnaur/gio/blob/3b95e2918359/app/app.go", - }, - { - "git.fd.io tag", - "git.fd.io/govpp", "v0.3.5", "doc.go", - - "https://git.fd.io/govpp", - "https://git.fd.io/govpp/tree/?h=v0.3.5", - "https://git.fd.io/govpp/tree/doc.go?h=v0.3.5", - "https://git.fd.io/govpp/tree/doc.go?h=v0.3.5#n1", - "https://git.fd.io/govpp/plain/doc.go?h=v0.3.5", - }, - { - "git.fd.io hash", - "git.fd.io/govpp", "v0.0.0-20200726090130-f04939006063", "doc.go", - - "https://git.fd.io/govpp", - "https://git.fd.io/govpp/tree/?id=f04939006063", - "https://git.fd.io/govpp/tree/doc.go?id=f04939006063", - "https://git.fd.io/govpp/tree/doc.go?id=f04939006063#n1", - "https://git.fd.io/govpp/plain/doc.go?id=f04939006063", - }, - { - "gitea", - "gitea.com/chenli/reverse", "v0.1.2", "main.go", - - "https://gitea.com/chenli/reverse", - "https://gitea.com/chenli/reverse/src/tag/v0.1.2", - "https://gitea.com/chenli/reverse/src/tag/v0.1.2/main.go", - "https://gitea.com/chenli/reverse/src/tag/v0.1.2/main.go#L1", - "https://gitea.com/chenli/reverse/raw/tag/v0.1.2/main.go", - }, - { - "gogs", - "gogs.buffalo-robot.com/zouhy/micro", "v0.4.2", "go.mod", - - "https://gogs.buffalo-robot.com/zouhy/micro", - "https://gogs.buffalo-robot.com/zouhy/micro/src/v0.4.2", - "https://gogs.buffalo-robot.com/zouhy/micro/src/v0.4.2/go.mod", - "https://gogs.buffalo-robot.com/zouhy/micro/src/v0.4.2/go.mod#L1", - "https://gogs.buffalo-robot.com/zouhy/micro/raw/v0.4.2/go.mod", - }, - { - "v2 as a branch", - "github.com/jrick/wsrpc/v2", "v2.1.1", "rpc.go", - - "https://github.com/jrick/wsrpc", - "https://github.com/jrick/wsrpc/tree/v2.1.1", - "https://github.com/jrick/wsrpc/blob/v2.1.1/rpc.go", - "https://github.com/jrick/wsrpc/blob/v2.1.1/rpc.go#L1", - "https://github.com/jrick/wsrpc/raw/v2.1.1/rpc.go", - }, - { - "v2 as subdirectory", - "github.com/gonutz/w32/v2", "v2.2.3", "com.go", - - "https://github.com/gonutz/w32", - "https://github.com/gonutz/w32/tree/v2.2.3/v2", - "https://github.com/gonutz/w32/blob/v2.2.3/v2/com.go", - "https://github.com/gonutz/w32/blob/v2.2.3/v2/com.go#L1", - "https://github.com/gonutz/w32/raw/v2.2.3/v2/com.go", - }, - { - "gopkg.in, one element", - "gopkg.in/yaml.v2", "v2.2.2", "yaml.go", - - "https://github.com/go-yaml/yaml", - "https://github.com/go-yaml/yaml/tree/v2.2.2", - "https://github.com/go-yaml/yaml/blob/v2.2.2/yaml.go", - "https://github.com/go-yaml/yaml/blob/v2.2.2/yaml.go#L1", - "https://github.com/go-yaml/yaml/raw/v2.2.2/yaml.go", - }, - { - "gopkg.in, two elements", - "gopkg.in/boltdb/bolt.v1", "v1.3.0", "doc.go", - - "https://github.com/boltdb/bolt", - "https://github.com/boltdb/bolt/tree/v1.3.0", - "https://github.com/boltdb/bolt/blob/v1.3.0/doc.go", - "https://github.com/boltdb/bolt/blob/v1.3.0/doc.go#L1", - "https://github.com/boltdb/bolt/raw/v1.3.0/doc.go", - }, - { - "gonum.org", - "gonum.org/v1/gonum", "v0.6.1", "doc.go", - - "https://github.com/gonum/gonum", - "https://github.com/gonum/gonum/tree/v0.6.1", - "https://github.com/gonum/gonum/blob/v0.6.1/doc.go", - "https://github.com/gonum/gonum/blob/v0.6.1/doc.go#L1", - "https://github.com/gonum/gonum/raw/v0.6.1/doc.go", - }, - { - "custom with gotools at repo root", - "dmitri.shuralyov.com/gpu/mtl", "v0.0.0-20191203043605-d42048ed14fd", "mtl.go", - - "https://dmitri.shuralyov.com/gpu/mtl/...", - "https://gotools.org/dmitri.shuralyov.com/gpu/mtl?rev=d42048ed14fd", - 
"https://gotools.org/dmitri.shuralyov.com/gpu/mtl?rev=d42048ed14fd#mtl.go", - "https://gotools.org/dmitri.shuralyov.com/gpu/mtl?rev=d42048ed14fd#mtl.go-L1", - "", - }, - { - "custom with gotools in subdir", - "dmitri.shuralyov.com/gpu/mtl", "v0.0.0-20191203043605-d42048ed14fd", "example/movingtriangle/internal/coreanim/coreanim.go", - - "https://dmitri.shuralyov.com/gpu/mtl/...", - "https://gotools.org/dmitri.shuralyov.com/gpu/mtl?rev=d42048ed14fd", - "https://gotools.org/dmitri.shuralyov.com/gpu/mtl/example/movingtriangle/internal/coreanim?rev=d42048ed14fd#coreanim.go", - "https://gotools.org/dmitri.shuralyov.com/gpu/mtl/example/movingtriangle/internal/coreanim?rev=d42048ed14fd#coreanim.go-L1", - "", - }, - { - "go-source templates match gitea with transform", - "opendev.org/airship/airshipctl", "v2.0.0-beta.1", "pkg/cluster/command.go", - "https://opendev.org/airship/airshipctl", - "https://opendev.org/airship/airshipctl/src/tag/v2.0.0-beta.1", - "https://opendev.org/airship/airshipctl/src/tag/v2.0.0-beta.1/pkg/cluster/command.go", - "https://opendev.org/airship/airshipctl/src/tag/v2.0.0-beta.1/pkg/cluster/command.go#L1", - "", - }, - { - "go-source templates match gitea without transform", - "git.borago.de/Marco/gqltest", "v0.0.18", "go.mod", - "https://git.borago.de/Marco/gqltest", - "https://git.borago.de/Marco/gqltest/src/v0.0.18", - "https://git.borago.de/Marco/gqltest/src/v0.0.18/go.mod", - "https://git.borago.de/Marco/gqltest/src/v0.0.18/go.mod#L1", - "https://git.borago.de/Marco/gqltest/raw/v0.0.18/go.mod", - }, - { - "go-source templates match gitlab2", - "git.pluggableideas.com/destrealm/3rdparty/go-yaml", "v2.2.6", "go.mod", - "https://git.pluggableideas.com/destrealm/3rdparty/go-yaml", - "https://git.pluggableideas.com/destrealm/3rdparty/go-yaml/-/tree/v2.2.6", - "https://git.pluggableideas.com/destrealm/3rdparty/go-yaml/-/blob/v2.2.6/go.mod", - "https://git.pluggableideas.com/destrealm/3rdparty/go-yaml/-/blob/v2.2.6/go.mod#L1", - "https://git.pluggableideas.com/destrealm/3rdparty/go-yaml/-/raw/v2.2.6/go.mod", - }, - { - "go-source templates match fdio", - "golang.zx2c4.com/wireguard/windows", "v0.3.4", "go.mod", - "https://git.zx2c4.com/wireguard-windows", - "https://git.zx2c4.com/wireguard-windows/tree/?h=v0.3.4", - "https://git.zx2c4.com/wireguard-windows/tree/go.mod?h=v0.3.4", - "https://git.zx2c4.com/wireguard-windows/tree/go.mod?h=v0.3.4#n1", - "https://git.zx2c4.com/wireguard-windows/plain/go.mod?h=v0.3.4", - }, - { - "go-source templates match blitiri.com.ar", - "blitiri.com.ar/go/log", "v1.1.0", "go.mod", - "https://blitiri.com.ar/git/r/log", - "https://blitiri.com.ar/git/r/log/b/master/t", - "https://blitiri.com.ar/git/r/log/b/master/t/f=go.mod.html", - "https://blitiri.com.ar/git/r/log/b/master/t/f=go.mod.html#line-1", - "", - }, - } { - t.Run(test.desc, func(t *testing.T) { - info, err := ModuleInfo(context.Background(), &Client{client}, test.modulePath, test.version) - if err != nil { - t.Fatal(err) - } - - skip := skipReplayTests[test.desc] - check(t, "file", info.FileURL(test.file), test.wantFile, skip) - }) - } -} - -func newReplayClient(t *testing.T, record bool) (*http.Client, func()) { - replayFilePath := filepath.Join("testdata", t.Name()+".replay") - if record { - httpreplay.DebugHeaders() - t.Logf("Recording into %s", replayFilePath) - if err := os.MkdirAll(filepath.Dir(replayFilePath), 0755); err != nil { - t.Fatal(err) - } - rec, err := httpreplay.NewRecorder(replayFilePath, nil) - if err != nil { - t.Fatal(err) - } - return rec.Client(), func() { - if 
err := rec.Close(); err != nil { - t.Fatal(err) - } - } - } else { - rep, err := httpreplay.NewReplayer(replayFilePath) - if err != nil { - t.Fatal(err) - } - return rep.Client(), func() { _ = rep.Close() } - } -} - func TestMatchStatic(t *testing.T) { for _, test := range []struct { in string @@ -488,7 +51,6 @@ func TestModuleInfoDynamic(t *testing.T) { client := &Client{ httpClient: &http.Client{ Transport: testTransport(testWeb), - Timeout: testTimeout, }, } // The version doesn't figure into the interesting work and we test versions to commits @@ -668,7 +230,8 @@ func TestRemoveVersionSuffix(t *testing.T) { func TestAdjustVersionedModuleDirectory(t *testing.T) { ctx := context.Background() - client := NewClient(testTimeout) + + client := NewClient(http.DefaultClient) client.httpClient.Transport = testTransport(map[string]string{ // Repo "branch" follows the "major branch" convention: versions 2 and higher // live in the same directory as versions 0 and 1, but on a different branch (or tag). @@ -889,6 +452,55 @@ var testWeb = map[string]string{ ``, } +func TestJSON(t *testing.T) { + for _, test := range []struct { + in *Info + want string + }{ + { + nil, + `null`, + }, + { + &Info{repoURL: "r", moduleDir: "m", commit: "c"}, + `{"RepoURL":"r","ModuleDir":"m","Commit":"c"}`, + }, + { + &Info{repoURL: "r", moduleDir: "m", commit: "c", templates: githubURLTemplates}, + `{"RepoURL":"r","ModuleDir":"m","Commit":"c","Kind":"github"}`, + }, + { + &Info{repoURL: "r", moduleDir: "m", commit: "c", templates: urlTemplates{File: "f"}}, + `{"RepoURL":"r","ModuleDir":"m","Commit":"c","Templates":{"Directory":"","File":"f","Line":"","Raw":""}}`, + }, + { + &Info{repoURL: "r", moduleDir: "m", commit: "c", templates: urlTemplates{Repo: "r", File: "f"}}, + `{"RepoURL":"r","ModuleDir":"m","Commit":"c","Templates":{"Repo":"r","Directory":"","File":"f","Line":"","Raw":""}}`, + }, + } { + bytes, err := json.Marshal(&test.in) + if err != nil { + t.Fatal(err) + } + got := string(bytes) + if got != test.want { + t.Errorf("%#v:\ngot %s\nwant %s", test.in, got, test.want) + continue + } + var out Info + if err := json.Unmarshal(bytes, &out); err != nil { + t.Fatal(err) + } + var want Info + if test.in != nil { + want = *test.in + } + if out != want { + t.Errorf("got %#v\nwant %#v", out, want) + } + } +} + func TestURLTemplates(t *testing.T) { // Check that templates contain the right variables. @@ -960,3 +572,18 @@ func TestMatchLegacyTemplates(t *testing.T) { } } } + +func TestFilesInfo(t *testing.T) { + info := FilesInfo("/Users/bob") + + check := func(got, want string) { + t.Helper() + if got != want { + t.Errorf("got %q, want %q", got, want) + } + } + + check(info.RepoURL(), "/files/Users/bob/") + check(info.ModuleURL(), "/files/Users/bob/") + check(info.FileURL("dir/a.go"), "/files/Users/bob/dir/a.go") +} diff --git a/internal/third_party/pkgsite/stdlib/gorepo.go b/internal/third_party/pkgsite/stdlib/gorepo.go new file mode 100644 index 00000000..5e26814f --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/gorepo.go @@ -0,0 +1,231 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package stdlib + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/pulumi/go-licenses/internal/third_party/pkgsite/version" + "github.com/pulumi/go-licenses/internal/third_party/pkgsite/derrors" +) + +// A goRepo represents a git repo holding the Go standard library. +type goRepo interface { + // Clone the repo at the given version to the directory. + clone(ctx context.Context, version string, toDirectory string) (refName string, err error) + + // Return all the refs of the repo. + refs(ctx context.Context) ([]ref, error) +} + +type remoteGoRepo struct{} + +func (remoteGoRepo) clone(ctx context.Context, v, directory string) (refName string, err error) { + defer derrors.Wrap(&err, "remoteGoRepo.clone(%q)", v) + + refName, err = refNameForVersion(v) + if err != nil { + return "", err + } + if err := os.MkdirAll(directory, 0777); err != nil { + return "", err + } + cmd := exec.CommandContext(ctx, "git", "init") + cmd.Dir = directory + if err := cmd.Run(); err != nil { + return "", err + } + cmd = exec.CommandContext(ctx, "git", "fetch", "-f", "--depth=1", "--", GoRepoURL, refName+":main") + cmd.Dir = directory + if b, err := cmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("running git fetch: %v: %s", err, b) + } + cmd = exec.CommandContext(ctx, "git", "checkout", "main") + cmd.Dir = directory + if b, err := cmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("running git checkout: %v: %s", err, b) + } + return refName, nil +} + +type ref struct { + hash string + name string +} + +func (remoteGoRepo) refs(ctx context.Context) ([]ref, error) { + cmd := exec.CommandContext(ctx, "git", "ls-remote", "--", GoRepoURL) + b, err := cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("running git ls-remote: %v: %s", err, ee.Stderr) + } + return nil, fmt.Errorf("running git ls-remote: %v", err) + } + return gitOutputToRefs(b) +} + +func gitOutputToRefs(b []byte) ([]ref, error) { + var refs []ref + b = bytes.TrimSpace(b) + for _, line := range bytes.Split(b, []byte("\n")) { + fields := bytes.Fields(line) + if len(fields) != 2 { + return nil, fmt.Errorf("invalid line in output from git ls-remote: %q: should have two fields", line) + } + refs = append(refs, ref{hash: string(fields[0]), name: string(fields[1])}) + } + return refs, nil +} + +type localGoRepo struct { + path string +} + +func newLocalGoRepo(path string) *localGoRepo { + return &localGoRepo{ + path: path, + } +} + +func (g *localGoRepo) refs(ctx context.Context) (refs []ref, err error) { + defer derrors.Wrap(&err, "localGoRepo(%s).refs", g.path) + + cmd := exec.CommandContext(ctx, "git", "show-ref") + cmd.Dir = g.path + b, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("running git show-ref: %v", err) + } + return gitOutputToRefs(b) +} + +func (g *localGoRepo) clone(ctx context.Context, v, directory string) (refName string, err error) { + return "", nil +} + +type testGoRepo struct { +} + +func (t *testGoRepo) clone(ctx context.Context, v, directory string) (refName string, err error) { + defer derrors.Wrap(&err, "testGoRepo.clone(%q)", v) + if v == TestMasterVersion { + v = version.Master + } + if v == TestDevFuzzVersion { + v = DevFuzz + } + cmd := exec.CommandContext(ctx, "git", "init") + cmd.Dir = directory + if err := cmd.Run(); err != nil { + return "", err + } + testdatadir := filepath.Join(testDataPath("testdata"), v) + err = filepath.Walk(testdatadir, func(path string, info fs.FileInfo, 
err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(testdatadir, path) + if err != nil { + return err + } + dstpath := filepath.Join(directory, rel) + if info.Mode().IsDir() { + os.MkdirAll(dstpath, 0777) + return nil + } + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading %q: %v", path, err) + } + os.WriteFile(dstpath, b, 0666) + cmd := exec.CommandContext(ctx, "git", "add", "--", dstpath) + cmd.Dir = directory + if err := cmd.Run(); err != nil { + return fmt.Errorf("running git add: %v", err) + } + return nil + }) + if err != nil { + return "", err + } + cmd = exec.CommandContext(ctx, "git", "commit", "--allow-empty-message", "--author=Joe Random ", + "--message=") + cmd.Dir = directory + commitTime := fmt.Sprintf("%v +0000", TestCommitTime.Unix()) + name := "Joe Random" + email := "joe@example.com" + cmd.Env = append(cmd.Environ(), []string{ + "GIT_COMMITTER_NAME=" + name, "GIT_AUTHOR_NAME=" + name, + "GIT_COMMITTER_EMAIL=" + email, "GIT_AUTHOR_EMAIL=" + email, + "GIT_COMMITTER_DATE=" + commitTime, "GIT_AUTHOR_DATE=" + commitTime}...) + if err := cmd.Run(); err != nil { + if ee, ok := err.(*exec.ExitError); ok { + return "", fmt.Errorf("running git commit: %v: %s", err, ee.Stderr) + } + return "", fmt.Errorf("running git commit: %v", err) + } + return "HEAD", nil +} + +// testDataPath returns a path corresponding to a path relative to the calling +// test file. For convenience, rel is assumed to be "/"-delimited. +// It is a copy of testhelper.TestDataPath, which we can't use in this +// file because it is supposed to only be depended on by test files. +// +// It panics on failure. +func testDataPath(rel string) string { + _, filename, _, ok := runtime.Caller(1) + if !ok { + panic("unable to determine relative path") + } + return filepath.Clean(filepath.Join(filepath.Dir(filename), filepath.FromSlash(rel))) +} + +// References used for Versions during testing. +var testRefs = []string{ + // stdlib versions + "refs/tags/go1.2.1", + "refs/tags/go1.3.2", + "refs/tags/go1.4.2", + "refs/tags/go1.4.3", + "refs/tags/go1.6", + "refs/tags/go1.6.3", + "refs/tags/go1.6beta1", + "refs/tags/go1.8", + "refs/tags/go1.8rc2", + "refs/tags/go1.9rc1", + "refs/tags/go1.11", + "refs/tags/go1.12", + "refs/tags/go1.12.1", + "refs/tags/go1.12.5", + "refs/tags/go1.12.9", + "refs/tags/go1.13", + "refs/tags/go1.13beta1", + "refs/tags/go1.14.6", + "refs/tags/go1.21.0", + "refs/heads/dev.fuzz", + "refs/heads/master", + // other tags + "refs/changes/56/93156/13", + "refs/tags/release.r59", + "refs/tags/weekly.2011-04-13", +} + +func (t *testGoRepo) refs(ctx context.Context) ([]ref, error) { + var rs []ref + for _, r := range testRefs { + // Only the name is ever used, so the referent can be empty. + rs = append(rs, ref{name: r}) + } + return rs, nil +} diff --git a/internal/third_party/pkgsite/stdlib/stdlib.go b/internal/third_party/pkgsite/stdlib/stdlib.go index 054d0ff8..767de550 100644 --- a/internal/third_party/pkgsite/stdlib/stdlib.go +++ b/internal/third_party/pkgsite/stdlib/stdlib.go @@ -9,8 +9,20 @@ package stdlib import ( + "archive/zip" + "bytes" + "context" "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" "strings" + "sync" + "time" "github.com/pulumi/go-licenses/internal/third_party/pkgsite/derrors" "github.com/pulumi/go-licenses/internal/third_party/pkgsite/version" @@ -28,6 +40,16 @@ const ( DevBoringCrypto = "dev.boringcrypto" ) +var ( + // Regexp for matching go tags. 
The groups are: + // 1 the major.minor version + // 2 the patch version, or empty if none + // 3 the entire prerelease, if present + // 4 the prerelease type ("beta" or "rc") + // 5 the prerelease number + tagRegexp = regexp.MustCompile(`^go(\d+\.\d+)(\.\d+|)((beta|rc)(\d+))?$`) +) + // SupportedBranches are the branches of the stdlib repo supported by pkgsite. var SupportedBranches = map[string]bool{ version.Master: true, @@ -35,9 +57,51 @@ var SupportedBranches = map[string]bool{ DevFuzz: true, } +// VersionForTag returns the semantic version for the Go tag, or "" if +// tag doesn't correspond to a Go release or beta tag. In special cases, +// when the tag specified is either `latest` or `master` it will return the tag. +// Examples: +// +// "go1" => "v1.0.0" +// "go1.2" => "v1.2.0" +// "go1.13beta1" => "v1.13.0-beta.1" +// "go1.9rc2" => "v1.9.0-rc.2" +// "latest" => "latest" +// "master" => "master" +func VersionForTag(tag string) string { + // Special cases for go1. + if tag == "go1" { + return "v1.0.0" + } + if tag == "go1.0" { + return "" + } + // Special case for latest and master. + if tag == version.Latest || SupportedBranches[tag] { + return tag + } + m := tagRegexp.FindStringSubmatch(tag) + if m == nil { + return "" + } + version := "v" + m[1] + if m[2] != "" { + version += m[2] + } else { + version += ".0" + } + if m[3] != "" { + version += "-" + m[4] + "." + m[5] + } + return version +} + // TagForVersion returns the Go standard library repository tag corresponding // to semver. The Go tags differ from standard semantic versions in a few ways, // such as beginning with "go" instead of "v". +// +// Starting with go1.21.0, the first patch release of major go versions include +// the .0 suffix. Previously, the .0 suffix was elided (golang/go#57631). func TagForVersion(v string) (_ string, err error) { defer derrors.Wrap(&err, "TagForVersion(%q)", v) @@ -59,7 +123,9 @@ func TagForVersion(v string) (_ string, err error) { prerelease := semver.Prerelease(goVersion) versionWithoutPrerelease := strings.TrimSuffix(goVersion, prerelease) patch := strings.TrimPrefix(versionWithoutPrerelease, semver.MajorMinor(goVersion)+".") - if patch == "0" { + if patch == "0" && (semver.Compare(v, "v1.21.0") < 0 || prerelease != "") { + // Starting with go1.21.0, the first patch version includes .0. + // Prereleases do not include .0 (we don't do prereleases for other patch releases). versionWithoutPrerelease = strings.TrimSuffix(versionWithoutPrerelease, ".0") } goVersion = fmt.Sprintf("go%s", strings.TrimPrefix(versionWithoutPrerelease, "v")) @@ -80,6 +146,25 @@ func TagForVersion(v string) (_ string, err error) { return goVersion, nil } +// MajorVersionForVersion returns the Go major version for version. +// E.g. "v1.13.3" => "go1". +func MajorVersionForVersion(version string) (_ string, err error) { + defer derrors.Wrap(&err, "MajorVersionForVersion(%q)", version) + + tag, err := TagForVersion(version) + if err != nil { + return "", err + } + if tag == "go1" || tag == "master" { + return "go1", nil + } + i := strings.IndexRune(tag, '.') + if i < 0 { + return "", fmt.Errorf("no '.' in go tag %q", tag) + } + return tag[:i], nil +} + // finalDigitsIndex returns the index of the first digit in the sequence of digits ending s. // If s doesn't end in digits, it returns -1. 
func finalDigitsIndex(s string) int { @@ -97,9 +182,123 @@ func finalDigitsIndex(s string) int { } const ( + GoRepoURL = "https://go.googlesource.com/go" GoSourceRepoURL = "https://cs.opensource.google/go/go" + + GitHubRepo = "github.com/golang/go" +) + +// TestCommitTime is the time used for all commits when UseTestData is true. +var ( + TestCommitTime = time.Date(2019, 9, 4, 1, 2, 3, 0, time.UTC) + TestMasterVersion = "v0.0.0-20190904010203-89fb59e2e920" + TestDevFuzzVersion = "v0.0.0-20190904010203-12de34vf56uz" +) + +var ( + goRepoMu sync.Mutex + theGoRepo goRepo = &remoteGoRepo{} ) +func getGoRepo() goRepo { + goRepoMu.Lock() + defer goRepoMu.Unlock() + return theGoRepo +} + +func swapGoRepo(gr goRepo) goRepo { + goRepoMu.Lock() + defer goRepoMu.Unlock() + old := theGoRepo + theGoRepo = gr + return old +} + +// WithTestData arranges for this package to use a testing version of the Go repo. +// The returned function restores the previous state. Use with defer: +// +// defer WithTestData()() +func WithTestData() func() { + return withGoRepo(&testGoRepo{}) +} + +func withGoRepo(gr goRepo) func() { + old := swapGoRepo(gr) + return func() { + swapGoRepo(old) + } +} + +// SetGoRepoPath tells this package to obtain the Go repo from the +// local filesystem at path, instead of cloning it. +func SetGoRepoPath(path string) error { + gr := newLocalGoRepo(path) + swapGoRepo(gr) + return nil +} + +func refNameForVersion(v string) (string, error) { + if v == version.Master { + return "HEAD", nil + } + if SupportedBranches[v] { + return "refs/heads/" + v, nil + } + tag, err := TagForVersion(v) + if err != nil { + return "", err + } + return "refs/tags/" + tag, nil +} + +// Versions returns all the semantic versions of Go that are relevant to the +// discovery site. These are all release versions (derived from tags of the +// forms "goN.N" and "goN.N.N", where N is a number) and beta or rc versions +// (derived from tags of the forms "goN.NbetaN" and "goN.N.NbetaN", and +// similarly for "rc" replacing "beta"). +func Versions() (_ []string, err error) { + defer derrors.Wrap(&err, "stdlib.Versions()") + + refs, err := getGoRepo().refs(context.TODO()) + if err != nil { + return nil, err + } + var versions []string + for _, r := range refs { + if !strings.HasPrefix(r.name, "refs/tags/") { + continue + } + tagName := strings.TrimPrefix(r.name, "refs/tags/") + v := VersionForTag(tagName) + if v != "" { + versions = append(versions, v) + } + } + return versions, nil +} + +// ResolveSupportedBranches returns the current hashes for each ref in +// SupportedBranches. +func ResolveSupportedBranches() (_ map[string]string, err error) { + defer derrors.Wrap(&err, "ResolveSupportedBranches") + + refs, err := getGoRepo().refs(context.TODO()) + if err != nil { + return nil, fmt.Errorf("getting refs: %v", err) + } + m := map[string]string{} + for _, r := range refs { + if !strings.HasPrefix(r.name, "refs/heads/") { + continue + } + name := strings.TrimPrefix(r.name, "refs/heads/") + if SupportedBranches[name] { + m[name] = r.hash + } + } + return m, nil +} + // Directory returns the directory of the standard library relative to the repo root. func Directory(v string) string { if semver.Compare(v, "v1.4.0-beta.1") >= 0 || @@ -109,3 +308,271 @@ func Directory(v string) string { // For versions older than v1.4.0-beta.1, the stdlib is in src/pkg. return "src/pkg" } + +// EstimatedZipSize is the approximate size of +// Zip("v1.15.2"). 
+const EstimatedZipSize = 16 * 1024 * 1024 + +// ZipInfo returns the proxy .info information for the module std. +func ZipInfo(requestedVersion string) (resolvedVersion string, err error) { + defer derrors.Wrap(&err, "stdlib.ZipInfo(%q)", requestedVersion) + + resolvedVersion, err = semanticVersion(requestedVersion) + if err != nil { + return "", err + } + return resolvedVersion, nil +} + +func hashForRef(ctx context.Context, dir, tag string) (string, error) { + cmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--", tag) + cmd.Dir = dir + b, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("running git show-ref: %v", err) + } + b = bytes.TrimSpace(b) + f := bytes.Fields(b) + if len(f) != 2 { + return "", fmt.Errorf("invalid output from git show-ref: %q: expect two fields", b) + } + return string(f[0]), nil +} + +func commiterTime(ctx context.Context, dir, object string) (time.Time, error) { + cmd := exec.CommandContext(ctx, "git", "show", "--no-patch", "--no-notes", "--format=%aI", object) + cmd.Dir = dir + b, err := cmd.Output() + if err != nil { + return time.Time{}, fmt.Errorf("running git show: %v, %s", err, b) + } + t, err := time.Parse(time.RFC3339, string(bytes.TrimSpace(b))) + if err != nil { + return time.Time{}, fmt.Errorf("parsing time output %q from command %v: %s", b, cmd, err) + } + return t, nil +} + +func zipInternal(ctx context.Context, requestedVersion string) (_ *zip.Reader, resolvedVersion string, commitTime time.Time, prefix string, err error) { + if requestedVersion == version.Latest { + requestedVersion, err = semanticVersion(requestedVersion) + if err != nil { + return nil, "", time.Time{}, "", err + } + } + dir, err := os.MkdirTemp("", "") + if err != nil { + return nil, "", time.Time{}, "", err + } + defer func() { + rmallerr := os.RemoveAll(dir) + if err == nil { + err = rmallerr + } + }() + refName, err := getGoRepo().clone(ctx, requestedVersion, dir) + if err != nil { + return nil, "", time.Time{}, "", err + } + var buf bytes.Buffer + z := zip.NewWriter(&buf) + + hash, err := hashForRef(ctx, dir, refName) + if err != nil { + return nil, "", time.Time{}, "", err + } + commitTime, err = commiterTime(ctx, dir, hash) + if err != nil { + return nil, "", time.Time{}, "", err + } + resolvedVersion = requestedVersion + if SupportedBranches[requestedVersion] { + resolvedVersion = newPseudoVersion("v0.0.0", commitTime, hash) + } + prefixPath := ModulePath + "@" + requestedVersion + // Add top-level files. + if err := addFiles(z, dir, prefixPath, false); err != nil { + return nil, "", time.Time{}, "", err + } + // Add files from the stdlib directory. + libDir := filepath.Join(dir, Directory(resolvedVersion)) + if err := addFiles(z, libDir, prefixPath, true); err != nil { + return nil, "", time.Time{}, "", err + } + if err := z.Close(); err != nil { + return nil, "", time.Time{}, "", err + } + br := bytes.NewReader(buf.Bytes()) + zr, err := zip.NewReader(br, int64(br.Len())) + if err != nil { + return nil, "", time.Time{}, "", err + } + return zr, resolvedVersion, commitTime, prefixPath, nil +} + +// ContentDir creates an fs.FS representing the entire Go standard library at the +// given version (which must have been resolved with ZipInfo) and returns a +// reader to it. It also returns the time of the commit for that version. +// +// Normally, ContentDir returns the resolved version it was passed. If the +// resolved version is a supported branch like "master", ContentDir returns a +// semantic version for the branch. 
+// +// ContentDir reads the standard library at the Go repository tag corresponding +// to the given semantic version. +// +// ContentDir ignores go.mod files in the standard library, treating it as if it +// were a single module named "std" at the given version. +func ContentDir(ctx context.Context, requestedVersion string) (_ fs.FS, resolvedVersion string, commitTime time.Time, err error) { + defer derrors.Wrap(&err, "stdlib.ContentDir(%q)", requestedVersion) + + zr, resolvedVersion, commitTime, prefix, err := zipInternal(ctx, requestedVersion) + if err != nil { + return nil, "", time.Time{}, err + } + cdir, err := fs.Sub(zr, prefix) + if err != nil { + return nil, "", time.Time{}, err + } + return cdir, resolvedVersion, commitTime, nil +} + +const pseudoHashLen = 12 + +func newPseudoVersion(version string, commitTime time.Time, hash string) string { + return fmt.Sprintf("%s-%s-%s", version, commitTime.Format("20060102150405"), hash[:pseudoHashLen]) +} + +// VersionMatchesHash reports whether v is a pseudo-version whose hash +// part matches the prefix of the given hash. +func VersionMatchesHash(v, hash string) bool { + if !version.IsPseudo(v) { + return false + } + return v[len(v)-pseudoHashLen:] == hash[:pseudoHashLen] +} + +// semanticVersion returns the semantic version corresponding to the +// requestedVersion. If the requested version is version.Master, then semanticVersion +// returns it as is. The branch name is resolved to a proper pseudo-version in +// Zip. +func semanticVersion(requestedVersion string) (_ string, err error) { + defer derrors.Wrap(&err, "semanticVersion(%q)", requestedVersion) + + if SupportedBranches[requestedVersion] { + return requestedVersion, nil + } + + knownVersions, err := Versions() + if err != nil { + return "", err + } + + switch requestedVersion { + case version.Latest: + var latestVersion string + for _, v := range knownVersions { + if !strings.HasPrefix(v, "v") { + continue + } + versionType, err := version.ParseType(v) + if err != nil { + return "", err + } + if versionType != version.TypeRelease { + // We expect there to always be at least 1 release version. + continue + } + if semver.Compare(v, latestVersion) > 0 { + latestVersion = v + } + } + return latestVersion, nil + default: + for _, v := range knownVersions { + if v == requestedVersion { + return requestedVersion, nil + } + } + } + + return "", fmt.Errorf("%w: requested version unknown: %q", derrors.InvalidArgument, requestedVersion) +} + +// addFiles adds the files in t to z, using dirpath as the path prefix. +// If recursive is true, it also adds the files in all subdirectories. +func addFiles(z *zip.Writer, directory string, dirpath string, recursive bool) (err error) { + defer derrors.Wrap(&err, "addFiles(zip, repository, tree, %q, %t)", dirpath, recursive) + + dirents, err := os.ReadDir(directory) + if err != nil { + return err + } + for _, e := range dirents { + if strings.HasPrefix(e.Name(), ".") || strings.HasPrefix(e.Name(), "_") { + continue + } + if e.Name() == "go.mod" { + // Ignore; we don't need it. + continue + } + if strings.HasPrefix(e.Name(), "README") && !strings.Contains(dirpath, "/") { + // For versions newer than v1.4.0-beta.1, the stdlib is in src/pkg. + // This means that our construction of the zip files will return + // two READMEs at the root: + // https://golang.org/README.md and + // https://golang.org/src/README.vendor + // + // We do not want to display the README.md + // or any README.vendor. 
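A pseudo-version for a supported branch is the base version, the commit time in `20060102150405` form, and the first 12 hex digits of the commit hash; `VersionMatchesHash` then compares only that 12-character suffix. A self-contained sketch using the hash and timestamp exercised in `TestVersionMatchesHash`:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

const pseudoHashLen = 12

// pseudoVersion has the same shape as newPseudoVersion above:
// <base>-<commit time>-<12-char hash prefix>.
func pseudoVersion(base string, commitTime time.Time, hash string) string {
	return fmt.Sprintf("%s-%s-%s", base, commitTime.Format("20060102150405"), hash[:pseudoHashLen])
}

func main() {
	hash := "c8dfa306babb91e88f8ba25329b3ef8aa11944e1"
	commitTime := time.Date(2021, 9, 10, 21, 28, 48, 0, time.UTC)

	v := pseudoVersion("v0.0.0", commitTime, hash)
	fmt.Println(v) // v0.0.0-20210910212848-c8dfa306babb

	// VersionMatchesHash only checks the hash suffix, not the timestamp.
	fmt.Println(strings.HasSuffix(v, hash[:pseudoHashLen])) // true
}
```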
+ // However, we do want to store the README in + // other directories. + continue + } + switch { + case e.Type().IsRegular(): + f, err := os.Open(filepath.Join(directory, e.Name())) + if err != nil { + return err + } + if err := writeZipFile(z, path.Join(dirpath, e.Name()), f); err != nil { + _ = f.Close() + return err + } + if err := f.Close(); err != nil { + return err + } + case e.Type().IsDir(): + if !recursive || e.Name() == "testdata" { + continue + } + if err != nil { + return err + } + if err := addFiles(z, filepath.Join(directory, e.Name()), path.Join(dirpath, e.Name()), recursive); err != nil { + return err + } + } + } + return nil +} + +func writeZipFile(z *zip.Writer, pathname string, src io.Reader) (err error) { + defer derrors.Wrap(&err, "writeZipFile(zip, %q, src)", pathname) + + dst, err := z.Create(pathname) + if err != nil { + return err + } + _, err = io.Copy(dst, src) + return err +} + +// Contains reports whether the given import path could be part of the Go standard library, +// by reporting whether the first component lacks a '.'. +func Contains(path string) bool { + if i := strings.IndexByte(path, '/'); i != -1 { + path = path[:i] + } + return !strings.Contains(path, ".") +} diff --git a/internal/third_party/pkgsite/stdlib/stdlib_test.go b/internal/third_party/pkgsite/stdlib/stdlib_test.go index 965e9eb5..8b7edf6c 100644 --- a/internal/third_party/pkgsite/stdlib/stdlib_test.go +++ b/internal/third_party/pkgsite/stdlib/stdlib_test.go @@ -5,7 +5,21 @@ package stdlib import ( + "context" + "errors" + "flag" + "io/fs" + "reflect" "testing" + + "github.com/pulumi/go-licenses/internal/third_party/pkgsite/testenv" + "github.com/pulumi/go-licenses/internal/third_party/pkgsite/version" + "golang.org/x/mod/semver" +) + +var ( + clone = flag.Bool("clone", false, "test actual clones of the Go repo") + repoPath = flag.String("path", "", "path to Go repo to test") ) func TestTagForVersion(t *testing.T) { @@ -50,6 +64,26 @@ func TestTagForVersion(t *testing.T) { version: "v1.13.0", want: "go1.13", }, + { + name: "version v1.20.0-rc.2", + version: "v1.20.0-rc.2", + want: "go1.20rc2", + }, + { + name: "version v1.20.0", + version: "v1.20.0", + want: "go1.20", + }, + { + name: "version v1.21.0-rc.2", + version: "v1.21.0-rc.2", + want: "go1.21rc2", + }, + { + name: "version v1.21.0", + version: "v1.21.0", + want: "go1.21.0", + }, { name: "master branch", version: "master", @@ -83,12 +117,254 @@ func TestTagForVersion(t *testing.T) { return } if got != test.want { - t.Errorf("TagForVersion(%q) = %q, %v, wanted %q, %v", test.version, got, err, test.want, nil) + t.Fatalf("TagForVersion(%q) = %q, %v, wanted %q, %v", test.version, got, err, test.want, nil) + } + }) + } +} + +func TestMajorVersionForVersion(t *testing.T) { + for _, test := range []struct { + in string + want string // empty => error + }{ + {"", ""}, + {"garbage", ""}, + {"v1.0.0", "go1"}, + {"v1.13.3", "go1"}, + {"v1.9.0-rc.2", "go1"}, + {"v2.1.3", "go2"}, + {"v2.1.3", "go2"}, + {"v0.0.0-20230307225218-457fd1d52d17", "go1"}, + } { + got, err := MajorVersionForVersion(test.in) + if (err != nil) != (test.want == "") { + t.Errorf("%q: err: got %v, wanted error: %t", test.in, err, test.want == "") + } + if err == nil && got != test.want { + t.Errorf("%q: got %q, want %q", test.in, got, test.want) + } + } +} + +func TestContentDir(t *testing.T) { + ctx := context.Background() + testenv.MustHaveExecPath(t, "git") + defer WithTestData()() + for _, resolvedVersion := range []string{ + "v1.3.2", + "v1.12.5", + "v1.14.6", + DevFuzz, 
+ version.Master, + } { + t.Run(resolvedVersion, func(t *testing.T) { + cdir, gotResolvedVersion, gotTime, err := ContentDir(ctx, resolvedVersion) + if err != nil { + t.Fatal(err) + } + if SupportedBranches[resolvedVersion] { + if !version.IsPseudo(gotResolvedVersion) { + t.Errorf("resolved version: %s is not a pseudo-version", gotResolvedVersion) + } + } else if gotResolvedVersion != resolvedVersion { + t.Errorf("resolved version: got %s, want %s", gotResolvedVersion, resolvedVersion) + } + if !gotTime.Equal(TestCommitTime) { + t.Errorf("commit time: got %s, want %s", gotTime, TestCommitTime) } + checkContentDirFiles(t, cdir, resolvedVersion) }) } } +func TestContentDirCloneAndOpen(t *testing.T) { + ctx := context.Background() + run := func(t *testing.T) { + for _, resolvedVersion := range []string{ + "v1.3.2", + "v1.14.6", + version.Master, + version.Latest, + } { + t.Run(resolvedVersion, func(t *testing.T) { + cdir, _, _, err := ContentDir(ctx, resolvedVersion) + if err != nil { + t.Fatal(err) + } + checkContentDirFiles(t, cdir, resolvedVersion) + }) + } + } + + t.Run("clone", func(t *testing.T) { + if !*clone { + t.Skip("-clone not supplied") + } + defer withGoRepo(&remoteGoRepo{})() + run(t) + }) + t.Run("local", func(t *testing.T) { + if *repoPath == "" { + t.Skip("-path not supplied") + } + lgr := newLocalGoRepo(*repoPath) + + defer withGoRepo(lgr)() + run(t) + }) +} + +func checkContentDirFiles(t *testing.T, cdir fs.FS, resolvedVersion string) { + wantFiles := map[string]bool{ + "LICENSE": true, + "errors/errors.go": true, + "errors/errors_test.go": true, + } + if semver.Compare(resolvedVersion, "v1.13.0") > 0 || resolvedVersion == TestMasterVersion { + wantFiles["cmd/README.vendor"] = true + } + if semver.Compare(resolvedVersion, "v1.14.0") > 0 { + wantFiles["context/context.go"] = true + } + const readmeVendorFile = "README.vendor" + if _, err := fs.Stat(cdir, readmeVendorFile); !errors.Is(err, fs.ErrNotExist) { + t.Fatalf("fs.Stat returned %v; want %q to be removed", err, readmeVendorFile) + } + err := fs.WalkDir(cdir, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + delete(wantFiles, path) + return nil + }) + if err != nil { + t.Fatal(err) + } + if len(wantFiles) > 0 { + t.Errorf("zip missing files: %v", reflect.ValueOf(wantFiles).MapKeys()) + } +} + +func TestZipInfo(t *testing.T) { + defer WithTestData()() + + for _, tc := range []struct { + requestedVersion string + want string + }{ + { + requestedVersion: "latest", + want: "v1.21.0", + }, + { + requestedVersion: "master", + want: "master", + }, + } { + gotVersion, err := ZipInfo(tc.requestedVersion) + if err != nil { + t.Fatal(err) + } + if want := tc.want; gotVersion != want { + t.Errorf("version: got %q, want %q", gotVersion, want) + } + } +} + +func TestVersions(t *testing.T) { + testVersions := func(wants []string) { + got, err := Versions() + if err != nil { + t.Fatal(err) + } + gotmap := map[string]bool{} + for _, g := range got { + gotmap[g] = true + } + for _, w := range wants { + if !gotmap[w] { + t.Errorf("missing %s", w) + } + } + } + + commonWants := []string{ + "v1.4.2", + "v1.9.0-rc.1", + "v1.11.0", + "v1.13.0-beta.1", + } + otherWants := append([]string{"v1.17.6"}, commonWants...) + t.Run("test", func(t *testing.T) { + defer WithTestData()() + testWants := append([]string{"v1.21.0"}, commonWants...) 
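`ContentDir` hands back a plain `fs.FS`, so callers can use the standard `io/fs` helpers much as `checkContentDirFiles` does. A minimal sketch, again assuming it is built inside this module and that `git` is on PATH (the testdata path still shells out to git, as `TestContentDir` does):

```go
package main

import (
	"context"
	"fmt"
	"io/fs"
	"log"

	"github.com/pulumi/go-licenses/internal/third_party/pkgsite/stdlib"
)

func main() {
	// Use the checked-in testdata repo; git is still required.
	defer stdlib.WithTestData()()

	cdir, resolved, commitTime, err := stdlib.ContentDir(context.Background(), "v1.12.5")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved, commitTime) // "v1.12.5" and the fixed TestCommitTime

	// The returned FS is rooted at the std@<version> prefix, so paths are
	// relative to the module root.
	data, err := fs.ReadFile(cdir, "LICENSE")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("LICENSE: %d bytes\n", len(data))
}
```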
+ testVersions(testWants) + }) + t.Run("local", func(t *testing.T) { + if *repoPath == "" { + t.Skip("-path not supplied") + } + lgr := newLocalGoRepo(*repoPath) + + defer withGoRepo(lgr)() + testVersions(otherWants) + }) + t.Run("remote", func(t *testing.T) { + if !*clone { + t.Skip("-clone not supplied") + } + defer withGoRepo(&remoteGoRepo{})() + testVersions(otherWants) + }) +} + +func TestVersionForTag(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"", ""}, + {"go1", "v1.0.0"}, + {"go1.9beta2", "v1.9.0-beta.2"}, + {"go1.12", "v1.12.0"}, + {"go1.9.7", "v1.9.7"}, + {"go2.0", "v2.0.0"}, + {"go1.9rc2", "v1.9.0-rc.2"}, + {"go1.1beta", ""}, + {"go1.0", ""}, + {"weekly.2012-02-14", ""}, + {"latest", "latest"}, + {"go1.21.0", "v1.21.0"}, + {"go1.21", "v1.21.0"}, + } { + got := VersionForTag(test.in) + if got != test.want { + t.Errorf("VersionForTag(%q) = %q, want %q", test.in, got, test.want) + } + } +} + +func TestContains(t *testing.T) { + for _, test := range []struct { + in string + want bool + }{ + {"fmt", true}, + {"encoding/json", true}, + {"something/with.dots", true}, + {"example.com", false}, + {"example.com/fmt", false}, + } { + got := Contains(test.in) + if got != test.want { + t.Errorf("Contains(%q) = %t, want %t", test.in, got, test.want) + } + } +} + func TestDirectory(t *testing.T) { for _, tc := range []struct { version string @@ -113,3 +389,34 @@ func TestDirectory(t *testing.T) { } } } + +func TestVersionMatchesHash(t *testing.T) { + v := "v0.0.0-20210910212848-c8dfa306babb" + h := "c8dfa306babb91e88f8ba25329b3ef8aa11944e1" + if !VersionMatchesHash(v, h) { + t.Error("got false, want true") + } + h = "c8dfa306babXb91e88f8ba25329b3ef8aa11944e1" + if VersionMatchesHash(v, h) { + t.Error("got true, want false") + } +} + +func TestResolveSupportedBranches(t *testing.T) { + testenv.MustHaveExternalNetwork(t) // ResolveSupportedBranches accesses the go repo at go.googlesource.com + testenv.MustHaveExecPath(t, "git") // ResolveSupportedBranches uses the git command to do so. + + got, err := ResolveSupportedBranches() + if err != nil { + t.Fatal(err) + } + // We can't check the hashes because they change, but we can check the keys. + for key := range got { + if !SupportedBranches[key] { + t.Errorf("got key %q not in SupportedBranches", key) + } + } + if g, w := len(got), len(SupportedBranches); g != w { + t.Errorf("got %d hashes, want %d", g, w) + } +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/LICENSE b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/README.md b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/README.md new file mode 100644 index 00000000..d6d2b9d2 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/README.md @@ -0,0 +1 @@ +# The Go Programming Language diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/README.vendor b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/README.vendor new file mode 100644 index 00000000..e74fc2f3 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/README.vendor @@ -0,0 +1,54 @@ +Vendoring in std and cmd +======================== + +The Go command maintains copies of external packages needed by the +standard library in the src/vendor and src/cmd/vendor directories. + +In GOPATH mode, imports of vendored packages are resolved to these +directories following normal vendor directory logic +(see golang.org/s/go15vendor). + +In module mode, std and cmd are modules (defined in src/go.mod and +src/cmd/go.mod). When a package outside std or cmd is imported +by a package inside std or cmd, the import path is interpreted +as if it had a "vendor/" prefix. For example, within "crypto/tls", +an import of "golang.org/x/crypto/cryptobyte" resolves to +"vendor/golang.org/x/crypto/cryptobyte". When a package with the +same path is imported from a package outside std or cmd, it will +be resolved normally. Consequently, a binary may be built with two +copies of a package at different versions if the package is +imported normally and vendored by the standard library. + +Vendored packages are internally renamed with a "vendor/" prefix +to preserve the invariant that all packages have distinct paths. +This is necessary to avoid compiler and linker conflicts. Adding +a "vendor/" prefix also maintains the invariant that standard +library packages begin with a dotless path element. + +The module requirements of std and cmd do not influence version +selection in other modules. They are only considered when running +module commands like 'go get' and 'go mod vendor' from a directory +in GOROOT/src. + +Maintaining vendor directories +============================== + +Before updating vendor directories, ensure that module mode is enabled. +Make sure GO111MODULE=off is not set ('on' or 'auto' should work). + +Requirements may be added, updated, and removed with 'go get'. +The vendor directory may be updated with 'go mod vendor'. +A typical sequence might be: + + cd src + go get -d golang.org/x/net@latest + go mod tidy + go mod vendor + +Use caution when passing '-u' to 'go get'. The '-u' flag updates +modules providing all transitively imported packages, not only +the module providing the target package. 
+ +Note that 'go mod vendor' only copies packages that are transitively +imported by packages in the current module. If a new package is needed, +it should be imported before running 'go mod vendor'. diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/cmd/README.vendor b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/cmd/README.vendor new file mode 100644 index 00000000..ac0df5e9 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/cmd/README.vendor @@ -0,0 +1,2 @@ +See src/README.vendor for information on loading vendored packages +and updating the vendor directory. diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors.go b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors.go new file mode 100644 index 00000000..b8a46921 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors.go @@ -0,0 +1,20 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +// New returns an error that formats as the given text. +func New(text string) error { + return &errorString{text} +} + +// errorString is a trivial implementation of error. +type errorString struct { + s string +} + +func (e *errorString) Error() string { + return e.s +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors_test.go b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors_test.go new file mode 100644 index 00000000..cf4df90b --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/dev.fuzz/src/errors/errors_test.go @@ -0,0 +1,53 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors_test + +import ( + "errors" + "fmt" + "testing" +) + +func TestNewEqual(t *testing.T) { + // Different allocations should not be equal. + if errors.New("abc") == errors.New("abc") { + t.Errorf(`New("abc") == New("abc")`) + } + if errors.New("abc") == errors.New("xyz") { + t.Errorf(`New("abc") == New("xyz")`) + } + + // Same allocation should be equal to itself (not crash). + err := errors.New("jkl") + if err != err { + t.Errorf(`err != err`) + } +} + +func TestErrorMethod(t *testing.T) { + err := errors.New("abc") + if err.Error() != "abc" { + t.Errorf(`New("abc").Error() = %q, want %q`, err.Error(), "abc") + } +} + +func ExampleNew() { + err := errors.New("emit macho dwarf: elf header corrupted") + if err != nil { + fmt.Print(err) + } + // Output: emit macho dwarf: elf header corrupted +} + +// The fmt package's Errorf function lets us use the package's formatting +// features to create descriptive error messages. +func ExampleNew_errorf() { + const name, id = "bimmler", 17 + err := fmt.Errorf("user %q (id %d) not found", name, id) + if err != nil { + fmt.Print(err) + } + // Output: user "bimmler" (id 17) not found +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/LICENSE b/internal/third_party/pkgsite/stdlib/testdata/master/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/README.md b/internal/third_party/pkgsite/stdlib/testdata/master/README.md new file mode 100644 index 00000000..d6d2b9d2 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/README.md @@ -0,0 +1 @@ +# The Go Programming Language diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/src/README.vendor b/internal/third_party/pkgsite/stdlib/testdata/master/src/README.vendor new file mode 100644 index 00000000..e74fc2f3 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/src/README.vendor @@ -0,0 +1,54 @@ +Vendoring in std and cmd +======================== + +The Go command maintains copies of external packages needed by the +standard library in the src/vendor and src/cmd/vendor directories. + +In GOPATH mode, imports of vendored packages are resolved to these +directories following normal vendor directory logic +(see golang.org/s/go15vendor). + +In module mode, std and cmd are modules (defined in src/go.mod and +src/cmd/go.mod). When a package outside std or cmd is imported +by a package inside std or cmd, the import path is interpreted +as if it had a "vendor/" prefix. For example, within "crypto/tls", +an import of "golang.org/x/crypto/cryptobyte" resolves to +"vendor/golang.org/x/crypto/cryptobyte". When a package with the +same path is imported from a package outside std or cmd, it will +be resolved normally. Consequently, a binary may be built with two +copies of a package at different versions if the package is +imported normally and vendored by the standard library. + +Vendored packages are internally renamed with a "vendor/" prefix +to preserve the invariant that all packages have distinct paths. +This is necessary to avoid compiler and linker conflicts. Adding +a "vendor/" prefix also maintains the invariant that standard +library packages begin with a dotless path element. + +The module requirements of std and cmd do not influence version +selection in other modules. 
They are only considered when running +module commands like 'go get' and 'go mod vendor' from a directory +in GOROOT/src. + +Maintaining vendor directories +============================== + +Before updating vendor directories, ensure that module mode is enabled. +Make sure GO111MODULE=off is not set ('on' or 'auto' should work). + +Requirements may be added, updated, and removed with 'go get'. +The vendor directory may be updated with 'go mod vendor'. +A typical sequence might be: + + cd src + go get -d golang.org/x/net@latest + go mod tidy + go mod vendor + +Use caution when passing '-u' to 'go get'. The '-u' flag updates +modules providing all transitively imported packages, not only +the module providing the target package. + +Note that 'go mod vendor' only copies packages that are transitively +imported by packages in the current module. If a new package is needed, +it should be imported before running 'go mod vendor'. diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/src/cmd/README.vendor b/internal/third_party/pkgsite/stdlib/testdata/master/src/cmd/README.vendor new file mode 100644 index 00000000..ac0df5e9 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/src/cmd/README.vendor @@ -0,0 +1,2 @@ +See src/README.vendor for information on loading vendored packages +and updating the vendor directory. diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors.go b/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors.go new file mode 100644 index 00000000..b8a46921 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors.go @@ -0,0 +1,20 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +// New returns an error that formats as the given text. +func New(text string) error { + return &errorString{text} +} + +// errorString is a trivial implementation of error. +type errorString struct { + s string +} + +func (e *errorString) Error() string { + return e.s +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors_test.go b/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors_test.go new file mode 100644 index 00000000..cf4df90b --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/master/src/errors/errors_test.go @@ -0,0 +1,53 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors_test + +import ( + "errors" + "fmt" + "testing" +) + +func TestNewEqual(t *testing.T) { + // Different allocations should not be equal. + if errors.New("abc") == errors.New("abc") { + t.Errorf(`New("abc") == New("abc")`) + } + if errors.New("abc") == errors.New("xyz") { + t.Errorf(`New("abc") == New("xyz")`) + } + + // Same allocation should be equal to itself (not crash). 
+ err := errors.New("jkl") + if err != err { + t.Errorf(`err != err`) + } +} + +func TestErrorMethod(t *testing.T) { + err := errors.New("abc") + if err.Error() != "abc" { + t.Errorf(`New("abc").Error() = %q, want %q`, err.Error(), "abc") + } +} + +func ExampleNew() { + err := errors.New("emit macho dwarf: elf header corrupted") + if err != nil { + fmt.Print(err) + } + // Output: emit macho dwarf: elf header corrupted +} + +// The fmt package's Errorf function lets us use the package's formatting +// features to create descriptive error messages. +func ExampleNew_errorf() { + const name, id = "bimmler", 17 + err := fmt.Errorf("user %q (id %d) not found", name, id) + if err != nil { + fmt.Print(err) + } + // Output: user "bimmler" (id 17) not found +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/LICENSE b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/README.md b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/README.md new file mode 100644 index 00000000..d6d2b9d2 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/README.md @@ -0,0 +1 @@ +# The Go Programming Language diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/_foo/README.md b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/_foo/README.md new file mode 100644 index 00000000..177b08ee --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/_foo/README.md @@ -0,0 +1 @@ +Can not read content diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/builtin/builtin.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/builtin/builtin.go new file mode 100644 index 00000000..c78fe09e --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/builtin/builtin.go @@ -0,0 +1,263 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package builtin provides documentation for Go's predeclared identifiers. + The items documented here are not actually in package builtin + but their descriptions here allow godoc to present documentation + for the language's special identifiers. +*/ +package builtin + +// bool is the set of boolean values, true and false. +type bool bool + +// true and false are the two untyped boolean values. +const ( + true = 0 == 0 // Untyped bool. + false = 0 != 0 // Untyped bool. +) + +// uint8 is the set of all unsigned 8-bit integers. +// Range: 0 through 255. +type uint8 uint8 + +// uint16 is the set of all unsigned 16-bit integers. +// Range: 0 through 65535. +type uint16 uint16 + +// uint32 is the set of all unsigned 32-bit integers. +// Range: 0 through 4294967295. +type uint32 uint32 + +// uint64 is the set of all unsigned 64-bit integers. +// Range: 0 through 18446744073709551615. +type uint64 uint64 + +// int8 is the set of all signed 8-bit integers. +// Range: -128 through 127. +type int8 int8 + +// int16 is the set of all signed 16-bit integers. +// Range: -32768 through 32767. +type int16 int16 + +// int32 is the set of all signed 32-bit integers. +// Range: -2147483648 through 2147483647. +type int32 int32 + +// int64 is the set of all signed 64-bit integers. +// Range: -9223372036854775808 through 9223372036854775807. +type int64 int64 + +// float32 is the set of all IEEE-754 32-bit floating-point numbers. +type float32 float32 + +// float64 is the set of all IEEE-754 64-bit floating-point numbers. +type float64 float64 + +// complex64 is the set of all complex numbers with float32 real and +// imaginary parts. +type complex64 complex64 + +// complex128 is the set of all complex numbers with float64 real and +// imaginary parts. +type complex128 complex128 + +// string is the set of all strings of 8-bit bytes, conventionally but not +// necessarily representing UTF-8-encoded text. A string may be empty, but +// not nil. Values of string type are immutable. +type string string + +// int is a signed integer type that is at least 32 bits in size. It is a +// distinct type, however, and not an alias for, say, int32. +type int int + +// uint is an unsigned integer type that is at least 32 bits in size. It is a +// distinct type, however, and not an alias for, say, uint32. +type uint uint + +// uintptr is an integer type that is large enough to hold the bit pattern of +// any pointer. +type uintptr uintptr + +// byte is an alias for uint8 and is equivalent to uint8 in all ways. It is +// used, by convention, to distinguish byte values from 8-bit unsigned +// integer values. +type byte = uint8 + +// rune is an alias for int32 and is equivalent to int32 in all ways. It is +// used, by convention, to distinguish character values from integer values. +type rune = int32 + +// iota is a predeclared identifier representing the untyped integer ordinal +// number of the current const specification in a (usually parenthesized) +// const declaration. It is zero-indexed. +const iota = 0 // Untyped int. + +// nil is a predeclared identifier representing the zero value for a +// pointer, channel, func, interface, map, or slice type. +var nil Type // Type must be a pointer, channel, func, interface, map, or slice type + +// Type is here for the purposes of documentation only. 
It is a stand-in +// for any Go type, but represents the same type for any given function +// invocation. +type Type int + +// Type1 is here for the purposes of documentation only. It is a stand-in +// for any Go type, but represents the same type for any given function +// invocation. +type Type1 int + +// IntegerType is here for the purposes of documentation only. It is a stand-in +// for any integer type: int, uint, int8 etc. +type IntegerType int + +// FloatType is here for the purposes of documentation only. It is a stand-in +// for either float type: float32 or float64. +type FloatType float32 + +// ComplexType is here for the purposes of documentation only. It is a +// stand-in for either complex type: complex64 or complex128. +type ComplexType complex64 + +// The append built-in function appends elements to the end of a slice. If +// it has sufficient capacity, the destination is resliced to accommodate the +// new elements. If it does not, a new underlying array will be allocated. +// Append returns the updated slice. It is therefore necessary to store the +// result of append, often in the variable holding the slice itself: +// slice = append(slice, elem1, elem2) +// slice = append(slice, anotherSlice...) +// As a special case, it is legal to append a string to a byte slice, like this: +// slice = append([]byte("hello "), "world"...) +func append(slice []Type, elems ...Type) []Type + +// The copy built-in function copies elements from a source slice into a +// destination slice. (As a special case, it also will copy bytes from a +// string to a slice of bytes.) The source and destination may overlap. Copy +// returns the number of elements copied, which will be the minimum of +// len(src) and len(dst). +func copy(dst, src []Type) int + +// The delete built-in function deletes the element with the specified key +// (m[key]) from the map. If m is nil or there is no such element, delete +// is a no-op. +func delete(m map[Type]Type1, key Type) + +// The len built-in function returns the length of v, according to its type: +// Array: the number of elements in v. +// Pointer to array: the number of elements in *v (even if v is nil). +// Slice, or map: the number of elements in v; if v is nil, len(v) is zero. +// String: the number of bytes in v. +// Channel: the number of elements queued (unread) in the channel buffer; +// if v is nil, len(v) is zero. +// For some arguments, such as a string literal or a simple array expression, the +// result can be a constant. See the Go language specification's "Length and +// capacity" section for details. +func len(v Type) int + +// The cap built-in function returns the capacity of v, according to its type: +// Array: the number of elements in v (same as len(v)). +// Pointer to array: the number of elements in *v (same as len(v)). +// Slice: the maximum length the slice can reach when resliced; +// if v is nil, cap(v) is zero. +// Channel: the channel buffer capacity, in units of elements; +// if v is nil, cap(v) is zero. +// For some arguments, such as a simple array expression, the result can be a +// constant. See the Go language specification's "Length and capacity" section for +// details. +func cap(v Type) int + +// The make built-in function allocates and initializes an object of type +// slice, map, or chan (only). Like new, the first argument is a type, not a +// value. Unlike new, make's return type is the same as the type of its +// argument, not a pointer to it. 
The specification of the result depends on +// the type: +// Slice: The size specifies the length. The capacity of the slice is +// equal to its length. A second integer argument may be provided to +// specify a different capacity; it must be no smaller than the +// length. For example, make([]int, 0, 10) allocates an underlying array +// of size 10 and returns a slice of length 0 and capacity 10 that is +// backed by this underlying array. +// Map: An empty map is allocated with enough space to hold the +// specified number of elements. The size may be omitted, in which case +// a small starting size is allocated. +// Channel: The channel's buffer is initialized with the specified +// buffer capacity. If zero, or the size is omitted, the channel is +// unbuffered. +func make(t Type, size ...IntegerType) Type + +// The new built-in function allocates memory. The first argument is a type, +// not a value, and the value returned is a pointer to a newly +// allocated zero value of that type. +func new(Type) *Type + +// The complex built-in function constructs a complex value from two +// floating-point values. The real and imaginary parts must be of the same +// size, either float32 or float64 (or assignable to them), and the return +// value will be the corresponding complex type (complex64 for float32, +// complex128 for float64). +func complex(r, i FloatType) ComplexType + +// The real built-in function returns the real part of the complex number c. +// The return value will be floating point type corresponding to the type of c. +func real(c ComplexType) FloatType + +// The imag built-in function returns the imaginary part of the complex +// number c. The return value will be floating point type corresponding to +// the type of c. +func imag(c ComplexType) FloatType + +// The close built-in function closes a channel, which must be either +// bidirectional or send-only. It should be executed only by the sender, +// never the receiver, and has the effect of shutting down the channel after +// the last sent value is received. After the last value has been received +// from a closed channel c, any receive from c will succeed without +// blocking, returning the zero value for the channel element. The form +// x, ok := <-c +// will also set ok to false for a closed channel. +func close(c chan<- Type) + +// The panic built-in function stops normal execution of the current +// goroutine. When a function F calls panic, normal execution of F stops +// immediately. Any functions whose execution was deferred by F are run in +// the usual way, and then F returns to its caller. To the caller G, the +// invocation of F then behaves like a call to panic, terminating G's +// execution and running any deferred functions. This continues until all +// functions in the executing goroutine have stopped, in reverse order. At +// that point, the program is terminated and the error condition is reported, +// including the value of the argument to panic. This termination sequence +// is called panicking and can be controlled by the built-in function +// recover. +func panic(v interface{}) + +// The recover built-in function allows a program to manage behavior of a +// panicking goroutine. Executing a call to recover inside a deferred +// function (but not any function called by it) stops the panicking sequence +// by restoring normal execution and retrieves the error value passed to the +// call of panic. If recover is called outside the deferred function it will +// not stop a panicking sequence. 
In this case, or when the goroutine is not +// panicking, or if the argument supplied to panic was nil, recover returns +// nil. Thus the return value from recover reports whether the goroutine is +// panicking. +func recover() interface{} + +// The print built-in function formats its arguments in an +// implementation-specific way and writes the result to standard error. +// Print is useful for bootstrapping and debugging; it is not guaranteed +// to stay in the language. +func print(args ...Type) + +// The println built-in function formats its arguments in an +// implementation-specific way and writes the result to standard error. +// Spaces are always added between arguments and a newline is appended. +// Println is useful for bootstrapping and debugging; it is not guaranteed +// to stay in the language. +func println(args ...Type) + +// The error built-in interface type is the conventional interface for +// representing an error condition, with the nil value representing no error. +type error interface { + Error() string +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/README b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/README new file mode 100644 index 00000000..9de1f515 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/README @@ -0,0 +1 @@ +This directory is the copy of Google's pprof shipped as part of the Go distribution. diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/doc.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/doc.go new file mode 100644 index 00000000..84de0366 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/doc.go @@ -0,0 +1,12 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pprof interprets and displays profiles of Go programs. +// +// Usage: +// +// go tool pprof binary profile +// +// For more information, see https://blog.golang.org/profiling-go-programs. +package main diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/pprof.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/pprof.go new file mode 100644 index 00000000..89f39690 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/pprof.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// pprof is a tool for visualization of profile.data. It is based on +// the upstream version at github.com/google/pprof, with minor +// modifications specific to the Go distribution. Please consider +// upstreaming any modifications to these packages. 
+ +package main + +import ( + "crypto/tls" + "debug/dwarf" + "fmt" + "io" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "cmd/internal/objfile" + + "github.com/google/pprof/driver" + "github.com/google/pprof/profile" +) + +func main() { + options := &driver.Options{ + Fetch: new(fetcher), + Obj: new(objTool), + UI: newUI(), + } + if err := driver.PProf(options); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(2) + } +} + +type fetcher struct { +} + +func (f *fetcher) Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) { + sourceURL, timeout := adjustURL(src, duration, timeout) + if sourceURL == "" { + // Could not recognize URL, let regular pprof attempt to fetch the profile (eg. from a file) + return nil, "", nil + } + fmt.Fprintln(os.Stderr, "Fetching profile over HTTP from", sourceURL) + if duration > 0 { + fmt.Fprintf(os.Stderr, "Please wait... (%v)\n", duration) + } + p, err := getProfile(sourceURL, timeout) + return p, sourceURL, err +} + +func getProfile(source string, timeout time.Duration) (*profile.Profile, error) { + url, err := url.Parse(source) + if err != nil { + return nil, err + } + + var tlsConfig *tls.Config + if url.Scheme == "https+insecure" { + tlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + url.Scheme = "https" + source = url.String() + } + + client := &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: timeout + 5*time.Second, + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(source) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + return nil, statusCodeError(resp) + } + return profile.Parse(resp.Body) +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// cpuProfileHandler is the Go pprof CPU profile handler URL. +const cpuProfileHandler = "/debug/pprof/profile" + +// adjustURL applies the duration/timeout values and Go specific defaults +func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) { + u, err := url.Parse(source) + if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") { + // Try adding http:// to catch sources of the form hostname:port/path. + // url.Parse treats "hostname" as the scheme. + u, err = url.Parse("http://" + source) + } + if err != nil || u.Host == "" { + return "", 0 + } + + if u.Path == "" || u.Path == "/" { + u.Path = cpuProfileHandler + } + + // Apply duration/timeout overrides to URL. + values := u.Query() + if duration > 0 { + values.Set("seconds", fmt.Sprint(int(duration.Seconds()))) + } else { + if urlSeconds := values.Get("seconds"); urlSeconds != "" { + if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil { + duration = time.Duration(us) * time.Second + } + } + } + if timeout <= 0 { + if duration > 0 { + timeout = duration + duration/2 + } else { + timeout = 60 * time.Second + } + } + u.RawQuery = values.Encode() + return u.String(), timeout +} + +// objTool implements driver.ObjTool using Go libraries +// (instead of invoking GNU binutils). 
+type objTool struct { + mu sync.Mutex + disasmCache map[string]*objfile.Disasm +} + +func (*objTool) Open(name string, start, limit, offset uint64) (driver.ObjFile, error) { + of, err := objfile.Open(name) + if err != nil { + return nil, err + } + f := &file{ + name: name, + file: of, + } + if start != 0 { + if load, err := of.LoadAddress(); err == nil { + f.offset = start - load + } + } + return f, nil +} + +func (*objTool) Demangle(names []string) (map[string]string, error) { + // No C++, nothing to demangle. + return make(map[string]string), nil +} + +func (t *objTool) Disasm(file string, start, end uint64) ([]driver.Inst, error) { + d, err := t.cachedDisasm(file) + if err != nil { + return nil, err + } + var asm []driver.Inst + d.Decode(start, end, nil, func(pc, size uint64, file string, line int, text string) { + asm = append(asm, driver.Inst{Addr: pc, File: file, Line: line, Text: text}) + }) + return asm, nil +} + +func (t *objTool) cachedDisasm(file string) (*objfile.Disasm, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.disasmCache == nil { + t.disasmCache = make(map[string]*objfile.Disasm) + } + d := t.disasmCache[file] + if d != nil { + return d, nil + } + f, err := objfile.Open(file) + if err != nil { + return nil, err + } + d, err = f.Disasm() + f.Close() + if err != nil { + return nil, err + } + t.disasmCache[file] = d + return d, nil +} + +func (*objTool) SetConfig(config string) { + // config is usually used to say what binaries to invoke. + // Ignore entirely. +} + +// file implements driver.ObjFile using Go libraries +// (instead of invoking GNU binutils). +// A file represents a single executable being analyzed. +type file struct { + name string + offset uint64 + sym []objfile.Sym + file *objfile.File + pcln objfile.Liner + + triedDwarf bool + dwarf *dwarf.Data +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) Base() uint64 { + // No support for shared libraries. + return 0 +} + +func (f *file) BuildID() string { + // No support for build ID. + return "" +} + +func (f *file) SourceLine(addr uint64) ([]driver.Frame, error) { + if f.pcln == nil { + pcln, err := f.file.PCLineTable() + if err != nil { + return nil, err + } + f.pcln = pcln + } + addr -= f.offset + file, line, fn := f.pcln.PCToLine(addr) + if fn != nil { + frame := []driver.Frame{ + { + Func: fn.Name, + File: file, + Line: line, + }, + } + return frame, nil + } + + frames := f.dwarfSourceLine(addr) + if frames != nil { + return frames, nil + } + + return nil, fmt.Errorf("no line information for PC=%#x", addr) +} + +// dwarfSourceLine tries to get file/line information using DWARF. +// This is for C functions that appear in the profile. +// Returns nil if there is no information available. +func (f *file) dwarfSourceLine(addr uint64) []driver.Frame { + if f.dwarf == nil && !f.triedDwarf { + // Ignore any error--we don't care exactly why there + // is no DWARF info. + f.dwarf, _ = f.file.DWARF() + f.triedDwarf = true + } + + if f.dwarf != nil { + r := f.dwarf.Reader() + unit, err := r.SeekPC(addr) + if err == nil { + if frames := f.dwarfSourceLineEntry(r, unit, addr); frames != nil { + return frames + } + } + } + + return nil +} + +// dwarfSourceLineEntry tries to get file/line information from a +// DWARF compilation unit. Returns nil if it doesn't find anything. 
+func (f *file) dwarfSourceLineEntry(r *dwarf.Reader, entry *dwarf.Entry, addr uint64) []driver.Frame { + lines, err := f.dwarf.LineReader(entry) + if err != nil { + return nil + } + var lentry dwarf.LineEntry + if err := lines.SeekPC(addr, &lentry); err != nil { + return nil + } + + // Try to find the function name. + name := "" +FindName: + for entry, err := r.Next(); entry != nil && err == nil; entry, err = r.Next() { + if entry.Tag == dwarf.TagSubprogram { + ranges, err := f.dwarf.Ranges(entry) + if err != nil { + return nil + } + for _, pcs := range ranges { + if pcs[0] <= addr && addr < pcs[1] { + var ok bool + // TODO: AT_linkage_name, AT_MIPS_linkage_name. + name, ok = entry.Val(dwarf.AttrName).(string) + if ok { + break FindName + } + } + } + } + } + + // TODO: Report inlined functions. + + frames := []driver.Frame{ + { + Func: name, + File: lentry.File.Name, + Line: lentry.Line, + }, + } + + return frames +} + +func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*driver.Sym, error) { + if f.sym == nil { + sym, err := f.file.Symbols() + if err != nil { + return nil, err + } + f.sym = sym + } + var out []*driver.Sym + for _, s := range f.sym { + // Ignore a symbol with address 0 and size 0. + // An ELF STT_FILE symbol will look like that. + if s.Addr == 0 && s.Size == 0 { + continue + } + if (r == nil || r.MatchString(s.Name)) && (addr == 0 || s.Addr <= addr && addr < s.Addr+uint64(s.Size)) { + out = append(out, &driver.Sym{ + Name: []string{s.Name}, + File: f.name, + Start: s.Addr, + End: s.Addr + uint64(s.Size) - 1, + }) + } + } + return out, nil +} + +func (f *file) Close() error { + f.file.Close() + return nil +} + +// newUI will be set in readlineui.go in some platforms +// for interactive readline functionality. +var newUI = func() driver.UI { return nil } diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/readlineui.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/readlineui.go new file mode 100644 index 00000000..5b9701a0 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/cmd/pprof/readlineui.go @@ -0,0 +1,120 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains an driver.UI implementation +// that provides the readline functionality if possible. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build !appengine +// +build !android + +package main + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/google/pprof/driver" + "golang.org/x/crypto/ssh/terminal" +) + +func init() { + newUI = newReadlineUI +} + +// readlineUI implements driver.UI interface using the +// golang.org/x/crypto/ssh/terminal package. +// The upstream pprof command implements the same functionality +// using the github.com/chzyer/readline package. +type readlineUI struct { + term *terminal.Terminal +} + +func newReadlineUI() driver.UI { + // disable readline UI in dumb terminal. (golang.org/issue/26254) + if v := strings.ToLower(os.Getenv("TERM")); v == "" || v == "dumb" { + return nil + } + // test if we can use terminal.ReadLine + // that assumes operation in the raw mode. 
+ oldState, err := terminal.MakeRaw(0) + if err != nil { + return nil + } + terminal.Restore(0, oldState) + + rw := struct { + io.Reader + io.Writer + }{os.Stdin, os.Stderr} + return &readlineUI{term: terminal.NewTerminal(rw, "")} +} + +// Read returns a line of text (a command) read from the user. +// prompt is printed before reading the command. +func (r *readlineUI) ReadLine(prompt string) (string, error) { + r.term.SetPrompt(prompt) + + // skip error checking because we tested it + // when creating this readlineUI initially. + oldState, _ := terminal.MakeRaw(0) + defer terminal.Restore(0, oldState) + + s, err := r.term.ReadLine() + return s, err +} + +// Print shows a message to the user. +// It formats the text as fmt.Print would and adds a final \n if not already present. +// For line-based UI, Print writes to standard error. +// (Standard output is reserved for report data.) +func (r *readlineUI) Print(args ...interface{}) { + r.print(false, args...) +} + +// PrintErr shows an error message to the user. +// It formats the text as fmt.Print would and adds a final \n if not already present. +// For line-based UI, PrintErr writes to standard error. +func (r *readlineUI) PrintErr(args ...interface{}) { + r.print(true, args...) +} + +func (r *readlineUI) print(withColor bool, args ...interface{}) { + text := fmt.Sprint(args...) + if !strings.HasSuffix(text, "\n") { + text += "\n" + } + if withColor { + text = colorize(text) + } + fmt.Fprint(r.term, text) +} + +// colorize prints the msg in red using ANSI color escapes. +func colorize(msg string) string { + const red = 31 + var colorEscape = fmt.Sprintf("\033[0;%dm", red) + var colorResetEscape = "\033[0m" + return colorEscape + msg + colorResetEscape +} + +// IsTerminal reports whether the UI is known to be tied to an +// interactive terminal (as opposed to being redirected to a file). +func (r *readlineUI) IsTerminal() bool { + const stdout = 1 + return terminal.IsTerminal(stdout) +} + +// WantBrowser indicates whether browser should be opened with the -http option. +func (r *readlineUI) WantBrowser() bool { + return r.IsTerminal() +} + +// SetAutoComplete instructs the UI to call complete(cmd) to obtain +// the auto-completion of cmd, if the UI supports auto-completion at all. +func (r *readlineUI) SetAutoComplete(complete func(string) string) { + // TODO: Implement auto-completion support. +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/benchmark_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/benchmark_test.go new file mode 100644 index 00000000..5d568630 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/benchmark_test.go @@ -0,0 +1,140 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + . 
"context" + "fmt" + "runtime" + "sync" + "testing" + "time" +) + +func BenchmarkCommonParentCancel(b *testing.B) { + root := WithValue(Background(), "key", "value") + shared, sharedcancel := WithCancel(root) + defer sharedcancel() + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + x := 0 + for pb.Next() { + ctx, cancel := WithCancel(shared) + if ctx.Value("key").(string) != "value" { + b.Fatal("should not be reached") + } + for i := 0; i < 100; i++ { + x /= x + 1 + } + cancel() + for i := 0; i < 100; i++ { + x /= x + 1 + } + } + }) +} + +func BenchmarkWithTimeout(b *testing.B) { + for concurrency := 40; concurrency <= 4e5; concurrency *= 100 { + name := fmt.Sprintf("concurrency=%d", concurrency) + b.Run(name, func(b *testing.B) { + benchmarkWithTimeout(b, concurrency) + }) + } +} + +func benchmarkWithTimeout(b *testing.B, concurrentContexts int) { + gomaxprocs := runtime.GOMAXPROCS(0) + perPContexts := concurrentContexts / gomaxprocs + root := Background() + + // Generate concurrent contexts. + var wg sync.WaitGroup + ccf := make([][]CancelFunc, gomaxprocs) + for i := range ccf { + wg.Add(1) + go func(i int) { + defer wg.Done() + cf := make([]CancelFunc, perPContexts) + for j := range cf { + _, cf[j] = WithTimeout(root, time.Hour) + } + ccf[i] = cf + }(i) + } + wg.Wait() + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + wcf := make([]CancelFunc, 10) + for pb.Next() { + for i := range wcf { + _, wcf[i] = WithTimeout(root, time.Hour) + } + for _, f := range wcf { + f() + } + } + }) + b.StopTimer() + + for _, cf := range ccf { + for _, f := range cf { + f() + } + } +} + +func BenchmarkCancelTree(b *testing.B) { + depths := []int{1, 10, 100, 1000} + for _, d := range depths { + b.Run(fmt.Sprintf("depth=%d", d), func(b *testing.B) { + b.Run("Root=Background", func(b *testing.B) { + for i := 0; i < b.N; i++ { + buildContextTree(Background(), d) + } + }) + b.Run("Root=OpenCanceler", func(b *testing.B) { + for i := 0; i < b.N; i++ { + ctx, cancel := WithCancel(Background()) + buildContextTree(ctx, d) + cancel() + } + }) + b.Run("Root=ClosedCanceler", func(b *testing.B) { + for i := 0; i < b.N; i++ { + ctx, cancel := WithCancel(Background()) + cancel() + buildContextTree(ctx, d) + } + }) + }) + } +} + +func buildContextTree(root Context, depth int) { + for d := 0; d < depth; d++ { + root, _ = WithCancel(root) + } +} + +func BenchmarkCheckCanceled(b *testing.B) { + ctx, cancel := WithCancel(Background()) + cancel() + b.Run("Err", func(b *testing.B) { + for i := 0; i < b.N; i++ { + ctx.Err() + } + }) + b.Run("Done", func(b *testing.B) { + for i := 0; i < b.N; i++ { + select { + case <-ctx.Done(): + default: + } + } + }) +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context.go new file mode 100644 index 00000000..21a40d59 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context.go @@ -0,0 +1,493 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing +// calls to servers should accept a Context. 
The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using WithCancel, WithDeadline, +// WithTimeout, or WithValue. When a Context is canceled, all +// Contexts derived from it are also canceled. +// +// The WithCancel, WithDeadline, and WithTimeout functions take a +// Context (the parent) and return a derived Context (the child) and a +// CancelFunc. Calling the CancelFunc cancels the child and its +// children, removes the parent's reference to the child, and stops +// any associated timers. Failing to call the CancelFunc leaks the +// child and its children until the parent is canceled or the timer +// fires. The go vet tool checks that CancelFuncs are used on all +// control-flow paths. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See https://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See https://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // If Done is not yet closed, Err returns nil. 
+ // If Done is closed, Err returns a non-nil error explaining why: + // Canceled if the context was canceled + // or DeadlineExceeded if the context's deadline passed. + // After Err returns a non-nil error, successive calls to Err return the same error. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stored using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded error = deadlineExceededError{} + +type deadlineExceededError struct{} + +func (deadlineExceededError) Error() string { return "context deadline exceeded" } +func (deadlineExceededError) Timeout() bool { return true } +func (deadlineExceededError) Temporary() bool { return true } + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. 
It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{Context: parent} +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]struct{}) + } + p.children[child] = struct{}{} + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// closedchan is a reusable closed channel. +var closedchan = make(chan struct{}) + +func init() { + close(closedchan) +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. 
+type cancelCtx struct { + Context + + mu sync.Mutex // protects following fields + done chan struct{} // created lazily, closed by first cancel call + children map[canceler]struct{} // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + c.mu.Lock() + if c.done == nil { + c.done = make(chan struct{}) + } + d := c.done + c.mu.Unlock() + return d +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + if c.done == nil { + c.done = closedchan + } else { + close(c.done) + } + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(d) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: d, + } + propagateCancel(parent, c) + dur := time.Until(d) + if dur <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(false, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(dur, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, time.Until(c.deadline)) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + if key == nil { + panic("nil key") + } + if !reflect.TypeOf(key).Comparable() { + panic("key is not comparable") + } + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context_test.go new file mode 100644 index 00000000..0b6ca742 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/context_test.go @@ -0,0 +1,650 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "time" +) + +type testingT interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
+type otherContext struct { + Context +} + +func XTestBackground(t testingT) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func XTestTODO(t testingT) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func XTestWithCancel(t testingT) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func contains(m map[canceler]struct{}, key canceler) bool { + _, ret := m[key] + return ret +} + +func XTestParentFinishesChild(t testingT) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + test := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !contains(pc.children, cc) || !contains(pc.children, test) { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, test) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(test.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func XTestChildFinishesFirst(t testingT) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !contains(pc.children, cc) { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, name string, failAfter time.Duration, t testingT) { + select { + case <-time.After(failAfter): + t.Fatalf("%s: context should have timed out", name) + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("%s: c.Err() == %v; want %v", name, e, DeadlineExceeded) + } +} + +func XTestDeadline(t testingT) { + c, _ := WithDeadline(Background(), time.Now().Add(50*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, "WithDeadline", time.Second, t) + + c, _ = WithDeadline(Background(), time.Now().Add(50*time.Millisecond)) + o := otherContext{c} + testDeadline(o, "WithDeadline+otherContext", time.Second, t) + + c, _ = WithDeadline(Background(), time.Now().Add(50*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(4*time.Second)) + testDeadline(c, "WithDeadline+otherContext+WithDeadline", 2*time.Second, t) + + c, _ = WithDeadline(Background(), time.Now().Add(-time.Millisecond)) + testDeadline(c, "WithDeadline+inthepast", time.Second, t) + + c, _ = WithDeadline(Background(), time.Now()) + testDeadline(c, "WithDeadline+now", time.Second, t) +} + +func XTestTimeout(t testingT) { + c, _ := WithTimeout(Background(), 50*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, "WithTimeout", time.Second, t) + + c, _ = WithTimeout(Background(), 50*time.Millisecond) + o := otherContext{c} + testDeadline(o, "WithTimeout+otherContext", time.Second, t) + + c, _ = WithTimeout(Background(), 50*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 3*time.Second) + testDeadline(c, "WithTimeout+otherContext+WithTimeout", 2*time.Second, t) +} + +func XTestCanceledTimeout(t testingT) { + c, _ := WithTimeout(Background(), time.Second) + o := otherContext{c} + c, cancel := WithTimeout(o, 2*time.Second) + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func XTestValues(t testingT) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q 
want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func XTestAllocs(t testingT, testingShort func() bool, testingAllocsPerRun func(int, func()) float64) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 15, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 5*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 5*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + numRuns := 100 + if testingShort() { + numRuns = 10 + } + if n := testingAllocsPerRun(numRuns, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func XTestSimultaneousCancels(t testingT) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. 
+ done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func XTestInterlockedCancels(t testingT) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func XTestLayersCancel(t testingT) { + testLayers(t, time.Now().UnixNano(), false) +} + +func XTestLayersTimeout(t testingT) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t testingT, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) + } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + time.Second): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func XTestCancelRemoves(t testingT) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after canceling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after canceling WithTimeout child", ctx, 0) +} + +func XTestWithCancelCanceledParent(t testingT) { + parent, pcancel := WithCancel(Background()) + pcancel() + + c, _ := WithCancel(parent) + select { + case <-c.Done(): + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Done") + } + if got, want := c.Err(), Canceled; got != want { + 
t.Errorf("child not cancelled; got = %v, want = %v", got, want) + } +} + +func XTestWithValueChecksKey(t testingT) { + panicVal := recoveredValue(func() { WithValue(Background(), []byte("foo"), "bar") }) + if panicVal == nil { + t.Error("expected panic") + } + panicVal = recoveredValue(func() { WithValue(Background(), nil, "bar") }) + if got, want := fmt.Sprint(panicVal), "nil key"; got != want { + t.Errorf("panic = %q; want %q", got, want) + } +} + +func recoveredValue(fn func()) (v interface{}) { + defer func() { v = recover() }() + fn() + return +} + +func XTestDeadlineExceededSupportsTimeout(t testingT) { + i, ok := DeadlineExceeded.(interface { + Timeout() bool + }) + if !ok { + t.Fatal("DeadlineExceeded does not support Timeout interface") + } + if !i.Timeout() { + t.Fatal("wrong value for timeout") + } +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/example_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/example_test.go new file mode 100644 index 00000000..2b28b577 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/example_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "context" + "fmt" + "time" +) + +// This example demonstrates the use of a cancelable context to prevent a +// goroutine leak. By the end of the example function, the goroutine started +// by gen will return without leaking. +func ExampleWithCancel() { + // gen generates integers in a separate goroutine and + // sends them to the returned channel. + // The callers of gen need to cancel the context once + // they are done consuming generated integers not to leak + // the internal goroutine started by gen. + gen := func(ctx context.Context) <-chan int { + dst := make(chan int) + n := 1 + go func() { + for { + select { + case <-ctx.Done(): + return // returning not to leak the goroutine + case dst <- n: + n++ + } + } + }() + return dst + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // cancel when we are finished consuming integers + + for n := range gen(ctx) { + fmt.Println(n) + if n == 5 { + break + } + } + // Output: + // 1 + // 2 + // 3 + // 4 + // 5 +} + +// This example passes a context with an arbitrary deadline to tell a blocking +// function that it should abandon its work as soon as it gets to it. +func ExampleWithDeadline() { + d := time.Now().Add(50 * time.Millisecond) + ctx, cancel := context.WithDeadline(context.Background(), d) + + // Even though ctx will be expired, it is good practice to call its + // cancelation function in any case. Failure to do so may keep the + // context and its parent alive longer than necessary. + defer cancel() + + select { + case <-time.After(1 * time.Second): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) + } + + // Output: + // context deadline exceeded +} + +// This example passes a context with a timeout to tell a blocking function that +// it should abandon its work after the timeout elapses. +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. 
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + select { + case <-time.After(1 * time.Second): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + + // Output: + // context deadline exceeded +} + +// This example demonstrates how a value can be passed to the context +// and also how to retrieve it if it exists. +func ExampleWithValue() { + type favContextKey string + + f := func(ctx context.Context, k favContextKey) { + if v := ctx.Value(k); v != nil { + fmt.Println("found value:", v) + return + } + fmt.Println("key not found:", k) + } + + k := favContextKey("language") + ctx := context.WithValue(context.Background(), k, "Go") + + f(ctx, k) + f(ctx, favContextKey("color")) + + // Output: + // found value: Go + // key not found: color +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/net_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/net_test.go new file mode 100644 index 00000000..a007689d --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/net_test.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "context" + "net" + "testing" +) + +func TestDeadlineExceededIsNetError(t *testing.T) { + err, ok := context.DeadlineExceeded.(net.Error) + if !ok { + t.Fatal("DeadlineExceeded does not implement net.Error") + } + if !err.Timeout() || !err.Temporary() { + t.Fatalf("Timeout() = %v, Temporary() = %v, want true, true", err.Timeout(), err.Temporary()) + } +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/x_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/x_test.go new file mode 100644 index 00000000..d14b6f1a --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/context/x_test.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + . 
"context" + "testing" +) + +func TestBackground(t *testing.T) { XTestBackground(t) } +func TestTODO(t *testing.T) { XTestTODO(t) } +func TestWithCancel(t *testing.T) { XTestWithCancel(t) } +func TestParentFinishesChild(t *testing.T) { XTestParentFinishesChild(t) } +func TestChildFinishesFirst(t *testing.T) { XTestChildFinishesFirst(t) } +func TestDeadline(t *testing.T) { XTestDeadline(t) } +func TestTimeout(t *testing.T) { XTestTimeout(t) } +func TestCanceledTimeout(t *testing.T) { XTestCanceledTimeout(t) } +func TestValues(t *testing.T) { XTestValues(t) } +func TestAllocs(t *testing.T) { XTestAllocs(t, testing.Short, testing.AllocsPerRun) } +func TestSimultaneousCancels(t *testing.T) { XTestSimultaneousCancels(t) } +func TestInterlockedCancels(t *testing.T) { XTestInterlockedCancels(t) } +func TestLayersCancel(t *testing.T) { XTestLayersCancel(t) } +func TestLayersTimeout(t *testing.T) { XTestLayersTimeout(t) } +func TestCancelRemoves(t *testing.T) { XTestCancelRemoves(t) } +func TestWithCancelCanceledParent(t *testing.T) { XTestWithCancelCanceledParent(t) } +func TestWithValueChecksKey(t *testing.T) { XTestWithValueChecksKey(t) } +func TestDeadlineExceededSupportsTimeout(t *testing.T) { XTestDeadlineExceededSupportsTimeout(t) } diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/bench_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/bench_test.go new file mode 100644 index 00000000..6a72e778 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/bench_test.go @@ -0,0 +1,363 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Large data benchmark. +// The JSON data is a summary of agl's changes in the +// go, webkit, and chromium open source projects. +// We benchmark converting between the JSON form +// and in-memory data structures. 
+ +package json + +import ( + "bytes" + "compress/gzip" + "fmt" + "internal/testenv" + "io" + "os" + "reflect" + "runtime" + "strings" + "sync" + "testing" +) + +type codeResponse struct { + Tree *codeNode `json:"tree"` + Username string `json:"username"` +} + +type codeNode struct { + Name string `json:"name"` + Kids []*codeNode `json:"kids"` + CLWeight float64 `json:"cl_weight"` + Touches int `json:"touches"` + MinT int64 `json:"min_t"` + MaxT int64 `json:"max_t"` + MeanT int64 `json:"mean_t"` +} + +var codeJSON []byte +var codeStruct codeResponse + +func codeInit() { + f, err := os.Open("testdata/code.json.gz") + if err != nil { + panic(err) + } + defer f.Close() + gz, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + data, err := io.ReadAll(gz) + if err != nil { + panic(err) + } + + codeJSON = data + + if err := Unmarshal(codeJSON, &codeStruct); err != nil { + panic("unmarshal code.json: " + err.Error()) + } + + if data, err = Marshal(&codeStruct); err != nil { + panic("marshal code.json: " + err.Error()) + } + + if !bytes.Equal(data, codeJSON) { + println("different lengths", len(data), len(codeJSON)) + for i := 0; i < len(data) && i < len(codeJSON); i++ { + if data[i] != codeJSON[i] { + println("re-marshal: changed at byte", i) + println("orig: ", string(codeJSON[i-10:i+10])) + println("new: ", string(data[i-10:i+10])) + break + } + } + panic("re-marshal code.json: different result") + } +} + +func BenchmarkCodeEncoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + b.RunParallel(func(pb *testing.PB) { + enc := NewEncoder(io.Discard) + for pb.Next() { + if err := enc.Encode(&codeStruct); err != nil { + b.Fatal("Encode:", err) + } + } + }) + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeMarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if _, err := Marshal(&codeStruct); err != nil { + b.Fatal("Marshal:", err) + } + } + }) + b.SetBytes(int64(len(codeJSON))) +} + +func benchMarshalBytes(n int) func(*testing.B) { + sample := []byte("hello world") + // Use a struct pointer, to avoid an allocation when passing it as an + // interface parameter to Marshal. + v := &struct { + Bytes []byte + }{ + bytes.Repeat(sample, (n/len(sample))+1)[:n], + } + return func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := Marshal(v); err != nil { + b.Fatal("Marshal:", err) + } + } + } +} + +func BenchmarkMarshalBytes(b *testing.B) { + // 32 fits within encodeState.scratch. + b.Run("32", benchMarshalBytes(32)) + // 256 doesn't fit in encodeState.scratch, but is small enough to + // allocate and avoid the slower base64.NewEncoder. + b.Run("256", benchMarshalBytes(256)) + // 4096 is large enough that we want to avoid allocating for it. 
+ b.Run("4096", benchMarshalBytes(4096)) +} + +func BenchmarkCodeDecoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + b.RunParallel(func(pb *testing.PB) { + var buf bytes.Buffer + dec := NewDecoder(&buf) + var r codeResponse + for pb.Next() { + buf.Write(codeJSON) + // hide EOF + buf.WriteByte('\n') + buf.WriteByte('\n') + buf.WriteByte('\n') + if err := dec.Decode(&r); err != nil { + b.Fatal("Decode:", err) + } + } + }) + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkUnicodeDecoder(b *testing.B) { + j := []byte(`"\uD83D\uDE01"`) + b.SetBytes(int64(len(j))) + r := bytes.NewReader(j) + dec := NewDecoder(r) + var out string + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := dec.Decode(&out); err != nil { + b.Fatal("Decode:", err) + } + r.Seek(0, 0) + } +} + +func BenchmarkDecoderStream(b *testing.B) { + b.StopTimer() + var buf bytes.Buffer + dec := NewDecoder(&buf) + buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") + var x interface{} + if err := dec.Decode(&x); err != nil { + b.Fatal("Decode:", err) + } + ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" + b.StartTimer() + for i := 0; i < b.N; i++ { + if i%300000 == 0 { + buf.WriteString(ones) + } + x = nil + if err := dec.Decode(&x); err != nil || x != 1.0 { + b.Fatalf("Decode: %v after %d", err, i) + } + } +} + +func BenchmarkCodeUnmarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var r codeResponse + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } + }) + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeUnmarshalReuse(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + b.RunParallel(func(pb *testing.PB) { + var r codeResponse + for pb.Next() { + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } + }) + // TODO(bcmills): Is there a missing b.SetBytes here? +} + +func BenchmarkUnmarshalString(b *testing.B) { + data := []byte(`"hello, world"`) + b.RunParallel(func(pb *testing.PB) { + var s string + for pb.Next() { + if err := Unmarshal(data, &s); err != nil { + b.Fatal("Unmarshal:", err) + } + } + }) +} + +func BenchmarkUnmarshalFloat64(b *testing.B) { + data := []byte(`3.14`) + b.RunParallel(func(pb *testing.PB) { + var f float64 + for pb.Next() { + if err := Unmarshal(data, &f); err != nil { + b.Fatal("Unmarshal:", err) + } + } + }) +} + +func BenchmarkUnmarshalInt64(b *testing.B) { + data := []byte(`3`) + b.RunParallel(func(pb *testing.PB) { + var x int64 + for pb.Next() { + if err := Unmarshal(data, &x); err != nil { + b.Fatal("Unmarshal:", err) + } + } + }) +} + +func BenchmarkIssue10335(b *testing.B) { + b.ReportAllocs() + j := []byte(`{"a":{ }}`) + b.RunParallel(func(pb *testing.PB) { + var s struct{} + for pb.Next() { + if err := Unmarshal(j, &s); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkUnmapped(b *testing.B) { + b.ReportAllocs() + j := []byte(`{"s": "hello", "y": 2, "o": {"x": 0}, "a": [1, 99, {"x": 1}]}`) + b.RunParallel(func(pb *testing.PB) { + var s struct{} + for pb.Next() { + if err := Unmarshal(j, &s); err != nil { + b.Fatal(err) + } + } + }) +} + +func BenchmarkTypeFieldsCache(b *testing.B) { + var maxTypes int = 1e6 + if testenv.Builder() != "" { + maxTypes = 1e3 // restrict cache sizes on builders + } + + // Dynamically generate many new types. 
+ types := make([]reflect.Type, maxTypes) + fs := []reflect.StructField{{ + Type: reflect.TypeOf(""), + Index: []int{0}, + }} + for i := range types { + fs[0].Name = fmt.Sprintf("TypeFieldsCache%d", i) + types[i] = reflect.StructOf(fs) + } + + // clearClear clears the cache. Other JSON operations, must not be running. + clearCache := func() { + fieldCache = sync.Map{} + } + + // MissTypes tests the performance of repeated cache misses. + // This measures the time to rebuild a cache of size nt. + for nt := 1; nt <= maxTypes; nt *= 10 { + ts := types[:nt] + b.Run(fmt.Sprintf("MissTypes%d", nt), func(b *testing.B) { + nc := runtime.GOMAXPROCS(0) + for i := 0; i < b.N; i++ { + clearCache() + var wg sync.WaitGroup + for j := 0; j < nc; j++ { + wg.Add(1) + go func(j int) { + for _, t := range ts[(j*len(ts))/nc : ((j+1)*len(ts))/nc] { + cachedTypeFields(t) + } + wg.Done() + }(j) + } + wg.Wait() + } + }) + } + + // HitTypes tests the performance of repeated cache hits. + // This measures the average time of each cache lookup. + for nt := 1; nt <= maxTypes; nt *= 10 { + // Pre-warm a cache of size nt. + clearCache() + for _, t := range types[:nt] { + cachedTypeFields(t) + } + b.Run(fmt.Sprintf("HitTypes%d", nt), func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + cachedTypeFields(types[0]) + } + }) + }) + } +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode.go new file mode 100644 index 00000000..731553dc --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode.go @@ -0,0 +1,1292 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. 
By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be a string, an integer, or implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // name of the field holding the Go value +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and https://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. 
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext struct { // provides context for type errors + Struct reflect.Type + Field string + } + savedError error + useNumber bool + disallowUnknownFields bool +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + d.errorContext.Struct = nil + d.errorContext.Field = "" + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext.Struct != nil || d.errorContext.Field != "" { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = d.errorContext.Field + return err + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. 
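+ // Here d.readIndex() is the position of the literal's first byte (the
+ // scanner has already consumed it); after scanWhile below it is just
+ // past the last byte, so d.data[start:d.readIndex()] is the literal.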
+ start := d.readIndex() + d.scanWhile(scanContinue) + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? 
Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields []field + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + originalErrorContext := d.errorContext + + for { + // Read opening " of string key or closing }. 
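+ // Each pass through this loop consumes one key, its ':', and its value;
+ // a '}' here instead of a key only occurs for an empty object.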
+ d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.scanWhile(scanContinue) + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + d.errorContext.Field = f.name + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. 
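+ // For a map target the key bytes still have to be converted to the
+ // map's key type (a string kind, an integer kind, or via
+ // encoding.TextUnmarshaler) before the entry can be set.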
+ if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + + d.errorContext = originalErrorContext + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
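+ // (In this branch the only well-formed literal is "null"; the guard
+ // below matters only for values re-scanned via the ",string" tag.)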
+ if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val interface{}) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.scanWhile(scanContinue) + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. 
+ start := d.readIndex() + d.scanWhile(scanContinue) + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode_test.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode_test.go new file mode 100644 index 00000000..54432600 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/decode_test.go @@ -0,0 +1,2294 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "image" + "math" + "math/big" + "net" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +type T struct { + X string + Y int + Z int `json:"-"` +} + +type U struct { + Alphabet string `json:"alpha"` +} + +type V struct { + F1 interface{} + F2 int32 + F3 Number + F4 *VOuter +} + +type VOuter struct { + V V +} + +type W struct { + S SS +} + +type SS string + +func (*SS) UnmarshalJSON(data []byte) error { + return &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(SS(""))} +} + +// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and +// without UseNumber +var ifaceNumAsFloat64 = map[string]interface{}{ + "k1": float64(1), + "k2": "s", + "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, + "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, +} + +var ifaceNumAsNumber = map[string]interface{}{ + "k1": Number("1"), + "k2": "s", + "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, + "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, +} + +type tx struct { + x int +} + +type u8 uint8 + +// A type that can unmarshal itself. + +type unmarshaler struct { + T bool +} + +func (u *unmarshaler) UnmarshalJSON(b []byte) error { + *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. + return nil +} + +type ustruct struct { + M unmarshaler +} + +type unmarshalerText struct { + A, B string +} + +// needed for re-marshaling tests +func (u unmarshalerText) MarshalText() ([]byte, error) { + return []byte(u.A + ":" + u.B), nil +} + +func (u *unmarshalerText) UnmarshalText(b []byte) error { + pos := bytes.IndexByte(b, ':') + if pos == -1 { + return errors.New("missing separator") + } + u.A, u.B = string(b[:pos]), string(b[pos+1:]) + return nil +} + +var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) + +type ustructText struct { + M unmarshalerText +} + +// u8marshal is an integer type that can marshal/unmarshal itself. +type u8marshal uint8 + +func (u8 u8marshal) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("u%d", u8)), nil +} + +var errMissingU8Prefix = errors.New("missing 'u' prefix") + +func (u8 *u8marshal) UnmarshalText(b []byte) error { + if !bytes.HasPrefix(b, []byte{'u'}) { + return errMissingU8Prefix + } + n, err := strconv.Atoi(string(b[1:])) + if err != nil { + return err + } + *u8 = u8marshal(n) + return nil +} + +var _ encoding.TextUnmarshaler = (*u8marshal)(nil) + +var ( + um0, um1 unmarshaler // target2 of unmarshaling + ump = &um1 + umtrue = unmarshaler{true} + umslice = []unmarshaler{{true}} + umslicep = new([]unmarshaler) + umstruct = ustruct{unmarshaler{true}} + + um0T, um1T unmarshalerText // target2 of unmarshaling + umpType = &um1T + umtrueXY = unmarshalerText{"x", "y"} + umsliceXY = []unmarshalerText{{"x", "y"}} + umslicepType = new([]unmarshalerText) + umstructType = new(ustructText) + umstructXY = ustructText{unmarshalerText{"x", "y"}} + + ummapType = map[unmarshalerText]bool{} + ummapXY = map[unmarshalerText]bool{{"x", "y"}: true} +) + +// Test data structures for anonymous fields. 
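+// The embedded types below exercise encoding/json's field precedence:
+// shallower fields win over deeper ones, a JSON-tagged field beats an
+// untagged field at the same depth, and equal candidates at the same
+// depth cancel each other out (the "annihilated" cases noted on the
+// fields below).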
+ +type Point struct { + Z int +} + +type Top struct { + Level0 int + Embed0 + *Embed0a + *Embed0b `json:"e,omitempty"` // treated as named + Embed0c `json:"-"` // ignored + Loop + Embed0p // has Point with X, Y, used + Embed0q // has Point with Z, used + embed // contains exported field +} + +type Embed0 struct { + Level1a int // overridden by Embed0a's Level1a with json tag + Level1b int // used because Embed0a's Level1b is renamed + Level1c int // used because Embed0a's Level1c is ignored + Level1d int // annihilated by Embed0a's Level1d + Level1e int `json:"x"` // annihilated by Embed0a.Level1e +} + +type Embed0a struct { + Level1a int `json:"Level1a,omitempty"` + Level1b int `json:"LEVEL1B,omitempty"` + Level1c int `json:"-"` + Level1d int // annihilated by Embed0's Level1d + Level1f int `json:"x"` // annihilated by Embed0's Level1e +} + +type Embed0b Embed0 + +type Embed0c Embed0 + +type Embed0p struct { + image.Point +} + +type Embed0q struct { + Point +} + +type embed struct { + Q int +} + +type Loop struct { + Loop1 int `json:",omitempty"` + Loop2 int `json:",omitempty"` + *Loop +} + +// From reflect test: +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// From reflect test: +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +type Ambig struct { + // Given "hello", the first match should win. + First int `json:"HELLO"` + Second int `json:"Hello"` +} + +type XYZ struct { + X interface{} + Y interface{} + Z interface{} +} + +type unexportedWithMethods struct{} + +func (unexportedWithMethods) F() {} + +func sliceAddr(x []int) *[]int { return &x } +func mapAddr(x map[string]int) *map[string]int { return &x } + +type byteWithMarshalJSON byte + +func (b byteWithMarshalJSON) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"Z%.2x"`, byte(b))), nil +} + +func (b *byteWithMarshalJSON) UnmarshalJSON(data []byte) error { + if len(data) != 5 || data[0] != '"' || data[1] != 'Z' || data[4] != '"' { + return fmt.Errorf("bad quoted string") + } + i, err := strconv.ParseInt(string(data[2:4]), 16, 8) + if err != nil { + return fmt.Errorf("bad hex") + } + *b = byteWithMarshalJSON(i) + return nil +} + +type byteWithPtrMarshalJSON byte + +func (b *byteWithPtrMarshalJSON) MarshalJSON() ([]byte, error) { + return byteWithMarshalJSON(*b).MarshalJSON() +} + +func (b *byteWithPtrMarshalJSON) UnmarshalJSON(data []byte) error { + return (*byteWithMarshalJSON)(b).UnmarshalJSON(data) +} + +type byteWithMarshalText byte + +func (b byteWithMarshalText) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf(`Z%.2x`, byte(b))), nil +} + +func (b *byteWithMarshalText) UnmarshalText(data []byte) error { + if len(data) != 3 || data[0] != 'Z' { + return fmt.Errorf("bad quoted string") + } + i, err := strconv.ParseInt(string(data[1:3]), 16, 8) + if err != nil { + return fmt.Errorf("bad hex") + } + *b = byteWithMarshalText(i) + return nil +} + +type byteWithPtrMarshalText byte + +func (b *byteWithPtrMarshalText) MarshalText() ([]byte, error) { + return byteWithMarshalText(*b).MarshalText() +} + +func (b *byteWithPtrMarshalText) UnmarshalText(data []byte) error { + return (*byteWithMarshalText)(b).UnmarshalText(data) +} + +type intWithMarshalJSON int + +func (b 
intWithMarshalJSON) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"Z%.2x"`, int(b))), nil +} + +func (b *intWithMarshalJSON) UnmarshalJSON(data []byte) error { + if len(data) != 5 || data[0] != '"' || data[1] != 'Z' || data[4] != '"' { + return fmt.Errorf("bad quoted string") + } + i, err := strconv.ParseInt(string(data[2:4]), 16, 8) + if err != nil { + return fmt.Errorf("bad hex") + } + *b = intWithMarshalJSON(i) + return nil +} + +type intWithPtrMarshalJSON int + +func (b *intWithPtrMarshalJSON) MarshalJSON() ([]byte, error) { + return intWithMarshalJSON(*b).MarshalJSON() +} + +func (b *intWithPtrMarshalJSON) UnmarshalJSON(data []byte) error { + return (*intWithMarshalJSON)(b).UnmarshalJSON(data) +} + +type intWithMarshalText int + +func (b intWithMarshalText) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf(`Z%.2x`, int(b))), nil +} + +func (b *intWithMarshalText) UnmarshalText(data []byte) error { + if len(data) != 3 || data[0] != 'Z' { + return fmt.Errorf("bad quoted string") + } + i, err := strconv.ParseInt(string(data[1:3]), 16, 8) + if err != nil { + return fmt.Errorf("bad hex") + } + *b = intWithMarshalText(i) + return nil +} + +type intWithPtrMarshalText int + +func (b *intWithPtrMarshalText) MarshalText() ([]byte, error) { + return intWithMarshalText(*b).MarshalText() +} + +func (b *intWithPtrMarshalText) UnmarshalText(data []byte) error { + return (*intWithMarshalText)(b).UnmarshalText(data) +} + +type mapStringToStringData struct { + Data map[string]string `json:"data"` +} + +type unmarshalTest struct { + in string + ptr interface{} + out interface{} + err error + useNumber bool + golden bool + disallowUnknownFields bool +} + +type B struct { + B bool `json:",string"` +} + +var unmarshalTests = []unmarshalTest{ + // basic types + {in: `true`, ptr: new(bool), out: true}, + {in: `1`, ptr: new(int), out: 1}, + {in: `1.2`, ptr: new(float64), out: 1.2}, + {in: `-5`, ptr: new(int16), out: int16(-5)}, + {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, + {in: `2`, ptr: new(Number), out: Number("2")}, + {in: `2`, ptr: new(interface{}), out: float64(2.0)}, + {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, + {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, + {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, + {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, + {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, + {in: "null", ptr: new(interface{}), out: nil}, + {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7, "T", "X"}}, + {in: `{"X": 23}`, ptr: new(T), out: T{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(""), 8, "T", "X"}}, {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, + {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, + {in: `{"x": 1}`, ptr: new(tx), err: fmt.Errorf("json: unknown field \"x\""), disallowUnknownFields: true}, + {in: `{"S": 23}`, ptr: new(W), out: W{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(SS("")), 0, "W", "S"}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, 
useNumber: true}, + + // raw values with whitespace + {in: "\n true ", ptr: new(bool), out: true}, + {in: "\t 1 ", ptr: new(int), out: 1}, + {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, + {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, + {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, + + // Z has a "-" tag. + {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, + {in: `{"Y": 1, "Z": 2}`, ptr: new(T), err: fmt.Errorf("json: unknown field \"Z\""), disallowUnknownFields: true}, + + {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true}, + {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, + {in: `{"alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true}, + + // syntax errors + {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, + {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, + {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, + {in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: 5}}, + + // raw value errors + {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, + {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, + {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, + {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, + + // array tests + {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, + {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, + {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + {in: `[1, 2, 3]`, ptr: new(MustNotUnmarshalJSON), err: errors.New("MustNotUnmarshalJSON was used")}, + + // empty array to interface test + {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, + {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, + {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, + {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, + + // composite tests + {in: allValueIndent, ptr: new(All), out: allValue}, + {in: allValueCompact, ptr: new(All), out: allValue}, + {in: allValueIndent, ptr: new(*All), out: &allValue}, + {in: allValueCompact, ptr: new(*All), out: &allValue}, + {in: pallValueIndent, ptr: new(All), out: pallValue}, + {in: pallValueCompact, ptr: new(All), out: pallValue}, + {in: pallValueIndent, ptr: new(*All), out: &pallValue}, + {in: pallValueCompact, ptr: new(*All), out: &pallValue}, + + // unmarshal interface test + {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called + {in: `{"T":false}`, ptr: &ump, out: &umtrue}, + 
{in: `[{"T":false}]`, ptr: &umslice, out: umslice}, + {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, + {in: `{"M":{"T":"x:y"}}`, ptr: &umstruct, out: umstruct}, + + // UnmarshalText interface test + {in: `"x:y"`, ptr: &um0T, out: umtrueXY}, + {in: `"x:y"`, ptr: &umpType, out: &umtrueXY}, + {in: `["x:y"]`, ptr: &umsliceXY, out: umsliceXY}, + {in: `["x:y"]`, ptr: &umslicepType, out: &umsliceXY}, + {in: `{"M":"x:y"}`, ptr: umstructType, out: umstructXY}, + + // integer-keyed map test + { + in: `{"-1":"a","0":"b","1":"c"}`, + ptr: new(map[int]string), + out: map[int]string{-1: "a", 0: "b", 1: "c"}, + }, + { + in: `{"0":"a","10":"c","9":"b"}`, + ptr: new(map[u8]string), + out: map[u8]string{0: "a", 9: "b", 10: "c"}, + }, + { + in: `{"-9223372036854775808":"min","9223372036854775807":"max"}`, + ptr: new(map[int64]string), + out: map[int64]string{math.MinInt64: "min", math.MaxInt64: "max"}, + }, + { + in: `{"18446744073709551615":"max"}`, + ptr: new(map[uint64]string), + out: map[uint64]string{math.MaxUint64: "max"}, + }, + { + in: `{"0":false,"10":true}`, + ptr: new(map[uintptr]bool), + out: map[uintptr]bool{0: false, 10: true}, + }, + + // Check that MarshalText and UnmarshalText take precedence + // over default integer handling in map keys. + { + in: `{"u2":4}`, + ptr: new(map[u8marshal]int), + out: map[u8marshal]int{2: 4}, + }, + { + in: `{"2":4}`, + ptr: new(map[u8marshal]int), + err: errMissingU8Prefix, + }, + + // integer-keyed map errors + { + in: `{"abc":"abc"}`, + ptr: new(map[int]string), + err: &UnmarshalTypeError{Value: "number abc", Type: reflect.TypeOf(0), Offset: 2}, + }, + { + in: `{"256":"abc"}`, + ptr: new(map[uint8]string), + err: &UnmarshalTypeError{Value: "number 256", Type: reflect.TypeOf(uint8(0)), Offset: 2}, + }, + { + in: `{"128":"abc"}`, + ptr: new(map[int8]string), + err: &UnmarshalTypeError{Value: "number 128", Type: reflect.TypeOf(int8(0)), Offset: 2}, + }, + { + in: `{"-1":"abc"}`, + ptr: new(map[uint8]string), + err: &UnmarshalTypeError{Value: "number -1", Type: reflect.TypeOf(uint8(0)), Offset: 2}, + }, + { + in: `{"F":{"a":2,"3":4}}`, + ptr: new(map[string]map[int]int), + err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(int(0)), Offset: 7}, + }, + { + in: `{"F":{"a":2,"3":4}}`, + ptr: new(map[string]map[uint]int), + err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(uint(0)), Offset: 7}, + }, + + // Map keys can be encoding.TextUnmarshalers. + {in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY}, + // If multiple values for the same key exists, only the most recent value is used. + {in: `{"x:y":false,"x:y":true}`, ptr: &ummapType, out: ummapXY}, + + // Overwriting of data. + // This is different from package xml, but it's what we've always done. + // Now documented and tested. 
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, + {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, + + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12 + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18 + }`, + ptr: new(Top), + out: Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + }, + }, + { + in: `{"hello": 1}`, + ptr: new(Ambig), + out: Ambig{First: 1}, + }, + + { + in: `{"X": 1,"Y":2}`, + ptr: new(S5), + out: S5{S8: S8{S9: S9{Y: 2}}}, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S5), + err: fmt.Errorf("json: unknown field \"X\""), + disallowUnknownFields: true, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S10), + out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S10), + err: fmt.Errorf("json: unknown field \"X\""), + disallowUnknownFields: true, + }, + + // invalid UTF-8 is coerced to valid UTF-8. + { + in: "\"hello\xffworld\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\xc2\xc2world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xc2\xffworld\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800world\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", + ptr: new(string), + out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", + }, + + // Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now. + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[time.Time]string{}, + out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"}, + }, + + // issue 8305 + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[Point]string{}, + err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(map[Point]string{}), Offset: 1}, + }, + { + in: `{"asdf": "hello world"}`, + ptr: &map[unmarshaler]string{}, + err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(map[unmarshaler]string{}), Offset: 1}, + }, + + // related to issue 13783. + // Go 1.7 changed marshaling a slice of typed byte to use the methods on the byte type, + // similar to marshaling a slice of typed int. + // These tests check that, assuming the byte type also has valid decoding methods, + // either the old base64 string encoding or the new per-element encoding can be + // successfully unmarshaled. The custom unmarshalers were accessible in earlier + // versions of Go, even though the custom marshaler was not. 
+ { + in: `"AQID"`, + ptr: new([]byteWithMarshalJSON), + out: []byteWithMarshalJSON{1, 2, 3}, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]byteWithMarshalJSON), + out: []byteWithMarshalJSON{1, 2, 3}, + golden: true, + }, + { + in: `"AQID"`, + ptr: new([]byteWithMarshalText), + out: []byteWithMarshalText{1, 2, 3}, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]byteWithMarshalText), + out: []byteWithMarshalText{1, 2, 3}, + golden: true, + }, + { + in: `"AQID"`, + ptr: new([]byteWithPtrMarshalJSON), + out: []byteWithPtrMarshalJSON{1, 2, 3}, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]byteWithPtrMarshalJSON), + out: []byteWithPtrMarshalJSON{1, 2, 3}, + golden: true, + }, + { + in: `"AQID"`, + ptr: new([]byteWithPtrMarshalText), + out: []byteWithPtrMarshalText{1, 2, 3}, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]byteWithPtrMarshalText), + out: []byteWithPtrMarshalText{1, 2, 3}, + golden: true, + }, + + // ints work with the marshaler but not the base64 []byte case + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]intWithMarshalJSON), + out: []intWithMarshalJSON{1, 2, 3}, + golden: true, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]intWithMarshalText), + out: []intWithMarshalText{1, 2, 3}, + golden: true, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]intWithPtrMarshalJSON), + out: []intWithPtrMarshalJSON{1, 2, 3}, + golden: true, + }, + { + in: `["Z01","Z02","Z03"]`, + ptr: new([]intWithPtrMarshalText), + out: []intWithPtrMarshalText{1, 2, 3}, + golden: true, + }, + + {in: `0.000001`, ptr: new(float64), out: 0.000001, golden: true}, + {in: `1e-7`, ptr: new(float64), out: 1e-7, golden: true}, + {in: `100000000000000000000`, ptr: new(float64), out: 100000000000000000000.0, golden: true}, + {in: `1e+21`, ptr: new(float64), out: 1e21, golden: true}, + {in: `-0.000001`, ptr: new(float64), out: -0.000001, golden: true}, + {in: `-1e-7`, ptr: new(float64), out: -1e-7, golden: true}, + {in: `-100000000000000000000`, ptr: new(float64), out: -100000000000000000000.0, golden: true}, + {in: `-1e+21`, ptr: new(float64), out: -1e21, golden: true}, + {in: `999999999999999900000`, ptr: new(float64), out: 999999999999999900000.0, golden: true}, + {in: `9007199254740992`, ptr: new(float64), out: 9007199254740992.0, golden: true}, + {in: `9007199254740993`, ptr: new(float64), out: 9007199254740992.0, golden: false}, + + { + in: `{"V": {"F2": "hello"}}`, + ptr: new(VOuter), + err: &UnmarshalTypeError{ + Value: "string", + Struct: "V", + Field: "F2", + Type: reflect.TypeOf(int32(0)), + Offset: 20, + }, + }, + { + in: `{"V": {"F4": {}, "F2": "hello"}}`, + ptr: new(VOuter), + err: &UnmarshalTypeError{ + Value: "string", + Struct: "V", + Field: "F2", + Type: reflect.TypeOf(int32(0)), + Offset: 30, + }, + }, + + // issue 15146. + // invalid inputs in wrongStringTests below. 
+ {in: `{"B":"true"}`, ptr: new(B), out: B{true}, golden: true}, + {in: `{"B":"false"}`, ptr: new(B), out: B{false}, golden: true}, + {in: `{"B": "maybe"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "maybe" into bool`)}, + {in: `{"B": "tru"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "tru" into bool`)}, + {in: `{"B": "False"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "False" into bool`)}, + {in: `{"B": "null"}`, ptr: new(B), out: B{false}}, + {in: `{"B": "nul"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "nul" into bool`)}, + {in: `{"B": [2, 3]}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal unquoted value into bool`)}, + + // additional tests for disallowUnknownFields + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12 + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18, + "extra": true + }`, + ptr: new(Top), + err: fmt.Errorf("json: unknown field \"extra\""), + disallowUnknownFields: true, + }, + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12, + "extra": null + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18 + }`, + ptr: new(Top), + err: fmt.Errorf("json: unknown field \"extra\""), + disallowUnknownFields: true, + }, + // issue 26444 + // UnmarshalTypeError without field & struct values + { + in: `{"data":{"test1": "bob", "test2": 123}}`, + ptr: new(mapStringToStringData), + err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 37, Struct: "mapStringToStringData", Field: "data"}, + }, + { + in: `{"data":{"test1": 123, "test2": "bob"}}`, + ptr: new(mapStringToStringData), + err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 21, Struct: "mapStringToStringData", Field: "data"}, + }, + + // trying to decode JSON arrays or objects via TextUnmarshaler + { + in: `[1, 2, 3]`, + ptr: new(MustNotUnmarshalText), + err: &UnmarshalTypeError{Value: "array", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1}, + }, + { + in: `{"foo": "bar"}`, + ptr: new(MustNotUnmarshalText), + err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1}, + }, +} + +func TestMarshal(t *testing.T) { + b, err := Marshal(allValue) + if err != nil { + t.Fatalf("Marshal allValue: %v", err) + } + if string(b) != allValueCompact { + t.Errorf("Marshal allValueCompact") + diff(t, b, []byte(allValueCompact)) + return + } + + b, err = Marshal(pallValue) + if err != nil { + t.Fatalf("Marshal pallValue: %v", err) + } + if string(b) != pallValueCompact { + t.Errorf("Marshal pallValueCompact") + diff(t, b, []byte(pallValueCompact)) + return + } +} + +var badUTF8 = []struct { + in, out string +}{ + {"hello\xffworld", `"hello\ufffdworld"`}, + {"", `""`}, + {"\xff", `"\ufffd"`}, + {"\xff\xff", `"\ufffd\ufffd"`}, + {"a\xffb", `"a\ufffdb"`}, + {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, +} + +func TestMarshalBadUTF8(t *testing.T) { + for _, tt := range badUTF8 { + b, err := Marshal(tt.in) + if string(b) != tt.out || err != nil { + t.Errorf("Marshal(%q) = %#q, %v, 
want %#q, nil", tt.in, b, err, tt.out) + } + } +} + +func TestMarshalNumberZeroVal(t *testing.T) { + var n Number + out, err := Marshal(n) + if err != nil { + t.Fatal(err) + } + outStr := string(out) + if outStr != "0" { + t.Fatalf("Invalid zero val for Number: %q", outStr) + } +} + +func TestMarshalEmbeds(t *testing.T) { + top := &Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + } + b, err := Marshal(top) + if err != nil { + t.Fatal(err) + } + want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" + if string(b) != want { + t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) + } +} + +func TestUnmarshal(t *testing.T) { + for i, tt := range unmarshalTests { + var scan scanner + in := []byte(tt.in) + if err := checkValid(in, &scan); err != nil { + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: checkValid: %#v", i, err) + continue + } + } + if tt.ptr == nil { + continue + } + + // v = new(right-type) + v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec := NewDecoder(bytes.NewReader(in)) + if tt.useNumber { + dec.UseNumber() + } + if tt.disallowUnknownFields { + dec.DisallowUnknownFields() + } + if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: %v, want %v", i, err, tt.err) + continue + } else if err != nil { + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) + data, _ := Marshal(v.Elem().Interface()) + println(string(data)) + data, _ = Marshal(tt.out) + println(string(data)) + continue + } + + // Check round trip also decodes correctly. 
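+ // Re-marshal the decoded value, check it against the input for the
+ // "golden" cases, then decode that output again and require the same
+ // result as the first decode.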
+ if tt.err == nil { + enc, err := Marshal(v.Interface()) + if err != nil { + t.Errorf("#%d: error re-marshaling: %v", i, err) + continue + } + if tt.golden && !bytes.Equal(enc, in) { + t.Errorf("#%d: remarshal mismatch:\nhave: %s\nwant: %s", i, enc, in) + } + vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec = NewDecoder(bytes.NewReader(enc)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(vv.Interface()); err != nil { + t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) + t.Errorf(" In: %q", strings.Map(noSpace, string(in))) + t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) + continue + } + } + } +} + +func TestUnmarshalMarshal(t *testing.T) { + initBig() + var v interface{} + if err := Unmarshal(jsonBig, &v); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + b, err := Marshal(v) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(jsonBig, b) { + t.Errorf("Marshal jsonBig") + diff(t, b, jsonBig) + return + } +} + +var numberTests = []struct { + in string + i int64 + intErr string + f float64 + floatErr string +}{ + {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, + {in: "-12", i: -12, f: -12.0}, + {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, +} + +// Independent of Decode, basic coverage of the accessors in Number +func TestNumberAccessors(t *testing.T) { + for _, tt := range numberTests { + n := Number(tt.in) + if s := n.String(); s != tt.in { + t.Errorf("Number(%q).String() is %q", tt.in, s) + } + if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { + t.Errorf("Number(%q).Int64() is %d", tt.in, i) + } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { + t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) + } + if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { + t.Errorf("Number(%q).Float64() is %g", tt.in, f) + } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { + t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) + } + } +} + +func TestLargeByteSlice(t *testing.T) { + s0 := make([]byte, 2000) + for i := range s0 { + s0[i] = byte(i) + } + b, err := Marshal(s0) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + var s1 []byte + if err := Unmarshal(b, &s1); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !bytes.Equal(s0, s1) { + t.Errorf("Marshal large byte slice") + diff(t, s0, s1) + } +} + +type Xint struct { + X int +} + +func TestUnmarshalInterface(t *testing.T) { + var xint Xint + var i interface{} = &xint + if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestUnmarshalPtrPtr(t *testing.T) { + var xint Xint + pxint := &xint + if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestEscape(t *testing.T) { + const input = `"foobar"` + " [\u2028 \u2029]" + const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` + b, err := Marshal(input) + if err != nil { + 
t.Fatalf("Marshal error: %v", err) + } + if s := string(b); s != expected { + t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) + } +} + +// WrongString is a struct that's misusing the ,string modifier. +type WrongString struct { + Message string `json:"result,string"` +} + +type wrongStringTest struct { + in, err string +} + +var wrongStringTests = []wrongStringTest{ + {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, + {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, + {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, + {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, +} + +// If people misuse the ,string modifier, the error message should be +// helpful, telling the user that they're doing it wrong. +func TestErrorMessageFromMisusedString(t *testing.T) { + for n, tt := range wrongStringTests { + r := strings.NewReader(tt.in) + var s WrongString + err := NewDecoder(r).Decode(&s) + got := fmt.Sprintf("%v", err) + if got != tt.err { + t.Errorf("%d. got err = %q, want %q", n, got, tt.err) + } + } +} + +func noSpace(c rune) rune { + if isSpace(byte(c)) { //only used for ascii + return -1 + } + return c +} + +type All struct { + Bool bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + + Foo string `json:"bar"` + Foo2 string `json:"bar2,dummyopt"` + + IntStr int64 `json:",string"` + UintptrStr uintptr `json:",string"` + + PBool *bool + PInt *int + PInt8 *int8 + PInt16 *int16 + PInt32 *int32 + PInt64 *int64 + PUint *uint + PUint8 *uint8 + PUint16 *uint16 + PUint32 *uint32 + PUint64 *uint64 + PUintptr *uintptr + PFloat32 *float32 + PFloat64 *float64 + + String string + PString *string + + Map map[string]Small + MapP map[string]*Small + PMap *map[string]Small + PMapP *map[string]*Small + + EmptyMap map[string]Small + NilMap map[string]Small + + Slice []Small + SliceP []*Small + PSlice *[]Small + PSliceP *[]*Small + + EmptySlice []Small + NilSlice []Small + + StringSlice []string + ByteSlice []byte + + Small Small + PSmall *Small + PPSmall **Small + + Interface interface{} + PInterface *interface{} + + unexported int +} + +type Small struct { + Tag string +} + +var allValue = All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Uintptr: 12, + Float32: 14.1, + Float64: 15.1, + Foo: "foo", + Foo2: "foo2", + IntStr: 42, + UintptrStr: 44, + String: "16", + Map: map[string]Small{ + "17": {Tag: "tag17"}, + "18": {Tag: "tag18"}, + }, + MapP: map[string]*Small{ + "19": {Tag: "tag19"}, + "20": nil, + }, + EmptyMap: map[string]Small{}, + Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, + SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, + EmptySlice: []Small{}, + StringSlice: []string{"str24", "str25", "str26"}, + ByteSlice: []byte{27, 28, 29}, + Small: Small{Tag: "tag30"}, + PSmall: &Small{Tag: "tag31"}, + Interface: 5.2, +} + +var pallValue = All{ + PBool: &allValue.Bool, + PInt: &allValue.Int, + PInt8: &allValue.Int8, + PInt16: &allValue.Int16, + PInt32: &allValue.Int32, + PInt64: &allValue.Int64, + PUint: &allValue.Uint, + PUint8: &allValue.Uint8, + PUint16: &allValue.Uint16, + PUint32: &allValue.Uint32, + PUint64: 
&allValue.Uint64, + PUintptr: &allValue.Uintptr, + PFloat32: &allValue.Float32, + PFloat64: &allValue.Float64, + PString: &allValue.String, + PMap: &allValue.Map, + PMapP: &allValue.MapP, + PSlice: &allValue.Slice, + PSliceP: &allValue.SliceP, + PPSmall: &allValue.PSmall, + PInterface: &allValue.Interface, +} + +var allValueIndent = `{ + "Bool": true, + "Int": 2, + "Int8": 3, + "Int16": 4, + "Int32": 5, + "Int64": 6, + "Uint": 7, + "Uint8": 8, + "Uint16": 9, + "Uint32": 10, + "Uint64": 11, + "Uintptr": 12, + "Float32": 14.1, + "Float64": 15.1, + "bar": "foo", + "bar2": "foo2", + "IntStr": "42", + "UintptrStr": "44", + "PBool": null, + "PInt": null, + "PInt8": null, + "PInt16": null, + "PInt32": null, + "PInt64": null, + "PUint": null, + "PUint8": null, + "PUint16": null, + "PUint32": null, + "PUint64": null, + "PUintptr": null, + "PFloat32": null, + "PFloat64": null, + "String": "16", + "PString": null, + "Map": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "MapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "PMap": null, + "PMapP": null, + "EmptyMap": {}, + "NilMap": null, + "Slice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "SliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "PSlice": null, + "PSliceP": null, + "EmptySlice": [], + "NilSlice": null, + "StringSlice": [ + "str24", + "str25", + "str26" + ], + "ByteSlice": "Gxwd", + "Small": { + "Tag": "tag30" + }, + "PSmall": { + "Tag": "tag31" + }, + "PPSmall": null, + "Interface": 5.2, + "PInterface": null +}` + +var allValueCompact = strings.Map(noSpace, allValueIndent) + +var pallValueIndent = `{ + "Bool": false, + "Int": 0, + "Int8": 0, + "Int16": 0, + "Int32": 0, + "Int64": 0, + "Uint": 0, + "Uint8": 0, + "Uint16": 0, + "Uint32": 0, + "Uint64": 0, + "Uintptr": 0, + "Float32": 0, + "Float64": 0, + "bar": "", + "bar2": "", + "IntStr": "0", + "UintptrStr": "0", + "PBool": true, + "PInt": 2, + "PInt8": 3, + "PInt16": 4, + "PInt32": 5, + "PInt64": 6, + "PUint": 7, + "PUint8": 8, + "PUint16": 9, + "PUint32": 10, + "PUint64": 11, + "PUintptr": 12, + "PFloat32": 14.1, + "PFloat64": 15.1, + "String": "", + "PString": "16", + "Map": null, + "MapP": null, + "PMap": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "PMapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "EmptyMap": null, + "NilMap": null, + "Slice": null, + "SliceP": null, + "PSlice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "PSliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "EmptySlice": null, + "NilSlice": null, + "StringSlice": null, + "ByteSlice": null, + "Small": { + "Tag": "" + }, + "PSmall": null, + "PPSmall": { + "Tag": "tag31" + }, + "Interface": null, + "PInterface": 5.2 +}` + +var pallValueCompact = strings.Map(noSpace, pallValueIndent) + +func TestRefUnmarshal(t *testing.T) { + type S struct { + // Ref is defined in encode_test.go. 
+ R0 Ref + R1 *Ref + R2 RefText + R3 *RefText + } + want := S{ + R0: 12, + R1: new(Ref), + R2: 13, + R3: new(RefText), + } + *want.R1 = 12 + *want.R3 = 13 + + var got S + if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +// Test that the empty string doesn't panic decoding when ,string is specified +// Issue 3450 +func TestEmptyString(t *testing.T) { + type T2 struct { + Number1 int `json:",string"` + Number2 int `json:",string"` + } + data := `{"Number1":"1", "Number2":""}` + dec := NewDecoder(strings.NewReader(data)) + var t2 T2 + err := dec.Decode(&t2) + if err == nil { + t.Fatal("Decode: did not return error") + } + if t2.Number1 != 1 { + t.Fatal("Decode: did not set Number1") + } +} + +// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). +// It should also not be an error (issue 2540, issue 8587). +func TestNullString(t *testing.T) { + type T struct { + A int `json:",string"` + B int `json:",string"` + C *int `json:",string"` + } + data := []byte(`{"A": "1", "B": null, "C": null}`) + var s T + s.B = 1 + s.C = new(int) + *s.C = 2 + err := Unmarshal(data, &s) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if s.B != 1 || s.C != nil { + t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) + } +} + +func intp(x int) *int { + p := new(int) + *p = x + return p +} + +func intpp(x *int) **int { + pp := new(*int) + *pp = x + return pp +} + +var interfaceSetTests = []struct { + pre interface{} + json string + post interface{} +}{ + {"foo", `"bar"`, "bar"}, + {"foo", `2`, 2.0}, + {"foo", `true`, true}, + {"foo", `null`, nil}, + + {nil, `null`, nil}, + {new(int), `null`, nil}, + {(*int)(nil), `null`, nil}, + {new(*int), `null`, new(*int)}, + {(**int)(nil), `null`, nil}, + {intp(1), `null`, nil}, + {intpp(nil), `null`, intpp(nil)}, + {intpp(intp(1)), `null`, intpp(nil)}, +} + +func TestInterfaceSet(t *testing.T) { + for _, tt := range interfaceSetTests { + b := struct{ X interface{} }{tt.pre} + blob := `{"X":` + tt.json + `}` + if err := Unmarshal([]byte(blob), &b); err != nil { + t.Errorf("Unmarshal %#q: %v", blob, err) + continue + } + if !reflect.DeepEqual(b.X, tt.post) { + t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) + } + } +} + +type NullTest struct { + Bool bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Float32 float32 + Float64 float64 + String string + PBool *bool + Map map[string]string + Slice []string + Interface interface{} + + PRaw *RawMessage + PTime *time.Time + PBigInt *big.Int + PText *MustNotUnmarshalText + PBuffer *bytes.Buffer // has methods, just not relevant ones + PStruct *struct{} + + Raw RawMessage + Time time.Time + BigInt big.Int + Text MustNotUnmarshalText + Buffer bytes.Buffer + Struct struct{} +} + +type NullTestStrings struct { + Bool bool `json:",string"` + Int int `json:",string"` + Int8 int8 `json:",string"` + Int16 int16 `json:",string"` + Int32 int32 `json:",string"` + Int64 int64 `json:",string"` + Uint uint `json:",string"` + Uint8 uint8 `json:",string"` + Uint16 uint16 `json:",string"` + Uint32 uint32 `json:",string"` + Uint64 uint64 `json:",string"` + Float32 float32 `json:",string"` + Float64 float64 `json:",string"` + String string `json:",string"` + PBool *bool `json:",string"` + Map 
map[string]string `json:",string"` + Slice []string `json:",string"` + Interface interface{} `json:",string"` + + PRaw *RawMessage `json:",string"` + PTime *time.Time `json:",string"` + PBigInt *big.Int `json:",string"` + PText *MustNotUnmarshalText `json:",string"` + PBuffer *bytes.Buffer `json:",string"` + PStruct *struct{} `json:",string"` + + Raw RawMessage `json:",string"` + Time time.Time `json:",string"` + BigInt big.Int `json:",string"` + Text MustNotUnmarshalText `json:",string"` + Buffer bytes.Buffer `json:",string"` + Struct struct{} `json:",string"` +} + +// JSON null values should be ignored for primitives and string values instead of resulting in an error. +// Issue 2540 +func TestUnmarshalNulls(t *testing.T) { + // Unmarshal docs: + // The JSON null value unmarshals into an interface, map, pointer, or slice + // by setting that Go value to nil. Because null is often used in JSON to mean + // ``not present,'' unmarshaling a JSON null into any other Go type has no effect + // on the value and produces no error. + + jsonData := []byte(`{ + "Bool" : null, + "Int" : null, + "Int8" : null, + "Int16" : null, + "Int32" : null, + "Int64" : null, + "Uint" : null, + "Uint8" : null, + "Uint16" : null, + "Uint32" : null, + "Uint64" : null, + "Float32" : null, + "Float64" : null, + "String" : null, + "PBool": null, + "Map": null, + "Slice": null, + "Interface": null, + "PRaw": null, + "PTime": null, + "PBigInt": null, + "PText": null, + "PBuffer": null, + "PStruct": null, + "Raw": null, + "Time": null, + "BigInt": null, + "Text": null, + "Buffer": null, + "Struct": null + }`) + nulls := NullTest{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Float32: 12.1, + Float64: 13.1, + String: "14", + PBool: new(bool), + Map: map[string]string{}, + Slice: []string{}, + Interface: new(MustNotUnmarshalJSON), + PRaw: new(RawMessage), + PTime: new(time.Time), + PBigInt: new(big.Int), + PText: new(MustNotUnmarshalText), + PStruct: new(struct{}), + PBuffer: new(bytes.Buffer), + Raw: RawMessage("123"), + Time: time.Unix(123456789, 0), + BigInt: *big.NewInt(123), + } + + before := nulls.Time.String() + + err := Unmarshal(jsonData, &nulls) + if err != nil { + t.Errorf("Unmarshal of null values failed: %v", err) + } + if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || + nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || + nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { + t.Errorf("Unmarshal of null values affected primitives") + } + + if nulls.PBool != nil { + t.Errorf("Unmarshal of null did not clear nulls.PBool") + } + if nulls.Map != nil { + t.Errorf("Unmarshal of null did not clear nulls.Map") + } + if nulls.Slice != nil { + t.Errorf("Unmarshal of null did not clear nulls.Slice") + } + if nulls.Interface != nil { + t.Errorf("Unmarshal of null did not clear nulls.Interface") + } + if nulls.PRaw != nil { + t.Errorf("Unmarshal of null did not clear nulls.PRaw") + } + if nulls.PTime != nil { + t.Errorf("Unmarshal of null did not clear nulls.PTime") + } + if nulls.PBigInt != nil { + t.Errorf("Unmarshal of null did not clear nulls.PBigInt") + } + if nulls.PText != nil { + t.Errorf("Unmarshal of null did not clear nulls.PText") + } + if nulls.PBuffer != nil { + t.Errorf("Unmarshal of null did not clear nulls.PBuffer") + } + if nulls.PStruct != nil { + t.Errorf("Unmarshal of null did 
not clear nulls.PStruct") + } + + if string(nulls.Raw) != "null" { + t.Errorf("Unmarshal of RawMessage null did not record null: %v", string(nulls.Raw)) + } + if nulls.Time.String() != before { + t.Errorf("Unmarshal of time.Time null set time to %v", nulls.Time.String()) + } + if nulls.BigInt.String() != "123" { + t.Errorf("Unmarshal of big.Int null set int to %v", nulls.BigInt.String()) + } +} + +type MustNotUnmarshalJSON struct{} + +func (x MustNotUnmarshalJSON) UnmarshalJSON(data []byte) error { + return errors.New("MustNotUnmarshalJSON was used") +} + +type MustNotUnmarshalText struct{} + +func (x MustNotUnmarshalText) UnmarshalText(text []byte) error { + return errors.New("MustNotUnmarshalText was used") +} + +func TestStringKind(t *testing.T) { + type stringKind string + + var m1, m2 map[stringKind]int + m1 = map[stringKind]int{ + "foo": 42, + } + + data, err := Marshal(m1) + if err != nil { + t.Errorf("Unexpected error marshaling: %v", err) + } + + err = Unmarshal(data, &m2) + if err != nil { + t.Errorf("Unexpected error unmarshaling: %v", err) + } + + if !reflect.DeepEqual(m1, m2) { + t.Error("Items should be equal after encoding and then decoding") + } +} + +// Custom types with []byte as underlying type could not be marshaled +// and then unmarshaled. +// Issue 8962. +func TestByteKind(t *testing.T) { + type byteKind []byte + + a := byteKind("hello") + + data, err := Marshal(a) + if err != nil { + t.Error(err) + } + var b byteKind + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Errorf("expected %v == %v", a, b) + } +} + +// The fix for issue 8962 introduced a regression. +// Issue 12921. +func TestSliceOfCustomByte(t *testing.T) { + type Uint8 uint8 + + a := []Uint8("hello") + + data, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + var b []Uint8 + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Fatalf("expected %v == %v", a, b) + } +} + +var decodeTypeErrorTests = []struct { + dest interface{} + src string +}{ + {new(string), `{"user": "name"}`}, // issue 4628. + {new(error), `{}`}, // issue 4222 + {new(error), `[]`}, + {new(error), `""`}, + {new(error), `123`}, + {new(error), `true`}, +} + +func TestUnmarshalTypeError(t *testing.T) { + for _, item := range decodeTypeErrorTests { + err := Unmarshal([]byte(item.src), item.dest) + if _, ok := err.(*UnmarshalTypeError); !ok { + t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", + item.src, item.dest, err) + } + } +} + +var unmarshalSyntaxTests = []string{ + "tru", + "fals", + "nul", + "123e", + `"hello`, + `[1,2,3`, + `{"key":1`, + `{"key":1,`, +} + +func TestUnmarshalSyntax(t *testing.T) { + var x interface{} + for _, src := range unmarshalSyntaxTests { + err := Unmarshal([]byte(src), &x) + if _, ok := err.(*SyntaxError); !ok { + t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) + } + } +} + +// Test handling of unexported fields that should be ignored. 
+// Issue 4660 +type unexportedFields struct { + Name string + m map[string]interface{} `json:"-"` + m2 map[string]interface{} `json:"abcd"` + + s []int `json:"-"` +} + +func TestUnmarshalUnexported(t *testing.T) { + input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}, "s": [2, 3]}` + want := &unexportedFields{Name: "Bob"} + + out := &unexportedFields{} + err := Unmarshal([]byte(input), out) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !reflect.DeepEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +// Time3339 is a time.Time which encodes to and from JSON +// as an RFC 3339 time in UTC. +type Time3339 time.Time + +func (t *Time3339) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) + } + tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) + if err != nil { + return err + } + *t = Time3339(tm) + return nil +} + +func TestUnmarshalJSONLiteralError(t *testing.T) { + var t3 Time3339 + err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) + if err == nil { + t.Fatalf("expected error; got time %v", time.Time(t3)) + } + if !strings.Contains(err.Error(), "range") { + t.Errorf("got err = %v; want out of range error", err) + } +} + +// Test that extra object elements in an array do not result in a +// "data changing underfoot" error. +// Issue 3717 +func TestSkipArrayObjects(t *testing.T) { + json := `[{}]` + var dest [0]interface{} + + err := Unmarshal([]byte(json), &dest) + if err != nil { + t.Errorf("got error %q, want nil", err) + } +} + +// Test semantics of pre-filled struct fields and pre-filled map fields. +// Issue 4900. +func TestPrefilled(t *testing.T) { + ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } + + // Values here change, cannot reuse table across runs. 
+ var prefillTests = []struct { + in string + ptr interface{} + out interface{} + }{ + { + in: `{"X": 1, "Y": 2}`, + ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, + out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, + }, + { + in: `{"X": 1, "Y": 2}`, + ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), + out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), + }, + } + + for _, tt := range prefillTests { + ptrstr := fmt.Sprintf("%v", tt.ptr) + err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here + if err != nil { + t.Errorf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(tt.ptr, tt.out) { + t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) + } + } +} + +var invalidUnmarshalTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, +} + +func TestInvalidUnmarshal(t *testing.T) { + buf := []byte(`{"a":"1"}`) + for _, tt := range invalidUnmarshalTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +var invalidUnmarshalTextTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, + {new(net.IP), "json: cannot unmarshal number into Go value of type *net.IP"}, +} + +func TestInvalidUnmarshalText(t *testing.T) { + buf := []byte(`123`) + for _, tt := range invalidUnmarshalTextTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +// Test that string option is ignored for invalid types. +// Issue 9812. +func TestInvalidStringOption(t *testing.T) { + num := 0 + item := struct { + T time.Time `json:",string"` + M map[string]string `json:",string"` + S []string `json:",string"` + A [1]string `json:",string"` + I interface{} `json:",string"` + P *int `json:",string"` + }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} + + data, err := Marshal(item) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + err = Unmarshal(data, &item) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } +} + +// Test unmarshal behavior with regards to embedded unexported structs. +// +// (Issue 21357) If the embedded struct is a pointer and is unallocated, +// this returns an error because unmarshal cannot set the field. +// +// (Issue 24152) If the embedded struct is given an explicit name, +// ensure that the normal unmarshal logic does not panic in reflect. +// +// (Issue 28145) If the embedded struct is given an explicit name and has +// exported methods, don't cause a panic trying to get its value. 
+func TestUnmarshalEmbeddedUnexported(t *testing.T) { + type ( + embed1 struct{ Q int } + embed2 struct{ Q int } + embed3 struct { + Q int64 `json:",string"` + } + S1 struct { + *embed1 + R int + } + S2 struct { + *embed1 + Q int + } + S3 struct { + embed1 + R int + } + S4 struct { + *embed1 + embed2 + } + S5 struct { + *embed3 + R int + } + S6 struct { + embed1 `json:"embed1"` + } + S7 struct { + embed1 `json:"embed1"` + embed2 + } + S8 struct { + embed1 `json:"embed1"` + embed2 `json:"embed2"` + Q int + } + S9 struct { + unexportedWithMethods `json:"embed"` + } + ) + + tests := []struct { + in string + ptr interface{} + out interface{} + err error + }{{ + // Error since we cannot set S1.embed1, but still able to set S1.R. + in: `{"R":2,"Q":1}`, + ptr: new(S1), + out: &S1{R: 2}, + err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed1"), + }, { + // The top level Q field takes precedence. + in: `{"Q":1}`, + ptr: new(S2), + out: &S2{Q: 1}, + }, { + // No issue with non-pointer variant. + in: `{"R":2,"Q":1}`, + ptr: new(S3), + out: &S3{embed1: embed1{Q: 1}, R: 2}, + }, { + // No error since both embedded structs have field R, which annihilate each other. + // Thus, no attempt is made at setting S4.embed1. + in: `{"R":2}`, + ptr: new(S4), + out: new(S4), + }, { + // Error since we cannot set S5.embed1, but still able to set S5.R. + in: `{"R":2,"Q":1}`, + ptr: new(S5), + out: &S5{R: 2}, + err: fmt.Errorf("json: cannot set embedded pointer to unexported struct: json.embed3"), + }, { + // Issue 24152, ensure decodeState.indirect does not panic. + in: `{"embed1": {"Q": 1}}`, + ptr: new(S6), + out: &S6{embed1{1}}, + }, { + // Issue 24153, check that we can still set forwarded fields even in + // the presence of a name conflict. + // + // This relies on obscure behavior of reflect where it is possible + // to set a forwarded exported field on an unexported embedded struct + // even though there is a name conflict, even when it would have been + // impossible to do so according to Go visibility rules. + // Go forbids this because it is ambiguous whether S7.Q refers to + // S7.embed1.Q or S7.embed2.Q. Since embed1 and embed2 are unexported, + // it should be impossible for an external package to set either Q. + // + // It is probably okay for a future reflect change to break this. + in: `{"embed1": {"Q": 1}, "Q": 2}`, + ptr: new(S7), + out: &S7{embed1{1}, embed2{2}}, + }, { + // Issue 24153, similar to the S7 case. + in: `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`, + ptr: new(S8), + out: &S8{embed1{1}, embed2{2}, 3}, + }, { + // Issue 228145, similar to the cases above. 
+ in: `{"embed": {}}`, + ptr: new(S9), + out: &S9{}, + }} + + for i, tt := range tests { + err := Unmarshal([]byte(tt.in), tt.ptr) + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: %v, want %v", i, err, tt.err) + } + if !reflect.DeepEqual(tt.ptr, tt.out) { + t.Errorf("#%d: mismatch\ngot: %#+v\nwant: %#+v", i, tt.ptr, tt.out) + } + } +} + +type unmarshalPanic struct{} + +func (unmarshalPanic) UnmarshalJSON([]byte) error { panic(0xdead) } + +func TestUnmarshalPanic(t *testing.T) { + defer func() { + if got := recover(); !reflect.DeepEqual(got, 0xdead) { + t.Errorf("panic() = (%T)(%v), want 0xdead", got, got) + } + }() + Unmarshal([]byte("{}"), &unmarshalPanic{}) + t.Fatalf("Unmarshal should have panicked") +} diff --git a/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/encode.go b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/encode.go new file mode 100644 index 00000000..dea63f18 --- /dev/null +++ b/internal/third_party/pkgsite/stdlib/testdata/v1.12.5/src/encoding/json/encode.go @@ -0,0 +1,1273 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder that had SetEscapeHTML(false) +// called on it. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. +// Each exported struct field becomes a member of the object, using the +// field name as the object key, unless the field is omitted for one of the +// reasons given below. +// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. 
+// The format string gives the name of the field, possibly followed by a +// comma-separated list of options. The name may be empty in order to +// specify options without overriding the default field name. +// +// The "omitempty" option specifies that the field should be omitted +// from the encoding if the field has an empty value, defined as +// false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. +// +// As a special case, if the field tag is "-", the field is always omitted. +// Note that a field with name "-" can still be generated using the tag "-,". +// +// Examples of struct field tags and their meanings: +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "-". +// Field int `json:"-,"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, and ASCII punctuation except quotation +// marks, backslash, and comma. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a +// string, an integer type, or implement encoding.TextMarshaler. 
The map keys +// are sorted and used as JSON object keys by applying the following rules, +// subject to the UTF-8 coercion described for string values above: +// - string keys are used directly +// - encoding.TextMarshalers are marshaled +// - integer keys are converted to strings +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := newEncodeState() + + err := e.marshal(v, encOpts{escapeHTML: true}) + if err != nil { + return nil, err + } + buf := append([]byte(nil), e.Bytes()...) + + e.Reset() + encodeStatePool.Put(e) + + return buf, nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +// Each JSON element in the output will begin on a new line beginning with prefix +// followed by one or more copies of indent according to the indentation nesting. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML