diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go
index bbbebde3edf5..4daf82e9a97c 100644
--- a/internal/backend/local/test.go
+++ b/internal/backend/local/test.go
@@ -17,6 +17,7 @@ import (
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/backend/backendrun"
+ "github.com/hashicorp/terraform/internal/command/junit"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/lang"
@@ -48,7 +49,8 @@ type TestSuiteRunner struct {
Opts *terraform.ContextOpts
- View views.Test
+ View views.Test
+ JUnit junit.JUnit
// Stopped and Cancelled track whether the user requested the testing
// process to be interrupted. Stopped is a nice graceful exit, we'll still
@@ -171,6 +173,14 @@ func (runner *TestSuiteRunner) Test() (moduletest.Status, tfdiags.Diagnostics) {
runner.View.Conclusion(suite)
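+ // If JUnit XML output was requested, save the report now that the suite has concluded.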
+ if runner.JUnit != nil {
+ artifactDiags := runner.JUnit.Save(suite)
+ diags = diags.Append(artifactDiags)
+ if artifactDiags.HasErrors() {
+ return moduletest.Error, diags
+ }
+ }
+
return suite.Status, diags
}
diff --git a/internal/command/junit/junit.go b/internal/command/junit/junit.go
new file mode 100644
index 000000000000..2aae55a91cf8
--- /dev/null
+++ b/internal/command/junit/junit.go
@@ -0,0 +1,265 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+package junit
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "os"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/v2"
+ "github.com/hashicorp/terraform/internal/command/format"
+ "github.com/hashicorp/terraform/internal/configs/configload"
+ "github.com/hashicorp/terraform/internal/moduletest"
+ "github.com/hashicorp/terraform/internal/tfdiags"
+)
+
+// TestJUnitXMLFile produces a JUnit XML file at the conclusion of a test
+// run, summarizing the outcome of the test in a form that can then be
+// interpreted by tools which render JUnit XML result reports.
+//
+// The de-facto convention for JUnit XML is for it to be emitted as a separate
+// file as a complement to human-oriented output, rather than _instead of_
+// human-oriented output. To meet that expectation, [TestJUnitXMLFile.Save]
+// should be called when the test's view reaches its "Conclusion" event.
+// If that event isn't reached for any reason then no file should be created at
+// all, which JUnit XML-consuming tools tend to expect as an outcome of a
+// catastrophically-errored test suite.
+//
+// TestJUnitXMLFile implements the JUnit interface, which allows creation of a local
+// file that contains a description of a completed test suite. It is intended only
+// for use in conjunction with a View that provides the streaming output of ongoing
+// testing events.
+type TestJUnitXMLFile struct {
+ filename string
+
+ // A config loader is required to access sources, which are used with diagnostics to create XML content
+ configLoader *configload.Loader
+}
+
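+// JUnit is implemented by types that can save a summary of a completed
+// test suite, for example as a JUnit XML report file.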
+type JUnit interface {
+ Save(*moduletest.Suite) tfdiags.Diagnostics
+}
+
+var _ JUnit = (*TestJUnitXMLFile)(nil)
+
+// NewTestJUnitXMLFile returns a [JUnit] implementation that will, when its
+// Save method is called, write a JUnit XML report to the given filename.
+//
+// If the file already exists then it will be silently overwritten at the
+// point of saving; otherwise it will be created at that time. If creating
+// or overwriting the file fails, Save returns diagnostics describing the
+// problem.
+func NewTestJUnitXMLFile(filename string, configLoader *configload.Loader) *TestJUnitXMLFile {
+ return &TestJUnitXMLFile{
+ filename: filename,
+ configLoader: configLoader,
+ }
+}
+
+// Save takes a test suite, generates JUnit XML summarizing the test results,
+// and writes the content to the filename specified by the user.
+func (v *TestJUnitXMLFile) Save(suite *moduletest.Suite) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ // Prepare XML content
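+ // Sources let the diagnostic formatter include source snippets in the XML output.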
+ sources := v.configLoader.Parser().Sources()
+ xmlSrc, err := junitXMLTestReport(suite, sources)
+ if err != nil {
+ diags = diags.Append(&hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "error generating JUnit XML test output",
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ // Save XML to the specified path
+ saveDiags := v.save(xmlSrc)
+ diags = append(diags, saveDiags...)
+
+ return diags
+}
+
+func (v *TestJUnitXMLFile) save(xmlSrc []byte) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
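+ // os.WriteFile creates the file if it doesn't exist and truncates it if it does;
+ // 0660 limits access to the owner and group.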
+ err := os.WriteFile(v.filename, xmlSrc, 0660)
+ if err != nil {
+ diags = diags.Append(&hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: fmt.Sprintf("error saving JUnit XML to file %q", v.filename),
+ Detail: err.Error(),
+ })
+ return diags
+ }
+
+ return nil
+}
+
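+// withMessage models JUnit XML elements that carry an optional message
+// attribute and a CDATA body, such as skipped, failure, error, and system-err.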
+type withMessage struct {
+ Message string `xml:"message,attr,omitempty"`
+ Body string `xml:",cdata"`
+}
+
+type testCase struct {
+ Name string `xml:"name,attr"`
+ Classname string `xml:"classname,attr"`
+ Skipped *withMessage `xml:"skipped,omitempty"`
+ Failure *withMessage `xml:"failure,omitempty"`
+ Error *withMessage `xml:"error,omitempty"`
+ Stderr *withMessage `xml:"system-err,omitempty"`
+
+ // RunTime is the time spent executing the run associated
+ // with this test case, in seconds with the fractional component
+ // representing partial seconds.
+ //
+ // We assume here that it's not practically possible for an
+ // execution to take literally zero fractional seconds at
+ // the accuracy we're using here (nanoseconds converted into
+ // floating point seconds) and so use zero to represent
+ // "not known", and thus omit that case. (In practice many
+ // JUnit XML consumers treat the absence of this attribute
+ // as zero anyway.)
+ RunTime float64 `xml:"time,attr,omitempty"`
+ Timestamp string `xml:"timestamp,attr,omitempty"`
+}
+
+func junitXMLTestReport(suite *moduletest.Suite, sources map[string][]byte) ([]byte, error) {
+ var buf bytes.Buffer
+ enc := xml.NewEncoder(&buf)
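+ // Emit the XML declaration before any elements.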
+ enc.EncodeToken(xml.ProcInst{
+ Target: "xml",
+ Inst: []byte(`version="1.0" encoding="UTF-8"`),
+ })
+ enc.Indent("", " ")
+
+ // Some common element/attribute names we'll use repeatedly below.
+ suitesName := xml.Name{Local: "testsuites"}
+ suiteName := xml.Name{Local: "testsuite"}
+ caseName := xml.Name{Local: "testcase"}
+ nameName := xml.Name{Local: "name"}
+ testsName := xml.Name{Local: "tests"}
+ skippedName := xml.Name{Local: "skipped"}
+ failuresName := xml.Name{Local: "failures"}
+ errorsName := xml.Name{Local: "errors"}
+
+ enc.EncodeToken(xml.StartElement{Name: suitesName})
+
+ sortedFiles := suiteFilesAsSortedList(suite.Files) // to ensure consistent ordering in XML
+ for _, file := range sortedFiles {
+ // Each test file is modelled as a "test suite".
+
+ // First we'll count the number of tests and number of failures/errors
+ // for the suite-level summary.
+ totalTests := len(file.Runs)
+ totalFails := 0
+ totalErrs := 0
+ totalSkipped := 0
+ for _, run := range file.Runs {
+ switch run.Status {
+ case moduletest.Skip:
+ totalSkipped++
+ case moduletest.Fail:
+ totalFails++
+ case moduletest.Error:
+ totalErrs++
+ }
+ }
+ enc.EncodeToken(xml.StartElement{
+ Name: suiteName,
+ Attr: []xml.Attr{
+ {Name: nameName, Value: file.Name},
+ {Name: testsName, Value: strconv.Itoa(totalTests)},
+ {Name: skippedName, Value: strconv.Itoa(totalSkipped)},
+ {Name: failuresName, Value: strconv.Itoa(totalFails)},
+ {Name: errorsName, Value: strconv.Itoa(totalErrs)},
+ },
+ })
+
+ for _, run := range file.Runs {
+ // Each run is a "test case".
+
+ testCase := testCase{
+ Name: run.Name,
+
+ // We treat the test scenario filename as the "class name",
+ // implying that the run name is the "method name", just
+ // because that seems to inspire more useful rendering in
+ // some consumers of JUnit XML that were designed for
+ // Java-shaped languages.
+ Classname: file.Name,
+ }
+ if execMeta := run.ExecutionMeta; execMeta != nil {
+ testCase.RunTime = execMeta.Duration.Seconds()
+ testCase.Timestamp = execMeta.StartTimestamp()
+ }
+ switch run.Status {
+ case moduletest.Skip:
+ testCase.Skipped = &withMessage{
+ // FIXME: Is there something useful we could say here about
+ // why the test was skipped?
+ }
+ case moduletest.Fail:
+ testCase.Failure = &withMessage{
+ Message: "Test run failed",
+ // FIXME: What's a useful thing to report in the body
+ // here? A summary of the statuses from all of the
+ // checkable objects in the configuration?
+ }
+ case moduletest.Error:
+ var diagsStr strings.Builder
+ for _, diag := range run.Diagnostics {
+ diagsStr.WriteString(format.DiagnosticPlain(diag, sources, 80))
+ }
+ testCase.Error = &withMessage{
+ Message: "Encountered an error",
+ Body: diagsStr.String(),
+ }
+ }
+ if len(run.Diagnostics) != 0 && testCase.Error == nil {
+ // If we have diagnostics but the outcome wasn't an error
+ // then we're presumably holding diagnostics that didn't
+ // cause the test to error, such as warnings. We'll place
+ // those into the "system-err" element instead, so that
+ // they'll be reported _somewhere_ at least.
+ var diagsStr strings.Builder
+ for _, diag := range run.Diagnostics {
+ diagsStr.WriteString(format.DiagnosticPlain(diag, sources, 80))
+ }
+ testCase.Stderr = &withMessage{
+ Body: diagsStr.String(),
+ }
+ }
+ enc.EncodeElement(&testCase, xml.StartElement{
+ Name: caseName,
+ })
+ }
+
+ enc.EncodeToken(xml.EndElement{Name: suiteName})
+ }
+ enc.EncodeToken(xml.EndElement{Name: suitesName})
+ enc.Close()
+ return buf.Bytes(), nil
+}
+
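+// suiteFilesAsSortedList returns the suite's files ordered by file name, so
+// that the generated XML has a consistent ordering.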
+func suiteFilesAsSortedList(files map[string]*moduletest.File) []*moduletest.File {
+ fileNames := make([]string, len(files))
+ i := 0
+ for k := range files {
+ fileNames[i] = k
+ i++
+ }
+ slices.Sort(fileNames)
+
+ sortedFiles := make([]*moduletest.File, len(files))
+ for i, name := range fileNames {
+ sortedFiles[i] = files[name]
+ }
+ return sortedFiles
+}
diff --git a/internal/command/junit/junit_test.go b/internal/command/junit/junit_test.go
new file mode 100644
index 000000000000..6c8444a30630
--- /dev/null
+++ b/internal/command/junit/junit_test.go
@@ -0,0 +1,224 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+package junit
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/internal/configs/configload"
+ "github.com/hashicorp/terraform/internal/moduletest"
+)
+
+// Tests in this package cannot access sources when constructing output for XML files. Due to this, the majority
+// of testing for TestJUnitXMLFile is in internal/command/test_test.go.
+// In the junit package we can write some limited tests about XML output, as long as there are no errors or
+// failing runs in the test suite.
+func Test_TestJUnitXMLFile_Save(t *testing.T) {
+
+ cases := map[string]struct {
+ filename string
+ suite moduletest.Suite
+ expectedOutput []byte
+ expectError bool
+ }{
+ "renders output indicating when tests are skipped": {
+ filename: "output.xml",
+ suite: moduletest.Suite{
+ Status: moduletest.Skip,
+ Files: map[string]*moduletest.File{
+ "file1.tftest.hcl": {
+ Name: "file1.tftest.hcl",
+ Status: moduletest.Fail,
+ Runs: []*moduletest.Run{
+ {
+ Name: "my_test",
+ Status: moduletest.Skip,
+ },
+ },
+ },
+ },
+ },
+ expectedOutput: []byte(`
+
+
+
+
+
+`),
+ },
+ }
+
+ for tn, tc := range cases {
+ t.Run(tn, func(t *testing.T) {
+ // Setup test
+ td := t.TempDir()
+ path := fmt.Sprintf("%s/%s", td, tc.filename)
+
+ loader, cleanup := configload.NewLoaderForTests(t)
+ defer cleanup()
+
+ j := TestJUnitXMLFile{
+ filename: path,
+ configLoader: loader,
+ }
+
+ // Process data & save file
+ gotDiags := j.Save(&tc.suite)
+ if gotDiags.HasErrors() != tc.expectError {
+ t.Fatalf("unexpected error state (expectError=%t): %s", tc.expectError, gotDiags.Err())
+ }
+
+ // Assertions
+ actualOut, err := os.ReadFile(path)
+ if err != nil {
+ t.Fatalf("error opening XML file: %s", err)
+ }
+
+ if !bytes.Equal(actualOut, tc.expectedOutput) {
+ t.Fatalf("expected output:\n%s\ngot:\n%s", tc.expectedOutput, actualOut)
+ }
+ })
+ }
+
+}
+
+func Test_TestJUnitXMLFile_save(t *testing.T) {
+
+ cases := map[string]struct {
+ filename string
+ expectError bool
+ }{
+ "can save output to the specified filename": {
+ filename: func() string {
+ td := t.TempDir()
+ return fmt.Sprintf("%s/output.xml", td)
+ }(),
+ },
+ "returns an error when given a filename that isn't absolute or relative": {
+ filename: "~/output.xml",
+ expectError: true,
+ },
+ }
+
+ for tn, tc := range cases {
+ t.Run(tn, func(t *testing.T) {
+ j := TestJUnitXMLFile{
+ filename: tc.filename,
+ }
+
+ xml := []byte(`
+
+
+
+`)
+
+ diags := j.save(xml)
+
+ if diags.HasErrors() {
+ if !tc.expectError {
+ t.Fatalf("got unexpected error: %s", diags.Err())
+ }
+ // return early if testing error case
+ return
+ }
+
+ if tc.expectError {
+ t.Fatalf("expected an error but got none")
+ }
+
+ fileContent, err := os.ReadFile(tc.filename)
+ if err != nil {
+ t.Fatalf("unexpected error opening file")
+ }
+
+ if !bytes.Equal(fileContent, xml) {
+ t.Fatalf("wanted XML:\n%s\n got XML:\n%s\n", string(xml), string(fileContent))
+ }
+ })
+ }
+}
+
+func Test_suiteFilesAsSortedList(t *testing.T) {
+ cases := map[string]struct {
+ Suite *moduletest.Suite
+ ExpectedNames map[int]string
+ }{
+ "no test files": {
+ Suite: &moduletest.Suite{},
+ },
+ "3 test files ordered in map": {
+ Suite: &moduletest.Suite{
+ Status: moduletest.Skip,
+ Files: map[string]*moduletest.File{
+ "test_file_1.tftest.hcl": {
+ Name: "test_file_1.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ "test_file_2.tftest.hcl": {
+ Name: "test_file_2.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ "test_file_3.tftest.hcl": {
+ Name: "test_file_3.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ },
+ },
+ ExpectedNames: map[int]string{
+ 0: "test_file_1.tftest.hcl",
+ 1: "test_file_2.tftest.hcl",
+ 2: "test_file_3.tftest.hcl",
+ },
+ },
+ "3 test files unordered in map": {
+ Suite: &moduletest.Suite{
+ Status: moduletest.Skip,
+ Files: map[string]*moduletest.File{
+ "test_file_3.tftest.hcl": {
+ Name: "test_file_3.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ "test_file_1.tftest.hcl": {
+ Name: "test_file_1.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ "test_file_2.tftest.hcl": {
+ Name: "test_file_2.tftest.hcl",
+ Status: moduletest.Skip,
+ Runs: []*moduletest.Run{},
+ },
+ },
+ },
+ ExpectedNames: map[int]string{
+ 0: "test_file_1.tftest.hcl",
+ 1: "test_file_2.tftest.hcl",
+ 2: "test_file_3.tftest.hcl",
+ },
+ },
+ }
+
+ for tn, tc := range cases {
+ t.Run(tn, func(t *testing.T) {
+ list := suiteFilesAsSortedList(tc.Suite.Files)
+
+ if len(list) != len(tc.ExpectedNames) {
+ t.Fatalf("expected the sorted list to contain %d items, got %d", len(tc.ExpectedNames), len(list))
+ }
+
+ if len(tc.ExpectedNames) == 0 {
+ return
+ }
+
+ for k, v := range tc.ExpectedNames {
+ if list[k].Name != v {
+ t.Fatalf("expected element %d in sorted list to be named %s, got %s", k, v, list[k].Name)
+ }
+ }
+ })
+ }
+}
diff --git a/internal/command/test.go b/internal/command/test.go
index 993fce4f2397..c6fb8cfd93da 100644
--- a/internal/command/test.go
+++ b/internal/command/test.go
@@ -5,7 +5,6 @@ package command
import (
"context"
- "fmt"
"path/filepath"
"strings"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/hashicorp/terraform/internal/cloud"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/command/jsonformat"
+ "github.com/hashicorp/terraform/internal/command/junit"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/logging"
"github.com/hashicorp/terraform/internal/moduletest"
@@ -101,7 +101,27 @@ func (c *TestCommand) Run(rawArgs []string) int {
}
view := views.NewTest(args.ViewType, c.View)
- var junitXMLView *views.TestJUnitXMLFile
+
+ // The specified testing directory must be a relative path, and it must
+ // point to a directory that is a descendant of the configuration directory.
+ if !filepath.IsLocal(args.TestDirectory) {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Invalid testing directory",
+ "The testing directory must be a relative path pointing to a directory local to the configuration directory."))
+
+ view.Diagnostics(nil, nil, diags)
+ return 1
+ }
+
+ config, configDiags := c.loadConfigWithTests(".", args.TestDirectory)
+ diags = diags.Append(configDiags)
+ if configDiags.HasErrors() {
+ view.Diagnostics(nil, nil, diags)
+ return 1
+ }
+
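+ // junitFile remains nil unless JUnit XML output is requested via the -junit-xml flag.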
+ var junitFile junit.JUnit
if args.JUnitXMLFile != "" {
// JUnit XML output is currently experimental, so that we can gather
// feedback on exactly how we should map the test results to this
@@ -120,30 +140,9 @@ func (c *TestCommand) Run(rawArgs []string) int {
"JUnit XML output is experimental",
"The -junit-xml option is currently experimental and therefore subject to breaking changes or removal, even in patch releases.",
))
- junitXMLView = views.NewTestJUnitXMLFile(args.JUnitXMLFile)
- view = views.TestMulti{
- view,
- junitXMLView,
- }
- }
- // The specified testing directory must be a relative path, and it must
- // point to a directory that is a descendant of the configuration directory.
- if !filepath.IsLocal(args.TestDirectory) {
- diags = diags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Invalid testing directory",
- "The testing directory must be a relative path pointing to a directory local to the configuration directory."))
-
- view.Diagnostics(nil, nil, diags)
- return 1
- }
-
- config, configDiags := c.loadConfigWithTests(".", args.TestDirectory)
- diags = diags.Append(configDiags)
- if configDiags.HasErrors() {
- view.Diagnostics(nil, nil, diags)
- return 1
+ // This must happen after loadConfigWithTests has been called above, so that the configLoader field is set.
+ junitFile = junit.NewTestJUnitXMLFile(args.JUnitXMLFile, c.configLoader)
}
// Users can also specify variables via the command line, so we'll parse
@@ -236,6 +235,7 @@ func (c *TestCommand) Run(rawArgs []string) int {
TestingDirectory: args.TestDirectory,
Opts: opts,
View: view,
+ JUnit: junitFile,
Stopped: false,
Cancelled: false,
StoppedCtx: stopCtx,
@@ -305,16 +305,6 @@ func (c *TestCommand) Run(rawArgs []string) int {
// tests finished normally with no interrupts.
}
- if junitXMLView != nil {
- if err := junitXMLView.Err(); err != nil {
- testDiags = testDiags.Append(tfdiags.Sourceless(
- tfdiags.Error,
- "Failed to write JUnit XML report",
- fmt.Sprintf("Could not write the requested JUnit XML report: %s.", err),
- ))
- }
- }
-
view.Diagnostics(nil, nil, testDiags)
if status != moduletest.Pass {
diff --git a/internal/command/test_test.go b/internal/command/test_test.go
index 843d9c44837e..f1365c100e77 100644
--- a/internal/command/test_test.go
+++ b/internal/command/test_test.go
@@ -4,10 +4,12 @@
package command
import (
+ "bytes"
"encoding/json"
"fmt"
"os"
"path"
+ "regexp"
"strings"
"testing"
@@ -2333,3 +2335,83 @@ required.
t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
}
}
+
+func TestTest_JUnitOutput(t *testing.T) {
+
+ tcs := map[string]struct {
+ path string
+ code int
+ wantFilename string
+ }{
+ "can create XML for a single file with 1 pass, 1 fail": {
+ path: "junit-output/1pass-1fail",
+ wantFilename: "expected-output.xml",
+ code: 1, // Test failure
+ },
+ "can create XML for multiple files with 1 pass each": {
+ path: "junit-output/multiple-files",
+ wantFilename: "expected-output.xml",
+ code: 0,
+ },
+ "can display a test run's errors under the equivalent test case element": {
+ path: "junit-output/missing-provider",
+ wantFilename: "expected-output.xml",
+ code: 1, // Test error
+ },
+ }
+
+ for tn, tc := range tcs {
+ t.Run(tn, func(t *testing.T) {
+ // Setup test
+ td := t.TempDir()
+ testPath := path.Join("test", tc.path)
+ testCopyDir(t, testFixturePath(testPath), td)
+ defer testChdir(t, td)()
+
+ provider := testing_command.NewProvider(nil)
+ view, done := testView(t)
+
+ c := &TestCommand{
+ Meta: Meta{
+ testingOverrides: metaOverridesForProvider(provider.Provider),
+ View: view,
+ AllowExperimentalFeatures: true,
+ },
+ }
+
+ // Run command with -junit-xml=./output.xml flag
+ outputFile := fmt.Sprintf("%s/output.xml", td)
+ code := c.Run([]string{fmt.Sprintf("-junit-xml=%s", outputFile), "-no-color"})
+ done(t)
+
+ // Assertions
+ if code != tc.code {
+ t.Errorf("expected status code %d but got %d", tc.code, code)
+ }
+
+ actualOut, err := os.ReadFile(outputFile)
+ if err != nil {
+ t.Fatalf("error opening XML file: %s", err)
+ }
+ expectedOutputFile := fmt.Sprintf("%s/%s", td, tc.wantFilename)
+ expectedOutput, err := os.ReadFile(expectedOutputFile)
+ if err != nil {
+ t.Fatalf("error opening XML file: %s", err)
+ }
+
+ // actual output will include timestamps and test duration data, which isn't deterministic; redact it for comparison
+ timeRegexp := regexp.MustCompile(`time=\"[0-9\.]+\"`)
+ actualOut = timeRegexp.ReplaceAll(actualOut, []byte("time=\"TIME_REDACTED\""))
+ timestampRegexp := regexp.MustCompile(`timestamp="[^"]+"`)
+ actualOut = timestampRegexp.ReplaceAll(actualOut, []byte("timestamp=\"TIMESTAMP_REDACTED\""))
+
+ if !bytes.Equal(actualOut, expectedOutput) {
+ t.Fatalf("wanted XML:\n%s\n got XML:\n%s\n", string(expectedOutput), string(actualOut))
+ }
+
+ if provider.ResourceCount() > 0 {
+ t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
+ }
+ })
+ }
+}
diff --git a/internal/command/testdata/test/junit-output/1pass-1fail/expected-output.xml b/internal/command/testdata/test/junit-output/1pass-1fail/expected-output.xml
new file mode 100644
index 000000000000..e05aa399adb5
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/1pass-1fail/expected-output.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/internal/command/testdata/test/junit-output/1pass-1fail/main.tf b/internal/command/testdata/test/junit-output/1pass-1fail/main.tf
new file mode 100644
index 000000000000..5e2e17ac2bfa
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/1pass-1fail/main.tf
@@ -0,0 +1,3 @@
+locals {
+ number = 10
+}
diff --git a/internal/command/testdata/test/junit-output/1pass-1fail/main.tftest.hcl b/internal/command/testdata/test/junit-output/1pass-1fail/main.tftest.hcl
new file mode 100644
index 000000000000..04b7bc709d99
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/1pass-1fail/main.tftest.hcl
@@ -0,0 +1,13 @@
+run "failing_assertion" {
+ assert {
+ condition = local.number < 0
+ error_message = "local variable 'number' has a value greater than zero, so this assertion will fail"
+ }
+}
+
+run "passing_assertion" {
+ assert {
+ condition = local.number > 0
+ error_message = "local variable 'number' has a value greater than zero, so this assertion will pass"
+ }
+}
diff --git a/internal/command/testdata/test/junit-output/missing-provider/expected-output.xml b/internal/command/testdata/test/junit-output/missing-provider/expected-output.xml
new file mode 100644
index 000000000000..dc7eb93faf1f
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/missing-provider/expected-output.xml
@@ -0,0 +1,16 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/internal/command/testdata/test/junit-output/missing-provider/main.tf b/internal/command/testdata/test/junit-output/missing-provider/main.tf
new file mode 100644
index 000000000000..b7217a3ea288
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/missing-provider/main.tf
@@ -0,0 +1,17 @@
+terraform {
+ required_providers {
+ test = {
+ source = "hashicorp/test"
+ configuration_aliases = [test.secondary]
+ }
+ }
+}
+
+resource "test_resource" "primary" {
+ value = "foo"
+}
+
+resource "test_resource" "secondary" {
+ provider = test.secondary
+ value = "bar"
+}
diff --git a/internal/command/testdata/test/junit-output/missing-provider/main.tftest.hcl b/internal/command/testdata/test/junit-output/missing-provider/main.tftest.hcl
new file mode 100644
index 000000000000..43e8952ec73a
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/missing-provider/main.tftest.hcl
@@ -0,0 +1,13 @@
+provider "test" {}
+
+run "passes_validation" {
+ assert {
+ condition = test_resource.primary.value == "foo"
+ error_message = "primary contains invalid value"
+ }
+
+ assert {
+ condition = test_resource.secondary.value == "bar"
+ error_message = "secondary contains invalid value"
+ }
+}
diff --git a/internal/command/testdata/test/junit-output/multiple-files/expected-output.xml b/internal/command/testdata/test/junit-output/multiple-files/expected-output.xml
new file mode 100644
index 000000000000..1e6a924545fa
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/multiple-files/expected-output.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/internal/command/testdata/test/junit-output/multiple-files/main.tf b/internal/command/testdata/test/junit-output/multiple-files/main.tf
new file mode 100644
index 000000000000..41cc84e5c4ea
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/multiple-files/main.tf
@@ -0,0 +1,3 @@
+resource "test_resource" "foo" {
+ value = "bar"
+}
diff --git a/internal/command/testdata/test/junit-output/multiple-files/one.tftest.hcl b/internal/command/testdata/test/junit-output/multiple-files/one.tftest.hcl
new file mode 100644
index 000000000000..66bf87c39dd0
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/multiple-files/one.tftest.hcl
@@ -0,0 +1,6 @@
+run "validate_test_resource" {
+ assert {
+ condition = test_resource.foo.value == "bar"
+ error_message = "invalid value"
+ }
+}
diff --git a/internal/command/testdata/test/junit-output/multiple-files/two.tftest.hcl b/internal/command/testdata/test/junit-output/multiple-files/two.tftest.hcl
new file mode 100644
index 000000000000..66bf87c39dd0
--- /dev/null
+++ b/internal/command/testdata/test/junit-output/multiple-files/two.tftest.hcl
@@ -0,0 +1,6 @@
+run "validate_test_resource" {
+ assert {
+ condition = test_resource.foo.value == "bar"
+ error_message = "invalid value"
+ }
+}
diff --git a/internal/command/views/test.go b/internal/command/views/test.go
index 8baa6dc50f9c..6f8135c2e063 100644
--- a/internal/command/views/test.go
+++ b/internal/command/views/test.go
@@ -5,12 +5,8 @@ package views
import (
"bytes"
- "encoding/xml"
"fmt"
"net/http"
- "os"
- "strconv"
- "strings"
"time"
"github.com/hashicorp/go-tfe"
@@ -735,313 +731,6 @@ func (t *TestJSON) TFCRetryHook(attemptNum int, resp *http.Response) {
)
}
-// TestJUnitXMLFile produces a JUnit XML file at the conclusion of a test
-// run, summarizing the outcome of the test in a form that can then be
-// interpreted by tools which render JUnit XML result reports.
-//
-// The de-facto convention for JUnit XML is for it to be emitted as a separate
-// file as a complement to human-oriented output, rather than _instead of_
-// human-oriented output, and so this view meets that expectation by creating
-// a new file only once the test run has completed, at the "Conclusion" event.
-// If that event isn't reached for any reason then no file will be created at
-// all, which JUnit XML-consuming tools tend to expect as an outcome of a
-// catastrophically-errored test suite.
-//
-// Views cannot return errors directly from their events, so if this view fails
-// to create or write to the designated file when asked to report the conclusion
-// it will save the error as part of its state, accessible from method
-// [TestJUnitXMLFile.Err].
-//
-// This view is intended only for use in conjunction with another view that
-// provides the streaming output of ongoing testing events, so it should
-// typically be wrapped in a [TestMulti] along with either [TestHuman] or
-// [TestJSON].
-type TestJUnitXMLFile struct {
- filename string
- err error
-}
-
-var _ Test = (*TestJUnitXMLFile)(nil)
-
-// NewTestJUnitXML returns a [Test] implementation that will, when asked to
-// report "conclusion", write a JUnit XML report to the given filename.
-//
-// If the file already exists then this view will silently overwrite it at the
-// point of being asked to write a conclusion. Otherwise it will create the
-// file at that time. If creating or overwriting the file fails, a subsequent
-// call to method Err will return information about the problem.
-func NewTestJUnitXMLFile(filename string) *TestJUnitXMLFile {
- return &TestJUnitXMLFile{
- filename: filename,
- }
-}
-
-// Err returns an error that the receiver previously encountered when trying
-// to handle the Conclusion event by creating and writing into a file.
-//
-// Returns nil if either there was no error or if this object hasn't yet been
-// asked to report a conclusion.
-func (v *TestJUnitXMLFile) Err() error {
- return v.err
-}
-
-func (v *TestJUnitXMLFile) Abstract(suite *moduletest.Suite) {}
-
-func (v *TestJUnitXMLFile) Conclusion(suite *moduletest.Suite) {
- xmlSrc, err := junitXMLTestReport(suite)
- if err != nil {
- v.err = err
- return
- }
- err = os.WriteFile(v.filename, xmlSrc, 0660)
- if err != nil {
- v.err = err
- return
- }
-}
-
-func (v *TestJUnitXMLFile) File(file *moduletest.File, progress moduletest.Progress) {}
-
-func (v *TestJUnitXMLFile) Run(run *moduletest.Run, file *moduletest.File, progress moduletest.Progress, elapsed int64) {
-}
-
-func (v *TestJUnitXMLFile) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
-}
-
-func (v *TestJUnitXMLFile) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
-}
-
-func (v *TestJUnitXMLFile) Interrupted() {}
-
-func (v *TestJUnitXMLFile) FatalInterrupt() {}
-
-func (v *TestJUnitXMLFile) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, states map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) {
-}
-
-func (v *TestJUnitXMLFile) TFCStatusUpdate(status tfe.TestRunStatus, elapsed time.Duration) {}
-
-func (v *TestJUnitXMLFile) TFCRetryHook(attemptNum int, resp *http.Response) {}
-
-func junitXMLTestReport(suite *moduletest.Suite) ([]byte, error) {
- var buf bytes.Buffer
- enc := xml.NewEncoder(&buf)
- enc.EncodeToken(xml.ProcInst{
- Target: "xml",
- Inst: []byte(`version="1.0" encoding="UTF-8"`),
- })
- enc.Indent("", " ")
-
- // Some common element/attribute names we'll use repeatedly below.
- suitesName := xml.Name{Local: "testsuites"}
- suiteName := xml.Name{Local: "testsuite"}
- caseName := xml.Name{Local: "testcase"}
- nameName := xml.Name{Local: "name"}
- testsName := xml.Name{Local: "tests"}
- skippedName := xml.Name{Local: "skipped"}
- failuresName := xml.Name{Local: "failures"}
- errorsName := xml.Name{Local: "errors"}
-
- enc.EncodeToken(xml.StartElement{Name: suitesName})
- for _, file := range suite.Files {
- // Each test file is modelled as a "test suite".
-
- // First we'll count the number of tests and number of failures/errors
- // for the suite-level summary.
- totalTests := len(file.Runs)
- totalFails := 0
- totalErrs := 0
- totalSkipped := 0
- for _, run := range file.Runs {
- switch run.Status {
- case moduletest.Skip:
- totalSkipped++
- case moduletest.Fail:
- totalFails++
- case moduletest.Error:
- totalErrs++
- }
- }
- enc.EncodeToken(xml.StartElement{
- Name: suiteName,
- Attr: []xml.Attr{
- {Name: nameName, Value: file.Name},
- {Name: testsName, Value: strconv.Itoa(totalTests)},
- {Name: skippedName, Value: strconv.Itoa(totalSkipped)},
- {Name: failuresName, Value: strconv.Itoa(totalFails)},
- {Name: errorsName, Value: strconv.Itoa(totalErrs)},
- },
- })
-
- for _, run := range file.Runs {
- // Each run is a "test case".
-
- type WithMessage struct {
- Message string `xml:"message,attr,omitempty"`
- Body string `xml:",cdata"`
- }
- type TestCase struct {
- Name string `xml:"name,attr"`
- Classname string `xml:"classname,attr"`
- Skipped *WithMessage `xml:"skipped,omitempty"`
- Failure *WithMessage `xml:"failure,omitempty"`
- Error *WithMessage `xml:"error,omitempty"`
- Stderr *WithMessage `xml:"system-err,omitempty"`
-
- // RunTime is the time spent executing the run associated
- // with this test case, in seconds with the fractional component
- // representing partial seconds.
- //
- // We assume here that it's not practically possible for an
- // execution to take literally zero fractional seconds at
- // the accuracy we're using here (nanoseconds converted into
- // floating point seconds) and so use zero to represent
- // "not known", and thus omit that case. (In practice many
- // JUnit XML consumers treat the absense of this attribute
- // as zero anyway.)
- RunTime float64 `xml:"time,attr,omitempty"`
- Timestamp string `xml:"timestamp,attr,omitempty"`
- }
-
- testCase := TestCase{
- Name: run.Name,
-
- // We treat the test scenario filename as the "class name",
- // implying that the run name is the "method name", just
- // because that seems to inspire more useful rendering in
- // some consumers of JUnit XML that were designed for
- // Java-shaped languages.
- Classname: file.Name,
- }
- if execMeta := run.ExecutionMeta; execMeta != nil {
- testCase.RunTime = execMeta.Duration.Seconds()
- testCase.Timestamp = execMeta.StartTimestamp()
- }
- switch run.Status {
- case moduletest.Skip:
- testCase.Skipped = &WithMessage{
- // FIXME: Is there something useful we could say here about
- // why the test was skipped?
- }
- case moduletest.Fail:
- testCase.Failure = &WithMessage{
- Message: "Test run failed",
- // FIXME: What's a useful thing to report in the body
- // here? A summary of the statuses from all of the
- // checkable objects in the configuration?
- }
- case moduletest.Error:
- var diagsStr strings.Builder
- for _, diag := range run.Diagnostics {
- // FIXME: Pass in the sources so that these diagnostics
- // can include source snippets when appropriate.
- diagsStr.WriteString(format.DiagnosticPlain(diag, nil, 80))
- }
- testCase.Error = &WithMessage{
- Message: "Encountered an error",
- Body: diagsStr.String(),
- }
- }
- if len(run.Diagnostics) != 0 && testCase.Error == nil {
- // If we have diagnostics but the outcome wasn't an error
- // then we're presumably holding diagnostics that didn't
- // cause the test to error, such as warnings. We'll place
- // those into the "system-err" element instead, so that
- // they'll be reported _somewhere_ at least.
- var diagsStr strings.Builder
- for _, diag := range run.Diagnostics {
- // FIXME: Pass in the sources so that these diagnostics
- // can include source snippets when appropriate.
- diagsStr.WriteString(format.DiagnosticPlain(diag, nil, 80))
- }
- testCase.Stderr = &WithMessage{
- Body: diagsStr.String(),
- }
- }
- enc.EncodeElement(&testCase, xml.StartElement{
- Name: caseName,
- })
- }
-
- enc.EncodeToken(xml.EndElement{Name: suiteName})
- }
- enc.EncodeToken(xml.EndElement{Name: suitesName})
- enc.Close()
- return buf.Bytes(), nil
-}
-
-// TestMulti is an fan-out adapter which delegates all calls to all of the
-// wrapped test views, for situations where multiple outputs are needed at
-// the same time.
-type TestMulti []Test
-
-var _ Test = TestMulti(nil)
-
-func (m TestMulti) Abstract(suite *moduletest.Suite) {
- for _, wrapped := range m {
- wrapped.Abstract(suite)
- }
-}
-
-func (m TestMulti) Conclusion(suite *moduletest.Suite) {
- for _, wrapped := range m {
- wrapped.Conclusion(suite)
- }
-}
-
-func (m TestMulti) File(file *moduletest.File, progress moduletest.Progress) {
- for _, wrapped := range m {
- wrapped.File(file, progress)
- }
-}
-
-func (m TestMulti) Run(run *moduletest.Run, file *moduletest.File, progress moduletest.Progress, elapsed int64) {
- for _, wrapped := range m {
- wrapped.Run(run, file, progress, elapsed)
- }
-}
-
-func (m TestMulti) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
- for _, wrapped := range m {
- wrapped.DestroySummary(diags, run, file, state)
- }
-}
-
-func (m TestMulti) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
- for _, wrapped := range m {
- wrapped.Diagnostics(run, file, diags)
- }
-}
-
-func (m TestMulti) Interrupted() {
- for _, wrapped := range m {
- wrapped.Interrupted()
- }
-}
-
-func (m TestMulti) FatalInterrupt() {
- for _, wrapped := range m {
- wrapped.FatalInterrupt()
- }
-}
-
-func (m TestMulti) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, states map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) {
- for _, wrapped := range m {
- wrapped.FatalInterruptSummary(run, file, states, created)
- }
-}
-
-func (m TestMulti) TFCStatusUpdate(status tfe.TestRunStatus, elapsed time.Duration) {
- for _, wrapped := range m {
- wrapped.TFCStatusUpdate(status, elapsed)
- }
-}
-
-func (m TestMulti) TFCRetryHook(attemptNum int, resp *http.Response) {
- for _, wrapped := range m {
- wrapped.TFCRetryHook(attemptNum, resp)
- }
-}
-
func colorizeTestStatus(status moduletest.Status, color *colorstring.Colorize) string {
switch status {
case moduletest.Error, moduletest.Fail: