chore: Fix the dry-run output (again)
This is an evolution of the recent update to the dry-run output.
That was done in a rush, and there have been new learnings since.

The biggest learning is that all log messages go to stderr,
but the dry-run output goes to stdout.
(The reason I didn't know this before is that the test runner
mixes both by default, and I just assumed everything was going
to stdout. Luckily it's not.)

Also I've learned since that the correct way of escaping strings for
the shell is actually single quotes, AND that JSON is way easier to
parse in the shell, and that dumping every format at once is terrible.
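
As a sketch of why the JSON format is nicer to consume (the exact CLI
invocation below is an assumption for illustration; the --dry-run and
--dry-run-format flags are the ones added in this commit), jq can pull
the list of tests straight off stdout, since only the dry-run payload
goes there:

    # invocation shape is illustrative; flags and JSON keys come from this commit
    codecovcli label-analysis --token="$STATIC_TOKEN" --base-sha="$BASE_SHA" \
        --dry-run --dry-run-format=json \
      | jq -r '.ats_tests_to_run[]'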

I also learned about tee, which lets you route a command's stdout to
multiple places (including files); there's an example at the end of
this message.

So now there are explicit format options for the output, and you
always get it on stdout, and only it, in the format you specify.
The default is JSON.

If you want to route it to files as well as stdout, that's fine too.
Up to you, be happy.
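
For example (again, the invocation is only illustrative), tee copies
the dry-run payload to a file while still printing it to stdout, and
the log messages stay out of the way on stderr:

    # invocation shape is illustrative; output format names come from this commit
    codecovcli label-analysis --token="$STATIC_TOKEN" --base-sha="$BASE_SHA" \
        --dry-run --dry-run-format=space-separated-list \
      | tee ats_tests_to_run.txt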
giovanni-guidini committed Oct 10, 2023
1 parent cab36bc commit 62bff54
Showing 2 changed files with 127 additions and 123 deletions.
108 changes: 51 additions & 57 deletions codecov_cli/commands/labelanalysis.py
@@ -66,15 +66,10 @@
is_flag=True,
)
@click.option(
"--dry-run-output-path",
"dry_run_output_path",
help=(
"Prints the dry-run list (ATS_TESTS_TO_RUN) into dry_run_output_path (in addition to stdout)\n"
+ "AND prints ATS_TESTS_TO_SKIP into dry_run_output_path_skipped\n"
+ "AND prints dry-run JSON output into dry_run_output_path.json"
),
type=pathlib.Path,
default=None,
"--dry-run-format",
"dry_run_format",
type=click.Choice(["json", "space-separated-list"]),
default="json",
)
@click.pass_context
def label_analysis(
@@ -85,7 +80,7 @@ def label_analysis(
runner_name: str,
max_wait_time: str,
dry_run: bool,
dry_run_output_path: Optional[pathlib.Path],
dry_run_format: str,
):
enterprise_url = ctx.obj.get("enterprise_url")
logger.debug(
@@ -160,7 +155,7 @@ def label_analysis(
requested_labels,
runner,
dry_run=dry_run,
dry_run_output_path=dry_run_output_path,
dry_run_format=dry_run_format,
)
return

@@ -192,7 +187,7 @@ def label_analysis(
_dry_run_output(
LabelAnalysisRequestResult(request_result),
runner,
dry_run_output_path,
dry_run_format,
)
return
if resp_json["state"] == "error":
@@ -204,7 +199,7 @@ def label_analysis(
collected_labels=requested_labels,
runner=runner,
dry_run=dry_run,
dry_run_output_path=dry_run_output_path,
dry_run_format=dry_run_format,
)
return
if max_wait_time and (time.monotonic() - start_wait) > max_wait_time:
@@ -215,7 +210,7 @@ def label_analysis(
collected_labels=requested_labels,
runner=runner,
dry_run=dry_run,
dry_run_output_path=dry_run_output_path,
dry_run_format=dry_run_format,
)
return
logger.info("Waiting more time for result...")
@@ -319,63 +314,62 @@ def _send_labelanalysis_request(payload, url, token_header):
return eid


def _dry_run_json_output(
labels_to_run: set, labels_to_skip: set, runner_options: List[str]
) -> None:
output_as_dict = dict(
runner_options=runner_options,
ats_tests_to_run=sorted(labels_to_run),
ats_tests_to_skip=sorted(labels_to_skip),
)
# ⚠️ DON'T use logger
# logger goes to stderr, we want it in stdout
click.echo(json.dumps(output_as_dict))


def _dry_run_list_output(
labels_to_run: set, labels_to_skip: set, runner_options: List[str]
) -> None:
to_run_line = " ".join(
sorted(map(lambda l: f"'{l}'", runner_options))
+ sorted(map(lambda l: f"'{l}'", labels_to_run))
)
to_skip_line = " ".join(
sorted(map(lambda l: f"'{l}'", runner_options))
+ sorted(map(lambda l: f"'{l}'", labels_to_skip))
)
# ⚠️ DON'T use logger
# logger goes to stderr, we want it in stdout
click.echo(f"TESTS_TO_RUN={to_run_line}")
click.echo(f"TESTS_TO_SKIP={to_skip_line}")


def _dry_run_output(
result: LabelAnalysisRequestResult,
runner: LabelAnalysisRunnerInterface,
dry_run_output_path: Optional[pathlib.Path],
dry_run_format: str,
):
labels_to_run = set(
result.absent_labels + result.global_level_labels + result.present_diff_labels
)
labels_skipped = set(result.present_report_labels) - labels_to_run
# If the test label can contain spaces and dashes the test runner might
# interpret it as an option and not a label
# So we wrap it in doublequotes just to be extra sure
labels_run_wrapped_double_quotes = sorted(
map(lambda l: '"' + l + '"', labels_to_run)
)
labels_skip_wrapped_double_quotes = sorted(
map(lambda l: '"' + l + '"', labels_skipped)
)

output_as_dict = dict(
runner_options=runner.dry_run_runner_options,
ats_tests_to_run=labels_run_wrapped_double_quotes,
ats_tests_to_skip=labels_skip_wrapped_double_quotes,
)
if dry_run_output_path is not None:
with open(dry_run_output_path, "w") as fd:
fd.write(
" ".join(
runner.dry_run_runner_options + labels_run_wrapped_double_quotes
)
+ "\n"
)
with open(str(dry_run_output_path) + "_skipped", "w") as fd:
fd.write(
" ".join(
runner.dry_run_runner_options + labels_skip_wrapped_double_quotes
)
+ "\n"
)
with open(str(dry_run_output_path) + ".json", "w") as fd:
fd.write(json.dumps(output_as_dict) + "\n")
labels_to_skip = set(result.present_report_labels) - labels_to_run

click.echo(json.dumps(output_as_dict))
click.echo(
f"ATS_TESTS_TO_RUN={' '.join(runner.dry_run_runner_options + labels_run_wrapped_double_quotes)}"
)
click.echo(
f"ATS_TESTS_TO_SKIP={' '.join(runner.dry_run_runner_options + labels_skip_wrapped_double_quotes)}"
)
format_lookup = {
"json": _dry_run_json_output,
"space-separated-list": _dry_run_list_output,
}
# Because dry_run_format is a click.Choice we can
# be sure the value will be in the dict of choices
fn_to_use = format_lookup[dry_run_format]
fn_to_use(labels_to_run, labels_to_skip, runner.dry_run_runner_options)


def _fallback_to_collected_labels(
collected_labels: List[str],
runner: LabelAnalysisRunnerInterface,
*,
dry_run: bool = False,
dry_run_output_path: Optional[pathlib.Path] = None,
    dry_run_format: Optional[str] = None,
) -> dict:
logger.info("Trying to fallback on collected labels")
if collected_labels:
Expand All @@ -392,7 +386,7 @@ def _fallback_to_collected_labels(
return runner.process_labelanalysis_result(fake_response)
else:
return _dry_run_output(
LabelAnalysisRequestResult(fake_response), runner, dry_run_output_path
LabelAnalysisRequestResult(fake_response), runner, dry_run_format
)
logger.error("Cannot fallback to collected labels because no labels were collected")
raise click.ClickException("Failed to get list of labels to run")
142 changes: 76 additions & 66 deletions tests/commands/test_invoke_labelanalysis.py
@@ -1,4 +1,6 @@
import json
from contextlib import redirect_stdout
from io import StringIO
from pathlib import Path

import click
@@ -8,6 +10,8 @@
from responses import matchers

from codecov_cli.commands.labelanalysis import (
_dry_run_json_output,
_dry_run_list_output,
_fallback_to_collected_labels,
_potentially_calculate_absent_labels,
_send_labelanalysis_request,
@@ -102,6 +106,45 @@ def test_send_label_analysis_bad_payload(self):
with pytest.raises(click.ClickException):
_send_labelanalysis_request(payload, url, header)

def test__dry_run_json_output(self):
list_to_run = ["label_1", "label_2"]
list_to_skip = ["label_3", "label_4"]
runner_options = ["--option=1", "--option=2"]

with StringIO() as out:
with redirect_stdout(out):
_dry_run_json_output(
labels_to_run=list_to_run,
labels_to_skip=list_to_skip,
runner_options=runner_options,
)
stdout = out.getvalue()

assert json.loads(stdout) == {
"runner_options": ["--option=1", "--option=2"],
"ats_tests_to_skip": ["label_3", "label_4"],
"ats_tests_to_run": ["label_1", "label_2"],
}

    def test__dry_run_list_output(self):
list_to_run = ["label_1", "label_2"]
list_to_skip = ["label_3", "label_4"]
runner_options = ["--option=1", "--option=2"]

with StringIO() as out:
with redirect_stdout(out):
_dry_run_list_output(
labels_to_run=list_to_run,
labels_to_skip=list_to_skip,
runner_options=runner_options,
)
stdout = out.getvalue()

assert (
stdout
== "TESTS_TO_RUN='--option=1' '--option=2' 'label_1' 'label_2'\nTESTS_TO_SKIP='--option=1' '--option=2' 'label_3' 'label_4'\n"
)


class TestLabelAnalysisCommand(object):
def test_labelanalysis_help(self, mocker, fake_ci_provider):
@@ -115,28 +158,24 @@ def test_labelanalysis_help(self, mocker, fake_ci_provider):
"Usage: cli label-analysis [OPTIONS]",
"",
"Options:",
" --token TEXT The static analysis token (NOT the same token as",
" upload) [required]",
" --head-sha TEXT Commit SHA (with 40 chars) [required]",
" --base-sha TEXT Commit SHA (with 40 chars) [required]",
" --runner-name, --runner TEXT Runner to use",
" --max-wait-time INTEGER Max time (in seconds) to wait for the label",
" analysis result before falling back to running",
" all tests. Default is to wait forever.",
" --dry-run Print list of tests to run AND tests skipped",
" (and options that need to be added to the test",
" runner) to stdout. Also prints the same",
" information in JSON format. JSON will have keys",
" 'ats_tests_to_run', 'ats_tests_to_skip' and",
" 'runner_options'. List of tests to run is",
" prefixed with ATS_TESTS_TO_RUN= List of tests to",
" skip is prefixed with ATS_TESTS_TO_SKIP=",
" --dry-run-output-path PATH Prints the dry-run list (ATS_TESTS_TO_RUN) into",
" dry_run_output_path (in addition to stdout) AND",
" prints ATS_TESTS_TO_SKIP into",
" dry_run_output_path_skipped AND prints dry-run",
" JSON output into dry_run_output_path.json",
" -h, --help Show this message and exit.",
" --token TEXT The static analysis token (NOT the same token",
" as upload) [required]",
" --head-sha TEXT Commit SHA (with 40 chars) [required]",
" --base-sha TEXT Commit SHA (with 40 chars) [required]",
" --runner-name, --runner TEXT Runner to use",
" --max-wait-time INTEGER Max time (in seconds) to wait for the label",
" analysis result before falling back to running",
" all tests. Default is to wait forever.",
" --dry-run Print list of tests to run AND tests skipped",
" (and options that need to be added to the test",
" runner) to stdout. Also prints the same",
" information in JSON format. JSON will have",
" keys 'ats_tests_to_run', 'ats_tests_to_skip'",
" and 'runner_options'. List of tests to run is",
" prefixed with ATS_TESTS_TO_RUN= List of tests",
" to skip is prefixed with ATS_TESTS_TO_SKIP=",
" --dry-run-format [json|space-separated-list]",
" -h, --help Show this message and exit.",
"",
]

@@ -267,7 +306,7 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
json={"state": "finished", "result": label_analysis_result},
)
cli_runner = CliRunner()
cli_runner = CliRunner(mix_stderr=False)
with cli_runner.isolated_filesystem():
result = cli_runner.invoke(
cli,
@@ -281,19 +320,15 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
)
mock_get_runner.assert_called()
fake_runner.process_labelanalysis_result.assert_not_called()
print(result.output)
assert result.exit_code == 0
assert (
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}'
in result.output
)
assert (
'ATS_TESTS_TO_RUN=--labels "test_absent" "test_global" "test_in_diff"'
in result.output
)
assert 'ATS_TESTS_TO_SKIP=--labels "test_present"' in result.output
# Dry run format defaults to json
print(result.stdout)
assert json.loads(result.stdout) == {
"runner_options": ["--labels"],
"ats_tests_to_run": ["test_absent", "test_global", "test_in_diff"],
"ats_tests_to_skip": ["test_present"],
}

def test_invoke_label_analysis_dry_run_with_output_path(
def test_invoke_label_analysis_dry_run_pytest_format(
self, get_labelanalysis_deps, mocker
):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
@@ -330,7 +365,7 @@ def test_invoke_label_analysis_dry_run_with_output_path(
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
json={"state": "finished", "result": label_analysis_result},
)
cli_runner = CliRunner()
cli_runner = CliRunner(mix_stderr=False)
with cli_runner.isolated_filesystem():
result = cli_runner.invoke(
cli,
@@ -339,42 +374,17 @@ def test_invoke_label_analysis_dry_run_with_output_path(
"--token=STATIC_TOKEN",
f"--base-sha={FAKE_BASE_SHA}",
"--dry-run",
"--dry-run-output-path=ats_output_path",
"--dry-run-format=space-separated-list",
],
obj={},
)
print(result)
print(result.output)
ats_output_path = "ats_output_path"
labels_file = Path(ats_output_path)
skip_labels_file = Path(ats_output_path + "_skipped")
json_output = Path(ats_output_path + ".json")
assert labels_file.exists() and labels_file.is_file()
assert skip_labels_file.exists() and skip_labels_file.is_file()
assert json_output.exists() and json_output.is_file()
with open(labels_file, "r") as fd:
assert fd.readlines() == [
'--labels "test_absent" "test_global" "test_in_diff"\n'
]
with open(skip_labels_file, "r") as fd:
assert fd.readlines() == ['--labels "test_present"\n']
with open(json_output, "r") as fd:
assert fd.readlines() == [
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}\n'
]
mock_get_runner.assert_called()
fake_runner.process_labelanalysis_result.assert_not_called()
print(result.output)
print(result.stdout)
assert result.exit_code == 0
assert (
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}'
in result.output
)
assert (
'ATS_TESTS_TO_RUN=--labels "test_absent" "test_global" "test_in_diff"'
in result.output
result.stdout
== "TESTS_TO_RUN='--labels' 'test_absent' 'test_global' 'test_in_diff'\nTESTS_TO_SKIP='--labels' 'test_present'\n"
)
assert 'ATS_TESTS_TO_SKIP=--labels "test_present"' in result.output

def test_fallback_to_collected_labels(self, mocker):
mock_runner = mocker.MagicMock()
@@ -471,7 +481,7 @@ def test_fallback_dry_run(self, get_labelanalysis_deps, mocker, use_verbose_option):
"global_level_labels": [],
},
fake_runner,
None,
"json",
)
assert result.exit_code == 0

