diff --git a/.github/workflows/js-tests-paver.yml b/.github/workflows/js-tests-paver.yml
new file mode 100644
index 000000000000..566063fdfd22
--- /dev/null
+++ b/.github/workflows/js-tests-paver.yml
@@ -0,0 +1,84 @@
+name: Javascript tests PAVER
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
+
+jobs:
+ run_tests:
+ name: JS
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest]
+ node-version: [18, 20]
+ python-version:
+ - "3.11"
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Fetch master to compare coverage
+ run: git fetch --depth=1 origin master
+
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node-version }}
+
+ - name: Setup npm
+ run: npm i -g npm@10.5.x
+
+ - name: Install Firefox 123.0
+ run: |
+ sudo apt-get purge firefox
+ wget "https://ftp.mozilla.org/pub/firefox/releases/123.0/linux-x86_64/en-US/firefox-123.0.tar.bz2"
+ tar -xjf firefox-123.0.tar.bz2
+ sudo mv firefox /opt/firefox
+ sudo ln -s /opt/firefox/firefox /usr/bin/firefox
+
+ - name: Install Required System Packages
+ run: sudo apt-get update && sudo apt-get install libxmlsec1-dev ubuntu-restricted-extras xvfb
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Get pip cache dir
+ id: pip-cache-dir
+ run: |
+ echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
+
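+ # The cache key hashes requirements/edx/base.txt, so the pip download cache is reused
+ # until the base requirements change; the restore-key lets older runner-scoped caches
+ # be restored as a fallback.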
+ - name: Cache pip dependencies
+ id: cache-dependencies
+ uses: actions/cache@v4
+ with:
+ path: ${{ steps.pip-cache-dir.outputs.dir }}
+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/base.txt') }}
+ restore-keys: ${{ runner.os }}-pip-
+
+ - name: Install Required Python Dependencies
+ run: |
+ make base-requirements
+
+ - uses: c-hive/gha-npm-cache@v1
+ - name: Run JS Tests
+ env:
+ TEST_SUITE: js-unit
+ SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh
+ run: |
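+ # TEST_SUITE and SCRIPT_TO_RUN are exported for all-tests.sh, which is assumed to hand
+ # off to the js-unit portion of generic-ci-tests.sh; jest is installed globally so the
+ # jest-snapshot suite has a `jest` binary on PATH (assumption). xvfb-run provides a
+ # virtual X display so Karma can launch Firefox on the headless runner.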
+ npm install -g jest
+ xvfb-run --auto-servernum ./scripts/all-tests.sh
+
+ - name: Save Job Artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: Build-Artifacts
+ path: |
+ reports/**/*
+ test_root/log/*.png
+ test_root/log/*.log
+ **/TEST-*.xml
+ overwrite: true
diff --git a/.github/workflows/quality-checks-paver.yml b/.github/workflows/quality-checks-paver.yml
new file mode 100644
index 000000000000..beb9fea8007f
--- /dev/null
+++ b/.github/workflows/quality-checks-paver.yml
@@ -0,0 +1,82 @@
+name: Quality checks PAVER
+
+on:
+ pull_request:
+ push:
+ branches:
+ - master
+ - open-release/lilac.master
+
+jobs:
+ run_tests:
+ name: Quality Others
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-22.04]
+ python-version:
+ - "3.11"
+ node-version: [20]
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 2
+
+ - name: Fetch base branch for comparison
+ run: git fetch --depth=1 origin ${{ github.base_ref }}
+
+ - name: Install Required System Packages
+ run: sudo apt-get update && sudo apt-get install libxmlsec1-dev
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Setup Node
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ matrix.node-version }}
+
+ - name: Setup npm
+ run: npm i -g npm@8.5.x
+
+ - name: Get pip cache dir
+ id: pip-cache-dir
+ run: |
+ echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
+
+ - name: Cache pip dependencies
+ id: cache-dependencies
+ uses: actions/cache@v4
+ with:
+ path: ${{ steps.pip-cache-dir.outputs.dir }}
+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements/edx/testing.txt') }}
+ restore-keys: ${{ runner.os }}-pip-
+
+ - name: Install Required Python Dependencies
+ env:
+ PIP_SRC: ${{ runner.temp }}
+ run: |
+ make test-requirements
+
+ - name: Run Quality Tests
+ env:
+ TEST_SUITE: quality
+ SCRIPT_TO_RUN: ./scripts/generic-ci-tests.sh
+ PIP_SRC: ${{ runner.temp }}
+ TARGET_BRANCH: ${{ github.base_ref }}
+ run: |
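+ # As in the JS workflow, TEST_SUITE/SCRIPT_TO_RUN steer all-tests.sh (assumption), and
+ # TARGET_BRANCH presumably feeds the diff-quality comparison, which is why the base
+ # branch was fetched in an earlier step.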
+ ./scripts/all-tests.sh
+
+ - name: Save Job Artifacts
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: Build-Artifacts
+ path: |
+ **/reports/**/*
+ test_root/log/**/*.log
+ *.log
+ overwrite: true
diff --git a/pavelib/__init__.py b/pavelib/__init__.py
index 24f05618bdd7..875068166ff5 100644
--- a/pavelib/__init__.py
+++ b/pavelib/__init__.py
@@ -3,4 +3,4 @@
"""
-from . import assets
+from . import assets, js_test, prereqs, quality
diff --git a/pavelib/js_test.py b/pavelib/js_test.py
new file mode 100644
index 000000000000..fb9c213499ac
--- /dev/null
+++ b/pavelib/js_test.py
@@ -0,0 +1,143 @@
+"""
+Javascript test tasks
+"""
+
+
+import os
+import re
+import sys
+
+from paver.easy import cmdopts, needs, sh, task
+
+from pavelib.utils.envs import Env
+from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite
+from pavelib.utils.timer import timed
+
+try:
+ from pygments.console import colorize
+except ImportError:
+ colorize = lambda color, text: text
+
+__test__ = False # do not collect
+
+
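+# Illustrative invocations, based on the cmdopts declared below (not exhaustive):
+#   paver test_js --suite=lms --coverage
+#   paver test_js --mode=dev --suite=cms --port=9876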
+@task
+@needs(
+ 'pavelib.prereqs.install_node_prereqs',
+ 'pavelib.utils.test.utils.clean_reports_dir',
+)
+@cmdopts([
+ ("suite=", "s", "Test suite to run"),
+ ("mode=", "m", "dev or run"),
+ ("coverage", "c", "Run test under coverage"),
+ ("port=", "p", "Port to run test server on (dev mode only)"),
+ ('skip-clean', 'C', 'skip cleaning repository before running tests'),
+ ('skip_clean', None, 'deprecated in favor of skip-clean'),
+], share_with=["pavelib.utils.tests.utils.clean_reports_dir"])
+@timed
+def test_js(options):
+ """
+ Run the JavaScript tests
+ """
+ mode = getattr(options, 'mode', 'run')
+ port = None
+ skip_clean = getattr(options, 'skip_clean', False)
+
+ if mode == 'run':
+ suite = getattr(options, 'suite', 'all')
+ coverage = getattr(options, 'coverage', False)
+ elif mode == 'dev':
+ suite = getattr(options, 'suite', None)
+ coverage = False
+ port = getattr(options, 'port', None)
+ else:
+ sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.")
+ return
+
+ if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS):
+ sys.stderr.write(
+ "Unknown test suite. Please choose from ({suites})\n".format(
+ suites=", ".join(Env.JS_TEST_ID_KEYS)
+ )
+ )
+ return
+
+ if suite != 'jest-snapshot':
+ test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean)
+ test_suite.run()
+
+ if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in
+ test_suite = JestSnapshotTestSuite('jest')
+ test_suite.run()
+
+
+@task
+@cmdopts([
+ ("suite=", "s", "Test suite to run"),
+ ("coverage", "c", "Run test under coverage"),
+])
+@timed
+def test_js_run(options):
+ """
+ Run the JavaScript tests and print results to the console
+ """
+ options.mode = 'run'
+ test_js(options)
+
+
+@task
+@cmdopts([
+ ("suite=", "s", "Test suite to run"),
+ ("port=", "p", "Port to run test server on"),
+])
+@timed
+def test_js_dev(options):
+ """
+ Run the JavaScript tests in your default browsers
+ """
+ options.mode = 'dev'
+ test_js(options)
+
+
+@task
+@needs('pavelib.prereqs.install_coverage_prereqs')
+@cmdopts([
+ ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
+], share_with=['coverage'])
+@timed
+def diff_coverage(options):
+ """
+ Build the diff coverage reports
+ """
+ compare_branch = options.get('compare_branch', 'origin/master')
+
+ # Find all coverage XML files (both Python and JavaScript)
+ xml_reports = []
+
+ for filepath in Env.REPORT_DIR.walk():
+ if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
+ xml_reports.append(filepath)
+
+ if not xml_reports:
+ err_msg = colorize(
+ 'red',
+ "No coverage info found. Run `paver test` before running "
+ "`paver coverage`.\n"
+ )
+ sys.stderr.write(err_msg)
+ else:
+ xml_report_str = ' '.join(xml_reports)
+ diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')
+
+ # Generate the diff coverage reports (HTML and console)
+ # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
+ sh(
+ "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} "
+ "--html-report {diff_html_path}".format(
+ xml_report_str=xml_report_str,
+ compare_branch=compare_branch,
+ diff_html_path=diff_html_path,
+ )
+ )
+
+ print("\n")
diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py
new file mode 100644
index 000000000000..214a35e3fe85
--- /dev/null
+++ b/pavelib/paver_tests/conftest.py
@@ -0,0 +1,22 @@
+"""
+Pytest fixtures for the pavelib unit tests.
+"""
+
+
+import os
+from shutil import rmtree
+
+import pytest
+
+from pavelib.utils.envs import Env
+
+
+@pytest.fixture(autouse=True, scope='session')
+def delete_quality_junit_xml():
+ """
+ Delete the JUnit XML results files for quality check tasks run during the
+ unit tests.
+ """
+ yield
+ if os.path.exists(Env.QUALITY_DIR):
+ rmtree(Env.QUALITY_DIR, ignore_errors=True)
diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py
new file mode 100644
index 000000000000..5802d7d0d21b
--- /dev/null
+++ b/pavelib/paver_tests/test_eslint.py
@@ -0,0 +1,54 @@
+"""
+Tests for Paver's ESLint tasks.
+"""
+
+
+import unittest
+from unittest.mock import patch
+
+import pytest
+from paver.easy import BuildFailure, call_task
+
+import pavelib.quality
+
+
+class TestPaverESLint(unittest.TestCase):
+ """
+ For testing run_eslint
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ # Mock the paver @needs decorator
+ self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()
+ self._mock_paver_needs.return_value = 0
+
+ # Mock shell commands
+ patcher = patch('pavelib.quality.sh')
+ self._mock_paver_sh = patcher.start()
+
+ # Cleanup mocks
+ self.addCleanup(patcher.stop)
+ self.addCleanup(self._mock_paver_needs.stop)
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_count_from_last_line')
+ def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
+ """
+ run_eslint encounters an error parsing the eslint output log
+ """
+ mock_count.return_value = None
+ with pytest.raises(BuildFailure):
+ call_task('pavelib.quality.run_eslint', args=[''])
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_count_from_last_line')
+ def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
+ """
+ eslint finds violations, but a limit was not set
+ """
+ mock_count.return_value = 1
+ pavelib.quality.run_eslint("")
diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py
new file mode 100644
index 000000000000..4b165a156674
--- /dev/null
+++ b/pavelib/paver_tests/test_js_test.py
@@ -0,0 +1,148 @@
+"""Unit tests for the Paver JavaScript testing tasks."""
+
+from unittest.mock import patch
+
+import ddt
+from paver.easy import call_task
+
+import pavelib.js_test
+from pavelib.utils.envs import Env
+
+from .utils import PaverTestCase
+
+
+@ddt.ddt
+class TestPaverJavaScriptTestTasks(PaverTestCase):
+ """
+ Test the Paver JavaScript testing tasks.
+ """
+
+ EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete'
+ EXPECTED_KARMA_OPTIONS = (
+ "{config_file} "
+ "--single-run={single_run} "
+ "--capture-timeout=60000 "
+ "--junitreportpath="
+ "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
+ "--browsers={browser}"
+ )
+ EXPECTED_COVERAGE_OPTIONS = (
+ ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
+ )
+
+ EXPECTED_COMMANDS = [
+ "make report_dir",
+ 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
+ "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
+ 'rm -rf test_root/log/auto_screenshots/*',
+ "rm -rf /tmp/mako_[cl]ms",
+ ]
+
+ def setUp(self):
+ super().setUp()
+
+ # Mock the paver @needs decorator
+ self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start()
+ self._mock_paver_needs.return_value = 0
+
+ # Cleanup mocks
+ self.addCleanup(self._mock_paver_needs.stop)
+
+ @ddt.data(
+ [""],
+ ["--coverage"],
+ ["--suite=lms"],
+ ["--suite=lms --coverage"],
+ )
+ @ddt.unpack
+ def test_test_js_run(self, options_string):
+ """
+ Test the "test_js_run" task.
+ """
+ options = self.parse_options_string(options_string)
+ self.reset_task_messages()
+ call_task("pavelib.js_test.test_js_run", options=options)
+ self.verify_messages(options=options, dev_mode=False)
+
+ @ddt.data(
+ [""],
+ ["--port=9999"],
+ ["--suite=lms"],
+ ["--suite=lms --port=9999"],
+ )
+ @ddt.unpack
+ def test_test_js_dev(self, options_string):
+ """
+ Test the "test_js_run" task.
+ """
+ options = self.parse_options_string(options_string)
+ self.reset_task_messages()
+ call_task("pavelib.js_test.test_js_dev", options=options)
+ self.verify_messages(options=options, dev_mode=True)
+
+ def parse_options_string(self, options_string):
+ """
+ Parse a string containing the options for a test run
+ """
+ parameters = options_string.split(" ")
+ suite = "all"
+ if "--system=lms" in parameters:
+ suite = "lms"
+ elif "--system=common" in parameters:
+ suite = "common"
+ coverage = "--coverage" in parameters
+ port = None
+ if "--port=9999" in parameters:
+ port = 9999
+ return {
+ "suite": suite,
+ "coverage": coverage,
+ "port": port,
+ }
+
+ def verify_messages(self, options, dev_mode):
+ """
+ Verify that the messages generated when running tests are as expected
+ for the specified options and dev_mode.
+ """
+ is_coverage = options['coverage']
+ port = options['port']
+ expected_messages = []
+ suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']]
+
+ expected_messages.extend(self.EXPECTED_COMMANDS)
+ if not dev_mode and not is_coverage:
+ expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format(
+ platform_root=self.platform_root
+ ))
+
+ command_template = (
+ 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}'
+ )
+
+ for suite in suites:
+ # Karma test command
+ if suite != 'jest-snapshot':
+ karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)]
+ expected_test_tool_command = command_template.format(
+ options=self.EXPECTED_KARMA_OPTIONS.format(
+ config_file=karma_config_file,
+ single_run='false' if dev_mode else 'true',
+ suite=suite,
+ platform_root=self.platform_root,
+ browser=Env.KARMA_BROWSER,
+ ),
+ )
+ if is_coverage:
+ expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format(
+ platform_root=self.platform_root,
+ suite=suite
+ )
+ if port:
+ expected_test_tool_command += f" --port={port}"
+ else:
+ expected_test_tool_command = 'jest'
+
+ expected_messages.append(expected_test_tool_command)
+
+ assert self.task_messages == expected_messages
diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py
new file mode 100644
index 000000000000..36d6dd59e172
--- /dev/null
+++ b/pavelib/paver_tests/test_paver_quality.py
@@ -0,0 +1,156 @@
+""" # lint-amnesty, pylint: disable=django-not-configured
+Tests for paver quality tasks
+"""
+
+
+import os
+import shutil # lint-amnesty, pylint: disable=unused-import
+import tempfile
+import textwrap
+import unittest
+from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import
+
+import pytest # lint-amnesty, pylint: disable=unused-import
+from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import
+from path import Path as path
+from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import
+
+import pavelib.quality
+from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import
+
+OPEN_BUILTIN = 'builtins.open'
+
+
+@ddt
+class TestPaverQualityViolations(unittest.TestCase):
+ """
+ For testing the paver violations-counting tasks
+ """
+ def setUp(self):
+ super().setUp()
+ self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with
+ self.f.close()
+ self.addCleanup(os.remove, self.f.name)
+
+ def test_pep8_parser(self):
+ with open(self.f.name, 'w') as f:
+ f.write("hello\nhithere")
+ num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access
+ assert num == 2
+
+
+class TestPaverReportViolationsCounts(unittest.TestCase):
+ """
+ For testing utility functions for getting counts from reports for
+ run_eslint and run_xsslint.
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ # Temporary file infrastructure
+ self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with
+ self.f.close()
+
+ # Cleanup various mocks and tempfiles
+ self.addCleanup(os.remove, self.f.name)
+
+ def test_get_eslint_violations_count(self):
+ with open(self.f.name, 'w') as f:
+ f.write("3000 violations found")
+ actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
+ assert actual_count == 3000
+
+ def test_get_eslint_violations_no_number_found(self):
+ with open(self.f.name, 'w') as f:
+ f.write("Not expected string regex")
+ actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
+ assert actual_count is None
+
+ def test_get_eslint_violations_count_truncated_report(self):
+ """
+ A truncated report (i.e. last line is just a violation)
+ """
+ with open(self.f.name, 'w') as f:
+ f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.")
+ actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
+ assert actual_count is None
+
+ def test_generic_value(self):
+ """
+ Default behavior is to look for an integer appearing at head of line
+ """
+ with open(self.f.name, 'w') as f:
+ f.write("5.777 good to see you")
+ actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
+ assert actual_count == 5
+
+ def test_generic_value_none_found(self):
+ """
+ Default behavior is to look for an integer appearing at head of line
+ """
+ with open(self.f.name, 'w') as f:
+ f.write("hello 5.777 good to see you")
+ actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
+ assert actual_count is None
+
+ def test_get_xsslint_counts_happy(self):
+ """
+ Test happy path getting violation counts from xsslint report.
+ """
+ report = textwrap.dedent("""
+ test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);
+
+ javascript-concat-html: 310 violations
+ javascript-escape: 7 violations
+
+ 2608 violations total
+ """)
+ with open(self.f.name, 'w') as f:
+ f.write(report)
+ counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access
+ self.assertDictEqual(counts, {
+ 'rules': {
+ 'javascript-concat-html': 310,
+ 'javascript-escape': 7,
+ },
+ 'total': 2608,
+ })
+
+ def test_get_xsslint_counts_bad_counts(self):
+ """
+ Test getting violation counts from truncated and malformed xsslint
+ report.
+ """
+ report = textwrap.dedent("""
+ javascript-concat-html: violations
+ """)
+ with open(self.f.name, 'w') as f:
+ f.write(report)
+ counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access
+ self.assertDictEqual(counts, {
+ 'rules': {},
+ 'total': None,
+ })
+
+
+class TestPrepareReportDir(unittest.TestCase):
+ """
+ Tests the report directory preparation
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.test_dir = tempfile.mkdtemp()
+ self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with
+ self.addCleanup(os.removedirs, self.test_dir)
+
+ def test_report_dir_with_files(self):
+ assert os.path.exists(self.test_file.name)
+ pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
+ assert not os.path.exists(self.test_file.name)
+
+ def test_report_dir_without_files(self):
+ os.remove(self.test_file.name)
+ pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
+ assert os.listdir(path(self.test_dir)) == []
diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py
new file mode 100644
index 000000000000..d034360acde0
--- /dev/null
+++ b/pavelib/paver_tests/test_pii_check.py
@@ -0,0 +1,79 @@
+"""
+Tests for Paver's PII checker task.
+"""
+
+import shutil
+import tempfile
+import unittest
+from unittest.mock import patch
+
+from path import Path as path
+from paver.easy import call_task, BuildFailure
+
+import pavelib.quality
+from pavelib.utils.envs import Env
+
+
+class TestPaverPIICheck(unittest.TestCase):
+ """
+ For testing the paver run_pii_check task
+ """
+ def setUp(self):
+ super().setUp()
+ self.report_dir = path(tempfile.mkdtemp())
+ self.addCleanup(shutil.rmtree, self.report_dir)
+
+ @patch.object(pavelib.quality.run_pii_check, 'needs')
+ @patch('pavelib.quality.sh')
+ def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs):
+ """
+ run_pii_check succeeds with proper report dir
+ """
+ # Make the expected stdout files.
+ cms_stdout_report = self.report_dir / 'pii_check_cms.report'
+ cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
+ lms_stdout_report = self.report_dir / 'pii_check_lms.report'
+ lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n'])
+
+ mock_needs.return_value = 0
+ call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
+ mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
+ assert len(mock_calls) == 2
+ assert any('lms.envs.test' in call for call in mock_calls)
+ assert any('cms.envs.test' in call for call in mock_calls)
+ assert all(str(self.report_dir) in call for call in mock_calls)
+ metrics_file = Env.METRICS_DIR / 'pii'
+ assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
+
+ @patch.object(pavelib.quality.run_pii_check, 'needs')
+ @patch('pavelib.quality.sh')
+ def test_pii_check_failed(self, mock_paver_sh, mock_needs):
+ """
+ run_pii_check fails due to crossing the threshold.
+ """
+ # Make the expected stdout files.
+ cms_stdout_report = self.report_dir / 'pii_check_cms.report'
+ cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
+ lms_stdout_report = self.report_dir / 'pii_check_lms.report'
+ lms_stdout_report.write_lines([
+ 'Coverage found 66 uncovered models:',
+ 'Coverage threshold not met! Needed 100.0, actually 95.0!',
+ ])
+
+ mock_needs.return_value = 0
+ try:
+ with self.assertRaises(BuildFailure):
+ call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
+ except SystemExit:
+ # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why.
+ # As a hack, we just wrap it in try-except.
+ # This is not good, but these tests weren't even running for years, and we're removing this whole test
+ # suite soon anyway.
+ pass
+ mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
+ assert len(mock_calls) == 2
+ assert any('lms.envs.test' in call for call in mock_calls)
+ assert any('cms.envs.test' in call for call in mock_calls)
+ assert all(str(self.report_dir) in call for call in mock_calls)
+ metrics_file = Env.METRICS_DIR / 'pii'
+ assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py
new file mode 100644
index 000000000000..3e1c79c93f28
--- /dev/null
+++ b/pavelib/paver_tests/test_stylelint.py
@@ -0,0 +1,36 @@
+"""
+Tests for Paver's Stylelint tasks.
+"""
+
+from unittest.mock import MagicMock, patch
+
+import pytest
+import ddt
+from paver.easy import call_task
+
+from .utils import PaverTestCase
+
+
+@ddt.ddt
+class TestPaverStylelint(PaverTestCase):
+ """
+ Tests for Paver's Stylelint tasks.
+ """
+ @ddt.data(
+ [False],
+ [True],
+ )
+ @ddt.unpack
+ def test_run_stylelint(self, should_pass):
+ """
+ Verify that the stylelint quality task passes when there are no violations and fails when there are.
+ """
+ if should_pass:
+ _mock_stylelint_violations = MagicMock(return_value=0)
+ with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
+ call_task('pavelib.quality.run_stylelint')
+ else:
+ _mock_stylelint_violations = MagicMock(return_value=100)
+ with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_stylelint')
diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py
new file mode 100644
index 000000000000..5ccbf74abcf9
--- /dev/null
+++ b/pavelib/paver_tests/test_timer.py
@@ -0,0 +1,190 @@
+"""
+Tests of the pavelib.utils.timer module.
+"""
+
+
+from datetime import datetime, timedelta
+from unittest import TestCase
+
+from unittest.mock import MagicMock, patch
+
+from pavelib.utils import timer
+
+
+@timer.timed
+def identity(*args, **kwargs):
+ """
+ An identity function used as a default task to test the timing of.
+ """
+ return args, kwargs
+
+
+MOCK_OPEN = MagicMock(spec=open)
+
+
+@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN)
+class TimedDecoratorTests(TestCase):
+ """
+ Tests of the pavelib.utils.timer:timed decorator.
+ """
+ def setUp(self):
+ super().setUp()
+
+ patch_dumps = patch.object(timer.json, 'dump', autospec=True)
+ self.mock_dump = patch_dumps.start()
+ self.addCleanup(patch_dumps.stop)
+
+ patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True)
+ self.mock_makedirs = patch_makedirs.start()
+ self.addCleanup(patch_makedirs.stop)
+
+ patch_datetime = patch.object(timer, 'datetime', autospec=True)
+ self.mock_datetime = patch_datetime.start()
+ self.addCleanup(patch_datetime.stop)
+
+ patch_exists = patch.object(timer, 'exists', autospec=True)
+ self.mock_exists = patch_exists.start()
+ self.addCleanup(patch_exists.stop)
+
+ MOCK_OPEN.reset_mock()
+
+ def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None):
+ """
+ Return all timing messages recorded during the execution of ``task``.
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = {}
+
+ if raises is None:
+ task(*args, **kwargs)
+ else:
+ self.assertRaises(raises, task, *args, **kwargs)
+
+ return [
+ call[0][0] # log_message
+ for call in self.mock_dump.call_args_list
+ ]
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
+ def test_times(self):
+ start = datetime(2016, 7, 20, 10, 56, 19)
+ end = start + timedelta(seconds=35.6)
+
+ self.mock_datetime.utcnow.side_effect = [start, end]
+
+ messages = self.get_log_messages()
+ assert len(messages) == 1
+
+ # I'm not using assertDictContainsSubset because it is
+ # deprecated in python 3.2 (because the arguments were backwards)
+ # and it wasn't ever replaced by anything *headdesk*
+ assert 'duration' in messages[0]
+ assert 35.6 == messages[0]['duration']
+
+ assert 'started_at' in messages[0]
+ assert start.isoformat(' ') == messages[0]['started_at']
+
+ assert 'ended_at' in messages[0]
+ assert end.isoformat(' ') == messages[0]['ended_at']
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', None)
+ def test_no_logs(self):
+ messages = self.get_log_messages()
+ assert len(messages) == 0
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
+ def test_arguments(self):
+ messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz'))
+ assert len(messages) == 1
+
+ # I'm not using assertDictContainsSubset because it is
+ # deprecated in python 3.2 (because the arguments were backwards)
+ # and it wasn't ever replaced by anything *headdesk*
+ assert 'args' in messages[0]
+ assert [repr(1), repr('foo')] == messages[0]['args']
+ assert 'kwargs' in messages[0]
+ assert {'bar': repr('baz')} == messages[0]['kwargs']
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
+ def test_task_name(self):
+ messages = self.get_log_messages()
+ assert len(messages) == 1
+
+ # I'm not using assertDictContainsSubset because it is
+ # deprecated in python 3.2 (because the arguments were backwards)
+ # and it wasn't ever replaced by anything *headdesk*
+ assert 'task' in messages[0]
+ assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task']
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
+ def test_exceptions(self):
+
+ @timer.timed
+ def raises():
+ """
+ A task used for testing exception handling of the timed decorator.
+ """
+ raise Exception('The Message!')
+
+ messages = self.get_log_messages(task=raises, raises=Exception)
+ assert len(messages) == 1
+
+ # I'm not using assertDictContainsSubset because it is
+ # deprecated in python 3.2 (because the arguments were backwards)
+ # and it wasn't ever replaced by anything *headdesk*
+ assert 'exception' in messages[0]
+ assert 'Exception: The Message!' == messages[0]['exception']
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log')
+ def test_date_formatting(self):
+ start = datetime(2016, 7, 20, 10, 56, 19)
+ end = start + timedelta(seconds=35.6)
+
+ self.mock_datetime.utcnow.side_effect = [start, end]
+
+ messages = self.get_log_messages()
+ assert len(messages) == 1
+
+ MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a')
+
+ @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
+ def test_nested_tasks(self):
+
+ @timer.timed
+ def parent():
+ """
+ A timed task that calls another task
+ """
+ identity()
+
+ parent_start = datetime(2016, 7, 20, 10, 56, 19)
+ parent_end = parent_start + timedelta(seconds=60)
+ child_start = parent_start + timedelta(seconds=10)
+ child_end = parent_end - timedelta(seconds=10)
+
+ self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end]
+
+ messages = self.get_log_messages(task=parent)
+ assert len(messages) == 2
+
+ # Child messages first
+ assert 'duration' in messages[0]
+ assert 40 == messages[0]['duration']
+
+ assert 'started_at' in messages[0]
+ assert child_start.isoformat(' ') == messages[0]['started_at']
+
+ assert 'ended_at' in messages[0]
+ assert child_end.isoformat(' ') == messages[0]['ended_at']
+
+ # Parent messages after
+ assert 'duration' in messages[1]
+ assert 60 == messages[1]['duration']
+
+ assert 'started_at' in messages[1]
+ assert parent_start.isoformat(' ') == messages[1]['started_at']
+
+ assert 'ended_at' in messages[1]
+ assert parent_end.isoformat(' ') == messages[1]['ended_at']
diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py
new file mode 100644
index 000000000000..a9b4a41e1600
--- /dev/null
+++ b/pavelib/paver_tests/test_xsslint.py
@@ -0,0 +1,120 @@
+"""
+Tests for paver xsslint quality tasks
+"""
+from unittest.mock import patch
+
+import pytest
+from paver.easy import call_task
+
+import pavelib.quality
+
+from .utils import PaverTestCase
+
+
+class PaverXSSLintTest(PaverTestCase):
+ """
+ Test run_xsslint with a mocked environment in order to pass in opts
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.reset_task_messages()
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint encounters an error parsing the xsslint output log
+ """
+ _mock_counts.return_value = {}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint')
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint finds no violations, and no limit was set
+ """
+ _mock_counts.return_value = {'total': 0}
+ call_task('pavelib.quality.run_xsslint')
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint fails when thresholds option is poorly formatted
+ """
+ _mock_counts.return_value = {'total': 0}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": "invalid"})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint fails when thresholds option is poorly formatted
+ """
+ _mock_counts.return_value = {'total': 0}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint finds more violations than are allowed
+ """
+ _mock_counts.return_value = {'total': 4}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint finds fewer violations than are allowed
+ """
+ _mock_counts.return_value = {'total': 4}
+ # No System Exit is expected
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint encounters an error parsing the xsslint output log for a
+ given rule threshold that was set.
+ """
+ _mock_counts.return_value = {'total': 4}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint finds more rule violations than are allowed
+ """
+ _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
+ with pytest.raises(SystemExit):
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
+
+ @patch.object(pavelib.quality, '_write_metric')
+ @patch.object(pavelib.quality, '_prepare_report_dir')
+ @patch.object(pavelib.quality, '_get_xsslint_counts')
+ def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
+ """
+ run_xsslint finds fewer rule violations than are allowed
+ """
+ _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
+ # No System Exit is expected
+ call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'})
diff --git a/pavelib/quality.py b/pavelib/quality.py
new file mode 100644
index 000000000000..774179f45048
--- /dev/null
+++ b/pavelib/quality.py
@@ -0,0 +1,602 @@
+""" # lint-amnesty, pylint: disable=django-not-configured
+Check code quality using pycodestyle, pylint, and diff_quality.
+"""
+
+import json
+import os
+import re
+from datetime import datetime
+from xml.sax.saxutils import quoteattr
+
+from paver.easy import BuildFailure, cmdopts, needs, sh, task
+
+from .utils.envs import Env
+from .utils.timer import timed
+
+ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts'
+JUNIT_XML_TEMPLATE = """
+
+{failure_element}
+
+"""
+JUNIT_XML_FAILURE_TEMPLATE = ''
+START_TIME = datetime.utcnow()
+
+
+def write_junit_xml(name, message=None):
+ """
+ Write a JUnit results XML file describing the outcome of a quality check.
+ """
+ if message:
+ failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message))
+ else:
+ failure_element = ''
+ data = {
+ 'failure_count': 1 if message else 0,
+ 'failure_element': failure_element,
+ 'name': name,
+ 'seconds': (datetime.utcnow() - START_TIME).total_seconds(),
+ }
+ Env.QUALITY_DIR.makedirs_p()
+ filename = Env.QUALITY_DIR / f'{name}.xml'
+ with open(filename, 'w') as f:
+ f.write(JUNIT_XML_TEMPLATE.format(**data))
+
+
+def fail_quality(name, message):
+ """
+ Fail the specified quality check by generating the JUnit XML results file
+ and raising a ``BuildFailure``.
+ """
+ write_junit_xml(name, message)
+ raise BuildFailure(message)
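+# Most quality tasks below report their outcome through write_junit_xml() on success or
+# fail_quality() on failure, leaving a one-test JUnit results file under Env.QUALITY_DIR
+# for CI to collect (the conftest fixture in pavelib/paver_tests cleans that directory up).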
+
+
+def top_python_dirs(dirname):
+ """
+ Find the directories to start from in order to find all the Python files in `dirname`.
+ """
+ top_dirs = []
+
+ dir_init = os.path.join(dirname, "__init__.py")
+ if os.path.exists(dir_init):
+ top_dirs.append(dirname)
+
+ for directory in ['djangoapps', 'lib']:
+ subdir = os.path.join(dirname, directory)
+ subdir_init = os.path.join(subdir, "__init__.py")
+ if os.path.exists(subdir) and not os.path.exists(subdir_init):
+ dirs = os.listdir(subdir)
+ top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d)))
+
+ modules_to_remove = ['__pycache__']
+ for module in modules_to_remove:
+ if module in top_dirs:
+ top_dirs.remove(module)
+
+ return top_dirs
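+# Note: `dirname` itself is included only when it is a package (contains __init__.py), and
+# the djangoapps/lib entries are returned as bare directory names rather than full paths.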
+
+
+def _get_pep8_violations(clean=True):
+ """
+ Runs pycodestyle. Returns a tuple of (number_of_violations, violations_list),
+ where violations_list is a list of the individual PEP 8 violation lines found
+ in the report.
+ """
+ report_dir = (Env.REPORT_DIR / 'pep8')
+ if clean:
+ report_dir.rmtree(ignore_errors=True)
+ report_dir.makedirs_p()
+ report = report_dir / 'pep8.report'
+
+ # Make sure the metrics subdirectory exists
+ Env.METRICS_DIR.makedirs_p()
+
+ if not report.exists():
+ sh(f'pycodestyle . | tee {report} -a')
+
+ violations_list = _pep8_violations(report)
+
+ return len(violations_list), violations_list
+
+
+def _pep8_violations(report_file):
+ """
+ Returns the list of all PEP 8 violations in the given report_file.
+ """
+ with open(report_file) as f:
+ return f.readlines()
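+# Each entry in the returned list is one raw pycodestyle line, e.g.
+# "path/to/file.py:10:1: E302 expected 2 blank lines, got 1".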
+
+
+@task
+@cmdopts([
+ ("system=", "s", "System to act on"),
+])
+@timed
+def run_pep8(options): # pylint: disable=unused-argument
+ """
+ Run pycodestyle on system code.
+ Fail the task if any violations are found.
+ """
+ (count, violations_list) = _get_pep8_violations()
+ violations_list = ''.join(violations_list)
+
+ # Print number of violations to log
+ violations_count_str = f"Number of PEP 8 violations: {count}"
+ print(violations_count_str)
+ print(violations_list)
+
+ # Also write the number of violations to a file
+ with open(Env.METRICS_DIR / "pep8", "w") as f:
+ f.write(violations_count_str + '\n\n')
+ f.write(violations_list)
+
+ # Fail if any violations are found
+ if count:
+ failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str
+ failure_string += f"\n\nViolations:\n{violations_list}"
+ fail_quality('pep8', failure_string)
+ else:
+ write_junit_xml('pep8')
+
+
+@task
+@needs(
+ 'pavelib.prereqs.install_node_prereqs',
+ 'pavelib.utils.test.utils.ensure_clean_package_lock',
+)
+@cmdopts([
+ ("limit=", "l", "limit for number of acceptable violations"),
+])
+@timed
+def run_eslint(options):
+ """
+ Runs eslint on static asset directories.
+ If limit option is passed, fails build if more violations than the limit are found.
+ """
+
+ eslint_report_dir = (Env.REPORT_DIR / "eslint")
+ eslint_report = eslint_report_dir / "eslint.report"
+ _prepare_report_dir(eslint_report_dir)
+ violations_limit = int(getattr(options, 'limit', -1))
+
+ sh(
+ "node --max_old_space_size=4096 node_modules/.bin/eslint "
+ "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format(
+ eslint_report=eslint_report
+ ),
+ ignore_error=True
+ )
+
+ try:
+ num_violations = int(_get_count_from_last_line(eslint_report, "eslint"))
+ except TypeError:
+ fail_quality(
+ 'eslint',
+ "FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
+ eslint_report=eslint_report
+ )
+ )
+
+ # Record the metric
+ _write_metric(num_violations, (Env.METRICS_DIR / "eslint"))
+
+ # Fail if number of violations is greater than the limit
+ if num_violations > violations_limit > -1:
+ fail_quality(
+ 'eslint',
+ "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
+ count=num_violations, violations_limit=violations_limit
+ )
+ )
+ else:
+ write_junit_xml('eslint')
+
+
+def _get_stylelint_violations():
+ """
+ Returns the number of Stylelint violations.
+ """
+ stylelint_report_dir = (Env.REPORT_DIR / "stylelint")
+ stylelint_report = stylelint_report_dir / "stylelint.report"
+ _prepare_report_dir(stylelint_report_dir)
+ formatter = 'node_modules/stylelint-formatter-pretty'
+
+ sh(
+ "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format(
+ formatter=formatter,
+ stylelint_report=stylelint_report,
+ ),
+ ignore_error=True
+ )
+
+ try:
+ return int(_get_count_from_last_line(stylelint_report, "stylelint"))
+ except TypeError:
+ fail_quality(
+ 'stylelint',
+ "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format(
+ stylelint_report=stylelint_report
+ )
+ )
+
+
+@task
+@needs('pavelib.prereqs.install_node_prereqs')
+@cmdopts([
+ ("limit=", "l", "limit for number of acceptable violations"),
+])
+@timed
+def run_stylelint(options):
+ """
+ Runs stylelint on Sass files.
+ If limit option is passed, fails build if more violations than the limit are found.
+ """
+ violations_limit = 0
+ num_violations = _get_stylelint_violations()
+
+ # Record the metric
+ _write_metric(num_violations, (Env.METRICS_DIR / "stylelint"))
+
+ # Fail if number of violations is greater than the limit
+ if num_violations > violations_limit:
+ fail_quality(
+ 'stylelint',
+ "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format(
+ count=num_violations,
+ violations_limit=violations_limit,
+ )
+ )
+ else:
+ write_junit_xml('stylelint')
+
+
+@task
+@needs('pavelib.prereqs.install_python_prereqs')
+@cmdopts([
+ ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"),
+])
+@timed
+def run_xsslint(options):
+ """
+ Runs xsslint/xss_linter.py on the codebase
+ """
+
+ thresholds_option = getattr(options, 'thresholds', '{}')
+ try:
+ violation_thresholds = json.loads(thresholds_option)
+ except ValueError:
+ violation_thresholds = None
+ if isinstance(violation_thresholds, dict) is False or \
+ any(key not in ("total", "rules") for key in violation_thresholds.keys()):
+
+ fail_quality(
+ 'xsslint',
+ """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
+ """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
+ """with property names in double-quotes.""".format(
+ thresholds_option=thresholds_option
+ )
+ )
+
+ xsslint_script = "xss_linter.py"
+ xsslint_report_dir = (Env.REPORT_DIR / "xsslint")
+ xsslint_report = xsslint_report_dir / "xsslint.report"
+ _prepare_report_dir(xsslint_report_dir)
+
+ sh(
+ "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format(
+ repo_root=Env.REPO_ROOT,
+ xsslint_script=xsslint_script,
+ xsslint_report=xsslint_report,
+ cfg_module='scripts.xsslint_config'
+ ),
+ ignore_error=True
+ )
+
+ xsslint_counts = _get_xsslint_counts(xsslint_report)
+
+ try:
+ metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
+ xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total'])
+ )
+ if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
+ metrics_str += "\n"
+ rule_keys = sorted(xsslint_counts['rules'].keys())
+ for rule in rule_keys:
+ metrics_str += "{rule} violations: {count}\n".format(
+ rule=rule,
+ count=int(xsslint_counts['rules'][rule])
+ )
+ except TypeError:
+ fail_quality(
+ 'xsslint',
+ "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format(
+ xsslint_script=xsslint_script, xsslint_report=xsslint_report
+ )
+ )
+
+ metrics_report = (Env.METRICS_DIR / "xsslint")
+ # Record the metric
+ _write_metric(metrics_str, metrics_report)
+ # Print number of violations to log.
+ sh(f"cat {metrics_report}", ignore_error=True)
+
+ error_message = ""
+
+ # Test total violations against threshold.
+ if 'total' in list(violation_thresholds.keys()):
+ if violation_thresholds['total'] < xsslint_counts['total']:
+ error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
+ count=xsslint_counts['total'], violations_limit=violation_thresholds['total']
+ )
+
+ # Test rule violations against thresholds.
+ if 'rules' in violation_thresholds:
+ threshold_keys = sorted(violation_thresholds['rules'].keys())
+ for threshold_key in threshold_keys:
+ if threshold_key not in xsslint_counts['rules']:
+ error_message += (
+ "\nNumber of {xsslint_script} violations for {rule} could not be found in "
+ "{xsslint_report}."
+ ).format(
+ xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report
+ )
+ elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]:
+ error_message += \
+ "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
+ rule=threshold_key, count=xsslint_counts['rules'][threshold_key],
+ violations_limit=violation_thresholds['rules'][threshold_key],
+ )
+
+ if error_message:
+ fail_quality(
+ 'xsslint',
+ "FAILURE: XSSLinter Failed.\n{error_message}\n"
+ "See {xsslint_report} or run the following command to hone in on the problem:\n"
+ " ./scripts/xss-commit-linter.sh -h".format(
+ error_message=error_message, xsslint_report=xsslint_report
+ )
+ )
+ else:
+ write_junit_xml('xsslint')
+
+
+def _write_metric(metric, filename):
+ """
+ Write a given metric to a given file
+ Used for things like reports/metrics/eslint, which will simply tell you the number of
+ eslint violations found
+ """
+ Env.METRICS_DIR.makedirs_p()
+
+ with open(filename, "w") as metric_file:
+ metric_file.write(str(metric))
+
+
+def _prepare_report_dir(dir_name):
+ """
+ Sets a given directory to a created, but empty state
+ """
+ dir_name.rmtree_p()
+ dir_name.mkdir_p()
+
+
+def _get_report_contents(filename, report_name, last_line_only=False):
+ """
+ Returns the contents of the given file. Use last_line_only to only return
+ the last line, which can be used for getting output from quality output
+ files.
+
+ Arguments:
+ last_line_only: True to return the last line only, False to return a
+ string with full contents.
+
+ Returns:
+ String containing full contents of the report, or the last line.
+
+ """
+ if os.path.isfile(filename):
+ with open(filename) as report_file:
+ if last_line_only:
+ lines = report_file.readlines()
+ for line in reversed(lines):
+ if line != '\n':
+ return line
+ return None
+ else:
+ return report_file.read()
+ else:
+ file_not_found_message = f"FAILURE: The following log file could not be found: {filename}"
+ fail_quality(report_name, file_not_found_message)
+
+
+def _get_count_from_last_line(filename, file_type):
+ """
+ Returns the number found at the start of the last line of a file, as a float,
+ or None if no number could be parsed from that line.
+ """
+ report_contents = _get_report_contents(filename, file_type, last_line_only=True)
+
+ if report_contents is None:
+ return 0
+
+ last_line = report_contents.strip()
+ # Example of the last line of a compact-formatted eslint report (for example): "62829 problems"
+ regex = r'^\d+'
+
+ try:
+ return float(re.search(regex, last_line).group(0))
+ # An AttributeError will occur if the regex finds no matches.
+ # A ValueError will occur if the returned regex cannot be cast as a float.
+ except (AttributeError, ValueError):
+ return None
+
+
+def _get_xsslint_counts(filename):
+ """
+ This returns a dict of violations from the xsslint report.
+
+ Arguments:
+ filename: The name of the xsslint report.
+
+ Returns:
+ A dict containing the following:
+ rules: A dict containing the count for each rule as follows:
+ violation-rule-id: N, where N is the number of violations
+ total: M, where M is the number of total violations
+
+ """
+ report_contents = _get_report_contents(filename, 'xsslint')
+ rule_count_regex = re.compile(r"^(?P[a-z-]+):\s+(?P\d+) violations", re.MULTILINE)
+ total_count_regex = re.compile(r"^(?P\d+) violations total", re.MULTILINE)
+ violations = {'rules': {}}
+ for violation_match in rule_count_regex.finditer(report_contents):
+ try:
+ violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
+ except ValueError:
+ violations['rules'][violation_match.group('rule_id')] = None
+ try:
+ violations['total'] = int(total_count_regex.search(report_contents).group('count'))
+ # An AttributeError will occur if the regex finds no matches.
+ # A ValueError will occur if the returned regex cannot be cast as a float.
+ except (AttributeError, ValueError):
+ violations['total'] = None
+ return violations
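+# For example, a report ending with "javascript-escape: 7 violations" and
+# "2608 violations total" parses to {'rules': {'javascript-escape': 7}, 'total': 2608}
+# (this format is exercised in pavelib/paver_tests/test_paver_quality.py).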
+
+
+def _extract_missing_pii_annotations(filename):
+ """
+ Returns the number of uncovered models from the stdout report of django_find_annotations.
+
+ Arguments:
+ filename: Filename where stdout of django_find_annotations was captured.
+
+ Returns:
+ three-tuple containing:
+ 1. The number of uncovered models,
+ 2. A bool indicating whether the coverage is still below the threshold, and
+ 3. The full report as a string.
+ """
+ uncovered_models = 0
+ pii_check_passed = True
+ if os.path.isfile(filename):
+ with open(filename) as report_file:
+ lines = report_file.readlines()
+
+ # Find the count of uncovered models.
+ uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
+ for line in lines:
+ uncovered_match = uncovered_regex.match(line)
+ if uncovered_match:
+ uncovered_models = int(uncovered_match.groups()[0])
+ break
+
+ # Find a message which suggests the check failed.
+ failure_regex = re.compile(r'^Coverage threshold not met!')
+ for line in lines:
+ failure_match = failure_regex.match(line)
+ if failure_match:
+ pii_check_passed = False
+ break
+
+ # Each line in lines already contains a newline.
+ full_log = ''.join(lines)
+ else:
+ fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')
+
+ return (uncovered_models, pii_check_passed, full_log)
+
+
+@task
+@needs('pavelib.prereqs.install_python_prereqs')
+@cmdopts([
+ ("report-dir=", "r", "Directory in which to put PII reports"),
+])
+@timed
+def run_pii_check(options):
+ """
+ Guarantee that all Django models are PII-annotated.
+ """
+ pii_report_name = 'pii'
+ default_report_dir = (Env.REPORT_DIR / pii_report_name)
+ report_dir = getattr(options, 'report_dir', default_report_dir)
+ output_file = os.path.join(report_dir, 'pii_check_{}.report')
+ env_report = []
+ pii_check_passed = True
+ for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
+ try:
+ print()
+ print(f"Running {env_name} PII Annotation check and report")
+ print("-" * 45)
+ run_output_file = str(output_file).format(env_name.lower())
+ sh(
+ "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument
+ "export DJANGO_SETTINGS_MODULE={}; "
+ "code_annotations django_find_annotations "
+ "--config_file .pii_annotations.yml --report_path {} --app_name {} "
+ "--lint --report --coverage | tee {}".format(
+ report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file
+ )
+ )
+ uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file)
+ env_report.append((
+ uncovered_model_count,
+ full_log,
+ ))
+
+ except BuildFailure as error_message:
+ fail_quality(pii_report_name, f'FAILURE: {error_message}')
+
+ if not pii_check_passed_env:
+ pii_check_passed = False
+
+ # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count.
+ uncovered_count, full_log = max(env_report, key=lambda r: r[0])
+
+ # Write metric file.
+ if uncovered_count is None:
+ uncovered_count = 0
+ metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n"
+ _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))
+
+ # Finally, fail the paver task if code_annotations suggests that the check failed.
+ if not pii_check_passed:
+ fail_quality('pii', full_log)
+
+
+@task
+@needs('pavelib.prereqs.install_python_prereqs')
+@timed
+def check_keywords():
+ """
+ Check Django model fields for names that conflict with a list of reserved keywords
+ """
+ report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords')
+ sh(f"mkdir -p {report_path}")
+
+ overall_status = True
+ for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]:
+ report_file = f"{env}_reserved_keyword_report.csv"
+ override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml")
+ try:
+ sh(
+ "export DJANGO_SETTINGS_MODULE={settings_file}; "
+ "python manage.py {app} check_reserved_keywords "
+ "--override_file {override_file} "
+ "--report_path {report_path} "
+ "--report_file {report_file}".format(
+ settings_file=env_settings_file, app=env, override_file=override_file,
+ report_path=report_path, report_file=report_file
+ )
+ )
+ except BuildFailure:
+ overall_status = False
+
+ if not overall_status:
+ fail_quality(
+ 'keywords',
+ 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format(
+ report_path
+ )
+ )
diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py
new file mode 100644
index 000000000000..34ecd49c1c74
--- /dev/null
+++ b/pavelib/utils/test/suites/__init__.py
@@ -0,0 +1,5 @@
+"""
+TestSuite class and subclasses
+"""
+from .js_suite import JestSnapshotTestSuite, JsTestSuite
+from .suite import TestSuite
diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py
new file mode 100644
index 000000000000..4e53d454fee5
--- /dev/null
+++ b/pavelib/utils/test/suites/js_suite.py
@@ -0,0 +1,109 @@
+"""
+Javascript test tasks
+"""
+
+
+from paver import tasks
+
+from pavelib.utils.envs import Env
+from pavelib.utils.test import utils as test_utils
+from pavelib.utils.test.suites.suite import TestSuite
+
+__test__ = False # do not collect
+
+
+class JsTestSuite(TestSuite):
+ """
+ A class for running JavaScript tests.
+ """
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.run_under_coverage = kwargs.get('with_coverage', True)
+ self.mode = kwargs.get('mode', 'run')
+ self.report_dir = Env.JS_REPORT_DIR
+ self.opts = kwargs
+
+ suite = args[0]
+ self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)]
+
+ def __enter__(self):
+ super().__enter__()
+ if tasks.environment.dry_run:
+ tasks.environment.info("make report_dir")
+ else:
+ self.report_dir.makedirs_p()
+ if not self.skip_clean:
+ test_utils.clean_test_files()
+
+ if self.mode == 'run' and not self.run_under_coverage:
+ test_utils.clean_dir(self.report_dir)
+
+ @property
+ def _default_subsuites(self):
+ """
+ Returns all JS test suites
+ """
+ return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot']
+
+
+class JsTestSubSuite(TestSuite):
+ """
+ Class for JS suites like cms, cms-squire, lms, common,
+ common-requirejs and xmodule
+ """
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.test_id = args[0]
+ self.run_under_coverage = kwargs.get('with_coverage', True)
+ self.mode = kwargs.get('mode', 'run')
+ self.port = kwargs.get('port')
+ self.root = self.root + ' javascript'
+ self.report_dir = Env.JS_REPORT_DIR
+
+ try:
+ self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)]
+ except ValueError:
+ self.test_conf_file = Env.KARMA_CONFIG_FILES[0]
+
+ self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml'
+ self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml'
+
+ @property
+ def cmd(self):
+ """
+ Run the tests using karma runner.
+ """
+ cmd = [
+ "node",
+ "--max_old_space_size=4096",
+ "node_modules/.bin/karma",
+ "start",
+ self.test_conf_file,
+ "--single-run={}".format('false' if self.mode == 'dev' else 'true'),
+ "--capture-timeout=60000",
+ f"--junitreportpath={self.xunit_report}",
+ f"--browsers={Env.KARMA_BROWSER}",
+ ]
+
+ if self.port:
+ cmd.append(f"--port={self.port}")
+
+ if self.run_under_coverage:
+ cmd.extend([
+ "--coverage",
+ f"--coveragereportpath={self.coverage_report}",
+ ])
+
+ return cmd
+
+
+class JestSnapshotTestSuite(TestSuite):
+ """
+ A class for running Jest Snapshot tests.
+ """
+ @property
+ def cmd(self):
+ """
+ Run the tests using Jest.
+ """
+ return ["jest"]
diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py
new file mode 100644
index 000000000000..5a423c827c21
--- /dev/null
+++ b/pavelib/utils/test/suites/suite.py
@@ -0,0 +1,149 @@
+"""
+A class used for defining and running test suites
+"""
+
+
+import os
+import subprocess
+import sys
+
+from paver import tasks
+
+from pavelib.utils.process import kill_process
+
+try:
+ from pygments.console import colorize
+except ImportError:
+ colorize = lambda color, text: text
+
+__test__ = False # do not collect
+
+
+class TestSuite:
+ """
+ TestSuite is a class that defines how groups of tests run.
+ """
+ def __init__(self, *args, **kwargs):
+ self.root = args[0]
+ self.subsuites = kwargs.get('subsuites', [])
+ self.failed_suites = []
+ self.verbosity = int(kwargs.get('verbosity', 1))
+ self.skip_clean = kwargs.get('skip_clean', False)
+ self.passthrough_options = kwargs.get('passthrough_options', [])
+
+ def __enter__(self):
+ """
+ This runs before the test suite is executed by the run_suite_tests method.
+ If self.run_test is called directly, it should be run in a 'with' block to
+ ensure that the proper context is created.
+
+ Specific setup tasks should be defined in each subsuite,
+ e.g. checking for and creating required directories.
+ """
+ print(f"\nSetting up for {self.root}")
+ self.failed_suites = []
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """
+ This runs after the tests started by the run_suite_tests method finish.
+ Specific clean up tasks should be defined in each subsuite,
+ e.g. cleaning the mongo test databases after the lms tests run.
+
+ If self.run_test is called directly, it should be run in a 'with' block
+ to ensure that clean up happens properly.
+ """
+ print(f"\nCleaning up after {self.root}")
+
+ @property
+ def cmd(self):
+ """
+ The command to run tests, as a list of arguments. For this base class there is none.
+ """
+ return None
+
+ @staticmethod
+ def is_success(exit_code):
+ """
+ Determine if the given exit code represents a success of the test
+ suite. By default, only a zero counts as a success.
+ """
+ return exit_code == 0
+
+ def run_test(self):
+ """
+ Runs self.cmd in a subprocess and waits for it to finish.
+ Returns True if the suite passes and False if errors or failures occur.
+ In dry-run mode the command is only printed.
+ """
+ cmd = " ".join(self.cmd)
+
+ if tasks.environment.dry_run:
+ tasks.environment.info(cmd)
+ return
+
+ sys.stdout.write(cmd)
+
+ msg = colorize(
+ 'green',
+ '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40),
+ )
+
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+
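+ # Expose the suite name to the child process unless the caller has
+ # already set TEST_SUITE explicitly.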
+ if 'TEST_SUITE' not in os.environ:
+ os.environ['TEST_SUITE'] = self.root.replace("/", "_")
+ kwargs = {'shell': True, 'cwd': None}
+ process = None
+
+ try:
+ process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with
+ return self.is_success(process.wait())
+ except KeyboardInterrupt:
+ kill_process(process)
+ sys.exit(1)
+
+ def run_suite_tests(self):
+ """
+ Runs each of the suites in self.subsuites while tracking failures
+ """
+ # Uses __enter__ and __exit__ for context
+ with self:
+ # run the tests for this class, and for all subsuites
+ if self.cmd:
+ passed = self.run_test()
+ if not passed:
+ self.failed_suites.append(self)
+
+ for suite in self.subsuites:
+ suite.run_suite_tests()
+ if suite.failed_suites:
+ self.failed_suites.extend(suite.failed_suites)
+
+ def report_test_results(self):
+ """
+ Prints a summary of the failed suites, or a success message if there were none.
+ """
+ if self.failed_suites:
+ msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48))
+ msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n')
+ else:
+ msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48))
+
+ print(msg)
+
+ def run(self):
+ """
+ Runs the tests in the suite while tracking and reporting failures.
+ """
+ self.run_suite_tests()
+
+ if tasks.environment.dry_run:
+ return
+
+ self.report_test_results()
+
+ if self.failed_suites:
+ sys.exit(1)
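+
+
+ # A minimal subclass sketch (hypothetical, for illustration only):
+ #
+ #     class EchoSuite(TestSuite):
+ #         @property
+ #         def cmd(self):
+ #             return ["echo", "running tests for", self.root]
+ #
+ #     EchoSuite("demo").run()  # runs the command, reports results, and
+ #                              # exits non-zero if any suite failed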
diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py
new file mode 100644
index 000000000000..0851251e2222
--- /dev/null
+++ b/pavelib/utils/test/utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for test tasks
+"""
+
+
+import os
+
+from paver.easy import cmdopts, sh, task
+
+from pavelib.utils.envs import Env
+from pavelib.utils.timer import timed
+
+
+MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
+
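+# Constants for the who-tests-what (WTW) coverage workflow: the cache bucket,
+# the local cache path, and the baseline/diff file names.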
+COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches"
+COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what"
+COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all'))
+WHO_TESTS_WHAT_DIFF = "who_tests_what.diff"
+
+
+__test__ = False # do not collect
+
+
+@task
+@timed
+def clean_test_files():
+ """
+ Clean fixture files used by tests and .pyc files
+ """
+ sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads")
+ # This find command removes all the *.pyc files that aren't in the .git
+ # directory. See this blog post for more details:
+ # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html
+ sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;")
+ sh("rm -rf test_root/log/auto_screenshots/*")
+ sh("rm -rf /tmp/mako_[cl]ms")
+
+
+@task
+@timed
+def ensure_clean_package_lock():
+ """
+ Ensure that package-lock.json has no uncommitted changes in the working tree.
+ """
+ sh("""
+ git diff --name-only --exit-code package-lock.json ||
+ (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1)
+ """)
+
+
+def clean_dir(directory):
+ """
+ Delete all the files from the specified directory.
+ """
+ # We delete the files but preserve the directory structure
+ # so that coverage.py has a place to put the reports.
+ sh(f'find {directory} -type f -delete')
+
+
+@task
+@cmdopts([
+ ('skip-clean', 'C', 'skip cleaning repository before running tests'),
+ ('skip_clean', None, 'deprecated in favor of skip-clean'),
+])
+@timed
+def clean_reports_dir(options):
+ """
+ Clean coverage files, to ensure that we don't use stale data to generate reports.
+ """
+ if getattr(options, 'skip_clean', False):
+ print('--skip-clean is set, skipping...')
+ return
+
+ # We delete the files but preserve the directory structure
+ # so that coverage.py has a place to put the reports.
+ reports_dir = Env.REPORT_DIR.makedirs_p()
+ clean_dir(reports_dir)
+
+
+@task
+@timed
+def clean_mongo():
+ """
+ Clean mongo test databases
+ """
+ sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
+ host=Env.MONGO_HOST,
+ port=MONGO_PORT_NUM,
+ repo_root=Env.REPO_ROOT,
+ ))
diff --git a/scripts/generic-ci-tests.sh b/scripts/generic-ci-tests.sh
new file mode 100755
index 000000000000..54b9cbb9d500
--- /dev/null
+++ b/scripts/generic-ci-tests.sh
@@ -0,0 +1,122 @@
+#!/usr/bin/env bash
+set -e
+
+###############################################################################
+#
+# generic-ci-tests.sh
+#
+# Execute some tests for edx-platform.
+# (Most other tests are run by invoking `pytest`, `pylint`, etc. directly)
+#
+# This script can be called from CI jobs that define
+# these environment variables:
+#
+# `TEST_SUITE` defines which kind of test to run.
+# Possible values are:
+#
+# - "quality": Run the quality (pycodestyle/pylint) checks
+# - "js-unit": Run the JavaScript tests
+# - "pavelib-js-unit": Run the JavaScript tests and the Python unit
+# tests from the pavelib/lib directory
+#
+###############################################################################
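+
+# Example local invocation (illustrative; CI normally sets these variables):
+#
+#     TEST_SUITE=js-unit ./scripts/generic-ci-tests.sh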
+
+# Clean up previous builds
+git clean -qxfd
+
+function emptyxunit {
+
+ cat > "reports/$1.xml" <
+
+
+
+END
+
+}
+
+# If a supported tox environment is specified, prepend the paver commands
+# below with the tox invocation; otherwise run them directly.
+if [ -z ${TOX_ENV+x} ] || [[ ${TOX_ENV} == 'null' ]]; then
+ echo "TOX_ENV: ${TOX_ENV}"
+ TOX=""
+elif tox -l | grep -q "${TOX_ENV}"; then
+ if [[ "${TOX_ENV}" == 'quality' ]]; then
+ TOX=""
+ else
+ TOX="tox -r -e ${TOX_ENV} --"
+ fi
+else
+ echo "${TOX_ENV} is not currently supported. Please review the"
+ echo "tox.ini file to see which environments are supported"
+ exit 1
+fi
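+
+# When TOX is non-empty, the paver commands below run as
+# "tox -r -e ${TOX_ENV} -- paver <task>"; when it is empty they call paver directly.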
+
+PAVER_ARGS="-v"
+export SUBSET_JOB=$JOB_NAME
+
+function run_paver_quality {
+ QUALITY_TASK=$1
+ shift
+ mkdir -p test_root/log/
+ LOG_PREFIX="test_root/log/$QUALITY_TASK"
+ $TOX paver "$QUALITY_TASK" "$@" 2> "$LOG_PREFIX.err.log" > "$LOG_PREFIX.out.log" || {
+ echo "STDOUT (last 100 lines of $LOG_PREFIX.out.log):";
+ tail -n 100 "$LOG_PREFIX.out.log"
+ echo "STDERR (last 100 lines of $LOG_PREFIX.err.log):";
+ tail -n 100 "$LOG_PREFIX.err.log"
+ return 1;
+ }
+ return 0;
+}
+
+case "$TEST_SUITE" in
+
+ "quality")
+ EXIT=0
+
+ mkdir -p reports
+
+ echo "Finding pycodestyle violations and storing report..."
+ run_paver_quality run_pep8 || { EXIT=1; }
+ echo "Finding ESLint violations and storing report..."
+ run_paver_quality run_eslint -l "$ESLINT_THRESHOLD" || { EXIT=1; }
+ echo "Finding Stylelint violations and storing report..."
+ run_paver_quality run_stylelint || { EXIT=1; }
+ echo "Running xss linter report."
+ run_paver_quality run_xsslint -t "$XSSLINT_THRESHOLDS" || { EXIT=1; }
+ echo "Running PII checker on all Django models..."
+ run_paver_quality run_pii_check || { EXIT=1; }
+ echo "Running reserved keyword checker on all Django models..."
+ run_paver_quality check_keywords || { EXIT=1; }
+
+ # Need to create an empty test result so the post-build
+ # action doesn't fail the build.
+ emptyxunit "stub"
+ exit "$EXIT"
+ ;;
+
+ "js-unit")
+ $TOX paver test_js --coverage
+ $TOX paver diff_coverage
+ ;;
+
+ "pavelib-js-unit")
+ EXIT=0
+ $TOX paver test_js --coverage --skip-clean || { EXIT=1; }
+ paver test_lib --skip-clean $PAVER_ARGS || { EXIT=1; }
+
+ # This is to ensure that the build status of the shard is properly set.
+ # Because we run two paver commands in a row, we need to capture their
+ # return codes in order to exit with a non-zero code if either of them
+ # fails. The || clauses are there because otherwise (with set -e) a
+ # failing paver command would exit this script immediately, skipping the
+ # second command in this case statement. Instead, EXIT is set to 1 if
+ # either paver command fails, and we use its value as the exit code.
+ # EXIT is initialized to 0 above, so if neither command fails the script
+ # exits successfully.
+ exit "$EXIT"
+ ;;
+esac