From b26dbbaa4d429dad6083da962ae4df5b2689b141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tiziano=20M=C3=BCller?= Date: Mon, 9 Dec 2019 15:48:24 +0100 Subject: [PATCH 01/54] (re-)implement coverage reports via codecov.io fixes #3602 --- .github/workflows/ci.yml | 9 +++++++++ .github/workflows/tests.sh | 8 ++++++++ docs/requirements_for_rtd.txt | 1 + setup.json | 1 + 4 files changed, 19 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4ffd8c5940..4b881a08e8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -138,6 +138,15 @@ jobs: run: .github/workflows/tests.sh + - name: Upload coverage report + uses: codecov/codecov-action@v1.0.5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + name: aiida-unittests-${{ matrix.backend }} + flags: ${{ matrix.backend }} + file: ./coverage.xml + fail_ci_if_error: true + verdi: runs-on: ubuntu-latest diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh index 3d639e8f88..ce4bf6a46b 100755 --- a/.github/workflows/tests.sh +++ b/.github/workflows/tests.sh @@ -10,6 +10,14 @@ verdi daemon start 4 verdi -p test_${AIIDA_TEST_BACKEND} run .ci/test_daemon.py verdi daemon stop +# Make sure +# - omissions are taken from the top-level .coveragerc +# - coverage is reported as XML and in terminal including +# the numbers/ranges of lines which are not covered, +# - coverage results of multiple tests are collected, +# - the base directory for files to consider is aiida/ +export PYTEST_ADDOPTS="--cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" + AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest pytest --noconftest .ci/test_test_manager.py diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index 6ac780528c..733dcd1a00 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -31,6 +31,7 @@ pygments~=2.5 pymatgen>=2019.7.2 pymysql~=0.9.3 pyparsing~=2.4 +pytest-cov~=2.7 pytest-timeout~=1.3 pytest~=5.3 python-dateutil~=2.8 diff --git a/setup.json b/setup.json index 9b1eecbebb..e07f10610f 100644 --- a/setup.json +++ b/setup.json @@ -92,6 +92,7 @@ "pgtest~=1.3,>=1.3.1", "pytest~=5.3", "pytest-timeout~=1.3", + "pytest-cov~=2.7", "sqlalchemy-diff~=0.1.3" ], "dev_precommit": [ From 03baa9830a75356b3e30c02f6ef2d7a2acd2c22c Mon Sep 17 00:00:00 2001 From: Casper Welzel Andersen Date: Mon, 9 Dec 2019 18:18:06 +0100 Subject: [PATCH 02/54] Update README coverage badge Only upload coverage for Py3.5 --- .github/workflows/ci.yml | 5 +++-- .github/workflows/tests.sh | 2 +- README.md | 2 +- docs/requirements_for_rtd.txt | 1 + setup.json | 1 + 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4b881a08e8..82ab4a0fff 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,13 +139,14 @@ jobs: .github/workflows/tests.sh - name: Upload coverage report + if: matrix.python-version == 3.5 uses: codecov/codecov-action@v1.0.5 with: token: ${{ secrets.CODECOV_TOKEN }} - name: aiida-unittests-${{ matrix.backend }} + name: aiida-pytests-py3.5-${{ matrix.backend }} flags: ${{ matrix.backend }} file: ./coverage.xml - fail_ci_if_error: true + fail_ci_if_error: false verdi: diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh index ce4bf6a46b..74d3906b7f 100755 --- a/.github/workflows/tests.sh +++ b/.github/workflows/tests.sh @@ -16,7 +16,7 @@ verdi daemon stop # the numbers/ranges 
of lines which are not covered, # - coverage results of multiple tests are collected, # - the base directory for files to consider is aiida/ -export PYTEST_ADDOPTS="--cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" +export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest diff --git a/README.md b/README.md index 41aa785a5c..a3d1cf98ad 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ AiiDA (www.aiida.net) is a workflow manager for computational science with a str |-----|----------------------------------------------------------------------------| |Latest release| [![PyPI version](https://badge.fury.io/py/aiida-core.svg)](https://badge.fury.io/py/aiida-core) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/aiida-core.svg?style=flat)](https://anaconda.org/conda-forge/aiida-core) [![PyPI pyversions](https://img.shields.io/pypi/pyversions/aiida-core.svg)](https://pypi.python.org/pypi/aiida-core/) | |Getting help| [![Docs status](https://readthedocs.org/projects/aiida-core/badge)](http://aiida-core.readthedocs.io/) [![Google Group](https://img.shields.io/badge/-Google%20Group-lightgrey.svg)](https://groups.google.com/forum/#!forum/aiidausers) -|Build status| [![Build Status](https://travis-ci.org/aiidateam/aiida-core.svg?branch=develop)](https://travis-ci.org/aiidateam/aiida-core) [![Coverage Status](https://coveralls.io/repos/github/aiidateam/aiida-core/badge.svg?branch=develop)](https://coveralls.io/github/aiidateam/aiida-core?branch=develop) | +|Build status| [![Build Status](https://travis-ci.org/aiidateam/aiida-core.svg?branch=develop)](https://travis-ci.org/aiidateam/aiida-core) [![Coverage Status](https://codecov.io/gh/aiidateam/aiida-core/branch/develop/graph/badge.svg)](https://codecov.io/gh/aiidateam/aiida-core) | |Activity| [![PyPI-downloads](https://img.shields.io/pypi/dm/aiida-core.svg?style=flat)](https://pypistats.org/packages/aiida-core) [![Commit Activity](https://img.shields.io/github/commit-activity/m/aiidateam/aiida-core.svg)](https://github.com/aiidateam/aiida-core/pulse) |Community| [![Affiliated with NumFOCUS](https://img.shields.io/badge/NumFOCUS-affiliated%20project-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org/sponsored-projects/affiliated-projects) [![Twitter](https://img.shields.io/twitter/follow/aiidateam.svg?style=social&label=Follow)](https://twitter.com/aiidateam) diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index 733dcd1a00..e245911c86 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -8,6 +8,7 @@ click-completion~=0.5.1 click-config-file~=0.5.0 click-spinner~=0.1.8 click~=7.0 +coverage<5.0 django~=2.2 docutils==0.15.2 ete3~=3.1 diff --git a/setup.json b/setup.json index e07f10610f..26460a9e6a 100644 --- a/setup.json +++ b/setup.json @@ -93,6 +93,7 @@ "pytest~=5.3", "pytest-timeout~=1.3", "pytest-cov~=2.7", + "coverage<5.0", "sqlalchemy-diff~=0.1.3" ], "dev_precommit": [ From 8156bb14a6da241d4e9ad7d2508ab80bb1a7df22 Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Tue, 3 Mar 2020 12:03:32 +0100 Subject: [PATCH 03/54] Remove omissions from coveragerc and reorder tests --- .coveragerc | 1 - .github/workflows/tests.sh | 28 +++++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git 
a/.coveragerc b/.coveragerc index a90fc09bc8..b27dfc7b30 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,6 +1,5 @@ [run] source = aiida -omit = aiida/test*.py,aiida/*/test*.py,aiida/*/*/test*.py,aiida/*/*/*/test*.py,aiida/*/*/*/*/test*.py,aiida/*/*/*/*/*/test*.py,aiida/*/migrations/*.py,aiida/*/migrations/versions/*.py [html] directory = .ci/coverage/html diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh index 74d3906b7f..169ca71525 100755 --- a/.github/workflows/tests.sh +++ b/.github/workflows/tests.sh @@ -3,24 +3,26 @@ set -ev # Make sure the folder containing the workchains is in the python path before the daemon is started export PYTHONPATH="${PYTHONPATH}:${GITHUB_WORKSPACE}/.ci" -# show timings of tests -export PYTEST_ADDOPTS=" --durations=0" +# pytest options: +# - report timings of tests +# - pytest-cov configuration taken from top-level .coveragerc +# - coverage is reported as XML and in terminal, +# including the numbers/ranges of lines which are not covered +# - coverage results of multiple tests are collected +# - coverage is reported on files in aiida/ +export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --durations=0 --cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" + +# daemon tests verdi daemon start 4 verdi -p test_${AIIDA_TEST_BACKEND} run .ci/test_daemon.py verdi daemon stop -# Make sure -# - omissions are taken from the top-level .coveragerc -# - coverage is reported as XML and in terminal including -# the numbers/ranges of lines which are not covered, -# - coverage results of multiple tests are collected, -# - the base directory for files to consider is aiida/ -export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" - -AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests -AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest +# tests for the testing infrastructure pytest --noconftest .ci/test_test_manager.py pytest --noconftest .ci/test_profile_manager.py +python .ci/test_plugin_testcase.py # uses custom unittest test runner +AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest -python .ci/test_plugin_testcase.py +# main aiida-core tests +AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests From ba83912416dc06b367ea19c82e13119939c267d1 Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Wed, 4 Mar 2020 15:05:39 +0100 Subject: [PATCH 04/54] Build status badge: move to github actions (#3825) Replace outdated travis build status badge with github actions one. 
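[Editor's note on PATCH 03] The PYTEST_ADDOPTS string exported in tests.sh above chains several pytest-cov options. Below is a minimal Python sketch of the same accumulation, assuming the suite paths ('tests', '.ci/pytest') and the package name ('aiida') from that script:

# Sketch only: --cov-append makes every pytest run add to the same coverage
# data, so the final coverage.xml reports the union of all runs.
import subprocess

COV_OPTS = ['--cov-config=.coveragerc', '--cov-report', 'xml', '--cov-report', 'term-missing', '--cov-append', '--cov=aiida']
for suite in ['tests', '.ci/pytest']:
    subprocess.run(['pytest', *COV_OPTS, suite], check=True)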
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a3d1cf98ad..179a9f03e7 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ AiiDA (www.aiida.net) is a workflow manager for computational science with a str |-----|----------------------------------------------------------------------------| |Latest release| [![PyPI version](https://badge.fury.io/py/aiida-core.svg)](https://badge.fury.io/py/aiida-core) [![conda-forge](https://img.shields.io/conda/vn/conda-forge/aiida-core.svg?style=flat)](https://anaconda.org/conda-forge/aiida-core) [![PyPI pyversions](https://img.shields.io/pypi/pyversions/aiida-core.svg)](https://pypi.python.org/pypi/aiida-core/) | |Getting help| [![Docs status](https://readthedocs.org/projects/aiida-core/badge)](http://aiida-core.readthedocs.io/) [![Google Group](https://img.shields.io/badge/-Google%20Group-lightgrey.svg)](https://groups.google.com/forum/#!forum/aiidausers) -|Build status| [![Build Status](https://travis-ci.org/aiidateam/aiida-core.svg?branch=develop)](https://travis-ci.org/aiidateam/aiida-core) [![Coverage Status](https://codecov.io/gh/aiidateam/aiida-core/branch/develop/graph/badge.svg)](https://codecov.io/gh/aiidateam/aiida-core) | +|Build status| [![Build Status](https://github.com/aiidateam/aiida-core/workflows/aiida-core/badge.svg)](https://github.com/aiidateam/aiida-core/actions) [![Coverage Status](https://codecov.io/gh/aiidateam/aiida-core/branch/develop/graph/badge.svg)](https://codecov.io/gh/aiidateam/aiida-core) | |Activity| [![PyPI-downloads](https://img.shields.io/pypi/dm/aiida-core.svg?style=flat)](https://pypistats.org/packages/aiida-core) [![Commit Activity](https://img.shields.io/github/commit-activity/m/aiidateam/aiida-core.svg)](https://github.com/aiidateam/aiida-core/pulse) |Community| [![Affiliated with NumFOCUS](https://img.shields.io/badge/NumFOCUS-affiliated%20project-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org/sponsored-projects/affiliated-projects) [![Twitter](https://img.shields.io/twitter/follow/aiidateam.svg?style=social&label=Follow)](https://twitter.com/aiidateam) From ed7a0b60ab7d2132fb521fa800685f66413febb8 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 4 Mar 2020 22:35:29 +0100 Subject: [PATCH 05/54] Add `prepend_text` and `append_text` to `aiida_local_code_factory` pytest fixture (#3831) --- aiida/manage/tests/pytest_fixtures.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/aiida/manage/tests/pytest_fixtures.py b/aiida/manage/tests/pytest_fixtures.py index 5e79c30bdf..d5c9c61837 100644 --- a/aiida/manage/tests/pytest_fixtures.py +++ b/aiida/manage/tests/pytest_fixtures.py @@ -133,13 +133,17 @@ def test_1(aiida_local_code_factory): :rtype: object """ - def get_code(entry_point, executable, computer=aiida_localhost): + def get_code(entry_point, executable, computer=aiida_localhost, prepend_text=None, append_text=None): """Get local code. Sets up code for given entry point on given computer. :param entry_point: Entry point of calculation plugin :param executable: name of executable; will be searched for in local system PATH. :param computer: (local) AiiDA computer + :param prepend_text: a string of code that will be put in the scheduler script before the + execution of the code. + :param append_text: a string of code that will be put in the scheduler script after the + execution of the code. 
:return: The code node :rtype: :py:class:`aiida.orm.Code` """ @@ -158,6 +162,10 @@ def get_code(entry_point, executable, computer=aiida_localhost): input_plugin_name=entry_point, remote_computer_exec=[computer, executable_path], ) + if prepend_text is not None: + code.set_prepend_text(prepend_text) + if append_text is not None: + code.set_append_text(append_text) code.label = executable return code.store() From 1dde8ea538b4c5f7193d07cd8ee3f621343e54e0 Mon Sep 17 00:00:00 2001 From: Casper Welzel Andersen Date: Mon, 9 Mar 2020 10:05:31 +0100 Subject: [PATCH 06/54] Update coverage upload step Revert to the continuously updated v1 tag for the action. Fail CI if failing to upload coverage, since we can upload coverage for all PRs. Remove secret token. It is now possible to upload coverage reports to codecov without a token (from public GitHub repositories only). Upload coverage only if: - Python version: 3.5; AND - Is a push in or PR against repository 'aiidateam/aiida-core'. --- .github/workflows/ci.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82ab4a0fff..e4d2b5307b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,14 +139,13 @@ jobs: .github/workflows/tests.sh - name: Upload coverage report - if: matrix.python-version == 3.5 - uses: codecov/codecov-action@v1.0.5 + if: matrix.python-version == 3.5 && github.repository == 'aiidateam/aiida-core' + uses: codecov/codecov-action@v1 with: - token: ${{ secrets.CODECOV_TOKEN }} name: aiida-pytests-py3.5-${{ matrix.backend }} flags: ${{ matrix.backend }} file: ./coverage.xml - fail_ci_if_error: false + fail_ci_if_error: true verdi: From 8e309352e5085012124a18586129c26157a25a68 Mon Sep 17 00:00:00 2001 From: Casper Welzel Andersen Date: Mon, 9 Mar 2020 10:43:34 +0100 Subject: [PATCH 07/54] Use actions/checkout@master This is an updated and more robust version of the checkout action. See https://github.com/actions/checkout for more information. Originally, v2 was attempted, but this clashes with being able to upload coverage, hence `master` is used. 
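[Editor's note on PATCH 05] A hypothetical test using the two new keyword arguments of the aiida_local_code_factory fixture might look as follows; the entry point and shell fragments are placeholders, and get_prepend_text() is the corresponding accessor on aiida.orm.Code:

# Sketch only: create a code with scheduler-script fragments and check one back.
def test_code_with_wrapper_scripts(aiida_local_code_factory):
    code = aiida_local_code_factory(
        entry_point='arithmetic.add',  # placeholder entry point
        executable='bash',
        prepend_text='export OMP_NUM_THREADS=1',
        append_text='echo "done"',
    )
    assert code.get_prepend_text() == 'export OMP_NUM_THREADS=1'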
--- .github/workflows/ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4d2b5307b..f3b14a3051 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - name: Set up Python 3.7 uses: actions/setup-python@v1 @@ -29,7 +29,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - name: Set up Python 3.7 uses: actions/setup-python@v1 @@ -60,7 +60,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - name: Set up Python 3.7 uses: actions/setup-python@v1 @@ -95,7 +95,7 @@ jobs: backend: ['django', 'sqlalchemy'] steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - uses: CasperWA/postgresql-action@v1.2 with: postgresql version: '10' @@ -153,7 +153,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - name: Set up Python 3.7 uses: actions/setup-python@v1 @@ -175,7 +175,7 @@ jobs: timeout-minutes: 30 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@master - name: Install docker run: | From c61ef49d0acf4609d3736e3cd1e703578edbb9fa Mon Sep 17 00:00:00 2001 From: Simon Adorf Date: Mon, 9 Mar 2020 17:58:34 +0100 Subject: [PATCH 08/54] Pin the Click version to 7.0. The recent update to Click (7.1) breaks our tests and this CI workflow, because the help output formatting was slightly changed. Until we have resolved other issues we should pin the Click version to 7.0. --- docs/requirements_for_rtd.txt | 2 +- environment.yml | 2 +- setup.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index e245911c86..bcde4eb181 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -7,7 +7,7 @@ circus~=0.16.1 click-completion~=0.5.1 click-config-file~=0.5.0 click-spinner~=0.1.8 -click~=7.0 +click==7.0 coverage<5.0 django~=2.2 docutils==0.15.2 diff --git a/environment.yml b/environment.yml index 249c93a130..6b8cdddbe8 100644 --- a/environment.yml +++ b/environment.yml @@ -13,7 +13,7 @@ dependencies: - click-completion~=0.5.1 - click-config-file~=0.5.0 - click-spinner~=0.1.8 -- click~=7.0 +- click==7.0 - django~=2.2 - ete3~=3.1 - python-graphviz~=0.13 diff --git a/setup.json b/setup.json index 26460a9e6a..d3b77f0c8b 100644 --- a/setup.json +++ b/setup.json @@ -26,7 +26,7 @@ "click-completion~=0.5.1", "click-config-file~=0.5.0", "click-spinner~=0.1.8", - "click~=7.0", + "click==7.0", "django~=2.2", "ete3~=3.1", "graphviz~=0.13", From 7e99c068faeabe7ff8fdc9168f2d70dad247a902 Mon Sep 17 00:00:00 2001 From: Carl Simon Adorf Date: Mon, 9 Mar 2020 19:59:39 +0100 Subject: [PATCH 09/54] Revise dependency management workflow (#3771) Revise dependency management workflow in accordance with AEP 002. * Use .github/CODEOWNERS file to trigger reviews from dependency-manager team. * All jobs related to testing the installation of aiida-core with different tools (pip, conda) on different python versions and backends are moved into the dedicated 'install-tests' workflow. * The test jobs run as part of the continuous integration on every push are executed against pinned environments (e.g. 'requirements/requirements-py-3.7.txt').
* The 'install-tests' workflow is triggered for the 'master', 'develop', and release branches ('release/*') as well as branches related to dependency-management (prefixed with 'dm/'). In addition, the workflow is triggered nightly to ensure that changes within the Python ecosystem that break our toolchain and possibly even tests, are automatically detected within 24 hours. * A new 'update-requirements' workflow is run on release branches and automatically creates a pull request with revised versions of requirements files. * The issues caused by pymatgen's use of setup_requires and previously addressed by installing numpy prior to aiida by default, are now addressed by specifically checking the setuptools version for Python 3.5 only. We fail the installation process with a descriptive message to the user in case that the installed setuptools version is insufficient. * All dependency-management related utility functions, such as generating and validating non-authoritative dependency specification files (e.g. 'environment.yml') are concentrated into the 'util/dependency_management.py' scripting file. --- .github/CODEOWNERS | 10 + .github/workflows/ci.yml | 38 +-- .github/workflows/conda.sh | 12 - .github/workflows/test-install.yml | 153 ++++++++++ .github/workflows/update-requirements.yml | 104 +++++++ .pre-commit-config.yaml | 21 +- docs/requirements_for_rtd.txt | 12 +- docs/update_req_for_rtd.py | 53 ---- environment.yml | 9 +- pyproject.toml | 3 +- requirements/requirements-py-3.5.txt | 151 ++++++++++ requirements/requirements-py-3.6.txt | 150 ++++++++++ requirements/requirements-py-3.7.txt | 149 ++++++++++ requirements/requirements-py-3.8.txt | 147 ++++++++++ setup.py | 15 + utils/dependency_management.py | 338 ++++++++++++++++++++++ utils/update_dependencies.py | 142 --------- utils/validate_consistency.py | 90 ------ 18 files changed, 1253 insertions(+), 344 deletions(-) create mode 100644 .github/CODEOWNERS delete mode 100755 .github/workflows/conda.sh create mode 100644 .github/workflows/test-install.yml create mode 100644 .github/workflows/update-requirements.yml delete mode 100644 docs/update_req_for_rtd.py create mode 100644 requirements/requirements-py-3.5.txt create mode 100644 requirements/requirements-py-3.6.txt create mode 100644 requirements/requirements-py-3.7.txt create mode 100644 requirements/requirements-py-3.8.txt create mode 100755 utils/dependency_management.py delete mode 100755 utils/update_dependencies.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..e908ba2fc9 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,10 @@ +# All files related to dependency management are owned by the +# currently active dependency manager (DM) to trigger an automatic review +# request from the DM upon changes. 
Please see AEP-002 for details: +# https://github.com/aiidateam/AEP/tree/master/002_dependency_management +setup.* @aiidateam/dependency-manager +environment.yml @aiidateam/dependency-manager +requirements*.txt @aiidateam/dependency-manager +pyproject.toml @aiidateam/dependency-manager +utils/dependency_management.py @aiidateam/dependency-manager +.github/workflows/dm.yml @aiidateam/dependency-manager diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f3b14a3051..bf8eae364b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,28 +1,9 @@ -name: aiida-core +name: continuous-integration on: [push, pull_request] jobs: - conda: - - runs-on: ubuntu-latest - timeout-minutes: 30 - - steps: - - uses: actions/checkout@master - - - name: Set up Python 3.7 - uses: actions/setup-python@v1 - with: - python-version: 3.7 - - - name: Conda install - env: - PYTHON_VERSION: 3.7 - run: - .github/workflows/conda.sh - docs: runs-on: ubuntu-latest @@ -91,8 +72,8 @@ jobs: strategy: fail-fast: false matrix: - python-version: [3.5, 3.8] backend: ['django', 'sqlalchemy'] + python-version: [3.5, 3.8] steps: - uses: actions/checkout@master @@ -119,12 +100,21 @@ jobs: sudo apt install postgresql-10 rabbitmq-server graphviz sudo systemctl status rabbitmq-server.service - - name: Install python dependencies + - name: Upgrade pip run: | pip install --upgrade pip - pip install numpy==1.17.4 - pip install -e .[atomic_tools,docs,notebook,rest,testing] + pip --version + + - name: upgrade setuptools [py35] + if: matrix.python-version == 3.5 + run: pip install -I setuptools==38.2.0 # Minimally required version for Python 3.5. + + - name: Install aiida-core + run: | + pip install -r requirements/requirements-py-${{ matrix.python-version }}.txt + pip install --no-deps -e . reentry scan + pip freeze - name: Setup environment env: diff --git a/.github/workflows/conda.sh b/.github/workflows/conda.sh deleted file mode 100755 index 5ad1b6628b..0000000000 --- a/.github/workflows/conda.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -ev - -wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; -bash miniconda.sh -b -p $HOME/miniconda -export PATH="$HOME/miniconda/bin:$PATH" -hash -r -conda config --set always_yes yes --set changeps1 no - -conda update -q conda -conda info -a -conda env create -f environment.yml -n test-environment diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml new file mode 100644 index 0000000000..8c3238ddfa --- /dev/null +++ b/.github/workflows/test-install.yml @@ -0,0 +1,153 @@ +name: test-install + +on: + push: + branch: + - master + - develop + - release/* + - dm/* + paths: + - 'setup.*' + - 'environment.yml' + - 'requirements*.txt' + - 'pyproject.toml' + - '.github/workflows/test-install.yml' + schedule: + - cron: '30 02 * * *' # nightly build + +jobs: + + validate-dependency-specification: + # Note: The specification is also validated by the pre-commit hook. 
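+ # Editor's note, not part of the original patch: the check below can be reproduced locally with + # 'pip install click~=7.0 pyyaml~=5.1 toml' followed by + # 'python ./utils/dependency_management.py validate-all'.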
+ + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Install dm-script dependencies + run: pip install click~=7.0 pyyaml~=5.1 toml + + - name: Validate + run: python ./utils/dependency_management.py validate-all + + install-with-pip: + + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Pip install + run: | + python -m pip install -e . + python -m pip freeze + + - name: Test importing aiida + run: + python -c "import aiida" + + install-with-conda: + + runs-on: ubuntu-latest + name: install-with-conda + + timeout-minutes: 5 + + steps: + - uses: actions/checkout@v2 + + - name: Setup Conda + uses: s-weigand/setup-conda@v1 + with: + update-conda: true + python-version: 3.7 + - run: conda --version + - run: python --version + - run: which python + + - name: Create conda environment + run: | + conda env create -f environment.yml -n test-environment + source activate test-environment + python -m pip install --no-deps -e . + + - name: Test importing aiida + run: | + source activate test-environment + python -c "import aiida" + + tests: + + needs: [install-with-pip, install-with-conda] + runs-on: ubuntu-latest + timeout-minutes: 30 + + strategy: + fail-fast: false + matrix: + python-version: [3.5, 3.6, 3.7, 3.8] + backend: ['django', 'sqlalchemy'] + + steps: + - uses: actions/checkout@v2 + - uses: CasperWA/postgresql-action@v1.2 + with: + postgresql version: '10' + postgresql db: test_${{ matrix.backend }} + postgresql user: postgres + postgresql password: '' + postgresql auth: trust + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install system dependencies + run: | + wget -O - "https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc" | sudo apt-key add - + echo 'deb https://dl.bintray.com/rabbitmq-erlang/debian bionic erlang' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + echo 'deb https://dl.bintray.com/rabbitmq/debian bionic main' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + sudo rm -f /etc/apt/sources.list.d/dotnetdev.list /etc/apt/sources.list.d/microsoft-prod.list + sudo apt update + sudo apt install postgresql-10 rabbitmq-server graphviz + sudo systemctl status rabbitmq-server.service + + - run: pip install --upgrade pip + + - name: upgrade setuptools [py35] + if: matrix.python-version == 3.5 + run: pip install -I setuptools==38.2.0 + + - name: Install aiida-core + run: | + pip install -e .[atomic_tools,docs,notebook,rest,testing] + reentry scan + + - run: pip freeze + + - name: Setup AiiDA environment + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/setup.sh + + - name: Run test suite + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/tests.sh diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml new file mode 100644 index 0000000000..7283fe5d02 --- /dev/null +++ b/.github/workflows/update-requirements.yml @@ -0,0 +1,104 @@ +name: update-requirements + +on: + push: + branch: + - release/* + paths: + - 'setup.json' + - '.github/workflows/update-requirements.yml' + +jobs: + + tests: + + runs-on: ubuntu-latest + timeout-minutes: 30 + + strategy: 
+ fail-fast: false + matrix: + backend: ['django', 'sqlalchemy'] + python-version: [3.5, 3.6, 3.7, 3.8] + + steps: + - uses: actions/checkout@v1 + - uses: CasperWA/postgresql-action@v1.2 + with: + postgresql version: '10' + postgresql db: test_${{ matrix.backend }} + postgresql user: postgres + postgresql password: '' + postgresql auth: trust + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install system dependencies + run: | + wget -O - "https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc" | sudo apt-key add - + echo 'deb https://dl.bintray.com/rabbitmq-erlang/debian bionic erlang' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + echo 'deb https://dl.bintray.com/rabbitmq/debian bionic main' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + sudo rm -f /etc/apt/sources.list.d/dotnetdev.list /etc/apt/sources.list.d/microsoft-prod.list + sudo apt update + sudo apt install postgresql-10 rabbitmq-server graphviz + sudo systemctl status rabbitmq-server.service + + - run: pip install --upgrade pip + + - name: upgrade setuptools [py35] + if: matrix.python-version == 3.5 + run: pip install -I setuptools==38.2.0 + + - name: Install aiida-core + run: | + pip install -e .[atomic_tools,docs,notebook,rest,testing] + reentry scan + + - name: Setup environment + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/setup.sh + + - name: Run test suite + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/tests.sh + + - name: Freeze test environment + run: pip freeze | sed '1d' | tee requirements-py-${{ matrix.python-version }}.txt + + # Add python-version specific requirements file to the requirements.txt artifact. + # Will be used in the next job to create a PR in case they are different from the current version. + - uses: actions/upload-artifact@v1 + if: matrix.backend == 'django' # The requirements are identical between backends. 
+ with: + name: requirements.txt + path: requirements-py-${{ matrix.python-version }}.txt + + update-requirements: + + needs: tests + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Download requirements.txt files + uses: actions/download-artifact@v1 + with: + name: requirements.txt + path: requirements + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v2 + with: + commit-message: "Update requirements.txt" + token: ${{ secrets.GITHUB_TOKEN }} + title: "Update requirements.txt" + team-reviewers: dependency-manager + branch: "dm/update-requirements.txt" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 06e50b4c5f..39bd257146 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -111,38 +111,39 @@ entry: prospector - id: rtd-requirements - name: Requirements for RTD - entry: python ./docs/update_req_for_rtd.py --pre-commit + name: Validate docs/requirements_for_rtd.txt + entry: python ./utils/dependency_management.py validate-rtd-reqs language: system files: >- (?x)^( setup.json| setup.py| + utils/dependency_management.py| docs/requirements_for_rtd.txt| - docs/update_req_for_rtd.py| )$ pass_filenames: false - id: pyproject - name: Validating pyproject.toml - entry: python ./utils/validate_consistency.py toml + name: Validate pyproject.toml + entry: python ./utils/dependency_management.py validate-pyproject-toml language: system files: >- (?x)^( setup.json| setup.py| - utils/validate_consistency.py| + utils/dependency_management.py| + pyproject.toml )$ pass_filenames: false - - id: conda - name: Validating environment.yml - entry: python ./utils/validate_consistency.py conda + - id: dependencies + name: Validate environment.yml + entry: python ./utils/dependency_management.py validate-environment-yml language: system files: >- (?x)^( setup.json| setup.py| - utils/validate_consistency.py| + utils/dependency_management.py| environment.yml| )$ pass_filenames: false diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index bcde4eb181..4336f2f21c 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -19,15 +19,15 @@ graphviz~=0.13 ipython~=7.0 jinja2~=2.10 kiwipy[rmq]~=0.5.1 -numpy~=1.17,<1.18 +numpy<1.18,~=1.17 paramiko~=2.6 pg8000~=1.13 -pgtest~=1.3,>=1.3.1 +pgtest>=1.3.1,~=1.3 pika~=1.1 plumpy~=0.14.5 psutil~=5.6 -psycopg2-binary~=2.8,>=2.8.3 -pyblake2~=1.1; python_version<'3.6' +psycopg2-binary>=2.8.3,~=2.8 +pyblake2~=1.1; python_version < "3.6" pygments~=2.5 pymatgen>=2019.7.2 pymysql~=0.9.3 @@ -40,7 +40,7 @@ python-memcached~=1.59 pytz~=2019.3 pyyaml~=5.1.2 reentry~=1.3 -seekpath~=1.9,>=1.9.3 +seekpath>=1.9.3,~=1.9 simplejson~=3.16 spglib~=1.14 sphinx-rtd-theme~=0.4.3 @@ -49,7 +49,7 @@ sphinxcontrib-details-directive~=0.1.0 sphinx~=2.2 sqlalchemy-diff~=0.1.3 sqlalchemy-utils~=0.34.2 -sqlalchemy~=1.3,>=1.3.10 +sqlalchemy>=1.3.10,~=1.3 tabulate~=0.8.5 tornado<5.0 tzlocal~=2.0 diff --git a/docs/update_req_for_rtd.py b/docs/update_req_for_rtd.py deleted file mode 100644 index 4322a0fd4a..0000000000 --- a/docs/update_req_for_rtd.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. 
# -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -""" -Whenever the requirements in ../setup.json are updated, run -also this script to update the requirements for Read the Docs. -""" - -import os -import json -import click - - -@click.command() -@click.option('--pre-commit', is_flag=True) -def update_req_for_rtd(pre_commit): - """Update the separate requirements file for Read the Docs""" - docs_dir = os.path.abspath(os.path.dirname(__file__)) - root_dir = os.path.join(docs_dir, os.pardir) - - with open(os.path.join(root_dir, 'setup.json'), 'r') as info: - setup_json = json.load(info) - - extras = setup_json['extras_require'] - reqs = set(extras['testing'] + extras['docs'] + extras['rest'] + extras['atomic_tools'] + - setup_json['install_requires']) - reqs_str = '\n'.join(sorted(reqs)) - - basename = 'requirements_for_rtd.txt' - - # pylint: disable=bad-continuation - with open(os.path.join(docs_dir, basename), 'w') as reqs_file: - reqs_file.write(reqs_str) - - click.echo("File '{}' written.".format(basename)) - - if pre_commit: - msg = 'Some requirements for Read the Docs have changed, {}' - local_help = 'please add the changes and commit again' - travis_help = 'please run aiida/docs/update_req_for_rtd.py locally and commit the changes it makes' - help_msg = msg.format(travis_help if os.environ.get('TRAVIS') else local_help) - click.echo(help_msg, err=True) - - -if __name__ == '__main__': - update_req_for_rtd() # pylint: disable=no-value-for-parameter diff --git a/environment.yml b/environment.yml index 6b8cdddbe8..cfbb126c57 100644 --- a/environment.yml +++ b/environment.yml @@ -2,9 +2,8 @@ --- name: aiida channels: -- defaults - conda-forge -- etetoolkit +- defaults dependencies: - python~=3.7 - aldjemy~=0.9.1 @@ -20,19 +19,19 @@ dependencies: - ipython~=7.0 - jinja2~=2.10 - kiwipy[rmq]~=0.5.1 -- numpy~=1.17,<1.18 +- numpy<1.18,~=1.17 - paramiko~=2.6 - pika~=1.1 - plumpy~=0.14.5 - psutil~=5.6 -- psycopg2~=2.8,>=2.8.3 +- psycopg2>=2.8.3,~=2.8 - python-dateutil~=2.8 - pytz~=2019.3 - pyyaml~=5.1.2 - reentry~=1.3 - simplejson~=3.16 - sqlalchemy-utils~=0.34.2 -- sqlalchemy~=1.3,>=1.3.10 +- sqlalchemy>=1.3.10,~=1.3 - tabulate~=0.8.5 - tornado<5.0 - tzlocal~=2.0 diff --git a/pyproject.toml b/pyproject.toml index a4e1ecf2f4..24eaa8393c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,3 @@ [build-system] -# Minimum requirements for the build system to execute. 
-requires = ["setuptools>=40.8.0", "wheel", "reentry~=1.3"] +requires = [ "setuptools>=40.8.0", "wheel", "reentry~=1.3",] build-backend = "setuptools.build_meta:__legacy__" diff --git a/requirements/requirements-py-3.5.txt b/requirements/requirements-py-3.5.txt new file mode 100644 index 0000000000..3f7eddad62 --- /dev/null +++ b/requirements/requirements-py-3.5.txt @@ -0,0 +1,151 @@ +aiida-export-migration-tests==0.8.0 +alabaster==0.7.12 +aldjemy==0.9.1 +alembic==1.4.1 +aniso8601==8.0.0 +ase==3.19.0 +attrs==19.3.0 +Babel==2.8.0 +backcall==0.1.0 +bcrypt==3.1.7 +bleach==3.1.1 +certifi==2019.11.28 +cffi==1.14.0 +chardet==3.0.4 +circus==0.16.1 +Click==7.0 +click-completion==0.5.2 +click-config-file==0.5.0 +click-spinner==0.1.8 +configobj==5.0.6 +coverage==4.5.4 +cryptography==2.8 +cycler==0.10.0 +decorator==4.4.2 +defusedxml==0.6.0 +Django==2.2.11 +docutils==0.15.2 +entrypoints==0.3 +ete3==3.1.1 +Flask==1.1.1 +Flask-Cors==3.0.8 +Flask-RESTful==0.3.8 +frozendict==1.2 +furl==2.1.0 +future==0.18.2 +graphviz==0.13.2 +idna==2.9 +imagesize==1.2.0 +importlib-metadata==1.5.0 +ipykernel==5.1.4 +ipython==7.9.0 +ipython-genutils==0.2.0 +ipywidgets==7.5.1 +itsdangerous==1.1.0 +jedi==0.16.0 +Jinja2==2.11.1 +jsonschema==3.2.0 +jupyter==1.0.0 +jupyter-client==6.0.0 +jupyter-console==6.1.0 +jupyter-core==4.6.3 +kiwipy==0.5.3 +kiwisolver==1.1.0 +Mako==1.1.2 +MarkupSafe==1.1.1 +matplotlib==3.0.3 +mistune==0.8.4 +monty==3.0.2 +more-itertools==8.2.0 +mpmath==1.1.0 +nbconvert==5.6.1 +nbformat==5.0.4 +networkx==2.4 +notebook==5.7.8 +numpy==1.17.4 +orderedmultidict==1.0.1 +packaging==20.3 +palettable==3.3.0 +pandas==0.25.3 +pandocfilters==1.4.2 +paramiko==2.7.1 +parso==0.6.2 +pathlib2==2.3.5 +pexpect==4.8.0 +pg8000==1.13.2 +pgtest==1.3.2 +pickleshare==0.7.5 +pika==1.1.0 +pluggy==0.13.1 +plumpy==0.14.5 +prometheus-client==0.7.1 +prompt-toolkit==2.0.10 +psutil==5.7.0 +psycopg2-binary==2.8.4 +ptyprocess==0.6.0 +py==1.8.1 +pyblake2==1.1.2 +PyCifRW==4.4.1 +pycparser==2.20 +PyDispatcher==2.0.5 +Pygments==2.5.2 +pymatgen==2019.7.2 +PyMySQL==0.9.3 +PyNaCl==1.3.0 +pyparsing==2.4.6 +pyrsistent==0.15.7 +pytest==5.3.5 +pytest-cov==2.8.1 +pytest-timeout==1.3.4 +python-dateutil==2.8.1 +python-editor==1.0.4 +python-memcached==1.59 +pytz==2019.3 +PyYAML==5.1.2 +pyzmq==19.0.0 +qtconsole==4.7.1 +QtPy==1.9.0 +reentry==1.3.1 +requests==2.23.0 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.0 +scipy==1.4.1 +scramp==1.1.0 +seekpath==1.9.4 +Send2Trash==1.5.0 +shellingham==1.3.2 +shortuuid==0.5.0 +simplejson==3.17.0 +six==1.14.0 +snowballstemmer==2.0.0 +spglib==1.14.1.post0 +Sphinx==2.4.4 +sphinx-rtd-theme==0.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-contentui==0.2.4 +sphinxcontrib-details-directive==0.1.0 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.4 +SQLAlchemy==1.3.13 +sqlalchemy-diff==0.1.3 +SQLAlchemy-Utils==0.34.2 +sqlparse==0.3.1 +sympy==1.5.1 +tabulate==0.8.6 +terminado==0.8.3 +testpath==0.4.4 +topika==0.2.1 +tornado==4.5.3 +traitlets==4.3.3 +tzlocal==2.0.0 +upf-to-json==0.9.2 +urllib3==1.25.8 +wcwidth==0.1.8 +webencodings==0.5.1 +Werkzeug==1.0.0 +widgetsnbextension==3.5.1 +wrapt==1.11.2 +zipp==1.2.0 diff --git a/requirements/requirements-py-3.6.txt b/requirements/requirements-py-3.6.txt new file mode 100644 index 0000000000..f89ca64a6e --- /dev/null +++ b/requirements/requirements-py-3.6.txt @@ -0,0 +1,150 @@ +aiida-export-migration-tests==0.8.0 +alabaster==0.7.12 +aldjemy==0.9.1 +alembic==1.4.1 +aniso8601==8.0.0 
+ase==3.19.0 +attrs==19.3.0 +Babel==2.8.0 +backcall==0.1.0 +bcrypt==3.1.7 +bleach==3.1.1 +certifi==2019.11.28 +cffi==1.14.0 +chardet==3.0.4 +circus==0.16.1 +Click==7.0 +click-completion==0.5.2 +click-config-file==0.5.0 +click-spinner==0.1.8 +configobj==5.0.6 +coverage==4.5.4 +cryptography==2.8 +cycler==0.10.0 +dataclasses==0.7 +decorator==4.4.2 +defusedxml==0.6.0 +Django==2.2.11 +docutils==0.15.2 +entrypoints==0.3 +ete3==3.1.1 +Flask==1.1.1 +Flask-Cors==3.0.8 +Flask-RESTful==0.3.8 +frozendict==1.2 +furl==2.1.0 +future==0.18.2 +graphviz==0.13.2 +idna==2.9 +imagesize==1.2.0 +importlib-metadata==1.5.0 +ipykernel==5.1.4 +ipython==7.13.0 +ipython-genutils==0.2.0 +ipywidgets==7.5.1 +itsdangerous==1.1.0 +jedi==0.16.0 +Jinja2==2.11.1 +jsonschema==3.2.0 +jupyter==1.0.0 +jupyter-client==6.0.0 +jupyter-console==6.1.0 +jupyter-core==4.6.3 +kiwipy==0.5.3 +kiwisolver==1.1.0 +Mako==1.1.2 +MarkupSafe==1.1.1 +matplotlib==3.2.0 +mistune==0.8.4 +monty==3.0.2 +more-itertools==8.2.0 +mpmath==1.1.0 +nbconvert==5.6.1 +nbformat==5.0.4 +networkx==2.4 +notebook==5.7.8 +numpy==1.17.4 +orderedmultidict==1.0.1 +packaging==20.3 +palettable==3.3.0 +pandas==0.25.3 +pandocfilters==1.4.2 +paramiko==2.7.1 +parso==0.6.2 +pexpect==4.8.0 +pg8000==1.13.2 +pgtest==1.3.2 +pickleshare==0.7.5 +pika==1.1.0 +pluggy==0.13.1 +plumpy==0.14.5 +prometheus-client==0.7.1 +prompt-toolkit==3.0.3 +psutil==5.7.0 +psycopg2-binary==2.8.4 +ptyprocess==0.6.0 +py==1.8.1 +PyCifRW==4.4.1 +pycparser==2.20 +PyDispatcher==2.0.5 +Pygments==2.5.2 +pymatgen==2020.3.2 +PyMySQL==0.9.3 +PyNaCl==1.3.0 +pyparsing==2.4.6 +pyrsistent==0.15.7 +pytest==5.3.5 +pytest-cov==2.8.1 +pytest-timeout==1.3.4 +python-dateutil==2.8.1 +python-editor==1.0.4 +python-memcached==1.59 +pytz==2019.3 +PyYAML==5.1.2 +pyzmq==19.0.0 +qtconsole==4.7.1 +QtPy==1.9.0 +reentry==1.3.1 +requests==2.23.0 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.0 +scipy==1.4.1 +scramp==1.1.0 +seekpath==1.9.4 +Send2Trash==1.5.0 +shellingham==1.3.2 +shortuuid==0.5.0 +simplejson==3.17.0 +six==1.14.0 +snowballstemmer==2.0.0 +spglib==1.14.1.post0 +Sphinx==2.4.4 +sphinx-rtd-theme==0.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-contentui==0.2.4 +sphinxcontrib-details-directive==0.1.0 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.4 +SQLAlchemy==1.3.13 +sqlalchemy-diff==0.1.3 +SQLAlchemy-Utils==0.34.2 +sqlparse==0.3.1 +sympy==1.5.1 +tabulate==0.8.6 +terminado==0.8.3 +testpath==0.4.4 +topika==0.2.1 +tornado==4.5.3 +traitlets==4.3.3 +tzlocal==2.0.0 +upf-to-json==0.9.2 +urllib3==1.25.8 +wcwidth==0.1.8 +webencodings==0.5.1 +Werkzeug==1.0.0 +widgetsnbextension==3.5.1 +wrapt==1.11.2 +zipp==3.1.0 diff --git a/requirements/requirements-py-3.7.txt b/requirements/requirements-py-3.7.txt new file mode 100644 index 0000000000..5dbef921c4 --- /dev/null +++ b/requirements/requirements-py-3.7.txt @@ -0,0 +1,149 @@ +aiida-export-migration-tests==0.8.0 +alabaster==0.7.12 +aldjemy==0.9.1 +alembic==1.4.1 +aniso8601==8.0.0 +ase==3.19.0 +attrs==19.3.0 +Babel==2.8.0 +backcall==0.1.0 +bcrypt==3.1.7 +bleach==3.1.1 +certifi==2019.11.28 +cffi==1.14.0 +chardet==3.0.4 +circus==0.16.1 +Click==7.0 +click-completion==0.5.2 +click-config-file==0.5.0 +click-spinner==0.1.8 +configobj==5.0.6 +coverage==4.5.4 +cryptography==2.8 +cycler==0.10.0 +decorator==4.4.2 +defusedxml==0.6.0 +Django==2.2.11 +docutils==0.15.2 +entrypoints==0.3 +ete3==3.1.1 +Flask==1.1.1 +Flask-Cors==3.0.8 +Flask-RESTful==0.3.8 +frozendict==1.2 +furl==2.1.0 
+future==0.18.2 +graphviz==0.13.2 +idna==2.9 +imagesize==1.2.0 +importlib-metadata==1.5.0 +ipykernel==5.1.4 +ipython==7.13.0 +ipython-genutils==0.2.0 +ipywidgets==7.5.1 +itsdangerous==1.1.0 +jedi==0.16.0 +Jinja2==2.11.1 +jsonschema==3.2.0 +jupyter==1.0.0 +jupyter-client==6.0.0 +jupyter-console==6.1.0 +jupyter-core==4.6.3 +kiwipy==0.5.3 +kiwisolver==1.1.0 +Mako==1.1.2 +MarkupSafe==1.1.1 +matplotlib==3.2.0 +mistune==0.8.4 +monty==3.0.2 +more-itertools==8.2.0 +mpmath==1.1.0 +nbconvert==5.6.1 +nbformat==5.0.4 +networkx==2.4 +notebook==5.7.8 +numpy==1.17.4 +orderedmultidict==1.0.1 +packaging==20.3 +palettable==3.3.0 +pandas==0.25.3 +pandocfilters==1.4.2 +paramiko==2.7.1 +parso==0.6.2 +pexpect==4.8.0 +pg8000==1.13.2 +pgtest==1.3.2 +pickleshare==0.7.5 +pika==1.1.0 +pluggy==0.13.1 +plumpy==0.14.5 +prometheus-client==0.7.1 +prompt-toolkit==3.0.3 +psutil==5.7.0 +psycopg2-binary==2.8.4 +ptyprocess==0.6.0 +py==1.8.1 +PyCifRW==4.4.1 +pycparser==2.20 +PyDispatcher==2.0.5 +Pygments==2.5.2 +pymatgen==2020.3.2 +PyMySQL==0.9.3 +PyNaCl==1.3.0 +pyparsing==2.4.6 +pyrsistent==0.15.7 +pytest==5.3.5 +pytest-cov==2.8.1 +pytest-timeout==1.3.4 +python-dateutil==2.8.1 +python-editor==1.0.4 +python-memcached==1.59 +pytz==2019.3 +PyYAML==5.1.2 +pyzmq==19.0.0 +qtconsole==4.7.1 +QtPy==1.9.0 +reentry==1.3.1 +requests==2.23.0 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.0 +scipy==1.4.1 +scramp==1.1.0 +seekpath==1.9.4 +Send2Trash==1.5.0 +shellingham==1.3.2 +shortuuid==0.5.0 +simplejson==3.17.0 +six==1.14.0 +snowballstemmer==2.0.0 +spglib==1.14.1.post0 +Sphinx==2.4.4 +sphinx-rtd-theme==0.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-contentui==0.2.4 +sphinxcontrib-details-directive==0.1.0 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.4 +SQLAlchemy==1.3.13 +sqlalchemy-diff==0.1.3 +SQLAlchemy-Utils==0.34.2 +sqlparse==0.3.1 +sympy==1.5.1 +tabulate==0.8.6 +terminado==0.8.3 +testpath==0.4.4 +topika==0.2.1 +tornado==4.5.3 +traitlets==4.3.3 +tzlocal==2.0.0 +upf-to-json==0.9.2 +urllib3==1.25.8 +wcwidth==0.1.8 +webencodings==0.5.1 +Werkzeug==1.0.0 +widgetsnbextension==3.5.1 +wrapt==1.11.2 +zipp==3.1.0 diff --git a/requirements/requirements-py-3.8.txt b/requirements/requirements-py-3.8.txt new file mode 100644 index 0000000000..d8c8c8686c --- /dev/null +++ b/requirements/requirements-py-3.8.txt @@ -0,0 +1,147 @@ +aiida-export-migration-tests==0.8.0 +alabaster==0.7.12 +aldjemy==0.9.1 +alembic==1.4.1 +aniso8601==8.0.0 +ase==3.19.0 +attrs==19.3.0 +Babel==2.8.0 +backcall==0.1.0 +bcrypt==3.1.7 +bleach==3.1.1 +certifi==2019.11.28 +cffi==1.14.0 +chardet==3.0.4 +circus==0.16.1 +Click==7.0 +click-completion==0.5.2 +click-config-file==0.5.0 +click-spinner==0.1.8 +configobj==5.0.6 +coverage==4.5.4 +cryptography==2.8 +cycler==0.10.0 +decorator==4.4.2 +defusedxml==0.6.0 +Django==2.2.11 +docutils==0.15.2 +entrypoints==0.3 +ete3==3.1.1 +Flask==1.1.1 +Flask-Cors==3.0.8 +Flask-RESTful==0.3.8 +frozendict==1.2 +furl==2.1.0 +future==0.18.2 +graphviz==0.13.2 +idna==2.9 +imagesize==1.2.0 +ipykernel==5.1.4 +ipython==7.13.0 +ipython-genutils==0.2.0 +ipywidgets==7.5.1 +itsdangerous==1.1.0 +jedi==0.16.0 +Jinja2==2.11.1 +jsonschema==3.2.0 +jupyter==1.0.0 +jupyter-client==6.0.0 +jupyter-console==6.1.0 +jupyter-core==4.6.3 +kiwipy==0.5.3 +kiwisolver==1.1.0 +Mako==1.1.2 +MarkupSafe==1.1.1 +matplotlib==3.2.0 +mistune==0.8.4 +monty==3.0.2 +more-itertools==8.2.0 +mpmath==1.1.0 +nbconvert==5.6.1 +nbformat==5.0.4 +networkx==2.4 +notebook==5.7.8 
+numpy==1.17.4 +orderedmultidict==1.0.1 +packaging==20.3 +palettable==3.3.0 +pandas==0.25.3 +pandocfilters==1.4.2 +paramiko==2.7.1 +parso==0.6.2 +pexpect==4.8.0 +pg8000==1.13.2 +pgtest==1.3.2 +pickleshare==0.7.5 +pika==1.1.0 +pluggy==0.13.1 +plumpy==0.14.5 +prometheus-client==0.7.1 +prompt-toolkit==3.0.3 +psutil==5.7.0 +psycopg2-binary==2.8.4 +ptyprocess==0.6.0 +py==1.8.1 +PyCifRW==4.4.1 +pycparser==2.20 +PyDispatcher==2.0.5 +Pygments==2.5.2 +pymatgen==2020.3.2 +PyMySQL==0.9.3 +PyNaCl==1.3.0 +pyparsing==2.4.6 +pyrsistent==0.15.7 +pytest==5.3.5 +pytest-cov==2.8.1 +pytest-timeout==1.3.4 +python-dateutil==2.8.1 +python-editor==1.0.4 +python-memcached==1.59 +pytz==2019.3 +PyYAML==5.1.2 +pyzmq==19.0.0 +qtconsole==4.7.1 +QtPy==1.9.0 +reentry==1.3.1 +requests==2.23.0 +ruamel.yaml==0.16.10 +ruamel.yaml.clib==0.2.0 +scipy==1.4.1 +scramp==1.1.0 +seekpath==1.9.4 +Send2Trash==1.5.0 +shellingham==1.3.2 +shortuuid==0.5.0 +simplejson==3.17.0 +six==1.14.0 +snowballstemmer==2.0.0 +spglib==1.14.1.post0 +Sphinx==2.4.4 +sphinx-rtd-theme==0.4.3 +sphinxcontrib-applehelp==1.0.2 +sphinxcontrib-contentui==0.2.4 +sphinxcontrib-details-directive==0.1.0 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.4 +SQLAlchemy==1.3.13 +sqlalchemy-diff==0.1.3 +SQLAlchemy-Utils==0.34.2 +sqlparse==0.3.1 +sympy==1.5.1 +tabulate==0.8.6 +terminado==0.8.3 +testpath==0.4.4 +topika==0.2.1 +tornado==4.5.3 +traitlets==4.3.3 +tzlocal==2.0.0 +upf-to-json==0.9.2 +urllib3==1.25.8 +wcwidth==0.1.8 +webencodings==0.5.1 +Werkzeug==1.0.0 +widgetsnbextension==3.5.1 +wrapt==1.11.2 diff --git a/setup.py b/setup.py index 8f2372edd5..0cfa973f5f 100644 --- a/setup.py +++ b/setup.py @@ -10,11 +10,26 @@ # pylint: disable=wrong-import-order """Setup script for aiida-core package.""" import json +import sys import os from utils import fastentrypoints # pylint: disable=unused-import from setuptools import setup, find_packages +if (sys.version_info.major, sys.version_info.minor) == (3, 5): + import setuptools + from distutils.version import StrictVersion + + REQUIRED_SETUPTOOLS_VERSION = StrictVersion('38.2.0') + INSTALLED_SETUPTOOLS_VERSION = StrictVersion(setuptools.__version__) + + if INSTALLED_SETUPTOOLS_VERSION < REQUIRED_SETUPTOOLS_VERSION: + raise RuntimeError( + 'The installation of AiiDA with Python version 3.5, requires setuptools>={}; your version: {}'.format( + REQUIRED_SETUPTOOLS_VERSION, INSTALLED_SETUPTOOLS_VERSION + ) + ) + if __name__ == '__main__': THIS_FOLDER = os.path.split(os.path.abspath(__file__))[0] diff --git a/utils/dependency_management.py b/utils/dependency_management.py new file mode 100755 index 0000000000..a86607c672 --- /dev/null +++ b/utils/dependency_management.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Utility CLI to manage dependencies for aiida-core.""" + +import sys +import re +import json +import subprocess +from pathlib import Path +from collections import OrderedDict +from pkg_resources import Requirement + +import click +import yaml +import toml + +ROOT = Path(__file__).resolve().parent.parent # repository root + +SETUPTOOLS_CONDA_MAPPINGS = { + 'psycopg2-binary': 'psycopg2', + 'graphviz': 'python-graphviz', +} + +CONDA_IGNORE = ['pyblake2', r'.*python_version == \"3\.5\"'] + + +class DependencySpecificationError(click.ClickException): + """Indicates an issue in a dependency specification.""" + + +def _load_setup_cfg(): + """Load the setup configuration from the 'setup.json' file.""" + try: + with open(ROOT / 'setup.json') as setup_json_file: + return json.load(setup_json_file) + except json.decoder.JSONDecodeError as error: # pylint: disable=no-member + raise DependencySpecificationError("Error while parsing 'setup.json' file: {}".format(error)) + except FileNotFoundError: + raise DependencySpecificationError("The 'setup.json' file is missing!") + + +def _load_environment_yml(): + """Load the conda environment specification from the 'environment.yml' file.""" + try: + with open(ROOT / 'environment.yml') as file: + return yaml.load(file, Loader=yaml.SafeLoader) + except yaml.error.YAMLError as error: + raise DependencySpecificationError("Error while parsing 'environment.yml':\n{}".format(error)) + except FileNotFoundError as error: + raise DependencySpecificationError(str(error)) + + +def _setuptools_to_conda(req): + """Map package names from setuptools to conda where necessary. + + In case that the same underlying dependency is listed under different names + on PyPI and conda-forge. + """ + + for pattern, replacement in SETUPTOOLS_CONDA_MAPPINGS.items(): + if re.match(pattern, str(req)): + req = Requirement.parse(re.sub(pattern, replacement, str(req))) + break + + # markers are not supported by conda + req.marker = None + + # We need to parse the modified requirement again, to ensure consistency.
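+ # Editor's illustration, values taken from the mappings and environment.yml above: + # 'psycopg2-binary>=2.8.3,~=2.8' becomes 'psycopg2>=2.8.3,~=2.8' and + # 'graphviz~=0.13' becomes 'python-graphviz~=0.13'.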
+ return Requirement.parse(str(req)) + + +@click.group() +def cli(): + """Manage dependencies of the aiida-core package.""" + + +@cli.command('generate-environment-yml') +def generate_environment_yml(): + """Generate 'environment.yml' file.""" + + # needed for ordered dict, see https://stackoverflow.com/a/52621703 + yaml.add_representer( + OrderedDict, + lambda self, data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()), + Dumper=yaml.SafeDumper + ) + + # Read the requirements from 'setup.json' + setup_cfg = _load_setup_cfg() + install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']] + + # python version cannot be overriden from outside environment.yml + # (even if it is not specified at all in environment.yml) + # https://github.com/conda/conda/issues/9506 + conda_requires = ['python~=3.7'] + for req in install_requirements: + if req.name == 'python' or any(re.match(ignore, str(req)) for ignore in CONDA_IGNORE): + continue + conda_requires.append(str(_setuptools_to_conda(req))) + + environment = OrderedDict([ + ('name', 'aiida'), + ('channels', ['conda-forge', 'defaults']), + ('dependencies', conda_requires), + ]) + + with open(ROOT / 'environment.yml', 'w') as env_file: + env_file.write('# Usage: conda env create -n myenvname -f environment.yml\n') + yaml.safe_dump( + environment, env_file, explicit_start=True, default_flow_style=False, encoding='utf-8', allow_unicode=True + ) + + +@cli.command('generate-rtd-reqs') +def generate_requirements_for_rtd(): + """Generate 'docs/requirements_for_rtd.txt' file.""" + + # Read the requirements from 'setup.json' + setup_cfg = _load_setup_cfg() + install_requirements = {Requirement.parse(r) for r in setup_cfg['install_requires']} + for key in ('testing', 'docs', 'rest', 'atomic_tools'): + install_requirements.update({Requirement.parse(r) for r in setup_cfg['extras_require'][key]}) + + # pylint: disable=bad-continuation + with open(ROOT / Path('docs', 'requirements_for_rtd.txt'), 'w') as reqs_file: + reqs_file.write('\n'.join(sorted(map(str, install_requirements)))) + + +@cli.command() +def generate_pyproject_toml(): + """Generate 'pyproject.toml' file.""" + + # Read the requirements from 'setup.json' + setup_cfg = _load_setup_cfg() + install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']] + + for requirement in install_requirements: + if requirement.name == 'reentry': + reentry_requirement = requirement + break + else: + raise DependencySpecificationError("Failed to find reentry requirement in 'setup.json'.") + + pyproject = { + 'build-system': { + 'requires': ['setuptools>=40.8.0', 'wheel', str(reentry_requirement)], + 'build-backend': 'setuptools.build_meta:__legacy__', + } + } + with open(ROOT / 'pyproject.toml', 'w') as file: + toml.dump(pyproject, file) + + +@cli.command() +@click.pass_context +def generate_all(ctx): + """Generate all dependent requirement files.""" + ctx.invoke(generate_environment_yml) + ctx.invoke(generate_requirements_for_rtd) + ctx.invoke(generate_pyproject_toml) + + +@cli.command('validate-environment-yml', help="Validate 'environment.yml'.") +def validate_environment_yml(): # pylint: disable=too-many-branches + """Validate that 'environment.yml' is consistent with 'setup.json'.""" + + # Read the requirements from 'setup.json' and 'environment.yml'. 
+ setup_cfg = _load_setup_cfg() + install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']] + python_requires = Requirement.parse('python' + setup_cfg['python_requires']) + + environment_yml = _load_environment_yml() + try: + assert environment_yml['name'] == 'aiida', "environment name should be 'aiida'." + assert environment_yml['channels'] == [ + 'conda-forge', 'defaults' + ], "channels should be 'conda-forge', 'defaults'." + except AssertionError as error: + raise DependencySpecificationError("Error in 'environment.yml': {}".format(error)) + + try: + conda_dependencies = {Requirement.parse(d) for d in environment_yml['dependencies']} + except TypeError as error: + raise DependencySpecificationError("Error while parsing requirements from 'environment_yml': {}".format(error)) + + # Attempt to find the specification of Python among the 'environment.yml' dependencies. + for dependency in conda_dependencies: + if dependency.name == 'python': # Found the Python dependency specification + conda_python_dependency = dependency + conda_dependencies.remove(dependency) + break + else: # Failed to find Python dependency specification + raise DependencySpecificationError("Did not find specification of Python version in 'environment.yml'.") + + # The Python version specified in 'setup.json' should be listed as trove classifiers. + for spec in conda_python_dependency.specifier: + expected_classifier = 'Programming Language :: Python :: ' + spec.version + if expected_classifier not in setup_cfg['classifiers']: + raise DependencySpecificationError( + "Trove classifier '{}' missing from 'setup.json'.".format(expected_classifier) + ) + + # The Python version should be specified as supported in 'setup.json'. + if not any(spec.version >= other_spec.version for other_spec in python_requires.specifier): + raise DependencySpecificationError( + "Required Python version between 'setup.json' and 'environment.yml' not consistent." + ) + + break + else: + raise DependencySpecificationError("Missing specifier: '{}'.".format(conda_python_dependency)) + + # Check that all requirements specified in the setup.json file are found in the + # conda environment specification. + missing_from_env = set() + for req in install_requirements: + if any(re.match(ignore, str(req)) for ignore in CONDA_IGNORE): + continue # skip explicitly ignored packages + + try: + conda_dependencies.remove(_setuptools_to_conda(req)) + except KeyError: + raise DependencySpecificationError("Requirement '{}' not specified in 'environment.yml'.".format(req)) + + # The only dependency left should be the one for Python itself, which is not part of + # the install_requirements for setuptools. 
+    if len(conda_dependencies) > 0:
+        raise DependencySpecificationError(
+            "The 'environment.yml' file contains dependencies that are missing "
+            "in 'setup.json':\n- {}".format('\n- '.join(map(str, conda_dependencies)))
+        )
+
+    click.secho('Conda dependency specification is consistent.', fg='green')
+
+
+@cli.command('validate-rtd-reqs', help="Validate 'docs/requirements_for_rtd.txt'.")
+def validate_requirements_for_rtd():
+    """Validate that 'docs/requirements_for_rtd.txt' is consistent with 'setup.json'."""
+
+    # Read the requirements from 'setup.json'
+    setup_cfg = _load_setup_cfg()
+    install_requirements = {Requirement.parse(r) for r in setup_cfg['install_requires']}
+    for key in ('testing', 'docs', 'rest', 'atomic_tools'):
+        install_requirements.update({Requirement.parse(r) for r in setup_cfg['extras_require'][key]})
+
+    with open(ROOT / Path('docs', 'requirements_for_rtd.txt')) as reqs_file:
+        reqs = {Requirement.parse(r) for r in reqs_file}
+
+    if reqs != install_requirements:
+        raise DependencySpecificationError("The requirements for RTD are inconsistent with 'setup.json'.")
+
+    click.secho('RTD requirements specification is consistent.', fg='green')
+
+
+@cli.command('validate-pyproject-toml', help="Validate 'pyproject.toml'.")
+def validate_pyproject_toml():
+    """Validate that 'pyproject.toml' is consistent with 'setup.json'."""
+
+    # Read the requirements from 'setup.json'
+    setup_cfg = _load_setup_cfg()
+    install_requirements = [Requirement.parse(r) for r in setup_cfg['install_requires']]
+
+    for requirement in install_requirements:
+        if requirement.name == 'reentry':
+            reentry_requirement = requirement
+            break
+    else:
+        raise DependencySpecificationError("Failed to find reentry requirement in 'setup.json'.")
+
+    try:
+        with open(ROOT / 'pyproject.toml') as file:
+            pyproject = toml.load(file)
+            pyproject_requires = [Requirement.parse(r) for r in pyproject['build-system']['requires']]
+
+        if reentry_requirement not in pyproject_requires:
+            raise DependencySpecificationError(
+                "Missing requirement '{}' in 'pyproject.toml'.".format(reentry_requirement)
+            )
+
+    except FileNotFoundError as error:
+        raise DependencySpecificationError("The 'pyproject.toml' file is missing!")
+
+    click.secho('Pyproject.toml dependency specification is consistent.', fg='green')
+
+
+@cli.command('validate-all', help='Validate consistency of all requirements.')
+@click.pass_context
+def validate_all(ctx):
+    """Validate consistency of all requirement specifications of the package.
+
+    Validates that the specification of requirements/dependencies is consistent across
+    the following files:
+
+    - setup.py
+    - setup.json
+    - environment.yml
+    - pyproject.toml
+    - docs/requirements_for_rtd.txt
+    """
+
+    ctx.invoke(validate_environment_yml)
+    ctx.invoke(validate_requirements_for_rtd)
+    ctx.invoke(validate_pyproject_toml)
+
+
+@cli.command()
+@click.argument('extras', nargs=-1)
+def pip_install_extras(extras):
+    """Install extra requirements.
+
+    For example:
+
+        pip-install-extras docs
+
+    This will install *only* the extra requirements for docs, but without triggering
+    the installation of the main installation requirements of the aiida-core package.
+ """ + # Read the requirements from 'setup.json' + setup_cfg = _load_setup_cfg() + + to_install = set() + for key in extras: + to_install.update(Requirement.parse(r) for r in setup_cfg['extras_require'][key]) + + cmd = [sys.executable, '-m', 'pip', 'install'] + [str(r) for r in to_install] + subprocess.run(cmd, check=True) + + +if __name__ == '__main__': + cli() # pylint: disable=no-value-for-parameter diff --git a/utils/update_dependencies.py b/utils/update_dependencies.py deleted file mode 100755 index 2987f33196..0000000000 --- a/utils/update_dependencies.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -"""Utility CLI to update dependency version requirements of the `setup.json`.""" - -import copy -import os -import click - -from validate_consistency import get_setup_json, write_setup_json - -FILENAME_SETUP_JSON = 'setup.json' -SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0] -ROOT_DIR = os.path.join(SCRIPT_PATH, os.pardir) -FILEPATH_SETUP_JSON = os.path.join(ROOT_DIR, FILENAME_SETUP_JSON) -DEFAULT_EXCLUDE_LIST = ['django', 'circus', 'numpy', 'pymatgen', 'ase', 'monty', 'pyyaml'] - - -@click.group() -def cli(): - """Utility to update dependency requirements for `aiida-core`. - - Since `aiida-core` fixes the versions of almost all of its dependencies, once in a while these need to be updated. - This is a manual process, but this CLI attempts to simplify it somewhat. The idea is to remote all explicit version - restrictions from the `setup.json`, except for those packages where it is known that a upper limit is necessary. - This is accomplished by the command: - - python update_dependencies.py unrestrict - - The command will update the `setup.json` to remove all explicit limits, except for those packages specified by the - `--exclude` option. After this step, install `aiida-core` through pip with the `[all]` flag to install all optional - extra requirements as well. Since there are no explicit version requirements anymore, pip should install the latest - available version for each dependency. - - Once all the tests complete successfully, run the following command: - - pip freeze > requirements.txt - - This will now capture the exact versions of the packages installed in the virtual environment. Since the tests run - for this setup, we can now set those versions as the new requirements in the `setup.json`. Note that this is why a - clean virtual environment should be used for this entire procedure. Now execute the command: - - python update_dependencies.py update requirements.txt - - This will now update the `setup.json` to reinstate the exact version requirements for all dependencies. Commit the - changes to `setup.json` and make a pull request. - """ - - -@cli.command('unrestrict') -@click.option('--exclude', multiple=True, help='List of package names to exclude from updating.') -def unrestrict_requirements(exclude): - """Remove all explicit dependency version restrictions from `setup.json`. 
-
-    Warning, this currently only works for dependency requirements that use the `==` operator. Statements with different
-    operators, additional filters after a semicolon, or with extra requirements (using `[]`) are not supported. The
-    limits for these statements will have to be updated manually.
-    """
-    setup = get_setup_json()
-    clone = copy.deepcopy(setup)
-    clone['install_requires'] = []
-
-    if exclude:
-        exclude = list(exclude).extend(DEFAULT_EXCLUDE_LIST)
-    else:
-        exclude = DEFAULT_EXCLUDE_LIST
-
-    for requirement in setup['install_requires']:
-        if requirement in exclude or ';' in requirement or '==' not in requirement:
-            clone['install_requires'].append(requirement)
-        else:
-            package = requirement.split('==')[0]
-            clone['install_requires'].append(package)
-
-    for extra, requirements in setup['extras_require'].items():
-        clone['extras_require'][extra] = []
-
-        for requirement in requirements:
-            if requirement in exclude or ';' in requirement or '==' not in requirement:
-                clone['extras_require'][extra].append(requirement)
-            else:
-                package = requirement.split('==')[0]
-                clone['extras_require'][extra].append(package)
-
-    write_setup_json(clone)
-
-
-@cli.command('update')
-@click.argument('requirements', type=click.File(mode='r'))
-def update_requirements(requirements):
-    """Apply version restrictions from REQUIREMENTS.
-
-    The REQUIREMENTS file should contain the output of `pip freeze`.
-    """
-    setup = get_setup_json()
-
-    package_versions = []
-
-    for requirement in requirements.readlines():
-        try:
-            package, version = requirement.strip().split('==')
-            package_versions.append((package, version))
-        except ValueError:
-            continue
-
-    requirements = set()
-
-    for requirement in setup['install_requires']:
-        for package, version in package_versions:
-            if requirement.lower() == package.lower():
-                requirements.add('{}=={}'.format(package.lower(), version))
-                break
-        else:
-            requirements.add(requirement)
-
-    setup['install_requires'] = sorted(requirements)
-
-    for extra, extra_requirements in setup['extras_require'].items():
-        requirements = set()
-
-        for requirement in extra_requirements:
-            for package, version in package_versions:
-                if requirement.lower() == package.lower():
-                    requirements.add('{}=={}'.format(package.lower(), version))
-                    break
-            else:
-                requirements.add(requirement)
-
-        setup['extras_require'][extra] = sorted(requirements)
-
-    write_setup_json(setup)
-
-
-if __name__ == '__main__':
-    cli()  # pylint: disable=no-value-for-parameter
diff --git a/utils/validate_consistency.py b/utils/validate_consistency.py
index 6604347ded..a771a75449 100644
--- a/utils/validate_consistency.py
+++ b/utils/validate_consistency.py
@@ -22,7 +22,6 @@ import sys
 import json
 from collections import OrderedDict
 
-import toml
 import click
 
 FILENAME_TOML = 'pyproject.toml'
@@ -229,94 +228,5 @@ def validate_version():
         sys.exit(1)
 
 
-@cli.command('toml')
-def validate_pyproject():
-    """Ensure that the version of reentry in setup.json and pyproject.toml are identical."""
-    reentry_requirement = None
-    for requirement in get_setup_json()['install_requires']:
-        if 'reentry' in requirement:
-            reentry_requirement = requirement
-            break
-
-    if reentry_requirement is None:
-        click.echo('Could not find the reentry requirement in {}'.format(FILEPATH_SETUP_JSON), err=True)
-        sys.exit(1)
-
-    try:
-        with open(FILEPATH_TOML, 'r') as handle:
-            toml_string = handle.read()
-    except IOError as exception:
-        click.echo('Could not read the required file: {}'.format(FILEPATH_TOML), err=True)
-        sys.exit(1)
-
-    try:
-        parsed_toml = toml.loads(toml_string)
-    except Exception as exception:  # pylint: disable=broad-except
-        click.echo('Could not parse {}: {}'.format(FILEPATH_TOML, exception), err=True)
-        sys.exit(1)
-
-    try:
-        pyproject_toml_requires = parsed_toml['build-system']['requires']
-    except KeyError as exception:
-        click.echo('Could not retrieve the build-system requires list from {}'.format(FILEPATH_TOML), err=True)
-        sys.exit(1)
-
-    if reentry_requirement not in pyproject_toml_requires:
-        click.echo(
-            'Reentry requirement from {} {} is not mirrored in {}'.format(
-                FILEPATH_SETUP_JSON, reentry_requirement, FILEPATH_TOML
-            ),
-            err=True
-        )
-        sys.exit(1)
-
-
-@cli.command('conda')
-def update_environment_yml():
-    """Update `environment.yml` file for conda."""
-    import yaml
-    import re
-
-    # needed for ordered dict, see https://stackoverflow.com/a/52621703
-    yaml.add_representer(
-        OrderedDict,
-        lambda self, data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()),
-        Dumper=yaml.SafeDumper
-    )
-
-    # fix incompatibilities between conda and pypi
-    replacements = {'psycopg2-binary': 'psycopg2', 'graphviz': 'python-graphviz'}
-    install_requires = get_setup_json()['install_requires']
-
-    # python version cannot be overriden from outside environment.yml
-    # (even if it is not specified at all in environment.yml)
-    # https://github.com/conda/conda/issues/9506
-    conda_requires = ['python~=3.7']
-    for req in install_requires:
-        # skip packages required for specific python versions
-        # (environment.yml aims at the latest python version)
-        if req.find('python_version') != -1:
-            continue
-
-        for (regex, replacement) in iter(replacements.items()):
-            req = re.sub(regex, replacement, req)
-
-        conda_requires.append(req)
-
-    environment = OrderedDict([
-        ('name', 'aiida'),
-        ('channels', ['defaults', 'conda-forge', 'etetoolkit']),
-        ('dependencies', conda_requires),
-    ])
-
-    environment_filename = 'environment.yml'
-    file_path = os.path.join(ROOT_DIR, environment_filename)
-    with open(file_path, 'w') as env_file:
-        env_file.write('# Usage: conda env create -n myenvname -f environment.yml\n')
-        yaml.safe_dump(
-            environment, env_file, explicit_start=True, default_flow_style=False, encoding='utf-8', allow_unicode=True
-        )
-
-
 if __name__ == '__main__':
     cli()  # pylint: disable=no-value-for-parameter

From 87e22f41797781641c6712f1c408d39dd0f40d9f Mon Sep 17 00:00:00 2001
From: Simon Adorf
Date: Tue, 10 Mar 2020 12:06:28 +0100
Subject: [PATCH 10/54] Fix workflow syntax for test-install and
 update-requirements workflows.

Used incorrect 'on.push.branch' instead of the correct 'on.push.branches' triggers.
---
 .github/workflows/test-install.yml        | 2 +-
 .github/workflows/update-requirements.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml
index 8c3238ddfa..76382c2bfa 100644
--- a/.github/workflows/test-install.yml
+++ b/.github/workflows/test-install.yml
@@ -2,7 +2,7 @@ name: test-install
 
 on:
   push:
-    branch:
+    branches:
       - master
      - develop
      - release/*
diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml
index 7283fe5d02..e9ba765063 100644
--- a/.github/workflows/update-requirements.yml
+++ b/.github/workflows/update-requirements.yml
@@ -2,7 +2,7 @@ name: update-requirements
 
 on:
   push:
-    branch:
+    branches:
       - release/*
     paths:
       - 'setup.json'

From 8202ab152cc1668907f38636e53e792d753f8784 Mon Sep 17 00:00:00 2001
From: Simon Adorf
Date: Tue, 10 Mar 2020 12:19:26 +0100
Subject: [PATCH 11/54] Update paths triggers for test-install workflow.

---
 .github/workflows/test-install.yml | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml
index 76382c2bfa..2a04b887c4 100644
--- a/.github/workflows/test-install.yml
+++ b/.github/workflows/test-install.yml
@@ -2,16 +2,12 @@ name: test-install
 
 on:
   push:
-    branches:
-      - master
-      - develop
-      - release/*
-      - dm/*
     paths:
       - 'setup.*'
       - 'environment.yml'
-      - 'requirements*.txt'
+      - '**/requirements*.txt'
       - 'pyproject.toml'
+      - 'util/dependency_management.py'
       - '.github/workflows/test-install.yml'
   schedule:
     - cron: '30 02 * * *'  # nightly build

From 19365fa78077375ae55789dfe8bbaf018a13ea7d Mon Sep 17 00:00:00 2001
From: Simon Adorf
Date: Tue, 10 Mar 2020 14:49:28 +0100
Subject: [PATCH 12/54] Only create update-requirements PR on the primary
 repository.

And not on forks.

---
 .github/workflows/update-requirements.yml | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml
index e9ba765063..f0bfd095d1 100644
--- a/.github/workflows/update-requirements.yml
+++ b/.github/workflows/update-requirements.yml
@@ -2,11 +2,12 @@ name: update-requirements
 
 on:
   push:
-    branches:
-      - release/*
     paths:
-      - 'setup.json'
+      - 'setup.*'
+      - '.github/workflows/update-requirements.yml'
+  pull_request:
+    branches:
+      - master
 
 jobs:
@@ -95,6 +96,7 @@ jobs:
           path: requirements
 
       - name: Create Pull Request
+        if: github.repository == 'aiidateam/aiida-core'
         uses: peter-evans/create-pull-request@v2
         with:
           commit-message: "Update requirements.txt"

From e234fae15265a912076ffacaf51a05315a68f604 Mon Sep 17 00:00:00 2001
From: Simon Adorf
Date: Tue, 10 Mar 2020 13:54:26 +0100
Subject: [PATCH 13/54] Update requirements directly on same branch.

---
 .github/workflows/update-requirements.yml | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml
index f0bfd095d1..29eda9bf15 100644
--- a/.github/workflows/update-requirements.yml
+++ b/.github/workflows/update-requirements.yml
@@ -95,11 +95,24 @@ jobs:
           name: requirements.txt
           path: requirements
 
-      - name: Create Pull Request
-        if: github.repository == 'aiidateam/aiida-core'
+      - name: Commit requirements files
+        run: |
+          git config --local user.email "action@github.com"
+          git config --local user.name "GitHub Action"
+          git add requirements/*
+          git commit -m "Update requirements files." -a || echo "Nothing to update."
+
+      - name: Push changes
+        uses: ad-m/github-push-action@v0.5.0
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          branch: ${{ github.ref }}
+
+      - name: Create Pull Request (since update via push failed)
+        if: failure() && github.repository == 'aiidateam/aiida-core'
         uses: peter-evans/create-pull-request@v2
         with:
-          commit-message: "Update requirements.txt"
+          commit-message: "Update requirements files."
           token: ${{ secrets.GITHUB_TOKEN }}
           title: "Update requirements.txt"
           team-reviewers: dependency-manager

From 4a49c054766568a1b8a6f186067c1ab0f05ecfea Mon Sep 17 00:00:00 2001
From: GitHub Action
Date: Tue, 10 Mar 2020 13:44:32 +0000
Subject: [PATCH 14/54] Update requirements files.

---
 requirements/requirements-py-3.5.txt | 6 +++---
 requirements/requirements-py-3.6.txt | 8 ++++----
 requirements/requirements-py-3.7.txt | 8 ++++----
 requirements/requirements-py-3.8.txt | 8 ++++----
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/requirements/requirements-py-3.5.txt b/requirements/requirements-py-3.5.txt
index 3f7eddad62..7a9957271a 100644
--- a/requirements/requirements-py-3.5.txt
+++ b/requirements/requirements-py-3.5.txt
@@ -62,7 +62,7 @@ nbconvert==5.6.1
 nbformat==5.0.4
 networkx==2.4
 notebook==5.7.8
-numpy==1.17.4
+numpy==1.17.5
 orderedmultidict==1.0.1
 packaging==20.3
 palettable==3.3.0
@@ -88,7 +88,7 @@ pyblake2==1.1.2
 PyCifRW==4.4.1
 pycparser==2.20
 PyDispatcher==2.0.5
-Pygments==2.5.2
+Pygments==2.6.1
 pymatgen==2019.7.2
 PyMySQL==0.9.3
 PyNaCl==1.3.0
@@ -114,7 +114,7 @@ scramp==1.1.0
 seekpath==1.9.4
 Send2Trash==1.5.0
 shellingham==1.3.2
-shortuuid==0.5.0
+shortuuid==1.0.1
 simplejson==3.17.0
 six==1.14.0
 snowballstemmer==2.0.0
diff --git a/requirements/requirements-py-3.6.txt b/requirements/requirements-py-3.6.txt
index f89ca64a6e..1443564fe3 100644
--- a/requirements/requirements-py-3.6.txt
+++ b/requirements/requirements-py-3.6.txt
@@ -63,7 +63,7 @@ nbconvert==5.6.1
 nbformat==5.0.4
 networkx==2.4
 notebook==5.7.8
-numpy==1.17.4
+numpy==1.17.5
 orderedmultidict==1.0.1
 packaging==20.3
 palettable==3.3.0
@@ -79,7 +79,7 @@ pika==1.1.0
 pluggy==0.13.1
 plumpy==0.14.5
 prometheus-client==0.7.1
-prompt-toolkit==3.0.3
+prompt-toolkit==3.0.4
 psutil==5.7.0
 psycopg2-binary==2.8.4
 ptyprocess==0.6.0
@@ -87,7 +87,7 @@ py==1.8.1
 PyCifRW==4.4.1
 pycparser==2.20
 PyDispatcher==2.0.5
-Pygments==2.5.2
+Pygments==2.6.1
 pymatgen==2020.3.2
 PyMySQL==0.9.3
 PyNaCl==1.3.0
@@ -113,7 +113,7 @@ scramp==1.1.0
 seekpath==1.9.4
 Send2Trash==1.5.0
 shellingham==1.3.2
-shortuuid==0.5.0
+shortuuid==1.0.1
 simplejson==3.17.0
 six==1.14.0
 snowballstemmer==2.0.0
diff --git a/requirements/requirements-py-3.7.txt b/requirements/requirements-py-3.7.txt
index 5dbef921c4..b4ed2ed37c 100644
--- a/requirements/requirements-py-3.7.txt
+++ b/requirements/requirements-py-3.7.txt
@@ -62,7 +62,7 @@ nbconvert==5.6.1
 nbformat==5.0.4
 networkx==2.4
 notebook==5.7.8
-numpy==1.17.4
+numpy==1.17.5
 orderedmultidict==1.0.1
 packaging==20.3
 palettable==3.3.0
@@ -78,7 +78,7 @@ pika==1.1.0
 pluggy==0.13.1
 plumpy==0.14.5
 prometheus-client==0.7.1
-prompt-toolkit==3.0.3
+prompt-toolkit==3.0.4
 psutil==5.7.0
 psycopg2-binary==2.8.4
 ptyprocess==0.6.0
@@ -86,7 +86,7 @@ py==1.8.1
 PyCifRW==4.4.1
 pycparser==2.20
 PyDispatcher==2.0.5
-Pygments==2.5.2
+Pygments==2.6.1
 pymatgen==2020.3.2
 PyMySQL==0.9.3
 PyNaCl==1.3.0
@@ -112,7 +112,7 @@ scramp==1.1.0
 seekpath==1.9.4
 Send2Trash==1.5.0
 shellingham==1.3.2
-shortuuid==0.5.0
+shortuuid==1.0.1
 simplejson==3.17.0
 six==1.14.0
 snowballstemmer==2.0.0
diff --git a/requirements/requirements-py-3.8.txt b/requirements/requirements-py-3.8.txt
index d8c8c8686c..928b5c05d2 100644
--- a/requirements/requirements-py-3.8.txt
+++ b/requirements/requirements-py-3.8.txt
@@ -61,7 +61,7 @@ nbconvert==5.6.1
 nbformat==5.0.4
 networkx==2.4
 notebook==5.7.8
-numpy==1.17.4
+numpy==1.17.5
 orderedmultidict==1.0.1
 packaging==20.3
 palettable==3.3.0
@@ -77,7 +77,7 @@ pika==1.1.0
 pluggy==0.13.1
 plumpy==0.14.5
 prometheus-client==0.7.1
-prompt-toolkit==3.0.3
+prompt-toolkit==3.0.4
 psutil==5.7.0
 psycopg2-binary==2.8.4
 ptyprocess==0.6.0
@@ -85,7 +85,7 @@ py==1.8.1
 PyCifRW==4.4.1
 pycparser==2.20
 PyDispatcher==2.0.5
-Pygments==2.5.2
+Pygments==2.6.1
 pymatgen==2020.3.2
 PyMySQL==0.9.3
 PyNaCl==1.3.0
@@ -111,7 +111,7 @@ scramp==1.1.0
 seekpath==1.9.4
 Send2Trash==1.5.0
 shellingham==1.3.2
-shortuuid==0.5.0
+shortuuid==1.0.1
 simplejson==3.17.0
 six==1.14.0
 snowballstemmer==2.0.0

From 12465534b578bee9fdd44984bbe4ad089a955126 Mon Sep 17 00:00:00 2001
From: Casper Welzel Andersen <43357585+CasperWA@users.noreply.github.com>
Date: Thu, 12 Mar 2020 17:57:04 +0100
Subject: [PATCH 15/54] Use actions/checkout@v2 (instead of @master) (#3846)

The actions/checkout@v2 GitHub action seemed to cause issues with the
codecov/codecov-action@v1 before, but it seems this is not (or no longer)
the case.

---
 .github/workflows/ci.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index bf8eae364b..f57820063e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
     timeout-minutes: 30
 
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
 
     - name: Set up Python 3.7
       uses: actions/setup-python@v1
@@ -41,7 +41,7 @@ jobs:
     timeout-minutes: 30
 
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
 
     - name: Set up Python 3.7
       uses: actions/setup-python@v1
@@ -76,7 +76,7 @@ jobs:
       python-version: [3.5, 3.8]
 
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
     - uses: CasperWA/postgresql-action@v1.2
       with:
        postgresql version: '10'
@@ -143,7 +143,7 @@ jobs:
     timeout-minutes: 30
 
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
 
     - name: Set up Python 3.7
       uses: actions/setup-python@v1
@@ -165,7 +165,7 @@ jobs:
     timeout-minutes: 30
 
     steps:
-    - uses: actions/checkout@master
+    - uses: actions/checkout@v2
 
     - name: Install docker
       run: |

From 04f5b51d239de944815ae727aa0927b8c4c255e8 Mon Sep 17 00:00:00 2001
From: Carl Simon Adorf
Date: Fri, 13 Mar 2020 17:47:43 +0100
Subject: [PATCH 16/54] Dm/revise update requirements workflow (#3847)

* Implement the dependency_management 'check-requirements' command.

To check whether the environments frozen in the 'requirements/*.txt'
files are matching the dependency specification of 'setup.json'.

* Implement 'check-requirements' GitHub actions job.

* Checkout specified head_ref in update-requirements workflow.

* Execute 'update-requirements' workflow only upon repository_dispatch.

With type 'update-requirements-command'.
* Apply suggestions from code review

Co-Authored-By: Leopold Talirz
---
 .github/workflows/ci.yml                  | 35 +++++++++++++
 .github/workflows/test-install.yml        |  2 +-
 .github/workflows/update-requirements.yml | 17 +++----
 utils/dependency_management.py            | 62 ++++++++++++++++++++++-
 4 files changed, 105 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f57820063e..f2bea20b43 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -64,6 +64,41 @@ jobs:
       run: pre-commit run --all-files || ( git status --short ; git diff ; exit 1 )
 
 
+  check-requirements:
+
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Set up Python 3.8
+      uses: actions/setup-python@v1
+      with:
+        python-version: 3.8
+
+    - name: Install dm-script dependencies
+      run: pip install packaging==20.3 click~=7.0 pyyaml~=5.1 toml
+
+    - name: Check requirements files
+      run: python ./utils/dependency_management.py check-requirements DEFAULT
+
+    - name: Create commit comment
+      if: failure()
+      uses: peter-evans/commit-comment@v1
+      with:
+        token: ${{ secrets.GITHUB_TOKEN }}
+        path: setup.json
+        body: |
+          It appears that at least one of the environments defined in the requirements files
+          ('requirements/*.txt') does not meet the dependencies specified in the 'setup.json' file.
+          These files define the environment for continuous integration tests, so it is important that they are updated.
+
+          If this commit is part of a pull request, you can automatically update the requirements by
+          commenting with '/update-requirements'.
+
+          Click [here](https://github.com/aiidateam/aiida-core/wiki/AiiDA-Dependency-Management) for more information.
+
   tests:
 
     runs-on: ubuntu-latest
diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml
index 2a04b887c4..05224f43dc 100644
--- a/.github/workflows/test-install.yml
+++ b/.github/workflows/test-install.yml
@@ -29,7 +29,7 @@ jobs:
         python-version: 3.7
 
     - name: Install dm-script dependencies
-      run: pip install click~=7.0 pyyaml~=5.1 toml
+      run: pip install packaging==20.3 click~=7.0 pyyaml~=5.1 toml
 
     - name: Validate
       run: python ./utils/dependency_management.py validate-all
diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml
index 29eda9bf15..636f8c9d3d 100644
--- a/.github/workflows/update-requirements.yml
+++ b/.github/workflows/update-requirements.yml
@@ -1,13 +1,8 @@
 name: update-requirements
 
 on:
-  push:
-    paths:
-      - 'setup.*'
-      - '.github/workflows/update-requirements.yml'
-  pull_request:
-    branches:
-      - master
+  repository_dispatch:
+    types: [update-requirements-command]
 
 jobs:
 
@@ -23,7 +18,9 @@ jobs:
         python-version: [3.5, 3.6, 3.7, 3.8]
 
     steps:
-    - uses: actions/checkout@v1
+    - uses: actions/checkout@v2
+      with:
+        ref: ${{ github.event.client_payload.head_ref }}
     - uses: CasperWA/postgresql-action@v1.2
       with:
        postgresql version: '10'
@@ -88,6 +85,8 @@ jobs:
     steps:
     - uses: actions/checkout@v2
+      with:
+        ref: ${{ github.event.client_payload.head_ref }}
 
     - name: Download requirements.txt files
       uses: actions/download-artifact@v1
@@ -106,7 +105,7 @@ jobs:
         uses: ad-m/github-push-action@v0.5.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
-          branch: ${{ github.ref }}
+          branch: ${{ github.event.client_payload.head_ref }}
 
       - name: Create Pull Request (since update via push failed)
         if: failure() && github.repository == 'aiidateam/aiida-core'
diff --git a/utils/dependency_management.py b/utils/dependency_management.py
index a86607c672..17442d66af 100755
--- a/utils/dependency_management.py
+++ b/utils/dependency_management.py
@@ -16,7 +16,8 @@ import subprocess
 from pathlib import Path
 from collections import OrderedDict
 
-from pkg_resources import Requirement
+from pkg_resources import Requirement, parse_requirements
+from packaging.utils import canonicalize_name
 
 import click
 import yaml
@@ -77,6 +78,24 @@ def _setuptools_to_conda(req):
     return Requirement.parse(str(req))
 
 
+class _Entry:
+    """Helper class to check whether a given distribution fulfills a requirement."""
+
+    def __init__(self, requirement):
+        self._req = requirement
+
+    def fulfills(self, requirement):
+        """Returns True if this entry fulfills the requirement."""
+
+        return canonicalize_name(self._req.name) == canonicalize_name(requirement.name) \
+            and self._req.specs[0][1] in requirement.specifier
+
+
+def _parse_working_set(entries):
+    for req in parse_requirements(entries):
+        yield _Entry(req)
+
+
 @click.group()
 def cli():
     """Manage dependencies of the aiida-core package."""
@@ -311,6 +330,47 @@ def validate_all(ctx):
     ctx.invoke(validate_pyproject_toml)
 
 
+@cli.command()
+@click.argument('extras', nargs=-1)
+def check_requirements(extras):
+    """Check the 'requirements/*.txt' files.
+
+    Checks that the environments specified in the requirements files
+    match all the dependencies specified in 'setup.json'.
+
+    The arguments allow you to specify which 'extra' requirements to expect.
+    Use 'DEFAULT' to select 'atomic_tools', 'docs', 'notebook', 'rest', and 'testing'.
+
+    """
+
+    if len(extras) == 1 and extras[0] == 'DEFAULT':
+        extras = ['atomic_tools', 'docs', 'notebook', 'rest', 'testing']
+
+    # Read the requirements from 'setup.json'
+    setup_cfg = _load_setup_cfg()
+    install_requires = setup_cfg['install_requires']
+    for extra in extras:
+        install_requires.extend(setup_cfg['extras_require'][extra])
+    install_requires = set(parse_requirements(install_requires))
+
+    for fn_req in (ROOT / 'requirements').iterdir():
+        env = {'python_version': re.match(r'.*-py-(.*)\.txt', str(fn_req)).groups()[0]}
+        required = {r for r in install_requires if r.marker is None or r.marker.evaluate(env)}
+
+        with open(fn_req) as req_file:
+            working_set = list(_parse_working_set(req_file))
+            installed = {req for req in required for entry in working_set if entry.fulfills(req)}
+
+        not_installed = required.difference(installed)
+        if not_installed:  # switch to assignment expression after yapf supports 3.8
+            raise DependencySpecificationError(
+                f"Environment specified in '{fn_req.relative_to(ROOT)}' misses matches for:\n" +
+                '\n'.join(' - ' + str(f) for f in not_installed)
+            )
+
+    click.secho("Requirements files appear to be in sync with specifications in 'setup.json'.", fg='green')
+
+
 @cli.command()
 @click.argument('extras', nargs=-1)
 def pip_install_extras(extras):

From 44e12d70412d80711a50435c944f62d6f0d6e501 Mon Sep 17 00:00:00 2001
From: Carl Simon Adorf
Date: Sat, 14 Mar 2020 12:33:10 +0100
Subject: [PATCH 17/54] Dm/auto generate all (#3848)

* Auto-generate all dependent requirements files on commit.

* Add packaging to 'dev_precommit' extra requirements.

Required by the dependency_management script.
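
As a note for reviewers, a minimal sketch of why the dependency_management
script needs packaging: the 'check-requirements' command added in the previous
commit has to decide whether a pinned requirement satisfies a specifier. The
snippet below is a hypothetical, standalone illustration only, assuming
packaging>=20.3 and made-up example pins; the actual helper combines
pkg_resources and packaging.utils:

    # Illustration only: the package names and versions here are invented examples.
    from packaging.requirements import Requirement
    from packaging.utils import canonicalize_name

    spec = Requirement('PyYAML~=5.1')    # a specifier as it might appear in setup.json
    pinned = Requirement('pyyaml==5.3')  # a pin as it might appear in requirements/*.txt

    # The names must match after canonicalization ('PyYAML' -> 'pyyaml') ...
    same_name = canonicalize_name(spec.name) == canonicalize_name(pinned.name)
    # ... and the pinned version must fall within the specifier set.
    version = next(iter(pinned.specifier)).version
    print(same_name and version in spec.specifier)  # prints: True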
---
 .pre-commit-config.yaml | 12 ++++++++++++
 setup.json              |  1 +
 2 files changed, 13 insertions(+)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 39bd257146..62ae29e398 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -110,6 +110,18 @@
       description: "This hook runs Prospector: https://github.com/landscapeio/prospector"
       entry: prospector
 
+    - id: dm-generate-all
+      name: Update all requirements files
+      entry: python ./utils/dependency_management.py generate-all
+      language: system
+      files: >-
+        (?x)^(
+          setup.py|
+          setup.json|
+          utils/dependency_management.py
+        )$
+      pass_filenames: false
+
     - id: rtd-requirements
       name: Validate docs/requirements_for_rtd.txt
       entry: python ./utils/dependency_management.py validate-rtd-reqs
diff --git a/setup.json b/setup.json
index d3b77f0c8b..53f1d1a310 100644
--- a/setup.json
+++ b/setup.json
@@ -98,6 +98,7 @@
         ],
         "dev_precommit": [
             "astroid==2.3.3",
+            "packaging==20.3",
             "pre-commit==1.18.3",
             "prospector==1.2.0",
             "pylint==2.4.4",

From d0b89c19a306901f67b78def6344f76a1212d01e Mon Sep 17 00:00:00 2001
From: Leopold Talirz
Date: Sat, 21 Mar 2020 09:19:31 +0100
Subject: [PATCH 18/54] Suggest running `reentry scan` when entry point cannot
 be resolved (#3765)

This is the case either when the entry point cannot be found at all or
when multiple entry points are registered with the same name.

---
 aiida/plugins/entry_point.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/aiida/plugins/entry_point.py b/aiida/plugins/entry_point.py
index 92a496927b..0d47792626 100644
--- a/aiida/plugins/entry_point.py
+++ b/aiida/plugins/entry_point.py
@@ -258,10 +258,12 @@ def get_entry_point(group, name):
     entry_points = [ep for ep in get_entry_points(group) if ep.name == name]
 
     if not entry_points:
-        raise MissingEntryPointError("Entry point '{}' not found in group '{}'".format(name, group))
+        raise MissingEntryPointError("Entry point '{}' not found in group '{}'. ".format(name, group) +
+                                     'Try running `reentry scan` to update the entry point cache.')
 
     if len(entry_points) > 1:
-        raise MultipleEntryPointError("Multiple entry points '{}' found in group".format(name, group))
+        raise MultipleEntryPointError("Multiple entry points '{}' found in group '{}'. ".format(name, group) +
+                                      'Try running `reentry scan` to repopulate the entry point cache.')
 
     return entry_points[0]

From b90a6298c7843d33c0fc1f0099d71f54bca6eca6 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Sun, 22 Mar 2020 17:19:01 +0100
Subject: [PATCH 19/54] Add the `-l/--limit` option to `verdi group show`
 (#3857)

This is a very useful flag that used to be there at some point.
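
The implementation simply slices the node iterator when a limit is given. As a
minimal, hypothetical standalone sketch of the same click pattern (not the
actual aiida-core code, which operates on group.nodes, and assuming click is
installed):

    import click

    @click.command()
    @click.option('-l', '--limit', type=int, default=None, help='Show at most this many items.')
    def show(limit):
        """Echo the identifiers of some stand-in 'nodes'."""
        nodes = list(range(10))  # stand-in for the nodes of a group
        iterator = nodes[:limit] if limit else nodes
        click.echo(' '.join(str(pk) for pk in iterator))

    if __name__ == '__main__':
        show()  # pylint: disable=no-value-for-parameter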
---
 aiida/cmdline/commands/cmd_group.py  | 12 +++-
 tests/cmdline/commands/test_group.py | 87 +++++++++++++++++-----------
 2 files changed, 62 insertions(+), 37 deletions(-)

diff --git a/aiida/cmdline/commands/cmd_group.py b/aiida/cmdline/commands/cmd_group.py
index dff712033a..9e0eb26d07 100644
--- a/aiida/cmdline/commands/cmd_group.py
+++ b/aiida/cmdline/commands/cmd_group.py
@@ -123,6 +123,7 @@ def group_description(group, description):
 
 @verdi_group.command('show')
 @options.RAW(help='Show only a space-separated list of PKs of the calculations in the group')
+@options.LIMIT()
 @click.option(
     '-u',
     '--uuid',
@@ -132,18 +133,23 @@ def group_description(group, description):
 )
 @arguments.GROUP()
 @with_dbenv()
-def group_show(group, raw, uuid):
+def group_show(group, raw, limit, uuid):
     """Show information for a given group."""
     from tabulate import tabulate
     from aiida.common.utils import str_timedelta
     from aiida.common import timezone
 
+    if limit:
+        node_iterator = group.nodes[:limit]
+    else:
+        node_iterator = group.nodes
+
     if raw:
         if uuid:
-            echo.echo(' '.join(str(_.uuid) for _ in group.nodes))
+            echo.echo(' '.join(str(_.uuid) for _ in node_iterator))
         else:
-            echo.echo(' '.join(str(_.pk) for _ in group.nodes))
+            echo.echo(' '.join(str(_.pk) for _ in node_iterator))
     else:
         type_string = group.type_string
         desc = group.description
diff --git a/tests/cmdline/commands/test_group.py b/tests/cmdline/commands/test_group.py
index 79171b3164..dbba9bf3f1 100644
--- a/tests/cmdline/commands/test_group.py
+++ b/tests/cmdline/commands/test_group.py
@@ -8,14 +8,10 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """Tests for the `verdi group` command."""
-
 from aiida import orm
 from aiida.backends.testbase import AiidaTestCase
 from aiida.common import exceptions
-from aiida.cmdline.commands.cmd_group import (
-    group_list, group_create, group_delete, group_relabel, group_description, group_add_nodes, group_remove_nodes,
-    group_show, group_copy
-)
+from aiida.cmdline.commands import cmd_group
 
 
 class TestVerdiGroup(AiidaTestCase):
@@ -37,64 +33,64 @@ def test_help(self):
         options = ['--help']
 
         # verdi group list
-        result = self.cli_runner.invoke(group_list, options)
+        result = self.cli_runner.invoke(cmd_group.group_list, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group create
-        result = self.cli_runner.invoke(group_create, options)
+        result = self.cli_runner.invoke(cmd_group.group_create, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group delete
-        result = self.cli_runner.invoke(group_delete, options)
+        result = self.cli_runner.invoke(cmd_group.group_delete, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group relabel
-        result = self.cli_runner.invoke(group_relabel, options)
+        result = self.cli_runner.invoke(cmd_group.group_relabel, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group description
-        result = self.cli_runner.invoke(group_description, options)
+        result = self.cli_runner.invoke(cmd_group.group_description, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group addnodes
-        result = self.cli_runner.invoke(group_add_nodes, options)
+        result = self.cli_runner.invoke(cmd_group.group_add_nodes, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group removenodes
-        result = self.cli_runner.invoke(group_remove_nodes, options)
+        result = self.cli_runner.invoke(cmd_group.group_remove_nodes, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group show
-        result = self.cli_runner.invoke(group_show, options)
+        result = self.cli_runner.invoke(cmd_group.group_show, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
         # verdi group copy
-        result = self.cli_runner.invoke(group_copy, options)
+        result = self.cli_runner.invoke(cmd_group.group_copy, options)
         self.assertIsNone(result.exception, result.output)
         self.assertIn('Usage', result.output)
 
     def test_create(self):
         """Test `verdi group create` command."""
-        result = self.cli_runner.invoke(group_create, ['dummygroup5'])
+        result = self.cli_runner.invoke(cmd_group.group_create, ['dummygroup5'])
         self.assertClickResultNoException(result)
 
         # check if newly added group in present in list
-        result = self.cli_runner.invoke(group_list)
+        result = self.cli_runner.invoke(cmd_group.group_list)
         self.assertClickResultNoException(result)
 
         self.assertIn('dummygroup5', result.output)
 
     def test_list(self):
         """Test `verdi group list` command."""
-        result = self.cli_runner.invoke(group_list)
+        result = self.cli_runner.invoke(cmd_group.group_list)
         self.assertClickResultNoException(result)
 
         for grp in ['dummygroup1', 'dummygroup2']:
@@ -102,7 +98,7 @@ def test_list(self):
 
     def test_copy(self):
         """Test `verdi group copy` command."""
-        result = self.cli_runner.invoke(group_copy, ['dummygroup1', 'dummygroup2'])
+        result = self.cli_runner.invoke(cmd_group.group_copy, ['dummygroup1', 'dummygroup2'])
         self.assertClickResultNoException(result)
 
         self.assertIn('Success', result.output)
@@ -112,11 +108,11 @@ def test_delete(self):
         orm.Group(label='group_test_delete_01').store()
         orm.Group(label='group_test_delete_02').store()
 
-        result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_01'])
+        result = self.cli_runner.invoke(cmd_group.group_delete, ['--force', 'group_test_delete_01'])
         self.assertClickResultNoException(result)
 
         # Verify that removed group is not present in list
-        result = self.cli_runner.invoke(group_list)
+        result = self.cli_runner.invoke(cmd_group.group_list)
         self.assertClickResultNoException(result)
         self.assertNotIn('group_test_delete_01', result.output)
 
@@ -129,11 +125,11 @@ def test_delete(self):
         self.assertEqual(group.count(), 2)
 
         # Calling delete on a group without the `--clear` option should raise
-        result = self.cli_runner.invoke(group_delete, ['--force', 'group_test_delete_02'])
+        result = self.cli_runner.invoke(cmd_group.group_delete, ['--force', 'group_test_delete_02'])
         self.assertIsNotNone(result.exception, result.output)
 
         # With `--clear` option should delete group and nodes
-        result = self.cli_runner.invoke(group_delete, ['--force', '--clear', 'group_test_delete_02'])
+        result = self.cli_runner.invoke(cmd_group.group_delete, ['--force', '--clear', 'group_test_delete_02'])
         self.assertClickResultNoException(result)
 
         with self.assertRaises(exceptions.NotExistent):
@@ -141,7 +137,7 @@ def test_delete(self):
 
     def test_show(self):
         """Test `verdi group show` command."""
-        result = self.cli_runner.invoke(group_show, ['dummygroup1'])
+        result = self.cli_runner.invoke(cmd_group.group_show, ['dummygroup1'])
         self.assertClickResultNoException(result)
 
         for grpline in [
@@ -149,6 +145,29 @@ def test_show(self):
         ]:
             self.assertIn(grpline, result.output)
 
+    def test_show_limit(self):
+        """Test `--limit` option of the `verdi group show` command."""
+        label = 'test_group_limit'
+        nodes = [orm.Data().store(), orm.Data().store()]
+        group = orm.Group(label=label).store()
+        group.add_nodes(nodes)
+
+        # Default should include all nodes in the output
+        result = self.cli_runner.invoke(cmd_group.group_show, [label])
+        self.assertClickResultNoException(result)
+
+        for node in nodes:
+            self.assertIn(str(node.pk), result.output)
+
+        # Repeat test with `limit=1`, use also the `--raw` option to only display nodes
+        result = self.cli_runner.invoke(cmd_group.group_show, [label, '--limit', '1', '--raw'])
+        self.assertClickResultNoException(result)
+
+        # The current `verdi group show` does not support ordering so we cannot rely on that for now to test if only
+        # one of the nodes is shown
+        self.assertEqual(len(result.output.strip().split('\n')), 1)
+        self.assertTrue(str(nodes[0].pk) in result.output or str(nodes[1].pk) in result.output)
+
     def test_description(self):
         """Test `verdi group description` command."""
         description = 'It is a new description'
@@ -156,22 +175,22 @@ def test_description(self):
         self.assertNotEqual(group.description, description)
 
         # Change the description of the group
-        result = self.cli_runner.invoke(group_description, [group.label, description])
+        result = self.cli_runner.invoke(cmd_group.group_description, [group.label, description])
         self.assertClickResultNoException(result)
         self.assertEqual(group.description, description)
 
         # When no description argument is passed the command should just echo the current description
-        result = self.cli_runner.invoke(group_description, [group.label])
+        result = self.cli_runner.invoke(cmd_group.group_description, [group.label])
         self.assertClickResultNoException(result)
         self.assertIn(description, result.output)
 
     def test_relabel(self):
         """Test `verdi group relabel` command."""
-        result = self.cli_runner.invoke(group_relabel, ['dummygroup4', 'relabeled_group'])
+        result = self.cli_runner.invoke(cmd_group.group_relabel, ['dummygroup4', 'relabeled_group'])
         self.assertIsNone(result.exception, result.output)
 
         # check if group list command shows changed group name
-        result = self.cli_runner.invoke(group_list)
+        result = self.cli_runner.invoke(cmd_group.group_list)
         self.assertClickResultNoException(result)
         self.assertNotIn('dummygroup4', result.output)
         self.assertIn('relabeled_group', result.output)
@@ -182,21 +201,21 @@ def test_add_remove_nodes(self):
         node_02 = orm.CalculationNode().store()
         node_03 = orm.CalculationNode().store()
 
-        result = self.cli_runner.invoke(group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
+        result = self.cli_runner.invoke(cmd_group.group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
         self.assertClickResultNoException(result)
 
         # Check if node is added in group using group show command
-        result = self.cli_runner.invoke(group_show, ['dummygroup1'])
+        result = self.cli_runner.invoke(cmd_group.group_show, ['dummygroup1'])
         self.assertClickResultNoException(result)
         self.assertIn('CalculationNode', result.output)
         self.assertIn(str(node_01.pk), result.output)
 
         # Remove same node
-        result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
+        result = self.cli_runner.invoke(cmd_group.group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
         self.assertIsNone(result.exception, result.output)
 
         # Check if node is added in group using group show command
-        result = self.cli_runner.invoke(group_show, ['-r', 'dummygroup1'])
+        result = self.cli_runner.invoke(cmd_group.group_show, ['-r', 'dummygroup1'])
         self.assertClickResultNoException(result)
         self.assertNotIn('CalculationNode', result.output)
         self.assertNotIn(str(node_01.pk), result.output)
@@ -206,7 +225,7 @@ def test_add_remove_nodes(self):
         group.add_nodes([node_01, node_02, node_03])
         self.assertEqual(group.count(), 3)
 
-        result = self.cli_runner.invoke(group_remove_nodes, ['--force', '--clear', '--group=dummygroup1'])
+        result = self.cli_runner.invoke(cmd_group.group_remove_nodes, ['--force', '--clear', '--group=dummygroup1'])
         self.assertClickResultNoException(result)
         self.assertEqual(group.count(), 0)
 
@@ -224,7 +243,7 @@ def test_copy_existing_group(self):
 
         # Copy using `verdi group copy` - making sure all is successful
         options = [source_label, dest_label]
-        result = self.cli_runner.invoke(group_copy, options)
+        result = self.cli_runner.invoke(cmd_group.group_copy, options)
         self.assertClickResultNoException(result)
         self.assertIn(
             'Success: Nodes copied from group<{}> to group<{}>'.format(source_label, dest_label), result.output,
@@ -238,7 +257,7 @@ def test_copy_existing_group(self):
         self.assertSetEqual(nodes_source_group, nodes_dest_group)
 
         # Copy again, making sure an abort error is raised, since no user input can be made and default is abort
-        result = self.cli_runner.invoke(group_copy, options)
+        result = self.cli_runner.invoke(cmd_group.group_copy, options)
         self.assertIsNotNone(result.exception, result.output)
         self.assertIn(
             'Warning: Destination group<{}> already exists and is not empty.'.format(dest_label), result.output,

From 9ce2183bbac741a230e7ff46149ba6a0fcd6bfde Mon Sep 17 00:00:00 2001
From: Leopold Talirz
Date: Sun, 22 Mar 2020 22:48:14 +0100
Subject: [PATCH 20/54] Docs: consistent use of "plugin" vs "plugin package"
 terminology (#3799)

* Apply naming convention of "plugin" vs "plugin package" consistently
  across the whole documentation

* Update plugin design guidelines with suggestions on how to think about
  which information to store where and further improvements

---
 docs/source/concepts/calculations.rst          |   2 +-
 docs/source/developer_guide/core/caching.rst   |   2 +-
 .../source/developer_guide/design/changes.rst  |  31 +++-
 docs/source/developer_guide/plugins.rst        |   1 -
 .../source/developer_guide/plugins/basics.rst  | 135 ++++++++++--------
 .../developer_guide/plugins/documenting.rst    |  10 +-
 .../developer_guide/plugins/entry_points.rst   |  83 +++--------
 .../developer_guide/plugins/plugin_tests.rst   |  32 +++--
 .../developer_guide/plugins/publish.rst        |  60 +++-----
 .../developer_guide/plugins/quickstart.rst     |  11 +-
 .../developer_guide/plugins/update_plugin.rst  | 101 -------------
 docs/source/get_started/index.rst              |  10 +-
 docs/source/install/updating_installation.rst  |  47 +++---
 docs/source/working/functions.rst              |   2 +-
 14 files changed, 210 insertions(+), 317 deletions(-)
 delete mode 100644 docs/source/developer_guide/plugins/update_plugin.rst

diff --git a/docs/source/concepts/calculations.rst b/docs/source/concepts/calculations.rst
index 390327fd93..a79126897c 100644
--- a/docs/source/concepts/calculations.rst
+++ b/docs/source/concepts/calculations.rst
@@ -147,7 +147,7 @@ When a calculation job is launched, the engine will take it roughly through the
  * **Upload**: the calculation job implementation is used to transform the input nodes into the required input files, which are uploaded to a 'working' directory on the target machine
  * **Submit**: to execute the calculation, a job is submitted to the scheduler of the computer on which the input `code` is configured.
  * **Update**: the engine will query the scheduler to check for the status of the calculation job
- * **Retrieve**: once the job has finished, the engine will retrieve the output files, specified by the plugin and store them in a node attached as an output node to the calculation
+ * **Retrieve**: once the job has finished, the engine will retrieve the output files, specified by the calculation plugin and store them in a node attached as an output node to the calculation
 
 All of these tasks require the engine to interact with the computer, or machine, that will actually run the external code.
 Since the :py:class:`~aiida.orm.nodes.data.code.Code` that is used as an input for the calculation job, which is configured for a specific :py:class:`~aiida.orm.computers.Computer`, the engine knows exactly how to execute all these tasks.
diff --git a/docs/source/developer_guide/core/caching.rst b/docs/source/developer_guide/core/caching.rst
index 29bd390038..9853889b8b 100644
--- a/docs/source/developer_guide/core/caching.rst
+++ b/docs/source/developer_guide/core/caching.rst
@@ -2,7 +2,7 @@
 Caching: implementation details
 +++++++++++++++++++++++++++++++
 
 This section covers some details of the caching mechanism which are not discussed in the :ref:`user guide `.
-If you are developing a plugin and want to modify the caching behavior of your classes, we recommend you read :ref:`this section ` first.
+If you are developing plugins and want to modify the caching behavior of your classes, we recommend you read :ref:`this section ` first.
 
 .. _devel_controlling_hashing:
diff --git a/docs/source/developer_guide/design/changes.rst b/docs/source/developer_guide/design/changes.rst
index adc3988dbc..227549be8e 100644
--- a/docs/source/developer_guide/design/changes.rst
+++ b/docs/source/developer_guide/design/changes.rst
@@ -95,4 +95,33 @@ In particular we will strive to:
 - if we are forced to change it anyway, deprecate a signifcant amount of time in advance
 - for backwards incompatible changes, increase the major version
 
-For better clarity, we are :ref:`curating a list of classes and functions` (exposed at the second level) that are intended to be public and for which the above policy will be enforced
\ No newline at end of file
+For better clarity, we are :ref:`curating a list of classes and functions` (exposed at the second level) that are intended to be public and for which the above policy will be enforced
+
+Version 0.9.0
++++++++++++++
+
+The plugin system
+-----------------
+
+The plugin system was designed with the following goals in mind.
+
+* **Sharing of calculations, workflows and data types**: plugins are bundled in a python package, distributed as a zip source archive, python ``egg`` or PyPI package. There is extensive documentation available for how to distribute python packages `here `_.
+
+* **Ease of use**: plugins are listed on the `AiiDA plugin registry `_ and can be installed with one simple command. This process is familiar to every regular python user.
+
+* **Decouple development and update cycles of AiiDA and plugins**: since plugins are separate python packages, they can be developed in a separate code repository and updated when the developer sees fit without a need to update AiiDA. Similarly, if AiiDA is updated, plugins may not need to release a new version.
+
+* **Promote modular design in AiiDA development**: separating plugins into their own python packages ensures that plugins can not (easily) access parts of the AiiDA code which are not part of the public API, enabling AiiDA development to stay agile. The same applies to plugins relying on other plugins.
+
+* **Low overhead for developers**: plugin developers can write their extensions the same way they would write any python code meant for distribution.
+
+* **Automatic AiiDA setup and testing of plugins**: installation of complete python environments consisting of many packages can be automated, provided all packages use ``setuptools`` as a distribution tool. This enables use of AiiDA in a service-based way using, e.g., docker images. At the same time it becomes possible to create automated tests for any combination of plugins, as long as the plugins provide test entry points.
+
+
+The chosen approach to plugins has some limitations:
+
+* the interface for entry point objects is enforced implicitly by the way the object is used. It is the responsibility of the plugin developer to test for compliance, especially if the object is not derived from the recommended base classes provided by AiiDA. This is to be clearly communicated in the documentation for plugin developers;
+* The freedom of the plugin developer to name and rename classes ends where the information in question is stored in the database as, e.g., node attributes.
+* The system is designed with the possibility of plugin versioning in mind, however this is not implemented yet.
+* In principle, two different plugins can give the same name to an entry point, creating ambiguity when trying to load the associated objects. Plugin development guidelines in the documentation will advise on how to avoid this problem, and this is addressed via the use of a centralized registry of known AiiDA plugins.
+* Plugins can potentially contain malicious or otherwise dangerous code. In the registry of AiiDA plugins, we try to flag plugins that we know are safe to be used.
diff --git a/docs/source/developer_guide/plugins.rst b/docs/source/developer_guide/plugins.rst
index d8520a507c..46b915622a 100644
--- a/docs/source/developer_guide/plugins.rst
+++ b/docs/source/developer_guide/plugins.rst
@@ -12,4 +12,3 @@ Plugin development
    plugins/documenting
    plugins/plugin_tests
    plugins/publish
-   plugins/update_plugin
diff --git a/docs/source/developer_guide/plugins/basics.rst b/docs/source/developer_guide/plugins/basics.rst
index 31b51f5a6f..5ae649c343 100644
--- a/docs/source/developer_guide/plugins/basics.rst
+++ b/docs/source/developer_guide/plugins/basics.rst
@@ -4,72 +4,28 @@
 Basics
 ======
 
-What a plugin Is
-----------------
+Nomenclature
+------------
 
-An AiiDA plugin is a `python package `_ that provides a set of extensions to AiiDA.
+An AiiDA plugin is an extension of AiiDA, announcing itself to ``aiida-core`` by means of a new :ref:`entry point `.
 
-AiiDA plugins can use :ref:`entry points ` in order to make the ``aiida-core`` package aware of the extensions.
+AiiDA plugins can be bundled and distributed in a `python package `_ that provides a set of extensions to AiiDA.
 
 .. note::
 
-   In the python community, the term 'package' is used rather loosely.
+   The python community uses the term 'package' rather loosely.
    Depending on context, it can refer to a collection of python modules or it may, in addition, include the files necessary for building and installing the package.
 
 .. _packages: https://docs.python.org/2/tutorial/modules.html?highlight=package#packages
 
-Goals
------
-
-The plugin system was designed with the following goals in mind.
-
-* **Sharing of workflows and extensions**: a workflow or extension is written as a python package, distributed as a zip source archive, python ``egg`` or PyPI package. There is extensive documentation available for how to distribute python packages `here `_.
-
-* **Ease of use**: plugins can be found in an online curated list of plugins and installed with one simple command. This process is familiar to every regular python user.
-
-* **Decouple development and update cycles of AiiDA and plugins**: since plugins are separate python packages, they can be developed in a separate code repository and updated when the developer sees fit without a need to update AiiDA. Similarly, if AiiDA is updated, plugins may not need to release a new version.
-
-* **Promote modular design in AiiDA development**: separating plugins into their own python packages ensures that plugins can not (easily) access parts of the AiiDA code which are not part of the public API, enabling AiiDA development to stay agile. The same applies to plugins relying on other plugins.
-
-* **Low overhead for developers**: plugin developers can write their extensions the same way they would write any python code meant for distribution.
-
-* **Automatic AiiDA setup and testing of plugins**: installation of complete python environments consisting of many packages can be automated, provided all packages use ``setuptools`` as a distribution tool. This enables use of AiiDA in a service-based way using, e.g., docker images. At the same it becomes possible to create automated tests for any combination of plugins, as long as the plugins provide test entry points.
-
-
-Design guidelines
-------------------
-
-* **Start simple.**: make use of existing classes like :py:class:`~aiida.orm.nodes.process.calculation.calcjob.CalcJobNode`, :py:class:`~aiida.orm.nodes.data.dict.Dict`, :py:class:`~aiida.orm.nodes.data.singlefile.SinglefileData`, ... Write only what is necessary to pass information from and to AiiDA.
-
-* **Don't break data provenance.**: store *at least* what is needed for full reproducibility.
-
-* **Parse what you want to query for.**: make a list of which information to:
-
-  #. parse into the database for querying (:py:class:`~aiida.orm.nodes.data.dict.Dict`, ...)
-  #. store in files for safe-keeping (:py:class:`~aiida.orm.nodes.data.singlefile.SinglefileData`, ...)
-  #. leave on the remote computer (:py:class:`~aiida.orm.nodes.data.remote.RemoteData`, ...)
-
-* **Expose the full functionality.**: standardization is good but don't artificially limit the power of a code you are wrapping - or your users will get frustrated. If the code can do it, there should be *some* way to do it with your plugin.
-
-
 What a plugin can do
 --------------------
 
-* Add new classes to AiiDA's unified interface, including:
-
-  - calculations
-  - parsers
-  - data types
-  - schedulers
-  - transports
-  - db importers
-  - db exporters
-  - subcommands to some ``verdi`` commands
-
+* Add a new class to AiiDA's :ref:`entry point groups `, including: calculations, parsers, workflows, data types, verdi commands, schedulers, transports and importers/exporters from external databases. This typically involves subclassing the respective base class AiiDA provides for that purpose.
-* Install separate commandline and/or GUI executables
-* Depend on any number of other plugins (the required versions must not clash with AiiDA's requirements)
+* Install new commandline and/or GUI executables
+* Depend on, and build on top of any number of other plugins (as long as their requirements do not clash)
 
 .. _plugins.maynot:
@@ -93,13 +49,70 @@ We will advise on how to proceed.
 
 .. _registry: https://github.com/aiidateam/aiida-registry
 
-Limitations
------------
-
-The chosen approach to plugins has some limitations:
+Design guidelines
+------------------
 
-* In the current version the interface for entry point objects is enforced implicitly by the way the object is used. It is the responsibility of the plugin developer to test for compliance, especially if the object is not derived from the recommended base classes provided by AiiDA. This is to be clearly communicated in the documentation for plugin developers;
-* The freedom of the plugin developer to name and rename classes ends where the information in question is stored in the database as, e.g., node attributes.
-* The system is designed with the possibility of plugin versioning in mind, however this is not implemented yet.
-* In principle, two different plugins can give the same name to an entry point, creating ambiguity when trying to load the associated objects. Plugin development guidelines in the documentation will advise on how to avoid this problem, and this is addressed via the use of a centralized registry of known AiiDA plugins.
-* Plugins can potentially contain malicious or otherwise dangerous code. In the registry of AiiDA plugins, we try to flag plugins that we know are safe to be used.
+Wrapping an external code
+.........................
+
+In order to wrap an external simulation code for use in AiiDA, you will need to write a calculation input plugin (subclassing the :py:class:`~aiida.engine.CalcJob` class) and an output parser plugin (subclassing the :py:class:`~aiida.parsers.Parser` class):
+
+ * | **Start simple.**
+   | Make use of existing classes like :py:class:`~aiida.orm.nodes.data.dict.Dict`, :py:class:`~aiida.orm.nodes.data.singlefile.SinglefileData`, ...
+   | Write only what is necessary to pass information from and to AiiDA.
+ * | **Don't break data provenance.**
+   | Store *at least* what is needed for full reproducibility.
+ * | **Parse what you want to query for.**
+   | Make a list of which information to:
+
+   #. parse into the database for querying (:py:class:`~aiida.orm.nodes.data.dict.Dict`, ...)
+   #. store in files for safe-keeping (:py:class:`~aiida.orm.nodes.data.singlefile.SinglefileData`, ...)
+   #. leave on the remote computer (:py:class:`~aiida.orm.nodes.data.remote.RemoteData`, ...)
+
+ * | **Expose the full functionality.**
+   | Standardization is good but don't artificially limit the power of a code you are wrapping - or your users will get frustrated.
+   | If the code can do it, there should be *some* way to do it with your plugin.
+
+ * | **Don't rely on AiiDA internals.**
+   | AiiDA's :ref:`public python API` includes anything that you can import via ``from aiida.module import thing``.
+   | Functionality at deeper nesting levels is not considered part of the public API and may change between minor AiiDA releases, forcing you to update your plugin.
+
+Folder structure
+................
+
+While it is up to you to decide the folder structure for your plugin, here is what a typical AiiDA plugin package may look like (see also the `aiida-diff`_ demo plugin)::
+
+      aiida-mycode/          - distribution folder
+         aiida_mycode/       - toplevel package (from aiida_mycode import ..)
+            __init__.py
+            calculations/
+               __init__.py
+               mycode.py     - contains MycodeCalculation
+            parsers/
+               __init__.py
+               mycode.py     - contains MycodeParser
+            data/
+               __init__.py
+               mydat.py      - contains MyData (supports code specific format)
+            commands/
+               __init__.py
+               mydat.py      - contains visualization subcommand for MyData
+            workflows/
+               __init__.py
+               mywf.py       - contains a basic workflow using mycode
+            ...
+         setup.py            - install script
+         setup.json          - install configuration
+         ...
+
+A minimal plugin package instead might look like::
+
+      aiida-minimal/
+         aiida_minimal/
+            __init__.py
+            simpledata.py
+         setup.py
+         setup.json
+
+
+.. _aiida-diff: https://github.com/aiidateam/aiida-diff
diff --git a/docs/source/developer_guide/plugins/documenting.rst b/docs/source/developer_guide/plugins/documenting.rst
index ad2b03168a..5d9baf2411 100644
--- a/docs/source/developer_guide/plugins/documenting.rst
+++ b/docs/source/developer_guide/plugins/documenting.rst
@@ -1,8 +1,8 @@
-====================
-Documenting a plugin
-====================
+===========================
+Documenting plugin packages
+===========================
 
-If you used the `AiiDA plugin cutter`_, your plugin already comes with a basic
+If you used the `AiiDA plugin cutter`_, your plugin package already comes with a basic
 documentation that just needs to be adjusted to your needs.
 
 #. Install the ``docs`` extra::
 
@@ -29,7 +29,7 @@ documentation that just needs to be adjusted to your needs.
    requirements file ``docs/requirements_for_rtd.txt`` and the Python configuration file ``docs/source/conf.py`` in Admin => Advanced settings.
 
-Note: When updating the plugin to a new version, remember to update the
+Note: When updating the plugin package to a new version, remember to update the
 version number both in ``setup.json`` and ``aiida_mycode/__init__.py``.
 
 .. _aiida plugin cutter: https://github.com/aiidateam/aiida-plugin-cutter
diff --git a/docs/source/developer_guide/plugins/entry_points.rst b/docs/source/developer_guide/plugins/entry_points.rst
index 13013d9826..ee636ffb50 100644
--- a/docs/source/developer_guide/plugins/entry_points.rst
+++ b/docs/source/developer_guide/plugins/entry_points.rst
@@ -15,59 +15,22 @@ the entry point specifications are written to a file inside the distribution's
 can find these entry points by distribution, group and/or name and load the data structure to which it points.
 
-This is the way AiiDA finds and loads classes provided by plugins.
+This is the way AiiDA finds plugins and loads the functionality they provide.
 
 .. _Entry points: https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins
 
+.. _plugins.aiida_entry_points:
+
 AiiDA Entry Points
 -------------------
 
-.. _aiida plugin template: https://github.com/aiidateam/aiida-plugin-template
-
-This document contains a list of entry point groups AiiDA uses, with an example
-usage for each.
-In the following, we assume the following folder structure::
-
-    aiida-mycode/ - distribution folder
-    aiida_mycode/ - toplevel package (from aiida_myplug import ..)
- __init__.py - calcs/ - __init__.py - mycode.py - contains MycodeCalculation - parsers/ - __init__.py - mycode.py - contains MycodeParser - data/ - __init__.py - mydat.py - contains MyData (supports code specific format) - commands/ - __init__.py - mydat.py - contains visualization subcommand for MyData - workflows/ - __init__.py - mywf.py - contains a basic workflow using mycode - ... - setup.py - install script - setup.json - install configuration - ... - - -For a plugin that uses this folder structure, see the `aiida plugin template`_. +AiiDA defines a set of entry point groups that it will search for new functionality provided by plugins. +You can list those groups and their contents via:: -Note, however, that the folder structure inside ``aiida-mycode/`` is entirely up to you. -A very simple plugin might look like:: + verdi plugin list # list all groups + verdi plugin list aiida.calculations # show contents of one group - aiida-mysimple/ - aiida_mysimple/ - __init__.py - simpledata.py - setup.py - setup.json - - -The plugin has to tell AiiDA where to look for the classes to be used as -calculations, parsers, transports, etc. This is done inside ``setup.json`` by way -of the ``entry_points`` keyword:: +Plugin packages can add new entry points through the ``entry_points`` field in the ``setup.json`` file:: ... entry_points={ @@ -77,18 +40,16 @@ of the ``entry_points`` keyword:: ], ... -It is given as a dictionary containing entry point group names as keywords. The list for each entry point group contains entry point specifications. - -A specification in turn is given as a string and consists of two parts, a name and an import path describing where the class is to be imported from. The two parts are sparated by an `=` sign:: +Here, ```` can be any of the groups shown in the output of ``verdi plugin list``, and the ```` contains the entry point name and the path to the Python object it points to:: "mycode.mydat = aiida_mycode.data.mydat:MyData" -We *strongly* suggest to start the name of each entry point with the name of -the plugin, ommitting the leading 'aiida-'. -In our example this leads to entry specifications like ``"mycode. = "``, just like the above example. -Exceptions to this rule are schedulers, transports and potentially data ones. Further exceptions can be tolerated in order to provide backwards compatibility if the plugin was in use before aiida-0.9 and its modules were installed in locations which does not make it possible to follow this rule. +We *strongly* suggest to start the name of each entry point with the name of the plugin package (omitting the 'aiida-' prefix). +For a package ``aiida-mycode``, this leads to specifications like ``"mycode. = "``. +Exceptions to this rule can be tolerated if required for backwards compatibility. + +Below, we list the entry point groups defined and searched by AiiDA. -Below, a list of valid entry points recognized by AiiDA follows. ``aiida.calculations`` ---------------------- @@ -165,7 +126,7 @@ Usage:: ``aiida.workflows`` ------------------- -For AiiDA workflows. Instead of putting a workflow somewhere under the ``aiida.workflows`` package, it can now be packaged as a plugin and exposed to aiida as follows: +Package AiiDA workflows as follows: Spec:: @@ -194,8 +155,7 @@ Usage:: ``aiida.cmdline`` ----------------- -For subcommands to verdi commands like ``verdi data mydata``. -Plugin support for commands is possible due to using `click`_. 
+``verdi`` uses the `click`_ framework, which makes it possible to add new subcommands to existing verdi commands, such as ``verdi data mydata``.
 
 AiiDA expects each entry point to be either a ``click.Command`` or
 ``click.CommandGroup``.
 
@@ -230,7 +190,7 @@ Usage:
 
 ``aiida.tools.dbexporters``
 ---------------------------
 
-If your plugin adds support for exporting to an external database, use this entry point to have aiida find the module where you define the necessary functions.
+If your plugin package adds support for exporting to an external database, use this entry point to have aiida find the module where you define the necessary functions.
 
 .. Not sure how dbexporters work
 .. .. Spec::
 
@@ -244,7 +204,7 @@ If your plugin adds support for exporting to an external entr
 ``aiida.tools.dbimporters``
 ---------------------------
 
-If your plugin adds support for importing from an external database, use this entry point to have aiida find the module where you define the necessary functions.
+If your plugin package adds support for importing from an external database, use this entry point to have aiida find the module where you define the necessary functions.
 
 .. .. Spec::
 ..
@@ -259,7 +219,7 @@ If your plugin adds support for importing from an external en
 ``aiida.schedulers``
 --------------------
 
-For scheduler plugins. Note that the entry point name is not prefixed by the plugin name. This is because typically a scheduler should be distributed in a plugin on its own, and only one plugin per scheduler should be necessary.
+We recommend naming the plugin package after the scheduler (e.g. ``aiida-myscheduler``), so that the entry point name can simply equal the name of the scheduler:
 
 Spec::
 
@@ -280,7 +240,8 @@ Usage: The scheduler is used in the familiar way by entering 'myscheduler' as th
 
 ``aiida.transports``
 --------------------
 
-Like schedulers, transports are supposed to be distributed in a separate plugin. Therefore we will again omit the plugin's name in the entry point name.
+``aiida-core`` ships with two modes of transporting files and folders to remote computers: ``ssh`` and ``local`` (stub for when the remote computer is actually the same).
+We recommend naming the plugin package after the mode of transport (e.g. ``aiida-mytransport``), so that the entry point name can simply equal the name of the transport:
 
 Spec::
 
@@ -301,7 +262,7 @@ Usage::
 
     from aiida.plugins import TransportFactory
     transport = TransportFactory('mytransport')
 
-Jus like one would expect, when a computer is setup, ``mytransport`` can be given as the transport option.
+When setting up a new computer, specify ``mytransport`` as the transport mode.
 
 .. _click: https://click.pocoo.org/6/
 .. _aiida-verdi: https://github.com/DropD/aiida-verdi
diff --git a/docs/source/developer_guide/plugins/plugin_tests.rst b/docs/source/developer_guide/plugins/plugin_tests.rst
index a5e6c832d7..105b71e7c6 100644
--- a/docs/source/developer_guide/plugins/plugin_tests.rst
+++ b/docs/source/developer_guide/plugins/plugin_tests.rst
@@ -3,8 +3,23 @@
 Testing AiiDA plugins
 =====================
 
-When developing a plugin it is important to write tests.
-We recommend using the `pytest`_ framework, while the `unittest`_ framework is also supported.
+We highly recommend writing tests for your AiiDA plugins and running continuous integration tests using free platforms like `GitHub Actions `_.
+ +We recommend the following folder structure for AiiDA plugin packages:: + + aiida-mycode/ - distribution folder + aiida_mycode/ - plugin package + tests/ - tests directory (possibly with subdirectories) + +.. note:: + Keeping the tests outside the plugin package keeps the distribution of your plugin package light. + +.. _ghactions: https://github.com/features/actions + +Using the pytest framework +-------------------------- + +We recommend the `pytest`_ framework for testing AiiDA plugins. One concern when running tests for AiiDA plugins is to separate the test environment from your production environment. Depending on the kind of test, each should even be run against a fresh AiiDA database. @@ -33,16 +48,10 @@ If you prefer to run tests on an existing profile, say ``test_profile``, simply In order to prevent accidental data loss, AiiDA only allows to run tests on profiles whose name starts with ``test_``. - .. _pytest: https://pytest.org .. _unittest: https://docs.python.org/library/unittest.html .. _fixture: https://docs.pytest.org/en/latest/fixture.html -Using the pytest framework --------------------------- - -We recommend the `pytest`_ framework for testing AiiDA plugins. - AiiDA's fixtures ^^^^^^^^^^^^^^^^ @@ -58,9 +67,9 @@ For example: * The :py:func:`~aiida.manage.tests.pytest_fixtures.clear_database` fixture depends on the :py:func:`~aiida.manage.tests.pytest_fixtures.aiida_profile` fixture and tells the received :py:class:`~aiida.manage.tests.TestManager` instance to reset the database. This fixture lets each test start in a fresh AiiDA environment. * The :py:func:`~aiida.manage.tests.pytest_fixtures.temp_dir` fixture returns a temporary directory for file operations and deletes it after the test is finished. - * ... you may want to add your own fixtures tailored for your plugin to set up specific ``Data`` nodes & more. + * ... you may want to add your own fixtures tailored for your plugins to set up specific ``Data`` nodes & more. -In order to make these fixtures available to your tests, add them to your ``conftest.py`` file at the root level of your plugin as follows:: +In order to make these fixtures available to your tests, add them to your ``conftest.py`` file at the root level of your plugin package as follows:: import pytest pytest_plugins = ['aiida.manage.tests.pytest_fixtures'] @@ -92,7 +101,7 @@ You can now start writing tests e.g. in a ``test_calculations.py`` file:: # check outputs of calculation assert result['...'] == ... -Feel free to check out the tests of the `aiida-diff`_ demo plugin. +Feel free to check out the tests of the `aiida-diff`_ demo plugin package. .. _conftest: https://docs.pytest.org/en/stable/fixture.html?highlight=conftest#conftest-py-sharing-fixture-functions .. _aiida-diff: https://github.com/aiidateam/aiida-diff/ @@ -115,7 +124,6 @@ Using the unittest framework The ``unittest`` package is included in the python standard library and is widely used despite its limitations. -It is also still used for testing ``aiida-core``. In analogy to the fixtures of ``pytest``, for ``unittest`` we provide a :py:class:`aiida.manage.tests.unittest_classes.PluginTestCase` class that your test cases can inherit from. 
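For orientation, here is a minimal sketch of what such a ``unittest``-style test could look like. The ``MyData`` class and the ``mycode.mydat`` entry point are hypothetical names used only for illustration, and running the case through the ``TestRunner`` from the same module is an assumption of this sketch (plain ``unittest`` discovery would not set up the required test profile)::

    import unittest

    from aiida.manage.tests.unittest_classes import PluginTestCase, TestRunner


    class TestMyData(PluginTestCase):
        """Tests for a hypothetical ``MyData`` class, loaded via its entry point."""

        def setUp(self):
            from aiida.plugins import DataFactory
            self.my_data_cls = DataFactory('mycode.mydat')  # hypothetical entry point

        def test_store(self):
            """Check that a node can be stored in the test database."""
            node = self.my_data_cls()
            node.store()
            self.assertIsNotNone(node.pk)


    if __name__ == '__main__':
        # PluginTestCase subclasses are run through the TestRunner, which creates
        # (and tears down) the temporary test profile for them.
        TestRunner().run(unittest.defaultTestLoader.loadTestsFromTestCase(TestMyData))

As with the pytest fixtures, the test profile is kept separate from any production profile, so the tests are free to create and delete data.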
diff --git a/docs/source/developer_guide/plugins/publish.rst b/docs/source/developer_guide/plugins/publish.rst index 017ea480b1..e3c03d6648 100644 --- a/docs/source/developer_guide/plugins/publish.rst +++ b/docs/source/developer_guide/plugins/publish.rst @@ -1,41 +1,27 @@ -=================== -Publishing a plugin -=================== +=========================== +Publishing a plugin package +=========================== .. _plugins.get_listed: 1. Choose a name ---------------- -The naming convention for AiiDA plugins is ``aiida-mycode`` for the plugin -and ``aiida_mycode`` for the corresponding python package, leading to the -following folder structure:: +The naming convention for AiiDA plugin packages is ``aiida-mycode`` for the plugin distribution on `PyPI`_ and ``aiida_mycode`` for the corresponding python package, leading to the following folder structure:: aiida-mycode/ aiida_mycode/ __init__.py -This marks your plugin as an AiiDA package and makes it easy to find on package indices like `PyPI`_. - **Note:** Python packages cannot contain dashes, thus the underscore. -2. Get Your Plugin Listed +2. Add to plugin registry ------------------------- -AiiDA plugins should be listed on the AiiDA plugin `registry`_ to -avoid name-clashes with other plugins. +AiiDA plugin packages should be listed on the AiiDA plugin `registry`_ to avoid name-clashes with other plugins. -If you wish to get your plugin listed on the official registry for AiiDA -plugins, you will provide the following keyword arguments as key-value pairs in -a ``setup.json`` or ``setup.yaml``. It is recommended to have setup.py -read the keyword arguments from that file:: - - aiida-myplugin/ - aiida_myplugin/ - ... - setup.py - setup.json # or setup.yaml +If you wish to get your plugin package listed on the official plugin registry, please provide the following keyword arguments as key-value pairs in a ``setup.json`` or ``setup.yaml`` file. * ``name`` * ``author`` @@ -49,18 +35,20 @@ read the keyword arguments from that file:: * ``entry_points`` * ``scripts`` (optional) -Now, fork the plugin `registry`_ repository, fill in your plugin's information -in the same fashion as the plugins already registered, and create a pull -request. The registry will allow users to discover your plugin using ``verdi -plugin search`` (note: the latter verdi command is not yet implemented in -AiiDA). +It is recommended to have your ``setup.py`` file simply read the keyword arguments from the ``setup.json``:: -3. Get Your Plugin On PyPI --------------------------- + aiida-myplugin/ + aiida_myplugin/ + ... + setup.py + setup.json # or setup.yaml + +Now, fork the plugin `registry`_ repository, fill in the information for your plugin package, and create a pull request. -For packaging and distributing AiiDA plugins, we recommend to follow existing -`guidelines for packaging python `_, -which include making the plugin available on the `python package index `_. +3. Upload to PyPI +----------------- + +For packaging and distributing AiiDA plugins, we recommend to follow existing `guidelines for packaging python `_, which include making the plugin available on the `python package index `_. This makes it possible for users to simply ``pip install aiida-myplugin``. Our suggested layout:: @@ -74,14 +62,8 @@ Our suggested layout:: setup.py installation script setup.json contains requirements, metainformation, etc -Note: In principle, ``aiida-compute`` could contain and install multiple packages. 
-
-Incidentally a distribution can contain and install more than one package at a time.
-
-The most user-friendly way to distribute a package is to create such a
-distribution and uploading it to `PyPI`_. Users then can simply install the
-package(s) by running ``pip ``.
-
+Note: In principle, the ``aiida-compute`` folder could contain and install multiple python packages.
+We recommend against this practice, unless there are good reasons to keep multiple packages in the same repository.
 
 .. _pypi: https://pypi.python.org
 .. _packaging: https://packaging.python.org/distributing/#configuring-your-project
diff --git a/docs/source/developer_guide/plugins/quickstart.rst b/docs/source/developer_guide/plugins/quickstart.rst
index a57f163986..16dfb97a0a 100644
--- a/docs/source/developer_guide/plugins/quickstart.rst
+++ b/docs/source/developer_guide/plugins/quickstart.rst
@@ -6,21 +6,21 @@ You have a code and would like to use it from AiiDA?
 You need a special data type, parser, scheduler, ... that is not available?
 Then you'll need to write an **AiiDA plugin**.
 
-Let's get started with creating a new plugin ``aiida-mycode``.
+Let's get started with creating a new plugin package ``aiida-mycode``.
 
-    0. At least once, :ref:`install an existing aiida plugin ` to make sure this works.
+    0. At least once, :ref:`install an existing aiida plugin package ` to make sure this works.
 
     1. Check on the `aiida plugin registry `_ that your desired plugin name is still available
 
-    #. Use the `AiiDA plugin cutter `_ to jumpstart your plugin::
+    #. Use the `AiiDA plugin cutter `_ to jumpstart your plugin package::
 
        pip install cookiecutter
        cookiecutter https://github.com/aiidateam/aiida-plugin-cutter.git
        # follow instructions ...
        cd aiida-mycode
 
-    #. Install your new plugin::
+    #. Install your new plugin package::
 
        workon # if you have one
       pip install -e .
 
@@ -30,8 +30,7 @@ That's it - now you can ``import aiida_mycode`` and start developing your plugin
 A few things to keep in mind:
 
    * Be sure to update the `setup.json`_, in particular the license and version number
-   * :ref:`Get your plugin listed ` as soon as possible to
-     reserve your plugin name and to inform others of your ongoing development
+   * :ref:`Get your plugin package listed ` as soon as possible to reserve your plugin name and to inform others of your ongoing development
 
 .. _setup.json: https://github.com/aiidateam/aiida-plugin-template/blob/master/setup.json
 .. _registry: https://github.com/aiidateam/aiida-registry
diff --git a/docs/source/developer_guide/plugins/update_plugin.rst b/docs/source/developer_guide/plugins/update_plugin.rst
deleted file mode 100644
index 4685c38a62..0000000000
--- a/docs/source/developer_guide/plugins/update_plugin.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-Updating an Existing Plugin
-============================
-
-This document describes the process of updating an AiiDA plugin written using
-the old plugin system (pre AiiDA version 0.8) to the current plugin system.
-
-Once the update is complete, make sure to :ref:`get your plugin listed `.
-
-Folder structure
------------------
-
-Old plugin system::
-
-    aiida/
-      orm/
-        calculation/
-          job/
-            myplugin/
-              __init__.py
-              mycalc.py
-              myothercalc.py
-      parsers/
-        plugins/
-          myplugin/
-            __init__.py
-            myparser.py
-            myotherparser.py
-      data/
-        myplugin/
-          __init__.py
-          mydata.py
-      tools/
-        codespecific/
-          myplugin/
-            __init__.py
-            ...
- -Turns into:: - - aiida-myplugin/ - aiida_myplugin/ - __init__.py - calculations/ - __init__.py - mycalc.py - myothercalc.py - parsers/ - __init__.py - myparser.py - myotherparser.py - data/ - __init__.py - mydata.py - tools/ - __init__.py - ... - -Entry points -------------- - -If you are converting a plugin from the old system to new new system, the name -of your entry points must correspond to where your plugin module was installed -inside the AiiDA package. *Otherwise, your plugin will not be backwards -compatible*. For example, if you were using a calculation as:: - - from aiida.orm.calculation.job.myplugin.mycalc import MycalcCalculation - # or - CalculationFactory('myplugin.mycalc') - -Then in ``setup.py``:: - - setup( - ..., - entry_points: { - 'aiida.calculations': [ - 'myplugin.mycalc = aiida_myplugin.calculations.mycalc:MycalcCalculation' - ], - ... - }, - ... - ) - -As you see, the name of the entry point matches the argument to the factory method. - -import statements ------------------- - -If you haven't done so already, now would be a good time to search and replace -any import statements that refer to the old locations of your modules inside -AiiDA. We recommend to change them to absolute imports from your top-level -package: - -old:: - - from aiida.tools.codespecific.myplugin.thistool import this_convenience_func - -new:: - - from aiida_myplugin.tools.thistool import this_convenience_func - - diff --git a/docs/source/get_started/index.rst b/docs/source/get_started/index.rst index b71a7f837f..7b103a1b7b 100644 --- a/docs/source/get_started/index.rst +++ b/docs/source/get_started/index.rst @@ -6,21 +6,21 @@ Install Plugins While the ``aiida-core`` package provides the workflow engine and database model, it relies on *plugins* for connecting to specific simulation codes. -Search for AiiDA plugins on the `AiiDA plugin registry `_. If a plugin for your code does not yet exist, you may need to :ref:`write one `. +Search for AiiDA plugin packages on the `AiiDA plugin registry `_. +If a plugin package for your code does not yet exist, you may need to :ref:`write one `. -Most plugins are hosted on the `Python Package Index `_ and can be installed as follows:: +Most plugin packages are hosted on the `Python Package Index `_ and can be installed as follows:: pip install aiida-diff # install 'aiida-diff' plugin from PyPI reentry scan -r aiida # notify aiida of new entry points -If no PyPI package is available for a plugin, you can install -the plugin directly from a source code repository, e.g.:: +If no PyPI package is available for a plugin, you can install the plugin package directly from a source code repository, e.g.:: git clone https://github.com/aiidateam/aiida-diff pip install aiida-diff # install 'aiida-diff' plugin from local folder reentry scan -r aiida # notify aiida of new entry points -After installing new plugins, **restart the daemon** using ``verdi daemon restart``. +After installing new plugin packages, update the reentry cache using ``reentry scan`` and **restart the daemon** using ``verdi daemon restart``. .. note:: The reentry cache can also be updated from python when access to the commandline is not available (e.g. in jupyter notebooks). diff --git a/docs/source/install/updating_installation.rst b/docs/source/install/updating_installation.rst index f687d46e0f..8a77d12b73 100644 --- a/docs/source/install/updating_installation.rst +++ b/docs/source/install/updating_installation.rst @@ -3,45 +3,47 @@ ************** Updating AiiDA ************** - .. 
_updating_instructions:
 
-Instructions
-============
-
-.. warning::
+Generic update instructions
+===========================
 
-    The following instructions are how to update from ``v0.12.*`` to ``v1.0.0``.
-    Each version increase may come with its own necessary migrations and you should only ever update the version by one at a time.
-    To find the instructions for older versions, refer to the :ref:`table below`.
-
-1. Finish all running calculations. After migrating your database, you will not be able to resume unfinished calculations. Data of finished calculations will of course be automatically migrated.
-2. Finish all running legacy workflows. The legacy workflows are completely deprecated and all data will be removed from your database, so make sure to create a backup (see point 5).
-3. Enter the python environment where AiiDA is installed
-4. Stop the daemon using ``verdi daemon stop``
-5. Create a backup of your :ref:`database and repository`
+1. Enter the python environment where AiiDA is installed
+2. Finish all running calculations. After migrating your database, you will not be able to resume unfinished calculations. Data of finished calculations will of course be automatically migrated.
+3. Stop the daemon using ``verdi daemon stop``
+4. :ref:`Create a backup of your database and repository`
 
 .. warning::
 
-    Once you have migrated your database, you can no longer go back to an older version of ``aiida-core``, unless you restore your database and repository from a backup of course.
-    In addition, the data migration can take quite some time depending on the size of your database, so please be patient.
-    Big databases of multiple millions of nodes can take up to a few hours to migrate.
+    Once you have migrated your database, you can no longer go back to an older version of ``aiida-core`` (unless you restore your database and repository from a backup).
 
-6. Update your ``aiida-core`` installation
+5. Update your ``aiida-core`` installation
 
     - If you have installed AiiDA through ``pip`` simply run: ``pip install --upgrade aiida-core``
     - If you have installed from the git repository using ``pip install -e .``, first delete all the ``.pyc`` files (``find . -name "*.pyc" -delete``) before updating your branch.
 
-7. Finally, after having upgraded the installation, migrate your database with ``verdi -p database migrate``
+6. Migrate your database with ``verdi -p database migrate``.
+   Depending on the size of your database and the number of migrations to perform, data migration can take time, so please be patient.
 
 After the database migration finishes, you will be able to continue working with your existing data.
-However, :ref:`backwards incompatible changes` were introduced in the python API, so you probably will have to update your code and installed plugins.
+
+.. note::
+    If your update involved a change in the major version number of ``aiida-core``, expect :ref:`backwards incompatible changes` and check whether you also need to update your installed plugin packages.
 
+Updating from 0.12.* to 1.*
+===========================
+
+Besides the generic update instructions, the following applies:
+
+ * Finish all running legacy workflows.
+   The legacy workflows are completely deprecated and all data will be removed from your database, so make sure to create a backup (see point 4).
+ * The upgrade involves several long-running migrations. Migrating databases containing millions of nodes can take a few hours.
+
 ..
_updating_backward_incompatible_changes:
 
-Backwards incompatible changes
-==============================
+Breaking changes from 0.12.* to 1.*
+===================================
 
 The following list covers the most important backward incompatible changes between ``aiida-core==0.12.*`` and ``aiida-core==1.0.0``.
 
@@ -269,6 +271,7 @@ Update instructions for older versions can be found in the documentation of the
  * `0.5.* Django`_
  * `0.4.* Django`_
 
+
 .. _0.11.*: https://aiida-core.readthedocs.io/en/v0.12.2/installation/updating.html#updating-from-0-11-to-0-12-0
 .. _0.10.*: http://aiida-core.readthedocs.io/en/v0.10.0/installation/updating.html#updating-from-0-9-to-0-10-0
 .. _0.9.*: http://aiida-core.readthedocs.io/en/v0.10.0/installation/updating.html#updating-from-0-9-to-0-10-0
diff --git a/docs/source/working/functions.rst b/docs/source/working/functions.rst
index c58f3f6e1b..f06dc59c9c 100644
--- a/docs/source/working/functions.rst
+++ b/docs/source/working/functions.rst
@@ -239,7 +239,7 @@ Likewise, you should not load any existing data from the database through the AP
 A similar problem occurs when importing other python code.
 Practically, it is almost impossible to never import code into process functions, as this would force massive code duplication.
 However, there is still a difference between importing code from the ``aiida-core`` library or the repository in which the process function is hosted, and the importing of a local python file.
-Even though for both cases there can no be guarantee of reproducibility, the former stands a better chance by far, as the version number of the plugin should be recorded.
+Even though for both cases there can be no guarantee of reproducibility, the former stands a better chance by far, as the version number of the plugin package should be recorded.
 The rule of thumb then is to keep the importing of code to a minimum, but if you have to, make sure to make it part of a plugin package with a well-defined version number.
 
 Finally, as mentioned in the introduction, the source file of a process function is stored as a file in the repository for *each execution*.

From 87fdf09c2969f30e0dd35a30de5cb94fd2a8472c Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Tue, 24 Mar 2020 13:41:38 +0100
Subject: [PATCH 21/54] Docs: remove extra `advanced_plotting` from install
 instructions (#3860)

This requirement extra does not exist, and has not existed for a very
long time.
---
 docs/source/install/installation.rst | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst
index 4e6a20b717..da0af649da 100644
--- a/docs/source/install/installation.rst
+++ b/docs/source/install/installation.rst
@@ -79,13 +79,12 @@ There are additional optional packages that you may want to install, which are g
     * ``ssh_kerberos``: adds support for ssh transport authentication through Kerberos
     * ``REST``: allows a REST server to be ran locally to serve AiiDA data
     * ``docs``: tools to build the documentation
-    * ``advanced_plotting``: tools for advanced plotting
     * ``notebook``: jupyter notebook - to allow it to import AiiDA modules
     * ``testing``: python modules required to run the automatic unit tests
 
 In order to install any of these package groups, simply append them as a comma separated list in the ``pip`` install command::
 
     (aiida) $ pip install -e aiida-core[atomic_tools,docs]
 
 ..
note:: If you are installing the optional ``ssh_kerberos`` and you are on Ubuntu you might encounter an error related to the ``gss`` package. To fix this you need to install the ``libffi-dev`` and ``libkrb5-dev`` packages:: From d2af96c3b14506e0ca4023993e91fa79a45e2525 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 24 Mar 2020 15:15:23 +0100 Subject: [PATCH 22/54] Add the `--order-by/--order-direction` options to `verdi group list` (#3858) --- aiida/cmdline/commands/cmd_group.py | 6 +++-- tests/cmdline/commands/test_group.py | 36 +++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/aiida/cmdline/commands/cmd_group.py b/aiida/cmdline/commands/cmd_group.py index 9e0eb26d07..d74e416bd5 100644 --- a/aiida/cmdline/commands/cmd_group.py +++ b/aiida/cmdline/commands/cmd_group.py @@ -235,11 +235,13 @@ def user_defined_group(): default=None, help='add a filter to show only groups for which the name contains STRING' ) +@options.ORDER_BY(type=click.Choice(['id', 'label', 'ctime']), default='id') +@options.ORDER_DIRECTION() @options.NODE(help='Show only the groups that contain the node') @with_dbenv() def group_list( all_users, user_email, all_types, group_type, with_description, count, past_days, startswith, endswith, contains, - node + order_by, order_dir, node ): """Show a list of existing groups.""" # pylint: disable=too-many-branches,too-many-arguments, too-many-locals @@ -290,7 +292,7 @@ def group_list( from aiida.orm import Node query.append(Node, filters={'id': {'==': node.id}}, with_group='group') - query.order_by({Group: {'id': 'asc'}}) + query.order_by({Group: {order_by: order_dir}}) result = query.all() projection_lambdas = { diff --git a/tests/cmdline/commands/test_group.py b/tests/cmdline/commands/test_group.py index dbba9bf3f1..4302420633 100644 --- a/tests/cmdline/commands/test_group.py +++ b/tests/cmdline/commands/test_group.py @@ -17,17 +17,19 @@ class TestVerdiGroup(AiidaTestCase): """Tests for the `verdi group` command.""" - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - for group in ['dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4']: - orm.Group(label=group).store() - def setUp(self): """Create runner object to run tests.""" from click.testing import CliRunner self.cli_runner = CliRunner() + for group in ['dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4']: + orm.Group(label=group).store() + + def tearDown(self): + """Delete all created group objects.""" + for group in orm.Group.objects.all(): + orm.Group.objects.delete(group.pk) + def test_help(self): """Tests help text for all group sub commands.""" options = ['--help'] @@ -96,6 +98,28 @@ def test_list(self): for grp in ['dummygroup1', 'dummygroup2']: self.assertIn(grp, result.output) + def test_list_order(self): + """Test `verdi group list` command with ordering options.""" + orm.Group(label='agroup').store() + + options = [] + result = self.cli_runner.invoke(cmd_group.group_list, options) + self.assertClickResultNoException(result) + group_ordering = [l.split()[1] for l in result.output.split('\n')[3:] if l] + self.assertEqual(['dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4', 'agroup'], group_ordering) + + options = ['--order-by', 'label'] + result = self.cli_runner.invoke(cmd_group.group_list, options) + self.assertClickResultNoException(result) + group_ordering = [l.split()[1] for l in result.output.split('\n')[3:] if l] + self.assertEqual(['agroup', 'dummygroup1', 'dummygroup2', 'dummygroup3', 
'dummygroup4'], group_ordering)
+
+        options = ['--order-by', 'id', '--order-direction', 'desc']
+        result = self.cli_runner.invoke(cmd_group.group_list, options)
+        self.assertClickResultNoException(result)
+        group_ordering = [l.split()[1] for l in result.output.split('\n')[3:] if l]
+        self.assertEqual(['agroup', 'dummygroup4', 'dummygroup3', 'dummygroup2', 'dummygroup1'], group_ordering)
+
     def test_copy(self):
         """Test `verdi group copy` command."""
         result = self.cli_runner.invoke(cmd_group.group_copy, ['dummygroup1', 'dummygroup2'])

From 57b8a5a3b26b5dd4f4edd2194871dafbff9c074c Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Wed, 25 Mar 2020 18:11:35 +0100
Subject: [PATCH 23/54] `ExitCode`: make the exit message parameterizable
 through templates (#3824)

Often one would like to define an exit code for a process where the
general gist of the message is known; however, the exact form might be
slightly situation dependent. Creating individual exit codes for each
slight variation is cumbersome, so we introduce a way to parameterize
the exit message.

We implement a `format` method that formats the message string with the
provided keyword arguments and returns, importantly, a new exit code
instance with the new formatted message.

    exit_code_template = ExitCode(450, '{parameter} is invalid.')
    exit_code_concrete = exit_code_template.format(parameter='some_specific_key')

The `exit_code_concrete` is now identical to `exit_code_template`
except for the message, whose parameters have been replaced.

To be able to implement a new method, we had to turn the `ExitCode` from
a named tuple into a class, but to keep backwards compatibility, we
subclass from a namedtuple, which guarantees that the new class behaves
exactly like a tuple.
---
 aiida/engine/processes/exit_code.py        | 70 ++++++++++++++--------
 aiida/engine/processes/workchains/utils.py |  4 ++
 docs/source/working/functions.rst          |  2 +-
 docs/source/working/workflows.rst          | 37 ++++++++++--
 tests/engine/processes/test_exit_code.py   | 68 +++++++++++++++++++++
 5 files changed, 149 insertions(+), 32 deletions(-)
 create mode 100644 tests/engine/processes/test_exit_code.py

diff --git a/aiida/engine/processes/exit_code.py b/aiida/engine/processes/exit_code.py
index 2178518be6..de660ed5dd 100644
--- a/aiida/engine/processes/exit_code.py
+++ b/aiida/engine/processes/exit_code.py
@@ -8,49 +8,69 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """A namedtuple and namespace for ExitCodes that can be used to exit from Processes."""
-
 from collections import namedtuple
-
 from aiida.common.extendeddicts import AttributeDict
 
 __all__ = ('ExitCode', 'ExitCodesNamespace')
 
-ExitCode = namedtuple('ExitCode', ['status', 'message', 'invalidates_cache'])
-ExitCode.__new__.__defaults__ = (0, None, False)
-"""
-A namedtuple to define an exit code for a :class:`~aiida.engine.processes.process.Process`.
 
-When this namedtuple is returned from a Process._run() call, it will be interpreted that the Process
-should be terminated and that the exit status and message of the namedtuple should be set to the
-corresponding attributes of the node.
 
+class ExitCode(namedtuple('ExitCode', ['status', 'message', 'invalidates_cache'])):
+    """A simple data class to define an exit code for a :class:`~aiida.engine.processes.process.Process`.
+
+    When an instance of this class is returned from a `Process._run()` call, it will be interpreted that the `Process`
+    should be terminated and that the exit status and message of the namedtuple should be set to the corresponding
+    attributes of the node.
+
+    .. note:: this class explicitly sub-classes a namedtuple to not break backwards compatibility and to have it behave
+        exactly as a tuple.
 
-:param status: positive integer exit status, where a non-zero value indicated the process failed, default is `0`
-:type status: int
+    :param status: positive integer exit status, where a non-zero value indicates the process failed, default is `0`
+    :type status: int
 
-:param message: optional message with more details about the failure mode
-:type message: str
+    :param message: optional message with more details about the failure mode
+    :type message: str
 
-:param invalidates_cache: optional flag, indicating that a process should not be used in caching
-:type invalidates_cache: bool
-"""
+    :param invalidates_cache: optional flag, indicating that a process should not be used in caching
+    :type invalidates_cache: bool
+    """
 
+    def format(self, **kwargs):
+        """Create a clone of this exit code where the template message is replaced by the keyword arguments.
+
+        :param kwargs: replacement parameters for the template message
+        :return: `ExitCode`
+        """
+        try:
+            message = self.message.format(**kwargs)
+        except KeyError:
+            template = 'insufficient or incorrect format parameters `{}` for the message template `{}`.'
+            raise ValueError(template.format(kwargs, self.message))
+
+        return ExitCode(self.status, message, self.invalidates_cache)
+
+    def __eq__(self, other):
+        return all(getattr(self, attr) == getattr(other, attr) for attr in ['status', 'message', 'invalidates_cache'])
+
+
+# Set the defaults for the `ExitCode` attributes
+ExitCode.__new__.__defaults__ = (0, None, False)
+
 
 class ExitCodesNamespace(AttributeDict):
-    """
-    A namespace of ExitCode tuples that can be accessed through getattr as well as getitem.
-    Additionally, the collection can be called with an identifier, that can either reference
-    the integer `status` of the ExitCode that needs to be retrieved or the key in the collection
+    """A namespace of `ExitCode` instances that can be accessed through getattr as well as getitem.
+
+    Additionally, the collection can be called with an identifier that can either reference the integer `status` of the
+    `ExitCode` that needs to be retrieved or the key in the collection.
     """
 
     def __call__(self, identifier):
-        """
-        Return a specific exit code identified by either its exit status or label
+        """Return a specific exit code identified by either its exit status or label.
 
-        :param identifier: the identifier of the exit code. If the type is integer, it will be interpreted as
-            the exit code status, otherwise it be interpreted as the exit code label
+        :param identifier: the identifier of the exit code. If the type is integer, it will be interpreted as the exit
+            code status, otherwise it will be interpreted as the exit code label
         :type identifier: str
 
-        :returns: an ExitCode named tuple
+        :returns: an `ExitCode` instance
         :rtype: :class:`aiida.engine.ExitCode`
 
         :raises ValueError: if no exit code with the given label is defined for this process
diff --git a/aiida/engine/processes/workchains/utils.py b/aiida/engine/processes/workchains/utils.py
index 9869aa3a36..45f2158e8b 100644
--- a/aiida/engine/processes/workchains/utils.py
+++ b/aiida/engine/processes/workchains/utils.py
@@ -11,6 +11,7 @@
 from collections import namedtuple
 from functools import partial
 from inspect import getfullargspec
+from types import FunctionType  # pylint: disable=no-name-in-module
 from wrapt import decorator
 
 from ..exit_code import ExitCode
@@ -68,6 +69,9 @@ def process_handler(wrapped=None, *, priority=0, exit_codes=None, enabled=True):
     if wrapped is None:
         return partial(process_handler, priority=priority, exit_codes=exit_codes, enabled=enabled)
 
+    if not isinstance(wrapped, FunctionType):
+        raise TypeError('first argument can only be an instance method, use keywords for decorator arguments.')
+
     if not isinstance(priority, int):
         raise TypeError('the `priority` keyword should be an integer.')
 
diff --git a/docs/source/working/functions.rst b/docs/source/working/functions.rst
index f06dc59c9c..93b676e235 100644
--- a/docs/source/working/functions.rst
+++ b/docs/source/working/functions.rst
@@ -177,7 +177,7 @@ In the case of the example above, it would look something like the following:
 However, in this particular example the exception is not so much an unexpected error, but one we could have considered and have seen coming, so it might be more applicable to simply mark the process as failed.
 To accomplish this, there is the concept of an :ref:`exit status` that can be set on the process, which is an integer that, when non-zero, marks a process in the ``Finished`` state as 'failed'.
 Since the exit status is set as an attribute on the process node, it also makes it very easy to query for failed processes.
-To set a non-zero exit status on a calculation function to indicate it as failed, simply return an instance of the :py:class:`~aiida.engine.processes.exit_code.ExitCode` named tuple.
+To set a non-zero exit status on a calculation function to indicate it as failed, simply return an instance of the :py:class:`~aiida.engine.processes.exit_code.ExitCode` class.
 Time for a demonstration:
 
 .. include:: include/snippets/processes/functions/calcfunction_exit_code.py
diff --git a/docs/source/working/workflows.rst b/docs/source/working/workflows.rst
index 4f7166e530..896ad29707 100644
--- a/docs/source/working/workflows.rst
+++ b/docs/source/working/workflows.rst
@@ -109,8 +109,8 @@ Exit codes
 ----------
 
 To terminate the execution of a work function and mark it as failed, one simply has to return an :ref:`exit code`.
-The :py:class:`~aiida.engine.processes.exit_code.ExitCode` named tuple is constructed with an integer, to denote the desired exit status and an optional message
-When such as exit code is returned, the engine will mark the node of the work function as ``Finished`` and set the exit status and message to the value of the tuple.
+The :py:class:`~aiida.engine.processes.exit_code.ExitCode` class is constructed with an integer, to denote the desired exit status, and an optional message.
+When such an exit code is returned, the engine will mark the node of the work function as ``Finished`` and set the exit status and message to the value of the exit code.
 Consider the following example:
 
 .. code:: python
 
@@ -120,7 +120,7 @@ Consider the following example:
 
         from aiida.engine import ExitCode
         return ExitCode(418, 'I am a teapot')
 
-The execution of the work function will be immediately terminated as soon as the tuple is returned, and the exit status and message will be set to ``418`` and ``I am a teapot``, respectively.
+The execution of the work function will be immediately terminated as soon as the exit code is returned, and the exit status and message will be set to ``418`` and ``I am a teapot``, respectively.
 Since no output nodes are returned, the ``WorkFunctionNode`` node will have no outputs and the value returned from the function call will be an empty dictionary.
 
@@ -483,15 +483,40 @@ In the ``inspect_calculation`` outline, we retrieve the calculation that was sub
 If this returns ``False``, in this example we simply fire a report message and return the exit code corresponding to the label ``ERROR_CALCULATION_FAILED``.
 Note that the specific exit code can be retrieved through the ``WorkChain`` property ``exit_codes``.
 This will return a collection of exit codes that have been defined for that ``WorkChain`` and any specific exit code can then be retrieved by accessing it as an attribute.
-Returning this exit code, which will be an instance of the :py:class:`~aiida.engine.processes.exit_code.ExitCode` named tuple, will cause the work chain to be aborted and the ``exit_status`` and ``exit_message`` to be set on the node, which were defined in the spec.
+Returning this exit code, which will be an instance of the :py:class:`~aiida.engine.processes.exit_code.ExitCode` class, will cause the work chain to be aborted and the ``exit_status`` and ``exit_message`` to be set on the node, which were defined in the spec.
 
 .. note::
 
-    The notation ``self.exit_codes.ERROR_CALCULATION_FAILED`` is just syntactic sugar to retrieve the ``ExitCode`` tuple that was defined in the spec with that error label.
+    The notation ``self.exit_codes.ERROR_CALCULATION_FAILED`` is just syntactic sugar to retrieve the ``ExitCode`` instance that was defined in the spec with that error label.
     Constructing your own ``ExitCode`` directly and returning that from the outline step will have exactly the same effect in terms of aborting the work chain execution and setting the exit status and message.
     However, it is strongly advised to define the exit code through the spec and retrieve it through the ``self.exit_codes`` collection, as that makes it easily retrievable through the spec by the caller of the work chain.
 
-The best part about this method of aborting a work chains execution, is that the exit status can now be used programmatically, by for example a parent work chain.
+The ``message`` attribute of an ``ExitCode`` can also be a string that contains placeholders.
+This is useful when the exit code's message is generic enough to apply to a host of situations, but one would just like to parameterize the exit message.
+To concretize the template message of an exit code, simply call the :meth:`~aiida.engine.processes.exit_code.ExitCode.format` method and pass the parameters as keyword arguments:
+
+.. code:: python
+
+    exit_code_template = ExitCode(450, 'the parameter {parameter} is invalid.')
+    exit_code_concrete = exit_code_template.format(parameter='some_specific_key')
+
+This concept can also be applied within the scope of a process.
+In the process spec, we can declare a generic exit code whose exact message should depend on one or multiple parameters:
+
+.. code:: python
+
+    spec.exit_code(450, 'ERROR_INVALID_PARAMETER', 'the parameter {parameter} is invalid.')
+
+Through the ``self.exit_codes`` collection of a ``WorkChain``, this generic exit code can be easily customized as follows:
+
+.. code:: python
+
+    def inspect_calculation(self):
+        return self.exit_codes.ERROR_INVALID_PARAMETER.format(parameter='some_specific_key')
+
+This is no different than the example before, because ``self.exit_codes.ERROR_INVALID_PARAMETER`` simply returns an instance of ``ExitCode``, which we then call ``format`` on with the substitution parameters.
+
+In conclusion, the best part about using exit codes to abort a work chain's execution is that the exit status can now be used programmatically, by for example a parent work chain.
 Imagine that a parent work chain submitted this work chain.
 After it has terminated its execution, the parent work chain will want to know what happened to the child work chain.
 As already noted in the :ref:`report` section, the report messages of the work chain should not be used.
diff --git a/tests/engine/processes/test_exit_code.py b/tests/engine/processes/test_exit_code.py
new file mode 100644
index 0000000000..2dbdd7abce
--- /dev/null
+++ b/tests/engine/processes/test_exit_code.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""Tests for `aiida.engine.processes.exit_code.ExitCode`."""
+import pytest
+
+from aiida.engine import ExitCode
+
+
+def test_exit_code_defaults():
+    """Test that the defaults are properly set."""
+    exit_code = ExitCode()
+    assert exit_code.status == 0
+    assert exit_code.message is None
+    assert exit_code.invalidates_cache is False
+
+
+def test_exit_code_construct():
+    """Test that the constructor allows to override defaults."""
+    status = 418
+    message = 'I am a teapot'
+    invalidates_cache = True
+
+    exit_code = ExitCode(status, message, invalidates_cache)
+    assert exit_code.status == status
+    assert exit_code.message == message
+    assert exit_code.invalidates_cache == invalidates_cache
+
+
+def test_exit_code_equality():
+    """Test that the equality operator works properly."""
+    exit_code_origin = ExitCode(1, 'message', True)
+    exit_code_clone = ExitCode(1, 'message', True)
+    exit_code_different = ExitCode(2, 'message', True)
+
+    assert exit_code_origin == exit_code_clone
+    assert exit_code_clone != exit_code_different
+
+
+def test_exit_code_template_message():
+    """Test that an exit code with a templated message can be called to replace the parameters."""
+    message_template = 'Wrong parameter {parameter}'
+    parameter_name = 'some_parameter'
+
+    exit_code_base = ExitCode(418, message_template)
+    exit_code_called = exit_code_base.format(parameter=parameter_name)
+
+    # Incorrect placeholder
+    with pytest.raises(ValueError):
+        exit_code_base.format(non_existing_parameter=parameter_name)
+
+    # Missing placeholders
+    with pytest.raises(ValueError):
+        exit_code_base.format()
+
+    assert exit_code_base != exit_code_called  # Calling the exit code should return a new instance
+    assert exit_code_called.message == message_template.format(parameter=parameter_name)
+
+
+def test_exit_code_expand_tuple():
+    """Test that an exit code instance can be expanded in its
attributes like a tuple."""
+    status = 418
+    message = 'I am a teapot'
+    invalidates_cache = True
+
+    status_exp, message_exp, invalidates_cache_exp = ExitCode(status, message, invalidates_cache)
+
+    assert status == status_exp
+    assert message == message_exp
+    assert invalidates_cache == invalidates_cache_exp

From d2af96c3b14506e0ca4023993e91fa79a45e2525 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 27 Mar 2020 12:57:34 +0100
Subject: [PATCH 24/54] Convert argument to str in
 `aiida.common.escaping.escape_for_bash` (#3873)

Without conversion to string, any non-string type will cause the
function to raise an `AttributeError` since it won't have the method
`replace`.
---
 aiida/common/escaping.py      |  2 ++
 tests/common/test_escaping.py | 18 ++++++++++++++++++
 2 files changed, 20 insertions(+)
 create mode 100644 tests/common/test_escaping.py

diff --git a/aiida/common/escaping.py b/aiida/common/escaping.py
index fd6fa6b2c9..64e17b9744 100644
--- a/aiida/common/escaping.py
+++ b/aiida/common/escaping.py
@@ -38,6 +38,8 @@ def escape_for_bash(str_to_escape):
     if str_to_escape is None:
         return ''
 
+    str_to_escape = str(str_to_escape)
+
     escaped_quotes = str_to_escape.replace("'", """'"'"'""")
 
     return "'{}'".format(escaped_quotes)
diff --git a/tests/common/test_escaping.py b/tests/common/test_escaping.py
new file mode 100644
index 0000000000..a66070b603
--- /dev/null
+++ b/tests/common/test_escaping.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Tests for the :mod:`aiida.common.escaping`."""
+from aiida.common.escaping import escape_for_bash
+
+
+def test_escape_for_bash():
+    """Tests various inputs for `aiida.common.escaping.escape_for_bash`."""
+    tests = (
+        [None, ''],
+        ['string', "'string'"],
+        ['string with space', "'string with space'"],
+        ["string with a ' single quote", """'string with a '"'"' single quote'"""],
+        [1, "'1'"],
+        [2.0, "'2.0'"],
+    )
+
+    for string_input, string_escaped in tests:
+        assert escape_for_bash(string_input) == string_escaped

From d12a424e0ae47616df26a90ce2f6f093f9618fa2 Mon Sep 17 00:00:00 2001
From: Leopold Talirz
Date: Sat, 28 Mar 2020 16:54:15 +0100
Subject: [PATCH 25/54] Remove unused `orm.implementation.utils` module (#3877)

The `get_attr` function defined here seems to be used nowhere in the
codebase.
---
 aiida/orm/implementation/utils.py | 26 --------------------------
 1 file changed, 26 deletions(-)
 delete mode 100644 aiida/orm/implementation/utils.py

diff --git a/aiida/orm/implementation/utils.py b/aiida/orm/implementation/utils.py
deleted file mode 100644
index 538b496769..0000000000
--- a/aiida/orm/implementation/utils.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-###########################################################################
-# Copyright (c), The AiiDA team. All rights reserved.                     #
-# This file is part of the AiiDA code.
# -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -"""Utility functions for AiiDA ORM implementations.""" - -__all__ = ('get_attr',) - - -def get_attr(attrs, key): - """ Get the attribute that corresponds to the given key""" - path = key.split('.') - - dict_ = attrs - for part in path: - if part.isdigit(): - part = int(part) - # Let it raise the appropriate exception - dict_ = dict_[part] - - return dict_ From 58bace313a7dffdd576679feae87fb2a029bd4cf Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Sun, 29 Mar 2020 22:46:20 +0200 Subject: [PATCH 26/54] Docs: enable `intersphinx` mapping for various libraries (#3876) Add intersphinx mapping to python standard library documentation and of various direct dependency libraries: * `click` * `flask` * `flask_restful` * `kiwipy` * `plumpy` Adding this documentation interconnection allows references in our docs like :py:exc:`ValueError` to now directly link to the relevant external documentation. This also allows to cut down the "nitpick exceptions" quite dramatically. Co-Authored-By: Sebastiaan Huber --- aiida/cmdline/params/options/interactive.py | 2 +- aiida/engine/processes/calcjobs/tasks.py | 3 + aiida/engine/processes/process_spec.py | 4 +- aiida/orm/implementation/logs.py | 6 +- aiida/orm/logs.py | 14 +- aiida/orm/nodes/data/array/xy.py | 6 +- docs/source/conf.py | 31 +- docs/source/nitpick-exceptions | 295 +++++--------------- docs/source/working/calculations.rst | 2 +- docs/source/working/workflows.rst | 6 +- 10 files changed, 103 insertions(+), 266 deletions(-) diff --git a/aiida/cmdline/params/options/interactive.py b/aiida/cmdline/params/options/interactive.py index 4c0101175d..f632c39c08 100644 --- a/aiida/cmdline/params/options/interactive.py +++ b/aiida/cmdline/params/options/interactive.py @@ -106,7 +106,7 @@ def get_default(self, ctx): return None def _get_default(self, ctx): - """provides the functionality of :func:`click.Option.get_default`""" + """provides the functionality of :meth:`click.Option.get_default`""" if self._contextual_default is not None: default = self._contextual_default(ctx) else: diff --git a/aiida/engine/processes/calcjobs/tasks.py b/aiida/engine/processes/calcjobs/tasks.py index 7138dcd440..c9056449a8 100644 --- a/aiida/engine/processes/calcjobs/tasks.py +++ b/aiida/engine/processes/calcjobs/tasks.py @@ -321,6 +321,9 @@ class Waiting(plumpy.Waiting): """The waiting state for the `CalcJob` process.""" def __init__(self, process, done_callback, msg=None, data=None): + """ + :param :class:`~plumpy.base.state_machine.StateMachine` process: The process this state belongs to + """ super().__init__(process, done_callback, msg, data) self._task = None self._killing = None diff --git a/aiida/engine/processes/process_spec.py b/aiida/engine/processes/process_spec.py index da9d562303..370789ed94 100644 --- a/aiida/engine/processes/process_spec.py +++ b/aiida/engine/processes/process_spec.py @@ -65,10 +65,10 @@ def exit_code(self, status, label, message, invalidates_cache=False): raise ValueError('status should be a positive integer, received {}'.format(type(status))) if not isinstance(label, str): - raise TypeError('label should be of basestring type and not of {}'.format(type(label))) + raise TypeError('label should be of str type and not of {}'.format(type(label))) if not 
isinstance(message, str): - raise TypeError('message should be of basestring type and not of {}'.format(type(message))) + raise TypeError('message should be of str type and not of {}'.format(type(message))) if not isinstance(invalidates_cache, bool): raise TypeError('invalidates_cache should be of type bool and not of {}'.format(type(invalidates_cache))) diff --git a/aiida/orm/implementation/logs.py b/aiida/orm/implementation/logs.py index ad50d27913..5924d0d228 100644 --- a/aiida/orm/implementation/logs.py +++ b/aiida/orm/implementation/logs.py @@ -45,7 +45,7 @@ def loggername(self): The name of the logger that created this entry :return: The entry loggername - :rtype: basestring + :rtype: str """ @abc.abstractproperty @@ -54,7 +54,7 @@ def levelname(self): The name of the log level :return: The entry log level name - :rtype: basestring + :rtype: str """ @abc.abstractproperty @@ -72,7 +72,7 @@ def message(self): Get the message corresponding to the entry :return: The entry message - :rtype: basestring + :rtype: str """ @abc.abstractproperty diff --git a/aiida/orm/logs.py b/aiida/orm/logs.py index ca83ddfdad..909cdb0add 100644 --- a/aiida/orm/logs.py +++ b/aiida/orm/logs.py @@ -40,7 +40,7 @@ def create_entry_from_record(record): Helper function to create a log entry from a record created as by the python logging library :param record: The record created by the logging module - :type record: :class:`logging.record` + :type record: :class:`logging.LogRecord` :return: An object implementing the log entry interface :rtype: :class:`aiida.orm.logs.Log` @@ -139,16 +139,16 @@ def __init__(self, time, loggername, levelname, dbnode_id, message='', metadata= :type time: :class:`!datetime.datetime` :param loggername: name of logger - :type loggername: basestring + :type loggername: str :param levelname: name of log level - :type levelname: basestring + :type levelname: str :param dbnode_id: id of database node :type dbnode_id: int :param message: log message - :type message: basestring + :type message: str :param metadata: metadata :type metadata: dict @@ -194,7 +194,7 @@ def loggername(self): The name of the logger that created this entry :return: The entry loggername - :rtype: basestring + :rtype: str """ return self._backend_entity.loggername @@ -204,7 +204,7 @@ def levelname(self): The name of the log level :return: The entry log level name - :rtype: basestring + :rtype: str """ return self._backend_entity.levelname @@ -224,7 +224,7 @@ def message(self): Get the message corresponding to the entry :return: The entry message - :rtype: basestring + :rtype: str """ return self._backend_entity.message diff --git a/aiida/orm/nodes/data/array/xy.py b/aiida/orm/nodes/data/array/xy.py index f652987504..a3d2674320 100644 --- a/aiida/orm/nodes/data/array/xy.py +++ b/aiida/orm/nodes/data/array/xy.py @@ -43,10 +43,10 @@ class XyData(ArrayData): def _arrayandname_validator(self, array, name, units): """ Validates that the array is an numpy.ndarray and that the name is - of type basestring. Raises InputValidationError if this not the case. + of type str. Raises InputValidationError if this not the case. 
""" if not isinstance(name, str): - raise InputValidationError('The name must always be an instance of basestring.') + raise InputValidationError('The name must always be a str.') if not isinstance(array, np.ndarray): raise InputValidationError('The input array must always be a numpy array') @@ -55,7 +55,7 @@ def _arrayandname_validator(self, array, name, units): except ValueError: raise InputValidationError('The input array must only contain floats') if not isinstance(units, str): - raise InputValidationError('The units must always be an instance of basestring.') + raise InputValidationError('The units must always be a str.') def set_x(self, x_array, x_name, x_units): """ diff --git a/docs/source/conf.py b/docs/source/conf.py index aac8f853ab..e13004f880 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -49,7 +49,11 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', 'sphinxcontrib.contentui', 'aiida.sphinxext'] +extensions = [ + 'sphinx.ext.intersphinx', 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode', 'sphinx.ext.coverage', + 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.todo', 'IPython.sphinxext.ipython_console_highlighting', + 'IPython.sphinxext.ipython_directive', 'sphinxcontrib.contentui', 'aiida.sphinxext' +] ipython_mplbackend = '' todo_include_todos = True @@ -115,6 +119,14 @@ # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] +intersphinx_mapping = { + 'click': ('https://click.palletsprojects.com/', None), + 'flask': ('http://flask.pocoo.org/docs/latest/', None), + 'flask_restful': ('https://flask-restful.readthedocs.io/en/latest/', None), + 'kiwipy': ('https://kiwipy.readthedocs.io/en/latest/', None), + 'plumpy': ('https://plumpy.readthedocs.io/en/latest/', None), + 'python': ('https://docs.python.org/3', None), +} # -- Options for HTML output --------------------------------------------------- @@ -361,17 +373,8 @@ def setup(app): # Allow duplicate toc entries. 
#epub_tocdup = True -# otherwise, readthedocs.org uses their theme by default, so no need -# to specify it - - # Warnings to ignore when using the -n (nitpicky) option -# We should ignore any python built-in exception, for instance -nitpick_ignore = [('py:class','Warning'), ('py:class', 'exceptions.Warning')] - -for line in open('nitpick-exceptions'): - if line.strip() == '' or line.startswith('#'): - continue - dtype, target = line.split(None, 1) - target = target.strip() - nitpick_ignore.append((dtype, target)) +with open('nitpick-exceptions', 'r') as handle: + nitpick_ignore = [ + tuple(line.strip().split(None, 1)) for line in handle.readlines() if line.strip() and not line.startswith('#') + ] diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions index 3e5c3007a3..4019aad3f7 100644 --- a/docs/source/nitpick-exceptions +++ b/docs/source/nitpick-exceptions @@ -1,243 +1,86 @@ -# built-in python exceptions -py:exc ArithmeticError -py:exc AssertionError -py:exc AttributeError -py:exc BaseException -py:exc BufferError -py:exc DeprecationWarning -py:exc EOFError -py:exc EnvironmentError -py:exc Exception -py:exc FloatingPointError -py:exc FutureWarning -py:exc GeneratorExit -py:exc IOError -py:exc ImportError -py:exc ImportWarning -py:exc IndentationError -py:exc IndexError -py:exc KeyError -py:exc KeyboardInterrupt -py:exc LookupError -py:exc MemoryError -py:exc NameError -py:exc NotImplementedError -py:exc OSError -py:exc OverflowError -py:exc PendingDeprecationWarning -py:exc ReferenceError -py:exc RuntimeError -py:exc RuntimeWarning -py:exc StandardError -py:exc StopIteration -py:exc SyntaxError -py:exc SyntaxWarning -py:exc SystemError -py:exc SystemExit -py:exc TabError -py:exc TypeError -py:exc UnboundLocalError -py:exc UnicodeDecodeError -py:exc UnicodeEncodeError -py:exc UnicodeError -py:exc UnicodeTranslateError -py:exc UnicodeWarning -py:exc UserWarning -py:exc VMSError -py:exc ValueError -py:exc Warning -py:exc WindowsError -py:exc ZeroDivisionError +### python builtins -# python builtins -py:class classmethod -py:class dict -py:class callable -py:class filter -py:class list -py:class object -py:class unittest.case.TestCase -py:class unittest.runner.TextTestRunner -py:class unittest2.case.TestCase -py:meth unittest.TestLoader.discover -py:meth copy.copy -py:class abc.ABC -py:class exceptions.Exception -py:class exceptions.ValueError -py:class exceptions.BaseException -# repeat for Python 3 -py:class Exception -py:class ValueError -py:class BaseException -py:class str -py:class bytes -py:class tuple -py:class int -py:class float -py:class bool -py:class basestring -py:class None -py:class type -py:class typing.Final -# this is required for metaclasses(?) -py:class __builtin__.bool -py:class __builtin__.float -py:class __builtin__.int -py:class __builtin__.object -py:class __builtin__.str -py:class __builtin__.dict -# ... 
and the same for Python 3 -py:class builtins.bool -py:class builtins.float -py:class builtins.int -py:class builtins.object -py:class builtins.str -py:class builtins.dict -py:class set +# note: there doesn't seem to be a standard way of indicating a callable in python3 +# https://stackoverflow.com/questions/23571253/how-to-define-a-callable-parameter-in-a-python-docstring +py:class callable -# python builtin objects -py:obj basestring -py:obj bool -py:obj float -py:obj int -py:obj str -py:obj string -py:obj tuple -py:obj None -py:obj bool +# For some reason, "filter" does not seem to be found +py:class filter -# python packages -# Note: These are needed, if they are provided, e.g. -# as types or rtypes without actually being imported -py:class abc.ABCMeta +py:class unittest.case.TestCase +py:class unittest.runner.TextTestRunner -py:exc click.BadParameter -py:exc click.UsageError -py:class click.ParamType -py:class click.core.Group -py:class click.core.Option -py:class click.Command -py:class click.Group -py:class click.Option -py:class click.types.ParamType -py:class click.types.Choice -py:class click.types.IntParamType -py:class click.types.StringParamType -py:class click.types.Path +# required for metaclasses(?) +py:class builtins.bool +py:class builtins.float +py:class builtins.int +py:class builtins.object +py:class builtins.str +py:class builtins.dict -py:class concurrent.futures._base.TimeoutError +### AiiDA -py:class distutils.version.Version +# not quite clear why necessary... +py:class WorkChainSpec -py:class docutils.parsers.rst.Directive +### python packages +# Note: These exceptions are needed if +# * the objects are referenced e.g. as param/return types types in method docstrings (without intersphinx mapping) +# * the documentation linked via intersphinx lists the objects at a different (usually lower) import hierarchy +py:class click.core.Group +py:class click.core.Option +py:class click.types.ParamType +py:class click.types.Choice +py:class click.types.IntParamType +py:class click.types.StringParamType +py:class click.types.Path +py:meth click.Option.get_default -py:class enum.Enum -py:class enum.IntEnum +py:class concurrent.futures._base.TimeoutError -py:class flask.app.Flask -py:class flask.json.JSONEncoder -py:class flask_restful.Api -py:class flask_restful.Resource +py:class docutils.parsers.rst.Directive -py:class frozenset +py:class frozenset -py:class logging.Filter -py:class logging.Handler -py:class logging.record -py:class logging.Logger -py:class logging.LoggerAdapter +py:class paramiko.proxy.ProxyCommand -py:class paramiko.proxy.ProxyCommand +# These can be removed once they are properly included in the `__all__` in `plumpy` +py:class plumpy.ports.PortNamespace +py:class plumpy.utils.AttributesDict -py:class StateMachine -py:class plumpy.futures.Future -py:class plumpy.processes.Process -py:class plumpy.process_comms.ProcessLauncher -py:class plumpy.process_spec.ProcessSpec -py:class plumpy.Port -py:class plumpy.Process -py:class plumpy.Communicator -py:class plumpy.RemoteProcessThreadController -py:class plumpy.Bundle -py:class plumpy.workchains.WorkChainSpec -py:class plumpy.WorkChainSpec -py:class plumpy.Persister -py:class plumpy.persistence.Persister -py:class plumpy.PersistenceError -py:class plumpy.ports.Port -py:class plumpy.ports.InputPort -py:class plumpy.ports.OutputPort -py:class plumpy.ports.PortNamespace -py:class plumpy.utils.AttributesDict -py:class plumpy.loaders.DefaultObjectLoader -py:class plumpy.ObjectLoader -py:class 
plumpy.process_states.Waiting -py:class plumpy.process_comms.RemoteProcessThreadController -py:exc plumpy.TaskRejected -py:meth plumpy.ProcessSpec.output -py:meth plumpy.process_spec.ProcessSpec.expose_inputs -py:meth plumpy.process_spec.ProcessSpec.expose_outputs +py:class topika.Connection -py:class kiwipy.futures.Future -py:class kiwipy.communications.TimeoutError -py:class kiwipy.Communicator -py:class kiwipy.rmq.communicator.RmqThreadCommunicator +py:class tornado.ioloop.IOLoop +py:class tornado.concurrent.Future -py:class topika.Connection +py:class IPython.core.magic.Magics -py:class tornado.ioloop.IOLoop -py:class tornado.concurrent.Future +py:class HTMLParser.HTMLParser +py:class html.parser.HTMLParser -py:class IPython.core.magic.Magics +py:class django.contrib.auth.base_user.AbstractBaseUser +py:class django.contrib.auth.base_user.BaseUserManager +py:class django.contrib.auth.models.AbstractBaseUser +py:class django.contrib.auth.models.BaseUserManager +py:class django.contrib.auth.models.PermissionsMixin +py:class django.core.exceptions.MultipleObjectsReturned +py:class django.core.exceptions.ObjectDoesNotExist +py:class django.db.models.base.Model +py:class django.db.models.manager.Manager +py:class django.db.models.query.QuerySet +py:class django.db.migrations.migration.Migration -py:class HTMLParser.HTMLParser -py:class html.parser.HTMLParser +py:class flask.app.Flask -py:class tuple +py:class sqlalchemy.ext.declarative.api.Base +py:class sqlalchemy.ext.declarative.api.Model +py:class sqlalchemy.sql.functions.FunctionElement +py:class sqlalchemy.orm.query.Query +py:class sqlalchemy.orm.util.AliasedClass +py:class sqlalchemy.orm.session.Session +py:exc sqlalchemy.orm.exc.MultipleResultsFound -py:class staticmethod - -py:class django.contrib.auth.base_user.AbstractBaseUser -py:class django.contrib.auth.base_user.BaseUserManager -py:class django.contrib.auth.models.AbstractBaseUser -py:class django.contrib.auth.models.BaseUserManager -py:class django.contrib.auth.models.PermissionsMixin -py:class django.core.exceptions.MultipleObjectsReturned -py:class django.core.exceptions.ObjectDoesNotExist -py:class django.db.models.base.Model -py:class django.db.models.manager.Manager -py:class django.db.models.query.QuerySet -py:class django.db.migrations.migration.Migration - -py:class sqlalchemy.ext.declarative.api.Base -py:class sqlalchemy.ext.declarative.api.Model -py:class sqlalchemy.sql.functions.FunctionElement -py:class sqlalchemy.orm.query.Query -py:class sqlalchemy.orm.util.AliasedClass -py:class sqlalchemy.orm.session.Session -py:exc sqlalchemy.orm.exc.MultipleResultsFound - -py:class sphinx.ext.autodoc.ClassDocumenter - -py:class collections.abc.Mapping -py:class collections.abc.MutableMapping -py:class collections.abc.MutableSequence -py:class collections.abc.Iterator -py:class collections.abc.Sized - -# backend-dependent implementation -py:class WorkChainSpec -py:class aiida.orm.nodes.Node -py:meth aiida.engine.processes.process_spec.ProcessSpec.input -py:meth aiida.engine.processes.process_spec.ProcessSpec.output -py:meth aiida.engine.processes.process_spec.ProcessSpec.outline - -# This comes from ABCMeta -py:meth aiida.orm.groups.Group.get_from_string - -py:mod click -py:class click.Choice -py:func click.Option.get_default +py:class sphinx.ext.autodoc.ClassDocumenter py:class yaml.Dumper py:class yaml.Loader @@ -245,21 +88,10 @@ py:class yaml.dumper.Dumper py:class yaml.loader.Loader py:class yaml.FullLoader py:class yaml.loader.FullLoader - py:class uuid.UUID -# typing 
-py:class typing.Generic -py:class typing.TypeVar - -# Python 3 complains about this because of orm.Entity.Collection inner class (no idea why) -py:class Collection - - -# psychopg2 py:class psycopg2.extensions.cursor -# Aldjemy exceptions py:class aldjemy.orm.DbNode py:class aldjemy.orm.DbLink py:class aldjemy.orm.DbComputer @@ -270,5 +102,4 @@ py:class aldjemy.orm.DbComment py:class aldjemy.orm.DbLog py:class aldjemy.orm.DbSetting -# Alembic py:class alembic.config.Config diff --git a/docs/source/working/calculations.rst b/docs/source/working/calculations.rst index 4fae92b4ba..6f55f72a55 100644 --- a/docs/source/working/calculations.rst +++ b/docs/source/working/calculations.rst @@ -115,7 +115,7 @@ Next we should define what outputs we expect the calculation to produce: Just as for the inputs, one can specify what node type each output should have. By default a defined output will be 'required', which means that if the calculation job terminates and the output has not been attached, the process will be marked as failed. To indicate that an output is optional, one can use ``required=False`` in the ``spec.output`` call. -Note that the process spec, and its :py:meth:`~aiida.engine.processes.process_spec.ProcessSpec.input` and :py:meth:`~aiida.engine.processes.process_spec.ProcessSpec.output` methods provide a lot more functionality. +Note that the process spec, and its :py:meth:`~plumpy.ProcessSpec.input` and :py:meth:`~plumpy.ProcessSpec.output` methods provide a lot more functionality. Fore more details, please refer to the section on :ref:`process specifications`. diff --git a/docs/source/working/workflows.rst b/docs/source/working/workflows.rst index 896ad29707..a58595e766 100644 --- a/docs/source/working/workflows.rst +++ b/docs/source/working/workflows.rst @@ -178,8 +178,8 @@ The third and final line is extremely important, as it will call the ``define`` Inputs and outputs ------------------ With those formalities out of the way, you can start defining the interesting properties of the work chain through the ``spec``. -In the example you can see how the method :py:meth:`~aiida.engine.processes.process_spec.ProcessSpec.input` is used to define multiple input ports, which document exactly which inputs the work chain expects. -Similarly, :py:meth:`~aiida.engine.processes.process_spec.ProcessSpec.output` is called to instruct that the work chain will produce an output with the label ``result``. +In the example you can see how the method :py:meth:`~plumpy.ProcessSpec.input` is used to define multiple input ports, which document exactly which inputs the work chain expects. +Similarly, :py:meth:`~plumpy.ProcessSpec.output` is called to instruct that the work chain will produce an output with the label ``result``. These two port creation methods support a lot more functionality, such as adding help string, validation and more, all of which is documented in detail in the section on :ref:`ports and port namespace`. @@ -189,7 +189,7 @@ Outline ------- The outline is what sets the work chain apart from other processes. It is a way of defining the higher-level logic that encodes the workflow that the work chain takes. -The outline is defined in the ``define`` method through the :py:meth:`~aiida.engine.processes.process_spec.ProcessSpec.outline`. +The outline is defined in the ``define`` method through the :py:meth:`~plumpy.WorkChainSpec.outline`. It takes a sequence of instructions that the work chain will execute, each of which is implemented as a method of the work chain class. 
In the simple example above, the outline consists of three simple instructions: ``add``, ``multiply``, ``results``. Since these are implemented as instance methods, they are prefixed with ``cls.`` to indicate that they are in fact methods of the work chain class. From 0f069a70035337b880a0750f94adb0ada0d1f45f Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Sun, 29 Mar 2020 23:10:14 +0200 Subject: [PATCH 27/54] REST API: fix the interface of `run_api` (#3875) The current `run_api` interface for running the AiiDA REST API had unnecessarily many parameters, making it complicated to use in WSGI scripts, e.g.: from aiida.restapi import api from aiida.restapi.run_api import run_api import aiida.restapi CONFIG_DIR = os.path.join(os.path.split( os.path.abspath(aiida.restapi.__file__))[0], 'common') (app, api) = run_api( api.App, api.AiidaApi, hostname="localhost", port=5000, config=CONFIG_DIR, debug=False, wsgi_profile=False, hookup=False, catch_internal_server=False ) While all but the first two parameters are keyword arguments, the code would actually crash if they are not provided. In reality, there is no reason to have to specify *any* parameters whatsoever and one should simply be able to call `run_api()`. This commit accomplishes this by defining the appropriate default values. --- aiida/cmdline/commands/cmd_restapi.py | 42 ++++++------ aiida/restapi/common/config.py | 63 ++++++------------ aiida/restapi/resources.py | 6 +- aiida/restapi/run_api.py | 90 ++++++++++++-------------- docs/source/verdi/verdi_user_guide.rst | 13 ++-- 5 files changed, 90 insertions(+), 124 deletions(-) diff --git a/aiida/cmdline/commands/cmd_restapi.py b/aiida/cmdline/commands/cmd_restapi.py index a6b8c9adf4..9cbde54473 100644 --- a/aiida/cmdline/commands/cmd_restapi.py +++ b/aiida/cmdline/commands/cmd_restapi.py @@ -12,60 +12,54 @@ Main advantage of doing this by means of a verdi command is that different profiles can be selected at hook-up (-p flag). """ -import os import click -import aiida.restapi from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.params.options import HOSTNAME, PORT - -CONFIG_DIR = os.path.join(os.path.split(os.path.abspath(aiida.restapi.__file__))[0], 'common') +from aiida.restapi.common import config @verdi.command('restapi') -@HOSTNAME(default='127.0.0.1') -@PORT(default=5000) +@HOSTNAME(default=config.CLI_DEFAULTS['HOST_NAME']) +@PORT(default=config.CLI_DEFAULTS['PORT']) @click.option( '-c', '--config-dir', type=click.Path(exists=True), - default=CONFIG_DIR, - help='the path of the configuration directory' + default=config.CLI_DEFAULTS['CONFIG_DIR'], + help='Path to the configuration directory' ) -@click.option('--debug', 'debug', is_flag=True, default=False, help='run app in debug mode') +@click.option('--debug', 'debug', is_flag=True, default=config.APP_CONFIG['DEBUG'], help='Enable debugging') @click.option( '--wsgi-profile', - 'wsgi_profile', is_flag=True, - default=False, - help='to use WSGI profiler middleware for finding bottlenecks in web application' + default=config.CLI_DEFAULTS['WSGI_PROFILE'], + help='Whether to enable WSGI profiler middleware for finding bottlenecks' +) +@click.option( + '--hookup/--no-hookup', + 'hookup', + is_flag=True, + default=config.CLI_DEFAULTS['HOOKUP_APP'], + help='Hookup app to flask server' ) -@click.option('--hookup/--no-hookup', 'hookup', is_flag=True, default=True, help='to hookup app') def restapi(hostname, port, config_dir, debug, wsgi_profile, hookup): """ Run the AiiDA REST API server. 
Example Usage:
 
-        \b
-        verdi -p <profile_name> restapi --hostname 127.0.0.5 --port 6789 --config-dir <location of the config.py file>
-        --debug --wsgi-profile --hookup
+        verdi -p <profile_name> restapi --hostname 127.0.0.5 --port 6789
     """
-    from aiida.restapi.api import App, AiidaApi
     from aiida.restapi.run_api import run_api
 
-    # Construct parameter dictionary
-    kwargs = dict(
-        prog_name='verdi-restapi',
+    # Invoke the runner
+    run_api(
         hostname=hostname,
         port=port,
         config=config_dir,
         debug=debug,
         wsgi_profile=wsgi_profile,
         hookup=hookup,
-        catch_internal_server=True
     )
-
-    # Invoke the runner
-    run_api(App, AiidaApi, **kwargs)

diff --git a/aiida/restapi/common/config.py b/aiida/restapi/common/config.py
index 2f719a2654..382e334ea4 100644
--- a/aiida/restapi/common/config.py
+++ b/aiida/restapi/common/config.py
@@ -8,48 +8,26 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """
-Constants used in rest api
+Default configuration for the REST API
 """
+import os
 
-## Pagination defaults
-LIMIT_DEFAULT = 400
-PERPAGE_DEFAULT = 20
-
-##Version prefix for all the URLs
-PREFIX = '/api/v4'
-VERSION = '4.0.1'
-"""
-Flask app configs.
-
-DEBUG: True/False. enables debug mode N.B.
-!!!For production run use ALWAYS False!!!
-
-PROPAGATE_EXCEPTIONS: True/False serve REST exceptions to the client (and not a
-generic 500: Internal Server Error exception)
+API_CONFIG = {
+    'LIMIT_DEFAULT': 400,  # default records total
+    'PERPAGE_DEFAULT': 20,  # default records per page
+    'PREFIX': '/api/v4',  # prefix for all URLs
+    'VERSION': '4.0.1',
+}
 
-"""
 APP_CONFIG = {
-    'DEBUG': False,
-    'PROPAGATE_EXCEPTIONS': True,
+    'DEBUG': False,  # use False for production
+    'PROPAGATE_EXCEPTIONS': True,  # serve REST exceptions to client instead of generic 500 internal server error
 }
-"""
-JSON serialization config. Leave this dictionary empty if default Flask
-serializer is desired.
-
-Here is a list a all supported fields. If a field is not present in the
-dictionary its value is assumed to be 'default'.
 
-DATETIME_FORMAT: allowed values are 'asinput' and 'default'.
+SERIALIZER_CONFIG = {'datetime_format': 'default'}  # use 'asinput' or 'default'
 
-"""
-SERIALIZER_CONFIG = {'datetime_format': 'default'}
-"""
-Caching configuration
-
-memcached: backend caching system
-"""
 CACHE_CONFIG = {'CACHE_TYPE': 'memcached'}
-CACHING_TIMEOUTS = { #Caching TIMEOUTS (in seconds)
+CACHING_TIMEOUTS = {  # Caching timeouts in seconds
     'nodes': 10,
     'users': 10,
     'calculations': 10,
@@ -61,13 +39,12 @@
 
 # IO tree
 MAX_TREE_DEPTH = 5
-"""
-Aiida profile used by the REST api when no profile is specified (ex. by
---aiida-profile flag).
-This has to be one of the profiles registered in .aiida/config.json - -In case you want to use the default stored in -.aiida/config.json, set this varibale to "default" -""" -DEFAULT_AIIDA_PROFILE = None +CLI_DEFAULTS = { + 'HOST_NAME': '127.0.0.1', + 'PORT': 5000, + 'CONFIG_DIR': os.path.dirname(os.path.abspath(__file__)), + 'WSGI_PROFILE': False, + 'HOOKUP_APP': True, + 'CATCH_INTERNAL_SERVER': False, +} diff --git a/aiida/restapi/resources.py b/aiida/restapi/resources.py index c746169681..49ccadec17 100644 --- a/aiida/restapi/resources.py +++ b/aiida/restapi/resources.py @@ -48,20 +48,20 @@ def get(self): response = {} - import aiida.restapi.common.config as conf + from aiida.restapi.common.config import API_CONFIG from aiida import __version__ if resource_type == 'info': response = {} # Add Rest API version - api_version = conf.VERSION.split('.') + api_version = API_CONFIG['VERSION'].split('.') response['API_major_version'] = api_version[0] response['API_minor_version'] = api_version[1] response['API_revision_version'] = api_version[2] # Add Rest API prefix - response['API_prefix'] = conf.PREFIX + response['API_prefix'] = API_CONFIG['PREFIX'] # Add AiiDA version response['AiiDA_version'] = __version__ diff --git a/aiida/restapi/run_api.py b/aiida/restapi/run_api.py index 6dfadff896..9c0696f2de 100755 --- a/aiida/restapi/run_api.py +++ b/aiida/restapi/run_api.py @@ -16,64 +16,64 @@ import os from flask_cors import CORS +from .common.config import CLI_DEFAULTS, APP_CONFIG, API_CONFIG +from . import api as api_classes -def run_api(flask_app, flask_api, **kwargs): +def run_api(flask_app=api_classes.App, flask_api=api_classes.AiidaApi, **kwargs): """ Takes a flask.Flask instance and runs it. - flask_app: Class inheriting from Flask app class - flask_api = flask_restful API class to be used to wrap the app - - kwargs: - List of valid parameters: - prog_name: name of the command before arguments are parsed. Useful when - api is embedded in a command, such as verdi restapi - hostname: self-explainatory - port: self-explainatory - config: directory containing the config.py file used to - configure the RESTapi - catch_internal_server: If true, catch and print all inter server errors - debug: self-explainatory - wsgi_profile:to use WSGI profiler middleware for finding bottlenecks in web application - hookup: to hookup app - All other passed parameters are ignored. 
+    :param flask_app: Class inheriting from flask app class
+    :type flask_app: :py:class:`flask.Flask`
+    :param flask_api: flask_restful API class to be used to wrap the app
+    :type flask_api: :py:class:`flask_restful.Api`
+
+    List of valid keyword arguments:
+    :param hostname: hostname to run app on (only when using built-in server)
+    :param port: port to run app on (only when using built-in server)
+    :param config: directory containing the config.py file used to configure the RESTapi
+    :param catch_internal_server: If true, catch and print all internal server errors
+    :param debug: enable debugging
+    :param wsgi_profile: use WSGI profiler middleware for finding bottlenecks in web application
+    :param hookup: If true, hook up application to built-in server - else just return it
     """
     # pylint: disable=too-many-locals
 
     # Unpack parameters
-    hostname = kwargs['hostname']
-    port = kwargs['port']
-    config = kwargs['config']
+    hostname = kwargs.pop('hostname', CLI_DEFAULTS['HOST_NAME'])
+    port = kwargs.pop('port', CLI_DEFAULTS['PORT'])
+    config = kwargs.pop('config', CLI_DEFAULTS['CONFIG_DIR'])
 
-    catch_internal_server = kwargs.pop('catch_internal_server', False)
-    debug = kwargs['debug']
-    wsgi_profile = kwargs['wsgi_profile']
-    hookup = kwargs['hookup']
+    catch_internal_server = kwargs.pop('catch_internal_server', CLI_DEFAULTS['CATCH_INTERNAL_SERVER'])
+    debug = kwargs.pop('debug', APP_CONFIG['DEBUG'])
+    wsgi_profile = kwargs.pop('wsgi_profile', CLI_DEFAULTS['WSGI_PROFILE'])
+    hookup = kwargs.pop('hookup', CLI_DEFAULTS['HOOKUP_APP'])
 
-    # Import the right configuration file
+    if kwargs:
+        raise ValueError('Unknown keyword arguments: {}'.format(kwargs))
+
+    # Import the configuration file
     spec = importlib.util.spec_from_file_location(os.path.join(config, 'config'), os.path.join(config, 'config.py'))
-    confs = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(confs)
+    config_module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(config_module)
 
     # Instantiate an app
-    app_kwargs = dict(catch_internal_server=catch_internal_server)
-    app = flask_app(__name__, **app_kwargs)
+    app = flask_app(__name__, catch_internal_server=catch_internal_server)
 
-    # Config the app
-    app.config.update(**confs.APP_CONFIG)
+    # Apply default configuration
+    app.config.update(**config_module.APP_CONFIG)
 
-    # cors
-    cors_prefix = os.path.join(confs.PREFIX, '*')
-    CORS(app, resources={r'' + cors_prefix: {'origins': '*'}})
+    # Allow cross-origin resource sharing
+    cors_prefix = r'{}/*'.format(config_module.API_CONFIG['PREFIX'])
+    CORS(app, resources={cors_prefix: {'origins': '*'}})
 
-    # Config the serializer used by the app
-    if confs.SERIALIZER_CONFIG:
+    # Configure the serializer
+    if config_module.SERIALIZER_CONFIG:
         from aiida.restapi.common.utils import CustomJSONEncoder
         app.json_encoder = CustomJSONEncoder
 
-    # If the user selects the profiling option, then we need
-    # to do a little extra setup
+    # Set up WSGI profile if requested
     if wsgi_profile:
         from werkzeug.middleware.profiler import ProfilerMiddleware
 
@@ -81,18 +81,14 @@ def run_api(flask_app, flask_api, **kwargs):
         app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
 
     # Instantiate an Api by associating its app
-    api_kwargs = dict(PREFIX=confs.PREFIX, PERPAGE_DEFAULT=confs.PERPAGE_DEFAULT, LIMIT_DEFAULT=confs.LIMIT_DEFAULT)
-    api = flask_api(app, **api_kwargs)
+    api = flask_api(app, **API_CONFIG)
 
-    # Check if the app has to be hooked-up or just returned
     if hookup:
-        print(' * REST API running on http://{}:{}{}'.format(hostname, port, confs.PREFIX))
+        # Run app through built-in werkzeug server
+        print(' * REST API running on http://{}:{}{}'.format(hostname, port, API_CONFIG['PREFIX']))
         api.app.run(debug=debug, host=hostname, port=int(port), threaded=True)
     else:
-        # here we return the app, and the api with no specifications on debug
-        # mode, port and host. This can be handled by an external server,
-        # e.g. apache2, which will set the host and port. This implies that
-        # the user-defined configuration of the app is ineffective (it only
-        # affects the internal werkzeug server used by Flask).
+        # Return the app & api without specifying port/host to be handled by an external server (e.g. apache).
+        # Some of the user-defined configuration of the app is ineffective (only affects built-in server).
         return (app, api)

diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst
index 8019a2031c..1e384344e1 100644
--- a/docs/source/verdi/verdi_user_guide.rst
+++ b/docs/source/verdi/verdi_user_guide.rst
@@ -677,17 +677,16 @@ Below is a list with all available subcommands.
 
       Example Usage:
 
-      verdi -p <profile_name> restapi --hostname 127.0.0.5 --port 6789 --config-dir <location of the config.py file>
-      --debug --wsgi-profile --hookup
+      verdi -p <profile_name> restapi --hostname 127.0.0.5 --port 6789
 
     Options:
      -H, --hostname TEXT      Hostname.
      -P, --port INTEGER       Port number.
-     -c, --config-dir PATH    the path of the configuration directory
-     --debug                  run app in debug mode
-     --wsgi-profile           to use WSGI profiler middleware for finding
-                              bottlenecks in web application
-     --hookup / --no-hookup   to hookup app
+     -c, --config-dir PATH    Path to the configuration directory
+     --debug                  Enable debugging
+     --wsgi-profile           Whether to enable WSGI profiler middleware for
+                              finding bottlenecks
+     --hookup / --no-hookup   Hookup app to flask server
      --help                   Show this message and exit.

From 5e226e7035d339a907fc8c2edfd8e5956513a1b4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 31 Mar 2020 15:15:29 +0200
Subject: [PATCH 28/54] Bump bleach from 3.1.1 to 3.1.4 in /requirements (#3880)

Bumps [bleach](https://github.com/mozilla/bleach) from 3.1.1 to 3.1.4.
- [Release notes](https://github.com/mozilla/bleach/releases) - [Changelog](https://github.com/mozilla/bleach/blob/master/CHANGES) - [Commits](https://github.com/mozilla/bleach/compare/v3.1.1...v3.1.4) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/requirements-py-3.5.txt | 2 +- requirements/requirements-py-3.6.txt | 2 +- requirements/requirements-py-3.7.txt | 2 +- requirements/requirements-py-3.8.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements/requirements-py-3.5.txt b/requirements/requirements-py-3.5.txt index 7a9957271a..ff5c36ad78 100644 --- a/requirements/requirements-py-3.5.txt +++ b/requirements/requirements-py-3.5.txt @@ -8,7 +8,7 @@ attrs==19.3.0 Babel==2.8.0 backcall==0.1.0 bcrypt==3.1.7 -bleach==3.1.1 +bleach==3.1.4 certifi==2019.11.28 cffi==1.14.0 chardet==3.0.4 diff --git a/requirements/requirements-py-3.6.txt b/requirements/requirements-py-3.6.txt index 1443564fe3..4e9015bc17 100644 --- a/requirements/requirements-py-3.6.txt +++ b/requirements/requirements-py-3.6.txt @@ -8,7 +8,7 @@ attrs==19.3.0 Babel==2.8.0 backcall==0.1.0 bcrypt==3.1.7 -bleach==3.1.1 +bleach==3.1.4 certifi==2019.11.28 cffi==1.14.0 chardet==3.0.4 diff --git a/requirements/requirements-py-3.7.txt b/requirements/requirements-py-3.7.txt index b4ed2ed37c..f52fa482bd 100644 --- a/requirements/requirements-py-3.7.txt +++ b/requirements/requirements-py-3.7.txt @@ -8,7 +8,7 @@ attrs==19.3.0 Babel==2.8.0 backcall==0.1.0 bcrypt==3.1.7 -bleach==3.1.1 +bleach==3.1.4 certifi==2019.11.28 cffi==1.14.0 chardet==3.0.4 diff --git a/requirements/requirements-py-3.8.txt b/requirements/requirements-py-3.8.txt index 928b5c05d2..462ddf5777 100644 --- a/requirements/requirements-py-3.8.txt +++ b/requirements/requirements-py-3.8.txt @@ -8,7 +8,7 @@ attrs==19.3.0 Babel==2.8.0 backcall==0.1.0 bcrypt==3.1.7 -bleach==3.1.1 +bleach==3.1.4 certifi==2019.11.28 cffi==1.14.0 chardet==3.0.4 From 035a9daa4e3b9282419eb2e5c58621d23cee1792 Mon Sep 17 00:00:00 2001 From: Carl Simon Adorf Date: Wed, 1 Apr 2020 13:44:21 +0200 Subject: [PATCH 29/54] Skip unit test `test_daemon_start_number` temporarily. (#3883) This unit test currently fails non-deterministically. --- tests/cmdline/commands/test_daemon.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/cmdline/commands/test_daemon.py b/tests/cmdline/commands/test_daemon.py index 034cc7e1b6..3a9cda21a2 100644 --- a/tests/cmdline/commands/test_daemon.py +++ b/tests/cmdline/commands/test_daemon.py @@ -10,6 +10,7 @@ """Tests for `verdi daemon`.""" from click.testing import CliRunner +import pytest from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands import cmd_daemon @@ -42,6 +43,7 @@ def test_daemon_start(self): finally: self.daemon_client.stop_daemon(wait=True) + @pytest.mark.skip(reason='Test fails non-deterministically; see issue #3051.') def test_daemon_start_number(self): """Test `verdi daemon start` with a specific number of workers.""" From 99742f284556bfea62a3c6df28a0a0b7da4832f0 Mon Sep 17 00:00:00 2001 From: Davide Grassano <34096612+Crivella@users.noreply.github.com> Date: Wed, 1 Apr 2020 17:53:52 +0200 Subject: [PATCH 30/54] Remove the return statement of `RemoteData.getfile()` (#3742) The method also returned the contents of the `RemoteData` but this was added through a copy-paste mistake and makes no sense for this method. It has been removed and the docstring has been adapted. The method now returns nothing. 
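As an illustration, a minimal usage sketch after this change (the pk and the
paths here are hypothetical):

    from aiida.orm import load_node

    remote = load_node(1234)  # a stored `RemoteData` node
    # Retrieves the remote file to the given local path; there is no return value anymore.
    remote.getfile('path/on/remote/aiida.out', '/tmp/aiida.out')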
---
 aiida/orm/nodes/data/remote.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/aiida/orm/nodes/data/remote.py b/aiida/orm/nodes/data/remote.py
index 03a089e515..03e6bf6453 100644
--- a/aiida/orm/nodes/data/remote.py
+++ b/aiida/orm/nodes/data/remote.py
@@ -55,11 +55,10 @@ def is_empty(self):
 
     def getfile(self, relpath, destpath):
         """
-        Connects to the remote folder and gets a string with the (full) content of the file.
+        Connects to the remote folder and retrieves the content of a file.
 
-        :param relpath: The relative path of the file to show.
-        :param destpath: A path on the local computer to get the file
-        :return: a string with the file content
+        :param relpath: The relative path of the file on the remote to retrieve.
+        :param destpath: The absolute path of where to store the file on the local machine.
         """
         authinfo = self.get_authinfo()
         t = authinfo.get_transport()
@@ -76,8 +75,6 @@ def getfile(self, relpath, destpath):
             else:
                 raise
 
-        return t.listdir()
-
     def listdir(self, relpath='.'):
         """
         Connects to the remote folder and lists the directory content.

From 434c593f2dc98505018fcf61be2445067309b790 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 3 Apr 2020 14:50:32 +0200
Subject: [PATCH 31/54] Ensure log messages are not duplicated in daemon log file (#3890)

The logging for the daemon was configured both with a handler writing to
stdout (`console`) and a rotating file handler that writes directly to the
daemon log file. The latter is the intended one, but since the default
`console` handler was also kept, its output also ended up in the daemon log
file, duplicating every message.
---
 aiida/common/log.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/aiida/common/log.py b/aiida/common/log.py
index 7180d20376..d1d5b16503 100644
--- a/aiida/common/log.py
+++ b/aiida/common/log.py
@@ -180,6 +180,11 @@ def configure_logging(with_orm=False, daemon=False, daemon_log_file=None):
 
     for logger in config.get('loggers', {}).values():
         logger.setdefault('handlers', []).append(daemon_handler_name)
+        try:
+            # Remove the `console` stdout stream handler to prevent messages being duplicated in the daemon log file
+            logger['handlers'].remove('console')
+        except ValueError:
+            pass
 
     # Add the `DbLogHandler` if `with_orm` is `True`
     if with_orm:

From ef5928108174d4a8f319213c890fc8377ba86fc2 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 3 Apr 2020 15:15:46 +0200
Subject: [PATCH 32/54] Fix the `RotatingFileHandler` configuration of the daemon logger (#3891)

The logging configuration for the daemon already included the definition of a
`RotatingFileHandler`; however, the logs were not actually being rolled over
when the maximum size was hit. The problem was that the argument `backupCount`
was not defined. As the python documentation of the `logging` module states:

    but if either of `maxBytes` or `backupCount` is zero, rollover never
    occurs, so you generally want to set `backupCount` to at least 1, and have
    a non-zero `maxBytes`.

Setting `backupCount` to 10 fixes the problem. The size of each file is set to
10 MB. This should then keep at most 100 MB of log files for each profile.
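For reference, a stand-alone sketch of the equivalent handler configuration
(the file name here is illustrative):

    import logging.handlers

    # Roll the file over once it reaches 10 MB, keeping up to 10 backups,
    # i.e. at most ~100 MB of log files in total.
    handler = logging.handlers.RotatingFileHandler(
        'daemon.log', maxBytes=10000000, backupCount=10
    )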
---
 aiida/common/log.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/aiida/common/log.py b/aiida/common/log.py
index d1d5b16503..9f208072ed 100644
--- a/aiida/common/log.py
+++ b/aiida/common/log.py
@@ -175,7 +175,8 @@ def configure_logging(with_orm=False, daemon=False, daemon_log_file=None):
             'class': 'logging.handlers.RotatingFileHandler',
             'filename': daemon_log_file,
             'encoding': 'utf8',
-            'maxBytes': 100000,
+            'maxBytes': 10000000,  # 10 MB
+            'backupCount': 10,
         }
 
     for logger in config.get('loggers', {}).values():

From 161eeaeaf2083b69aff92a2905fb4075951e5fd8 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Fri, 3 Apr 2020 19:22:41 +0200
Subject: [PATCH 33/54] Prevent nodes without registered entry points from being stored (#3886)

Up until now it was possible to store instances of `Node` subclasses that do
not have a registered entry point. Imagine, for example, defining a `Data`
subclass called `SubClass` in a shell and storing an instance. The node type
string will be: `__main__.SubClass.`

When trying to load this node at a later point in time, the type string can of
course not be resolved to an importable class and the loader would raise an
exception. The first change is that this exception is now turned into a
warning and the loader falls back onto the `Data` class. Note that we do not
use the `Node` class for this as this base class is also not storable and so
the ORM logic is potentially ill-defined for instances of the base `Node`.

Secondly, we now add a check to `Node.store` to make sure that the class
corresponds to a registered entry point. If this is not the case, the store
method will raise `StoringNotAllowed`.

One unit test was deleted because its implementation was incorrect and was not
actually testing what it intended to test. Besides, the intended functionality
is now covered by other tests.
---
 aiida/orm/nodes/node.py                 | 19 ++++++--
 aiida/orm/utils/node.py                 | 18 ++++---
 aiida/plugins/entry_point.py            | 33 +++++++++++--
 tests/cmdline/commands/test_database.py | 13 +++--
 tests/orm/utils/test_node.py            |  6 +++
 tests/test_nodes.py                     | 63 ++++++-------------------
 6 files changed, 83 insertions(+), 69 deletions(-)

diff --git a/aiida/orm/nodes/node.py b/aiida/orm/nodes/node.py
index 86f6d9ace3..c8608bc36c 100644
--- a/aiida/orm/nodes/node.py
+++ b/aiida/orm/nodes/node.py
@@ -163,6 +163,20 @@ def _validate(self):
         # pylint: disable=no-self-use
         return True
 
+    def validate_storability(self):
+        """Verify that the current node is allowed to be stored.
+ + :raises `aiida.common.exceptions.StoringNotAllowed`: if the node does not match all requirements for storing + """ + from aiida.plugins.entry_point import is_registered_entry_point + + if not self._storable: + raise exceptions.StoringNotAllowed(self._unstorable_message) + + if not is_registered_entry_point(self.__module__, self.__class__.__name__, groups=('aiida.node', 'aiida.data')): + msg = 'class `{}:{}` does not have registered entry point'.format(self.__module__, self.__class__.__name__) + raise exceptions.StoringNotAllowed(msg) + @classproperty def class_node_type(cls): """Returns the node type of this node (sub) class.""" @@ -998,11 +1012,10 @@ def store(self, with_transaction=True, use_cache=None): # pylint: disable=argum 'the `use_cache` argument is deprecated and will be removed in `v2.0.0`', AiidaDeprecationWarning ) - if not self._storable: - raise exceptions.StoringNotAllowed(self._unstorable_message) - if not self.is_stored: + # Call `validate_storability` directly and not in `_validate` in case sub class forgets to call the super. + self.validate_storability() self._validate() # Verify that parents are already stored. Raises if this is not the case. diff --git a/aiida/orm/utils/node.py b/aiida/orm/utils/node.py index f48ec2ae16..6827225272 100644 --- a/aiida/orm/utils/node.py +++ b/aiida/orm/utils/node.py @@ -12,6 +12,7 @@ import logging import math import numbers +import warnings from collections.abc import Iterable, Mapping from aiida.common import exceptions @@ -70,7 +71,14 @@ def load_node_class(type_string): entry_point_name = strip_prefix(base_path, 'nodes.') return load_entry_point('aiida.node', entry_point_name) - raise exceptions.EntryPointError('unknown type string {}'.format(type_string)) + # At this point we really have an anomalous type string. At some point, storing nodes with unresolvable type strings + # was allowed, for example by creating a sub class in a shell and then storing an instance. Attempting to load the + # node then would fail miserably. This is now no longer allowed, but we need a fallback for existing cases, which + # should be rare. We fallback on `Data` and not `Node` because bare node instances are also not storable and so the + # logic of the ORM is not well defined for a loaded instance of the base `Node` class. + warnings.warn('unknown type string `{}`, falling back onto `Data` class'.format(type_string)) # pylint: disable=no-member + + return Data def get_type_string_from_class(class_module, class_name): @@ -247,13 +255,9 @@ def clean_builtin(val): class AbstractNodeMeta(ABCMeta): # pylint: disable=too-few-public-methods - """ - Some python black magic to set correctly the logger also in subclasses. 
- """ - - # pylint: disable=arguments-differ,protected-access,too-many-function-args + """Some python black magic to set correctly the logger also in subclasses.""" - def __new__(mcs, name, bases, namespace): + def __new__(mcs, name, bases, namespace): # pylint: disable=arguments-differ,protected-access,too-many-function-args newcls = ABCMeta.__new__(mcs, name, bases, namespace) newcls._logger = logging.getLogger('{}.{}'.format(namespace['__module__'], name)) diff --git a/aiida/plugins/entry_point.py b/aiida/plugins/entry_point.py index 0d47792626..432e740852 100644 --- a/aiida/plugins/entry_point.py +++ b/aiida/plugins/entry_point.py @@ -186,6 +186,7 @@ def load_entry_point_from_string(entry_point_string): group, name = parse_entry_point_string(entry_point_string) return load_entry_point(group, name) + def load_entry_point(group, name): """ Load the class registered under the entry point for a given name and group @@ -244,6 +245,7 @@ def get_entry_points(group): """ return [ep for ep in ENTRYPOINT_MANAGER.iter_entry_points(group=group)] + @functools.lru_cache(maxsize=None) def get_entry_point(group, name): """ @@ -258,12 +260,12 @@ def get_entry_point(group, name): entry_points = [ep for ep in get_entry_points(group) if ep.name == name] if not entry_points: - raise MissingEntryPointError("Entry point '{}' not found in group '{}'.".format(name, group) + - 'Try running `reentry scan` to update the entry point cache.') + raise MissingEntryPointError("Entry point '{}' not found in group '{}'. Try running `reentry scan` to update " + 'the entry point cache.'.format(name, group)) if len(entry_points) > 1: - raise MultipleEntryPointError("Multiple entry points '{}' found in group '{}'. ".format(name, group) + - 'Try running `reentry scan` to repopulate the entry point cache.') + raise MultipleEntryPointError("Multiple entry points '{}' found in group '{}'.Try running `reentry scan` to " + 'repopulate the entry point cache.'.format(name, group)) return entry_points[0] @@ -332,3 +334,26 @@ def is_valid_entry_point_string(entry_point_string): return False return group in entry_point_group_to_module_path_map + + +@functools.lru_cache(maxsize=None) +def is_registered_entry_point(class_module, class_name, groups=None): + """Verify whether the class with the given module and class name is a registered entry point. + + .. note:: this function only checks whether the class has a registered entry point. It does explicitly not verify + if the corresponding class is also importable. Use `load_entry_point` for this purpose instead. + + :param class_module: the module of the class + :param class_name: the name of the class + :param groups: optionally consider only these entry point groups to look for the class + :return: boolean, True if the class is a registered entry point, False otherwise. 
+ """ + if groups is None: + groups = list(entry_point_group_to_module_path_map.keys()) + + for group in groups: + for entry_point in ENTRYPOINT_MANAGER.iter_entry_points(group): + if class_module == entry_point.module_name and [class_name] == entry_point.attrs: + return True + else: + return False diff --git a/tests/cmdline/commands/test_database.py b/tests/cmdline/commands/test_database.py index 131a2b7c6c..4269cf6c7e 100644 --- a/tests/cmdline/commands/test_database.py +++ b/tests/cmdline/commands/test_database.py @@ -9,7 +9,6 @@ ########################################################################### # pylint: disable=invalid-name,protected-access """Tests for `verdi database`.""" - import enum from click.testing import CliRunner @@ -17,7 +16,7 @@ from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands import cmd_database from aiida.common.links import LinkType -from aiida.orm import Data, Node, CalculationNode, WorkflowNode +from aiida.orm import Data, CalculationNode, WorkflowNode class TestVerdiDatabasaIntegrity(AiidaTestCase): @@ -162,11 +161,11 @@ def test_detect_invalid_nodes_unknown_node_type(self): self.assertEqual(result.exit_code, 0) self.assertClickResultNoException(result) - # Create a node with invalid type: a base Node type string is considered invalid - # Note that there is guard against storing base Nodes for this reason, which we temporarily disable - Node._storable = True - Node().store() - Node._storable = False + # Create a node with invalid type: since there are a lot of validation rules that prevent us from creating an + # invalid node type normally, we have to do it manually on the database model instance before storing + node = Data() + node.backend_entity.dbmodel.node_type = '__main__.SubClass.' + node.store() result = self.cli_runner.invoke(cmd_database.detect_invalid_nodes, []) self.assertNotEqual(result.exit_code, 0) diff --git a/tests/orm/utils/test_node.py b/tests/orm/utils/test_node.py index 239a2a5cb8..9cc72edba5 100644 --- a/tests/orm/utils/test_node.py +++ b/tests/orm/utils/test_node.py @@ -8,6 +8,7 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Tests for the `Node` utils.""" +import pytest from aiida.backends.testbase import AiidaTestCase from aiida.orm import Data @@ -21,3 +22,8 @@ def test_load_node_class_fallback(self): """Verify that `load_node_class` will fall back to `Data` class if entry point cannot be loaded.""" loaded_class = load_node_class('data.some.non.existing.plugin.') self.assertEqual(loaded_class, Data) + + # For really unresolvable type strings, we fall back onto the `Data` class + with pytest.warns(UserWarning): + loaded_class = load_node_class('__main__.SubData.') + self.assertEqual(loaded_class, Data) diff --git a/tests/test_nodes.py b/tests/test_nodes.py index 463baad315..1c1a2d6c5e 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -24,32 +24,28 @@ class TestNodeIsStorable(AiidaTestCase): - """ - Test if one can store specific Node subclasses, and that Node and - ProcessType are not storable, intead. 
- """ + """Test that checks on storability of certain node sub classes work correctly.""" - def test_storable_unstorable(self): - """ - Test storability of Nodes - """ - node = orm.Node() + def test_base_classes(self): + """Test storability of `Node` base sub classes.""" with self.assertRaises(StoringNotAllowed): - node.store() + orm.Node().store() - process = orm.ProcessNode() with self.assertRaises(StoringNotAllowed): - process.store() + orm.ProcessNode().store() - # These below should be allowed instead - data = orm.Data() - data.store() + # The following base classes are storable + orm.Data().store() + orm.CalculationNode().store() + orm.WorkflowNode().store() - calc = orm.CalculationNode() - calc.store() + def test_unregistered_sub_class(self): + """Sub classes without a registered entry point are not storable.""" + class SubData(orm.Data): + pass - work = orm.WorkflowNode() - work.store() + with self.assertRaises(StoringNotAllowed): + SubData().store() class TestNodeCopyDeepcopy(AiidaTestCase): @@ -1207,35 +1203,6 @@ def test_load_node(self): with self.assertRaises(NotExistent): orm.load_node(spec, sub_classes=(orm.ArrayData,)) - def test_load_unknown_data_type(self): - """ - Test that the loader will choose a common data ancestor for an unknown data type. - For the case where, e.g., the user doesn't have the necessary plugin. - """ - from aiida.plugins import DataFactory - - KpointsData = DataFactory('array.kpoints') - kpoint = KpointsData().store() - - # compare if plugin exist - obj = orm.load_node(uuid=kpoint.uuid) - self.assertEqual(type(kpoint), type(obj)) - - class TestKpointsData(KpointsData): - pass - - # change node type and save in database again - TestKpointsData().store() - - # changed node should return data node as its plugin is not exist - obj = orm.load_node(uuid=kpoint.uuid) - self.assertEqual(type(kpoint), type(obj)) - - # for node - n1 = orm.Data().store() - obj = orm.load_node(n1.uuid) - self.assertEqual(type(n1), type(obj)) - class TestSubNodesAndLinks(AiidaTestCase): From f40f0d25952b18da8a49133632e4066437f59290 Mon Sep 17 00:00:00 2001 From: Giovanni Pizzi Date: Sat, 4 Apr 2020 19:57:14 +0200 Subject: [PATCH 34/54] Ensure unicity when creating instances of `Autogroup` (#3650) The default label of an `Autogroup` instance was based on the current date and time, with a precision of seconds. If multiple scripts attempted to create an instance at the same time, a uniqueness error would be thrown by the database. This commit adds the logic to the `get_or_create_group` method to be able to deal with this situtation and iteratively generate unique labels by appending an integer. The CLI parameters of the `verdi run` command were broken and badly tested. These have been altered so as to fix the behavior: * removed the `--group` flag, which was true by default and so served no purpose whatsoever, and replaced it with `--auto-group`. The auto grouping is now disabled by default, but can be enabled by this new flag. * deprecated use of `group-name' in favour of `group-label-prefix` * the flags to include or exclude certain node types have been merged into just `--exclude` and `--include` which are mutually exclusive. By default everything is included and the flags take entry point strings of nodes to narrow the scope. The entry point string can contain SQL wildcards `%` and `_` to match entry points with the `like` operator. The `Autogroup` interface has been adapted to match the new logic of the CLI. 
Finally, an overzealous `isinstance` check has been removed from the `Node.store` method to check that the `current_autogroup` global, if set, is of type `Autogroup`, which should speed up the storing of nodes. --- .ci/workchains.py | 4 +- .gitignore | 1 + .pre-commit-config.yaml | 2 - aiida/backends/testbase.py | 11 + aiida/cmdline/commands/cmd_plugin.py | 6 +- aiida/cmdline/commands/cmd_run.py | 79 ++-- aiida/engine/processes/calcjobs/calcjob.py | 4 +- aiida/manage/caching.py | 8 +- aiida/orm/autogroup.py | 359 +++++++++++------- aiida/orm/nodes/node.py | 16 +- aiida/orm/utils/node.py | 6 +- aiida/plugins/entry_point.py | 49 ++- aiida/tools/ipython/ipython_magics.py | 4 +- docs/source/verdi/verdi_user_guide.rst | 22 +- .../migrations/test_migrations_common.py | 6 +- .../aiida_sqlalchemy/test_migrations.py | 53 +-- tests/cmdline/commands/test_calcjob.py | 1 + tests/cmdline/commands/test_run.py | 356 ++++++++++++++++- .../engine/processes/workchains/test_utils.py | 6 + tests/orm/test_autogroups.py | 129 +++++++ tests/orm/test_groups.py | 1 - tests/tools/importexport/orm/test_codes.py | 2 + tests/tools/visualization/test_graph.py | 2 + utils/dependency_management.py | 5 +- 24 files changed, 877 insertions(+), 255 deletions(-) create mode 100644 tests/orm/test_autogroups.py diff --git a/.ci/workchains.py b/.ci/workchains.py index 110334f0ae..f5ab3872d7 100644 --- a/.ci/workchains.py +++ b/.ci/workchains.py @@ -68,8 +68,8 @@ def a_magic_unicorn_appeared(self, node): @process_handler(priority=400, exit_codes=ArithmeticAddCalculation.exit_codes.ERROR_NEGATIVE_NUMBER) def error_negative_sum(self, node): """What even is a negative number, how can I have minus three melons?!.""" - self.ctx.inputs.x = Int(abs(node.inputs.x.value)) - self.ctx.inputs.y = Int(abs(node.inputs.y.value)) + self.ctx.inputs.x = Int(abs(node.inputs.x.value)) # pylint: disable=invalid-name + self.ctx.inputs.y = Int(abs(node.inputs.y.value)) # pylint: disable=invalid-name return ProcessHandlerReport(True) diff --git a/.gitignore b/.gitignore index 9d225c3ef0..1983db653d 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ .cache .pytest_cache .coverage +coverage.xml # Files created by RPN tests .ci/polish/polish_workchains/polish* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 62ae29e398..8ceab33c0e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,7 +53,6 @@ aiida/common/datastructures.py| aiida/engine/daemon/execmanager.py| aiida/engine/processes/calcjobs/tasks.py| - aiida/orm/autogroup.py| aiida/orm/querybuilder.py| aiida/orm/nodes/data/array/bands.py| aiida/orm/nodes/data/array/projection.py| @@ -66,7 +65,6 @@ aiida/parsers/plugins/arithmetic/add.py| aiida/parsers/plugins/templatereplacer/doubler.py| aiida/parsers/plugins/templatereplacer/__init__.py| - aiida/plugins/entry_point.py| aiida/plugins/entry.py| aiida/plugins/info.py| aiida/plugins/registry.py| diff --git a/aiida/backends/testbase.py b/aiida/backends/testbase.py index de855eec4b..ed18f27566 100644 --- a/aiida/backends/testbase.py +++ b/aiida/backends/testbase.py @@ -99,7 +99,11 @@ def tearDown(self): def reset_database(self): """Reset the database to the default state deleting any content currently stored""" + from aiida.orm import autogroup + self.clean_db() + if autogroup.CURRENT_AUTOGROUP is not None: + autogroup.CURRENT_AUTOGROUP.clear_group_cache() self.insert_data() @classmethod @@ -109,7 +113,10 @@ def insert_data(cls): inserts default data into the database (which is for the moment a default computer). 
""" + from aiida.orm import User + cls.create_user() + User.objects.reset() cls.create_computer() @classmethod @@ -180,7 +187,11 @@ def user_email(cls): # pylint: disable=no-self-argument def tearDownClass(cls, *args, **kwargs): # pylint: disable=arguments-differ # Double check for double security to avoid to run the tearDown # if this is not a test profile + from aiida.orm import autogroup + check_if_tests_can_run() + if autogroup.CURRENT_AUTOGROUP is not None: + autogroup.CURRENT_AUTOGROUP.clear_group_cache() cls.clean_db() cls.clean_repository() cls.__backend_instance.tearDownClass_method(*args, **kwargs) diff --git a/aiida/cmdline/commands/cmd_plugin.py b/aiida/cmdline/commands/cmd_plugin.py index f09c064950..3232441379 100644 --- a/aiida/cmdline/commands/cmd_plugin.py +++ b/aiida/cmdline/commands/cmd_plugin.py @@ -13,7 +13,7 @@ from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.utils import decorators, echo -from aiida.plugins.entry_point import entry_point_group_to_module_path_map +from aiida.plugins.entry_point import ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP @verdi.group('plugin') @@ -22,7 +22,7 @@ def verdi_plugin(): @verdi_plugin.command('list') -@click.argument('entry_point_group', type=click.Choice(entry_point_group_to_module_path_map.keys()), required=False) +@click.argument('entry_point_group', type=click.Choice(ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP.keys()), required=False) @click.argument('entry_point', type=click.STRING, required=False) @decorators.with_dbenv() def plugin_list(entry_point_group, entry_point): @@ -34,7 +34,7 @@ def plugin_list(entry_point_group, entry_point): if entry_point_group is None: echo.echo_info('Available entry point groups:') - for group in sorted(entry_point_group_to_module_path_map.keys()): + for group in sorted(ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP.keys()): echo.echo('* {}'.format(group)) echo.echo('') diff --git a/aiida/cmdline/commands/cmd_run.py b/aiida/cmdline/commands/cmd_run.py index 5a43cad6f5..bd3972b841 100644 --- a/aiida/cmdline/commands/cmd_run.py +++ b/aiida/cmdline/commands/cmd_run.py @@ -10,13 +10,16 @@ """`verdi run` command.""" import contextlib import os +import functools import sys +import warnings import click from aiida.cmdline.commands.cmd_verdi import verdi from aiida.cmdline.params.options.multivalue import MultipleValueOption from aiida.cmdline.utils import decorators, echo +from aiida.common.warnings import AiidaDeprecationWarning @contextlib.contextmanager @@ -37,31 +40,56 @@ def update_environment(argv): sys.path = _path +def validate_entrypoint_string(ctx, param, value): # pylint: disable=unused-argument,invalid-name + """Validate that `value` is a valid entrypoint string.""" + from aiida.orm import autogroup + + try: + autogroup.Autogroup.validate(value) + except Exception as exc: + raise click.BadParameter(str(exc) + ' ({})'.format(value)) + + return value + + @verdi.command('run', context_settings=dict(ignore_unknown_options=True,)) @click.argument('scriptname', type=click.STRING) @click.argument('varargs', nargs=-1, type=click.UNPROCESSED) -@click.option('-g', '--group', is_flag=True, default=True, show_default=True, help='Enables the autogrouping') -@click.option('-n', '--group-name', type=click.STRING, required=False, help='Specify the name of the auto group') -@click.option('-e', '--exclude', cls=MultipleValueOption, default=[], help='Exclude these classes from auto grouping') +@click.option('--auto-group', is_flag=True, help='Enables the autogrouping') +@click.option( + '-l', + 
'--auto-group-label-prefix', + type=click.STRING, + required=False, + help='Specify the prefix of the label of the auto group (numbers might be automatically ' + 'appended to generate unique names per run).' +) @click.option( - '-i', '--include', cls=MultipleValueOption, default=['all'], help='Include these classes from auto grouping' + '-n', + '--group-name', + type=click.STRING, + required=False, + help='Specify the name of the auto group [DEPRECATED, USE --auto-group-label-prefix instead]. ' + 'This also enables auto-grouping.' ) @click.option( - '-E', - '--excludesubclasses', + '-e', + '--exclude', cls=MultipleValueOption, - default=[], - help='Exclude these classes and their sub classes from auto grouping' + default=None, + help='Exclude these classes from auto grouping (use full entrypoint strings).', + callback=functools.partial(validate_entrypoint_string) ) @click.option( - '-I', - '--includesubclasses', + '-i', + '--include', cls=MultipleValueOption, - default=[], - help='Include these classes and their sub classes from auto grouping' + default=None, + help='Include these classes from auto grouping (use full entrypoint strings or "all").', + callback=validate_entrypoint_string ) @decorators.with_dbenv() -def run(scriptname, varargs, group, group_name, exclude, excludesubclasses, include, includesubclasses): +def run(scriptname, varargs, auto_group, auto_group_label_prefix, group_name, exclude, include): # pylint: disable=too-many-arguments,exec-used """Execute scripts with preloaded AiiDA environment.""" from aiida.cmdline.utils.shell import DEFAULT_MODULES_LIST @@ -80,22 +108,27 @@ def run(scriptname, varargs, group, group_name, exclude, excludesubclasses, incl for app_mod, model_name, alias in DEFAULT_MODULES_LIST: globals_dict['{}'.format(alias)] = getattr(__import__(app_mod, {}, {}, model_name), model_name) - if group: - automatic_group_name = group_name - if automatic_group_name is None: - from aiida.common import timezone - - automatic_group_name = 'Verdi autogroup on ' + timezone.now().strftime('%Y-%m-%d %H:%M:%S') + if group_name: + warnings.warn('--group-name is deprecated, use `--auto-group-label-prefix` instead', AiidaDeprecationWarning) # pylint: disable=no-member + if auto_group_label_prefix: + raise click.BadParameter( + 'You cannot specify both --group-name and --auto-group-label-prefix; ' + 'use --auto-group-label-prefix only' + ) + auto_group_label_prefix = group_name + # To have the old behavior, with auto-group enabled. + auto_group = True + if auto_group: aiida_verdilib_autogroup = autogroup.Autogroup() + # Set the ``group_label_prefix`` if defined, otherwise a default prefix will be used + if auto_group_label_prefix is not None: + aiida_verdilib_autogroup.set_group_label_prefix(auto_group_label_prefix) aiida_verdilib_autogroup.set_exclude(exclude) aiida_verdilib_autogroup.set_include(include) - aiida_verdilib_autogroup.set_exclude_with_subclasses(excludesubclasses) - aiida_verdilib_autogroup.set_include_with_subclasses(includesubclasses) - aiida_verdilib_autogroup.set_group_name(automatic_group_name) # Note: this is also set in the exec environment! 
This is the intended behavior - autogroup.current_autogroup = aiida_verdilib_autogroup + autogroup.CURRENT_AUTOGROUP = aiida_verdilib_autogroup # Initialize the variable here, otherwise we get UnboundLocalError in the finally clause if it fails to open handle = None diff --git a/aiida/engine/processes/calcjobs/calcjob.py b/aiida/engine/processes/calcjobs/calcjob.py index 0e6b234ef0..9f3d2d765f 100644 --- a/aiida/engine/processes/calcjobs/calcjob.py +++ b/aiida/engine/processes/calcjobs/calcjob.py @@ -64,7 +64,7 @@ def validate_calc_job(inputs, ctx): ) -def validate_parser(parser_name, ctx): +def validate_parser(parser_name, ctx): # pylint: disable=unused-argument """Validate the parser. :raises InputValidationError: if the parser name does not correspond to a loadable `Parser` class. @@ -78,7 +78,7 @@ def validate_parser(parser_name, ctx): raise exceptions.InputValidationError('invalid parser specified: {}'.format(exception)) -def validate_resources(resources, ctx): +def validate_resources(resources, ctx): # pylint: disable=unused-argument """Validate the resources. :raises InputValidationError: if `num_machines` is not specified or is not an integer. diff --git a/aiida/manage/caching.py b/aiida/manage/caching.py index d8079fd747..9b7f1d427d 100644 --- a/aiida/manage/caching.py +++ b/aiida/manage/caching.py @@ -22,7 +22,7 @@ from aiida.common import exceptions from aiida.common.lang import type_check -from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, entry_point_group_to_module_path_map +from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP __all__ = ('get_use_cache', 'enable_caching', 'disable_caching') @@ -248,7 +248,7 @@ def _validate_identifier_pattern(*, identifier): 1. - where `group_name` is one of the keys in `entry_point_group_to_module_path_map` + where `group_name` is one of the keys in `ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP` and `tail` can be anything _except_ `ENTRY_POINT_STRING_SEPARATOR`. 2. a fully qualified Python name @@ -276,7 +276,7 @@ def _validate_identifier_pattern(*, identifier): group_pattern, _ = identifier.split(ENTRY_POINT_STRING_SEPARATOR) if not any( _match_wildcard(string=group_name, pattern=group_pattern) - for group_name in entry_point_group_to_module_path_map + for group_name in ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP ): raise ValueError( common_error_msg + "Group name pattern '{}' does not match any of the AiiDA entry point group names.". 
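Taken together, the ``verdi run`` changes above make auto-grouping opt-in via ``--auto-group``, while the include/exclude filters now take entry point strings in which ``%`` acts as the SQL ``LIKE`` wildcard. A hedged sketch of typical invocations (``script.py`` is a placeholder script name):

verdi run --auto-group script.py
verdi run --auto-group --auto-group-label-prefix 'my prefix' script.py
verdi run --auto-group --exclude 'aiida.data:array.%' script.py
verdi run --group-name my_group script.py  # deprecated spelling; implies --auto-group and emits a warning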
@@ -290,7 +290,7 @@ def _validate_identifier_pattern(*, identifier): # aiida.* or aiida.calculations* if '*' in identifier: group_part, _ = identifier.split('*', 1) - if any(group_name.startswith(group_part) for group_name in entry_point_group_to_module_path_map): + if any(group_name.startswith(group_part) for group_name in ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP): return # Finally, check if it could be a fully qualified Python name for identifier_part in identifier.split('.'): diff --git a/aiida/orm/autogroup.py b/aiida/orm/autogroup.py index ed4551a3ad..16bf03f1c1 100644 --- a/aiida/orm/autogroup.py +++ b/aiida/orm/autogroup.py @@ -7,173 +7,278 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### - +"""Module to manage the autogrouping functionality by ``verdi run``.""" +import re +import warnings from aiida.common import exceptions, timezone -from aiida.orm import GroupTypeString - +from aiida.common.escaping import escape_for_sql_like, get_regex_pattern_from_sql +from aiida.common.warnings import AiidaDeprecationWarning +from aiida.orm import GroupTypeString, Group +from aiida.plugins.entry_point import get_entry_point_string_from_class -current_autogroup = None +CURRENT_AUTOGROUP = None VERDIAUTOGROUP_TYPE = GroupTypeString.VERDIAUTOGROUP_TYPE.value -# TODO: make the Autogroup usable to the user, and not only to the verdi run class Autogroup: """ An object used for the autogrouping of objects. The autogrouping is checked by the Node.store() method. - In the store(), the Node will check if current_autogroup is != None. + In the store(), the Node will check if CURRENT_AUTOGROUP is != None. If so, it will call Autogroup.is_to_be_grouped, and decide whether to put it in a group. Such autogroups are going to be of the VERDIAUTOGROUP_TYPE. - The exclude/include lists, can have values 'all' if you want to include/exclude all classes. - Otherwise, they are lists of strings like: calculation.quantumespresso.pw, data.array.kpoints, ... - i.e.: a string identifying the base class, than the path to the class as in Calculation/Data -Factories + The exclude/include lists are lists of strings like: + ``aiida.data:int``, ``aiida.calculation:quantumespresso.pw``, + ``aiida.data:array.%``, ... + i.e.: a string identifying the base class, followed by a colon and the path to the class + as accepted by CalculationFactory/DataFactory. + Each string can contain one or more wildcard characters ``%``; + in this case this is used in a ``like`` comparison with the QueryBuilder. + Note that in this case you have to remember that ``_`` means "any character" + in the QueryBuilder, and you need to escape it if you mean a literal underscore. + + Only one of the two (between exclude and include) can be set. + If none of the two is set, everything is included. """ - def _validate(self, param, is_exact=True): - """ - Used internally to verify the sanity of exclude, include lists - """ - from aiida.plugins import CalculationFactory, DataFactory - - for i in param: - if not any([i.startswith('calculation'), - i.startswith('code'), - i.startswith('data'), - i == 'all', - ]): - raise exceptions.ValidationError('Module not recognized, allow prefixes ' - ' are: calculation, code or data') - the_param = [i + '.'
for i in param] - - factorydict = {'calculation': locals()['CalculationFactory'], - 'data': locals()['DataFactory']} - - for i in the_param: - base, module = i.split('.', 1) - if base == 'code': - if module: - raise exceptions.ValidationError('Cannot have subclasses for codes') - elif base == 'all': - continue - else: - if is_exact: - try: - factorydict[base](module.rstrip('.')) - except exceptions.EntryPointError: - raise exceptions.ValidationError('Cannot find the class to be excluded') - return the_param + def __init__(self): + """Initialize with defaults.""" + self._exclude = None + self._include = None + + now = timezone.now() + default_label_prefix = 'Verdi autogroup on ' + now.strftime('%Y-%m-%d %H:%M:%S') + self._group_label_prefix = default_label_prefix + self._group_label = None # Actual group label, set by `get_or_create_group` + + @staticmethod + def validate(strings): + """Validate the list of strings passed to set_include and set_exclude.""" + if strings is None: + return + valid_prefixes = set(['aiida.node', 'aiida.calculations', 'aiida.workflows', 'aiida.data']) + for string in strings: + pieces = string.split(':') + if len(pieces) != 2: + raise exceptions.ValidationError( + "'{}' is not a valid include/exclude filter, must contain two parts split by a colon". + format(string) + ) + if pieces[0] not in valid_prefixes: + raise exceptions.ValidationError( + "'{}' has an invalid prefix, must be among: {}".format(string, sorted(valid_prefixes)) + ) def get_exclude(self): - """Return the list of classes to exclude from autogrouping.""" - try: - return self.exclude - except AttributeError: - return [] + """Return the list of classes to exclude from autogrouping. - def get_exclude_with_subclasses(self): - """ - Return the list of classes to exclude from autogrouping. - Will also exclude their derived subclasses - """ - try: - return self.exclude_with_subclasses - except AttributeError: - return [] + Returns ``None`` if no exclusion list has been set.""" + return self._exclude def get_include(self): - """Return the list of classes to include in the autogrouping.""" - try: - return self.include - except AttributeError: - return [] - - def get_include_with_subclasses(self): """Return the list of classes to include in the autogrouping. - Will also include their derived subclasses.""" - try: - return self.include_with_subclasses - except AttributeError: - return [] + + Returns ``None`` if no inclusion list has been set.""" + return self._include + + def get_group_label_prefix(self): + """Get the prefix of the label of the group. + If no group label prefix was set, it will set a default one by itself.""" + return self._group_label_prefix def get_group_name(self): - """Get the name of the group. - If no group name was set, it will set a default one by itself.""" - try: - return self.group_name - except AttributeError: - now = timezone.now() - gname = 'Verdi autogroup on ' + now.strftime('%Y-%m-%d %H:%M:%S') - self.set_group_name(gname) - return self.group_name + """Get the label of the group. + If no group label was set, it will set a default one by itself. - def set_exclude(self, exclude): - """Return the list of classes to exclude from autogrouping.""" - the_exclude_classes = self._validate(exclude) - if self.get_include() is not None: - if 'all.' in self.get_include(): - if 'all.' in the_exclude_classes: - raise exceptions.ValidationError('Cannot exclude and include all classes') - self.exclude = the_exclude_classes - - def set_exclude_with_subclasses(self, exclude): + .. 
deprecated:: 1.2.0 + Will be removed in `v2.0.0`, use :py:meth:`.get_group_label_prefix` instead. """ - Set the list of classes to exclude from autogrouping. - Will also exclude their derived subclasses + warnings.warn('function is deprecated, use `get_group_label_prefix` instead', AiidaDeprecationWarning) # pylint: disable=no-member + return self.get_group_label_prefix() + + def set_exclude(self, exclude): + """Set the list of classes to exclude in the autogrouping. + + :param exclude: a list of valid entry point strings (might contain '%' to be used as + string to be matched using SQL's ``LIKE`` pattern-making logic), or ``None`` + to specify no exclude list. """ - the_exclude_classes = self._validate(exclude, is_exact=False) - self.exclude_with_subclasses = the_exclude_classes + if isinstance(exclude, str): + exclude = [exclude] + self.validate(exclude) + if exclude is not None and self.get_include() is not None: + # It's ok to set None, both as a default, or to 'undo' the exclude list + raise exceptions.ValidationError('Cannot both specify exclude and include') + self._exclude = exclude def set_include(self, include): - """ - Set the list of classes to include in the autogrouping. - """ - the_include_classes = self._validate(include) - if self.get_exclude() is not None: - if 'all.' in self.get_exclude(): - if 'all.' in the_include_classes: - raise exceptions.ValidationError('Cannot exclude and include all classes') + """Set the list of classes to include in the autogrouping. - self.include = the_include_classes + :param include: a list of valid entry point strings (might contain '%' to be used as + string to be matched using SQL's ``LIKE`` pattern-making logic), or ``None`` + to specify no include list. + """ + if isinstance(include, str): + include = [include] + self.validate(include) + if include is not None and self.get_exclude() is not None: + # It's ok to set None, both as a default, or to 'undo' the include list + raise exceptions.ValidationError('Cannot both specify exclude and include') + self._include = include - def set_include_with_subclasses(self, include): + def set_group_label_prefix(self, label_prefix): """ - Set the list of classes to include in the autogrouping. - Will also include their derived subclasses. + Set the label of the group to be created """ - the_include_classes = self._validate(include, is_exact=False) - self.include_with_subclasses = the_include_classes + if not isinstance(label_prefix, str): + raise exceptions.ValidationError('group label must be a string') + self._group_label_prefix = label_prefix def set_group_name(self, gname): + """Set the name of the group. + + .. deprecated:: 1.2.0 + Will be removed in `v2.0.0`, use :py:meth:`.set_group_label_prefix` instead. """ - Set the name of the group to be created + warnings.warn('function is deprecated, use `set_group_label_prefix` instead', AiidaDeprecationWarning) # pylint: disable=no-member + return self.set_group_label_prefix(label_prefix=gname) + + @staticmethod + def _matches(string, filter_string): + """Check if 'string' matches the 'filter_string' (used for include and exclude filters). + + If 'filter_string' does not contain any % sign, perform an exact match. + Otherwise, match with a SQL-like query, where % means any character sequence, + and _ means a single character (these characters can be escaped with a backslash). + + :param string: the string to match. + :param filter_string: the filter string.
""" - if not isinstance(gname, str): - raise exceptions.ValidationError('group name must be a string') - self.group_name = gname + if '%' in filter_string: + regex_filter = get_regex_pattern_from_sql(filter_string) + return re.match(regex_filter, string) is not None + return string == filter_string - def is_to_be_grouped(self, the_class): + def is_to_be_grouped(self, node): """ - Return whether the given class has to be included in the autogroup according to include/exclude list + Return whether the given node has to be included in the autogroup according to include/exclude list - :return (bool): True if the_class is to be included in the autogroup + :return (bool): True if ``node`` is to be included in the autogroup """ + # strings, including possibly 'all' include = self.get_include() - include_ws = self.get_include_with_subclasses() - if (('all.' in include) or - (the_class._plugin_type_string in include) or - any([the_class._plugin_type_string.startswith(i) for i in include_ws]) - ): - exclude = self.get_exclude() - exclude_ws = self.get_exclude_with_subclasses() - if ((not 'all.' in exclude) or - (the_class._plugin_type_string in exclude) or - any([the_class._plugin_type_string.startswith(i) for i in exclude_ws]) - ): - return True - else: - return False + exclude = self.get_exclude() + if include is None and exclude is None: + # Include all classes by default if nothing is explicitly specified. + return True + + # We should never be here, anyway - this should be catched by the `set_include/exclude` methods + assert include is None or exclude is None, "You cannot specify both an 'include' and an 'exclude' list" + + entry_point_string = node.process_type + # If there is no `process_type` we are dealing with a `Data` node so we get the entry point from the class + if not entry_point_string: + entry_point_string = get_entry_point_string_from_class(node.__class__.__module__, node.__class__.__name__) + if include is not None: + # As soon as a filter string matches, we include the class + return any(self._matches(entry_point_string, filter_string) for filter_string in include) + # If we are here, exclude is not None + # include *only* in *none* of the filters match (that is, exclude as + # soon as any of the filters matches) + return not any(self._matches(entry_point_string, filter_string) for filter_string in exclude) + + def clear_group_cache(self): + """Clear the cache of the group name. + + This is mostly used by tests when they reset the database. + """ + self._group_label = None + + def get_or_create_group(self): + """Return the current Autogroup, or create one if None has been set yet. + + This function implements a somewhat complex logic that is however needed + to make sure that, even if `verdi run` is called at the same time multiple + times, e.g. in a for loop in bash, there is never the risk that two ``verdi run`` + Unix processes try to create the same group, with the same label, ending + up in a crash of the code (see PR #3650). + + Here, instead, we make sure that if this concurrency issue happens, + one of the two will get a IntegrityError from the DB, and then recover + trying to create a group with a different label (with a numeric suffix appended), + until it manages to create it. + """ + from aiida.orm import QueryBuilder + + # When this function is called, if it is the first time, just generate + # a new group name (later on, after this ``if`` block`). 
+ # In that case, we will later cache in ``self._group_label`` the group label, + # So the group with the same name can be returned quickly in future + # calls of this method. + if self._group_label is not None: + results = [ + res[0] for res in QueryBuilder(). + append(Group, filters={ + 'label': self._group_label, + 'type_string': VERDIAUTOGROUP_TYPE + }, project='*').iterall() + ] + if results: + # If it is not empty, it should have only one result due to the + # uniqueness constraints + assert len(results) == 1, 'I got more than one autogroup with the same label!' + return results[0] + # There are no results: probably the group has been deleted. + # I continue as if it was not cached + self._group_label = None + + label_prefix = self.get_group_label_prefix() + # Try to do a preliminary QB query to avoid to do too many try/except + # if many of the prefix_NUMBER groups already exist + queryb = QueryBuilder().append( + Group, + filters={ + 'or': [{ + 'label': { + '==': label_prefix + } + }, { + 'label': { + 'like': escape_for_sql_like(label_prefix + '_') + '%' + } + }] + }, + project='label' + ) + existing_group_labels = [res[0][len(label_prefix):] for res in queryb.all()] + existing_group_ints = [] + for label in existing_group_labels: + if label == '': + # This is just the prefix without name - corresponds to counter = 0 + existing_group_ints.append(0) + elif label.startswith('_'): + try: + existing_group_ints.append(int(label[1:])) + except ValueError: + # It's not an integer, so it will never collide - just ignore it + pass + + if not existing_group_ints: + counter = 0 else: - return False + counter = max(existing_group_ints) + 1 + + while True: + try: + label = label_prefix if counter == 0 else '{}_{}'.format(label_prefix, counter) + group = Group(label=label, type_string=VERDIAUTOGROUP_TYPE).store() + self._group_label = group.label + except exceptions.IntegrityError: + counter += 1 + else: + break + + return group diff --git a/aiida/orm/nodes/node.py b/aiida/orm/nodes/node.py index c8608bc36c..f776809952 100644 --- a/aiida/orm/nodes/node.py +++ b/aiida/orm/nodes/node.py @@ -23,6 +23,7 @@ from aiida.orm.utils.links import LinkManager, LinkTriple from aiida.orm.utils.repository import Repository from aiida.orm.utils.node import AbstractNodeMeta, validate_attribute_extra_key +from aiida.orm import autogroup from ..comments import Comment from ..computers import Computer @@ -1037,18 +1038,9 @@ def store(self, with_transaction=True, use_cache=None): # pylint: disable=argum self._store(with_transaction=with_transaction, clean=True) # Set up autogrouping used by verdi run - from aiida.orm.autogroup import current_autogroup, Autogroup, VERDIAUTOGROUP_TYPE - from aiida.orm import Group - - if current_autogroup is not None: - if not isinstance(current_autogroup, Autogroup): - raise exceptions.ValidationError('`current_autogroup` is not of type `Autogroup`') - - if current_autogroup.is_to_be_grouped(self): - group_label = current_autogroup.get_group_name() - if group_label is not None: - group = Group.objects.get_or_create(label=group_label, type_string=VERDIAUTOGROUP_TYPE)[0] - group.add_nodes(self) + if autogroup.CURRENT_AUTOGROUP is not None and autogroup.CURRENT_AUTOGROUP.is_to_be_grouped(self): + group = autogroup.CURRENT_AUTOGROUP.get_or_create_group() + group.add_nodes(self) return self diff --git a/aiida/orm/utils/node.py b/aiida/orm/utils/node.py index 6827225272..0432964467 100644 --- a/aiida/orm/utils/node.py +++ b/aiida/orm/utils/node.py @@ -90,13 +90,13 @@ def 
get_type_string_from_class(class_module, class_name): :param class_module: module of the class :param class_name: name of the class """ - from aiida.plugins.entry_point import get_entry_point_from_class, entry_point_group_to_module_path_map + from aiida.plugins.entry_point import get_entry_point_from_class, ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP group, entry_point = get_entry_point_from_class(class_module, class_name) # If we can reverse engineer an entry point group and name, we're dealing with an external class if group and entry_point: - module_base_path = entry_point_group_to_module_path_map[group] + module_base_path = ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP[group] type_string = '{}.{}.{}.'.format(module_base_path, entry_point.name, class_name) # Otherwise we are dealing with an internal class @@ -258,7 +258,7 @@ class AbstractNodeMeta(ABCMeta): # pylint: disable=too-few-public-methods """Some python black magic to set correctly the logger also in subclasses.""" def __new__(mcs, name, bases, namespace): # pylint: disable=arguments-differ,protected-access,too-many-function-args - newcls = ABCMeta.__new__(mcs, name, bases, namespace) + newcls = ABCMeta.__new__(mcs, name, bases, namespace) # pylint: disable=too-many-function-args newcls._logger = logging.getLogger('{}.{}'.format(namespace['__module__'], name)) # Set the plugin type string and query type string based on the plugin type string diff --git a/aiida/plugins/entry_point.py b/aiida/plugins/entry_point.py index 432e740852..2abe6be077 100644 --- a/aiida/plugins/entry_point.py +++ b/aiida/plugins/entry_point.py @@ -7,7 +7,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### - +"""Module to manage loading entrypoints.""" import enum import traceback import functools @@ -24,7 +24,6 @@ __all__ = ('load_entry_point', 'load_entry_point_from_string') - ENTRY_POINT_GROUP_PREFIX = 'aiida.' ENTRY_POINT_STRING_SEPARATOR = ':' @@ -51,7 +50,7 @@ class EntryPointFormat(enum.Enum): MINIMAL = 3 -entry_point_group_to_module_path_map = { +ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP = { 'aiida.calculations': 'aiida.orm.nodes.process.calculation.calcjob', 'aiida.cmdline.data': 'aiida.cmdline.data', 'aiida.data': 'aiida.orm.nodes.data', @@ -65,7 +64,7 @@ class EntryPointFormat(enum.Enum): } -def validate_registered_entry_points(): +def validate_registered_entry_points(): # pylint: disable=invalid-name """Validate all registered entry points by loading them with the corresponding factory. :raises EntryPointError: if any of the registered entry points cannot be loaded. 
This can happen if: @@ -108,12 +107,11 @@ def format_entry_point_string(group, name, fmt=EntryPointFormat.FULL): if fmt == EntryPointFormat.FULL: return '{}{}{}'.format(group, ENTRY_POINT_STRING_SEPARATOR, name) - elif fmt == EntryPointFormat.PARTIAL: + if fmt == EntryPointFormat.PARTIAL: return '{}{}{}'.format(group[len(ENTRY_POINT_GROUP_PREFIX):], ENTRY_POINT_STRING_SEPARATOR, name) - elif fmt == EntryPointFormat.MINIMAL: + if fmt == EntryPointFormat.MINIMAL: return '{}'.format(name) - else: - raise ValueError('invalid EntryPointFormat') + raise ValueError('invalid EntryPointFormat') def parse_entry_point_string(entry_point_string): @@ -146,14 +144,13 @@ def get_entry_point_string_format(entry_point_string): :rtype: EntryPointFormat """ try: - group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) + group, _ = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) except ValueError: return EntryPointFormat.MINIMAL else: if group.startswith(ENTRY_POINT_GROUP_PREFIX): return EntryPointFormat.FULL - else: - return EntryPointFormat.PARTIAL + return EntryPointFormat.PARTIAL def get_entry_point_from_string(entry_point_string): @@ -216,7 +213,7 @@ def get_entry_point_groups(): :return: a list of valid entry point groups """ - return entry_point_group_to_module_path_map.keys() + return ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP.keys() def get_entry_point_names(group, sort=True): @@ -243,7 +240,7 @@ def get_entry_points(group): :param group: the entry point group :return: a list of entry points """ - return [ep for ep in ENTRYPOINT_MANAGER.iter_entry_points(group=group)] + return list(ENTRYPOINT_MANAGER.iter_entry_points(group=group)) @functools.lru_cache(maxsize=None) @@ -260,12 +257,16 @@ def get_entry_point(group, name): entry_points = [ep for ep in get_entry_points(group) if ep.name == name] if not entry_points: - raise MissingEntryPointError("Entry point '{}' not found in group '{}'. Try running `reentry scan` to update " - 'the entry point cache.'.format(name, group)) + raise MissingEntryPointError( + "Entry point '{}' not found in group '{}'. 
Try running `reentry scan` to update " + 'the entry point cache.'.format(name, group) + ) if len(entry_points) > 1: - raise MultipleEntryPointError("Multiple entry points '{}' found in group '{}'.Try running `reentry scan` to " - 'repopulate the entry point cache.'.format(name, group)) + raise MultipleEntryPointError( + "Multiple entry points '{}' found in group '{}'.Try running `reentry scan` to " + 'repopulate the entry point cache.'.format(name, group) + ) return entry_points[0] @@ -291,7 +292,7 @@ def get_entry_point_from_class(class_module, class_name): return None, None -def get_entry_point_string_from_class(class_module, class_name): +def get_entry_point_string_from_class(class_module, class_name): # pylint: disable=invalid-name """ Given the module and name of a class, attempt to obtain the corresponding entry point if it exists and return the entry point string which will be the entry point group and entry point @@ -313,8 +314,7 @@ def get_entry_point_string_from_class(class_module, class_name): if group and entry_point: return ENTRY_POINT_STRING_SEPARATOR.join([group, entry_point.name]) - else: - return None + return None def is_valid_entry_point_string(entry_point_string): @@ -328,12 +328,12 @@ def is_valid_entry_point_string(entry_point_string): :return: True if the string is considered valid, False otherwise """ try: - group, name = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) + group, _ = entry_point_string.split(ENTRY_POINT_STRING_SEPARATOR) except (AttributeError, ValueError): # Either `entry_point_string` is not a string or it does not contain the separator return False - return group in entry_point_group_to_module_path_map + return group in ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP @functools.lru_cache(maxsize=None) @@ -349,11 +349,10 @@ def is_registered_entry_point(class_module, class_name, groups=None): :return: boolean, True if the class is a registered entry point, False otherwise. """ if groups is None: - groups = list(entry_point_group_to_module_path_map.keys()) + groups = list(ENTRY_POINT_GROUP_TO_MODULE_PATH_MAP.keys()) for group in groups: for entry_point in ENTRYPOINT_MANAGER.iter_entry_points(group): if class_module == entry_point.module_name and [class_name] == entry_point.attrs: return True - else: - return False + return False diff --git a/aiida/tools/ipython/ipython_magics.py b/aiida/tools/ipython/ipython_magics.py index af3d8cb395..66310c37b9 100644 --- a/aiida/tools/ipython/ipython_magics.py +++ b/aiida/tools/ipython/ipython_magics.py @@ -34,8 +34,8 @@ In [2]: %aiida """ -from IPython import version_info -from IPython.core import magic +from IPython import version_info # pylint: disable=no-name-in-module +from IPython.core import magic # pylint: disable=no-name-in-module,import-error from aiida.common import json diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst index 1e384344e1..3c24ffeb1b 100644 --- a/docs/source/verdi/verdi_user_guide.rst +++ b/docs/source/verdi/verdi_user_guide.rst @@ -702,15 +702,19 @@ Below is a list with all available subcommands. Execute scripts with preloaded AiiDA environment. 
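As a quick reference for the entry point helpers touched above, the three string formats they distinguish look as follows (a sketch using the functions and constants from this diff):

from aiida.plugins.entry_point import EntryPointFormat, format_entry_point_string

format_entry_point_string('aiida.calculations', 'arithmetic.add', fmt=EntryPointFormat.FULL)
# -> 'aiida.calculations:arithmetic.add'
format_entry_point_string('aiida.calculations', 'arithmetic.add', fmt=EntryPointFormat.PARTIAL)
# -> 'calculations:arithmetic.add'
format_entry_point_string('aiida.calculations', 'arithmetic.add', fmt=EntryPointFormat.MINIMAL)
# -> 'arithmetic.add'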
Options: - -g, --group Enables the autogrouping [default: True] - -n, --group-name TEXT Specify the name of the auto group - -e, --exclude TEXT Exclude these classes from auto grouping - -i, --include TEXT Include these classes from auto grouping - -E, --excludesubclasses TEXT Exclude these classes and their sub classes - from auto grouping - -I, --includesubclasses TEXT Include these classes and their sub classes - from auto grouping - --help Show this message and exit. + --auto-group Enables the autogrouping + -l, --auto-group-label-prefix TEXT + Specify the prefix of the label of the auto + group (numbers might be automatically + appended to generate unique names per run). + -n, --group-name TEXT Specify the name of the auto group + [DEPRECATED, USE --auto-group-label-prefix + instead]. This also enables auto-grouping. + -e, --exclude TEXT Exclude these classes from auto grouping + (use full entrypoint strings). + -i, --include TEXT Include these classes from auto grouping + (use full entrypoint strings or "all"). + --help Show this message and exit. .. _verdi_setup: diff --git a/tests/backends/aiida_django/migrations/test_migrations_common.py b/tests/backends/aiida_django/migrations/test_migrations_common.py index f8de61f9a6..43f4f03b3d 100644 --- a/tests/backends/aiida_django/migrations/test_migrations_common.py +++ b/tests/backends/aiida_django/migrations/test_migrations_common.py @@ -38,8 +38,8 @@ def setUp(self): from aiida.backends.djsite import get_scoped_session from aiida.orm import autogroup - self.current_autogroup = autogroup.current_autogroup - autogroup.current_autogroup = None + self.current_autogroup = autogroup.CURRENT_AUTOGROUP + autogroup.CURRENT_AUTOGROUP = None assert self.migrate_from and self.migrate_to, \ "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__) self.migrate_from = [(self.app, self.migrate_from)] @@ -85,7 +85,7 @@ def tearDown(self): """At the end make sure we go back to the latest schema version.""" from aiida.orm import autogroup self._revert_database_schema() - autogroup.current_autogroup = self.current_autogroup + autogroup.CURRENT_AUTOGROUP = self.current_autogroup def setUpBeforeMigration(self): """Anything to do before running the migrations, which should be implemented in test subclasses.""" diff --git a/tests/backends/aiida_sqlalchemy/test_migrations.py b/tests/backends/aiida_sqlalchemy/test_migrations.py index 8bdda5d145..8e2046f293 100644 --- a/tests/backends/aiida_sqlalchemy/test_migrations.py +++ b/tests/backends/aiida_sqlalchemy/test_migrations.py @@ -22,7 +22,6 @@ from aiida.backends.sqlalchemy.models.base import Base from aiida.backends.sqlalchemy.utils import flag_modified from aiida.backends.testbase import AiidaTestCase -from aiida.common.utils import Capturing from .test_utils import new_database @@ -57,22 +56,28 @@ def setUp(self): super().setUp() from aiida.orm import autogroup - self.current_autogroup = autogroup.current_autogroup - autogroup.current_autogroup = None + self.current_autogroup = autogroup.CURRENT_AUTOGROUP + autogroup.CURRENT_AUTOGROUP = None assert self.migrate_from and self.migrate_to, \ "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__) try: - with Capturing(): - self.migrate_db_down(self.migrate_from) + self.migrate_db_down(self.migrate_from) self.setUpBeforeMigration() - with Capturing(): - self.migrate_db_up(self.migrate_to) + self._perform_actual_migration() except Exception: # Bring back the DB to the correct state if 
this setup part fails self._reset_database_and_schema() + autogroup.CURRENT_AUTOGROUP = self.current_autogroup raise + def _perform_actual_migration(self): + """Perform the actual migration (upwards, to migrate_to). + + Must be called after we are properly set to be in migrate_from. + """ + self.migrate_db_up(self.migrate_to) + def migrate_db_up(self, destination): """ Perform a migration upwards (upgrade) with alembic @@ -99,7 +104,7 @@ def tearDown(self): """ from aiida.orm import autogroup self._reset_database_and_schema() - autogroup.current_autogroup = self.current_autogroup + autogroup.CURRENT_AUTOGROUP = self.current_autogroup super().tearDown() def setUpBeforeMigration(self): # pylint: disable=invalid-name @@ -116,8 +121,7 @@ def _reset_database_and_schema(self): of tests. """ self.reset_database() - with Capturing(): - self.migrate_db_up('head') + self.migrate_db_up('head') @property def current_rev(self): @@ -210,29 +214,12 @@ class TestBackwardMigrationsSQLA(TestMigrationsSQLA): than the migrate_to revision. """ - def setUp(self): - """ - Go to the migrate_from revision, apply setUpBeforeMigration, then - run the migration. - """ - AiidaTestCase.setUp(self) # pylint: disable=bad-super-call - from aiida.orm import autogroup + def _perform_actual_migration(self): + """Perform the actual migration (downwards, to migrate_to). - self.current_autogroup = autogroup.current_autogroup - autogroup.current_autogroup = None - assert self.migrate_from and self.migrate_to, \ - "TestCase '{}' must define migrate_from and migrate_to properties".format(type(self).__name__) - - try: - with Capturing(): - self.migrate_db_down(self.migrate_from) - self.setUpBeforeMigration() - with Capturing(): - self.migrate_db_down(self.migrate_to) - except Exception: - # Bring back the DB to the correct state if this setup part fails - self._reset_database_and_schema() - raise + Must be called after we are properly set to be in migrate_from. + """ + self.migrate_db_down(self.migrate_to) class TestMigrationEngine(TestMigrationsSQLA): @@ -1003,7 +990,7 @@ class TestDbLogUUIDAddition(TestMigrationsSQLA): """ Test that the UUID column is correctly added to the DbLog table and that the uniqueness constraint is added without problems (if the migration arrives until 375c2db70663 then the - constraint is added properly. + constraint is added properly). """ migrate_from = '041a79fc615f' # 041a79fc615f_dblog_cleaning diff --git a/tests/cmdline/commands/test_calcjob.py b/tests/cmdline/commands/test_calcjob.py index dc7895c5d4..2f1945d45a 100644 --- a/tests/cmdline/commands/test_calcjob.py +++ b/tests/cmdline/commands/test_calcjob.py @@ -98,6 +98,7 @@ def setUpClass(cls, *args, **kwargs): cls.arithmetic_job = calculations[0] def setUp(self): + super().setUp() self.cli_runner = CliRunner() def test_calcjob_res(self): diff --git a/tests/cmdline/commands/test_run.py b/tests/cmdline/commands/test_run.py index 3cae78fb70..78c858420f 100644 --- a/tests/cmdline/commands/test_run.py +++ b/tests/cmdline/commands/test_run.py @@ -8,6 +8,9 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Tests for `verdi run`.""" +import tempfile +import warnings + from click.testing import CliRunner from aiida.backends.testbase import AiidaTestCase @@ -28,7 +31,6 @@ def test_run_workfunction(self): that are defined within the script will fail, as the inspect module will not correctly be able to determin the full path of the source file. 
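The autogroup tests that follow repeatedly locate the auto group of a freshly created node with the same QueryBuilder pattern; pulled out as a standalone sketch (a hypothetical helper, assuming a stored node with primary key ``pk``):

from aiida.orm import Group, Node, QueryBuilder

def get_auto_groups_for_node(pk):
    """Return all groups with type_string 'auto.run' that contain the node with the given pk."""
    builder = QueryBuilder().append(Node, filters={'id': pk}, tag='node')
    builder.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*')
    return [entry[0] for entry in builder.all()]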
""" - import tempfile from aiida.orm import load_node from aiida.orm import WorkFunctionNode @@ -64,3 +66,355 @@ def wf(): self.assertTrue(isinstance(node, WorkFunctionNode)) self.assertEqual(node.function_name, 'wf') self.assertEqual(node.get_function_source_code(), script_content) + + +class TestAutoGroups(AiidaTestCase): + """Test the autogroup functionality.""" + + def setUp(self): + """Setup the CLI runner to run command line commands.""" + from aiida.orm import autogroup + + super().setUp() + self.cli_runner = CliRunner() + # I need to disable the global variable of this test environment, + # because invoke is just calling the function and therefore inheriting + # the global variable + self._old_autogroup = autogroup.CURRENT_AUTOGROUP + autogroup.CURRENT_AUTOGROUP = None + + def tearDown(self): + """Setup the CLI runner to run command line commands.""" + from aiida.orm import autogroup + + super().tearDown() + autogroup.CURRENT_AUTOGROUP = self._old_autogroup + + def test_autogroup(self): + """Check if the autogroup is properly generated.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """from aiida.orm import Data +node = Data().store() +print(node.pk) +""" + + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + options = ['--auto-group', fhandle.name] + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = load_node(pk) # Check if the node can be loaded + + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual( + len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' + ) + + def test_autogroup_custom_label(self): + """Check if the autogroup is properly generated with the label specified.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """from aiida.orm import Data +node = Data().store() +print(node.pk) +""" + autogroup_label = 'SOME_group_LABEL' + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + options = [fhandle.name, '--auto-group', '--auto-group-label-prefix', autogroup_label] + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = load_node(pk) # Check if the node can be loaded + + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual( + len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' + ) + self.assertEqual(all_auto_groups[0][0].label, autogroup_label) + + def test_no_autogroup(self): + """Check if the autogroup is not generated if ``verdi run`` is asked not to.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """from aiida.orm import Data +node = Data().store() +print(node.pk) +""" + + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + options = [fhandle.name] # Not storing an autogroup by default + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = 
load_node(pk) # Check if the node can be loaded + + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual(len(all_auto_groups), 0, 'There should be no autogroup generated') + + def test_autogroup_filter_class(self): # pylint: disable=too-many-locals + """Check if the autogroup is properly generated but filtered classes are skipped.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """import sys +from aiida.orm import Computer, Int, ArrayData, KpointsData, CalculationNode, WorkflowNode +from aiida.plugins import CalculationFactory +from aiida.engine import run_get_node +ArithmeticAdd = CalculationFactory('arithmetic.add') + +computer = Computer( + name='localhost-example-{}'.format(sys.argv[1]), + hostname='localhost', + description='my computer', + transport_type='local', + scheduler_type='direct', + workdir='/tmp' +).store() +computer.configure() + +code = Code( + input_plugin_name='arithmetic.add', + remote_computer_exec=[computer, '/bin/true']).store() +inputs = { + 'x': Int(1), + 'y': Int(2), + 'code': code, + 'metadata': { + 'options': { + 'resources': { + 'num_machines': 1, + 'num_mpiprocs_per_machine': 1 + } + } + } +} + +node1 = KpointsData().store() +node2 = ArrayData().store() +node3 = Int(3).store() +node4 = CalculationNode().store() +node5 = WorkflowNode().store() +_, node6 = run_get_node(ArithmeticAdd, **inputs) +print(node1.pk) +print(node2.pk) +print(node3.pk) +print(node4.pk) +print(node5.pk) +print(node6.pk) +""" + from aiida.orm import Code + Code() + for idx, ( + flags, + kptdata_in_autogroup, + arraydata_in_autogroup, + int_in_autogroup, + calc_in_autogroup, + wf_in_autogroup, + calcarithmetic_in_autogroup, + ) in enumerate([ + [['--exclude', 'aiida.data:array.kpoints'], False, True, True, True, True, True], + # Check if % works anywhere - both 'int' and 'array.kpoints' contain an 'i' + [['--exclude', 'aiida.data:%i%'], False, True, False, True, True, True], + [['--exclude', 'aiida.data:int'], True, True, False, True, True, True], + [['--exclude', 'aiida.data:%'], False, False, False, True, True, True], + [['--exclude', 'aiida.data:array', 'aiida.data:array.%'], False, False, True, True, True, True], + [['--exclude', 'aiida.data:array', 'aiida.data:array.%', 'aiida.data:int'], False, False, False, True, True, + True], + [['--exclude', 'aiida.calculations:arithmetic.add'], True, True, True, True, True, False], + [ + ['--include', 'aiida.node:process.calculation'], # Base type, no specific plugin + False, + False, + False, + True, + False, + False + ], + [ + ['--include', 'aiida.node:process.workflow'], # Base type, no specific plugin + False, + False, + False, + False, + True, + False + ], + [[], True, True, True, True, True, True], + ]): + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + options = ['--auto-group'] + flags + ['--', fhandle.name, str(idx)] + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk1_str, pk2_str, pk3_str, pk4_str, pk5_str, pk6_str = result.output.split() + pk1 = int(pk1_str) + pk2 = int(pk2_str) + pk3 = int(pk3_str) + pk4 = int(pk4_str) + pk5 = int(pk5_str) + pk6 = int(pk6_str) + _ = load_node(pk1) # Check if the node can be loaded + _ = load_node(pk2) # Check if the node can be loaded + _ = load_node(pk3) # Check if the node can be loaded + _ 
= load_node(pk4) # Check if the node can be loaded + _ = load_node(pk5) # Check if the node can be loaded + _ = load_node(pk6) # Check if the node can be loaded + + queryb = QueryBuilder().append(Node, filters={'id': pk1}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_kptdata = queryb.all() + + queryb = QueryBuilder().append(Node, filters={'id': pk2}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_arraydata = queryb.all() + + queryb = QueryBuilder().append(Node, filters={'id': pk3}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_int = queryb.all() + + queryb = QueryBuilder().append(Node, filters={'id': pk4}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_calc = queryb.all() + + queryb = QueryBuilder().append(Node, filters={'id': pk5}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_wf = queryb.all() + + queryb = QueryBuilder().append(Node, filters={'id': pk6}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups_calcarithmetic = queryb.all() + + self.assertEqual( + len(all_auto_groups_kptdata), 1 if kptdata_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the KpointsData node ' + "just created with flags '{}'".format(' '.join(flags)) + ) + self.assertEqual( + len(all_auto_groups_arraydata), 1 if arraydata_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the ArrayData node ' + "just created with flags '{}'".format(' '.join(flags)) + ) + self.assertEqual( + len(all_auto_groups_int), 1 if int_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the Int node ' + "just created with flags '{}'".format(' '.join(flags)) + ) + self.assertEqual( + len(all_auto_groups_calc), 1 if calc_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the CalculationNode ' + "just created with flags '{}'".format(' '.join(flags)) + ) + self.assertEqual( + len(all_auto_groups_wf), 1 if wf_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the WorkflowNode ' + "just created with flags '{}'".format(' '.join(flags)) + ) + self.assertEqual( + len(all_auto_groups_calcarithmetic), 1 if calcarithmetic_in_autogroup else 0, + 'Wrong number of nodes in autogroup associated with the ArithmeticAdd CalcJobNode ' + "just created with flags '{}'".format(' '.join(flags)) + ) + + def test_autogroup_clashing_label(self): + """Check if the autogroup label is properly (re)generated when it clashes with an existing group name.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """from aiida.orm import Data +node = Data().store() +print(node.pk) +""" + autogroup_label = 'SOME_repeated_group_LABEL' + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + # First run + options = [fhandle.name, '--auto-group', '--auto-group-label-prefix', autogroup_label] + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = load_node(pk) # Check if the node can be loaded + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + 
queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual( + len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' + ) + self.assertEqual(all_auto_groups[0][0].label, autogroup_label) + + # A few more runs with the same label - it should not crash but append something to the group name + for _ in range(10): + options = [fhandle.name, '--auto-group', '--auto-group-label-prefix', autogroup_label] + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = load_node(pk) # Check if the node can be loaded + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual( + len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' + ) + self.assertTrue(all_auto_groups[0][0].label.startswith(autogroup_label)) + + def test_legacy_autogroup_name(self): + """Check if the autogroup is properly generated when using the legacy --group-name flag.""" + from aiida.orm import QueryBuilder, Node, Group, load_node + + script_content = """from aiida.orm import Data +node = Data().store() +print(node.pk) +""" + group_label = 'legacy-group-name' + + with tempfile.NamedTemporaryFile(mode='w+') as fhandle: + fhandle.write(script_content) + fhandle.flush() + + options = ['--group-name', group_label, fhandle.name] + with warnings.catch_warnings(record=True) as warns: # pylint: disable=no-member + result = self.cli_runner.invoke(cmd_run.run, options) + self.assertTrue( + any(['use `--auto-group-label-prefix` instead' in str(warn.message) for warn in warns]), + "No warning for '--group-name' was raised" + ) + + self.assertClickResultNoException(result) + + pk = int(result.output) + _ = load_node(pk) # Check if the node can be loaded + + queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') + queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + all_auto_groups = queryb.all() + self.assertEqual( + len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' + ) + self.assertEqual( + all_auto_groups[0][0].label, group_label, + 'The auto group label is "{}" instead of "{}"'.format(all_auto_groups[0][0].label, group_label) + ) diff --git a/tests/engine/processes/workchains/test_utils.py b/tests/engine/processes/workchains/test_utils.py index 00f1e127a3..51a787235e 100644 --- a/tests/engine/processes/workchains/test_utils.py +++ b/tests/engine/processes/workchains/test_utils.py @@ -53,6 +53,7 @@ def test_priority(self): attribute_key = 'handlers_called' class ArithmeticAddBaseWorkChain(BaseRestartWorkChain): + """Implementation of a possible BaseRestartWorkChain for the ``ArithmeticAddCalculation``.""" _process_class = ArithmeticAddCalculation @@ -61,6 +62,7 @@ class ArithmeticAddBaseWorkChain(BaseRestartWorkChain): # This can then be checked after invoking `inspect_process` to ensure they were called in the right order @process_handler(priority=100) def handler_01(self, node): + """Example handler returning ExitCode 100.""" handlers_called = node.get_attribute(attribute_key, default=[]) handlers_called.append('handler_01') node.set_attribute(attribute_key, handlers_called) @@ -68,6 +70,7 @@ def handler_01(self, node):
@process_handler(priority=300) def handler_03(self, node): + """Example handler returning ExitCode 300.""" handlers_called = node.get_attribute(attribute_key, default=[]) handlers_called.append('handler_03') node.set_attribute(attribute_key, handlers_called) @@ -75,6 +78,7 @@ def handler_03(self, node): @process_handler(priority=200) def handler_02(self, node): + """Example handler returning ExitCode 200.""" handlers_called = node.get_attribute(attribute_key, default=[]) handlers_called.append('handler_02') node.set_attribute(attribute_key, handlers_called) @@ -82,6 +86,7 @@ def handler_02(self, node): @process_handler(priority=400) def handler_04(self, node): + """Example handler returning ExitCode 400.""" handlers_called = node.get_attribute(attribute_key, default=[]) handlers_called.append('handler_04') node.set_attribute(attribute_key, handlers_called) @@ -159,6 +164,7 @@ def test_exit_codes_filter(self): node_skip.set_exit_status(200) # Some other exit status class ArithmeticAddBaseWorkChain(BaseRestartWorkChain): + """Minimal base restart workchain for the ``ArithmeticAddCalculation``.""" _process_class = ArithmeticAddCalculation diff --git a/tests/orm/test_autogroups.py b/tests/orm/test_autogroups.py new file mode 100644 index 0000000000..e1426ad2e8 --- /dev/null +++ b/tests/orm/test_autogroups.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for the Autogroup functionality.""" +from aiida.backends.testbase import AiidaTestCase +from aiida.orm import Group, QueryBuilder +from aiida.orm.autogroup import Autogroup + + +class TestAutogroup(AiidaTestCase): + """Tests the Autogroup logic.""" + + def test_get_or_create(self): + """Test the ``get_or_create_group`` method of ``Autogroup``.""" + label_prefix = 'test_prefix_TestAutogroup' + + # Check that there are no groups to begin with + queryb = QueryBuilder().append(Group, filters={'type_string': 'auto.run', 'label': label_prefix}, project='*') + assert not list(queryb.all()) + queryb = QueryBuilder().append( + Group, filters={ + 'type_string': 'auto.run', + 'label': { + 'like': r'{}\_%'.format(label_prefix) + } + }, project='*' + ) + assert not list(queryb.all()) + + # First group (no existing one) + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + # Second group (only one with no suffix existing) + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + '_1' + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + # Third group (groups with no suffix and with suffix _1 existing) + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + '_2' + self.assertEqual(
+ group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + # I create a group with a large integer suffix (9) + Group(label='{}_9'.format(label_prefix), type_string='auto.run').store() + # The next autogroup should become number 10 + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + '_10' + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + # I create a group with a non-integer suffix (15b); it should be ignored + Group(label='{}_15b'.format(label_prefix), type_string='auto.run').store() + # The next autogroup should become number 11 + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + '_11' + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + def test_get_or_create_invalid_prefix(self): + """Test the ``get_or_create_group`` method of ``Autogroup`` when there is already a group + with the same prefix, but followed by other non-underscore characters.""" + label_prefix = 'new_test_prefix_TestAutogroup' + # I create a group with the same prefix, but followed by non-underscore + # characters. These should be ignored in the logic. + Group(label='{}xx'.format(label_prefix), type_string='auto.run').store() + + # Check that there are no groups to begin with + queryb = QueryBuilder().append(Group, filters={'type_string': 'auto.run', 'label': label_prefix}, project='*') + assert not list(queryb.all()) + queryb = QueryBuilder().append( + Group, filters={ + 'type_string': 'auto.run', + 'label': { + 'like': r'{}\_%'.format(label_prefix) + } + }, project='*' + ) + assert not list(queryb.all()) + + # First group (no existing one) + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) + + # Second group (only one with no suffix existing) + autogroup = Autogroup() + autogroup.set_group_label_prefix(label_prefix) + group = autogroup.get_or_create_group() + expected_label = label_prefix + '_1' + self.assertEqual( + group.label, expected_label, + "The auto-group should be labelled '{}', it is instead '{}'".format(expected_label, group.label) + ) diff --git a/tests/orm/test_groups.py b/tests/orm/test_groups.py index 9c842aa2c4..ce2797daad 100644 --- a/tests/orm/test_groups.py +++ b/tests/orm/test_groups.py @@ -8,7 +8,6 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test for the Group ORM class.""" - from aiida import orm from aiida.backends.testbase import AiidaTestCase from aiida.common import exceptions diff --git a/tests/tools/importexport/orm/test_codes.py b/tests/tools/importexport/orm/test_codes.py index 5a11e07b94..d8f173107b 100644 --- a/tests/tools/importexport/orm/test_codes.py +++ b/tests/tools/importexport/orm/test_codes.py @@ -24,9 +24,11 @@ class TestCode(AiidaTestCase): """Test ex-/import cases related to Codes""" def setUp(self): + super().setUp() self.reset_database() def
tearDown(self): + super().tearDown() self.reset_database() @with_temp_dir diff --git a/tests/tools/visualization/test_graph.py b/tests/tools/visualization/test_graph.py index 9f15cab9ca..d48a3e6800 100644 --- a/tests/tools/visualization/test_graph.py +++ b/tests/tools/visualization/test_graph.py @@ -22,9 +22,11 @@ class TestVisGraph(AiidaTestCase): """Tests for verdi graph""" def setUp(self): + super().setUp() self.reset_database() def tearDown(self): + super().tearDown() self.reset_database() def create_provenance(self): diff --git a/utils/dependency_management.py b/utils/dependency_management.py index 17442d66af..af476de3e7 100755 --- a/utils/dependency_management.py +++ b/utils/dependency_management.py @@ -239,7 +239,6 @@ def validate_environment_yml(): # pylint: disable=too-many-branches # Check that all requirements specified in the setup.json file are found in the # conda environment specification. - missing_from_env = set() for req in install_requirements: if any(re.match(ignore, str(req)) for ignore in CONDA_IGNORE): continue # skip explicitly ignored packages @@ -251,7 +250,7 @@ def validate_environment_yml(): # pylint: disable=too-many-branches # The only dependency left should be the one for Python itself, which is not part of # the install_requirements for setuptools. - if len(conda_dependencies) > 0: + if conda_dependencies: raise DependencySpecificationError( "The 'environment.yml' file contains dependencies that are missing " "in 'setup.json':\n- {}".format('\n- '.join(map(str, conda_dependencies))) @@ -304,7 +303,7 @@ def validate_pyproject_toml(): "Missing requirement '{}' in 'pyproject.toml'.".format(reentry_requirement) ) - except FileNotFoundError as error: + except FileNotFoundError: raise DependencySpecificationError("The 'pyproject.toml' file is missing!") click.secho('Pyproject.toml dependency specification is consistent.', fg='green') From 5c0d86f5a9693dc1d6847d5fa75cb6edeb19207d Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Sat, 4 Apr 2020 22:37:02 +0200 Subject: [PATCH 35/54] Cleanup the top-level directory of the repository (#3738) * Include `bin/runaiida` through `console_scripts` of `setup.json` * Remove the outdated examples from `examples` directory * Remove obsolete `utils/plugin_tpl/calculation.tpl` superseded by plugin cookie cutter package * Move `conftest` to the `tests` directory --- .github/workflows/tests.sh | 5 +- bin/runaiida | 14 ---- examples/__init__.py | 9 --- examples/work/__init__.py | 9 --- examples/work/workchain.py | 91 --------------------- examples/work/workchain_outline.py | 78 ------------------ examples/work/workfunction.py | 51 ------------ setup.json | 8 +- conftest.py => tests/conftest.py | 1 - utils/plugin_tpl/calculation.tpl | 126 ----------------------------- 10 files changed, 7 insertions(+), 385 deletions(-) delete mode 100755 bin/runaiida delete mode 100644 examples/__init__.py delete mode 100644 examples/work/__init__.py delete mode 100755 examples/work/workchain.py delete mode 100755 examples/work/workchain_outline.py delete mode 100755 examples/work/workfunction.py rename conftest.py => tests/conftest.py (99%) delete mode 100644 utils/plugin_tpl/calculation.tpl diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh index 169ca71525..5fcc682769 100755 --- a/.github/workflows/tests.sh +++ b/.github/workflows/tests.sh @@ -22,7 +22,10 @@ verdi daemon stop pytest --noconftest .ci/test_test_manager.py pytest --noconftest .ci/test_profile_manager.py python .ci/test_plugin_testcase.py # uses custom unittest 
test runner -AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest + +# Until the `.ci/pytest` tests are moved within `tests` we have to run them separately and pass in the path to the +# `conftest.py` explicitly, because otherwise it won't be able to find the fixtures it provides +AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests/conftest.py .ci/pytest # main aiida-core tests AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest tests diff --git a/bin/runaiida b/bin/runaiida deleted file mode 100755 index d5c1c951c5..0000000000 --- a/bin/runaiida +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# Pass all parameters to 'verdi run' -# This is useful to use in a shebang line: i.e., you can put -# the following line as the first line in a file: - -#!/usr/bin/env runaiida - -# and the script will be run with 'verdi run' upon execution -# (if it has the correct execution bits set, i.e. using -# chmod +x ...) - -# With "$@", each parameter is correctly escaped -verdi run "$@" - diff --git a/examples/__init__.py b/examples/__init__.py deleted file mode 100644 index 2776a55f97..0000000000 --- a/examples/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### diff --git a/examples/work/__init__.py b/examples/work/__init__.py deleted file mode 100644 index 2776a55f97..0000000000 --- a/examples/work/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### diff --git a/examples/work/workchain.py b/examples/work/workchain.py deleted file mode 100755 index 6894ef3876..0000000000 --- a/examples/work/workchain.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env runaiida -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -""" -This example illustrates in a very minimal way how a WorkChain can be defined and how it can be run. This mostly -illustrates how the spec of the WorkChain is defined and how functions in the outline of the spec have to be defined. 
-""" - -from aiida.engine import WorkChain, run -from aiida.orm import NumericType, Float, Int - - -class SumWorkChain(WorkChain): - - @classmethod - def define(cls, spec): - super().define(spec) - spec.input('a', valid_type=NumericType) - spec.input('b', valid_type=NumericType) - spec.outline( - cls.sum - ) - spec.output('sum', valid_type=NumericType) - - def sum(self): - self.out('sum', self.inputs.a + self.inputs.b) - - -class ProductWorkChain(WorkChain): - - @classmethod - def define(cls, spec): - super().define(spec) - spec.input('a', valid_type=NumericType) - spec.input('b', valid_type=NumericType) - spec.outline( - cls.product - ) - spec.output('product', valid_type=NumericType) - - def product(self): - self.out('product', self.inputs.a * self.inputs.b) - - -class SumProductWorkChain(WorkChain): - - @classmethod - def define(cls, spec): - super().define(spec) - spec.input('a', valid_type=NumericType) - spec.input('b', valid_type=NumericType) - spec.input('c', valid_type=NumericType) - spec.outline( - cls.sum, - cls.product - ) - spec.output('sumproduct', valid_type=NumericType) - - def sum(self): - self.ctx.sum = self.inputs.a + self.inputs.b - - def product(self): - self.out('sumproduct', self.ctx.sum * self.inputs.c) - - -def main(): - inputs = { - 'a': Float(3.14), - 'b': Int(4), - 'c': Int(6) - } - - results = run(SumWorkChain, **inputs) - print('Result of SumWorkChain: {}'.format(results)) - - results = run(ProductWorkChain, **inputs) - print('Result of ProductWorkChain: {}'.format(results)) - - results = run(SumProductWorkChain, **inputs) - print('Result of SumProductWorkChain: {}'.format(results)) - - -if __name__ == '__main__': - main() diff --git a/examples/work/workchain_outline.py b/examples/work/workchain_outline.py deleted file mode 100755 index d82a6cca43..0000000000 --- a/examples/work/workchain_outline.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env runaiida -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -""" -This WorkChain example is a very contrived implementation of the infamous FizzBuzz problem, that serves to illustrate -the various logical blocks that one can incorporate into the outline of the workchain's spec. 
-""" - -from aiida.engine import WorkChain, run, while_, if_ -from aiida.orm import Int - - -class OutlineWorkChain(WorkChain): - - @classmethod - def define(cls, spec): - super().define(spec) - spec.input('a', valid_type=Int) - spec.outline( - cls.setup, - while_(cls.not_finished)( - if_(cls.if_multiple_of_three_and_five)( - cls.report_fizz_buzz - ).elif_(cls.if_multiple_of_five)( - cls.report_buzz - ).elif_(cls.if_multiple_of_three)( - cls.report_fizz - ).else_( - cls.report_number - ), - cls.decrement - ) - ) - - def setup(self): - self.ctx.counter = abs(self.inputs.a.value) - - def not_finished(self): - return self.ctx.counter > 0 - - def if_multiple_of_three_and_five(self): - return (self.ctx.counter % 3 == 0 and self.ctx.counter % 5 == 0) - - def if_multiple_of_five(self): - return self.ctx.counter % 5 == 0 - - def if_multiple_of_three(self): - return self.ctx.counter % 3 == 0 - - def report_fizz_buzz(self): - print('FizzBuzz') - - def report_fizz(self): - print('Fizz') - - def report_buzz(self): - print('Buzz') - - def report_number(self): - print(self.ctx.counter) - - def decrement(self): - self.ctx.counter -= 1 - - -def main(): - run(OutlineWorkChain, a=Int(16)) - - -if __name__ == '__main__': - main() diff --git a/examples/work/workfunction.py b/examples/work/workfunction.py deleted file mode 100755 index d36a2c66c0..0000000000 --- a/examples/work/workfunction.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env runaiida -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -""" -This example implements exactly the same functionality as seen in the basic WorkChain example, except in this case it -utilizes calcfunctions instead of workchains. 
-""" - -from aiida.engine import calcfunction -from aiida.orm import Float, Int - - -@calcfunction -def sum(a, b): - return a + b - - -@calcfunction -def product(a, b): - return a * b - - -@calcfunction -def sumproduct(a, b, c): - return product(sum(a, b), c) - - -def main(): - a = Float(3.14) - b = Int(4) - c = Int(6) - - results = sum(a, b) - print('Result of sum: {}'.format(results)) - - results = product(a, b) - print('Result of product: {}'.format(results)) - - results = sumproduct(a, b, c) - print('Result of sumproduct: {}'.format(results)) - - -if __name__ == '__main__': - main() diff --git a/setup.json b/setup.json index 53f1d1a310..de5f83b295 100644 --- a/setup.json +++ b/setup.json @@ -112,7 +112,8 @@ "reentry_register": true, "entry_points": { "console_scripts": [ - "verdi=aiida.cmdline.commands.cmd_verdi:verdi" + "verdi=aiida.cmdline.commands.cmd_verdi:verdi", + "runaiida=aiida.cmdline.commands.cmd_run:run" ], "aiida.calculations": [ "arithmetic.add = aiida.calculations.plugins.arithmetic.add:ArithmeticAddCalculation", @@ -201,8 +202,5 @@ "realhydrogen = aiida.tools.data.orbital.realhydrogen:RealhydrogenOrbital" ], "aiida.workflows": [] - }, - "scripts": [ - "bin/runaiida" - ] + } } diff --git a/conftest.py b/tests/conftest.py similarity index 99% rename from conftest.py rename to tests/conftest.py index 1d2336820d..29210b7305 100644 --- a/conftest.py +++ b/tests/conftest.py @@ -8,7 +8,6 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Configuration file for pytest tests.""" - import pytest # pylint: disable=unused-import pytest_plugins = ['aiida.manage.tests.pytest_fixtures'] # pylint: disable=invalid-name diff --git a/utils/plugin_tpl/calculation.tpl b/utils/plugin_tpl/calculation.tpl deleted file mode 100644 index 5cab031e0f..0000000000 --- a/utils/plugin_tpl/calculation.tpl +++ /dev/null @@ -1,126 +0,0 @@ -#-*- coding: utf8 -*- -""" -defines {{classname}} -""" -from aiida.orm import JobCalculation - - -class {{classname}}(JobCalculation): - """TODO: describe the calculation""" - - def _init_internal_params(self): - """Initialize internal parameters""" - super()._init_internal_params() - - self._INPUT_FILE_NAME = '{{ifilename}}' - self._OUTPUT_FILE_NAME = '{{ofilename}}' - self._default_parser = '{{parser}}' - - @classproperty - def _use_methods(cls): - """ - input node declaration hook - """ - - ''' - Start by getting the _use_methods from super and update the dictionary - before returning it. - - Each entry should look like this:: - - '': { # the input will be set with calc.use_(Data) - 'valid_types': , - 'additional_parameter': , - # -> use__(Data) - 'linkname': - # The name attached to the link in the db between the input - # and the calculation. Will be used for queries. - 'docstring': - } - ''' - retdict = super()._use_methods - retdict.update({ - {% for item in inputs %} - '{{item.name}}: { - 'valid_types': {{item.types}}, - 'additional_parameter': {{item.adn_par}}, - 'linkname': '{{item.get("lname", item.name)}}' - 'docstring': '{{item.docstring}}' - }, - {% endfor %} - }) - return retdict - - def _prepare_for_submission(self, tempfolder, inputdict): - """ - Hook for the deamon to create input files and do everything - else necessary before submitting the calculation to the computer. 
- - :param tempfolder: all input files should be put into this :py:class:`aiida.common.folders.Folder` subclass - :param inputdict: a dictionary containing all the inputs, keys are link names - """ - self.verify_inputs(self, inputdict) - - self._write_inputfiles(self, tempfolder, inputdict) - - calcinfo = CalcInfo() - calcinfo.uuid = self.uuid - '''list of files to copy to the computer''' - calcinfo.local_copy_list = [] # [('', '')] - calcinfo.remote_copy_list = [] # [('', '', '')] - calcinfo.retrieve_list = [self._OUTPUT_FILE_NAME] # add all files to be parsed - - code = inputdict['code'] - codeinfo = CodeInfo() - codeinfo.cmdline_params = [] # example: ['-i {}'.format(self._INPUT_FILE_NAME)] - codeinfo.code_uuid = code.uuid - - calcinfo.codes_info = [codeinfo] - - return calcinfo - - def verify_inputs(self, inputdict): - """ - ensure required input nodes are given, of the right type and nothing else - - raise ValidationError() otherwise - - example required node:: - - try: - param_name = inputdict.pop(self.get_linkname(param_name)) - except KeyError: - raise InputValidationError("Missing: param_name") - - if not isinstance(param_name, param_type(s)): - raise InputValidationError("Wrong type: param_name") - - example no superfluous nodes:: - - # after pop() - ing all expected nodes - if inputdict: - raise ValidationError("Superflous input nodes!") - """ - - '''TODO: implement input checks''' - - def _write_input_files(self, tempfolder, inputdict): - """ - write inputfiles to a temporary folder in preparation to submitting - - example using json input format:: - - # Dict input nodes - input_params = inputdict['param_name'].get_dict() - secondary_params = inputdict['secondary_name'].get_dict() - - input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME) - with open(input_filename, 'w') as infile: - json.dump(input_params, infile) - - secondary_input_filename = tempfolder.get_abs_path('secondary.inp') - with open(secondary_input_filename, 'w') as infile: - json.dump(secondary_params, infile) - """ - - '''TODO: implement input file writing From 5da7120645b6cdb751d6db54431ead38417d2345 Mon Sep 17 00:00:00 2001 From: Leopold Talirz Date: Mon, 6 Apr 2020 11:16:45 +0200 Subject: [PATCH 36/54] Move `aiida.manage.external.pgsu` to external package `pgsu` (#3892) One of the main issues with `verdi quicksetup` is that it is non-trivial to figure out how to connect as the PostgreSQL superuser in a wide variety of operating systems and PostgreSQL setups. This PR factors out the code which takes care of this connection into a separate package, called `pgsu` which can then be tested on a wide variety of setups using continuous integration. 
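For orientation, a minimal usage sketch of the standalone package, assuming `pgsu` keeps the interface of the module removed below (the DSN override and the SQL statement are taken from the old module's docstring; exact keyword names in the 0.1.0 release are an assumption):

    from pgsu import PGSU

    # PGSU autodetects how to act as the PostgreSQL superuser: it first tries a
    # psycopg2 connection and then falls back to running psql as the postgres
    # UNIX user (assumed to mirror the behaviour of the removed module).
    pgsu = PGSU(interactive=False, quiet=True, dsn={'port': 5432})
    if pgsu.is_connected:
        pgsu.execute("CREATE USER testuser PASSWORD 'testpw'")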
--- aiida/cmdline/commands/cmd_setup.py | 21 +- aiida/manage/external/pgsu.py | 349 +------------------------ aiida/manage/external/postgres.py | 131 +++++++--- aiida/manage/tests/__init__.py | 22 +- docs/requirements_for_rtd.txt | 1 + docs/source/nitpick-exceptions | 3 + environment.yml | 1 + requirements/requirements-py-3.5.txt | 1 + requirements/requirements-py-3.6.txt | 1 + requirements/requirements-py-3.7.txt | 1 + requirements/requirements-py-3.8.txt | 1 + setup.json | 1 + tests/manage/external/test_postgres.py | 29 +- 13 files changed, 141 insertions(+), 421 deletions(-) diff --git a/aiida/cmdline/commands/cmd_setup.py b/aiida/cmdline/commands/cmd_setup.py index 4edc23426b..fbfbf8b23c 100644 --- a/aiida/cmdline/commands/cmd_setup.py +++ b/aiida/cmdline/commands/cmd_setup.py @@ -136,22 +136,15 @@ def quicksetup( echo.echo_critical('failed to determine the PostgreSQL setup') try: - create = True - if not postgres.dbuser_exists(db_username): - postgres.create_dbuser(db_username, db_password) - else: - db_name, create = postgres.check_db_name(db_name) - - if create: - postgres.create_db(db_username, db_name) + db_username, db_name = postgres.create_dbuser_db_safe(dbname=db_name, dbuser=db_username, dbpass=db_password) except Exception as exception: echo.echo_error( '\n'.join([ 'Oops! quicksetup was unable to create the AiiDA database for you.', - 'For AiiDA to work, please either create the database yourself as follows:', - manual_setup_instructions(dbuser=su_db_username, dbname=su_db_name), '', - 'Alternatively, give your (operating system) user permission to create postgresql databases' + - 'and run quicksetup again.', '' + 'See `verdi quicksetup -h` for how to specify non-standard parameters for the postgresql connection.\n' + 'Alternatively, create the AiiDA database yourself: ', + manual_setup_instructions(dbuser=su_db_username, + dbname=su_db_name), '', 'and then use `verdi setup` instead', '' ]) ) raise exception @@ -169,8 +162,8 @@ def quicksetup( 'db_backend': db_backend, 'db_name': db_name, # from now on we connect as the AiiDA DB user, which may be forbidden when going via sockets - 'db_host': db_host or 'localhost', - 'db_port': db_port, + 'db_host': postgres.host_for_psycopg2, + 'db_port': postgres.port_for_psycopg2, 'db_username': db_username, 'db_password': db_password, 'repository': repository, diff --git a/aiida/manage/external/pgsu.py b/aiida/manage/external/pgsu.py index 025972e8a3..05c58e2a97 100644 --- a/aiida/manage/external/pgsu.py +++ b/aiida/manage/external/pgsu.py @@ -13,344 +13,11 @@ separate package that can then be tested on multiple OS / postgres setups. Therefore, **please keep this module entirely AiiDA-agnostic**. """ - -try: - import subprocess32 as subprocess -except ImportError: - import subprocess - -from enum import IntEnum -import click - -DEFAULT_DBINFO = { - 'host': 'localhost', - 'port': 5432, - 'user': 'postgres', - 'password': None, - 'database': 'template1', -} - - -class PostgresConnectionMode(IntEnum): - """Describe mode of connecting to postgres.""" - - DISCONNECTED = 0 - PSYCOPG = 1 - PSQL = 2 - - -class PGSU: - """ - Connect to an existing PostgreSQL cluster as the `postgres` superuser and execute SQL commands. - - Tries to use psycopg2 with a fallback to psql subcommands (using ``sudo su`` to run as postgres user). 
- - Simple Example:: - - postgres = PGSU() - postgres.execute("CREATE USER testuser PASSWORD 'testpw'") - - Complex Example:: - - postgres = PGSU(interactive=True, dbinfo={'port': 5433}) - postgres.execute("CREATE USER testuser PASSWORD 'testpw'") - - Note: In postgresql - * you cannot drop databases you are currently connected to - * 'template0' is the unmodifiable template database (which you cannot connect to) - * 'template1' is the modifiable template database (which you can connect to) - """ - - def __init__(self, interactive=False, quiet=True, dbinfo=None, determine_setup=True): - """Store postgres connection info. - - :param interactive: use True for verdi commands - :param quiet: use False to show warnings/exceptions - :param dbinfo: psycopg dictionary containing keys like 'host', 'user', 'port', 'database' - :param determine_setup: Whether to determine setup upon instantiation. - You may set this to False and use the 'determine_setup()' method instead. - """ - self.interactive = interactive - self.quiet = quiet - self.connection_mode = PostgresConnectionMode.DISCONNECTED - - self.setup_fail_callback = prompt_db_info if interactive else None - self.setup_fail_counter = 0 - self.setup_max_tries = 1 - - self.dbinfo = DEFAULT_DBINFO.copy() - if dbinfo is not None: - self.dbinfo.update(dbinfo) - - if determine_setup: - self.determine_setup() - - def execute(self, command, **kwargs): - """Execute postgres command using determined connection mode. - - :param command: A psql command line as a str - :param kwargs: will be forwarded to _execute_... function - """ - # Use self.dbinfo as default kwargs, update with provided kwargs - kw_copy = self.dbinfo.copy() - kw_copy.update(kwargs) - - if self.connection_mode == PostgresConnectionMode.PSYCOPG: # pylint: disable=no-else-return - return _execute_psyco(command, **kw_copy) - elif self.connection_mode == PostgresConnectionMode.PSQL: - return _execute_psql(command, **kw_copy) - - raise ValueError('Could not connect to postgres.') - - def set_setup_fail_callback(self, callback): - """ - Set a callback to be called when setup cannot be determined automatically - - :param callback: a callable with signature ``callback(interactive, dbinfo)`` - that returns a ``dbinfo`` dictionary. - """ - self.setup_fail_callback = callback - - def determine_setup(self): - """Determine how to connect as the postgres superuser. - - Depending on how postgres is set up, psycopg2 can be used to create dbs and db users, - otherwise a subprocess has to be used that executes psql as an os user with appropriate permissions. - - Note: We aim to connect as a superuser (typically 'postgres') with privileges to manipulate (create/drop) - databases and database users. - - :returns success: True, if connection could be established. 
- :rtype success: bool - """ - # find out if we run as a postgres superuser or can connect as postgres - # This will work on OSX in some setups but not in the default Debian one - dbinfo = self.dbinfo.copy() - - for pg_user in set([dbinfo.get('user'), None]): - dbinfo['user'] = pg_user - if _try_connect_psycopg(**dbinfo): - self.dbinfo = dbinfo - self.connection_mode = PostgresConnectionMode.PSYCOPG - return True - - # This will work for the default Debian postgres setup, assuming that sudo is available to the user - # Check if the user can find the sudo command - if _sudo_exists(): - if _try_subcmd(interactive=self.interactive, quiet=self.quiet, **dbinfo): - self.dbinfo = dbinfo - self.connection_mode = PostgresConnectionMode.PSQL - return True - elif not self.quiet: - click.echo('Warning: Could not find `sudo` for connecting to the database.') - - self.setup_fail_counter += 1 - return self._no_setup_detected() - - def _no_setup_detected(self): - """Print a warning message and calls the failed setup callback - - :returns: False, if no successful try. - """ - message = '\n'.join([ - 'Warning: Unable to autodetect postgres setup - do you know how to access it?', - ]) - - if not self.quiet: - click.echo(message) - - if self.setup_fail_callback and self.setup_fail_counter <= self.setup_max_tries: - self.dbinfo = self.setup_fail_callback(self.interactive, self.dbinfo) - return self.determine_setup() - - return False - - @property - def is_connected(self): - return self.connection_mode in (PostgresConnectionMode.PSYCOPG, PostgresConnectionMode.PSQL) - - -def prompt_db_info(interactive, dbinfo): - """ - Prompt interactively for postgres database connection details - - Can be used as a setup fail callback for :py:class:`PGSU` - - :return: dictionary with the following keys: host, port, database, user - """ - if not interactive: - return DEFAULT_DBINFO - - access = False - while not access: - dbinfo_new = {} - dbinfo_new['host'] = click.prompt('postgres host', default=dbinfo.get('host'), type=str) - dbinfo_new['port'] = click.prompt('postgres port', default=dbinfo.get('port'), type=int) - dbinfo_new['user'] = click.prompt('postgres super user', default=dbinfo.get('user'), type=str) - dbinfo_new['database'] = click.prompt('database', default=dbinfo.get('database'), type=str) - click.echo('') - click.echo('Trying to access postgres ...') - if _try_connect_psycopg(**dbinfo_new): - access = True - else: - dbinfo_new['password'] = click.prompt( - 'postgres password of {}'.format(dbinfo_new['user']), hide_input=True, type=str, default='' - ) - if not dbinfo_new.get('password'): - dbinfo_new.pop('password') - return dbinfo_new - - -def _try_connect_psycopg(**kwargs): - """ - try to start a psycopg2 connection. - - :return: True if successful, False otherwise - """ - from psycopg2 import connect - success = False - try: - conn = connect(**kwargs) - success = True - conn.close() - except Exception: # pylint: disable=broad-except - pass - return success - - -def _sudo_exists(): - """ - Check that the sudo command can be found - - :return: True if successful, False otherwise - """ - try: - subprocess.check_output(['sudo', '-V']) - except subprocess.CalledProcessError: - return False - except OSError: - return False - - return True - - -def _try_subcmd(**kwargs): - """ - try to run psql in a subprocess. 
- - :return: True if successful, False otherwise - """ - success = False - try: - kwargs['stderr'] = subprocess.STDOUT - _execute_psql(r'\q', **kwargs) - success = True - except subprocess.CalledProcessError: - pass - return success - - -def _execute_psyco(command, **kwargs): - """ - executes a postgres commandline through psycopg2 - - :param command: A psql command line as a str - :param kwargs: will be forwarded to psycopg2.connect - """ - import psycopg2 - - # Note: Ubuntu 18.04 uses "peer" as the default postgres configuration - # which allows connections only when the unix user matches the database user. - # This restriction no longer applies for IPv4/v6-based connection, - # when specifying host=localhost. - if kwargs.get('host') is None: - kwargs['host'] = 'localhost' - - output = None - with psycopg2.connect(**kwargs) as conn: - conn.autocommit = True - with conn.cursor() as cursor: - cursor.execute(command) - if cursor.description is not None: - output = cursor.fetchall() - - # see http://initd.org/psycopg/docs/usage.html#with-statement - conn.close() - return output - - -def _execute_psql(command, user='postgres', quiet=True, interactive=False, **kwargs): - """ - Executes an SQL command via ``psql`` as another system user in a subprocess. - - Tries to "become" the user specified in ``kwargs`` (i.e. interpreted as UNIX system user) - and run psql in a subprocess. - - :param command: A psql command line as a str - :param quiet: If True, don't print warnings. - :param interactive: If False, `sudo` won't ask for a password and fail if one is required. - :param kwargs: connection details to forward to psql, signature as in psycopg2.connect - """ - option_str = '' - - database = kwargs.pop('database', None) - if database: - option_str += '-d {}'.format(database) - # to do: Forward password to psql; ignore host only when the password is None. # pylint: disable=fixme - kwargs.pop('password', None) - - host = kwargs.pop('host', 'localhost') - if host and host != 'localhost': - option_str += ' -h {}'.format(host) - elif not quiet: - click.echo( - "Warning: Found host 'localhost' but dropping '-h localhost' option for psql " + - 'since this may cause psql to switch to password-based authentication.' - ) - - port = kwargs.pop('port', None) - if port: - option_str += ' -p {}'.format(port) - - user = kwargs.pop('user', 'postgres') - - # Build command line - sudo_cmd = ['sudo'] - if not interactive: - sudo_cmd += ['-n'] - su_cmd = ['su', user, '-c'] - - psql_cmd = ['psql {opt} -tc {cmd}'.format(cmd=escape_for_bash(command), opt=option_str)] - sudo_su_psql = sudo_cmd + su_cmd + psql_cmd - result = subprocess.check_output(sudo_su_psql, **kwargs) - result = result.decode('utf-8').strip().split('\n') - result = [i for i in result if i] - - return result - - -def escape_for_bash(str_to_escape): - """ - This function takes any string and escapes it in a way that - bash will interpret it as a single string. - - Explanation: - - At the end, in the return statement, the string is put within single - quotes. Therefore, the only thing that I have to escape in bash is the - single quote character. 
To do this, I substitute every single - quote ' with '"'"' which means: - - First single quote: exit from the enclosing single quotes - - Second, third and fourth character: "'" is a single quote character, - escaped by double quotes - - Last single quote: reopen the single quote to continue the string - - Finally, note that for python I have to enclose the string '"'"' - within triple quotes to make it work, getting finally: the complicated - string found below. - """ - escaped_quotes = str_to_escape.replace("'", """'"'"'""") - return "'{}'".format(escaped_quotes) +import warnings +from pgsu import PGSU, PostgresConnectionMode, DEFAULT_DSN as DEFAULT_DBINFO # pylint: disable=unused-import,no-name-in-module +from aiida.common.warnings import AiidaDeprecationWarning + +warnings.warn( # pylint: disable=no-member + '`aiida.manage.external.pgsu` is now available in the separate `pgsu` package. ' + 'This module will be removed entirely in AiiDA 2.0.0', AiidaDeprecationWarning +) diff --git a/aiida/manage/external/postgres.py b/aiida/manage/external/postgres.py index 0c6e9b1c5a..680b62e088 100644 --- a/aiida/manage/external/postgres.py +++ b/aiida/manage/external/postgres.py @@ -21,7 +21,7 @@ import click from aiida.cmdline.utils import echo -from .pgsu import PGSU, PostgresConnectionMode, DEFAULT_DBINFO +from pgsu import PGSU, PostgresConnectionMode, DEFAULT_DSN as DEFAULT_DBINFO # pylint: disable=no-name-in-module _CREATE_USER_COMMAND = 'CREATE USER "{}" WITH PASSWORD \'{}\'' _DROP_USER_COMMAND = 'DROP USER "{}"' @@ -32,20 +32,20 @@ ) _DROP_DB_COMMAND = 'DROP DATABASE "{}"' _GRANT_PRIV_COMMAND = 'GRANT ALL PRIVILEGES ON DATABASE "{}" TO "{}"' -_GET_USERS_COMMAND = "SELECT usename FROM pg_user WHERE usename='{}'" +_USER_EXISTS_COMMAND = "SELECT usename FROM pg_user WHERE usename='{}'" _CHECK_DB_EXISTS_COMMAND = "SELECT datname FROM pg_database WHERE datname='{}'" _COPY_DB_COMMAND = 'CREATE DATABASE "{}" WITH TEMPLATE "{}" OWNER "{}"' class Postgres(PGSU): """ - Adds convenience functions to pgsu.Postgres. + Adds convenience functions to :py:class:`pgsu.PGSU`. - Provides conenience functions for + Provides convenience functions for * creating/dropping users * creating/dropping databases - etc. See pgsu.Postgres for implementation details. + etc. Example:: @@ -55,6 +55,10 @@ class Postgres(PGSU): postgres.create_db('username', 'dbname') """ + def __init__(self, dbinfo=None, **kwargs): + """See documentation of :py:meth:`pgsu.PGSU.__init__`.""" + super().__init__(dsn=dbinfo, **kwargs) + @classmethod def from_profile(cls, profile, **kwargs): """Create Postgres instance with dbinfo from AiiDA profile data. @@ -63,7 +67,7 @@ def from_profile(cls, profile, **kwargs): database superuser. 
:param profile: AiiDA profile instance - :param kwargs: keyword arguments forwarded to Postgres constructor + :param kwargs: keyword arguments forwarded to PGSU constructor :returns: Postgres instance pre-populated with data from AiiDA profile """ @@ -77,23 +81,25 @@ def from_profile(cls, profile, **kwargs): return Postgres(dbinfo=dbinfo, **kwargs) - def check_db_name(self, dbname): - """Looks up if a database with the name exists, prompts for using or creating a differently named one.""" - create = True - while create and self.db_exists(dbname): - echo.echo_info('database {} already exists!'.format(dbname)) - if not click.confirm('Use it (make sure it is not used by another profile)?'): - dbname = click.prompt('new name', type=str, default=dbname) - else: - create = False - return dbname, create + ### DB user functions ### + + def dbuser_exists(self, dbuser): + """ + Find out if postgres user with name dbuser exists + + :param str dbuser: database user to check for + :return: (bool) True if user exists, False otherwise + """ + return bool(self.execute(_USER_EXISTS_COMMAND.format(dbuser))) def create_dbuser(self, dbuser, dbpass): """ Create a database user in postgres - :param dbuser: (str), Name of the user to be created. - :param dbpass: (str), Password the user should be given. + :param str dbuser: Name of the user to be created. + :param str dbpass: Password the user should be given. + :raises: psycopg2.errors.DuplicateObject if user already exists and + self.connection_mode == PostgresConnectionMode.PSYCOPG """ self.execute(_CREATE_USER_COMMAND.format(dbuser, dbpass)) @@ -101,25 +107,42 @@ def drop_dbuser(self, dbuser): """ Drop a database user in postgres - :param dbuser: (str), Name of the user to be dropped. + :param str dbuser: Name of the user to be dropped. """ self.execute(_DROP_USER_COMMAND.format(dbuser)) - def dbuser_exists(self, dbuser): + def check_dbuser(self, dbuser): + """Looks up if a given user already exists, prompts for using or creating a differently named one. + + :param str dbuser: Name of the user to be created or reused. + :returns: tuple (dbuser, created) """ - Find out if postgres user with name dbuser exists + create = True + while create and self.dbuser_exists(dbuser): + echo.echo_info('Database user "{}" already exists!'.format(dbuser)) + if not click.confirm('Use it? '): + dbuser = click.prompt('New database user name: ', type=str, default=dbuser) + else: + create = False + return dbuser, create - :param dbuser: (str) database user to check for - :return: (bool) True if user exists, False otherwise + ### DB functions ### + + def db_exists(self, dbname): """ - return bool(self.execute(_GET_USERS_COMMAND.format(dbuser))) + Check whether a postgres database with dbname exists + + :param str dbname: Name of the database to check for + :return: (bool), True if database exists, False otherwise + """ + return bool(self.execute(_CHECK_DB_EXISTS_COMMAND.format(dbname))) def create_db(self, dbuser, dbname): """ Create a database in postgres - :param dbuser: (str), Name of the user which should own the db. - :param dbname: (str), Name of the database. + :param str dbuser: Name of the user which should own the db. + :param str dbname: Name of the database. """ self.execute(_CREATE_DB_COMMAND.format(dbname, dbuser)) self.execute(_GRANT_PRIV_COMMAND.format(dbname, dbuser)) @@ -128,28 +151,70 @@ def drop_db(self, dbname): """ Drop a database in postgres - :param dbname: (str), Name of the database.
""" self.execute(_DROP_DB_COMMAND.format(dbname)) def copy_db(self, src_db, dest_db, dbuser): self.execute(_COPY_DB_COMMAND.format(dest_db, src_db, dbuser)) - def db_exists(self, dbname): + def check_db(self, dbname): + """Looks up if a database with the name exists, prompts for using or creating a differently named one. + + :param str dbname: Name of the database to be created or reused. + :returns: tuple (dbname, created) """ - Check wether a postgres database with dbname exists + create = True + while create and self.db_exists(dbname): + echo.echo_info('database {} already exists!'.format(dbname)) + if not click.confirm('Use it (make sure it is not used by another profile)?'): + dbname = click.prompt('new name', type=str, default=dbname) + else: + create = False + return dbname, create - :param dbname: Name of the database to check for - :return: (bool), True if database exists, False otherwise + def create_dbuser_db_safe(self, dbname, dbuser, dbpass): + """Create DB and user + grant privileges. + + Prompts when reusing existing users / databases. """ - return bool(self.execute(_CHECK_DB_EXISTS_COMMAND.format(dbname))) + dbuser, create = self.check_dbuser(dbuser=dbuser) + if create: + self.create_dbuser(dbuser=dbuser, dbpass=dbpass) + + dbname, create = self.check_db(dbname=dbname) + if create: + self.create_db(dbuser, dbname) + + return dbuser, dbname + + @property + def host_for_psycopg2(self): + """Return correct host for psycopg2 connection (as required by regular AiiDA operation).""" + host = self.dsn.get('host') + if self.connection_mode == PostgresConnectionMode.PSQL: + # If "sudo su postgres" was needed to create the DB, we are likely on Ubuntu, where + # the same will *not* work for arbitrary database users => enforce TCP/IP connection + host = host or 'localhost' + + return host + + @property + def port_for_psycopg2(self): + """Return port for psycopg2 connection (as required by regular AiiDA operation).""" + return self.dsn.get('port') + + @property + def dbinfo(self): + """Alias for Postgres.dsn.""" + return self.dsn.copy() def manual_setup_instructions(dbuser, dbname): """Create a message with instructions for manually creating a database""" dbpass = '' instructions = '\n'.join([ - 'Please run the following commands as the user for PostgreSQL (Ubuntu: $sudo su postgres):', + 'Run the following commands as a UNIX user with access to PostgreSQL (Ubuntu: $ sudo su postgres):', '', '\t$ psql template1', '\t==> ' + _CREATE_USER_COMMAND.format(dbuser, dbpass), diff --git a/aiida/manage/tests/__init__.py b/aiida/manage/tests/__init__.py index 9ff04e39c8..cc4e2e5fbf 100644 --- a/aiida/manage/tests/__init__.py +++ b/aiida/manage/tests/__init__.py @@ -252,10 +252,11 @@ def __init__(self, backend=BACKEND_DJANGO, pgtest=None): # pylint: disable=supe self.postgres = None self._profile = None self._has_test_db = False - self._backup = {} - self._backup['config'] = configuration.CONFIG - self._backup['config_dir'] = settings.AIIDA_CONFIG_FOLDER - self._backup['profile'] = configuration.PROFILE + self._backup = { + 'config': configuration.CONFIG, + 'config_dir': settings.AIIDA_CONFIG_FOLDER, + 'profile': configuration.PROFILE, + } @property def profile_dictionary(self): @@ -264,10 +265,10 @@ def profile_dictionary(self): Used to set up AiiDA profile from self.profile_info dictionary. 
""" dictionary = { - 'database_engine': self.profile_info['database_engine'], - 'database_backend': self.profile_info['database_backend'], - 'database_port': self.dbinfo.get('port'), - 'database_hostname': self.dbinfo.get('host'), + 'database_engine': self.profile_info.get('database_engine'), + 'database_backend': self.profile_info.get('database_backend'), + 'database_port': self.profile_info.get('database_port'), + 'database_hostname': self.profile_info.get('database_hostname'), 'database_name': self.profile_info.get('database_name'), 'database_username': self.profile_info.get('database_username'), 'database_password': self.profile_info.get('database_password'), @@ -297,9 +298,12 @@ def create_aiida_db(self): if self.pg_cluster is None: self.create_db_cluster() self.postgres = Postgres(interactive=False, quiet=True, dbinfo=self.dbinfo) - self.dbinfo = self.postgres.dbinfo.copy() + # note: not using postgres.create_dbuser_db_safe here since we don't want prompts self.postgres.create_dbuser(self.profile_info['database_username'], self.profile_info['database_password']) self.postgres.create_db(self.profile_info['database_username'], self.profile_info['database_name']) + self.dbinfo = self.postgres.dbinfo + self.profile_info['database_hostname'] = self.postgres.host_for_psycopg2 + self.profile_info['database_port'] = self.postgres.port_for_psycopg2 self._has_test_db = True def create_profile(self): diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index 4336f2f21c..cb290f8f83 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -22,6 +22,7 @@ kiwipy[rmq]~=0.5.1 numpy<1.18,~=1.17 paramiko~=2.6 pg8000~=1.13 +pgsu~=0.1.0 pgtest>=1.3.1,~=1.3 pika~=1.1 plumpy~=0.14.5 diff --git a/docs/source/nitpick-exceptions b/docs/source/nitpick-exceptions index 4019aad3f7..46dc269e12 100644 --- a/docs/source/nitpick-exceptions +++ b/docs/source/nitpick-exceptions @@ -103,3 +103,6 @@ py:class aldjemy.orm.DbLog py:class aldjemy.orm.DbSetting py:class alembic.config.Config + +py:class pgsu.PGSU +py:meth pgsu.PGSU.__init__ diff --git a/environment.yml b/environment.yml index cfbb126c57..d92ed0950a 100644 --- a/environment.yml +++ b/environment.yml @@ -23,6 +23,7 @@ dependencies: - paramiko~=2.6 - pika~=1.1 - plumpy~=0.14.5 +- pgsu~=0.1.0 - psutil~=5.6 - psycopg2>=2.8.3,~=2.8 - python-dateutil~=2.8 diff --git a/requirements/requirements-py-3.5.txt b/requirements/requirements-py-3.5.txt index ff5c36ad78..545e13a27f 100644 --- a/requirements/requirements-py-3.5.txt +++ b/requirements/requirements-py-3.5.txt @@ -74,6 +74,7 @@ pathlib2==2.3.5 pexpect==4.8.0 pg8000==1.13.2 pgtest==1.3.2 +pgsu==0.1.0 pickleshare==0.7.5 pika==1.1.0 pluggy==0.13.1 diff --git a/requirements/requirements-py-3.6.txt b/requirements/requirements-py-3.6.txt index 4e9015bc17..665898918e 100644 --- a/requirements/requirements-py-3.6.txt +++ b/requirements/requirements-py-3.6.txt @@ -73,6 +73,7 @@ paramiko==2.7.1 parso==0.6.2 pexpect==4.8.0 pg8000==1.13.2 +pgsu==0.1.0 pgtest==1.3.2 pickleshare==0.7.5 pika==1.1.0 diff --git a/requirements/requirements-py-3.7.txt b/requirements/requirements-py-3.7.txt index f52fa482bd..3a166e7b53 100644 --- a/requirements/requirements-py-3.7.txt +++ b/requirements/requirements-py-3.7.txt @@ -72,6 +72,7 @@ paramiko==2.7.1 parso==0.6.2 pexpect==4.8.0 pg8000==1.13.2 +pgsu==0.1.0 pgtest==1.3.2 pickleshare==0.7.5 pika==1.1.0 diff --git a/requirements/requirements-py-3.8.txt b/requirements/requirements-py-3.8.txt index 462ddf5777..a3295d91f7 100644 --- 
a/requirements/requirements-py-3.8.txt +++ b/requirements/requirements-py-3.8.txt @@ -71,6 +71,7 @@ paramiko==2.7.1 parso==0.6.2 pexpect==4.8.0 pg8000==1.13.2 +pgsu==0.1.0 pgtest==1.3.2 pickleshare==0.7.5 pika==1.1.0 diff --git a/setup.json b/setup.json index de5f83b295..ebe8360174 100644 --- a/setup.json +++ b/setup.json @@ -37,6 +37,7 @@ "paramiko~=2.6", "pika~=1.1", "plumpy~=0.14.5", + "pgsu~=0.1.0", "psutil~=5.6", "psycopg2-binary~=2.8,>=2.8.3", "pyblake2~=1.1; python_version<'3.6'", diff --git a/tests/manage/external/test_postgres.py b/tests/manage/external/test_postgres.py index 266dc92921..5b83374f06 100644 --- a/tests/manage/external/test_postgres.py +++ b/tests/manage/external/test_postgres.py @@ -9,16 +9,10 @@ ########################################################################### """Unit tests for postgres database maintenance functionality""" from unittest import TestCase -from unittest.mock import patch from aiida.manage.external.postgres import Postgres -def _try_connect_always_fail(**kwargs): # pylint: disable=unused-argument - """Always return False""" - return False - - class PostgresTest(TestCase): """Test the public API provided by the `Postgres` class""" @@ -38,31 +32,18 @@ def _setup_postgres(self): return Postgres(interactive=False, quiet=True, dbinfo=self.pg_test.dsn) def test_determine_setup_fail(self): + """Check that setup fails if a bad port is provided. + + Note: In interactive mode, this would prompt for the connection details. + """ postgres = Postgres(interactive=False, quiet=True, dbinfo={'port': '11111'}) self.assertFalse(postgres.is_connected) def test_determine_setup_success(self): + """Check that setup works with default parameters.""" postgres = self._setup_postgres() self.assertTrue(postgres.is_connected) - def test_setup_fail_callback(self): - """Make sure `determine_setup` works despite wrong initial values in case of correct callback""" - - def correct_setup(interactive, dbinfo): # pylint: disable=unused-argument - return self.pg_test.dsn - - postgres = Postgres(interactive=False, quiet=True, dbinfo={'port': '11111'}, determine_setup=False) - postgres.set_setup_fail_callback(correct_setup) - setup_success = postgres.determine_setup() - self.assertTrue(setup_success) - - @patch('aiida.manage.external.pgsu._try_connect_psycopg', new=_try_connect_always_fail) - @patch('aiida.manage.external.pgsu._try_subcmd') - def test_fallback_on_subcmd(self, try_subcmd): - """Ensure that accessing postgres via subcommand is tried if psycopg does not work.""" - self._setup_postgres() - self.assertTrue(try_subcmd.call_count >= 1) - def test_create_drop_db_user(self): """Check creating and dropping a user works""" postgres = self._setup_postgres() From 676a83efd979530f1fd6593ed170df6c3fe97fcd Mon Sep 17 00:00:00 2001 From: Casper Welzel Andersen <43357585+CasperWA@users.noreply.github.com> Date: Wed, 8 Apr 2020 08:37:16 +0200 Subject: [PATCH 37/54] CI: use GitHub Actions services for PostgreSQL and RabbitMQ (#3901) Note that we need to keep installing `postgresql-10` manually through `apt` for the `tests` action, because some of the tests generate a PostgreSQL cluster on the fly through the Python package `pgtest`.
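For orientation, this is roughly what that on-the-fly cluster creation looks like, as a sketch based on the `pgtest` usage in `tests/manage/external/test_postgres.py` above (the context-manager form follows pgtest's documented usage and is an assumption here):

    from pgtest.pgtest import PGTest

    # PGTest runs initdb and starts a throwaway PostgreSQL server, which is why
    # the PostgreSQL binaries must be installed on the runner itself and cannot
    # be replaced by the service container.
    with PGTest() as cluster:
        print(cluster.dsn)  # connection parameters of the temporary cluster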
--- .github/workflows/ci.yml | 32 ++++++++++++++--------- .github/workflows/test-install.yml | 32 ++++++++++++++--------- .github/workflows/update-requirements.yml | 32 ++++++++++++++--------- 3 files changed, 60 insertions(+), 36 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f2bea20b43..10a1251707 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -110,15 +110,27 @@ jobs: backend: ['django', 'sqlalchemy'] python-version: [3.5, 3.8] + services: + postgres: + image: postgres:10 + env: + POSTGRES_DB: test_${{ matrix.backend }} + POSTGRES_PASSWORD: '' + POSTGRES_HOST_AUTH_METHOD: trust + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + rabbitmq: + image: rabbitmq:latest + ports: + - 5672:5672 + steps: - uses: actions/checkout@v2 - - uses: CasperWA/postgresql-action@v1.2 - with: - postgresql version: '10' - postgresql db: test_${{ matrix.backend }} - postgresql user: postgres - postgresql password: '' - postgresql auth: trust - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 @@ -127,13 +139,9 @@ jobs: - name: Install system dependencies run: | - wget -O - "https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc" | sudo apt-key add - - echo 'deb https://dl.bintray.com/rabbitmq-erlang/debian bionic erlang' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list - echo 'deb https://dl.bintray.com/rabbitmq/debian bionic main' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list sudo rm -f /etc/apt/sources.list.d/dotnetdev.list /etc/apt/sources.list.d/microsoft-prod.list sudo apt update - sudo apt install postgresql-10 rabbitmq-server graphviz - sudo systemctl status rabbitmq-server.service + sudo apt install postgresql-10 graphviz - name: Upgrade pip run: | diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml index 05224f43dc..54be58f1d5 100644 --- a/.github/workflows/test-install.yml +++ b/.github/workflows/test-install.yml @@ -98,15 +98,27 @@ jobs: python-version: [3.5, 3.6, 3.7, 3.8] backend: ['django', 'sqlalchemy'] + services: + postgres: + image: postgres:10 + env: + POSTGRES_DB: test_${{ matrix.backend }} + POSTGRES_PASSWORD: '' + POSTGRES_HOST_AUTH_METHOD: trust + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + rabbitmq: + image: rabbitmq:latest + ports: + - 5672:5672 + steps: - uses: actions/checkout@v2 - - uses: CasperWA/postgresql-action@v1.2 - with: - postgresql version: '10' - postgresql db: test_${{ matrix.backend }} - postgresql user: postgres - postgresql password: '' - postgresql auth: trust - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 @@ -115,13 +127,9 @@ jobs: - name: Install system dependencies run: | - wget -O - "https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc" | sudo apt-key add - - echo 'deb https://dl.bintray.com/rabbitmq-erlang/debian bionic erlang' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list - echo 'deb https://dl.bintray.com/rabbitmq/debian bionic main' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list sudo rm -f /etc/apt/sources.list.d/dotnetdev.list /etc/apt/sources.list.d/microsoft-prod.list sudo apt update - sudo apt install postgresql-10 rabbitmq-server graphviz - sudo systemctl status rabbitmq-server.service + sudo apt install 
postgresql-10 graphviz - run: pip install --upgrade pip From 38c4684fc770c42b81f00faa5d8156a16ca666bd Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Wed, 8 Apr 2020 14:37:11 +0200 Subject: [PATCH 38/54] Add the `-v/--version` option to `verdi export migrate` (#3910) The default behavior remains the same: if not specified, the export archive will be migrated to the latest version. With the flag, however, any other version can be chosen, as long as it constitutes a forward migration; backward migrations are not supported (see the usage sketch after this patch). --- aiida/cmdline/commands/cmd_export.py | 24 +++++-- .../tools/importexport/migration/__init__.py | 33 +++++----- docs/source/verdi/verdi_user_guide.rst | 2 +- tests/cmdline/commands/test_export.py | 22 ++++++- .../importexport/migration/test_migration.py | 63 +++++++++---------- 5 files changed, 88 insertions(+), 56 deletions(-) diff --git a/aiida/cmdline/commands/cmd_export.py b/aiida/cmdline/commands/cmd_export.py index 4e4b8f0066..651d25ca1a 100644 --- a/aiida/cmdline/commands/cmd_export.py +++ b/aiida/cmdline/commands/cmd_export.py @@ -145,17 +145,26 @@ def create( @options.ARCHIVE_FORMAT() @options.FORCE(help='overwrite output file if it already exists') @options.SILENT() -def migrate(input_file, output_file, force, silent, archive_format): +@click.option( + '-v', + '--version', + type=click.STRING, + required=False, + metavar='VERSION', + help='Specify an exact archive version to migrate to. By default the most recent version is taken.' +) +def migrate(input_file, output_file, force, silent, archive_format, version): # pylint: disable=too-many-locals,too-many-statements,too-many-branches - """ - Migrate an old export archive file to the most recent format.
- """ + """Migrate an export archive to a more recent format version.""" import tarfile import zipfile from aiida.common import json from aiida.common.folders import SandboxFolder - from aiida.tools.importexport import migration, extract_zip, extract_tar + from aiida.tools.importexport import EXPORT_VERSION, migration, extract_zip, extract_tar, ArchiveMigrationError + + if version is None: + version = EXPORT_VERSION if os.path.exists(output_file) and not force: echo.echo_critical('the output file already exists') @@ -178,7 +187,10 @@ def migrate(input_file, output_file, force, silent, archive_format): echo.echo_critical('export archive does not contain the required file {}'.format(fhandle.filename)) old_version = migration.verify_metadata_version(metadata) - new_version = migration.migrate_recursively(metadata, data, folder) + try: + new_version = migration.migrate_recursively(metadata, data, folder, version) + except ArchiveMigrationError as exception: + echo.echo_critical(exception) with open(folder.get_abs_path('data.json'), 'wb') as fhandle: json.dump(data, fhandle, indent=4) diff --git a/aiida/tools/importexport/migration/__init__.py b/aiida/tools/importexport/migration/__init__.py index e5772c1f8f..a99ee359e7 100644 --- a/aiida/tools/importexport/migration/__init__.py +++ b/aiida/tools/importexport/migration/__init__.py @@ -8,9 +8,9 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Migration export files from old export versions to the newest, used by `verdi export migrate` command.""" - -from aiida.cmdline.utils import echo -from aiida.tools.importexport.common.exceptions import DanglingLinkError +from aiida.common.lang import type_check +from aiida.tools.importexport import EXPORT_VERSION +from aiida.tools.importexport.common.exceptions import DanglingLinkError, ArchiveMigrationError from .utils import verify_metadata_version from .v01_to_v02 import migrate_v1_to_v2 @@ -34,34 +34,37 @@ } -def migrate_recursively(metadata, data, folder): - """ - Recursive migration of export files from v0.1 to newest version, +def migrate_recursively(metadata, data, folder, version=EXPORT_VERSION): + """Recursive migration of export files from v0.1 to a newer version. + See specific migration functions for detailed descriptions. 
:param metadata: the content of an export archive metadata.json file :param data: the content of an export archive data.json file :param folder: SandboxFolder in which the archive has been unpacked (workdir) + :param version: the version to migrate to, by default the current export version """ - from aiida.tools.importexport import EXPORT_VERSION as newest_version - old_version = verify_metadata_version(metadata) + type_check(version, str) + try: - if old_version == newest_version: - echo.echo_critical('Your export file is already at the newest export version {}'.format(newest_version)) + if old_version == version: + raise ArchiveMigrationError('Your export file is already at the version {}'.format(version)) + elif old_version > version: + raise ArchiveMigrationError('Backward migrations are not supported') elif old_version in MIGRATE_FUNCTIONS: MIGRATE_FUNCTIONS[old_version](metadata, data, folder) else: - echo.echo_critical('Cannot migrate from version {}'.format(old_version)) + raise ArchiveMigrationError('Cannot migrate from version {}'.format(old_version)) except ValueError as exception: - echo.echo_critical(exception) + raise ArchiveMigrationError(exception) except DanglingLinkError: - echo.echo_critical('Export file is invalid because it contains dangling links') + raise ArchiveMigrationError('Export file is invalid because it contains dangling links') new_version = verify_metadata_version(metadata) - if new_version < newest_version: - new_version = migrate_recursively(metadata, data, folder) + if new_version < version: + new_version = migrate_recursively(metadata, data, folder, version) return new_version diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst index 3c24ffeb1b..8265083e6d 100644 --- a/docs/source/verdi/verdi_user_guide.rst +++ b/docs/source/verdi/verdi_user_guide.rst @@ -394,7 +394,7 @@ Below is a list with all available subcommands. Commands: create Export subsets of the provenance graph to file for sharing. inspect Inspect contents of an exported archive without importing it. - migrate Migrate an old export archive file to the most recent format. + migrate Migrate an export archive to a more recent format version. .. 
_verdi_graph: diff --git a/tests/cmdline/commands/test_export.py b/tests/cmdline/commands/test_export.py index 4e0d5a3233..441dcfae68 100644 --- a/tests/cmdline/commands/test_export.py +++ b/tests/cmdline/commands/test_export.py @@ -19,7 +19,7 @@ from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands import cmd_export -from aiida.tools.importexport import EXPORT_VERSION +from aiida.tools.importexport import EXPORT_VERSION, Archive from tests.utils.archives import get_archive_file @@ -160,6 +160,26 @@ def test_migrate_versions_old(self): finally: delete_temporary_file(filename_output) + def test_migrate_version_specific(self): + """Test the `-v/--version` option to migrate to a specific version instead of the latest.""" + archive = 'export_v0.1_simple.aiida' + target_version = '0.2' + + filename_input = get_archive_file(archive, filepath=self.fixture_archive) + filename_output = next(tempfile._get_candidate_names()) # pylint: disable=protected-access + + try: + options = [filename_input, filename_output, '--version', target_version] + result = self.cli_runner.invoke(cmd_export.migrate, options) + self.assertIsNone(result.exception, result.output) + self.assertTrue(os.path.isfile(filename_output)) + self.assertEqual(zipfile.ZipFile(filename_output).testzip(), None) + + with Archive(filename_output) as archive_object: + self.assertEqual(archive_object.version_format, target_version) + finally: + delete_temporary_file(filename_output) + def test_migrate_versions_recent(self): """Migrating an archive with the current version should exit with non-zero status.""" filename_input = get_archive_file(self.newest_archive, filepath=self.fixture_archive) diff --git a/tests/tools/importexport/migration/test_migration.py b/tests/tools/importexport/migration/test_migration.py index c2c45afbd2..82c08b8e1d 100644 --- a/tests/tools/importexport/migration/test_migration.py +++ b/tests/tools/importexport/migration/test_migration.py @@ -8,14 +8,12 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test export file migration from old export versions to the newest""" - import os from aiida import orm from aiida.backends.testbase import AiidaTestCase -from aiida.tools.importexport import import_data, EXPORT_VERSION as newest_version +from aiida.tools.importexport import import_data, ArchiveMigrationError, Archive, EXPORT_VERSION as newest_version from aiida.tools.importexport.migration import migrate_recursively, verify_metadata_version -from aiida.common.utils import Capturing from tests.utils.archives import get_archive_file, get_json_files, migrate_archive from tests.utils.configuration import with_temp_dir @@ -102,6 +100,28 @@ def test_migrate_recursively(self): verify_metadata_version(metadata, version=newest_version) self.assertEqual(new_version, newest_version) + def test_migrate_recursively_specific_version(self): + """Test the `version` argument of the `migrate_recursively` function.""" + filepath_archive = get_archive_file('export_v0.3_simple.aiida', **self.core_archive) + + with Archive(filepath_archive) as archive: + + # Incorrect type + with self.assertRaises(TypeError): + migrate_recursively(archive.meta_data, archive.data, None, version=0.2) + + # Backward migrations are not supported + with self.assertRaises(ArchiveMigrationError): + migrate_recursively(archive.meta_data, archive.data, None, version='0.2') + + # Same version will also raise + with 
self.assertRaises(ArchiveMigrationError): + migrate_recursively(archive.meta_data, archive.data, None, version='0.3') + + migrated_version = '0.5' + version = migrate_recursively(archive.meta_data, archive.data, None, version=migrated_version) + self.assertEqual(version, migrated_version) + @with_temp_dir def test_no_node_export(self, temp_dir): """Test migration of export file that has no Nodes""" @@ -138,7 +158,6 @@ def test_wrong_versions(self): """Test correct errors are raised if export files have wrong version numbers""" from aiida.tools.importexport.migration import MIGRATE_FUNCTIONS - # Initialization wrong_versions = ['0.0', '0.1.0', '0.99'] old_versions = list(MIGRATE_FUNCTIONS.keys()) legal_versions = old_versions + [newest_version] @@ -147,7 +166,6 @@ def test_wrong_versions(self): metadata = {'export_version': version} wrong_version_metadatas.append(metadata) - # Checks # Make sure the "wrong_versions" are wrong for version in wrong_versions: self.assertNotIn( @@ -156,18 +174,11 @@ def test_wrong_versions(self): msg="'{}' was not expected to be a legal version, legal version: {}".format(version, legal_versions) ) - # Make sure migrate_recursively throws a critical message and raises SystemExit + # Make sure migrate_recursively throws an ArchiveMigrationError for metadata in wrong_version_metadatas: - with self.assertRaises(SystemExit) as exception: - with Capturing(capture_stderr=True): - new_version = migrate_recursively(metadata, {}, None) - - self.assertIn( - 'Critical: Cannot migrate from version {}'.format(metadata['export_version']), - exception.exception, - msg="Expected a critical statement for the wrong export version '{}', " - 'instead got {}'.format(metadata['export_version'], exception.exception) - ) + with self.assertRaises(ArchiveMigrationError): + new_version = migrate_recursively(metadata, {}, None) + self.assertIsNone( new_version, msg='migrate_recursively should not return anything, ' @@ -175,26 +186,12 @@ def test_wrong_versions(self): ) def test_migrate_newest_version(self): - """ - Test critical message and SystemExit is raised, when an export file with the newest export version is migrated - """ - # Initialization + """Test that an exception is raised when an export file with the newest export version is migrated.""" metadata = {'export_version': newest_version} - # Check - with self.assertRaises(SystemExit) as exception: + with self.assertRaises(ArchiveMigrationError): + new_version = migrate_recursively(metadata, {}, None) - with Capturing(capture_stderr=True): - new_version = migrate_recursively(metadata, {}, None) - - self.assertIn( - 'Critical: Your export file is already at the newest export version {}'.format( - metadata['export_version'] - ), - exception.exception, - msg="Expected a critical statement that the export version '{}' is the newest export version '{}', " - 'instead got {}'.format(metadata['export_version'], newest_version, exception.exception) - ) self.assertIsNone( new_version, msg='migrate_recursively should not return anything, ' From b14243e9aed85b804b8187bf40dcf1ee98aec917 Mon Sep 17 00:00:00 2001 From: Chris Sewell Date: Wed, 8 Apr 2020 19:01:42 +0100 Subject: [PATCH 39/54] `GroupPath`: a utility to work with virtual `Group` hierarchies (#3613) Groups can be used to store nodes in AiiDA, but do not have any builtin hierarchy themselves. However, often it may be useful to think of groups as folders on a filesystem and the nodes within them as the files. 
Building this functionality directly on the database would require significant changes, but a virtual hierarchy based on the group labels can be readily provided. This is what the new utility class `GroupPath` facilitates. It allows group labels to be interpreted as a hierarchy of groups. Example: consider one has groups with the following labels:: group/sub/a group/sub/b group/other/c One could see this as the group `group` containing the sub groups `sub` and `other`, with `sub` in turn containing `a` and `b`. The `GroupPath` class allows one to exploit this hierarchical naming:: path = GroupPath('group') path.sub.a.get_group() # will return group with label `group/sub/a` It can also be used to create groups that do not yet exist:: path = GroupPath() path.some.group.get_or_create_group() This will create a `Group` with the label `some/group`. The `GroupPath` class implements many other useful methods to make traversing and manipulating groups a lot easier. --- .ci/workchains.py | 1 + .pylintrc | 2 +- aiida/cmdline/commands/cmd_group.py | 69 +++++ aiida/tools/groups/__init__.py | 11 + aiida/tools/groups/paths.py | 352 ++++++++++++++++++++++++ docs/source/verdi/verdi_user_guide.rst | 1 + tests/cmdline/commands/test_group_ls.py | 127 +++++++++ tests/tools/groups/__init__.py | 9 + tests/tools/groups/test_paths.py | 161 +++++++++++ 9 files changed, 732 insertions(+), 1 deletion(-) create mode 100644 aiida/tools/groups/__init__.py create mode 100644 aiida/tools/groups/paths.py create mode 100644 tests/cmdline/commands/test_group_ls.py create mode 100644 tests/tools/groups/__init__.py create mode 100644 tests/tools/groups/test_paths.py diff --git a/.ci/workchains.py b/.ci/workchains.py index f5ab3872d7..4ae521540f 100644 --- a/.ci/workchains.py +++ b/.ci/workchains.py @@ -7,6 +7,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=invalid-name from aiida.common import AttributeDict from aiida.engine import calcfunction, workfunction, WorkChain, ToContext, append_, while_, ExitCode from aiida.engine import BaseRestartWorkChain, process_handler, ProcessHandlerReport diff --git a/.pylintrc b/.pylintrc index 9e3adfb075..8fc7a6a3c3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -50,7 +50,7 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" -disable=bad-continuation,locally-disabled,useless-suppression,django-not-available,bad-option-value,logging-format-interpolation,no-else-raise,import-outside-toplevel +disable=bad-continuation,locally-disabled,useless-suppression,django-not-available,bad-option-value,logging-format-interpolation,no-else-raise,import-outside-toplevel,cyclic-import # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/aiida/cmdline/commands/cmd_group.py b/aiida/cmdline/commands/cmd_group.py index d74e416bd5..16978379ae 100644 --- a/aiida/cmdline/commands/cmd_group.py +++ b/aiida/cmdline/commands/cmd_group.py @@ -361,3 +361,72 @@ def group_copy(source_group, destination_group): # Copy nodes dest_group.add_nodes(list(source_group.nodes)) echo.echo_success('Nodes copied from group<{}> to group<{}>'.format(source_group.label, dest_group.label)) + + +@verdi_group.group('path') +def verdi_group_path(): + """Inspect groups of nodes, with delimited label paths.""" + + +@verdi_group_path.command('ls') +@click.argument('path', type=click.STRING, required=False) +@click.option('-R', '--recursive', is_flag=True, default=False, help='Recursively list sub-paths encountered') +@click.option('-l', '--long', 'as_table', is_flag=True, default=False, help='List as a table, with sub-group count') +@click.option( + '-d', '--with-description', 'with_description', is_flag=True, default=False, help='Show also the group description' +) +@click.option( + '--no-virtual', + 'no_virtual', + is_flag=True, + default=False, + help='Only show paths that fully correspond to an existing group' +) +@click.option( + '-t', + '--type', + 'group_type', + type=types.LazyChoice(valid_group_type_strings), + default=user_defined_group, + help='Show groups of a specific type, instead of user-defined groups. Start with semicolumn if you want to ' + 'specify aiida-internal type' +) +@click.option('--no-warn', is_flag=True, default=False, help='Do not issue a warning if any paths are invalid.') +@with_dbenv() +def group_path_ls(path, recursive, as_table, no_virtual, group_type, with_description, no_warn): + # pylint: disable=too-many-arguments + """Show a list of existing group paths.""" + from aiida.tools.groups.paths import GroupPath, InvalidPath + + try: + path = GroupPath(path or '', type_string=group_type, warn_invalid_child=not no_warn) + except InvalidPath as err: + echo.echo_critical(str(err)) + + if recursive: + children = path.walk() + else: + children = path.children + + if as_table or with_description: + from tabulate import tabulate + headers = ['Path', 'Sub-Groups'] + if with_description: + headers.append('Description') + rows = [] + for child in sorted(children): + if no_virtual and child.is_virtual: + continue + row = [ + child.path if child.is_virtual else click.style(child.path, bold=True), + len([c for c in child.walk() if not c.is_virtual]) + ] + if with_description: + row.append('-' if child.is_virtual else child.get_group().description) + rows.append(row) + echo.echo(tabulate(rows, headers=headers)) + else: + for child in sorted(children): + if no_virtual and child.is_virtual: + continue + echo.echo(child.path, bold=not child.is_virtual) diff --git a/aiida/tools/groups/__init__.py b/aiida/tools/groups/__init__.py new file mode 100644 index 0000000000..7d429eeab7 --- /dev/null +++ b/aiida/tools/groups/__init__.py @@ -0,0 +1,11 @@ +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=wildcard-import,undefined-variable +"""Provides tools for interacting with AiiDA Groups.""" +from .paths import * + +__all__ = paths.__all__ diff --git a/aiida/tools/groups/paths.py b/aiida/tools/groups/paths.py new file mode 100644 index 0000000000..9d20ea9c55 --- /dev/null +++ b/aiida/tools/groups/paths.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Provides functionality for managing large numbers of AiiDA Groups, via label delimitation.""" +from collections import namedtuple +from functools import total_ordering +import re +from typing import Any, Iterable, List, Optional # pylint: disable=unused-import +import warnings + +from aiida import orm +from aiida.common.exceptions import NotExistent + +__all__ = ('GroupPath', 'InvalidPath') + +REGEX_ATTR = re.compile('^[a-zA-Z][\\_a-zA-Z0-9]*$') + + +class InvalidPath(Exception): + """An exception to indicate that a path is not valid.""" + + +class GroupNotFoundError(Exception): + """An exception raised when a path does not have an associated group.""" + + def __init__(self, grouppath): + msg = 'No such group: {}'.format(grouppath.path) + super().__init__(msg) + + +class GroupNotUniqueError(Exception): + """An exception raised when a path has multiple associated groups.""" + + def __init__(self, grouppath): + msg = 'The path is not unique: {}'.format(grouppath.path) + super().__init__(msg) + + +class NoGroupsInPathError(Exception): + """An exception raised when a path has multiple associated groups.""" + + def __init__(self, grouppath): + msg = 'The path does not contain any descendant groups: {}'.format(grouppath.path) + super().__init__(msg) + + +WalkNodeResult = namedtuple('WalkNodeResult', ['group_path', 'node']) + + +@total_ordering +class GroupPath: + """A class to provide label delimited access to groups. + + See tests for usage examples. + """ + + def __init__(self, path='', type_string=orm.GroupTypeString.USER.value, warn_invalid_child=True): + # type: (str, Optional[str], Optional[GroupPath]) + """Instantiate the class. + + :param path: The initial path of the group. + :param type_string: Used to query for and instantiate a ``Group`` with. + :param warn_invalid_child: Issue a warning, when iterating children, if a child path is invalid. 
+ + """ + self._delimiter = '/' + if not isinstance(type_string, str): + raise TypeError('type_string must a str: {}'.format(type_string)) + self._type_string = type_string + self._path_string = self._validate_path(path) + self._path_list = self._path_string.split(self._delimiter) if path else [] + self._warn_invalid_child = warn_invalid_child + + def _validate_path(self, path): + """Validate the supplied path.""" + if path == self._delimiter: + return '' + if self._delimiter * 2 in path: + raise InvalidPath("The path may not contain a duplicate delimiter '{}': {}".format(self._delimiter, path)) + if (path.startswith(self._delimiter) or path.endswith(self._delimiter)): + raise InvalidPath("The path may not start/end with the delimiter '{}': {}".format(self._delimiter, path)) + return path + + def __repr__(self): + # type: () -> str + """Represent the instantiated class.""" + return "{}('{}', type='{}')".format(self.__class__.__name__, self.path, self.type_string) + + def __eq__(self, other): + # type: (Any) -> bool + """Compare equality of path and type string to another ``GroupPath`` object.""" + if not isinstance(other, GroupPath): + return NotImplemented + return (self.path, self.type_string) == (other.path, other.type_string) + + def __lt__(self, other): + # type: (Any) -> bool + """Compare less-than operator of path and type string to another ``GroupPath`` object.""" + if not isinstance(other, GroupPath): + return NotImplemented + return (self.path, self.type_string) < (other.path, other.type_string) + + @property + def path(self): + # type: () -> str + """Return the path string.""" + return self._path_string + + @property + def path_list(self): + # type: () -> List[str] + """Return a list of the path components.""" + return self._path_list[:] + + @property + def key(self): + # type: () -> str + """Return the final component of the the path.""" + if self._path_list: + return self._path_list[-1] + return None + + @property + def delimiter(self): + # type: () -> str + """Return the delimiter used to split path into components.""" + return self._delimiter + + @property + def type_string(self): + # type: () -> str + """Return the type_string used to query for and instantiate a ``Group`` with.""" + return self._type_string + + @property + def parent(self): + # type: () -> Optional[GroupPath] + """Return the parent path.""" + if self.path_list: + return GroupPath( + self.delimiter.join(self.path_list[:-1]), + type_string=self.type_string, + warn_invalid_child=self._warn_invalid_child + ) + return None + + def __truediv__(self, path): + # type: (str) -> GroupPath + """Return a child ``GroupPath``, with a new path formed by appending ``path`` to the current path.""" + if not isinstance(path, str): + raise TypeError('path is not a string: {}'.format(path)) + path = self._validate_path(path) + child = GroupPath( + path=self.path + self.delimiter + path if self.path else path, + type_string=self.type_string, + warn_invalid_child=self._warn_invalid_child + ) + return child + + def __getitem__(self, path): + # type: (str) -> GroupPath + """Return a child ``GroupPath``, with a new path formed by appending ``path`` to the current path.""" + return self.__truediv__(path) + + def get_group(self): + # type: () -> Optional[orm.Group] + """Return the concrete group associated with this path.""" + try: + return orm.Group.objects.get(label=self.path, type_string=self.type_string) + except NotExistent: + return None + + @property + def group_ids(self): + # type: () -> List[int] + """Return all the UUID 
associated with this GroupPath. + + :returns: an empty list, if no group is associated with this label, + or multiple ids if type_string was None + + This is an efficient method for checking existence, + which does not require the (slow) loading of the ORM entity. + """ + query = orm.QueryBuilder() + filters = {'label': self.path} + if self.type_string is not None: + filters['type_string'] = self.type_string + query.append(orm.Group, filters=filters, project='id') + return [r[0] for r in query.all()] + + @property + def is_virtual(self): + # type: () -> bool + """Return whether there is no concrete group associated with this path.""" + return len(self.group_ids) == 0 + + def get_or_create_group(self): + # type: () -> (orm.Group, bool) + """Return the concrete group associated with this path or create it, if it does not already exist.""" + if self.type_string is not None: + return orm.Group.objects.get_or_create(label=self.path, type_string=self.type_string) + return orm.Group.objects.get_or_create(label=self.path) + + def delete_group(self): + """Delete the concrete group associated with this path. + + :raises: GroupNotFoundError, GroupNotUniqueError + """ + ids = self.group_ids + if not ids: + raise GroupNotFoundError(self) + if len(ids) > 1: + raise GroupNotUniqueError(self) + orm.Group.objects.delete(ids[0]) + + @property + def children(self): + # type: () -> Iterable[GroupPath] + """Iterate through all (direct) children of this path.""" + query = orm.QueryBuilder() + filters = {} + if self.path: + filters['label'] = {'like': self.path + self.delimiter + '%'} + if self.type_string is not None: + filters['type_string'] = self.type_string + query.append(orm.Group, filters=filters, project='label') + if query.count() == 0 and self.is_virtual: + raise NoGroupsInPathError(self) + + yielded = [] + for (label,) in query.iterall(): + path = label.split(self._delimiter) + if len(path) <= len(self._path_list): + continue + path_string = self._delimiter.join(path[:len(self._path_list) + 1]) + if (path_string not in yielded and path[:len(self._path_list)] == self._path_list): + yielded.append(path_string) + try: + yield GroupPath( + path=path_string, type_string=self.type_string, warn_invalid_child=self._warn_invalid_child + ) + except InvalidPath: + if self._warn_invalid_child: + warnings.warn('invalid path encountered: {}'.format(path_string)) # pylint: disable=no-member + + def __iter__(self): + # type: () -> Iterable[GroupPath] + """Iterate through all (direct) children of this path.""" + return self.children + + def __len__(self): + # type: () -> int + """Return the number of children for this path.""" + return sum(1 for _ in self.children) + + def __contains__(self, key): + # type: (str) -> bool + """Return whether a child exists for this key.""" + for child in self.children: + if child.path_list[-1] == key: + return True + return False + + def walk(self, return_virtual=True): + # type: () -> Iterable[GroupPath] + """Recursively iterate through all children of this path.""" + for child in self: + if return_virtual or not child.is_virtual: + yield child + for sub_child in child.walk(return_virtual=return_virtual): + if return_virtual or not sub_child.is_virtual: + yield sub_child + + def walk_nodes(self, filters=None, node_class=None, query_batch=None): + # type: () -> Iterable[WalkNodeResult] + """Recursively iterate through all nodes of this path and its children. 
+ + :param filters: filters to apply to the node query + :param node_class: return only nodes of a certain class (or list of classes) + :param int query_batch: The size of the batches to ask the backend to batch results in subcollections. + You can optimize the speed of the query by tuning this parameter. + Be aware though that it is only safe if no commit will take place during this transaction. + """ + query = orm.QueryBuilder() + group_filters = {} + if self.path: + group_filters['label'] = {'or': [{'==': self.path}, {'like': self.path + self.delimiter + '%'}]} + if self.type_string is not None: + group_filters['type_string'] = self.type_string + query.append(orm.Group, filters=group_filters, project='label', tag='group') + query.append( + orm.Node if node_class is None else node_class, + with_group='group', + filters=filters, + project=['*'], + ) + for (label, node) in query.iterall(query_batch) if query_batch else query.all(): + yield WalkNodeResult(GroupPath(label, type_string=self.type_string), node) + + @property + def browse(self): + """Return a ``GroupAttr`` instance, for attribute access to children.""" + return GroupAttr(self) + + +class GroupAttr: + """A class to provide attribute access to a ``GroupPath``'s children. + + The only public attributes on this class are dynamically created from the ``GroupPath`` child keys. + NOTE: any child keys that do not conform to an acceptable (public) attribute string will be ignored. + The ``GroupPath`` can be retrieved *via* a function call, e.g.:: + + group_path = GroupPath() + group_attr = GroupAttr(group_path) + group_attr.a.b.c() == GroupPath("a/b/c") + + """ + + def __init__(self, group_path): + # type: (GroupPath) + """Instantiate the ``GroupPath``, and a mapping of its children.""" + self._group_path = group_path + + def __repr__(self): + # type: () -> str + """Represent the instantiated class.""" + return "{}('{}', type='{}')".format( + self.__class__.__name__, self._group_path.path, self._group_path.type_string + ) + + def __call__(self): + # type: () -> GroupPath + """Return the ``GroupPath``.""" + return self._group_path + + def __dir__(self): + """Return a list of available attributes.""" + return [c.path_list[-1] for c in self._group_path.children if REGEX_ATTR.match(c.path_list[-1])] + + def __getattr__(self, attr): + # type: (str) -> GroupAttr + """Return the requested attribute name.""" + for child in self._group_path.children: + if attr == child.path_list[-1]: + return GroupAttr(child) + raise AttributeError(attr) diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst index 8265083e6d..1fc8bfabe8 100644 --- a/docs/source/verdi/verdi_user_guide.rst +++ b/docs/source/verdi/verdi_user_guide.rst @@ -436,6 +436,7 @@ Below is a list with all available subcommands. delete Delete a group. description Change the description of a group. list Show a list of existing groups. + path Inspect groups of nodes, with delimited label paths. relabel Change the label of a group. remove-nodes Remove nodes from a group. show Show information for a given group. diff --git a/tests/cmdline/commands/test_group_ls.py b/tests/cmdline/commands/test_group_ls.py new file mode 100644 index 0000000000..7cc01079a4 --- /dev/null +++ b/tests/cmdline/commands/test_group_ls.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for GroupPath command line interface""" +# pylint: disable=redefined-outer-name,unused-argument +from textwrap import dedent + +from click.testing import CliRunner +import pytest + +from aiida import orm +from aiida.cmdline.commands.cmd_group import group_path_ls + + +@pytest.fixture +def setup_groups(clear_database_before_test): + """Setup some groups for testing.""" + for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f']: + group, _ = orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + group.description = 'A description of {}'.format(label) + orm.Group.objects.get_or_create('a/x', type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + yield + + +def test_with_no_opts(setup_groups): + """Test ``verdi group path ls``""" + + cli_runner = CliRunner() + + result = cli_runner.invoke(group_path_ls) + assert result.exit_code == 0, result.exception + assert result.output == 'a\n' + + result = cli_runner.invoke(group_path_ls, ['a']) + assert result.exit_code == 0, result.exception + assert result.output == 'a/b\na/c\na/f\n' + + result = cli_runner.invoke(group_path_ls, ['a/c']) + assert result.exit_code == 0, result.exception + assert result.output == 'a/c/d\na/c/e\n' + + +def test_recursive(setup_groups): + """Test ``verdi group path ls --recursive``""" + + cli_runner = CliRunner() + + for tag in ['-R', '--recursive']: + result = cli_runner.invoke(group_path_ls, [tag]) + assert result.exit_code == 0, result.exception + assert result.output == 'a\na/b\na/c\na/c/d\na/c/e\na/c/e/g\na/f\n' + + result = cli_runner.invoke(group_path_ls, [tag, 'a/c']) + assert result.exit_code == 0, result.exception + assert result.output == 'a/c/d\na/c/e\na/c/e/g\n' + + +@pytest.mark.parametrize('tag', ['-l', '--long']) +def test_long(setup_groups, tag): + """Test ``verdi group path ls --long``""" + + cli_runner = CliRunner() + + result = cli_runner.invoke(group_path_ls, [tag]) + assert result.exit_code == 0, result.exception + assert result.output == dedent( + """\ + Path Sub-Groups + ------ ------------ + a 4 + """ + ) + + result = cli_runner.invoke(group_path_ls, [tag, '-d', 'a']) + assert result.exit_code == 0, result.exception + assert result.output == dedent( + """\ + Path Sub-Groups Description + ------ ------------ -------------------- + a/b 0 A description of a/b + a/c 2 - + a/f 0 A description of a/f + """ + ) + + result = cli_runner.invoke(group_path_ls, [tag, '-R']) + assert result.exit_code == 0, result.exception + assert result.output == dedent( + """\ + Path Sub-Groups + ------- ------------ + a 4 + a/b 0 + a/c 2 + a/c/d 0 + a/c/e 1 + a/c/e/g 0 + a/f 0 + """ + ) + + +@pytest.mark.parametrize('tag', ['--no-virtual']) +def test_groups_only(setup_groups, tag): + """Test ``verdi group path ls --no-virtual``""" + + cli_runner = CliRunner() + + result = cli_runner.invoke(group_path_ls, [tag, '-l', '-R', '--with-description']) + assert result.exit_code == 0, result.exception + assert result.output == dedent( + """\ + Path Sub-Groups Description + ------- ------------ ------------------------ + a 4 A description of a + a/b 0 A description of a/b + a/c/d 0 A description of a/c/d + a/c/e/g 0 A description of a/c/e/g + a/f 0 A description of a/f + """ + ) diff --git 
a/tests/tools/groups/__init__.py b/tests/tools/groups/__init__.py new file mode 100644 index 0000000000..2776a55f97 --- /dev/null +++ b/tests/tools/groups/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### diff --git a/tests/tools/groups/test_paths.py b/tests/tools/groups/test_paths.py new file mode 100644 index 0000000000..a6f1cdb757 --- /dev/null +++ b/tests/tools/groups/test_paths.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Tests for GroupPath""" +# pylint: disable=redefined-outer-name,unused-argument +import pytest + +from aiida import orm +from aiida.tools.groups.paths import (GroupAttr, GroupPath, InvalidPath, GroupNotFoundError, NoGroupsInPathError) + + +@pytest.fixture +def setup_groups(clear_database_before_test): + """Setup some groups for testing.""" + for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f']: + group, _ = orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + group.description = 'A description of {}'.format(label) + yield + + +@pytest.mark.parametrize('path', ('/a', 'a/', '/a/', 'a//b')) +def test_invalid_paths(setup_groups, path): + """Invalid paths should raise an ``InvalidPath`` exception.""" + with pytest.raises(InvalidPath): + GroupPath(path=path) + + +def test_root_path(setup_groups): + """Test the root path properties""" + group_path = GroupPath() + assert group_path.path == '' + assert group_path.delimiter == '/' + assert group_path.parent is None + assert group_path.is_virtual + assert group_path.get_group() is None + + +def test_path_concatenation(setup_groups): + """Test methods to build a new path.""" + group_path = GroupPath() + assert (group_path / 'a').path == 'a' + assert (group_path / 'a' / 'b').path == 'a/b' + assert (group_path / 'a/b').path == 'a/b' + assert group_path['a/b'].path == 'a/b' + assert GroupPath('a/b/c') == GroupPath('a/b') / 'c' + + +def test_path_existence(setup_groups): + """Test existence of child "folders".""" + group_path = GroupPath() + assert 'a' in group_path + assert 'x' not in group_path + + +def test_group_retrieval(setup_groups): + """Test retrieval of the actual group from a path. 
+ + The ``group`` attribute will return None + if no group is associated with the path. + """ + group_path = GroupPath() + assert group_path['x'].is_virtual + assert not group_path['a'].is_virtual + assert group_path.get_group() is None + assert isinstance(group_path['a'].get_group(), orm.Group) + + +def test_group_creation(setup_groups): + """Test creation of new groups.""" + group_path = GroupPath() + group, created = group_path['a'].get_or_create_group() + assert isinstance(group, orm.Group) + assert created is False + group, created = group_path['x'].get_or_create_group() + assert isinstance(group, orm.Group) + assert created is True + + +def test_group_deletion(setup_groups): + """Test deletion of existing groups.""" + group_path = GroupPath() + assert not group_path['a'].is_virtual + group_path['a'].delete_group() + assert group_path['a'].is_virtual + with pytest.raises(GroupNotFoundError): + group_path['a'].delete_group() + + +def test_path_iteration(setup_groups): + """Test iteration of groups.""" + group_path = GroupPath() + assert len(group_path) == 1 + assert [(c.path, c.is_virtual) for c in group_path.children] == [('a', False)] + child = next(group_path.children) + assert child.parent == group_path + assert len(child) == 3 + assert [(c.path, c.is_virtual) for c in sorted(child)] == [('a/b', False), ('a/c', True), ('a/f', False)] + + +def test_path_with_no_groups(setup_groups): + """Test ``NoGroupsInPathError`` is raised if the path contains no descendant groups.""" + group_path = GroupPath() + with pytest.raises(NoGroupsInPathError): + list(group_path['x']) + + +def test_walk(setup_groups): + """Test the ``GroupPath.walk()`` function.""" + group_path = GroupPath() + assert [c.path for c in sorted(group_path.walk())] == ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g', 'a/f'] + + +def test_walk_with_invalid_path(clear_database_before_test): + for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f', 'bad//group', 'bad/other']: + orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + group_path = GroupPath() + assert [c.path for c in sorted(group_path.walk()) + ] == ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g', 'a/f', 'bad', 'bad/other'] + + +def test_walk_nodes(clear_database): + """Test the ``GroupPath.walk_nodes()`` function.""" + group, _ = orm.Group.objects.get_or_create('a', type_string=orm.GroupTypeString.USER.value) + node = orm.Data() + node.set_attribute_many({'i': 1, 'j': 2}) + node.store() + group.add_nodes(node) + group_path = GroupPath() + assert [(r.group_path.path, r.node.attributes) for r in group_path.walk_nodes()] == [('a', {'i': 1, 'j': 2})] + + +def test_type_string(clear_database_before_test): + """Test that only the type_string instantiated in ``GroupPath`` is returned.""" + for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g']: + orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + for label in ['a/c/e', 'a/f']: + orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + group_path = GroupPath() + assert sorted([c.path for c in group_path.walk()]) == ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g'] + group_path = GroupPath(type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + assert sorted([c.path for c in group_path.walk()]) == ['a', 'a/c', 'a/c/e', 'a/f'] + assert GroupPath('a/b/c') != GroupPath('a/b/c', type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + + +def test_attr(clear_database_before_test): + """Test ``GroupAttr``.""" + for label in ['a', 'a/b', 'a/c/d', 
'a/c/e/g', 'a/f', 'bad space', 'bad@char', '_badstart']: + orm.Group.objects.get_or_create(label) + group_path = GroupPath() + assert isinstance(group_path.browse.a.c.d, GroupAttr) + assert isinstance(group_path.browse.a.c.d(), GroupPath) + assert group_path.browse.a.c.d().path == 'a/c/d' + assert not set(group_path.browse.__dir__()).intersection(['bad space', 'bad@char', '_badstart']) + with pytest.raises(AttributeError): + group_path.browse.a.c.x # pylint: disable=pointless-statement From d609e5e1572cbe826c138490e9924139bd8ffc69 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Thu, 9 Apr 2020 11:11:02 +0200 Subject: [PATCH 40/54] Make `Group` subclassable through entry points (#3882) We add the `aiida.groups` entry point group where subclasses of the `aiida.orm.groups.Group` class can be registered. A new metaclass is used to automatically set the `type_string` based on the entry point of the `Group` subclass. This makes it possible to load the correct subclass when reloading a group from the database. If the `GroupMeta` metaclass cannot retrieve the corresponding entry point of the subclass, a warning is issued that any instances of this class will not be storable, and the `_type_string` attribute is set to `None`. The `store` method checks this attribute and fails if it is `None`. We choose to raise only in the `store` method such that it is still possible to define and instantiate subclasses of `Group` that have not yet been registered. This is useful for testing and experimenting. Since the group type strings are now based on the entry point names, the existing group type strings in the database have to be migrated: * `user` -> `core` * `data.upf` -> `core.upf` * `auto.import` -> `core.import` * `auto.run` -> `core.auto` When loading a `Group` instance from the database, the loader will try to resolve the type string to the corresponding subclass through the entry points. If this fails, a warning is issued and we fall back on the base `Group` class. 
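As an aside for plugin developers, a minimal sketch of how such a subclass could be registered; the names `aiida-myplugin`, `aiida_myplugin.groups` and `myplugin.pseudos` are hypothetical, while the `aiida.groups` entry point group and the storing behavior are the ones introduced by this commit::

    # Hypothetical plugin module `aiida_myplugin/groups.py`.
    from aiida.orm import Group

    class PseudoFamily(Group):
        """A family of pseudopotentials.

        With `myplugin.pseudos = aiida_myplugin.groups:PseudoFamily`
        registered under the `aiida.groups` entry point group in the
        plugin's `setup.json`, the `GroupMeta` metaclass automatically
        sets `_type_string` to the entry point name `myplugin.pseudos`.
        """

    # Storing an instance records `myplugin.pseudos` as the group's
    # `type_string`, so that loading the group from the database later
    # resolves to `PseudoFamily` again. Without a registered entry
    # point, a warning is issued at class definition and `store()`
    # raises `StoringNotAllowed`.
    family = PseudoFamily(label='pseudos/sssp').store()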
--- .../db/migrations/0044_dbgroup_type_string.py | 44 ++++ .../backends/djsite/db/migrations/__init__.py | 2 +- .../bf591f31dd12_dbgroup_type_string.py | 45 ++++ aiida/cmdline/commands/cmd_data/cmd_upf.py | 11 +- aiida/cmdline/commands/cmd_group.py | 28 +-- aiida/cmdline/commands/cmd_run.py | 1 + aiida/cmdline/params/types/group.py | 4 +- aiida/orm/autogroup.py | 33 +-- aiida/orm/convert.py | 5 +- aiida/orm/groups.py | 108 ++++++--- aiida/orm/implementation/groups.py | 2 +- aiida/orm/nodes/data/upf.py | 30 +-- aiida/plugins/entry_point.py | 2 + aiida/plugins/factories.py | 23 +- aiida/tools/groups/paths.py | 71 +++--- aiida/tools/importexport/common/config.py | 4 +- .../dbimport/backends/django/__init__.py | 6 +- .../dbimport/backends/sqla/__init__.py | 6 +- docs/source/working_with_aiida/groups.rst | 160 +++++++------ setup.json | 6 + ...est_migrations_0044_dbgroup_type_string.py | 63 +++++ .../aiida_sqlalchemy/test_migrations.py | 66 ++++++ tests/cmdline/commands/test_group.py | 2 +- tests/cmdline/commands/test_group_ls.py | 8 +- tests/cmdline/commands/test_run.py | 223 ++++++++++-------- tests/orm/data/test_upf.py | 16 +- tests/orm/test_groups.py | 53 +++++ tests/tools/graph/test_age.py | 6 +- tests/tools/groups/test_paths.py | 28 ++- .../tools/importexport/test_prov_redesign.py | 4 +- 30 files changed, 696 insertions(+), 364 deletions(-) create mode 100644 aiida/backends/djsite/db/migrations/0044_dbgroup_type_string.py create mode 100644 aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py create mode 100644 tests/backends/aiida_django/migrations/test_migrations_0044_dbgroup_type_string.py diff --git a/aiida/backends/djsite/db/migrations/0044_dbgroup_type_string.py b/aiida/backends/djsite/db/migrations/0044_dbgroup_type_string.py new file mode 100644 index 0000000000..8c577ce397 --- /dev/null +++ b/aiida/backends/djsite/db/migrations/0044_dbgroup_type_string.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=invalid-name,too-few-public-methods +"""Migration after the `Group` class became pluginnable and so the group `type_string` changed.""" + +# pylint: disable=no-name-in-module,import-error +from django.db import migrations +from aiida.backends.djsite.db.migrations import upgrade_schema_version + +REVISION = '1.0.44' +DOWN_REVISION = '1.0.43' + +forward_sql = [ + """UPDATE db_dbgroup SET type_string = 'core' WHERE type_string = 'user';""", + """UPDATE db_dbgroup SET type_string = 'core.upf' WHERE type_string = 'data.upf';""", + """UPDATE db_dbgroup SET type_string = 'core.import' WHERE type_string = 'auto.import';""", + """UPDATE db_dbgroup SET type_string = 'core.auto' WHERE type_string = 'auto.run';""", +] + +reverse_sql = [ + """UPDATE db_dbgroup SET type_string = 'user' WHERE type_string = 'core';""", + """UPDATE db_dbgroup SET type_string = 'data.upf' WHERE type_string = 'core.upf';""", + """UPDATE db_dbgroup SET type_string = 'auto.import' WHERE type_string = 'core.import';""", + """UPDATE db_dbgroup SET type_string = 'auto.run' WHERE type_string = 'core.auto';""", +] + + +class Migration(migrations.Migration): + """Migration after the update of group `type_string`""" + dependencies = [ + ('db', '0043_default_link_label'), + ] + + operations = [ + migrations.RunSQL(sql='\n'.join(forward_sql), reverse_sql='\n'.join(reverse_sql)), + upgrade_schema_version(REVISION, DOWN_REVISION), + ] diff --git a/aiida/backends/djsite/db/migrations/__init__.py b/aiida/backends/djsite/db/migrations/__init__.py index a832b4e5f7..41ee2b3d2c 100644 --- a/aiida/backends/djsite/db/migrations/__init__.py +++ b/aiida/backends/djsite/db/migrations/__init__.py @@ -21,7 +21,7 @@ class DeserializationException(AiidaException): pass -LATEST_MIGRATION = '0043_default_link_label' +LATEST_MIGRATION = '0044_dbgroup_type_string' def _update_schema_version(version, apps, _): diff --git a/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py b/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py new file mode 100644 index 0000000000..8231d8ebb7 --- /dev/null +++ b/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +"""Migration after the `Group` class became pluginnable and so the group `type_string` changed. 
+ +Revision ID: bf591f31dd12 +Revises: 118349c10896 +Create Date: 2020-03-31 10:00:52.609146 + +""" +# pylint: disable=no-name-in-module,import-error,invalid-name,no-member +from alembic import op +from sqlalchemy.sql import text + +forward_sql = [ + """UPDATE db_dbgroup SET type_string = 'core' WHERE type_string = 'user';""", + """UPDATE db_dbgroup SET type_string = 'core.upf' WHERE type_string = 'data.upf';""", + """UPDATE db_dbgroup SET type_string = 'core.import' WHERE type_string = 'auto.import';""", + """UPDATE db_dbgroup SET type_string = 'core.auto' WHERE type_string = 'auto.run';""", +] + +reverse_sql = [ + """UPDATE db_dbgroup SET type_string = 'user' WHERE type_string = 'core';""", + """UPDATE db_dbgroup SET type_string = 'data.upf' WHERE type_string = 'core.upf';""", + """UPDATE db_dbgroup SET type_string = 'auto.import' WHERE type_string = 'core.import';""", + """UPDATE db_dbgroup SET type_string = 'auto.run' WHERE type_string = 'core.auto';""", +] + +# revision identifiers, used by Alembic. +revision = 'bf591f31dd12' +down_revision = '118349c10896' +branch_labels = None +depends_on = None + + +def upgrade(): + """Migrations for the upgrade.""" + conn = op.get_bind() + statement = text('\n'.join(forward_sql)) + conn.execute(statement) + + +def downgrade(): + """Migrations for the downgrade.""" + conn = op.get_bind() + statement = text('\n'.join(reverse_sql)) + conn.execute(statement) diff --git a/aiida/cmdline/commands/cmd_data/cmd_upf.py b/aiida/cmdline/commands/cmd_data/cmd_upf.py index 78f79b0d9e..745f4af7a2 100644 --- a/aiida/cmdline/commands/cmd_data/cmd_upf.py +++ b/aiida/cmdline/commands/cmd_data/cmd_upf.py @@ -64,22 +64,13 @@ def upf_listfamilies(elements, with_description): """ from aiida import orm from aiida.plugins import DataFactory - from aiida.orm.nodes.data.upf import UPFGROUP_TYPE UpfData = DataFactory('upf') # pylint: disable=invalid-name query = orm.QueryBuilder() query.append(UpfData, tag='upfdata') if elements is not None: query.add_filter(UpfData, {'attributes.element': {'in': elements}}) - query.append( - orm.Group, - with_node='upfdata', - tag='group', - project=['label', 'description'], - filters={'type_string': { - '==': UPFGROUP_TYPE - }} - ) + query.append(orm.UpfFamily, with_node='upfdata', tag='group', project=['label', 'description']) query.distinct() if query.count() > 0: diff --git a/aiida/cmdline/commands/cmd_group.py b/aiida/cmdline/commands/cmd_group.py index 16978379ae..e48c361b33 100644 --- a/aiida/cmdline/commands/cmd_group.py +++ b/aiida/cmdline/commands/cmd_group.py @@ -13,7 +13,7 @@ from aiida.common.exceptions import UniquenessError from aiida.cmdline.commands.cmd_verdi import verdi -from aiida.cmdline.params import options, arguments, types +from aiida.cmdline.params import options, arguments from aiida.cmdline.utils import echo from aiida.cmdline.utils.decorators import with_dbenv @@ -178,18 +178,6 @@ def group_show(group, raw, limit, uuid): echo.echo(tabulate(table, headers=header)) -@with_dbenv() -def valid_group_type_strings(): - from aiida.orm import GroupTypeString - return tuple(i.value for i in GroupTypeString) - - -@with_dbenv() -def user_defined_group(): - from aiida.orm import GroupTypeString - return GroupTypeString.USER.value - - @verdi_group.command('list') @options.ALL_USERS(help='Show groups for all users, rather than only for the current user') @click.option( @@ -204,8 +192,7 @@ def user_defined_group(): '-t', '--type', 'group_type', - type=types.LazyChoice(valid_group_type_strings), - default=user_defined_group, + 
default='core', help='Show groups of a specific type, instead of user-defined groups. Start with semicolumn if you want to ' 'specify aiida-internal type' ) @@ -330,9 +317,8 @@ def group_list( def group_create(group_label): """Create an empty group with a given name.""" from aiida import orm - from aiida.orm import GroupTypeString - group, created = orm.Group.objects.get_or_create(label=group_label, type_string=GroupTypeString.USER.value) + group, created = orm.Group.objects.get_or_create(label=group_label) if created: echo.echo_success("Group created with PK = {} and name '{}'".format(group.id, group.label)) @@ -351,7 +337,7 @@ def group_copy(source_group, destination_group): Note that the destination group may not exist.""" from aiida import orm - dest_group, created = orm.Group.objects.get_or_create(label=destination_group, type_string=source_group.type_string) + dest_group, created = orm.Group.objects.get_or_create(label=destination_group) # Issue warning if destination group is not empty and get user confirmation to continue if not created and not dest_group.is_empty: @@ -386,8 +372,7 @@ def verdi_group_path(): '-t', '--type', 'group_type', - type=types.LazyChoice(valid_group_type_strings), - default=user_defined_group, + default='core', help='Show groups of a specific type, instead of user-defined groups. Start with semicolumn if you want to ' 'specify aiida-internal type' ) @@ -396,10 +381,11 @@ def verdi_group_path(): def group_path_ls(path, recursive, as_table, no_virtual, group_type, with_description, no_warn): # pylint: disable=too-many-arguments """Show a list of existing group paths.""" + from aiida.plugins import GroupFactory from aiida.tools.groups.paths import GroupPath, InvalidPath try: - path = GroupPath(path or '', type_string=group_type, warn_invalid_child=not no_warn) + path = GroupPath(path or '', cls=GroupFactory(group_type), warn_invalid_child=not no_warn) except InvalidPath as err: echo.echo_critical(str(err)) diff --git a/aiida/cmdline/commands/cmd_run.py b/aiida/cmdline/commands/cmd_run.py index bd3972b841..d46b6f984c 100644 --- a/aiida/cmdline/commands/cmd_run.py +++ b/aiida/cmdline/commands/cmd_run.py @@ -150,5 +150,6 @@ def run(scriptname, varargs, auto_group, auto_group_label_prefix, group_name, ex # Re-raise the exception to have the error code properly returned at the end raise finally: + autogroup.current_autogroup = None if handle: handle.close() diff --git a/aiida/cmdline/params/types/group.py b/aiida/cmdline/params/types/group.py index ef216044e7..6150f6d062 100644 --- a/aiida/cmdline/params/types/group.py +++ b/aiida/cmdline/params/types/group.py @@ -40,12 +40,12 @@ def orm_class_loader(self): @with_dbenv() def convert(self, value, param, ctx): - from aiida.orm import Group, GroupTypeString + from aiida.orm import Group try: group = super().convert(value, param, ctx) except click.BadParameter: if self._create_if_not_exist: - group = Group(label=value, type_string=GroupTypeString.USER.value) + group = Group(label=value) else: raise diff --git a/aiida/orm/autogroup.py b/aiida/orm/autogroup.py index 16bf03f1c1..06e83185e3 100644 --- a/aiida/orm/autogroup.py +++ b/aiida/orm/autogroup.py @@ -14,21 +14,18 @@ from aiida.common import exceptions, timezone from aiida.common.escaping import escape_for_sql_like, get_regex_pattern_from_sql from aiida.common.warnings import AiidaDeprecationWarning -from aiida.orm import GroupTypeString, Group +from aiida.orm import AutoGroup from aiida.plugins.entry_point import get_entry_point_string_from_class CURRENT_AUTOGROUP = 
None -VERDIAUTOGROUP_TYPE = GroupTypeString.VERDIAUTOGROUP_TYPE.value - class Autogroup: - """ - An object used for the autogrouping of objects. - The autogrouping is checked by the Node.store() method. - In the store(), the Node will check if CURRENT_AUTOGROUP is != None. - If so, it will call Autogroup.is_to_be_grouped, and decide whether to put it in a group. - Such autogroups are going to be of the VERDIAUTOGROUP_TYPE. + """Class to create a new `AutoGroup` instance that will, while active, automatically contain all nodes being stored. + + The autogrouping is checked by the `Node.store()` method which, if `CURRENT_AUTOGROUP is not None` the method + `Autogroup.is_to_be_grouped` is called to decide whether to put the current node being stored in the current + `AutoGroup` instance. The exclude/include lists are lists of strings like: ``aiida.data:int``, ``aiida.calculation:quantumespresso.pw``, @@ -198,7 +195,7 @@ def clear_group_cache(self): self._group_label = None def get_or_create_group(self): - """Return the current Autogroup, or create one if None has been set yet. + """Return the current `AutoGroup`, or create one if None has been set yet. This function implements a somewhat complex logic that is however needed to make sure that, even if `verdi run` is called at the same time multiple @@ -219,16 +216,10 @@ def get_or_create_group(self): # So the group with the same name can be returned quickly in future # calls of this method. if self._group_label is not None: - results = [ - res[0] for res in QueryBuilder(). - append(Group, filters={ - 'label': self._group_label, - 'type_string': VERDIAUTOGROUP_TYPE - }, project='*').iterall() - ] + builder = QueryBuilder().append(AutoGroup, filters={'label': self._group_label}) + results = [res[0] for res in builder.iterall()] if results: - # If it is not empty, it should have only one result due to the - # uniqueness constraints + # If it is not empty, it should have only one result due to the uniqueness constraints assert len(results) == 1, 'I got more than one autogroup with the same label!' return results[0] # There are no results: probably the group has been deleted. @@ -239,7 +230,7 @@ def get_or_create_group(self): # Try to do a preliminary QB query to avoid to do too many try/except # if many of the prefix_NUMBER groups already exist queryb = QueryBuilder().append( - Group, + AutoGroup, filters={ 'or': [{ 'label': { @@ -274,7 +265,7 @@ def get_or_create_group(self): while True: try: label = label_prefix if counter == 0 else '{}_{}'.format(label_prefix, counter) - group = Group(label=label, type_string=VERDIAUTOGROUP_TYPE).store() + group = AutoGroup(label=label).store() self._group_label = group.label except exceptions.IntegrityError: counter += 1 diff --git a/aiida/orm/convert.py b/aiida/orm/convert.py index 197253cffd..d6b577773b 100644 --- a/aiida/orm/convert.py +++ b/aiida/orm/convert.py @@ -61,8 +61,9 @@ def _(backend_entity): @get_orm_entity.register(BackendGroup) def _(backend_entity): - from . 
import groups
-    return groups.Group.from_backend_entity(backend_entity)
+    from .groups import load_group_class
+    group_class = load_group_class(backend_entity.type_string)
+    return group_class.from_backend_entity(backend_entity)


 @get_orm_entity.register(BackendComputer)
diff --git a/aiida/orm/groups.py b/aiida/orm/groups.py
index cb7b4af801..f2c726c0f2 100644
--- a/aiida/orm/groups.py
+++ b/aiida/orm/groups.py
@@ -8,7 +8,7 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """ AiiDA Group entites"""
-
+from abc import ABCMeta
 from enum import Enum
 import warnings

@@ -21,19 +21,63 @@
 from . import entities
 from . import users

-__all__ = ('Group', 'GroupTypeString')
+__all__ = ('Group', 'GroupTypeString', 'AutoGroup', 'ImportGroup', 'UpfFamily')
+
+
+def load_group_class(type_string):
+    """Load the sub class of `Group` that corresponds to the given `type_string`.
+
+    .. note:: will fall back on `aiida.orm.groups.Group` if `type_string` cannot be resolved to a loadable entry point.
+
+    :param type_string: the entry point name of the `Group` sub class
+    :return: sub class of `Group` registered through an entry point
+    """
+    from aiida.common.exceptions import EntryPointError
+    from aiida.plugins.entry_point import load_entry_point
+
+    try:
+        group_class = load_entry_point('aiida.groups', type_string)
+    except EntryPointError:
+        message = 'could not load entry point `{}`, falling back onto `Group` base class.'.format(type_string)
+        warnings.warn(message)  # pylint: disable=no-member
+        group_class = Group
+
+    return group_class
+
+
+class GroupMeta(ABCMeta):
+    """Meta class for `aiida.orm.groups.Group` to automatically set the `type_string` attribute."""
+
+    def __new__(mcs, name, bases, namespace, **kwargs):
+        from aiida.plugins.entry_point import get_entry_point_from_class
+
+        newcls = ABCMeta.__new__(mcs, name, bases, namespace, **kwargs)  # pylint: disable=too-many-function-args
+
+        entry_point_group, entry_point = get_entry_point_from_class(namespace['__module__'], name)
+
+        if entry_point_group is None or entry_point_group != 'aiida.groups':
+            newcls._type_string = None
+            message = 'no registered entry point for `{}` so its instances will not be storable.'.format(name)
+            warnings.warn(message)  # pylint: disable=no-member
+        else:
+            newcls._type_string = entry_point.name  # pylint: disable=protected-access
+
+        return newcls


 class GroupTypeString(Enum):
-    """A simple enum of allowed group type strings."""
+    """A simple enum of allowed group type strings.

+    .. deprecated:: 1.2.0
+        This enum is deprecated and will be removed in `v2.0.0`.
+ """ UPFGROUP_TYPE = 'data.upf' IMPORTGROUP_TYPE = 'auto.import' VERDIAUTOGROUP_TYPE = 'auto.run' USER = 'user' -class Group(entities.Entity): +class Group(entities.Entity, metaclass=GroupMeta): """An AiiDA ORM implementation of group of nodes.""" class Collection(entities.Collection): @@ -54,21 +98,10 @@ def get_or_create(self, label=None, **kwargs): if not label: raise ValueError('Group label must be provided') - filters = {'label': label} - - if 'type_string' in kwargs: - if not isinstance(kwargs['type_string'], str): - raise exceptions.ValidationError( - 'type_string must be {}, you provided an object of type ' - '{}'.format(str, type(kwargs['type_string'])) - ) - - filters['type_string'] = kwargs['type_string'] - - res = self.find(filters=filters) + res = self.find(filters={'label': label}) if not res: - return Group(label, backend=self.backend, **kwargs).store(), True + return self.entity_type(label, backend=self.backend, **kwargs).store(), True if len(res) > 1: raise exceptions.MultipleObjectsError('More than one groups found in the database') @@ -83,12 +116,15 @@ def delete(self, id): # pylint: disable=invalid-name, redefined-builtin """ self._backend.groups.delete(id) - def __init__(self, label=None, user=None, description='', type_string=GroupTypeString.USER.value, backend=None): + def __init__(self, label=None, user=None, description='', type_string=None, backend=None): """ Create a new group. Either pass a dbgroup parameter, to reload a group from the DB (and then, no further parameters are allowed), or pass the parameters for the Group creation. + .. deprecated:: 1.2.0 + The parameter `type_string` will be removed in `v2.0.0` and is now determined automatically. + :param label: The group label, required on creation :type label: str @@ -105,12 +141,11 @@ def __init__(self, label=None, user=None, description='', type_string=GroupTypeS if not label: raise ValueError('Group label must be provided') - # Check that chosen type_string is allowed - if not isinstance(type_string, str): - raise exceptions.ValidationError( - 'type_string must be {}, you provided an object of type ' - '{}'.format(str, type(type_string)) - ) + if type_string is not None: + message = '`type_string` is deprecated because it is determined automatically, using default `core`' + warnings.warn(message) # pylint: disable=no-member + + type_string = self._type_string backend = backend or get_manager().get_backend() user = user or users.User.objects(backend).get_default() @@ -130,6 +165,13 @@ def __str__(self): return '"{}" [user-defined], of user {}'.format(self.label, self.user.email) + def store(self): + """Verify that the group is allowed to be stored, which is the case along as `type_string` is set.""" + if self._type_string is None: + raise exceptions.StoringNotAllowed('`type_string` is `None` so the group cannot be stored.') + + return super().store() + @property def label(self): """ @@ -295,11 +337,7 @@ def get(cls, **kwargs): filters = {} if 'type_string' in kwargs: - if not isinstance(kwargs['type_string'], str): - raise exceptions.ValidationError( - 'type_string must be {}, you provided an object of type ' - '{}'.format(str, type(kwargs['type_string'])) - ) + type_check(kwargs['type_string'], str) query = QueryBuilder() for key, val in kwargs.items(): @@ -382,3 +420,15 @@ def get_schema(): 'type': 'unicode' } } + + +class AutoGroup(Group): + """Group to be used to contain selected nodes generated while `aiida.orm.autogroup.CURRENT_AUTOGROUP` is set.""" + + +class ImportGroup(Group): + """Group to be 
used to contain all nodes from an export archive that has been imported.""" + + +class UpfFamily(Group): + """Group that represents a pseudo potential family containing `UpfData` nodes.""" diff --git a/aiida/orm/implementation/groups.py b/aiida/orm/implementation/groups.py index 74349e25e6..f39314060f 100644 --- a/aiida/orm/implementation/groups.py +++ b/aiida/orm/implementation/groups.py @@ -101,7 +101,7 @@ def get_or_create(cls, *args, **kwargs): :return: (group, created) where group is the group (new or existing, in any case already stored) and created is a boolean saying """ - res = cls.query(name=kwargs.get('name'), type_string=kwargs.get('type_string')) + res = cls.query(name=kwargs.get('name')) if not res: return cls.create(*args, **kwargs), True diff --git a/aiida/orm/nodes/data/upf.py b/aiida/orm/nodes/data/upf.py index d35e1b35ee..33cf9b6421 100644 --- a/aiida/orm/nodes/data/upf.py +++ b/aiida/orm/nodes/data/upf.py @@ -8,20 +8,14 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Module of `Data` sub class to represent a pseudopotential single file in UPF format and related utilities.""" - import json import re from upf_to_json import upf_to_json - -from aiida.common.lang import classproperty -from aiida.orm import GroupTypeString from .singlefile import SinglefileData __all__ = ('UpfData',) -UPFGROUP_TYPE = GroupTypeString.UPFGROUP_TYPE.value - REGEX_UPF_VERSION = re.compile(r""" \s*.*)"> @@ -107,9 +101,7 @@ def upload_upf_family(folder, group_label, group_description, stop_if_existing=T nfiles = len(filenames) automatic_user = orm.User.objects.get_default() - group, group_created = orm.Group.objects.get_or_create( - label=group_label, type_string=UPFGROUP_TYPE, user=automatic_user - ) + group, group_created = orm.UpfFamily.objects.get_or_create(label=group_label, user=automatic_user) if group.user.email != automatic_user.email: raise UniquenessError( @@ -312,12 +304,6 @@ def get_or_create(cls, filepath, use_first=False, store_upf=True): return (pseudos[0], False) - @classproperty - def upffamily_type_string(cls): - """Return the type string used for UPF family groups.""" - # pylint: disable=no-self-argument,no-self-use - return UPFGROUP_TYPE - def store(self, *args, **kwargs): """Store the node, reparsing the file so that the md5 and the element are correctly reset.""" # pylint: disable=arguments-differ @@ -388,11 +374,11 @@ def set_file(self, file, filename=None): def get_upf_family_names(self): """Get the list of all upf family names to which the pseudo belongs.""" - from aiida.orm import Group + from aiida.orm import UpfFamily from aiida.orm import QueryBuilder query = QueryBuilder() - query.append(Group, filters={'type_string': {'==': self.upffamily_type_string}}, tag='group', project='label') + query.append(UpfFamily, tag='group', project='label') query.append(UpfData, filters={'id': {'==': self.id}}, with_group='group') return [label for label, in query.all()] @@ -465,9 +451,9 @@ def get_upf_group(cls, group_label): :param group_label: the family group label :return: the `Group` with the given label, if it exists """ - from aiida.orm import Group + from aiida.orm import UpfFamily - return Group.get(label=group_label, type_string=cls.upffamily_type_string) + return UpfFamily.get(label=group_label) @classmethod def get_upf_groups(cls, filter_elements=None, user=None): @@ -480,12 +466,12 @@ def get_upf_groups(cls, filter_elements=None, user=None): If defined, it should be either a 
`User` instance or the user email. :return: list of `Group` entities of type UPF. """ - from aiida.orm import Group + from aiida.orm import UpfFamily from aiida.orm import QueryBuilder from aiida.orm import User builder = QueryBuilder() - builder.append(Group, filters={'type_string': {'==': cls.upffamily_type_string}}, tag='group', project='*') + builder.append(UpfFamily, tag='group', project='*') if user: builder.append(User, filters={'email': {'==': user}}, with_group='group') @@ -496,7 +482,7 @@ def get_upf_groups(cls, filter_elements=None, user=None): if filter_elements is not None: builder.append(UpfData, filters={'attributes.element': {'in': filter_elements}}, with_group='group') - builder.order_by({Group: {'id': 'asc'}}) + builder.order_by({UpfFamily: {'id': 'asc'}}) return [group for group, in builder.all()] diff --git a/aiida/plugins/entry_point.py b/aiida/plugins/entry_point.py index 2abe6be077..46e4bf3c7e 100644 --- a/aiida/plugins/entry_point.py +++ b/aiida/plugins/entry_point.py @@ -54,6 +54,7 @@ class EntryPointFormat(enum.Enum): 'aiida.calculations': 'aiida.orm.nodes.process.calculation.calcjob', 'aiida.cmdline.data': 'aiida.cmdline.data', 'aiida.data': 'aiida.orm.nodes.data', + 'aiida.groups': 'aiida.orm.groups', 'aiida.node': 'aiida.orm.nodes', 'aiida.parsers': 'aiida.parsers.plugins', 'aiida.schedulers': 'aiida.schedulers.plugins', @@ -78,6 +79,7 @@ def validate_registered_entry_points(): # pylint: disable=invalid-name factory_mapping = { 'aiida.calculations': factories.CalculationFactory, 'aiida.data': factories.DataFactory, + 'aiida.groups': factories.GroupFactory, 'aiida.parsers': factories.ParserFactory, 'aiida.schedulers': factories.SchedulerFactory, 'aiida.transports': factories.TransportFactory, diff --git a/aiida/plugins/factories.py b/aiida/plugins/factories.py index 6e5a9296e9..1675ac6cb6 100644 --- a/aiida/plugins/factories.py +++ b/aiida/plugins/factories.py @@ -14,8 +14,8 @@ from aiida.common.exceptions import InvalidEntryPointTypeError __all__ = ( - 'BaseFactory', 'CalculationFactory', 'DataFactory', 'DbImporterFactory', 'OrbitalFactory', 'ParserFactory', - 'SchedulerFactory', 'TransportFactory', 'WorkflowFactory' + 'BaseFactory', 'CalculationFactory', 'DataFactory', 'DbImporterFactory', 'GroupFactory', 'OrbitalFactory', + 'ParserFactory', 'SchedulerFactory', 'TransportFactory', 'WorkflowFactory' ) @@ -107,6 +107,25 @@ def DbImporterFactory(entry_point_name): raise_invalid_type_error(entry_point_name, entry_point_group, valid_classes) +def GroupFactory(entry_point_name): + """Return the `Group` sub class registered under the given entry point. + + :param entry_point_name: the entry point name + :return: sub class of :py:class:`~aiida.orm.groups.Group` + :raises aiida.common.InvalidEntryPointTypeError: if the type of the loaded entry point is invalid. + """ + from aiida.orm import Group + + entry_point_group = 'aiida.groups' + entry_point = BaseFactory(entry_point_group, entry_point_name) + valid_classes = (Group,) + + if isclass(entry_point) and issubclass(entry_point, Group): + return entry_point + + raise_invalid_type_error(entry_point_name, entry_point_group, valid_classes) + + def OrbitalFactory(entry_point_name): """Return the `Orbital` sub class registered under the given entry point. 
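Editorial aside (not part of the patch itself): a minimal usage sketch of the new ``GroupFactory`` introduced above. It assumes a configured AiiDA profile and relies on the ``core.upf`` entry point that this patch series registers in ``setup.json`` further below; the ``my_family`` label is purely illustrative::

    from aiida import load_profile
    from aiida.plugins import GroupFactory

    load_profile()  # assumes a configured AiiDA profile exists

    # Resolve the `Group` subclass registered under the `core.upf` entry point,
    # which this patch maps to `aiida.orm.groups:UpfFamily`.
    UpfFamily = GroupFactory('core.upf')

    # Instances are created like regular groups; the `type_string` is now
    # derived automatically from the entry point name instead of being passed.
    family, created = UpfFamily.objects.get_or_create(label='my_family')
    print(family.type_string)  # expected output: core.upf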
diff --git a/aiida/tools/groups/paths.py b/aiida/tools/groups/paths.py index 9d20ea9c55..b025ab250e 100644 --- a/aiida/tools/groups/paths.py +++ b/aiida/tools/groups/paths.py @@ -17,7 +17,7 @@ from aiida import orm from aiida.common.exceptions import NotExistent -__all__ = ('GroupPath', 'InvalidPath') +__all__ = ('GroupPath', 'InvalidPath', 'GroupNotFoundError', 'GroupNotUniqueError', 'NoGroupsInPathError') REGEX_ATTR = re.compile('^[a-zA-Z][\\_a-zA-Z0-9]*$') @@ -60,19 +60,20 @@ class GroupPath: See tests for usage examples. """ - def __init__(self, path='', type_string=orm.GroupTypeString.USER.value, warn_invalid_child=True): + def __init__(self, path='', cls=orm.Group, warn_invalid_child=True): # type: (str, Optional[str], Optional[GroupPath]) """Instantiate the class. :param path: The initial path of the group. - :param type_string: Used to query for and instantiate a ``Group`` with. + :param cls: The subclass of `Group` to operate on. :param warn_invalid_child: Issue a warning, when iterating children, if a child path is invalid. """ + if not issubclass(cls, orm.Group): + raise TypeError('cls must a subclass of Group: {}'.format(cls)) + self._delimiter = '/' - if not isinstance(type_string, str): - raise TypeError('type_string must a str: {}'.format(type_string)) - self._type_string = type_string + self._cls = cls self._path_string = self._validate_path(path) self._path_list = self._path_string.split(self._delimiter) if path else [] self._warn_invalid_child = warn_invalid_child @@ -90,21 +91,21 @@ def _validate_path(self, path): def __repr__(self): # type: () -> str """Represent the instantiated class.""" - return "{}('{}', type='{}')".format(self.__class__.__name__, self.path, self.type_string) + return "{}('{}', cls='{}')".format(self.__class__.__name__, self.path, self.cls) def __eq__(self, other): # type: (Any) -> bool - """Compare equality of path and type string to another ``GroupPath`` object.""" + """Compare equality of path and ``Group`` subclass to another ``GroupPath`` object.""" if not isinstance(other, GroupPath): return NotImplemented - return (self.path, self.type_string) == (other.path, other.type_string) + return (self.path, self.cls) == (other.path, other.cls) def __lt__(self, other): # type: (Any) -> bool - """Compare less-than operator of path and type string to another ``GroupPath`` object.""" + """Compare less-than operator of path and ``Group`` subclass to another ``GroupPath`` object.""" if not isinstance(other, GroupPath): return NotImplemented - return (self.path, self.type_string) < (other.path, other.type_string) + return (self.path, self.cls) < (other.path, other.cls) @property def path(self): @@ -133,10 +134,10 @@ def delimiter(self): return self._delimiter @property - def type_string(self): + def cls(self): # type: () -> str - """Return the type_string used to query for and instantiate a ``Group`` with.""" - return self._type_string + """Return the cls used to query for and instantiate a ``Group`` with.""" + return self._cls @property def parent(self): @@ -144,9 +145,7 @@ def parent(self): """Return the parent path.""" if self.path_list: return GroupPath( - self.delimiter.join(self.path_list[:-1]), - type_string=self.type_string, - warn_invalid_child=self._warn_invalid_child + self.delimiter.join(self.path_list[:-1]), cls=self.cls, warn_invalid_child=self._warn_invalid_child ) return None @@ -158,7 +157,7 @@ def __truediv__(self, path): path = self._validate_path(path) child = GroupPath( path=self.path + self.delimiter + path if self.path else path, - 
type_string=self.type_string, + cls=self.cls, warn_invalid_child=self._warn_invalid_child ) return child @@ -169,10 +168,10 @@ def __getitem__(self, path): return self.__truediv__(path) def get_group(self): - # type: () -> Optional[orm.Group] + # type: () -> Optional[self.cls] """Return the concrete group associated with this path.""" try: - return orm.Group.objects.get(label=self.path, type_string=self.type_string) + return self.cls.objects.get(label=self.path) except NotExistent: return None @@ -182,16 +181,14 @@ def group_ids(self): """Return all the UUID associated with this GroupPath. :returns: and empty list, if no group associated with this label, - or can be multiple if type_string was None + or can be multiple if cls was None This is an efficient method for checking existence, which does not require the (slow) loading of the ORM entity. """ query = orm.QueryBuilder() filters = {'label': self.path} - if self.type_string is not None: - filters['type_string'] = self.type_string - query.append(orm.Group, filters=filters, project='id') + query.append(self.cls, filters=filters, project='id') return [r[0] for r in query.all()] @property @@ -201,11 +198,9 @@ def is_virtual(self): return len(self.group_ids) == 0 def get_or_create_group(self): - # type: () -> (orm.Group, bool) + # type: () -> (self.cls, bool) """Return the concrete group associated with this path or, create it, if it does not already exist.""" - if self.type_string is not None: - return orm.Group.objects.get_or_create(label=self.path, type_string=self.type_string) - return orm.Group.objects.get_or_create(label=self.path) + return self.cls.objects.get_or_create(label=self.path) def delete_group(self): """Delete the concrete group associated with this path. @@ -217,7 +212,7 @@ def delete_group(self): raise GroupNotFoundError(self) if len(ids) > 1: raise GroupNotUniqueError(self) - orm.Group.objects.delete(ids[0]) + self.cls.objects.delete(ids[0]) @property def children(self): @@ -227,9 +222,7 @@ def children(self): filters = {} if self.path: filters['label'] = {'like': self.path + self.delimiter + '%'} - if self.type_string is not None: - filters['type_string'] = self.type_string - query.append(orm.Group, filters=filters, project='label') + query.append(self.cls, filters=filters, project='label') if query.count() == 0 and self.is_virtual: raise NoGroupsInPathError(self) @@ -242,9 +235,7 @@ def children(self): if (path_string not in yielded and path[:len(self._path_list)] == self._path_list): yielded.append(path_string) try: - yield GroupPath( - path=path_string, type_string=self.type_string, warn_invalid_child=self._warn_invalid_child - ) + yield GroupPath(path=path_string, cls=self.cls, warn_invalid_child=self._warn_invalid_child) except InvalidPath: if self._warn_invalid_child: warnings.warn('invalid path encountered: {}'.format(path_string)) # pylint: disable=no-member @@ -291,9 +282,7 @@ def walk_nodes(self, filters=None, node_class=None, query_batch=None): group_filters = {} if self.path: group_filters['label'] = {'or': [{'==': self.path}, {'like': self.path + self.delimiter + '%'}]} - if self.type_string is not None: - group_filters['type_string'] = self.type_string - query.append(orm.Group, filters=group_filters, project='label', tag='group') + query.append(self.cls, filters=group_filters, project='label', tag='group') query.append( orm.Node if node_class is None else node_class, with_group='group', @@ -301,7 +290,7 @@ def walk_nodes(self, filters=None, node_class=None, query_batch=None): project=['*'], ) for (label, 
node) in query.iterall(query_batch) if query_batch else query.all(): - yield WalkNodeResult(GroupPath(label, type_string=self.type_string), node) + yield WalkNodeResult(GroupPath(label, cls=self.cls), node) @property def browse(self): @@ -330,9 +319,7 @@ def __init__(self, group_path): def __repr__(self): # type: () -> str """Represent the instantiated class.""" - return "{}('{}', type='{}')".format( - self.__class__.__name__, self._group_path.path, self._group_path.type_string - ) + return "{}('{}', type='{}')".format(self.__class__.__name__, self._group_path.path, self._group_path.cls) def __call__(self): # type: () -> GroupPath diff --git a/aiida/tools/importexport/common/config.py b/aiida/tools/importexport/common/config.py index 0baac376c9..549c22be7d 100644 --- a/aiida/tools/importexport/common/config.py +++ b/aiida/tools/importexport/common/config.py @@ -9,15 +9,13 @@ ########################################################################### # pylint: disable=invalid-name """ Configuration file for AiiDA Import/Export module """ - -from aiida.orm import Computer, Group, GroupTypeString, Node, User, Log, Comment +from aiida.orm import Computer, Group, Node, User, Log, Comment __all__ = ('EXPORT_VERSION',) # Current export version EXPORT_VERSION = '0.8' -IMPORTGROUP_TYPE = GroupTypeString.IMPORTGROUP_TYPE.value DUPL_SUFFIX = ' (Imported #{})' # The name of the subfolder in which the node files are stored diff --git a/aiida/tools/importexport/dbimport/backends/django/__init__.py b/aiida/tools/importexport/dbimport/backends/django/__init__.py index d97ad70d1d..aa463f5ffb 100644 --- a/aiida/tools/importexport/dbimport/backends/django/__init__.py +++ b/aiida/tools/importexport/dbimport/backends/django/__init__.py @@ -21,10 +21,10 @@ from aiida.common.links import LinkType, validate_link_label from aiida.common.utils import grouper, get_object_from_string from aiida.orm.utils.repository import Repository -from aiida.orm import QueryBuilder, Node, Group +from aiida.orm import QueryBuilder, Node, Group, ImportGroup from aiida.tools.importexport.common import exceptions from aiida.tools.importexport.common.archive import extract_tree, extract_tar, extract_zip -from aiida.tools.importexport.common.config import DUPL_SUFFIX, IMPORTGROUP_TYPE, EXPORT_VERSION, NODES_EXPORT_SUBFOLDER +from aiida.tools.importexport.common.config import DUPL_SUFFIX, EXPORT_VERSION, NODES_EXPORT_SUBFOLDER from aiida.tools.importexport.common.config import ( NODE_ENTITY_NAME, GROUP_ENTITY_NAME, COMPUTER_ENTITY_NAME, USER_ENTITY_NAME, LOG_ENTITY_NAME, COMMENT_ENTITY_NAME ) @@ -673,7 +673,7 @@ def import_data_dj( "Overflow of import groups (more than 100 import groups exists with basename '{}')" ''.format(basename) ) - group = Group(label=group_label, type_string=IMPORTGROUP_TYPE).store() + group = ImportGroup(label=group_label).store() # Add all the nodes to the new group # TODO: decide if we want to return the group label diff --git a/aiida/tools/importexport/dbimport/backends/sqla/__init__.py b/aiida/tools/importexport/dbimport/backends/sqla/__init__.py index f08de125ec..2e800b1361 100644 --- a/aiida/tools/importexport/dbimport/backends/sqla/__init__.py +++ b/aiida/tools/importexport/dbimport/backends/sqla/__init__.py @@ -20,13 +20,13 @@ from aiida.common.folders import SandboxFolder, RepositoryFolder from aiida.common.links import LinkType from aiida.common.utils import get_object_from_string -from aiida.orm import QueryBuilder, Node, Group, WorkflowNode, CalculationNode, Data +from aiida.orm import QueryBuilder, 
Node, Group, ImportGroup from aiida.orm.utils.links import link_triple_exists, validate_link from aiida.orm.utils.repository import Repository from aiida.tools.importexport.common import exceptions from aiida.tools.importexport.common.archive import extract_tree, extract_tar, extract_zip -from aiida.tools.importexport.common.config import DUPL_SUFFIX, IMPORTGROUP_TYPE, EXPORT_VERSION, NODES_EXPORT_SUBFOLDER +from aiida.tools.importexport.common.config import DUPL_SUFFIX, EXPORT_VERSION, NODES_EXPORT_SUBFOLDER from aiida.tools.importexport.common.config import ( NODE_ENTITY_NAME, GROUP_ENTITY_NAME, COMPUTER_ENTITY_NAME, USER_ENTITY_NAME, LOG_ENTITY_NAME, COMMENT_ENTITY_NAME ) @@ -664,7 +664,7 @@ def import_data_sqla( "Overflow of import groups (more than 100 import groups exists with basename '{}')" ''.format(basename) ) - group = Group(label=group_label, type_string=IMPORTGROUP_TYPE) + group = ImportGroup(label=group_label) session.add(group.backend_entity._dbmodel) # Adding nodes to group avoiding the SQLA ORM to increase speed diff --git a/docs/source/working_with_aiida/groups.rst b/docs/source/working_with_aiida/groups.rst index 58eadcc024..55bd82ebd5 100644 --- a/docs/source/working_with_aiida/groups.rst +++ b/docs/source/working_with_aiida/groups.rst @@ -18,144 +18,162 @@ be performed with groups: Create a new Group ------------------ - From the command line interface:: +From the command line interface:: - verdi group create test_group + verdi group create test_group - From the python interface:: +From the python interface:: - In [1]: group = Group(label="test_group") - - In [2]: group.store() - Out[2]: + In [1]: group = Group(label="test_group") + In [2]: group.store() + Out[2]: List available Groups --------------------- - Example:: +Example:: - verdi group list + verdi group list - By default ``verdi group list`` only shows groups of the type *user*. - In case you want to show groups of another type use ``-t/--type`` option. If - you want to show groups of all types, use the ``-a/--all-types`` option. +By default ``verdi group list`` only shows groups of the type *user*. +In case you want to show groups of another type use ``-t/--type`` option. If +you want to show groups of all types, use the ``-a/--all-types`` option. - From the command line interface:: +From the command line interface:: - verdi group list -t user + verdi group list -t user - From the python interface:: +From the python interface:: - In [1]: query = QueryBuilder() + In [1]: query = QueryBuilder() - In [2]: query.append(Group, filters={'type_string':'user'}) - Out[2]: + In [2]: query.append(Group, filters={'type_string':'user'}) + Out[2]: - In [3]: query.all() - Out[3]: - [[], - [], - []] + In [3]: query.all() + Out[3]: + [[], + [], + []] Add nodes to a Group -------------------- - Once the ``test_group`` has been created, we can add nodes to it. To add the node with ``pk=1`` to the group we need to do the following. - - From the command line interface:: - - verdi group add-nodes -G test_group 1 - Do you really want to add 1 nodes to Group? [y/N]: y - - From the python interface:: +Once the ``test_group`` has been created, we can add nodes to it. To add the node with ``pk=1`` to the group we need to do the following. - In [1]: group = Group.get(label='test_group') +From the command line interface:: - In [2]: from aiida.orm import Dict + verdi group add-nodes -G test_group 1 + Do you really want to add 1 nodes to Group? 
[y/N]: y - In [3]: p = Dict().store() +From the python interface:: - In [4]: p - Out[4]: + In [1]: group = Group.get(label='test_group') + In [2]: from aiida.orm import Dict + In [3]: p = Dict().store() + In [4]: p + Out[4]: + In [5]: group.add_nodes(p) - In [5]: group.add_nodes(p) Show information about a Group ------------------------------ - From the command line interface:: - - verdi group show test_group - ----------------- ---------------- - Group label test_group - Group type_string user - Group description - ----------------- ---------------- - # Nodes: - PK Type Created - ---- ------ --------------- - 1 Code 26D:21h:45m ago +From the command line interface:: + verdi group show test_group + ----------------- ---------------- + Group label test_group + Group type_string user + Group description + ----------------- ---------------- + # Nodes: + PK Type Created + ---- ------ --------------- + 1 Code 26D:21h:45m ago Remove nodes from a Group ------------------------- - From the command line interface:: +From the command line interface:: - verdi group remove-nodes -G test_group 1 - Do you really want to remove 1 nodes from Group? [y/N]: y + verdi group remove-nodes -G test_group 1 + Do you really want to remove 1 nodes from Group? [y/N]: y - From the python interface:: +From the python interface:: - In [1]: group = Group.get(label='test_group') + In [1]: group = Group.get(label='test_group') + In [2]: group.clear() - In [2]: group.clear() Rename Group ------------ - From the command line interface:: +From the command line interface:: verdi group relabel test_group old_group Success: Label changed to old_group - From the python interface:: +From the python interface:: - In [1]: group = Group.get(label='old_group') + In [1]: group = Group.get(label='old_group') + In [2]: group.label = "another_group" - In [2]: group.label = "another_group" Delete Group ------------ - From the command line interface:: +From the command line interface:: verdi group delete another_group Are you sure to delete Group? [y/N]: y Success: Group deleted. - Copy one group into another --------------------------- - This operation will copy the nodes of the source group into the destination - group. Moreover, if the destination group did not exist before, it will - be created automatically. +This operation will copy the nodes of the source group into the destination +group. Moreover, if the destination group did not exist before, it will +be created automatically. + +From the command line interface:: + + verdi group copy source_group dest_group + Success: Nodes copied from group to group + +From the python interface:: + + In [1]: src_group = Group.objects.get(label='source_group') + In [2]: dest_group = Group(label='destination_group').store() + In [3]: dest_group.add_nodes(list(src_group.nodes)) + + +Create a `Group` subclass +------------------------- +It is possible to create a subclass of `Group` to implement custom functionality. +To make the instances of the subclass storable and loadable, it has to be registered through an entry point in the ``aiida.groups`` entry point category. 
+For example, assuming we have a subclass ``SubClassGroup`` in the module ``aiida_plugin.groups.sub_class:SubClassGroup``, to register it, one has to add the following to the ``setup.py`` of the plugin package:: - From the command line interface:: + "entry_points": { + "aiida.groups": [ + "plugin.sub_class = aiida_plugin.groups.sub_class:SubClassGroup" + ] + } - verdi group copy source_group dest_group - Success: Nodes copied from group to group +Now that the subclass is properly registered, instances can be stored:: - From the python interface:: + group = SubClassGroup(label='sub-class-group') + group.store() - In [1]: src_group = Group.objects.get(label='source_group') +The ``type_string`` of the group instance corresponds to the entry point name and so in this example is ``plugin.sub_class``. +This is what AiiDA uses to load the correct class when reloading the group from the database:: - In [2]: dest_group = Group(label='destination_group').store() + group = load_group(group.pk) + assert isinstance(group, SubClassGroup) - In [3]: dest_group.add_nodes(list(src_group.nodes)) +If the entry point is not currently registered, because the corresponding plugin package is not installed for example, AiiDA will issue a warning and fallback onto the ``Group`` base class. diff --git a/setup.json b/setup.json index ebe8360174..7f4b7783ca 100644 --- a/setup.json +++ b/setup.json @@ -159,6 +159,12 @@ "structure = aiida.orm.nodes.data.structure:StructureData", "upf = aiida.orm.nodes.data.upf:UpfData" ], + "aiida.groups": [ + "core = aiida.orm.groups:Group", + "core.auto = aiida.orm.groups:AutoGroup", + "core.import = aiida.orm.groups:ImportGroup", + "core.upf = aiida.orm.groups:UpfFamily" + ], "aiida.node": [ "data = aiida.orm.nodes.data.data:Data", "process = aiida.orm.nodes.process.process:ProcessNode", diff --git a/tests/backends/aiida_django/migrations/test_migrations_0044_dbgroup_type_string.py b/tests/backends/aiida_django/migrations/test_migrations_0044_dbgroup_type_string.py new file mode 100644 index 0000000000..ab1b31d518 --- /dev/null +++ b/tests/backends/aiida_django/migrations/test_migrations_0044_dbgroup_type_string.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. 
# +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +# pylint: disable=import-error,no-name-in-module,invalid-name +"""Test migration of `type_string` after the `Group` class became pluginnable.""" + +from .test_migrations_common import TestMigrations + + +class TestGroupTypeStringMigration(TestMigrations): + """Test migration of `type_string` after the `Group` class became pluginnable.""" + + migrate_from = '0043_default_link_label' + migrate_to = '0044_dbgroup_type_string' + + def setUpBeforeMigration(self): + DbGroup = self.apps.get_model('db', 'DbGroup') + + # test user group type_string: 'user' -> 'core' + group_user = DbGroup(label='01', user_id=self.default_user.id, type_string='user') + group_user.save() + self.group_user_pk = group_user.pk + + # test data.upf group type_string: 'data.upf' -> 'core.upf' + group_data_upf = DbGroup(label='02', user_id=self.default_user.id, type_string='data.upf') + group_data_upf.save() + self.group_data_upf_pk = group_data_upf.pk + + # test auto.import group type_string: 'auto.import' -> 'core.import' + group_autoimport = DbGroup(label='03', user_id=self.default_user.id, type_string='auto.import') + group_autoimport.save() + self.group_autoimport_pk = group_autoimport.pk + + # test auto.run group type_string: 'auto.run' -> 'core.auto' + group_autorun = DbGroup(label='04', user_id=self.default_user.id, type_string='auto.run') + group_autorun.save() + self.group_autorun_pk = group_autorun.pk + + def test_group_string_update(self): + """Test that the type_string were updated correctly.""" + DbGroup = self.apps.get_model('db', 'DbGroup') + + # 'user' -> 'core' + group_user = DbGroup.objects.get(pk=self.group_user_pk) + self.assertEqual(group_user.type_string, 'core') + + # 'data.upf' -> 'core.upf' + group_data_upf = DbGroup.objects.get(pk=self.group_data_upf_pk) + self.assertEqual(group_data_upf.type_string, 'core.upf') + + # 'auto.import' -> 'core.import' + group_autoimport = DbGroup.objects.get(pk=self.group_autoimport_pk) + self.assertEqual(group_autoimport.type_string, 'core.import') + + # 'auto.run' -> 'core.auto' + group_autorun = DbGroup.objects.get(pk=self.group_autorun_pk) + self.assertEqual(group_autorun.type_string, 'core.auto') diff --git a/tests/backends/aiida_sqlalchemy/test_migrations.py b/tests/backends/aiida_sqlalchemy/test_migrations.py index 8e2046f293..2bb52ceecc 100644 --- a/tests/backends/aiida_sqlalchemy/test_migrations.py +++ b/tests/backends/aiida_sqlalchemy/test_migrations.py @@ -1642,3 +1642,69 @@ def test_data_migrated(self): finally: session.close() + + +class TestGroupTypeStringMigration(TestMigrationsSQLA): + """Test the migration that renames the DbGroup type strings.""" + + migrate_from = '118349c10896' # 118349c10896_default_link_label.py + migrate_to = 'bf591f31dd12' # bf591f31dd12_dbgroup_type_string.py + + def setUpBeforeMigration(self): + """Create the DbGroups with the old type strings.""" + DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name + DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name + + with self.get_session() as session: + try: + default_user = DbUser(email='{}@aiida.net'.format(self.id())) + session.add(default_user) + session.commit() + + # test user group type_string: 'user' -> 'core' + group_user = 
DbGroup(label='01', user_id=default_user.id, type_string='user') + session.add(group_user) + # test data.upf group type_string: 'data.upf' -> 'core.upf' + group_data_upf = DbGroup(label='02', user_id=default_user.id, type_string='data.upf') + session.add(group_data_upf) + # test auto.import group type_string: 'auto.import' -> 'core.import' + group_autoimport = DbGroup(label='03', user_id=default_user.id, type_string='auto.import') + session.add(group_autoimport) + # test auto.run group type_string: 'auto.run' -> 'core.auto' + group_autorun = DbGroup(label='04', user_id=default_user.id, type_string='auto.run') + session.add(group_autorun) + + session.commit() + + # Store values for later tests + self.group_user_pk = group_user.id + self.group_data_upf_pk = group_data_upf.id + self.group_autoimport_pk = group_autoimport.id + self.group_autorun_pk = group_autorun.id + + finally: + session.close() + + def test_group_string_update(self): + """Test that the type strings are properly migrated.""" + DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name + + with self.get_session() as session: + try: + # test user group type_string: 'user' -> 'core' + group_user = session.query(DbGroup).filter(DbGroup.id == self.group_user_pk).one() + self.assertEqual(group_user.type_string, 'core') + + # test data.upf group type_string: 'data.upf' -> 'core.upf' + group_data_upf = session.query(DbGroup).filter(DbGroup.id == self.group_data_upf_pk).one() + self.assertEqual(group_data_upf.type_string, 'core.upf') + + # test auto.import group type_string: 'auto.import' -> 'core.import' + group_autoimport = session.query(DbGroup).filter(DbGroup.id == self.group_autoimport_pk).one() + self.assertEqual(group_autoimport.type_string, 'core.import') + + # test auto.run group type_string: 'auto.run' -> 'core.auto' + group_autorun = session.query(DbGroup).filter(DbGroup.id == self.group_autorun_pk).one() + self.assertEqual(group_autorun.type_string, 'core.auto') + finally: + session.close() diff --git a/tests/cmdline/commands/test_group.py b/tests/cmdline/commands/test_group.py index 4302420633..ab79f650b1 100644 --- a/tests/cmdline/commands/test_group.py +++ b/tests/cmdline/commands/test_group.py @@ -165,7 +165,7 @@ def test_show(self): self.assertClickResultNoException(result) for grpline in [ - 'Group label', 'dummygroup1', 'Group type_string', 'user', 'Group description', '' + 'Group label', 'dummygroup1', 'Group type_string', 'core', 'Group description', '' ]: self.assertIn(grpline, result.output) diff --git a/tests/cmdline/commands/test_group_ls.py b/tests/cmdline/commands/test_group_ls.py index 7cc01079a4..1a8bb7d2ef 100644 --- a/tests/cmdline/commands/test_group_ls.py +++ b/tests/cmdline/commands/test_group_ls.py @@ -22,12 +22,13 @@ def setup_groups(clear_database_before_test): """Setup some groups for testing.""" for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f']: - group, _ = orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + group, _ = orm.Group.objects.get_or_create(label) group.description = 'A description of {}'.format(label) - orm.Group.objects.get_or_create('a/x', type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + orm.UpfFamily.objects.get_or_create('a/x') yield +@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') def test_with_no_opts(setup_groups): """Test ``verdi group path ls``""" @@ -46,6 +47,7 @@ def test_with_no_opts(setup_groups): assert result.output == 'a/c/d\na/c/e\n' +@pytest.mark.skip('Reenable 
when subclassing in the query builder is implemented (#3902)') def test_recursive(setup_groups): """Test ``verdi group path ls --recursive``""" @@ -61,6 +63,7 @@ def test_recursive(setup_groups): assert result.output == 'a/c/d\na/c/e\na/c/e/g\n' +@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') @pytest.mark.parametrize('tag', ['-l', '--long']) def test_long(setup_groups, tag): """Test ``verdi group path ls --long``""" @@ -106,6 +109,7 @@ def test_long(setup_groups, tag): ) +@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') @pytest.mark.parametrize('tag', ['--no-virtual']) def test_groups_only(setup_groups, tag): """Test ``verdi group path ls --no-virtual``""" diff --git a/tests/cmdline/commands/test_run.py b/tests/cmdline/commands/test_run.py index 78c858420f..4ed690bb20 100644 --- a/tests/cmdline/commands/test_run.py +++ b/tests/cmdline/commands/test_run.py @@ -9,6 +9,7 @@ ########################################################################### """Tests for `verdi run`.""" import tempfile +import textwrap import warnings from click.testing import CliRunner @@ -31,21 +32,22 @@ def test_run_workfunction(self): that are defined within the script will fail, as the inspect module will not correctly be able to determin the full path of the source file. """ - from aiida.orm import load_node - from aiida.orm import WorkFunctionNode + from aiida.orm import load_node, WorkFunctionNode - script_content = """ -#!/usr/bin/env python -from aiida.engine import workfunction + script_content = textwrap.dedent( + """\ + #!/usr/bin/env python + from aiida.engine import workfunction -@workfunction -def wf(): - pass + @workfunction + def wf(): + pass -if __name__ == '__main__': - result, node = wf.run_get_node() - print(node.pk) - """ + if __name__ == '__main__': + result, node = wf.run_get_node() + print(node.pk) + """ + ) # If `verdi run` is not setup correctly, the script above when run with `verdi run` will fail, because when # the engine will try to create the node for the workfunction and create a copy of its sourcefile, namely the @@ -77,9 +79,8 @@ def setUp(self): super().setUp() self.cli_runner = CliRunner() - # I need to disable the global variable of this test environment, - # because invoke is just calling the function and therefore inheriting - # the global variable + # I need to disable the global variable of this test environment, because invoke is just calling the function + # and therefore inheriting the global variable self._old_autogroup = autogroup.CURRENT_AUTOGROUP autogroup.CURRENT_AUTOGROUP = None @@ -92,12 +93,15 @@ def tearDown(self): def test_autogroup(self): """Check if the autogroup is properly generated.""" - from aiida.orm import QueryBuilder, Node, Group, load_node + from aiida.orm import QueryBuilder, Node, AutoGroup, load_node - script_content = """from aiida.orm import Data -node = Data().store() -print(node.pk) -""" + script_content = textwrap.dedent( + """\ + from aiida.orm import Data + node = Data().store() + print(node.pk) + """ + ) with tempfile.NamedTemporaryFile(mode='w+') as fhandle: fhandle.write(script_content) @@ -111,7 +115,7 @@ def test_autogroup(self): _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual( 
len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' @@ -119,12 +123,16 @@ def test_autogroup(self): def test_autogroup_custom_label(self): """Check if the autogroup is properly generated with the label specified.""" - from aiida.orm import QueryBuilder, Node, Group, load_node + from aiida.orm import QueryBuilder, Node, AutoGroup, load_node + + script_content = textwrap.dedent( + """\ + from aiida.orm import Data + node = Data().store() + print(node.pk) + """ + ) - script_content = """from aiida.orm import Data -node = Data().store() -print(node.pk) -""" autogroup_label = 'SOME_group_LABEL' with tempfile.NamedTemporaryFile(mode='w+') as fhandle: fhandle.write(script_content) @@ -138,7 +146,7 @@ def test_autogroup_custom_label(self): _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual( len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' @@ -147,12 +155,15 @@ def test_autogroup_custom_label(self): def test_no_autogroup(self): """Check if the autogroup is not generated if ``verdi run`` is asked not to.""" - from aiida.orm import QueryBuilder, Node, Group, load_node + from aiida.orm import QueryBuilder, Node, AutoGroup, load_node - script_content = """from aiida.orm import Data -node = Data().store() -print(node.pk) -""" + script_content = textwrap.dedent( + """\ + from aiida.orm import Data + node = Data().store() + print(node.pk) + """ + ) with tempfile.NamedTemporaryFile(mode='w+') as fhandle: fhandle.write(script_content) @@ -166,61 +177,64 @@ def test_no_autogroup(self): _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual(len(all_auto_groups), 0, 'There should be no autogroup generated') def test_autogroup_filter_class(self): # pylint: disable=too-many-locals """Check if the autogroup is properly generated but filtered classes are skipped.""" - from aiida.orm import QueryBuilder, Node, Group, load_node - - script_content = """import sys -from aiida.orm import Computer, Int, ArrayData, KpointsData, CalculationNode, WorkflowNode -from aiida.plugins import CalculationFactory -from aiida.engine import run_get_node -ArithmeticAdd = CalculationFactory('arithmetic.add') - -computer = Computer( - name='localhost-example-{}'.format(sys.argv[1]), - hostname='localhost', - description='my computer', - transport_type='local', - scheduler_type='direct', - workdir='/tmp' -).store() -computer.configure() - -code = Code( - input_plugin_name='arithmetic.add', - remote_computer_exec=[computer, '/bin/true']).store() -inputs = { - 'x': Int(1), - 'y': Int(2), - 'code': code, - 'metadata': { - 'options': { - 'resources': { - 'num_machines': 1, - 'num_mpiprocs_per_machine': 1 + from aiida.orm import Code, QueryBuilder, Node, AutoGroup, load_node + + script_content = textwrap.dedent( + """\ + import sys + from aiida.orm import Computer, Int, ArrayData, KpointsData, CalculationNode, WorkflowNode + from aiida.plugins import CalculationFactory + from aiida.engine import run_get_node + ArithmeticAdd = 
CalculationFactory('arithmetic.add') + + computer = Computer( + name='localhost-example-{}'.format(sys.argv[1]), + hostname='localhost', + description='my computer', + transport_type='local', + scheduler_type='direct', + workdir='/tmp' + ).store() + computer.configure() + + code = Code( + input_plugin_name='arithmetic.add', + remote_computer_exec=[computer, '/bin/true']).store() + inputs = { + 'x': Int(1), + 'y': Int(2), + 'code': code, + 'metadata': { + 'options': { + 'resources': { + 'num_machines': 1, + 'num_mpiprocs_per_machine': 1 + } + } + } } - } - } -} - -node1 = KpointsData().store() -node2 = ArrayData().store() -node3 = Int(3).store() -node4 = CalculationNode().store() -node5 = WorkflowNode().store() -_, node6 = run_get_node(ArithmeticAdd, **inputs) -print(node1.pk) -print(node2.pk) -print(node3.pk) -print(node4.pk) -print(node5.pk) -print(node6.pk) -""" - from aiida.orm import Code + + node1 = KpointsData().store() + node2 = ArrayData().store() + node3 = Int(3).store() + node4 = CalculationNode().store() + node5 = WorkflowNode().store() + _, node6 = run_get_node(ArithmeticAdd, **inputs) + print(node1.pk) + print(node2.pk) + print(node3.pk) + print(node4.pk) + print(node5.pk) + print(node6.pk) + """ + ) + Code() for idx, ( flags, @@ -283,27 +297,27 @@ def test_autogroup_filter_class(self): # pylint: disable=too-many-locals _ = load_node(pk6) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk1}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_kptdata = queryb.all() queryb = QueryBuilder().append(Node, filters={'id': pk2}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_arraydata = queryb.all() queryb = QueryBuilder().append(Node, filters={'id': pk3}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_int = queryb.all() queryb = QueryBuilder().append(Node, filters={'id': pk4}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_calc = queryb.all() queryb = QueryBuilder().append(Node, filters={'id': pk5}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_wf = queryb.all() queryb = QueryBuilder().append(Node, filters={'id': pk6}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups_calcarithmetic = queryb.all() self.assertEqual( @@ -339,12 +353,16 @@ def test_autogroup_filter_class(self): # pylint: disable=too-many-locals def test_autogroup_clashing_label(self): """Check if the autogroup label is properly (re)generated when it clashes with an existing group name.""" - from aiida.orm import QueryBuilder, Node, Group, load_node + from aiida.orm import QueryBuilder, Node, AutoGroup, load_node + + script_content = textwrap.dedent( + """\ + from aiida.orm import Data + node = Data().store() + print(node.pk) + """ + ) - script_content = """from aiida.orm import Data -node = Data().store() -print(node.pk) 
-""" autogroup_label = 'SOME_repeated_group_LABEL' with tempfile.NamedTemporaryFile(mode='w+') as fhandle: fhandle.write(script_content) @@ -358,7 +376,7 @@ def test_autogroup_clashing_label(self): pk = int(result.output) _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual( len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' @@ -374,7 +392,7 @@ def test_autogroup_clashing_label(self): pk = int(result.output) _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual( len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' @@ -383,12 +401,15 @@ def test_autogroup_clashing_label(self): def test_legacy_autogroup_name(self): """Check if the autogroup is properly generated when using the legacy --group-name flag.""" - from aiida.orm import QueryBuilder, Node, Group, load_node - - script_content = """from aiida.orm import Data -node = Data().store() -print(node.pk) -""" + from aiida.orm import QueryBuilder, Node, AutoGroup, load_node + + script_content = textwrap.dedent( + """\ + from aiida.orm import Data + node = Data().store() + print(node.pk) + """ + ) group_label = 'legacy-group-name' with tempfile.NamedTemporaryFile(mode='w+') as fhandle: @@ -409,7 +430,7 @@ def test_legacy_autogroup_name(self): _ = load_node(pk) # Check if the node can be loaded queryb = QueryBuilder().append(Node, filters={'id': pk}, tag='node') - queryb.append(Group, with_node='node', filters={'type_string': 'auto.run'}, project='*') + queryb.append(AutoGroup, with_node='node', project='*') all_auto_groups = queryb.all() self.assertEqual( len(all_auto_groups), 1, 'There should be only one autogroup associated with the node just created' diff --git a/tests/orm/data/test_upf.py b/tests/orm/data/test_upf.py index 228f8d9b77..02922bc60f 100644 --- a/tests/orm/data/test_upf.py +++ b/tests/orm/data/test_upf.py @@ -10,7 +10,6 @@ """ This module contains tests for UpfData and UpfData related functions. 
""" - import errno import tempfile import shutil @@ -95,8 +94,8 @@ def setUp(self): def tearDown(self): """Delete all groups and destroy the temporary directory created.""" - for group in orm.Group.objects.find(filters={'type_string': orm.GroupTypeString.UPFGROUP_TYPE.value}): - orm.Group.objects.delete(group.pk) + for group in orm.UpfFamily.objects.find(): + orm.UpfFamily.objects.delete(group.pk) try: shutil.rmtree(self.temp_dir) @@ -122,32 +121,31 @@ def test_get_upf_family_names(self): """Test the `UpfData.get_upf_family_names` method.""" label = 'family' - family, _ = orm.Group.objects.get_or_create(label=label, type_string=orm.GroupTypeString.UPFGROUP_TYPE.value) + family, _ = orm.UpfFamily.objects.get_or_create(label=label) family.add_nodes([self.pseudo_barium]) family.store() - self.assertEqual({group.label for group in orm.UpfData.get_upf_groups()}, {label}) + self.assertEqual({group.label for group in orm.UpfFamily.objects.all()}, {label}) self.assertEqual(self.pseudo_barium.get_upf_family_names(), [label]) def test_get_upf_groups(self): """Test the `UpfData.get_upf_groups` class method.""" - type_string = orm.GroupTypeString.UPFGROUP_TYPE.value label_01 = 'family_01' label_02 = 'family_02' user = orm.User(email='alternate@localhost').store() - self.assertEqual(orm.UpfData.get_upf_groups(), []) + self.assertEqual(orm.UpfFamily.objects.all(), []) # Create group with default user and add `Ba` pseudo - family_01, _ = orm.Group.objects.get_or_create(label=label_01, type_string=type_string) + family_01, _ = orm.UpfFamily.objects.get_or_create(label=label_01) family_01.add_nodes([self.pseudo_barium]) family_01.store() self.assertEqual({group.label for group in orm.UpfData.get_upf_groups()}, {label_01}) # Create group with different user and add `O` pseudo - family_02, _ = orm.Group.objects.get_or_create(label=label_02, type_string=type_string, user=user) + family_02, _ = orm.UpfFamily.objects.get_or_create(label=label_02, user=user) family_02.add_nodes([self.pseudo_oxygen]) family_02.store() diff --git a/tests/orm/test_groups.py b/tests/orm/test_groups.py index ce2797daad..67b189195c 100644 --- a/tests/orm/test_groups.py +++ b/tests/orm/test_groups.py @@ -8,6 +8,8 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test for the Group ORM class.""" +import pytest + from aiida import orm from aiida.backends.testbase import AiidaTestCase from aiida.common import exceptions @@ -272,3 +274,54 @@ def test_group_uuid_hashing_for_querybuidler(self): # And that the results are correct self.assertEqual(builder.count(), 1) self.assertEqual(builder.first()[0], group.id) + + +class TestGroupsSubclasses(AiidaTestCase): + """Test rules around creating `Group` subclasses.""" + + @staticmethod + def test_creation_registered(): + """Test rules around creating registered `Group` subclasses.""" + group = orm.AutoGroup('some-label') + assert isinstance(group, orm.AutoGroup) + assert group.type_string == 'core.auto' + + group, _ = orm.AutoGroup.objects.get_or_create('some-auto-group') + assert isinstance(group, orm.AutoGroup) + assert group.type_string == 'core.auto' + + @staticmethod + def test_creation_unregistered(): + """Test rules around creating `Group` subclasses without a registered entry point.""" + + # Defining an unregistered subclas should issue a warning and its type string should be set to `None` + with pytest.warns(UserWarning): + + class SubGroup(orm.Group): + pass + + assert SubGroup._type_string is 
None # pylint: disable=protected-access + + # Creating an instance is allowed + instance = SubGroup(label='subgroup') + assert instance._type_string is None # pylint: disable=protected-access + + # Storing the instance, however, is forbidden and should raise + with pytest.raises(exceptions.StoringNotAllowed): + instance.store() + + @staticmethod + def test_loading_unregistered(): + """Test rules around loading `Group` subclasses without a registered entry point. + + Storing instances of unregistered subclasses is not allowed so we have to create one sneakily by instantiating + a normal group and manipulating the type string directly on the database model. + """ + group = orm.Group(label='group') + group.backend_entity.dbmodel.type_string = 'unregistered.subclass' + group.store() + + with pytest.warns(UserWarning): + loaded = orm.load_group(group.pk) + + assert isinstance(loaded, orm.Group) diff --git a/tests/tools/graph/test_age.py b/tests/tools/graph/test_age.py index dddf2323c2..538087c7d7 100644 --- a/tests/tools/graph/test_age.py +++ b/tests/tools/graph/test_age.py @@ -494,7 +494,7 @@ def test_groups(self): # Rule that only gets nodes connected by the same group queryb = orm.QueryBuilder() queryb.append(orm.Node, tag='nodes_in_set') - queryb.append(orm.Group, with_node='nodes_in_set', tag='groups_considered', filters={'type_string': 'user'}) + queryb.append(orm.Group, with_node='nodes_in_set', tag='groups_considered') queryb.append(orm.Data, with_group='groups_considered') initial_node = [node2.id] @@ -513,7 +513,7 @@ def test_groups(self): # But two rules chained should get both nodes and groups... queryb = orm.QueryBuilder() queryb.append(orm.Node, tag='nodes_in_set') - queryb.append(orm.Group, with_node='nodes_in_set', filters={'type_string': 'user'}) + queryb.append(orm.Group, with_node='nodes_in_set') rule1 = UpdateRule(queryb) queryb = orm.QueryBuilder() @@ -569,7 +569,7 @@ def test_groups(self): qb1 = orm.QueryBuilder() qb1.append(orm.Node, tag='nodes_in_set') - qb1.append(orm.Group, with_node='nodes_in_set', filters={'type_string': 'user'}) + qb1.append(orm.Group, with_node='nodes_in_set') rule1 = UpdateRule(qb1, track_edges=True) qb2 = orm.QueryBuilder() diff --git a/tests/tools/groups/test_paths.py b/tests/tools/groups/test_paths.py index a6f1cdb757..b6e8940ce5 100644 --- a/tests/tools/groups/test_paths.py +++ b/tests/tools/groups/test_paths.py @@ -7,19 +7,19 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -"""Tests for GroupPath""" # pylint: disable=redefined-outer-name,unused-argument +"""Tests for GroupPath""" import pytest from aiida import orm -from aiida.tools.groups.paths import (GroupAttr, GroupPath, InvalidPath, GroupNotFoundError, NoGroupsInPathError) +from aiida.tools.groups.paths import GroupAttr, GroupPath, InvalidPath, GroupNotFoundError, NoGroupsInPathError @pytest.fixture def setup_groups(clear_database_before_test): """Setup some groups for testing.""" for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f']: - group, _ = orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value) + group, _ = orm.Group.objects.get_or_create(label) group.description = 'A description of {}'.format(label) yield @@ -117,16 +117,17 @@ def test_walk(setup_groups): def test_walk_with_invalid_path(clear_database_before_test): + """Test the ``GroupPath.walk`` method with invalid paths.""" for label in 
['a', 'a/b', 'a/c/d', 'a/c/e/g', 'a/f', 'bad//group', 'bad/other']:
-        orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value)
+        orm.Group.objects.get_or_create(label)
     group_path = GroupPath()
-    assert [c.path for c in sorted(group_path.walk())
-           ] == ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g', 'a/f', 'bad', 'bad/other']
+    expected = ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g', 'a/f', 'bad', 'bad/other']
+    assert [c.path for c in sorted(group_path.walk())] == expected


 def test_walk_nodes(clear_database):
     """Test the ``GroupPath.walk_nodes()`` function."""
-    group, _ = orm.Group.objects.get_or_create('a', type_string=orm.GroupTypeString.USER.value)
+    group, _ = orm.Group.objects.get_or_create('a')
     node = orm.Data()
     node.set_attribute_many({'i': 1, 'j': 2})
     node.store()
@@ -135,17 +136,18 @@ def test_walk_nodes(clear_database):
     assert [(r.group_path.path, r.node.attributes) for r in group_path.walk_nodes()] == [('a', {'i': 1, 'j': 2})]


-def test_type_string(clear_database_before_test):
-    """Test that only the type_string instantiated in ``GroupPath`` is returned."""
+@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)')
+def test_cls(clear_database_before_test):
+    """Test that only instances of `cls` or its subclasses are matched by ``GroupPath``."""
     for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g']:
-        orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.USER.value)
+        orm.Group.objects.get_or_create(label)
     for label in ['a/c/e', 'a/f']:
-        orm.Group.objects.get_or_create(label, type_string=orm.GroupTypeString.UPFGROUP_TYPE.value)
+        orm.UpfFamily.objects.get_or_create(label)
     group_path = GroupPath()
     assert sorted([c.path for c in group_path.walk()]) == ['a', 'a/b', 'a/c', 'a/c/d', 'a/c/e', 'a/c/e/g']
-    group_path = GroupPath(type_string=orm.GroupTypeString.UPFGROUP_TYPE.value)
+    group_path = GroupPath(cls=orm.UpfFamily)
     assert sorted([c.path for c in group_path.walk()]) == ['a', 'a/c', 'a/c/e', 'a/f']
-    assert GroupPath('a/b/c') != GroupPath('a/b/c', type_string=orm.GroupTypeString.UPFGROUP_TYPE.value)
+    assert GroupPath('a/b/c') != GroupPath('a/b/c', cls=orm.UpfFamily)


 def test_attr(clear_database_before_test):
diff --git a/tests/tools/importexport/test_prov_redesign.py b/tests/tools/importexport/test_prov_redesign.py
index 37f9a485a0..5ef849c51c 100644
--- a/tests/tools/importexport/test_prov_redesign.py
+++ b/tests/tools/importexport/test_prov_redesign.py
@@ -229,7 +229,7 @@ def test_group_name_and_type_change(self, temp_dir):
         groups_type_string = [g.type_string for g in [group_user, group_upf]]

         # Assert correct type strings exists prior to export
-        self.assertListEqual(groups_type_string, ['user', 'data.upf'])
+        self.assertListEqual(groups_type_string, ['core', 'core.upf'])

         # Export node
         filename = os.path.join(temp_dir, 'export.tar.gz')
@@ -268,4 +268,4 @@ def test_group_name_and_type_change(self, temp_dir):

         # Check type_string content of "import group"
         import_group = orm.load_group(imported_groups_uuid[0])
-        self.assertEqual(import_group.type_string, 'auto.import')
+        self.assertEqual(import_group.type_string, 'core.import')

From e5eae67b96e933a30bac14043b225cb9a9275d68 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Thu, 9 Apr 2020 11:27:51 +0200
Subject: [PATCH 41/54] Add support for subclassing in `Groups` to the `QueryBuilder` (#3903)

This makes it possible to search for instances of `Group` and all of its
subclasses through the `QueryBuilder`, just as already implemented for the
`Node` class.
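
For illustration, a minimal sketch (`UpfFamily` is the registered
`core.upf` subclass used in the tests of this series):

    from aiida import orm

    # Matches `Group` itself plus every registered subclass, since
    # subclassing is enabled by default.
    orm.QueryBuilder().append(orm.Group).all()

    # Matches only groups whose `type_string` is exactly 'core.upf'.
    orm.QueryBuilder().append(orm.UpfFamily, subclassing=False).all()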
The subclassing is based on the `type_string` of the group instances,
which in turn is defined by the entry point name. When querying for
`Group` instances through the `QueryBuilder`:

    QueryBuilder().append(Group).all()

subclassing is enabled by default, so the query returns not only the
instances of `Group` itself, but also those of all its subclasses. The
same holds for subclasses of `Group` themselves. Imagine a plugin
defines the following entry points:

    'plugin.sub = aiida_plugin.groups:SubGroup'
    'plugin.sub.a = aiida_plugin.groups:SubAGroup'
    'plugin.sub.b = aiida_plugin.groups:SubBGroup'

The following query:

    QueryBuilder().append(SubGroup).all()

will return all instances of `SubGroup`, but also those of `SubAGroup`
and `SubBGroup`, because the entry point names of the latter two start
with the entry point name `plugin.sub` of the class that is being
appended.

Note that in order for all group instances to be returned when querying
for the `Group` base class, a small hack had to be applied where the
query builder constructs the filters. The entry point name of `Group`
is `core`, so with the rules described above the plugin subclasses
would never be matched, since their entry point names do not start with
`core`. This could be fixed by making the type string of base group
instances an empty string, but an empty entry point name for the
`Group` base class is not allowed. To still provide the desired
behavior, when querying for `Group` instances the `type_string` filter
is simply set to match anything.
---
 aiida/orm/querybuilder.py               | 113 +++++++++++++++++-------
 aiida/tools/graph/age_rules.py          |   4 +-
 aiida/tools/groups/paths.py             |   8 +-
 tests/cmdline/commands/test_group_ls.py |   4 -
 tests/orm/test_autogroups.py            |  30 ++-----
 tests/orm/test_groups.py                |  49 ++++++++++
 tests/orm/test_querybuilder.py          |   6 +-
 tests/tools/groups/test_paths.py        |  27 +++++-
 8 files changed, 172 insertions(+), 69 deletions(-)

diff --git a/aiida/orm/querybuilder.py b/aiida/orm/querybuilder.py
index 7f622bed06..701bdf89ed 100644
--- a/aiida/orm/querybuilder.py
+++ b/aiida/orm/querybuilder.py
@@ -49,6 +49,14 @@

 _LOGGER = logging.getLogger(__name__)

+# This global variable is necessary to enable the subclassing functionality for the `Group` entity. The current
+# implementation of the `QueryBuilder` was written with the assumption that only `Node` was subclassable. Support for
+# subclassing was added later for `Group` and is based on its `type_string`, but the current implementation does not
+# allow to extend this support to the `QueryBuilder` in an elegant way. The prefix `group.` needs to be used in various
+# places to make it work, but really the internals of the `QueryBuilder` should be rewritten to in principle support
+# subclassing for any entity type. This workaround should then be able to be removed.
+GROUP_ENTITY_TYPE_PREFIX = 'group.'
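# Illustrative aside (a sketch, not part of the patch hunk above): with this
# prefix, the classifier computed for `orm.Group` is 'group.core' and the one
# for `orm.UpfFamily` is 'group.core.upf'; `get_group_type_filter()` further
# down strips the prefix again before building the `type_string` filter.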
+ def get_querybuilder_classifiers_from_cls(cls, qb): """ @@ -83,10 +91,10 @@ def get_querybuilder_classifiers_from_cls(cls, qb): # Groups: elif issubclass(cls, qb.Group): - classifiers['ormclass_type_string'] = 'group' + classifiers['ormclass_type_string'] = GROUP_ENTITY_TYPE_PREFIX + cls._type_string ormclass = cls elif issubclass(cls, groups.Group): - classifiers['ormclass_type_string'] = 'group' + classifiers['ormclass_type_string'] = GROUP_ENTITY_TYPE_PREFIX + cls._type_string ormclass = qb.Group # Computers: @@ -164,7 +172,8 @@ def get_querybuilder_classifiers_from_type(ormclass_type_string, qb): classifiers['process_type_string'] = None classifiers['ormclass_type_string'] = ormclass_type_string.lower() - if classifiers['ormclass_type_string'] == 'group': + if classifiers['ormclass_type_string'].startswith(GROUP_ENTITY_TYPE_PREFIX): + classifiers['ormclass_type_string'] = 'group.core' ormclass = qb.Group elif classifiers['ormclass_type_string'] == 'computer': ormclass = qb.Computer @@ -179,11 +188,10 @@ def get_querybuilder_classifiers_from_type(ormclass_type_string, qb): if ormclass == qb.Node: is_valid_node_type_string(classifiers['ormclass_type_string'], raise_on_false=True) - return ormclass, classifiers -def get_type_filter(classifiers, subclassing): +def get_node_type_filter(classifiers, subclassing): """ Return filter dictionaries given a set of classifiers. @@ -199,13 +207,14 @@ def get_type_filter(classifiers, subclassing): value = classifiers['ormclass_type_string'] if not subclassing: - filter = {'==': value} + filters = {'==': value} else: # Note: the query_type_string always ends with a dot. This ensures that "like {str}%" matches *only* # the query type string - filter = {'like': '{}%'.format(escape_for_sql_like(get_query_type_from_type_string(value)))} + filters = {'like': '{}%'.format(escape_for_sql_like(get_query_type_from_type_string(value)))} + + return filters - return filter def get_process_type_filter(classifiers, subclassing): """ @@ -229,7 +238,7 @@ def get_process_type_filter(classifiers, subclassing): value = classifiers['process_type_string'] if not subclassing: - filter = {'==': value} + filters = {'==': value} else: if ':' in value: # if value is an entry point, do usual subclassing @@ -237,7 +246,7 @@ def get_process_type_filter(classifiers, subclassing): # Note: the process_type_string stored in the database does *not* end in a dot. # In order to avoid that querying for class 'Begin' will also find class 'BeginEnd', # we need to search separately for equality and 'like'. - filter = {'or': [ + filters = {'or': [ {'==': value}, {'like': escape_for_sql_like(get_query_string_from_process_type_string(value))}, ]} @@ -248,19 +257,46 @@ def get_process_type_filter(classifiers, subclassing): # between process classes and node classes # Note: Improve this when issue #2475 is addressed - filter = {'like': '%'} + filters = {'like': '%'} else: warnings.warn("Process type '{}' does not correspond to a registered entry. " 'This risks queries to fail once the location of the process class changes. ' "Add an entry point for '{}' to remove this warning.".format(value, value), AiidaEntryPointWarning) - filter = {'or': [ + filters = {'or': [ {'==': value}, {'like': escape_for_sql_like(get_query_string_from_process_type_string(value))}, ]} + return filters + + +def get_group_type_filter(classifiers, subclassing): + """Return filter dictionaries for `Group.type_string` given a set of classifiers. 
+ + :param classifiers: a dictionary with classifiers (note: does *not* support lists) + :param subclassing: if True, allow for subclasses of the ormclass + + :returns: dictionary in QueryBuilder filter language to pass into {'type_string': ... } + :rtype: dict + """ + from aiida.common.escaping import escape_for_sql_like + + value = classifiers['ormclass_type_string'].lstrip(GROUP_ENTITY_TYPE_PREFIX) + + if not subclassing: + filters = {'==': value} + else: + # This is a hardcoded solution to the problem that the base class `Group` should match all subclasses, however + # its entry point string is `core` and so will only match those subclasses whose entry point also starts with + # 'core', however, this is only the case for group subclasses shipped with `aiida-core`. Any plugins from + # external packages will never be matched. Making the entry point name of `Group` an empty string is also not + # possible so we perform the switch here in code. + if value == 'core': + value = '' + filters = {'like': '{}%'.format(escape_for_sql_like(value))} - return filter + return filters class QueryBuilder: @@ -692,20 +728,16 @@ def append(self, # FILTERS ###################################### try: self._filters[tag] = {} - # So far, only Node and its subclasses need additional filters on column type - # (for other classes, the "classifi. - # This so far only is necessary for AiidaNodes not for groups. - # Now here there is the issue that for everything else, - # the query_type_string is either None (e.g. if Group was passed) - # or a list of None (if (Group, ) was passed. - # Here we have to only call the function _add_type_filter essentially if it makes sense to - # For now that is only nodes, and it is hardcoded. In the future (e.g. we subclass group) - # this has to be added + # Subclassing is currently only implemented for the `Node` and `Group` classes. So for those cases we need + # to construct the correct filters corresponding to the provided classes and value of `subclassing`. if ormclass == self._impl.Node: - self._add_type_filter(tag, classifiers, subclassing) + self._add_node_type_filter(tag, classifiers, subclassing) self._add_process_type_filter(tag, classifiers, subclassing) - # The order has to be first _add_type_filter and then add_filter. + elif ormclass == self._impl.Group: + self._add_group_type_filter(tag, classifiers, subclassing) + + # The order has to be first _add_node_type_filter and then add_filter. # If the user adds a query on the type column, it overwrites what I did # if the user specified a filter, add it: if filters is not None: @@ -993,23 +1025,21 @@ def _process_filters(self, filters): return processed_filters - def _add_type_filter(self, tagspec, classifiers, subclassing): + def _add_node_type_filter(self, tagspec, classifiers, subclassing): """ - Add a filter based on type. + Add a filter based on node type. 
:param tagspec: The tag, which has to exist already as a key in self._filters :param classifiers: a dictionary with classifiers :param subclassing: if True, allow for subclasses of the ormclass """ - tag = self._get_tag_from_specification(tagspec) - if isinstance(classifiers, list): # If a list was passed to QueryBuilder.append, this propagates to a list in the classifiers entity_type_filter = {'or': []} for c in classifiers: - entity_type_filter['or'].append(get_type_filter(c, subclassing)) + entity_type_filter['or'].append(get_node_type_filter(c, subclassing)) else: - entity_type_filter = get_type_filter(classifiers, subclassing) + entity_type_filter = get_node_type_filter(classifiers, subclassing) self.add_filter(tagspec, {'node_type': entity_type_filter}) @@ -1023,8 +1053,6 @@ def _add_process_type_filter(self, tagspec, classifiers, subclassing): Note: This function handles the case when process_type_string is None. """ - tag = self._get_tag_from_specification(tagspec) - if isinstance(classifiers, list): # If a list was passed to QueryBuilder.append, this propagates to a list in the classifiers process_type_filter = {'or': []} @@ -1040,6 +1068,23 @@ def _add_process_type_filter(self, tagspec, classifiers, subclassing): process_type_filter = get_process_type_filter(classifiers, subclassing) self.add_filter(tagspec, {'process_type': process_type_filter}) + def _add_group_type_filter(self, tagspec, classifiers, subclassing): + """ + Add a filter based on group type. + + :param tagspec: The tag, which has to exist already as a key in self._filters + :param classifiers: a dictionary with classifiers + :param subclassing: if True, allow for subclasses of the ormclass + """ + if isinstance(classifiers, list): + # If a list was passed to QueryBuilder.append, this propagates to a list in the classifiers + type_string_filter = {'or': []} + for classifier in classifiers: + type_string_filter['or'].append(get_group_type_filter(classifier, subclassing)) + else: + type_string_filter = get_group_type_filter(classifiers, subclassing) + + self.add_filter(tagspec, {'type_string': type_string_filter}) def add_projection(self, tag_spec, projection_spec): r""" @@ -1678,7 +1723,9 @@ def _get_connecting_node(self, index, joining_keyword=None, joining_value=None, :param joining_value: the tag of the nodes to be joined """ # Set the calling entity - to allow for the correct join relation to be set - if self._path[index]['entity_type'] not in ['computer', 'user', 'group', 'comment', 'log']: + if self._path[index]['entity_type'].startswith(GROUP_ENTITY_TYPE_PREFIX): + calling_entity = 'group' + elif self._path[index]['entity_type'] not in ['computer', 'user', 'comment', 'log']: calling_entity = 'node' else: calling_entity = self._path[index]['entity_type'] diff --git a/aiida/tools/graph/age_rules.py b/aiida/tools/graph/age_rules.py index ed023aa6a7..09f373a901 100644 --- a/aiida/tools/graph/age_rules.py +++ b/aiida/tools/graph/age_rules.py @@ -77,6 +77,8 @@ def __init__(self, querybuilder, max_iterations=1, track_edges=False): super().__init__(max_iterations, track_edges=track_edges) def get_spec_from_path(queryhelp, idx): + from aiida.orm.querybuilder import GROUP_ENTITY_TYPE_PREFIX + if ( queryhelp['path'][idx]['entity_type'].startswith('node') or queryhelp['path'][idx]['entity_type'].startswith('data') or @@ -84,7 +86,7 @@ def get_spec_from_path(queryhelp, idx): queryhelp['path'][idx]['entity_type'] == '' ): result = 'nodes' - elif queryhelp['path'][idx]['entity_type'] == 'group': + elif 
queryhelp['path'][idx]['entity_type'].startswith(GROUP_ENTITY_TYPE_PREFIX): result = 'groups' else: raise Exception('not understood entity from ( {} )'.format(queryhelp['path'][idx]['entity_type'])) diff --git a/aiida/tools/groups/paths.py b/aiida/tools/groups/paths.py index b025ab250e..362d1da1f8 100644 --- a/aiida/tools/groups/paths.py +++ b/aiida/tools/groups/paths.py @@ -171,7 +171,7 @@ def get_group(self): # type: () -> Optional[self.cls] """Return the concrete group associated with this path.""" try: - return self.cls.objects.get(label=self.path) + return orm.QueryBuilder().append(self.cls, subclassing=False, filters={'label': self.path}).one()[0] except NotExistent: return None @@ -188,7 +188,7 @@ def group_ids(self): """ query = orm.QueryBuilder() filters = {'label': self.path} - query.append(self.cls, filters=filters, project='id') + query.append(self.cls, subclassing=False, filters=filters, project='id') return [r[0] for r in query.all()] @property @@ -222,7 +222,7 @@ def children(self): filters = {} if self.path: filters['label'] = {'like': self.path + self.delimiter + '%'} - query.append(self.cls, filters=filters, project='label') + query.append(self.cls, subclassing=False, filters=filters, project='label') if query.count() == 0 and self.is_virtual: raise NoGroupsInPathError(self) @@ -282,7 +282,7 @@ def walk_nodes(self, filters=None, node_class=None, query_batch=None): group_filters = {} if self.path: group_filters['label'] = {'or': [{'==': self.path}, {'like': self.path + self.delimiter + '%'}]} - query.append(self.cls, filters=group_filters, project='label', tag='group') + query.append(self.cls, subclassing=False, filters=group_filters, project='label', tag='group') query.append( orm.Node if node_class is None else node_class, with_group='group', diff --git a/tests/cmdline/commands/test_group_ls.py b/tests/cmdline/commands/test_group_ls.py index 1a8bb7d2ef..d1982d56b5 100644 --- a/tests/cmdline/commands/test_group_ls.py +++ b/tests/cmdline/commands/test_group_ls.py @@ -28,7 +28,6 @@ def setup_groups(clear_database_before_test): yield -@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') def test_with_no_opts(setup_groups): """Test ``verdi group path ls``""" @@ -47,7 +46,6 @@ def test_with_no_opts(setup_groups): assert result.output == 'a/c/d\na/c/e\n' -@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') def test_recursive(setup_groups): """Test ``verdi group path ls --recursive``""" @@ -63,7 +61,6 @@ def test_recursive(setup_groups): assert result.output == 'a/c/d\na/c/e\na/c/e/g\n' -@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') @pytest.mark.parametrize('tag', ['-l', '--long']) def test_long(setup_groups, tag): """Test ``verdi group path ls --long``""" @@ -109,7 +106,6 @@ def test_long(setup_groups, tag): ) -@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') @pytest.mark.parametrize('tag', ['--no-virtual']) def test_groups_only(setup_groups, tag): """Test ``verdi group path ls --no-virtual``""" diff --git a/tests/orm/test_autogroups.py b/tests/orm/test_autogroups.py index e1426ad2e8..9deed78bed 100644 --- a/tests/orm/test_autogroups.py +++ b/tests/orm/test_autogroups.py @@ -9,7 +9,7 @@ ########################################################################### """Tests for the Autogroup functionality.""" from aiida.backends.testbase import AiidaTestCase -from aiida.orm import Group, QueryBuilder +from aiida.orm 
import AutoGroup, QueryBuilder from aiida.orm.autogroup import Autogroup @@ -21,16 +21,9 @@ def test_get_or_create(self): label_prefix = 'test_prefix_TestAutogroup' # Check that there are no groups to begin with - queryb = QueryBuilder().append(Group, filters={'type_string': 'auto.run', 'label': label_prefix}, project='*') + queryb = QueryBuilder().append(AutoGroup, filters={'label': label_prefix}) assert not list(queryb.all()) - queryb = QueryBuilder().append( - Group, filters={ - 'type_string': 'auto.run', - 'label': { - 'like': r'{}\_%'.format(label_prefix) - } - }, project='*' - ) + queryb = QueryBuilder().append(AutoGroup, filters={'label': {'like': r'{}\_%'.format(label_prefix)}}) assert not list(queryb.all()) # First group (no existing one) @@ -64,7 +57,7 @@ def test_get_or_create(self): ) # I create a group with a large integer suffix (9) - Group(label='{}_9'.format(label_prefix), type_string='auto.run').store() + AutoGroup(label='{}_9'.format(label_prefix), type_string='auto.run').store() # The next autogroup should become number 10 autogroup = Autogroup() autogroup.set_group_label_prefix(label_prefix) @@ -76,7 +69,7 @@ def test_get_or_create(self): ) # I create a group with a non-integer suffix (15a), it should be ignored - Group(label='{}_15b'.format(label_prefix), type_string='auto.run').store() + AutoGroup(label='{}_15b'.format(label_prefix), type_string='auto.run').store() # The next autogroup should become number 11 autogroup = Autogroup() autogroup.set_group_label_prefix(label_prefix) @@ -93,19 +86,12 @@ def test_get_or_create_invalid_prefix(self): label_prefix = 'new_test_prefix_TestAutogroup' # I create a group with the same prefix, but followed by non-underscore # characters. These should be ignored in the logic. - Group(label='{}xx'.format(label_prefix), type_string='auto.run').store() + AutoGroup(label='{}xx'.format(label_prefix), type_string='auto.run').store() # Check that there are no groups to begin with - queryb = QueryBuilder().append(Group, filters={'type_string': 'auto.run', 'label': label_prefix}, project='*') + queryb = QueryBuilder().append(AutoGroup, filters={'label': label_prefix}) assert not list(queryb.all()) - queryb = QueryBuilder().append( - Group, filters={ - 'type_string': 'auto.run', - 'label': { - 'like': r'{}\_%'.format(label_prefix) - } - }, project='*' - ) + queryb = QueryBuilder().append(AutoGroup, filters={'label': {'like': r'{}\_%'.format(label_prefix)}}) assert not list(queryb.all()) # First group (no existing one) diff --git a/tests/orm/test_groups.py b/tests/orm/test_groups.py index 67b189195c..46f4ddf232 100644 --- a/tests/orm/test_groups.py +++ b/tests/orm/test_groups.py @@ -279,6 +279,11 @@ def test_group_uuid_hashing_for_querybuidler(self): class TestGroupsSubclasses(AiidaTestCase): """Test rules around creating `Group` subclasses.""" + def setUp(self): + """Remove all existing Groups.""" + for group in orm.Group.objects.all(): + orm.Group.objects.delete(group.id) + @staticmethod def test_creation_registered(): """Test rules around creating registered `Group` subclasses.""" @@ -290,6 +295,17 @@ def test_creation_registered(): assert isinstance(group, orm.AutoGroup) assert group.type_string == 'core.auto' + @staticmethod + def test_loading(): + """Test that loading instances from the database returns the correct subclass of `Group`.""" + group = orm.Group('normal-group').store() + loaded = orm.load_group(group.pk) + assert isinstance(loaded, orm.Group) + + group = orm.AutoGroup('auto-group').store() + loaded = orm.load_group(group.pk) 
+ assert isinstance(group, orm.AutoGroup) + @staticmethod def test_creation_unregistered(): """Test rules around creating `Group` subclasses without a registered entry point.""" @@ -325,3 +341,36 @@ def test_loading_unregistered(): loaded = orm.load_group(group.pk) assert isinstance(loaded, orm.Group) + + @staticmethod + def test_querying(): + """Test querying for groups with and without subclassing.""" + orm.Group(label='group').store() + orm.AutoGroup(label='auto-group').store() + + # Fake a subclass by manually setting the type string + group = orm.Group(label='custom-group') + group.backend_entity.dbmodel.type_string = 'custom.group' + group.store() + + assert orm.QueryBuilder().append(orm.AutoGroup).count() == 1 + assert orm.QueryBuilder().append(orm.AutoGroup, subclassing=False).count() == 1 + assert orm.QueryBuilder().append(orm.Group, subclassing=False).count() == 1 + assert orm.QueryBuilder().append(orm.Group).count() == 3 + assert orm.QueryBuilder().append(orm.Group, filters={'type_string': 'custom.group'}).count() == 1 + + @staticmethod + def test_query_with_group(): + """Docs.""" + group = orm.Group(label='group').store() + data = orm.Data().store() + + group.add_nodes([data]) + + builder = orm.QueryBuilder().append(orm.Data, filters={ + 'id': data.pk + }, tag='data').append(orm.Group, with_node='data') + + loaded = builder.one()[0] + + assert loaded.pk == group.pk diff --git a/tests/orm/test_querybuilder.py b/tests/orm/test_querybuilder.py index 68fad02f9d..ef7625d0ef 100644 --- a/tests/orm/test_querybuilder.py +++ b/tests/orm/test_querybuilder.py @@ -60,10 +60,10 @@ def test_ormclass_type_classification(self): for _cls, classifiers in ( qb._get_ormclass(orm.Group, None), - qb._get_ormclass(None, 'group'), - qb._get_ormclass(None, 'Group'), + qb._get_ormclass(None, 'group.core'), + qb._get_ormclass(None, 'Group.core'), ): - self.assertEqual(classifiers['ormclass_type_string'], 'group') + self.assertTrue(classifiers['ormclass_type_string'].startswith('group')) for _cls, classifiers in ( qb._get_ormclass(orm.User, None), diff --git a/tests/tools/groups/test_paths.py b/tests/tools/groups/test_paths.py index b6e8940ce5..75e445bb97 100644 --- a/tests/tools/groups/test_paths.py +++ b/tests/tools/groups/test_paths.py @@ -125,7 +125,7 @@ def test_walk_with_invalid_path(clear_database_before_test): assert [c.path for c in sorted(group_path.walk())] == expected -def test_walk_nodes(clear_database): +def test_walk_nodes(clear_database_before_test): """Test the ``GroupPath.walk_nodes()`` function.""" group, _ = orm.Group.objects.get_or_create('a') node = orm.Data() @@ -136,7 +136,6 @@ def test_walk_nodes(clear_database): assert [(r.group_path.path, r.node.attributes) for r in group_path.walk_nodes()] == [('a', {'i': 1, 'j': 2})] -@pytest.mark.skip('Reenable when subclassing in the query builder is implemented (#3902)') def test_cls(clear_database_before_test): """Test that only instances of `cls` or its subclasses are matched by ``GroupPath``.""" for label in ['a', 'a/b', 'a/c/d', 'a/c/e/g']: @@ -161,3 +160,27 @@ def test_attr(clear_database_before_test): assert not set(group_path.browse.__dir__()).intersection(['bad space', 'bad@char', '_badstart']) with pytest.raises(AttributeError): group_path.browse.a.c.x # pylint: disable=pointless-statement + + +def test_cls_label_clashes(clear_database_before_test): + """Test behaviour when multiple group classes have the same label.""" + group_01, _ = orm.Group.objects.get_or_create('a') + node_01 = orm.Data().store() + group_01.add_nodes(node_01) 
+
+    group_02, _ = orm.UpfFamily.objects.get_or_create('a')
+    node_02 = orm.Data().store()
+    group_02.add_nodes(node_02)
+
+    # Requests for non-existing groups should return `None`
+    assert GroupPath('b').get_group() is None
+
+    assert GroupPath('a').group_ids == [group_01.pk]
+    assert GroupPath('a').get_group().pk == group_01.pk
+    expected = [('a', node_01.pk)]
+    assert [(r.group_path.path, r.node.pk) for r in GroupPath('a').walk_nodes()] == expected
+
+    assert GroupPath('a', cls=orm.UpfFamily).group_ids == [group_02.pk]
+    assert GroupPath('a', cls=orm.UpfFamily).get_group().pk == group_02.pk
+    expected = [('a', node_02.pk)]
+    assert [(r.group_path.path, r.node.pk) for r in GroupPath('a', cls=orm.UpfFamily).walk_nodes()] == expected

From 41a4e29b46c8b63c7602900493e74eff1f03b7d7 Mon Sep 17 00:00:00 2001
From: Carl Simon Adorf
Date: Thu, 9 Apr 2020 18:57:39 +0200
Subject: [PATCH 42/54] Trigger test-install workflow for pull requests. (#3918)

Replace `push` with `pull_request` trigger.

---
 .github/workflows/test-install.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-install.yml b/.github/workflows/test-install.yml
index 54be58f1d5..1f06daae6f 100644
--- a/.github/workflows/test-install.yml
+++ b/.github/workflows/test-install.yml
@@ -1,7 +1,7 @@
 name: test-install

 on:
-  push:
+  pull_request:
     paths:
       - 'setup.*'
       - 'environment.yml'

From ff9e8a29a38c2871fadf8a718843d0b57f44efa9 Mon Sep 17 00:00:00 2001
From: "Jason.Eu"
Date: Sun, 12 Apr 2020 01:54:07 +0800
Subject: [PATCH 43/54] Support for BandsData nodes without StructureData ancestors (#3817)

When storing `BandsData` nodes without `StructureData` ancestors, `verdi
data bands list` would fail with a `KeyError`, because it requires the
formula from the connected structure. This PR now uses the placeholder
formula `<>` for isolated `BandsData` nodes, which fixes `verdi data
bands list` for this use case. It also gets rid of a backend-specific
implementation of `_extract_formula`.
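
A minimal sketch of the previously failing case, mirroring the test
added below (an isolated `BandsData` node with no `StructureData`
ancestor):

    from aiida import orm

    bands = orm.BandsData()  # not linked to any StructureData
    bands.store()

    # `verdi data bands list` used to raise a KeyError for such a node;
    # it is now listed with the placeholder formula '<>'.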
--- aiida/backends/djsite/queries.py | 51 +++++-------------- aiida/backends/general/abstractqueries.py | 61 ++++++++++++++++------- tests/cmdline/commands/test_data.py | 13 ++++- 3 files changed, 68 insertions(+), 57 deletions(-) diff --git a/aiida/backends/djsite/queries.py b/aiida/backends/djsite/queries.py index 209d646306..ff8121c7ab 100644 --- a/aiida/backends/djsite/queries.py +++ b/aiida/backends/djsite/queries.py @@ -108,43 +108,6 @@ def query_group(q_object, args): if args.group_pk is not None: q_object.add(Q(dbgroups__pk__in=args.group_pk), Q.AND) - @staticmethod - def _extract_formula(struc_pk, args, deser_data): - """Extract formula.""" - from aiida.orm.nodes.data.structure import (get_formula, get_symbols_string) - - if struc_pk is not None: - # Exclude structures by the elements - if args.element is not None: - all_kinds = [k['symbols'] for k in deser_data[struc_pk]['kinds']] - all_symbols = [item for sublist in all_kinds for item in sublist] - if not any([s in args.element for s in all_symbols]): - return None - if args.element_only is not None: - all_kinds = [k['symbols'] for k in deser_data[struc_pk]['kinds']] - all_symbols = [item for sublist in all_kinds for item in sublist] - if not all([s in all_symbols for s in args.element_only]): - return None - - # build the formula - symbol_dict = { - k['name']: get_symbols_string(k['symbols'], k['weights']) for k in deser_data[struc_pk]['kinds'] - } - try: - symbol_list = [symbol_dict[s['kind_name']] for s in deser_data[struc_pk]['sites']] - formula = get_formula(symbol_list, mode=args.formulamode) - # If for some reason there is no kind with the name - # referenced by the site - except KeyError: - formula = '<>' - # cycle if we imposed the filter on elements - if args.element is not None or args.element_only is not None: - return None - else: - formula = '<>' - - return formula - def get_bands_and_parents_structure(self, args): """Returns bands and closest parent structure.""" from django.db.models import Q @@ -175,14 +138,24 @@ def get_bands_and_parents_structure(self, args): # get the closest structures (WITHOUT DbPath) structure_dict = get_closest_parents(pks, Q(node_type='data.structure.StructureData.'), chunk_size=1) - struc_pks = [structure_dict[pk] for pk in pks] + struc_pks = [structure_dict.get(pk) for pk in pks] # query for the attributes needed for the structure formula res_attr = models.DbNode.objects.filter(id__in=struc_pks).values_list('id', 'attributes') + res_attr = {rattr[0]: rattr[1] for rattr in res_attr} # prepare the printout for (b_id_lbl_date, struc_pk) in zip(this_chunk, struc_pks): - formula = self._extract_formula(struc_pk, args, {rattr[0]: rattr[1] for rattr in res_attr}) + if struc_pk is not None: + strct = res_attr[struc_pk] + akinds, asites = strct['kinds'], strct['sites'] + formula = self._extract_formula(akinds, asites, args) + else: + if args.element is not None or args.element_only is not None: + formula = None + else: + formula = '<>' + if formula is None: continue entry_list.append([ diff --git a/aiida/backends/general/abstractqueries.py b/aiida/backends/general/abstractqueries.py index bf8a74bca1..1851eca2c6 100644 --- a/aiida/backends/general/abstractqueries.py +++ b/aiida/backends/general/abstractqueries.py @@ -120,8 +120,19 @@ def get_statistics_dict(dataset): return statistics @staticmethod - def _extract_formula(args, akinds, asites): - """Extract formula from the structure object.""" + def _extract_formula(akinds, asites, args): + """ + Extract formula from the structure object. 
+ + :param akinds: list of kinds, e.g. [{'mass': 55.845, 'name': 'Fe', 'symbols': ['Fe'], 'weights': [1.0]}, + {'mass': 15.9994, 'name': 'O', 'symbols': ['O'], 'weights': [1.0]}] + :param asites: list of structure sites e.g. [{'position': [0.0, 0.0, 0.0], 'kind_name': 'Fe'}, + {'position': [2.0, 2.0, 2.0], 'kind_name': 'O'}] + :param args: a namespace with parsed command line parameters, here only 'element' and 'element_only' are used + :type args: dict + + :return: a string with formula if the formula is found + """ from aiida.orm.nodes.data.structure import (get_formula, get_symbols_string) if args.element is not None: @@ -136,7 +147,7 @@ def _extract_formula(args, akinds, asites): # We want only the StructureData that have attributes if akinds is None or asites is None: - return None + return '<>' symbol_dict = {} for k in akinds: @@ -161,7 +172,9 @@ def get_bands_and_parents_structure(self, args): :returns: A list of sublists, each latter containing (in order): - pk as string, formula as string, creation date, bandsdata-label""" + pk as string, formula as string, creation date, bandsdata-label + """ + # pylint: disable=too-many-locals import datetime from aiida.common import timezone @@ -173,22 +186,23 @@ def get_bands_and_parents_structure(self, args): else: q_build.append(orm.User, tag='creator') - bdata_filters = {} - if args.past_days is not None: - bdata_filters.update({'ctime': {'>=': timezone.now() - datetime.timedelta(days=args.past_days)}}) - - q_build.append( - orm.BandsData, tag='bdata', with_user='creator', filters=bdata_filters, project=['id', 'label', 'ctime'] - ) - group_filters = {} if args.group_name is not None: group_filters.update({'name': {'in': args.group_name}}) if args.group_pk is not None: group_filters.update({'id': {'in': args.group_pk}}) - if group_filters: - q_build.append(orm.Group, tag='group', filters=group_filters, with_node='bdata') + + q_build.append(orm.Group, tag='group', filters=group_filters, with_user='creator') + + bdata_filters = {} + if args.past_days is not None: + bdata_filters.update({'ctime': {'>=': timezone.now() - datetime.timedelta(days=args.past_days)}}) + + q_build.append( + orm.BandsData, tag='bdata', with_group='group', filters=bdata_filters, project=['id', 'label', 'ctime'] + ) + bands_list_data = q_build.all() q_build.append( orm.StructureData, @@ -200,12 +214,15 @@ def get_bands_and_parents_structure(self, args): q_build.order_by({orm.StructureData: {'ctime': 'desc'}}) - list_data = q_build.distinct() + structure_dict = dict() + list_data = q_build.distinct().all() + for bid, _, _, _, akinds, asites in list_data: + structure_dict[bid] = (akinds, asites) entry_list = [] already_visited_bdata = set() - for [bid, blabel, bdate, _, akinds, asites] in list_data.all(): + for [bid, blabel, bdate] in bands_list_data: # We process only one StructureData per BandsData. 
# We want to process the closest StructureData to @@ -217,7 +234,17 @@ def get_bands_and_parents_structure(self, args): if already_visited_bdata.__contains__(bid): continue already_visited_bdata.add(bid) - formula = self._extract_formula(args, akinds, asites) + strct = structure_dict.get(bid, None) + + if strct is not None: + akinds, asites = strct + formula = self._extract_formula(akinds, asites, args) + else: + if args.element is not None or args.element_only is not None: + formula = None + else: + formula = '<>' + if formula is None: continue entry_list.append([str(bid), str(formula), bdate.strftime('%d %b %Y'), blabel]) diff --git a/tests/cmdline/commands/test_data.py b/tests/cmdline/commands/test_data.py index ff63bfa927..11764640e3 100644 --- a/tests/cmdline/commands/test_data.py +++ b/tests/cmdline/commands/test_data.py @@ -141,7 +141,6 @@ def data_listing_test(self, datatype, search_string, ids): # Check that the past days filter works as expected past_days_flags = ['-p', '--past-days'] - # past_days_flags = ['-p'] for flag in past_days_flags: options = [flag, '1'] res = self.cli_runner.invoke(listing_cmd, options, catch_exceptions=False) @@ -158,6 +157,7 @@ def data_listing_test(self, datatype, search_string, ids): ) # Check that the group filter works as expected + # if ids is not None: group_flags = ['-G', '--groups'] for flag in group_flags: # Non empty group @@ -289,10 +289,14 @@ def connect_structure_bands(strct): # pylint: disable=unused-argument bands = connect_structure_bands(strct) + bands_isolated = BandsData() + bands_isolated.store() + # Create 2 groups and add the data to one of them g_ne = Group(label='non_empty_group') g_ne.store() g_ne.add_nodes(bands) + g_ne.add_nodes(bands_isolated) g_e = Group(label='empty_group') g_e.store() @@ -321,6 +325,13 @@ def test_bandlistshelp(self): def test_bandslist(self): self.data_listing_test(BandsData, 'FeO', self.ids) + self.data_listing_test(BandsData, '<>', self.ids) + + def test_bandslist_with_elements(self): + options = ['-e', 'Fe'] + res = self.cli_runner.invoke(cmd_bands.bands_list, options, catch_exceptions=False) + self.assertIn(b'FeO', res.stdout_bytes, 'The string "FeO" was not found in the listing') + self.assertNotIn(b'<>', res.stdout_bytes, 'The string "<>" should not in the listing') def test_bandexporthelp(self): output = sp.check_output(['verdi', 'data', 'bands', 'export', '--help']) From c74048b1518411c063e178b3cbd6b2d425f16fed Mon Sep 17 00:00:00 2001 From: "Jason.Eu" Date: Mon, 13 Apr 2020 02:54:06 +0800 Subject: [PATCH 44/54] Plot bands with only one kpoint (#3798) Add support for plotting `BandsData` instances with a single kpoint. 
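
A sketch of the newly supported case, mirroring the regression test for
issue #2462 added below:

    from aiida import orm

    kpoints = orm.KpointsData()
    kpoints.set_kpoints([[0., 0., 0.]])  # a single k-point

    bands = orm.BandsData()
    bands.set_kpointsdata(kpoints)
    bands.set_bands([[1.0, 2.0]])
    bands.store()

    # The matplotlib formats now draw the eigenvalues as scatter markers
    # and the gnuplot format as short horizontal vectors, instead of
    # skipping the single-point path and drawing nothing (the old
    # template ignored paths of length <= 1).
    bands.show_mpl()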
--- aiida/orm/nodes/data/array/bands.py | 291 +++++++++++++++------------- tests/cmdline/commands/test_data.py | 27 ++- 2 files changed, 185 insertions(+), 133 deletions(-) diff --git a/aiida/orm/nodes/data/array/bands.py b/aiida/orm/nodes/data/array/bands.py index b636f00097..4c78eac4ea 100644 --- a/aiida/orm/nodes/data/array/bands.py +++ b/aiida/orm/nodes/data/array/bands.py @@ -825,7 +825,7 @@ def _prepare_mpl_singlefile(self, *args, **kwargs): s_header = matplotlib_header_template.substitute() s_import = matplotlib_import_data_inline_template.substitute(all_data_json=json.dumps(all_data, indent=2)) - s_body = matplotlib_body_template.substitute() + s_body = self._get_mpl_body_template(all_data['paths']) s_footer = matplotlib_footer_template_show.substitute() s = s_header + s_import + s_body + s_footer @@ -854,114 +854,13 @@ def _prepare_mpl_withjson(self, main_file_name='', *args, **kwargs): s_header = matplotlib_header_template.substitute() s_import = matplotlib_import_data_fromfile_template.substitute(json_fname=json_fname) - s_body = matplotlib_body_template.substitute() + s_body = self._get_mpl_body_template(all_data['paths']) s_footer = matplotlib_footer_template_show.substitute() s = s_header + s_import + s_body + s_footer return s.encode('utf-8'), ext_files - def _prepare_gnuplot(self, - main_file_name='', - title='', - comments=True, - prettify_format=None, - y_max_lim=None, - y_min_lim=None, - y_origin=0.): - """ - Prepare an gnuplot script to plot the bands, with the .dat file - returned as an independent file. - - :param main_file_name: if the user asks to write the main content on a - file, this contains the filename. This should be used to infer a - good filename for the additional files. - In this case, we remove the extension, and add '_data.dat' - :param title: if specified, add a title to the plot - :param comments: if True, print comments (if it makes sense for the given - format) - :param prettify_format: if None, use the default prettify format. Otherwise - specify a string with the prettifier to use. - """ - import os - - dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat' - - if prettify_format is None: - # Default. Specified like this to allow caller functions to pass 'None' - prettify_format = 'gnuplot_seekpath' - - plot_info = self._get_bandplot_data( - cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin) - - bands = plot_info['y'] - x = plot_info['x'] - labels = plot_info['labels'] - - num_labels = len(labels) - num_bands = bands.shape[1] - - # axis limits - if y_max_lim is None: - y_max_lim = bands.max() - if y_min_lim is None: - y_min_lim = bands.min() - x_min_lim = min(x) # this isn't a numpy array, but a list - x_max_lim = max(x) - - # first prepare the xy coordinates of the sets - raw_data, _ = self._prepare_dat_blocks(plot_info, comments=comments) - - xtics_string = ', '.join('"{}" {}'.format(label, pos) for pos, label in plot_info['labels']) - - script = [] - # Start with some useful comments - - if comments: - script.append(prepare_header_comment(self.uuid, plot_info=plot_info, comment_char='# ')) - script.append('') - - script.append(u"""## Uncomment the next two lines to write directly to PDF -## Note: You need to have gnuplot installed with pdfcairo support! 
-#set term pdfcairo -#set output 'out.pdf' - -### Uncomment one of the options below to change font -### For the LaTeX fonts, you can download them from here: -### https://sourceforge.net/projects/cm-unicode/ -### And then install them in your system -## LaTeX Serif font, if installed -#set termopt font "CMU Serif, 12" -## LaTeX Sans Serif font, if installed -#set termopt font "CMU Sans Serif, 12" -## Classical Times New Roman -#set termopt font "Times New Roman, 12" -""") - - # Actual logic - script.append('set termopt enhanced') # Properly deals with e.g. subscripts - script.append('set encoding utf8') # To deal with Greek letters - script.append('set xtics ({})'.format(xtics_string)) - script.append('set grid xtics lt 1 lc rgb "#888888"') - - script.append('unset key') - - script.append('set xrange [{}:{}]'.format(x_min_lim, x_max_lim)) - script.append('set yrange [{}:{}]'.format(y_min_lim, y_max_lim)) - - script.append('set ylabel "{}"'.format('Dispersion ({})'.format(self.units))) - - if title: - script.append('set title "{}"'.format(title.replace('"', '\"'))) - - # Plot, escaping filename - script.append('plot "{}" with l lc rgb "#000000"'.format(os.path.basename(dat_filename).replace('"', '\"'))) - - script_data = '\n'.join(script) + '\n' - extra_files = {dat_filename: raw_data} - - return script_data.encode('utf-8'), extra_files - def _prepare_mpl_pdf(self, main_file_name='', *args, **kwargs): """ Prepare a python script using matplotlib to plot the bands, with the JSON @@ -982,7 +881,7 @@ def _prepare_mpl_pdf(self, main_file_name='', *args, **kwargs): # Use the Agg backend s_header = matplotlib_header_agg_template.substitute() s_import = matplotlib_import_data_inline_template.substitute(all_data_json=json.dumps(all_data, indent=2)) - s_body = matplotlib_body_template.substitute() + s_body = self._get_mpl_body_template(all_data['paths']) # I get a temporary file name handle, filename = tempfile.mkstemp() @@ -1033,7 +932,7 @@ def _prepare_mpl_png(self, main_file_name='', *args, **kwargs): # Use the Agg backend s_header = matplotlib_header_agg_template.substitute() s_import = matplotlib_import_data_inline_template.substitute(all_data_json=json.dumps(all_data, indent=2)) - s_body = matplotlib_body_template.substitute() + s_body = self._get_mpl_body_template(all_data['paths']) # I get a temporary file name handle, filename = tempfile.mkstemp() @@ -1064,6 +963,17 @@ def _prepare_mpl_png(self, main_file_name='', *args, **kwargs): return imgdata, {} + @staticmethod + def _get_mpl_body_template(paths): + """ + :param paths: paths of k-points + """ + if len(paths) == 1: + s_body = matplotlib_body_template.substitute(plot_code=single_kp) + else: + s_body = matplotlib_body_template.substitute(plot_code=multi_kp) + return s_body + def show_mpl(self, **kwargs): """ Call a show() command for the band structure using matplotlib. @@ -1074,6 +984,113 @@ def show_mpl(self, **kwargs): """ exec(*self._exportcontent(fileformat='mpl_singlefile', main_file_name='', **kwargs)) # pylint: disable=exec-used + def _prepare_gnuplot(self, + main_file_name=None, + title='', + comments=True, + prettify_format=None, + y_max_lim=None, + y_min_lim=None, + y_origin=0.): + """ + Prepare an gnuplot script to plot the bands, with the .dat file + returned as an independent file. + + :param main_file_name: if the user asks to write the main content on a + file, this contains the filename. This should be used to infer a + good filename for the additional files. 
+ In this case, we remove the extension, and add '_data.dat' + :param title: if specified, add a title to the plot + :param comments: if True, print comments (if it makes sense for the given + format) + :param prettify_format: if None, use the default prettify format. Otherwise + specify a string with the prettifier to use. + """ + import os + + main_file_name = main_file_name or 'band.dat' + dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat' + + if prettify_format is None: + # Default. Specified like this to allow caller functions to pass 'None' + prettify_format = 'gnuplot_seekpath' + + plot_info = self._get_bandplot_data( + cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin) + + bands = plot_info['y'] + x = plot_info['x'] + labels = plot_info['labels'] + + num_labels = len(labels) + num_bands = bands.shape[1] + + # axis limits + if y_max_lim is None: + y_max_lim = bands.max() + if y_min_lim is None: + y_min_lim = bands.min() + x_min_lim = min(x) # this isn't a numpy array, but a list + x_max_lim = max(x) + + # first prepare the xy coordinates of the sets + raw_data, _ = self._prepare_dat_blocks(plot_info, comments=comments) + + xtics_string = ', '.join('"{}" {}'.format(label, pos) for pos, label in plot_info['labels']) + + script = [] + # Start with some useful comments + + if comments: + script.append(prepare_header_comment(self.uuid, plot_info=plot_info, comment_char='# ')) + script.append('') + + script.append(u"""## Uncomment the next two lines to write directly to PDF +## Note: You need to have gnuplot installed with pdfcairo support! +#set term pdfcairo +#set output 'out.pdf' + +### Uncomment one of the options below to change font +### For the LaTeX fonts, you can download them from here: +### https://sourceforge.net/projects/cm-unicode/ +### And then install them in your system +## LaTeX Serif font, if installed +#set termopt font "CMU Serif, 12" +## LaTeX Sans Serif font, if installed +#set termopt font "CMU Sans Serif, 12" +## Classical Times New Roman +#set termopt font "Times New Roman, 12" +""") + + # Actual logic + script.append('set termopt enhanced') # Properly deals with e.g. 
subscripts + script.append('set encoding utf8') # To deal with Greek letters + script.append('set xtics ({})'.format(xtics_string)) + + script.append('unset key') + + + script.append('set yrange [{}:{}]'.format(y_min_lim, y_max_lim)) + + script.append('set ylabel "{}"'.format('Dispersion ({})'.format(self.units))) + + if title: + script.append('set title "{}"'.format(title.replace('"', '\"'))) + + # Plot, escaping filename + if len(x) > 1: + script.append('set xrange [{}:{}]'.format(x_min_lim, x_max_lim)) + script.append('set grid xtics lt 1 lc rgb "#888888"') + script.append('plot "{}" with l lc rgb "#000000"'.format(os.path.basename(dat_filename).replace('"', '\"'))) + else: + script.append('set xrange [-1.0:1.0]') + script.append('plot "{}" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb "#000000"'.format(os.path.basename(dat_filename).replace('"', '\"'))) + + script_data = '\n'.join(script) + '\n' + extra_files = {dat_filename: raw_data} + + return script_data.encode('utf-8'), extra_files + def _prepare_agr(self, main_file_name='', comments=True, @@ -1646,6 +1663,42 @@ def _prepare_json(self, main_file_name='', comments=True): all_data_str = f.read() ''') +multi_kp = ''' +for path in paths: + if path['length'] <= 1: + # Avoid printing empty lines + continue + x = path['x'] + #for band in bands: + for band, band_type in zip(path['values'], all_data['band_type_idx']): + + # For now we support only two colors + if band_type % 2 == 0: + further_plot_options = further_plot_options1 + else: + further_plot_options = further_plot_options2 + + # Put the legend text only once + label = None + if first_band_1 and band_type % 2 == 0: + first_band_1 = False + label = all_data.get('legend_text', None) + elif first_band_2 and band_type % 2 == 1: + first_band_2 = False + label = all_data.get('legend_text2', None) + + p.plot(x, band, label=label, + **further_plot_options + ) +''' + +single_kp = ''' +path = paths[0] +values = path['values'] +x = [path['x'] for _ in values] +p.scatter(x, values, marker="_") +''' + matplotlib_body_template = Template('''all_data = json.loads(all_data_str) if not all_data.get('use_latex', False): @@ -1700,33 +1753,7 @@ def _prepare_json(self, main_file_name='', comments=True): first_band_1 = True first_band_2 = True -for path in paths: - if path['length'] <= 1: - # Avoid printing empty lines - continue - x = path['x'] - #for band in bands: - for band, band_type in zip(path['values'], all_data['band_type_idx']): - - # For now we support only two colors - if band_type % 2 == 0: - further_plot_options = further_plot_options1 - else: - further_plot_options = further_plot_options2 - - # Put the legend text only once - label = None - if first_band_1 and band_type % 2 == 0: - first_band_1 = False - label = all_data.get('legend_text', None) - elif first_band_2 and band_type % 2 == 1: - first_band_2 = False - label = all_data.get('legend_text2', None) - - p.plot(x, band, label=label, - **further_plot_options - ) - +${plot_code} p.set_xticks(tick_pos) p.set_xticklabels(tick_labels) diff --git a/tests/cmdline/commands/test_data.py b/tests/cmdline/commands/test_data.py index 11764640e3..e4d507235a 100644 --- a/tests/cmdline/commands/test_data.py +++ b/tests/cmdline/commands/test_data.py @@ -7,7 +7,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -# pylint: disable=no-member +# pylint: disable=no-member, 
too-many-lines
 """Test data-related verdi commands."""

 import io
@@ -343,6 +343,31 @@ def test_bandsexport(self):
         self.assertEqual(res.exit_code, 0, 'The command did not finish correctly')
         self.assertIn(b'[1.0, 3.0]', res.stdout_bytes, 'The string [1.0, 3.0] was not found in the bands' 'export')

+    def test_bandsexport_single_kp(self):
+        """
+        Plot band for single k-point (issue #2462).
+        """
+        kpnts = KpointsData()
+        kpnts.set_kpoints([[0., 0., 0.]])
+
+        bands = BandsData()
+        bands.set_kpointsdata(kpnts)
+        bands.set_bands([[1.0, 2.0]])
+        bands.store()
+
+        # matplotlib
+        options = [str(bands.id), '--format', 'mpl_singlefile']
+        res = self.cli_runner.invoke(cmd_bands.bands_export, options, catch_exceptions=False)
+        self.assertIn(b'p.scatter', res.stdout_bytes, 'The string p.scatter was not found in the bands mpl export')
+
+        # gnuplot
+        with self.cli_runner.isolated_filesystem():
+            options = [str(bands.id), '--format', 'gnuplot', '-o', 'bands.gnu']
+            self.cli_runner.invoke(cmd_bands.bands_export, options, catch_exceptions=False)
+            with open('bands.gnu', 'r') as gnu_file:
+                res = gnu_file.read()
+            self.assertIn('vectors nohead', res, 'The string "vectors nohead" was not found in the gnuplot script')
+

 class TestVerdiDataDict(AiidaTestCase):
     """Testing verdi data dict."""

From 6ea9ecb2063cf78a8f6bd6a211a0aaffd6ab2c3e Mon Sep 17 00:00:00 2001
From: Leopold Talirz
Date: Mon, 13 Apr 2020 22:20:09 +0200
Subject: [PATCH 45/54] Improved validation for CLI parameters (#3894)

Various improvements to CLI validation, implementation and documentation

 * More strict validation for parameter types:
   o labels
   o hostnames
   o emails
   o entry point names
   o profile names
 * Fix incorrect behavior in the `InteractiveOption` where the special
   character `!`, reserved to ignore the default, caused the validation
   of the option to be completely ignored. If the character is now
   encountered, an empty string will be returned to the validator.
* Reduce pytest output on coverage/timings to make it more readable * Update verdi setup docs for authentication via Unix sockets * Deduplicate code in reusable options of `verdi setup/quicksetup` --- .ci/workchains.py | 4 +- .github/config/profile.yaml | 2 +- .github/workflows/tests.sh | 2 +- .pre-commit-config.yaml | 1 + aiida/cmdline/params/arguments/__init__.py | 6 +- aiida/cmdline/params/options/__init__.py | 76 +++++++---- .../params/options/commands/computer.py | 6 +- .../cmdline/params/options/commands/setup.py | 91 ++++--------- aiida/cmdline/params/options/interactive.py | 9 +- aiida/cmdline/params/types/__init__.py | 2 +- aiida/cmdline/params/types/identifier.py | 2 + aiida/cmdline/params/types/nonemptystring.py | 31 ----- aiida/cmdline/params/types/plugin.py | 5 +- aiida/cmdline/params/types/profile.py | 6 +- aiida/cmdline/params/types/strings.py | 124 ++++++++++++++++++ docs/source/install/installation.rst | 58 ++++---- docs/source/verdi/verdi_user_guide.rst | 71 +++++----- pytest.ini | 1 + tests/cmdline/commands/test_code.py | 12 +- tests/cmdline/commands/test_computer.py | 2 +- tests/cmdline/commands/test_setup.py | 2 +- .../params/options/test_interactive.py | 8 +- tests/test_dataclasses.py | 12 +- 23 files changed, 313 insertions(+), 220 deletions(-) delete mode 100644 aiida/cmdline/params/types/nonemptystring.py create mode 100644 aiida/cmdline/params/types/strings.py diff --git a/.ci/workchains.py b/.ci/workchains.py index 4ae521540f..5504813a10 100644 --- a/.ci/workchains.py +++ b/.ci/workchains.py @@ -69,8 +69,8 @@ def a_magic_unicorn_appeared(self, node): @process_handler(priority=400, exit_codes=ArithmeticAddCalculation.exit_codes.ERROR_NEGATIVE_NUMBER) def error_negative_sum(self, node): """What even is a negative number, how can I have minus three melons?!.""" - self.ctx.inputs.x = Int(abs(node.inputs.x.value)) # pylint: disable=invalid-name - self.ctx.inputs.y = Int(abs(node.inputs.y.value)) # pylint: disable=invalid-name + self.ctx.inputs.x = Int(abs(node.inputs.x.value)) + self.ctx.inputs.y = Int(abs(node.inputs.y.value)) return ProcessHandlerReport(True) diff --git a/.github/config/profile.yaml b/.github/config/profile.yaml index c5e0dfa3c5..e58ab2821d 100644 --- a/.github/config/profile.yaml +++ b/.github/config/profile.yaml @@ -11,4 +11,4 @@ db_port: 5432 db_name: PLACEHOLDER_DATABASE_NAME db_username: postgres db_password: '' -repository: PLACEHOLDER_REPOSITORY \ No newline at end of file +repository: PLACEHOLDER_REPOSITORY diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh index 5fcc682769..3aff3b28f9 100755 --- a/.github/workflows/tests.sh +++ b/.github/workflows/tests.sh @@ -11,7 +11,7 @@ export PYTHONPATH="${PYTHONPATH}:${GITHUB_WORKSPACE}/.ci" # including the numbers/ranges of lines which are not covered # - coverage results of multiple tests are collected # - coverage is reported on files in aiida/ -export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --durations=0 --cov-config=.coveragerc --cov-report xml --cov-report term-missing --cov-append --cov=aiida" +export PYTEST_ADDOPTS="${PYTEST_ADDOPTS} --cov-append" # daemon tests verdi daemon start 4 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8ceab33c0e..57a52b649b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -165,6 +165,7 @@ files: >- (?x)^( aiida/cmdline/commands/.*| + aiida/cmdline/params/.*| utils/validate_consistency.py| )$ pass_filenames: false diff --git a/aiida/cmdline/params/arguments/__init__.py b/aiida/cmdline/params/arguments/__init__.py 
index 2ceb521725..71bb8c2544 100644 --- a/aiida/cmdline/params/arguments/__init__.py +++ b/aiida/cmdline/params/arguments/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'PROFILE', 'PROFILES', 'CALCULATION', 'CALCULATIONS', 'CODE', 'CODES', 'COMPUTER', 'COMPUTERS', 'DATUM', 'DATA', 'GROUP', 'GROUPS', 'NODE', 'NODES', 'PROCESS', 'PROCESSES', 'WORKFLOW', 'WORKFLOWS', 'INPUT_FILE', 'OUTPUT_FILE', - 'LABEL', 'USER', 'PROFILE_NAME', 'CONFIG_OPTION' + 'LABEL', 'USER', 'CONFIG_OPTION' ) @@ -62,10 +62,8 @@ OUTPUT_FILE = OverridableArgument('output_file', metavar='OUTPUT_FILE', type=click.Path()) -LABEL = OverridableArgument('label') +LABEL = OverridableArgument('label', type=click.STRING) USER = OverridableArgument('user', metavar='USER', type=types.UserParamType()) -PROFILE_NAME = OverridableArgument('profile_name', type=click.STRING) - CONFIG_OPTION = OverridableArgument('option', type=types.ConfigOptionParamType()) diff --git a/aiida/cmdline/params/options/__init__.py b/aiida/cmdline/params/options/__init__.py index c2352ac70a..9d7f77927e 100644 --- a/aiida/cmdline/params/options/__init__.py +++ b/aiida/cmdline/params/options/__init__.py @@ -10,6 +10,8 @@ """Module with pre-defined reusable commandline options that can be used as `click` decorators.""" import click +# Note: importing from aiida.manage.postgres leads to circular imports +from pgsu import DEFAULT_DSN as DEFAULT_DBINFO # pylint: disable=no-name-in-module from aiida.backends import BACKEND_DJANGO, BACKEND_SQLA from ...utils import defaults, echo @@ -23,12 +25,12 @@ 'graph_traversal_rules', 'PROFILE', 'CALCULATION', 'CALCULATIONS', 'CODE', 'CODES', 'COMPUTER', 'COMPUTERS', 'DATUM', 'DATA', 'GROUP', 'GROUPS', 'NODE', 'NODES', 'FORCE', 'SILENT', 'VISUALIZATION_FORMAT', 'INPUT_FORMAT', 'EXPORT_FORMAT', 'ARCHIVE_FORMAT', 'NON_INTERACTIVE', 'DRY_RUN', 'USER_EMAIL', 'USER_FIRST_NAME', 'USER_LAST_NAME', - 'USER_INSTITUTION', 'BACKEND', 'DB_HOST', 'DB_PORT', 'DB_USERNAME', 'DB_PASSWORD', 'DB_NAME', 'REPOSITORY_PATH', - 'PROFILE_ONLY_CONFIG', 'PROFILE_SET_DEFAULT', 'PREPEND_TEXT', 'APPEND_TEXT', 'LABEL', 'DESCRIPTION', 'INPUT_PLUGIN', - 'CALC_JOB_STATE', 'PROCESS_STATE', 'EXIT_STATUS', 'FAILED', 'LIMIT', 'PROJECT', 'ORDER_BY', 'PAST_DAYS', - 'OLDER_THAN', 'ALL', 'ALL_STATES', 'ALL_USERS', 'GROUP_CLEAR', 'RAW', 'HOSTNAME', 'TRANSPORT', 'SCHEDULER', 'USER', - 'PORT', 'FREQUENCY', 'VERBOSE', 'TIMEOUT', 'FORMULA_MODE', 'TRAJECTORY_INDEX', 'WITH_ELEMENTS', - 'WITH_ELEMENTS_EXCLUSIVE' + 'USER_INSTITUTION', 'DB_BACKEND', 'DB_ENGINE', 'DB_HOST', 'DB_PORT', 'DB_USERNAME', 'DB_PASSWORD', 'DB_NAME', + 'REPOSITORY_PATH', 'PROFILE_ONLY_CONFIG', 'PROFILE_SET_DEFAULT', 'PREPEND_TEXT', 'APPEND_TEXT', 'LABEL', + 'DESCRIPTION', 'INPUT_PLUGIN', 'CALC_JOB_STATE', 'PROCESS_STATE', 'EXIT_STATUS', 'FAILED', 'LIMIT', 'PROJECT', + 'ORDER_BY', 'PAST_DAYS', 'OLDER_THAN', 'ALL', 'ALL_STATES', 'ALL_USERS', 'GROUP_CLEAR', 'RAW', 'HOSTNAME', + 'TRANSPORT', 'SCHEDULER', 'USER', 'PORT', 'FREQUENCY', 'VERBOSE', 'TIMEOUT', 'FORMULA_MODE', 'TRAJECTORY_INDEX', + 'WITH_ELEMENTS', 'WITH_ELEMENTS_EXCLUSIVE' ) TRAVERSAL_RULE_HELP_STRING = { @@ -210,41 +212,65 @@ def decorator(command): USER_EMAIL = OverridableOption( '--email', - type=click.STRING, - prompt='Email Address (identifies your data when sharing)', - help='Email address that will be associated with your data and will be exported along with it, ' - 'should you choose to share any of your work.' + 'email', + type=types.EmailType(), + help='Email address associated with the data you generate. 
The email address is exported along with the data, ' + 'when sharing it.' ) USER_FIRST_NAME = OverridableOption( - '--first-name', type=click.STRING, prompt='First name', help='First name of the user.' + '--first-name', type=types.NonEmptyStringParamType(), help='First name of the user.' ) -USER_LAST_NAME = OverridableOption('--last-name', type=click.STRING, prompt='Last name', help='Last name of the user.') +USER_LAST_NAME = OverridableOption('--last-name', type=types.NonEmptyStringParamType(), help='Last name of the user.') USER_INSTITUTION = OverridableOption( - '--institution', type=click.STRING, prompt='Institution', help='Institution name of the user.' + '--institution', type=types.NonEmptyStringParamType(), help='Institution of the user.' +) + +DB_ENGINE = OverridableOption( + '--db-engine', + help='Engine to use to connect to the database.', + default='postgresql_psycopg2', + type=click.Choice(['postgresql_psycopg2']) ) -BACKEND = OverridableOption( - '--backend', +DB_BACKEND = OverridableOption( + '--db-backend', type=click.Choice([BACKEND_DJANGO, BACKEND_SQLA]), default=BACKEND_DJANGO, help='Database backend to use.' ) -DB_HOST = OverridableOption('--db-host', type=click.STRING, help='Database server host.') +DB_HOST = OverridableOption( + '--db-host', + type=types.HostnameType(), + help='Database server host. Leave empty for "peer" authentication.', + default=DEFAULT_DBINFO['host'] +) -DB_PORT = OverridableOption('--db-port', type=click.INT, help='Database server port.') +DB_PORT = OverridableOption( + '--db-port', + type=click.INT, + help='Database server port.', + default=DEFAULT_DBINFO['port'], +) -DB_USERNAME = OverridableOption('--db-username', type=click.STRING, help='Database user name.') +DB_USERNAME = OverridableOption( + '--db-username', type=types.NonEmptyStringParamType(), help='Name of the database user.' +) -DB_PASSWORD = OverridableOption('--db-password', type=click.STRING, help='Database user password.') +DB_PASSWORD = OverridableOption( + '--db-password', + type=click.STRING, + help='Password of the database user.', + hide_input=True, +) -DB_NAME = OverridableOption('--db-name', type=click.STRING, help='Database name.') +DB_NAME = OverridableOption('--db-name', type=types.NonEmptyStringParamType(), help='Database name.') REPOSITORY_PATH = OverridableOption( - '--repository', type=click.Path(file_okay=False), help='Absolute path for the file system repository.' + '--repository', type=click.Path(file_okay=False), help='Absolute path to the file repository.' ) PROFILE_ONLY_CONFIG = OverridableOption( @@ -389,7 +415,7 @@ def decorator(command): help='Display only raw query results, without any headers or footers.' ) -HOSTNAME = OverridableOption('-H', '--hostname', help='Hostname.') +HOSTNAME = OverridableOption('-H', '--hostname', type=types.HostnameType(), help='Hostname.') TRANSPORT = OverridableOption( '-T', '--transport', type=types.PluginParamType(group='transports'), required=True, help='Transport type.' @@ -459,7 +485,11 @@ def decorator(command): help='Only select objects containing only these and no other elements.' ) -CONFIG_FILE = ConfigFileOption('--config', help='Load option values from configuration file in yaml format.') +CONFIG_FILE = ConfigFileOption( + '--config', + type=click.Path(exists=True, dir_okay=False), + help='Load option values from configuration file in yaml format.' 
+) IDENTIFIER = OverridableOption( '-i', diff --git a/aiida/cmdline/params/options/commands/computer.py b/aiida/cmdline/params/options/commands/computer.py index 1209524de5..5a39fc99b1 100644 --- a/aiida/cmdline/params/options/commands/computer.py +++ b/aiida/cmdline/params/options/commands/computer.py @@ -45,15 +45,13 @@ def should_call_default_mpiprocs_per_machine(ctx): # pylint: disable=invalid-na return job_resource_cls.accepts_default_mpiprocs_per_machine() -LABEL = options.LABEL.clone( - prompt='Computer label', cls=InteractiveOption, required=True, type=types.NonEmptyStringParamType() -) +LABEL = options.LABEL.clone(prompt='Computer label', cls=InteractiveOption, required=True) HOSTNAME = options.HOSTNAME.clone( prompt='Hostname', cls=InteractiveOption, required=True, - help='The fully qualified hostname of this computer; for local transports, use localhost.' + help='The fully qualified hostname of the computer; use "localhost" for local transports.', ) DESCRIPTION = options.DESCRIPTION.clone( diff --git a/aiida/cmdline/params/options/commands/setup.py b/aiida/cmdline/params/options/commands/setup.py index 781aa97be9..3fffab2102 100644 --- a/aiida/cmdline/params/options/commands/setup.py +++ b/aiida/cmdline/params/options/commands/setup.py @@ -15,7 +15,7 @@ import click -from aiida.backends import BACKEND_DJANGO, BACKEND_SQLA +from aiida.backends import BACKEND_DJANGO from aiida.cmdline.params import options, types from aiida.manage.configuration import get_config, get_config_option, Profile from aiida.manage.external.postgres import DEFAULT_DBINFO @@ -157,102 +157,58 @@ def get_quicksetup_password(ctx, param, value): # pylint: disable=unused-argume cls=options.interactive.InteractiveOption ) -SETUP_USER_EMAIL = options.OverridableOption( - '--email', - 'email', - prompt='User email', - help='Email address that serves as the user name and a way to identify data created by it.', +SETUP_USER_EMAIL = options.USER_EMAIL.clone( + prompt='Email Address (for sharing data)', default=get_config_option('user.email'), required_fn=lambda x: get_config_option('user.email') is None, required=True, cls=options.interactive.InteractiveOption ) -SETUP_USER_FIRST_NAME = options.OverridableOption( - '--first-name', - 'first_name', +SETUP_USER_FIRST_NAME = options.USER_FIRST_NAME.clone( prompt='First name', - help='First name of the user.', - type=click.STRING, default=get_config_option('user.first_name'), required_fn=lambda x: get_config_option('user.first_name') is None, required=True, cls=options.interactive.InteractiveOption ) -SETUP_USER_LAST_NAME = options.OverridableOption( - '--last-name', - 'last_name', +SETUP_USER_LAST_NAME = options.USER_LAST_NAME.clone( prompt='Last name', - help='Last name of the user.', - type=click.STRING, default=get_config_option('user.last_name'), required_fn=lambda x: get_config_option('user.last_name') is None, required=True, cls=options.interactive.InteractiveOption ) -SETUP_USER_INSTITUTION = options.OverridableOption( - '--institution', - 'institution', +SETUP_USER_INSTITUTION = options.USER_INSTITUTION.clone( prompt='Institution', - help='Institution of the user.', - type=click.STRING, default=get_config_option('user.institution'), required_fn=lambda x: get_config_option('user.institution') is None, required=True, cls=options.interactive.InteractiveOption ) -SETUP_USER_PASSWORD = options.OverridableOption( - '--password', - 'password', - prompt='Password', - help='Optional password to connect to REST API.', - hide_input=True, - type=click.STRING, - 
default=PASSWORD_UNCHANGED, - confirmation_prompt=True, - cls=options.interactive.InteractiveOption -) +QUICKSETUP_DATABASE_ENGINE = options.DB_ENGINE -QUICKSETUP_DATABASE_ENGINE = options.OverridableOption( - '--db-engine', - help='Engine to use to connect to the database.', - default='postgresql_psycopg2', - type=click.Choice(['postgresql_psycopg2']) -) +QUICKSETUP_DATABASE_BACKEND = options.DB_BACKEND -QUICKSETUP_DATABASE_BACKEND = options.OverridableOption( - '--db-backend', - help='Backend type to use to map the database.', - default=BACKEND_DJANGO, - type=click.Choice([BACKEND_DJANGO, BACKEND_SQLA]) -) - -QUICKSETUP_DATABASE_HOSTNAME = options.OverridableOption( - '--db-host', help='Hostname to connect to the database.', default=DEFAULT_DBINFO['host'], type=click.STRING -) +QUICKSETUP_DATABASE_HOSTNAME = options.DB_HOST -QUICKSETUP_DATABASE_PORT = options.OverridableOption( - '--db-port', help='Port to connect to the database.', default=DEFAULT_DBINFO['port'], type=click.INT -) +QUICKSETUP_DATABASE_PORT = options.DB_PORT QUICKSETUP_DATABASE_NAME = options.OverridableOption( - '--db-name', help='Name of the database to create.', type=click.STRING, callback=get_quicksetup_database_name + '--db-name', + help='Name of the database to create.', + type=types.NonEmptyStringParamType(), + callback=get_quicksetup_database_name ) -QUICKSETUP_DATABASE_USERNAME = options.OverridableOption( - '--db-username', help='Name of the database user to create.', type=click.STRING, callback=get_quicksetup_username +QUICKSETUP_DATABASE_USERNAME = options.DB_USERNAME.clone( + help='Name of the database user to create.', callback=get_quicksetup_username ) -QUICKSETUP_DATABASE_PASSWORD = options.OverridableOption( - '--db-password', - help='Password to connect to the database.', - type=click.STRING, - hide_input=True, - callback=get_quicksetup_password -) +QUICKSETUP_DATABASE_PASSWORD = options.DB_PASSWORD.clone(callback=get_quicksetup_password) QUICKSETUP_SUPERUSER_DATABASE_USERNAME = options.OverridableOption( '--su-db-username', help='User name of the database super user.', type=click.STRING, default=DEFAULT_DBINFO['user'] @@ -270,13 +226,10 @@ def get_quicksetup_password(ctx, param, value): # pylint: disable=unused-argume help='Password to connect as the database superuser.', type=click.STRING, hide_input=True, - default=DEFAULT_DBINFO['password'] + default=DEFAULT_DBINFO['password'], ) -QUICKSETUP_REPOSITORY_URI = options.OverridableOption( - '--repository', - help='Absolute path for the file system repository.', - type=click.Path(file_okay=False), +QUICKSETUP_REPOSITORY_URI = options.REPOSITORY_PATH.clone( callback=get_quicksetup_repository_uri # Cannot use `default` because `ctx` is needed to determine the default ) @@ -293,14 +246,14 @@ def get_quicksetup_password(ctx, param, value): # pylint: disable=unused-argume ) SETUP_DATABASE_HOSTNAME = QUICKSETUP_DATABASE_HOSTNAME.clone( - prompt='Database hostname', - contextual_default=functools.partial(get_profile_attribute_default, ('database_hostname', 'localhost')), + prompt='Database host', + contextual_default=functools.partial(get_profile_attribute_default, ('database_hostname', DEFAULT_DBINFO['host'])), cls=options.interactive.InteractiveOption ) SETUP_DATABASE_PORT = QUICKSETUP_DATABASE_PORT.clone( prompt='Database port', - contextual_default=functools.partial(get_profile_attribute_default, ('database_port', 5432)), + contextual_default=functools.partial(get_profile_attribute_default, ('database_port', DEFAULT_DBINFO['port'])), 
cls=options.interactive.InteractiveOption ) diff --git a/aiida/cmdline/params/options/interactive.py b/aiida/cmdline/params/options/interactive.py index f632c39c08..8006d1b6fb 100644 --- a/aiida/cmdline/params/options/interactive.py +++ b/aiida/cmdline/params/options/interactive.py @@ -21,8 +21,8 @@ class InteractiveOption(ConditionalOption): """ - Intercepts certain keyword arguments to circumvent :mod:`click`'s prompting - behaviour and define a more feature-rich one + Prompts for input, intercepting certain keyword arguments to replace :mod:`click`'s prompting + behaviour with a more feature-rich one. .. note:: This class has a parameter ``required_fn`` that can be passed to its ``__init__`` (inherited from the superclass :py:class:`~aiida.cmdline.params.options.conditional.ConditionalOption`) and a @@ -185,8 +185,9 @@ def safely_convert(self, value, param, ctx): successful = False if value is self.CHARACTER_IGNORE_DEFAULT: - # The ignore default character signifies that the user wants to "not" set the value, so we return `None` - return True, None + # The ignore default character signifies that the user wants to "not" set the value. + # Replace value by an empty string for further processing (e.g. if a non-empty value is required). + value = '' try: value = self.type.convert(value, param, ctx) diff --git a/aiida/cmdline/params/types/__init__.py b/aiida/cmdline/params/types/__init__.py index f2849e7933..3b44d31358 100644 --- a/aiida/cmdline/params/types/__init__.py +++ b/aiida/cmdline/params/types/__init__.py @@ -20,7 +20,7 @@ from .multiple import MultipleValueParamType from .node import NodeParamType from .process import ProcessParamType -from .nonemptystring import NonEmptyStringParamType +from .strings import (NonEmptyStringParamType, EmailType, HostnameType, EntryPointType, LabelStringType) from .path import AbsolutePathParamType, ImportPath from .plugin import PluginParamType from .profile import ProfileParamType diff --git a/aiida/cmdline/params/types/identifier.py b/aiida/cmdline/params/types/identifier.py index 5fcb824e08..94deaf21a4 100644 --- a/aiida/cmdline/params/types/identifier.py +++ b/aiida/cmdline/params/types/identifier.py @@ -86,6 +86,8 @@ def convert(self, value, param, ctx): from aiida.common import exceptions from aiida.orm.utils.loaders import OrmEntityLoader + value = super().convert(value, param, ctx) + if not value: raise click.BadParameter('the value for the identifier cannot be empty') diff --git a/aiida/cmdline/params/types/nonemptystring.py b/aiida/cmdline/params/types/nonemptystring.py deleted file mode 100644 index 295bd29bf3..0000000000 --- a/aiida/cmdline/params/types/nonemptystring.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -""" -Module for the non empty string parameter type -""" - -from click.types import StringParamType - - -class NonEmptyStringParamType(StringParamType): - """ - Parameter that cannot be an an empty string. 
- """ - name = 'nonemptystring' - - def convert(self, value, param, ctx): - newval = super().convert(value, param, ctx) - if not newval: # None or empty string - self.fail('Empty string is not valid!') - - return newval - - def __repr__(self): - return 'NONEMPTYSTRING' diff --git a/aiida/cmdline/params/types/plugin.py b/aiida/cmdline/params/types/plugin.py index a4a6077554..607e0c2a35 100644 --- a/aiida/cmdline/params/types/plugin.py +++ b/aiida/cmdline/params/types/plugin.py @@ -16,9 +16,10 @@ from aiida.plugins.entry_point import ENTRY_POINT_STRING_SEPARATOR, ENTRY_POINT_GROUP_PREFIX, EntryPointFormat from aiida.plugins.entry_point import format_entry_point_string, get_entry_point_string_format from aiida.plugins.entry_point import get_entry_point, get_entry_points, get_entry_point_groups +from ..types import EntryPointType -class PluginParamType(click.ParamType): +class PluginParamType(EntryPointType): """ AiiDA Plugin name parameter type. @@ -203,6 +204,8 @@ def convert(self, value, param, ctx): Convert the string value to an entry point instance, if the value can be successfully parsed into an actual entry point. Will raise click.BadParameter if validation fails. """ + value = super().convert(value, param, ctx) + if not value: raise click.BadParameter('plugin name cannot be empty') diff --git a/aiida/cmdline/params/types/profile.py b/aiida/cmdline/params/types/profile.py index b89cacdaf7..6c3902bad6 100644 --- a/aiida/cmdline/params/types/profile.py +++ b/aiida/cmdline/params/types/profile.py @@ -9,10 +9,10 @@ ########################################################################### """Profile param type for click.""" -import click +from .strings import LabelStringType -class ProfileParamType(click.ParamType): +class ProfileParamType(LabelStringType): """The profile parameter type for click.""" name = 'profile' @@ -31,6 +31,8 @@ def convert(self, value, param, ctx): from aiida.common.exceptions import MissingConfigurationError, ProfileConfigurationError from aiida.manage.configuration import get_config, load_profile, Profile + value = super().convert(value, param, ctx) + try: config = get_config(create=True) profile = config.get_profile(value) diff --git a/aiida/cmdline/params/types/strings.py b/aiida/cmdline/params/types/strings.py new file mode 100644 index 0000000000..d85c667794 --- /dev/null +++ b/aiida/cmdline/params/types/strings.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +""" +Module for various text-based string validation. 
+""" + +import re +from click.types import StringParamType + + +class NonEmptyStringParamType(StringParamType): + """Parameter whose values have to be string and non-empty.""" + name = 'nonemptystring' + + def convert(self, value, param, ctx): + newval = super().convert(value, param, ctx) + + # Note: Valid :py:class:`click.ParamType`s need to pass through None unchanged + if newval is None: + return None + + if not newval: # empty string + self.fail('Empty string is not valid!') + + return newval + + def __repr__(self): + return 'NONEMPTYSTRING' + + +class LabelStringType(NonEmptyStringParamType): + """Parameter accepting valid label strings. + + Non-empty string, made up of word characters (includes underscores [1]), dashes, and dots. + + [1] See https://docs.python.org/3/library/re.html + """ + name = 'labelstring' + + ALPHABET = r'\w\.\-' + + def convert(self, value, param, ctx): + newval = super().convert(value, param, ctx) + + if not re.match('^[{}]*$'.format(self.ALPHABET), newval): + self.fail('Please use only alphanumeric characters, dashes, underscores or dots') + + return newval + + def __repr__(self): + return 'LABELSTRING' + + +HOSTNAME_REGEX = \ +r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$' + + +class HostnameType(StringParamType): + """Parameter corresponding to a valid hostname (or empty) string. + + Regex according to https://stackoverflow.com/a/3824105/1069467 + """ + name = 'hostname' + + def convert(self, value, param, ctx): + newval = super().convert(value, param, ctx) + + import click + click.echo(newval) + + if newval and not re.match(HOSTNAME_REGEX, newval): + self.fail('Please enter a valid hostname.') + + return newval + + def __repr__(self): + return 'HOSTNAME' + + +class EmailType(StringParamType): + """Parameter whose values have to correspond to a valid email address format. + + .. note:: For the moment, we do not require the domain suffix, i.e. 'aiida@localhost' is still valid. + """ + name = 'email' + + def convert(self, value, param, ctx): + newval = super().convert(value, param, ctx) + + if not re.match(r'[^@]+@[^@]+(\.[^@]+){0,1}', newval): + self.fail('Please enter a valid email.') + + return newval + + def __repr__(self): + return 'EMAIL' + + +class EntryPointType(NonEmptyStringParamType): + """Parameter whose values have to be valid Python entry point strings. + + See https://packaging.python.org/specifications/entry-points/ + """ + name = 'entrypoint' + + def convert(self, value, param, ctx): + newval = super().convert(value, param, ctx) + + if not re.match(r'[\w.-]', newval): + self.fail( + 'Please enter a valid entry point string: Use only letters, numbers, undercores, dots and dashes.' + ) + + return newval + + def __repr__(self): + return 'ENTRYPOINT' diff --git a/docs/source/install/installation.rst b/docs/source/install/installation.rst index da0af649da..6cabea822e 100644 --- a/docs/source/install/installation.rst +++ b/docs/source/install/installation.rst @@ -119,22 +119,18 @@ and set up the database manually as explained below. Database setup -------------- -AiiDA uses a database to store the nodes, node attributes and other -information, allowing the end user to perform fast queries of the results. -Currently, only `PostgreSQL`_ is allowed as a database backend. +AiiDA uses a database to store the nodes, node attributes and other information, allowing the end user to perform fast queries of the results. 
+Currently, `PostgreSQL`_ is the only supported database backend.
 
 .. _PostgreSQL: https://www.postgresql.org/downloads
 
-To manually create the database for AiiDA, you need to run the program ``psql``
-to interact with postgres.
-On most operating systems, you need to do so as the ``postgres`` user that was
-created upon installing the software.
+To manually create the database for AiiDA, you need to run the program ``psql`` to interact with postgres.
+On most operating systems, you need to do so as the ``postgres`` user that was created upon installing the software.
 
 To assume the role of ``postgres`` run as root::
 
     su - postgres
 
-(or, equivalently, type ``sudo su - postgres``, depending on your distribution)
-and launch the postgres program::
+(or, equivalently, type ``sudo su - postgres``, depending on your distribution) and launch the postgres program::
 
     psql
 
@@ -143,12 +139,13 @@ Create a new database user account for AiiDA by running::
 
     CREATE USER aiida WITH PASSWORD '<password>';
 
 replacing ``<password>`` with a password of your choice.
-Make sure to remember it, as you will need it again when you configure AiiDA to use this database through ``verdi setup``.
+
+You will need to provide the password again when you configure AiiDA to use this database through ``verdi setup``.
 
 If you want to change the password you just created use the command::
 
     ALTER USER aiida PASSWORD '<password>';
 
-Next we create the database itself. Keep in mind that we enforce the UTF-8 encoding and specific locales::
+Next, we create the database itself. We enforce the UTF-8 encoding and specific locales::
 
     CREATE DATABASE aiidadb OWNER aiida ENCODING 'UTF8' LC_COLLATE='en_US.UTF-8' LC_CTYPE='en_US.UTF-8' TEMPLATE=template0;
 
@@ -167,8 +164,8 @@ If everything worked well, you should get no error and see the prompt of the ``p
 If you use the same names as in the example commands above, then during the ``verdi setup`` phase the following parameters will apply to the newly created database::
 
     Database engine: postgresql_psycopg2
-    PostgreSQL host: localhost
-    PostgreSQL port: 5432
+    Database host: localhost
+    Database port: 5432
     AiiDA Database name: aiidadb
     AiiDA Database user: aiida
     AiiDA Database password: <password>
 
@@ -180,44 +177,45 @@ If you use the same names as in the example commands above, then during the ``ve
 instructions :ref:`here`.
 
-Database setup using Unix sockets
-+++++++++++++++++++++++++++++++++
+Database setup using 'peer' authentication
+++++++++++++++++++++++++++++++++++++++++++
 
-Instead of using passwords to protect access to the database
-(which could be used by other users on the same machine),
-PostgreSQL allows password-less logins via Unix sockets.
+On Ubuntu Linux, the default PostgreSQL setup is configured to use ``peer`` authentication, which allows password-less login via local Unix sockets.
+In this mode, PostgreSQL compares the Unix user connecting to the socket with its own database of users and allows a connection if a matching user exists.
 
-In this scenario PostgreSQL compares the user connecting to the socket with its
-own database of users and will allow a connection if a matching user exists.
+.. note::
+    This is an alternative route to set up your database - the standard approach will work on Ubuntu just as well.
 
-Assume the role of ``postgres`` by running the following as root::
+Below we are going to take advantage of the command-line utilities shipped on Ubuntu to simplify creating users and databases compared to issuing the SQL commands directly.
- su - postgres +Assume the role of ``postgres``:: + + sudo su postgres -Create a database user with the **same name** as the user you are using to run AiiDA (usually your login name):: +Create a database user with the **same name** as the UNIX user who will be running AiiDA (usually your login name):: createuser replacing ```` with your username. -Next, create the database itself making sure that your user is the owner:: +Next, create the database itself with your user as the owner:: createdb -O aiidadb -To test if the database was created successfully, you can run the following command as your user in a bash terminal:: +Exit the shell to go back to your login user. +To test if the database was created successfully, try:: psql aiidadb -Make sure to leave the host, port and password empty when specifying the parameters during the ``verdi setup`` phase -and specify your username as the *AiiDA Database user*:: +During the ``verdi setup`` phase, use ``!`` to leave host empty and specify your Unix user name as the *AiiDA Database user*.:: Database engine: postgresql_psycopg2 - PostgreSQL host: - PostgreSQL port: + Database host: ! + Database port: 5432 AiiDA Database name: aiidadb AiiDA Database user: - AiiDA Database password: + AiiDA Database password: "" Setup instructions diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst index 1fc8bfabe8..90bac3a17c 100644 --- a/docs/source/verdi/verdi_user_guide.rst +++ b/docs/source/verdi/verdi_user_guide.rst @@ -617,28 +617,29 @@ Below is a list with all available subcommands. -n, --non-interactive Non-interactive mode: never prompt for input. --profile PROFILE The name of the new profile. [required] - --email TEXT Email address that serves as the user name - and a way to identify data created by it. + --email EMAIL Email address associated with the data you + generate. The email address is exported + along with the data, when sharing it. [required] - --first-name TEXT First name of the user. [required] - --last-name TEXT Last name of the user. [required] - --institution TEXT Institution of the user. [required] + --first-name NONEMPTYSTRING First name of the user. [required] + --last-name NONEMPTYSTRING Last name of the user. [required] + --institution NONEMPTYSTRING Institution of the user. [required] --db-engine [postgresql_psycopg2] Engine to use to connect to the database. --db-backend [django|sqlalchemy] - Backend type to use to map the database. - --db-host TEXT Hostname to connect to the database. - --db-port INTEGER Port to connect to the database. - --db-name TEXT Name of the database to create. - --db-username TEXT Name of the database user to create. - --db-password TEXT Password to connect to the database. + Database backend to use. + --db-host HOSTNAME Database server host. Leave empty for "peer" + authentication. + --db-port INTEGER Database server port. + --db-name NONEMPTYSTRING Name of the database to create. + --db-username NONEMPTYSTRING Name of the database user to create. + --db-password TEXT Password of the database user. --su-db-name TEXT Name of the template database to connect to as the database superuser. --su-db-username TEXT User name of the database super user. --su-db-password TEXT Password to connect as the database superuser. - --repository DIRECTORY Absolute path for the file system - repository. + --repository DIRECTORY Absolute path to the file repository. --config FILE Load option values from configuration file in yaml format. --help Show this message and exit. 
@@ -681,14 +682,14 @@ Below is a list with all available subcommands. verdi -p restapi --hostname 127.0.0.5 --port 6789 Options: - -H, --hostname TEXT Hostname. - -P, --port INTEGER Port number. - -c, --config-dir PATH Path to the configuration directory - --debug Enable debugging - --wsgi-profile Whether to enable WSGI profiler middleware for - finding bottlenecks - --hookup / --no-hookup Hookup app to flask server - --help Show this message and exit. + -H, --hostname HOSTNAME Hostname. + -P, --port INTEGER Port number. + -c, --config-dir PATH Path to the configuration directory + --debug Enable debugging + --wsgi-profile Whether to enable WSGI profiler middleware for + finding bottlenecks + --hookup / --no-hookup Hookup app to flask server + --help Show this message and exit. .. _verdi_run: @@ -733,25 +734,25 @@ Below is a list with all available subcommands. -n, --non-interactive Non-interactive mode: never prompt for input. --profile PROFILE The name of the new profile. [required] - --email TEXT Email address that serves as the user name - and a way to identify data created by it. + --email EMAIL Email address associated with the data you + generate. The email address is exported + along with the data, when sharing it. [required] - --first-name TEXT First name of the user. [required] - --last-name TEXT Last name of the user. [required] - --institution TEXT Institution of the user. [required] + --first-name NONEMPTYSTRING First name of the user. [required] + --last-name NONEMPTYSTRING Last name of the user. [required] + --institution NONEMPTYSTRING Institution of the user. [required] --db-engine [postgresql_psycopg2] Engine to use to connect to the database. --db-backend [django|sqlalchemy] - Backend type to use to map the database. - --db-host TEXT Hostname to connect to the database. - --db-port INTEGER Port to connect to the database. - --db-name TEXT Name of the database to create. [required] - --db-username TEXT Name of the database user to create. + Database backend to use. + --db-host HOSTNAME Database server host. Leave empty for "peer" + authentication. + --db-port INTEGER Database server port. + --db-name NONEMPTYSTRING Name of the database to create. [required] + --db-username NONEMPTYSTRING Name of the database user to create. [required] - --db-password TEXT Password to connect to the database. - [required] - --repository DIRECTORY Absolute path for the file system - repository. + --db-password TEXT Password of the database user. [required] + --repository DIRECTORY Absolute path to the file repository. --config FILE Load option values from configuration file in yaml format. --help Show this message and exit. 
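The EMAIL, HOSTNAME and NONEMPTYSTRING metavars in the help output above
come from the new parameter types in `aiida/cmdline/params/types/strings.py`.
A minimal sketch of how such a type plugs into a `click` option (a
hypothetical standalone example, not part of this patch):

    import click

    from aiida.cmdline.params.types import HostnameType, NonEmptyStringParamType

    @click.command()
    @click.option('--db-host', type=HostnameType(), help='Database server host.')
    @click.option('--db-name', type=NonEmptyStringParamType(), help='Database name.')
    def demo(db_host, db_name):
        # click invokes each type's convert() before the command body runs,
        # so an invalid hostname or an empty database name fails early with
        # a uniform error message.
        click.echo('{} {}'.format(db_host, db_name))

Because the validation lives in the parameter type, the same rules apply to
values given on the command line, entered at interactive prompts, or loaded
from a `--config` file.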
diff --git a/pytest.ini b/pytest.ini index de85753fa6..36877fc59c 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,5 @@ [pytest] +addopts= --durations=50 --cov-config=.coveragerc --cov-report xml --cov=aiida testpaths = tests filterwarnings = ignore::DeprecationWarning:babel: diff --git a/tests/cmdline/commands/test_code.py b/tests/cmdline/commands/test_code.py index d50bfc78b3..91d92b10ed 100644 --- a/tests/cmdline/commands/test_code.py +++ b/tests/cmdline/commands/test_code.py @@ -240,6 +240,7 @@ def test_code_show(self): def test_code_duplicate_interactive(self): """Test code duplication interactive.""" + from aiida.orm import Code os.environ['VISUAL'] = 'sleep 1; vim -cwq' os.environ['EDITOR'] = 'sleep 1; vim -cwq' label = 'code_duplicate_interactive' @@ -247,12 +248,21 @@ def test_code_duplicate_interactive(self): result = self.cli_runner.invoke(code_duplicate, [str(self.code.pk)], input=user_input, catch_exceptions=False) self.assertIsNone(result.exception, result.output) - from aiida.orm import Code new_code = Code.get_from_string(label) self.assertEqual(self.code.description, new_code.description) self.assertEqual(self.code.get_prepend_text(), new_code.get_prepend_text()) self.assertEqual(self.code.get_append_text(), new_code.get_append_text()) + # test that providing "!" to description leads to empty description + # https://github.com/aiidateam/aiida-core/issues/3770 + label = 'code_duplicate_interactive2' + user_input = label + '\n!\n\n\n\n\n' + result = self.cli_runner.invoke(code_duplicate, [str(self.code.pk)], input=user_input, catch_exceptions=False) + self.assertIsNone(result.exception, result.output) + + new_code = Code.get_from_string(label) + self.assertEqual('', new_code.description) + def test_code_duplicate_non_interactive(self): """Test code duplication non-interactive.""" label = 'code_duplicate_noninteractive' diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py index 574110121d..568550dc76 100644 --- a/tests/cmdline/commands/test_computer.py +++ b/tests/cmdline/commands/test_computer.py @@ -317,7 +317,7 @@ def test_noninteractive_from_config(self): with tempfile.NamedTemporaryFile('w') as handle: handle.write("""--- label: {l} -hostname: {l} +hostname: myhost transport: local scheduler: direct """.format(l=label)) diff --git a/tests/cmdline/commands/test_setup.py b/tests/cmdline/commands/test_setup.py index 7820bc5a12..c7c5fb8d18 100644 --- a/tests/cmdline/commands/test_setup.py +++ b/tests/cmdline/commands/test_setup.py @@ -121,7 +121,7 @@ def test_quicksetup_wrong_port(self): @with_temporary_config_instance def test_setup(self): - """Test `verdi setup`.""" + """Test `verdi setup` (non-interactive).""" postgres = Postgres(interactive=False, quiet=True, dbinfo=self.pg_test.dsn) postgres.determine_setup() db_name = 'aiida_test_setup' diff --git a/tests/cmdline/params/options/test_interactive.py b/tests/cmdline/params/options/test_interactive.py index a72b52b266..04899ab19a 100644 --- a/tests/cmdline/params/options/test_interactive.py +++ b/tests/cmdline/params/options/test_interactive.py @@ -223,15 +223,17 @@ def test_default_value_empty_opt(self): def test_default_value_ignore_character(self): """ - scenario: InteractiveOption with default value, invoke with ignore default character `!` - behaviour: return `None` for the value + scenario: InteractiveOption with default value, invoke with "ignore default character" `!` + behaviour: return empty string '' for the value + + Note: It should *not* return None, since this is 
indistinguishable from the option not being prompted for.
         """
         cmd = self.simple_command(default='default')
         runner = CliRunner()
 
         # Check the interactive mode, by not specifying the input on the command line and then enter `!` at the prompt
         result = runner.invoke(cmd, [], input='!\n')
-        expected = 'None'
+        expected = ''
         self.assertIsNone(result.exception)
         self.assertIn(expected, result.output.split('\n')[3])  # Fourth line should be parsed value printed to stdout
 
diff --git a/tests/test_dataclasses.py b/tests/test_dataclasses.py
index 83e37ce8b3..47f9a7199c 100644
--- a/tests/test_dataclasses.py
+++ b/tests/test_dataclasses.py
@@ -255,13 +255,13 @@ def test_ase_primitive_and_conventional_cells_ase(self):
         c = CifData(file=tmpf.name)
 
         ase = c.get_structure(converter='ase', primitive_cell=False).get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 15)
+        self.assertEqual(ase.get_global_number_of_atoms(), 15)
 
         ase = c.get_structure(converter='ase').get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 15)
+        self.assertEqual(ase.get_global_number_of_atoms(), 15)
 
         ase = c.get_structure(converter='ase', primitive_cell=True, subtrans_included=False).get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 5)
+        self.assertEqual(ase.get_global_number_of_atoms(), 5)
 
     @unittest.skipIf(not has_ase(), 'Unable to import ase')
     @unittest.skipIf(not has_pycifrw(), 'Unable to import PyCifRW')
@@ -310,13 +310,13 @@ def test_ase_primitive_and_conventional_cells_pymatgen(self):
         c = CifData(file=tmpf.name)
 
         ase = c.get_structure(converter='pymatgen', primitive_cell=False).get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 15)
+        self.assertEqual(ase.get_global_number_of_atoms(), 15)
 
         ase = c.get_structure(converter='pymatgen').get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 15)
+        self.assertEqual(ase.get_global_number_of_atoms(), 15)
 
         ase = c.get_structure(converter='pymatgen', primitive_cell=True).get_ase()
-        self.assertEqual(ase.get_number_of_atoms(), 5)
+        self.assertEqual(ase.get_global_number_of_atoms(), 5)
 
     @unittest.skipIf(not has_pycifrw(), 'Unable to import PyCifRW')
     def test_pycifrw_from_datablocks(self):

From 39d8ea04efd2edf7b6a5cab6777a32ba96fd7949 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Mon, 13 Apr 2020 23:56:59 +0200
Subject: [PATCH 46/54] Unpin the `click` dependency (#3921)

This was pinned because `7.1` broke our pre-commit hooks. The breaking
of the tests was caused by two separate commits:

* 718485be48263056e7036ea9a60ce11b47e2fc26
* 37d897069f58f7f2a016c14b0620c6d387430b4b

The first one changes the format of the output of a command if a
required option is missing. The double quotes around the option have
been changed to single quotes.

The second commit is more pernicious: the `Editor.edit_file` method was
updated to escape the editor and filename parameters passed to the sub
process call. Our tests were abusing the absence of escaping to pass
arguments to the editor command, in our case `vim`, so as to make the
normally interactive command a non-interactive one for testing
purposes. Now that the editor command is escaped, the arguments are
understood by bash to be part of the command, which of course then
cannot be found, and the test fails.

We "fix" this problem by patching the changed method in `click` to undo
the escaping.
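For reference, a minimal sketch of such a monkeypatch, shaped as the
pytest fixture `non_interactive_editor` that the updated tests request
(the actual implementation lands in `tests/conftest.py` below; this
version is illustrative only and assumes `click==7.1`):

    import os
    import subprocess
    from unittest.mock import patch

    import click
    import click._termui_impl
    import pytest

    @pytest.fixture
    def non_interactive_editor(request):
        """Use the parametrized command as the editor and undo click's escaping."""
        os.environ['EDITOR'] = request.param
        os.environ['VISUAL'] = request.param

        def edit_file(self, filename):
            # Same flow as `Editor.edit_file` in click 7.1, minus the escaping,
            # so a command like `sleep 1; vim -cwq` is once again interpreted
            # by the shell instead of being treated as a single executable.
            editor = self.get_editor()
            environ = os.environ.copy()
            if self.env:
                environ.update(self.env)
            process = subprocess.Popen('{} {}'.format(editor, filename), env=environ, shell=True)
            if process.wait() != 0:
                raise click.ClickException('{}: Editing failed!'.format(editor))

        with patch.object(click._termui_impl.Editor, 'edit_file', edit_file):
            yield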
--- aiida/manage/tests/pytest_fixtures.py | 72 +++---- docs/requirements_for_rtd.txt | 2 +- docs/source/verdi/verdi_user_guide.rst | 26 +++ environment.yml | 2 +- pytest.ini | 2 + setup.json | 2 +- tests/cmdline/commands/test_code.py | 185 +++++++++--------- tests/cmdline/commands/test_computer.py | 63 +++--- .../params/options/test_conditional.py | 15 +- tests/cmdline/utils/test_multiline.py | 93 +++++---- tests/conftest.py | 45 +++++ 11 files changed, 292 insertions(+), 215 deletions(-) diff --git a/aiida/manage/tests/pytest_fixtures.py b/aiida/manage/tests/pytest_fixtures.py index d5c9c61837..310b07e944 100644 --- a/aiida/manage/tests/pytest_fixtures.py +++ b/aiida/manage/tests/pytest_fixtures.py @@ -7,6 +7,7 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=redefined-outer-name,unused-argument """ Collection of pytest fixtures using the TestManager for easy testing of AiiDA plugins. @@ -16,8 +17,8 @@ * aiida_local_code_factory """ -import tempfile import shutil +import tempfile import pytest from aiida.manage.tests import test_manager, get_test_backend_name, get_test_profile_name @@ -30,14 +31,13 @@ def aiida_profile(): Note: scope='session' limits this fixture to run once per session. Thanks to ``autouse=True``, you don't actually need to depend on it explicitly - it will activate as soon as you import it in your ``conftest.py``. """ - # create new TestManager instance - with test_manager(backend=get_test_backend_name(), profile_name=get_test_profile_name()) as test_mgr: - yield test_mgr - # here, the TestManager instance has already been destroyed + with test_manager(backend=get_test_backend_name(), profile_name=get_test_profile_name()) as manager: + yield manager + # Leaving the context manager will automatically cause the `TestManager` instance to be destroyed @pytest.fixture(scope='function') -def clear_database(clear_database_after_test): # pylint: disable=redefined-outer-name,unused-argument +def clear_database(clear_database_after_test): """Alias for 'clear_database_after_test'. Clears the database after each test. Use of the explicit @@ -46,18 +46,15 @@ def clear_database(clear_database_after_test): # pylint: disable=redefined-oute @pytest.fixture(scope='function') -def clear_database_after_test(aiida_profile): # pylint: disable=redefined-outer-name - """Clear the database after each test. - """ +def clear_database_after_test(aiida_profile): + """Clear the database after the test.""" yield - # after the test function has completed, reset the database aiida_profile.reset_db() @pytest.fixture(scope='function') -def clear_database_before_test(aiida_profile): # pylint: disable=redefined-outer-name - """Clear the database before each test. - """ +def clear_database_before_test(aiida_profile): + """Clear the database before the test.""" aiida_profile.reset_db() yield @@ -81,7 +78,7 @@ def temp_dir(): @pytest.fixture(scope='function') -def aiida_localhost(temp_dir): # pylint: disable=redefined-outer-name +def aiida_localhost(temp_dir): """Get an AiiDA computer for localhost. Usage:: @@ -118,7 +115,7 @@ def test_1(aiida_localhost): @pytest.fixture(scope='function') -def aiida_local_code_factory(aiida_localhost): # pylint: disable=redefined-outer-name +def aiida_local_code_factory(aiida_localhost): """Get an AiiDA code on localhost. 
Searches in the PATH for a given executable and creates an AiiDA code with provided entry point. @@ -126,47 +123,56 @@ def aiida_local_code_factory(aiida_localhost): # pylint: disable=redefined-oute Usage:: def test_1(aiida_local_code_factory): - code = aiida_local_code_factory('pw.x', 'quantumespresso.pw') + code = aiida_local_code_factory('quantumespresso.pw', '/usr/bin/pw.x') # use code for testing ... - :return: A function get_code(executable, entry_point) that returns the Code node. + :return: A function get_code(entry_point, executable) that returns the `Code` node. :rtype: object """ - def get_code(entry_point, executable, computer=aiida_localhost, prepend_text=None, append_text=None): + def get_code(entry_point, executable, computer=aiida_localhost, label=None, prepend_text=None, append_text=None): """Get local code. + Sets up code for given entry point on given computer. :param entry_point: Entry point of calculation plugin :param executable: name of executable; will be searched for in local system PATH. :param computer: (local) AiiDA computer - :param prepend_text: a string of code that will be put in the scheduler script before the - execution of the code. - :param append_text: a string of code that will be put in the scheduler script after the - execution of the code. - :return: The code node + :param prepend_text: a string of code that will be put in the scheduler script before the execution of the code. + :param append_text: a string of code that will be put in the scheduler script after the execution of the code. + :return: the `Code` either retrieved from the database or created if it did not yet exist. :rtype: :py:class:`aiida.orm.Code` """ - from aiida.orm import Code + from aiida.common import exceptions + from aiida.orm import Code, Computer, QueryBuilder - codes = Code.objects.find(filters={'label': executable}) # pylint: disable=no-member - if codes: - return codes[0] + if label is None: + label = executable - executable_path = shutil.which(executable) + builder = QueryBuilder().append(Computer, filters={'uuid': computer.uuid}, tag='computer') + builder.append(Code, filters={'label': label, 'attributes.input_plugin': entry_point}, with_computer='computer') + try: + code = builder.one()[0] + except (exceptions.MultipleObjectsError, exceptions.NotExistent): + code = None + else: + return code + + executable_path = shutil.which(executable) if not executable_path: raise ValueError('The executable "{}" was not found in the $PATH.'.format(executable)) - code = Code( - input_plugin_name=entry_point, - remote_computer_exec=[computer, executable_path], - ) + code = Code(input_plugin_name=entry_point, remote_computer_exec=[computer, executable_path]) + code.label = label + code.description = label + if prepend_text is not None: code.set_prepend_text(prepend_text) + if append_text is not None: code.set_append_text(append_text) - code.label = executable + return code.store() return get_code diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt index cb290f8f83..1d2e60adb3 100644 --- a/docs/requirements_for_rtd.txt +++ b/docs/requirements_for_rtd.txt @@ -7,7 +7,7 @@ circus~=0.16.1 click-completion~=0.5.1 click-config-file~=0.5.0 click-spinner~=0.1.8 -click==7.0 +click~=7.0 coverage<5.0 django~=2.2 docutils==0.15.2 diff --git a/docs/source/verdi/verdi_user_guide.rst b/docs/source/verdi/verdi_user_guide.rst index 90bac3a17c..abcc992071 100644 --- a/docs/source/verdi/verdi_user_guide.rst +++ b/docs/source/verdi/verdi_user_guide.rst @@ -477,9 +477,11 @@ Below is a 
list with all available subcommands. addresses. Automatically discovered archive URLs will be downloadeded and added to ARCHIVES for importing + -G, --group GROUP Specify group to which all the import nodes will be added. If such a group does not exist, it will be created automatically. + -e, --extras-mode-existing [keep_existing|update_existing|mirror|none|ask] Specify which extras from the export archive should be imported for nodes that are @@ -492,20 +494,25 @@ Below is a list with all available subcommands. mirror: import all extras and remove any existing extras that are not present in the archive. none: do not import any extras. + -n, --extras-mode-new [import|none] Specify whether to import extras of new nodes: import: import extras. none: do not import extras. + --comment-mode [newest|overwrite] Specify the way to import Comments with identical UUIDs: newest: Only the newest Comments (based on mtime) (default).overwrite: Replace existing Comments with those from the import file. + --migration / --no-migration Force migration of export file archives, if needed. [default: True] + -n, --non-interactive Non-interactive mode: never prompt for input. + --help Show this message and exit. @@ -616,11 +623,13 @@ Below is a list with all available subcommands. Options: -n, --non-interactive Non-interactive mode: never prompt for input. + --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. The email address is exported along with the data, when sharing it. [required] + --first-name NONEMPTYSTRING First name of the user. [required] --last-name NONEMPTYSTRING Last name of the user. [required] --institution NONEMPTYSTRING Institution of the user. [required] @@ -630,18 +639,22 @@ Below is a list with all available subcommands. Database backend to use. --db-host HOSTNAME Database server host. Leave empty for "peer" authentication. + --db-port INTEGER Database server port. --db-name NONEMPTYSTRING Name of the database to create. --db-username NONEMPTYSTRING Name of the database user to create. --db-password TEXT Password of the database user. --su-db-name TEXT Name of the template database to connect to as the database superuser. + --su-db-username TEXT User name of the database super user. --su-db-password TEXT Password to connect as the database superuser. + --repository DIRECTORY Absolute path to the file repository. --config FILE Load option values from configuration file in yaml format. + --help Show this message and exit. @@ -662,6 +675,7 @@ Below is a list with all available subcommands. Options: -e, --entry-point PLUGIN Only include nodes that are class or sub class of the class identified by this entry point. + -f, --force Do not ask for confirmation. --help Show this message and exit. @@ -688,6 +702,7 @@ Below is a list with all available subcommands. --debug Enable debugging --wsgi-profile Whether to enable WSGI profiler middleware for finding bottlenecks + --hookup / --no-hookup Hookup app to flask server --help Show this message and exit. @@ -709,13 +724,17 @@ Below is a list with all available subcommands. Specify the prefix of the label of the auto group (numbers might be automatically appended to generate unique names per run). + -n, --group-name TEXT Specify the name of the auto group [DEPRECATED, USE --auto-group-label-prefix instead]. This also enables auto-grouping. + -e, --exclude TEXT Exclude these classes from auto grouping (use full entrypoint strings). 
+ -i, --include TEXT Include these classes from auto grouping (use full entrypoint strings or "all"). + --help Show this message and exit. @@ -733,11 +752,13 @@ Below is a list with all available subcommands. Options: -n, --non-interactive Non-interactive mode: never prompt for input. + --profile PROFILE The name of the new profile. [required] --email EMAIL Email address associated with the data you generate. The email address is exported along with the data, when sharing it. [required] + --first-name NONEMPTYSTRING First name of the user. [required] --last-name NONEMPTYSTRING Last name of the user. [required] --institution NONEMPTYSTRING Institution of the user. [required] @@ -747,14 +768,17 @@ Below is a list with all available subcommands. Database backend to use. --db-host HOSTNAME Database server host. Leave empty for "peer" authentication. + --db-port INTEGER Database server port. --db-name NONEMPTYSTRING Name of the database to create. [required] --db-username NONEMPTYSTRING Name of the database user to create. [required] + --db-password TEXT Password of the database user. [required] --repository DIRECTORY Absolute path to the file repository. --config FILE Load option values from configuration file in yaml format. + --help Show this message and exit. @@ -774,9 +798,11 @@ Below is a list with all available subcommands. --no-startup When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script. + -i, --interface [ipython|bpython] Specify an interactive interpreter interface. + --help Show this message and exit. diff --git a/environment.yml b/environment.yml index d92ed0950a..e63cbefaa5 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - click-completion~=0.5.1 - click-config-file~=0.5.0 - click-spinner~=0.1.8 -- click==7.0 +- click~=7.0 - django~=2.2 - ete3~=3.1 - python-graphviz~=0.13 diff --git a/pytest.ini b/pytest.ini index 36877fc59c..c5128a6476 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,3 +9,5 @@ filterwarnings = ignore::DeprecationWarning:yaml: ignore::DeprecationWarning:pymatgen: ignore::DeprecationWarning:jsonbackend: + ignore::DeprecationWarning:reentry: + ignore::DeprecationWarning:pkg_resources: diff --git a/setup.json b/setup.json index 7f4b7783ca..59d707a2f1 100644 --- a/setup.json +++ b/setup.json @@ -26,7 +26,7 @@ "click-completion~=0.5.1", "click-config-file~=0.5.0", "click-spinner~=0.1.8", - "click==7.0", + "click~=7.0", "django~=2.2", "ete3~=3.1", "graphviz~=0.13", diff --git a/tests/cmdline/commands/test_code.py b/tests/cmdline/commands/test_code.py index 91d92b10ed..d61c3194ee 100644 --- a/tests/cmdline/commands/test_code.py +++ b/tests/cmdline/commands/test_code.py @@ -7,10 +7,14 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=unused-argument """Tests for the 'verdi code' command.""" import os import subprocess as sp +from textwrap import dedent + from click.testing import CliRunner +import pytest from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.commands.cmd_code import (setup_code, delete, hide, reveal, relabel, code_list, show, code_duplicate) @@ -24,13 +28,11 @@ class TestVerdiCodeSetup(AiidaTestCase): @classmethod def setUpClass(cls, *args, **kwargs): super().setUpClass(*args, **kwargs) - orm.Computer( + cls.computer = orm.Computer( name='comp', hostname='localhost', transport_type='local', 
scheduler_type='direct', workdir='/tmp/aiida' ).store() def setUp(self): - self.comp = orm.Computer.objects.get(name='comp') - self.cli_runner = CliRunner() self.this_folder = os.path.dirname(__file__) self.this_file = os.path.basename(__file__) @@ -42,46 +44,20 @@ def test_reachable(self): output = sp.check_output(['verdi', 'code', 'setup', '--help']) self.assertIn(b'Usage:', output) - def test_interactive_remote(self): - """Test interactive remote code setup.""" - - from aiida.orm import Code - os.environ['VISUAL'] = 'sleep 1; vim -cwq' - os.environ['EDITOR'] = 'sleep 1; vim -cwq' - label = 'interactive_remote' - user_input = '\n'.join([label, 'description', 'arithmetic.add', 'yes', self.comp.name, '/remote/abs/path']) - result = self.cli_runner.invoke(setup_code, input=user_input) - self.assertClickResultNoException(result) - self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code) - - def test_interactive_upload(self): - """Test interactive code setup.""" - from aiida.orm import Code - os.environ['VISUAL'] = 'sleep 1; vim -cwq' - os.environ['EDITOR'] = 'sleep 1; vim -cwq' - label = 'interactive_upload' - user_input = '\n'.join([label, 'description', 'arithmetic.add', 'no', self.this_folder, self.this_file]) - result = self.cli_runner.invoke(setup_code, input=user_input) - self.assertIsNone(result.exception, result.output) - self.assertIsInstance(Code.get_from_string('{}'.format(label)), Code) - def test_noninteractive_remote(self): """Test non-interactive remote code setup.""" - - from aiida.orm import Code label = 'noninteractive_remote' options = [ '--non-interactive', '--label={}'.format(label), '--description=description', - '--input-plugin=arithmetic.add', '--on-computer', '--computer={}'.format(self.comp.name), + '--input-plugin=arithmetic.add', '--on-computer', '--computer={}'.format(self.computer.name), '--remote-abs-path=/remote/abs/path' ] result = self.cli_runner.invoke(setup_code, options) self.assertClickResultNoException(result) - self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code) + self.assertIsInstance(orm.Code.get_from_string('{}@{}'.format(label, self.computer.name)), orm.Code) def test_noninteractive_upload(self): """Test non-interactive code setup.""" - from aiida.orm import Code label = 'noninteractive_upload' options = [ '--non-interactive', '--label={}'.format(label), '--description=description', @@ -90,23 +66,25 @@ def test_noninteractive_upload(self): ] result = self.cli_runner.invoke(setup_code, options) self.assertClickResultNoException(result) - self.assertIsInstance(Code.get_from_string('{}'.format(label)), Code) + self.assertIsInstance(orm.Code.get_from_string('{}'.format(label)), orm.Code) def test_from_config(self): """Test setting up a code from a config file""" - from aiida.orm import Code import tempfile label = 'noninteractive_config' with tempfile.NamedTemporaryFile('w') as handle: handle.write( - """--- -label: {l} -input_plugin: arithmetic.add -computer: {c} -remote_abs_path: /remote/abs/path -""".format(l=label, c=self.comp.name) + dedent( + """ + --- + label: {label} + input_plugin: arithmetic.add + computer: {computer} + remote_abs_path: /remote/abs/path + """ + ).format(label=label, computer=self.computer.name) ) handle.flush() result = self.cli_runner.invoke( @@ -115,17 +93,7 @@ def test_from_config(self): ) self.assertClickResultNoException(result) - self.assertIsInstance(Code.get_from_string('{}'.format(label)), Code) - - def test_mixed(self): - """Test mixed 
(interactive/from config) code setup.""" - from aiida.orm import Code - label = 'mixed_remote' - options = ['--description=description', '--on-computer', '--remote-abs-path=/remote/abs/path'] - user_input = '\n'.join([label, 'arithmetic.add', self.comp.name]) - result = self.cli_runner.invoke(setup_code, options, input=user_input) - self.assertClickResultNoException(result) - self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code) + self.assertIsInstance(orm.Code.get_from_string('{}'.format(label)), orm.Code) class TestVerdiCodeCommands(AiidaTestCase): @@ -136,19 +104,17 @@ class TestVerdiCodeCommands(AiidaTestCase): @classmethod def setUpClass(cls, *args, **kwargs): super().setUpClass(*args, **kwargs) - orm.Computer( + cls.computer = orm.Computer( name='comp', hostname='localhost', transport_type='local', scheduler_type='direct', workdir='/tmp/aiida' ).store() def setUp(self): - self.comp = orm.Computer.objects.get(name='comp') - try: code = orm.Code.get_from_string('code') except NotExistent: code = orm.Code( input_plugin_name='arithmetic.add', - remote_computer_exec=[self.comp, '/remote/abs/path'], + remote_computer_exec=[self.computer, '/remote/abs/path'], ) code.label = 'code' code.description = 'desc' @@ -175,14 +141,12 @@ def test_relabel_code(self): """Test force code relabeling.""" result = self.cli_runner.invoke(relabel, [str(self.code.pk), 'new_code']) self.assertIsNone(result.exception, result.output) - from aiida.orm import load_node - new_code = load_node(self.code.pk) + new_code = orm.load_node(self.code.pk) self.assertEqual(new_code.label, 'new_code') def test_relabel_code_full(self): self.cli_runner.invoke(relabel, [str(self.code.pk), 'new_code@comp']) - from aiida.orm import load_node - new_code = load_node(self.code.pk) + new_code = orm.load_node(self.code.pk) self.assertEqual(new_code.label, 'new_code') def test_relabel_code_full_bad(self): @@ -195,24 +159,22 @@ def test_code_delete_one_force(self): self.assertIsNone(result.exception, result.output) with self.assertRaises(NotExistent): - from aiida.orm import Code - Code.get_from_string('code') + orm.Code.get_from_string('code') def test_code_list(self): """Test code list command.""" # set up second code 'code2' - from aiida.orm import Code try: - code = Code.get_from_string('code2') + code = orm.Code.get_from_string('code2') except NotExistent: - code = Code( + code = orm.Code( input_plugin_name='templatereplacer', - remote_computer_exec=[self.comp, '/remote/abs/path'], + remote_computer_exec=[self.computer, '/remote/abs/path'], ) code.label = 'code2' code.store() - options = ['-A', '-a', '-o', '--input-plugin=arithmetic.add', '--computer={}'.format(self.comp.name)] + options = ['-A', '-a', '-o', '--input-plugin=arithmetic.add', '--computer={}'.format(self.computer.name)] result = self.cli_runner.invoke(code_list, options) self.assertIsNone(result.exception, result.output) self.assertTrue(str(self.code.pk) in result.output, 'PK of first code should be included') @@ -238,39 +200,13 @@ def test_code_show(self): self.assertIsNone(result.exception, result.output) self.assertTrue(str(self.code.pk) in result.output) - def test_code_duplicate_interactive(self): - """Test code duplication interactive.""" - from aiida.orm import Code - os.environ['VISUAL'] = 'sleep 1; vim -cwq' - os.environ['EDITOR'] = 'sleep 1; vim -cwq' - label = 'code_duplicate_interactive' - user_input = label + '\n\n\n\n\n\n' - result = self.cli_runner.invoke(code_duplicate, [str(self.code.pk)], input=user_input, 
catch_exceptions=False) - self.assertIsNone(result.exception, result.output) - - new_code = Code.get_from_string(label) - self.assertEqual(self.code.description, new_code.description) - self.assertEqual(self.code.get_prepend_text(), new_code.get_prepend_text()) - self.assertEqual(self.code.get_append_text(), new_code.get_append_text()) - - # test that providing "!" to description leads to empty description - # https://github.com/aiidateam/aiida-core/issues/3770 - label = 'code_duplicate_interactive2' - user_input = label + '\n!\n\n\n\n\n' - result = self.cli_runner.invoke(code_duplicate, [str(self.code.pk)], input=user_input, catch_exceptions=False) - self.assertIsNone(result.exception, result.output) - - new_code = Code.get_from_string(label) - self.assertEqual('', new_code.description) - def test_code_duplicate_non_interactive(self): """Test code duplication non-interactive.""" label = 'code_duplicate_noninteractive' result = self.cli_runner.invoke(code_duplicate, ['--non-interactive', '--label=' + label, str(self.code.pk)]) self.assertIsNone(result.exception, result.output) - from aiida.orm import Code - new_code = Code.get_from_string(label) + new_code = orm.Code.get_from_string(label) self.assertEqual(self.code.description, new_code.description) self.assertEqual(self.code.get_prepend_text(), new_code.get_prepend_text()) self.assertEqual(self.code.get_append_text(), new_code.get_append_text()) @@ -286,3 +222,68 @@ def setUp(self): def test_code_list_no_codes_error_message(self): result = self.cli_runner.invoke(code_list) self.assertEqual(1, result.output.count('# No codes found matching the specified criteria.')) + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True) +def test_interactive_remote(clear_database_before_test, aiida_localhost, non_interactive_editor): + """Test interactive remote code setup.""" + label = 'interactive_remote' + user_input = '\n'.join([label, 'description', 'arithmetic.add', 'yes', aiida_localhost.name, '/remote/abs/path']) + result = CliRunner().invoke(setup_code, input=user_input) + assert result.exception is None + assert isinstance(orm.Code.get_from_string('{}@{}'.format(label, aiida_localhost.name)), orm.Code) + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True) +def test_interactive_upload(clear_database_before_test, aiida_localhost, non_interactive_editor): + """Test interactive code setup.""" + label = 'interactive_upload' + dirname = os.path.dirname(__file__) + basename = os.path.basename(__file__) + user_input = '\n'.join([label, 'description', 'arithmetic.add', 'no', dirname, basename]) + result = CliRunner().invoke(setup_code, input=user_input) + assert result.exception is None + assert isinstance(orm.Code.get_from_string('{}'.format(label)), orm.Code) + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True) +def test_mixed(clear_database_before_test, aiida_localhost, non_interactive_editor): + """Test mixed (interactive/from config) code setup.""" + from aiida.orm import Code + label = 'mixed_remote' + options = ['--description=description', '--on-computer', '--remote-abs-path=/remote/abs/path'] + user_input = '\n'.join([label, 'arithmetic.add', aiida_localhost.name]) + result = CliRunner().invoke(setup_code, options, input=user_input) + assert result.exception is None + assert isinstance(Code.get_from_string('{}@{}'.format(label, aiida_localhost.name)), Code) + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; 
vim -cwq',), indirect=True) +def test_code_duplicate_interactive(clear_database_before_test, aiida_local_code_factory, non_interactive_editor): + """Test code duplication interactive.""" + label = 'code_duplicate_interactive' + user_input = label + '\n\n\n\n\n\n' + code = aiida_local_code_factory('arithmetic.add', '/bin/cat', label='code') + result = CliRunner().invoke(code_duplicate, [str(code.pk)], input=user_input) + assert result.exception is None, result.exception + + duplicate = orm.Code.get_from_string(label) + assert code.description == duplicate.description + assert code.get_prepend_text() == duplicate.get_prepend_text() + assert code.get_append_text() == duplicate.get_append_text() + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True) +def test_code_duplicate_ignore(clear_database_before_test, aiida_local_code_factory, non_interactive_editor): + """Providing "!" to description should lead to empty description. + + Regression test for: https://github.com/aiidateam/aiida-core/issues/3770 + """ + label = 'code_duplicate_interactive' + user_input = label + '\n!\n\n\n\n\n' + code = aiida_local_code_factory('arithmetic.add', '/bin/cat', label='code') + result = CliRunner().invoke(code_duplicate, [str(code.pk)], input=user_input, catch_exceptions=False) + assert result.exception is None, result.exception + + duplicate = orm.Code.get_from_string(label) + assert duplicate.description == '' diff --git a/tests/cmdline/commands/test_computer.py b/tests/cmdline/commands/test_computer.py index 568550dc76..668728c21f 100644 --- a/tests/cmdline/commands/test_computer.py +++ b/tests/cmdline/commands/test_computer.py @@ -7,13 +7,14 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=unused-argument """Tests for the 'verdi computer' command.""" - from collections import OrderedDict import os import tempfile from click.testing import CliRunner +import pytest from aiida import orm from aiida.backends.testbase import AiidaTestCase @@ -117,36 +118,6 @@ def test_reachable(self): output = sp.check_output(['verdi', 'computer', 'setup', '--help']) self.assertIn(b'Usage:', output) - def test_interactive(self): - """Test verdi computer setup in interactive mode.""" - os.environ['VISUAL'] = 'sleep 1; vim -cwq' - os.environ['EDITOR'] = 'sleep 1; vim -cwq' - label = 'interactive_computer' - - options_dict = generate_setup_options_dict(replace_args={'label': label}, non_interactive=False) - # In any case, these would be managed by the visual editor - options_dict.pop('prepend-text') - options_dict.pop('append-text') - user_input = '\n'.join(generate_setup_options_interactive(options_dict)) - - result = self.cli_runner.invoke(computer_setup, input=user_input) - self.assertIsNone(result.exception, msg='There was an unexpected exception. 
Output: {}'.format(result.output)) - - new_computer = orm.Computer.objects.get(name=label) - self.assertIsInstance(new_computer, orm.Computer) - - self.assertEqual(new_computer.description, options_dict['description']) - self.assertEqual(new_computer.hostname, options_dict['hostname']) - self.assertEqual(new_computer.get_transport_type(), options_dict['transport']) - self.assertEqual(new_computer.get_scheduler_type(), options_dict['scheduler']) - self.assertEqual(new_computer.get_mpirun_command(), options_dict['mpirun-command'].split()) - self.assertEqual(new_computer.get_shebang(), options_dict['shebang']) - self.assertEqual(new_computer.get_workdir(), options_dict['work-dir']) - self.assertEqual(new_computer.get_default_mpiprocs_per_machine(), int(options_dict['mpiprocs-per-machine'])) - # For now I'm not writing anything in them - self.assertEqual(new_computer.get_prepend_text(), '') - self.assertEqual(new_computer.get_append_text(), '') - def test_mixed(self): """ Test verdi computer setup in mixed mode. @@ -749,3 +720,33 @@ def test_computer_duplicate_non_interactive(self): self.assertEqual(self.comp.get_default_mpiprocs_per_machine(), new_computer.get_default_mpiprocs_per_machine()) self.assertEqual(self.comp.get_prepend_text(), new_computer.get_prepend_text()) self.assertEqual(self.comp.get_append_text(), new_computer.get_append_text()) + + +@pytest.mark.parametrize('non_interactive_editor', ('sleep 1; vim -cwq',), indirect=True) +def test_interactive(clear_database_before_test, aiida_localhost, non_interactive_editor): + """Test verdi computer setup in interactive mode.""" + label = 'interactive_computer' + + options_dict = generate_setup_options_dict(replace_args={'label': label}, non_interactive=False) + # In any case, these would be managed by the visual editor + options_dict.pop('prepend-text') + options_dict.pop('append-text') + user_input = '\n'.join(generate_setup_options_interactive(options_dict)) + + result = CliRunner().invoke(computer_setup, input=user_input) + assert result.exception is None, 'There was an unexpected exception. 
Output: {}'.format(result.output)
+
+    new_computer = orm.Computer.objects.get(name=label)
+    assert isinstance(new_computer, orm.Computer)
+
+    assert new_computer.description == options_dict['description']
+    assert new_computer.hostname == options_dict['hostname']
+    assert new_computer.get_transport_type() == options_dict['transport']
+    assert new_computer.get_scheduler_type() == options_dict['scheduler']
+    assert new_computer.get_mpirun_command() == options_dict['mpirun-command'].split()
+    assert new_computer.get_shebang() == options_dict['shebang']
+    assert new_computer.get_workdir() == options_dict['work-dir']
+    assert new_computer.get_default_mpiprocs_per_machine() == int(options_dict['mpiprocs-per-machine'])
+    # For now I'm not writing anything in them
+    assert new_computer.get_prepend_text() == ''
+    assert new_computer.get_append_text() == ''
diff --git a/tests/cmdline/params/options/test_conditional.py b/tests/cmdline/params/options/test_conditional.py
index aaad971fb4..94c300eff8 100644
--- a/tests/cmdline/params/options/test_conditional.py
+++ b/tests/cmdline/params/options/test_conditional.py
@@ -66,7 +66,7 @@ def test_switch_on(self):
         runner = CliRunner()
         result = runner.invoke(cmd, ['--on'])
         self.assertIsNotNone(result.exception)
-        self.assertIn('Error: Missing option "--opt".', result.output)
+        self.assertTrue('Error: Missing option' in result.output and '--opt' in result.output)

     def test_flag_off(self):
         """
@@ -89,7 +89,7 @@ def test_flag_on(self):
         runner = CliRunner()
         result = runner.invoke(cmd, ['--on'])
         self.assertIsNotNone(result.exception)
-        self.assertIn('Error: Missing option "--opt".', result.output)
+        self.assertTrue('Error: Missing option' in result.output and '--opt' in result.output)

     def setup_multi_non_eager(self):
         """
@@ -139,11 +139,11 @@ def test_ab(self):
         runner, cmd = self.setup_multi_non_eager()
         result = runner.invoke(cmd, ['--a', '--opt-b=Bla'])
         self.assertIsNotNone(result.exception)
-        self.assertIn('Error: Missing option "--opt-a".', result.output)
+        self.assertTrue('Error: Missing option' in result.output and '--opt-a' in result.output)

         result_rev = runner.invoke(cmd, ['--opt-b=Bla', '--a'])
         self.assertIsNotNone(result_rev.exception)
-        self.assertIn('Error: Missing option "--opt-a".', result_rev.output)
+        self.assertTrue('Error: Missing option' in result_rev.output and '--opt-a' in result_rev.output)

     def test_ba(self):
         """
@@ -154,11 +154,11 @@ def test_ba(self):
         runner, cmd = self.setup_multi_non_eager()
         result = runner.invoke(cmd, ['--b', '--opt-a=Bla'])
         self.assertIsNotNone(result.exception)
-        self.assertIn('Error: Missing option "--opt-b".', result.output)
+        self.assertTrue('Error: Missing option' in result.output and '--opt-b' in result.output)

         result_rev = runner.invoke(cmd, ['--opt-a=Bla', '--b'])
         self.assertIsNotNone(result_rev.exception)
-        self.assertIn('Error: Missing option "--opt-b".', result_rev.output)
+        self.assertTrue('Error: Missing option' in result_rev.output and '--opt-b' in result_rev.output)

     @staticmethod
     def user_callback(_ctx, param, value):
@@ -181,9 +181,8 @@ def setup_flag_cond(**kwargs):
         @click.option('--flag', is_flag=True)
         @click.option('--opt-a', required_fn=lambda c: c.params.get('flag'), cls=ConditionalOption, **kwargs)
         def cmd(flag, opt_a):
-            """ A command with a flag and customizable options that dependon it """
+            """A command with a flag and customizable options that depend on it."""
             # pylint: disable=unused-argument
-
             click.echo('{}'.format(opt_a))

         return cmd
diff --git a/tests/cmdline/utils/test_multiline.py 
b/tests/cmdline/utils/test_multiline.py index 50fbee9451..8731972f30 100644 --- a/tests/cmdline/utils/test_multiline.py +++ b/tests/cmdline/utils/test_multiline.py @@ -7,56 +7,53 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=unused-argument """Unit tests for editing pre and post bash scripts, comments, etc.""" -import os -import unittest - -from click.testing import CliRunner +import pytest from aiida.cmdline.utils.multi_line_input import edit_pre_post, edit_comment +COMMAND = 'sleep 1 ; vim -c "g!/^#=/s/$/Test" -cwq' # Appends `Test` to every line NOT starting with `#=` + + +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_pre_post(non_interactive_editor): + result = edit_pre_post(summary={'Param 1': 'Value 1', 'Param 2': 'Value 1'}) + assert result[0] == 'Test\nTest\nTest' + assert result[1] == 'Test\nTest\nTest' + + +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_edit_pre_post(non_interactive_editor): + result = edit_pre_post(pre='OldPre', post='OldPost') + assert result[0] == 'Test\nOldPreTest\nTest' + assert result[1] == 'Test\nOldPostTest\nTest' + + +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_edit_pre_post_comment(non_interactive_editor): + """Test that lines starting with '#=' are ignored and are not ignored if they start with any other character.""" + result = edit_pre_post(pre='OldPre\n#=Delete me', post='OldPost #=Dont delete me') + assert result[0] == 'Test\nOldPreTest\nTest' + assert result[1] == 'Test\nOldPost #=Dont delete meTest\nTest' + + +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_edit_pre_bash_comment(non_interactive_editor): + """Test that bash comments starting with '#' are NOT deleted.""" + result = edit_pre_post(pre='OldPre\n# Dont delete me', post='OldPost # Dont delete me') + assert result[0] == 'Test\nOldPreTest\n# Dont delete meTest\nTest' + assert result[1] == 'Test\nOldPost # Dont delete meTest\nTest' + + +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_new_comment(non_interactive_editor): + new_comment = edit_comment() + assert new_comment == 'Test' + -class TestMultilineInput(unittest.TestCase): - """Test functions for editing pre and post bash scripts, comments, etc.""" - - def setUp(self): - ## Sleep 1 is needed because on some filesystems (e.g. some pre 10.13 Mac) the - ## filesystem returns the time with a precision of 1 second, and - ## click uses the timestamp to decide if the file was re-saved or not. 
- editor_cmd = 'sleep 1 ; vim -c "g!/^#=/s/$/Test" -cwq' # appends Test to - # every line that does NOT start with '#=' characters - os.environ['EDITOR'] = editor_cmd - os.environ['VISUAL'] = editor_cmd - self.runner = CliRunner() - - def test_pre_post(self): - result = edit_pre_post(summary={'Param 1': 'Value 1', 'Param 2': 'Value 1'}) - self.assertEqual(result[0], 'Test\nTest\nTest') - self.assertEqual(result[1], 'Test\nTest\nTest') - - def test_edit_pre_post(self): - result = edit_pre_post(pre='OldPre', post='OldPost') - self.assertEqual(result[0], 'Test\nOldPreTest\nTest') - self.assertEqual(result[1], 'Test\nOldPostTest\nTest') - - def test_edit_pre_post_comment(self): - """Test that lines starting with '#=' are ignored and are not ignored - if they start with any other character""" - result = edit_pre_post(pre='OldPre\n#=Delete me', post='OldPost #=Dont delete me') - self.assertEqual(result[0], 'Test\nOldPreTest\nTest') - self.assertEqual(result[1], 'Test\nOldPost #=Dont delete meTest\nTest') - - def test_edit_pre_bash_comment(self): - """Test that bash comments starting with '#' are NOT deleted""" - result = edit_pre_post(pre='OldPre\n# Dont delete me', post='OldPost # Dont delete me') - self.assertEqual(result[0], 'Test\nOldPreTest\n# Dont delete meTest\nTest') - self.assertEqual(result[1], 'Test\nOldPost # Dont delete meTest\nTest') - - def test_new_comment(self): - new_comment = edit_comment() - self.assertEqual(new_comment, 'Test') - - def test_edit_comment(self): - old_comment = 'OldComment' - new_comment = edit_comment(old_cmt=old_comment) - self.assertEqual(new_comment, old_comment + 'Test') +@pytest.mark.parametrize('non_interactive_editor', (COMMAND,), indirect=True) +def test_edit_comment(non_interactive_editor): + old_comment = 'OldComment' + new_comment = edit_comment(old_cmt=old_comment) + assert new_comment == old_comment + 'Test' diff --git a/tests/conftest.py b/tests/conftest.py index 29210b7305..98f36fa465 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,3 +11,48 @@ import pytest # pylint: disable=unused-import pytest_plugins = ['aiida.manage.tests.pytest_fixtures'] # pylint: disable=invalid-name + + +@pytest.fixture() +def non_interactive_editor(request): + """Fixture to patch click's `Editor.edit_file`. + + In `click==7.1` the `Editor.edit_file` command was changed to escape the `os.environ['EDITOR']` command. Our tests + are currently abusing this variable to define an automated vim command in order to make an interactive command + non-interactive, and escaping it makes bash interpret the command and its arguments as a single command instead. + Here we patch the method to remove the escaping of the editor command. 
+
+    :param request: the command to set for the editor that is to be called
+    """
+    import os
+    from unittest.mock import patch
+    from click._termui_impl import Editor
+
+    os.environ['EDITOR'] = request.param
+    os.environ['VISUAL'] = request.param
+
+    def edit_file(self, filename):
+        import os
+        import subprocess
+        import click
+
+        editor = self.get_editor()
+        if self.env:
+            environ = os.environ.copy()
+            environ.update(self.env)
+        else:
+            environ = None
+        try:
+            process = subprocess.Popen(
+                '{} {}'.format(editor, filename),  # This is the line that we change removing `shlex_quote`
+                env=environ,
+                shell=True,
+            )
+            exit_code = process.wait()
+            if exit_code != 0:
+                raise click.ClickException('{}: Editing failed!'.format(editor))
+        except OSError as exception:
+            raise click.ClickException('{}: Editing failed: {}'.format(editor, exception))
+
+    with patch.object(Editor, 'edit_file', edit_file):
+        yield

From 9a75173869eca4925bcd44a19c81ba66dce9eca7 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Tue, 14 Apr 2020 11:31:59 +0200
Subject: [PATCH 47/54] Deduplicate code for tests of archive migration code
 (#3924)

Each archive migration was being tested on some archives included in this
repo in `tests/fixtures/export/migrate` and the test was always the same:
simply take an archive, migrate it to the next version with the appropriate
method and check that both the metadata and data dictionaries of the
migrated data match those of the reference archive.

This same test was implemented for each migration method, but has now been
centralized in a single test using pytest to parametrize the versions.

The testing on external archives from `aiida-export-migration-tests` has
also been streamlined a bit, by defining a new base class called
`ArchiveMigrationTest` that provides some utility methods.
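Reduced to its core, the centralized test looks roughly like the condensed sketch below (for illustration only; the actual `migration_data` fixture in the diff additionally unpacks each archive with `Archive` so that the repository folder can be handed to migrations that need it, such as `migrate_v3_to_v4`):

```python
import pytest

from aiida.tools.importexport.migration.utils import verify_metadata_version
from aiida.tools.importexport.migration.v01_to_v02 import migrate_v1_to_v2
from tests.utils.archives import get_json_files


@pytest.mark.parametrize('version_old, version_new, migration_method', (('0.1', '0.2', migrate_v1_to_v2),))
def test_migration(version_old, version_new, migration_method):
    """Migrate the `version_old` reference archive and compare it with the `version_new` reference archive."""
    metadata_old, data_old = get_json_files('export_v{}_simple.aiida'.format(version_old), filepath='export/migrate')
    metadata_new, data_new = get_json_files('export_v{}_simple.aiida'.format(version_new), filepath='export/migrate')

    migration_method(metadata_old, data_old)
    verify_metadata_version(metadata_old, version=version_new)

    # The AiiDA version and conversion history may differ regardless of the migration, so ignore them
    for metadata in (metadata_old, metadata_new):
        metadata.pop('aiida_version', None)
        metadata.pop('conversion_info', None)

    assert metadata_old == metadata_new
    assert data_old == data_new
```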
--- .../importexport/migration/v03_to_v04.py | 5 +- .../tools/importexport/migration/__init__.py | 32 ++++++ .../importexport/migration/test_migrations.py | 62 ++++++++++++ .../importexport/migration/test_v01_to_v02.py | 59 ----------- .../importexport/migration/test_v02_to_v03.py | 73 ++------------ .../importexport/migration/test_v03_to_v04.py | 81 +-------------- .../importexport/migration/test_v04_to_v05.py | 98 ++----------------- .../importexport/migration/test_v05_to_v06.py | 76 ++------------ .../importexport/migration/test_v06_to_v07.py | 84 +++------------- .../importexport/migration/test_v07_to_v08.py | 72 ++------------ 10 files changed, 146 insertions(+), 496 deletions(-) create mode 100644 tests/tools/importexport/migration/test_migrations.py delete mode 100644 tests/tools/importexport/migration/test_v01_to_v02.py diff --git a/aiida/tools/importexport/migration/v03_to_v04.py b/aiida/tools/importexport/migration/v03_to_v04.py index cd7a8e32da..32745f73f7 100644 --- a/aiida/tools/importexport/migration/v03_to_v04.py +++ b/aiida/tools/importexport/migration/v03_to_v04.py @@ -432,7 +432,7 @@ def add_extras(data): data.update({'node_extras': node_extras, 'node_extras_conversion': node_extras_conversion}) -def migrate_v3_to_v4(metadata, data, folder, *args): # pylint: disable=unused-argument +def migrate_v3_to_v4(metadata, data, *args): """ Migration of export files from v0.3 to v0.4 @@ -446,6 +446,9 @@ def migrate_v3_to_v4(metadata, data, folder, *args): # pylint: disable=unused-a verify_metadata_version(metadata, old_version) update_metadata(metadata, new_version) + # The trajectory data migration requires the folder containing all the repository files of the archive + folder = args[0] + # Apply migrations in correct sequential order migration_base_data_plugin_type_string(data) migration_process_type(metadata, data) diff --git a/tests/tools/importexport/migration/__init__.py b/tests/tools/importexport/migration/__init__.py index 2776a55f97..3a12435017 100644 --- a/tests/tools/importexport/migration/__init__.py +++ b/tests/tools/importexport/migration/__init__.py @@ -7,3 +7,35 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +"""Module with tests for export archive migrations.""" +from aiida.backends.testbase import AiidaTestCase +from aiida.tools.importexport.migration.utils import verify_metadata_version +from tests.utils.archives import get_json_files + + +class ArchiveMigrationTest(AiidaTestCase): + """Base class to write specific tests for a particular export archive migration.""" + + @classmethod + def setUpClass(cls, *args, **kwargs): + super().setUpClass(*args, **kwargs) + cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} + cls.core_archive = {'filepath': 'export/migrate'} + cls.maxDiff = None # pylint: disable=invalid-name + + def migrate(self, filename_archive, version_old, version_new, migration_method): + """Migrate one of the archives from `aiida-export-migration-tests`. 
+ + :param filename_archive: the relative file name of the archive + :param version_old: version of the archive + :param version_new: version to migrate to + :param migration_method: the migration method that should convert between version_old and version_new + :return: the migrated metadata and data as a tuple + """ + metadata, data = get_json_files(filename_archive, **self.external_archive) + verify_metadata_version(metadata, version=version_old) + + migration_method(metadata, data) + verify_metadata_version(metadata, version=version_new) + + return metadata, data diff --git a/tests/tools/importexport/migration/test_migrations.py b/tests/tools/importexport/migration/test_migrations.py new file mode 100644 index 0000000000..75da75a855 --- /dev/null +++ b/tests/tools/importexport/migration/test_migrations.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# pylint: disable=redefined-outer-name +"""Test the export archive migrations on the archives included in `tests/fixtures/export/migrate`.""" +import copy +import pytest + +from aiida import get_version +from aiida.tools.importexport.common import Archive +from aiida.tools.importexport.migration.v01_to_v02 import migrate_v1_to_v2 +from aiida.tools.importexport.migration.v02_to_v03 import migrate_v2_to_v3 +from aiida.tools.importexport.migration.v03_to_v04 import migrate_v3_to_v4 +from aiida.tools.importexport.migration.v04_to_v05 import migrate_v4_to_v5 +from aiida.tools.importexport.migration.v05_to_v06 import migrate_v5_to_v6 +from aiida.tools.importexport.migration.v06_to_v07 import migrate_v6_to_v7 +from aiida.tools.importexport.migration.v07_to_v08 import migrate_v7_to_v8 +from aiida.tools.importexport.migration.utils import verify_metadata_version +from tests.utils.archives import get_json_files, get_archive_file + + +@pytest.fixture +def migration_data(request): + """For a given tuple of two subsequent versions and corresponding migration method, return metadata and data.""" + version_old, version_new, migration_method = request.param + + filepath_archive = 'export_v{}_simple.aiida'.format(version_new) + metadata_new, data_new = get_json_files(filepath_archive, filepath='export/migrate') + verify_metadata_version(metadata_new, version=version_new) + + filepath_archive = get_archive_file('export_v{}_simple.aiida'.format(version_old), filepath='export/migrate') + + with Archive(filepath_archive) as archive: + metadata_old = copy.deepcopy(archive.meta_data) + data_old = copy.deepcopy(archive.data) + + migration_method(metadata_old, data_old, archive.folder) + verify_metadata_version(metadata_old, version=version_new) + + yield version_old, version_new, metadata_old, metadata_new, data_old, data_new + + +@pytest.mark.parametrize( + 'migration_data', + (('0.1', '0.2', migrate_v1_to_v2), ('0.2', '0.3', migrate_v2_to_v3), ('0.3', '0.4', migrate_v3_to_v4), + ('0.4', '0.5', migrate_v4_to_v5), ('0.5', '0.6', migrate_v5_to_v6), ('0.6', '0.7', migrate_v6_to_v7), + ('0.7', '0.8', migrate_v7_to_v8)), + indirect=True +) +def test_migrations(migration_data): + """Test each migration method from the `aiida.tools.importexport.migration` module.""" + version_old, version_new, metadata_old, metadata_new, data_old, data_new = migration_data + + # Remove AiiDA version, since this may change regardless of the migration function + metadata_old.pop('aiida_version') + metadata_new.pop('aiida_version') + + # Assert conversion message in `metadata.json` is correct and then remove it for later assertions + metadata_new.pop('conversion_info') + message = 'Converted 
from version {} to {} with AiiDA v{}'.format(version_old, version_new, get_version()) + assert metadata_old.pop('conversion_info')[-1] == message, 'Conversion message after migration is wrong' + + assert metadata_old == metadata_new + assert data_old == data_new diff --git a/tests/tools/importexport/migration/test_v01_to_v02.py b/tests/tools/importexport/migration/test_v01_to_v02.py deleted file mode 100644 index 4dcc92e27a..0000000000 --- a/tests/tools/importexport/migration/test_v01_to_v02.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -########################################################################### -# Copyright (c), The AiiDA team. All rights reserved. # -# This file is part of the AiiDA code. # -# # -# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # -# For further information on the license, see the LICENSE.txt file # -# For further information please visit http://www.aiida.net # -########################################################################### -"""Test export file migration from export version 0.1 to 0.2""" - -from aiida import get_version -from aiida.backends.testbase import AiidaTestCase -from aiida.tools.importexport.migration.utils import verify_metadata_version -from aiida.tools.importexport.migration.v01_to_v02 import migrate_v1_to_v2 - -from tests.utils.archives import get_json_files - - -class TestMigrateV01toV02(AiidaTestCase): - """Test migration of export files from export version 0.1 to 0.2""" - - def test_migrate_v1_to_v2(self): - """Test function migrate_v1_to_v2""" - # Get metadata.json and data.json as dicts from v0.1 file archive - metadata_v1, data_v1 = get_json_files('export_v0.1_simple.aiida', filepath='export/migrate') - verify_metadata_version(metadata_v1, version='0.1') - - # Get metadata.json and data.json as dicts from v0.2 file archive - metadata_v2, data_v2 = get_json_files('export_v0.2_simple.aiida', filepath='export/migrate') - verify_metadata_version(metadata_v2, version='0.2') - - # Migrate to v0.2 - migrate_v1_to_v2(metadata_v1, data_v1) - verify_metadata_version(metadata_v1, version='0.2') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v1.pop('aiida_version') - metadata_v2.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - conversion_message = 'Converted from version 0.1 to 0.2 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v1.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - metadata_v2.pop('conversion_info') - - # Assert changes were performed correctly - self.maxDiff = None # pylint: disable=invalid-name - self.assertDictEqual( - metadata_v1, - metadata_v2, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v1, data_v2, msg='After migration, data.json should equal intended data.json from archives' - ) diff --git a/tests/tools/importexport/migration/test_v02_to_v03.py b/tests/tools/importexport/migration/test_v02_to_v03.py index 0d5d00b6c3..8a0a0c0cc1 100644 --- a/tests/tools/importexport/migration/test_v02_to_v03.py +++ b/tests/tools/importexport/migration/test_v02_to_v03.py @@ -7,79 +7,21 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -"""Test export 
file migration from export version 0.2 to 0.3""" # pylint: disable=too-many-branches - -from aiida.backends.testbase import AiidaTestCase -from aiida.tools.importexport.migration.utils import verify_metadata_version +"""Test export file migration from export version 0.2 to 0.3""" from aiida.tools.importexport.migration.v02_to_v03 import migrate_v2_to_v3 from tests.utils.archives import get_json_files +from . import ArchiveMigrationTest -class TestMigrateV02toV03(AiidaTestCase): - """Test migration of export files from export version 0.2 to 0.3""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v2_to_v3(self): - """Test function migrate_v2_to_v3""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.2 file archive - metadata_v2, data_v2 = get_json_files('export_v0.2_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v2, version='0.2') - - # Get metadata.json and data.json as dicts from v0.3 file archive - metadata_v3, data_v3 = get_json_files('export_v0.3_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v3, version='0.3') - - # Migrate to v0.3 - migrate_v2_to_v3(metadata_v2, data_v2) - verify_metadata_version(metadata_v2, version='0.3') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v2.pop('aiida_version') - metadata_v3.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - conversion_message = 'Converted from version 0.2 to 0.3 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v2.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - metadata_v3.pop('conversion_info') - - # Assert changes were performed correctly - self.maxDiff = None # pylint: disable=invalid-name - self.assertDictEqual( - metadata_v2, - metadata_v3, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v2, data_v3, msg='After migration, data.json should equal intended data.json from archives' - ) - - def test_migrate_v2_to_v3_complete(self): - """Test migration for file containing complete v0.2 era possibilities""" - - # Get metadata.json and data.json as dicts from v0.2 file archive - metadata, data = get_json_files('export_v0.2.aiida', **self.external_archive) - verify_metadata_version(metadata, version='0.2') +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" - # Migrate to v0.3 - migrate_v2_to_v3(metadata, data) - verify_metadata_version(metadata, version='0.3') + def test_migrate_external(self): + """Test the migration on the test archive provided by the external test package.""" + metadata, data = self.migrate('export_v0.2.aiida', '0.2', '0.3', migrate_v2_to_v3) - self.maxDiff = None # pylint: disable=invalid-name # Check link types legal_link_types = {'unspecified', 'createlink', 'returnlink', 'inputlink', 'calllink'} for link in data['links_uuid']: @@ -137,7 +79,6 @@ def test_compare_migration_with_aiida_made(self): metadata_v3.pop('aiida_version') self.assertDictEqual(metadata_v2, metadata_v3) - self.maxDiff = None # Compare 'data.json' self.assertEqual(len(data_v2), len(data_v3)) diff --git 
a/tests/tools/importexport/migration/test_v03_to_v04.py b/tests/tools/importexport/migration/test_v03_to_v04.py index 8ec51bfcaf..63a7f151b0 100644 --- a/tests/tools/importexport/migration/test_v03_to_v04.py +++ b/tests/tools/importexport/migration/test_v03_to_v04.py @@ -7,13 +7,11 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -"""Test export file migration from export version 0.3 to 0.4""" # pylint: disable=too-many-locals,too-many-branches,too-many-statements - +"""Test export file migration from export version 0.3 to 0.4""" import tarfile import zipfile -from aiida.backends.testbase import AiidaTestCase from aiida.common.exceptions import NotExistent from aiida.common.folders import SandboxFolder from aiida.common.json import load as jsonload @@ -22,79 +20,13 @@ from aiida.tools.importexport.migration.v03_to_v04 import migrate_v3_to_v4 from tests.utils.archives import get_archive_file, get_json_files +from . import ArchiveMigrationTest -class TestMigrateV03toV04(AiidaTestCase): - """Test migration of export files from export version 0.3 to 0.4""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v3_to_v4(self): - """Test function migrate_v3_to_v4""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.4 file archive - metadata_v4, data_v4 = get_json_files('export_v0.4_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v4, version='0.4') - - # Get metadata.json and data.json as dicts from v0.3 file archive - # Cannot use 'get_json_files' for 'export_v0.3_simple.aiida', - # because we need to pass the SandboxFolder to 'migrate_v3_to_v4' - dirpath_archive = get_archive_file('export_v0.3_simple.aiida', **self.core_archive) - - with SandboxFolder(sandbox_in_repo=False) as folder: - if zipfile.is_zipfile(dirpath_archive): - extract_zip(dirpath_archive, folder, silent=True) - elif tarfile.is_tarfile(dirpath_archive): - extract_tar(dirpath_archive, folder, silent=True) - else: - raise ValueError('invalid file format, expected either a zip archive or gzipped tarball') - - try: - with open(folder.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle: - data_v3 = jsonload(fhandle) - with open(folder.get_abs_path('metadata.json'), 'r', encoding='utf8') as fhandle: - metadata_v3 = jsonload(fhandle) - except IOError: - raise NotExistent('export archive does not contain the required file {}'.format(fhandle.filename)) - - verify_metadata_version(metadata_v3, version='0.3') - - # Migrate to v0.4 - migrate_v3_to_v4(metadata_v3, data_v3, folder) - verify_metadata_version(metadata_v3, version='0.4') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v3.pop('aiida_version') - metadata_v4.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - self.maxDiff = None # pylint: disable=invalid-name - conversion_message = 'Converted from version 0.3 to 0.4 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v3.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - 
metadata_v4.pop('conversion_info') - - # Assert changes were performed correctly - self.assertDictEqual( - metadata_v3, - metadata_v4, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v3, data_v4, msg='After migration, data.json should equal intended data.json from archives' - ) +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" - def test_migrate_v3_to_v4_complete(self): + def test_migrate_external(self): """Test migration for file containing complete v0.3 era possibilities""" # Get metadata.json and data.json as dicts from v0.3 file archive @@ -138,7 +70,6 @@ def test_migrate_v3_to_v4_complete(self): ## Following checks are based on the archive-file ## Which means there are more legal entities, they are simply not relevant here. - self.maxDiff = None # pylint: disable=invalid-name # Check schema-changes new_node_attrs = {'node_type', 'process_type'} for change in new_node_attrs: @@ -331,13 +262,11 @@ def test_compare_migration_with_aiida_made(self): metadata_v4, data_v4 = get_json_files('export_v0.4.aiida', **self.external_archive) # Compare 'metadata.json' - self.maxDiff = None metadata_v3.pop('conversion_info') metadata_v3.pop('aiida_version') metadata_v4.pop('aiida_version') self.assertDictEqual(metadata_v3, metadata_v4) - self.maxDiff = None # Compare 'data.json' self.assertEqual(len(data_v3), len(data_v4)) diff --git a/tests/tools/importexport/migration/test_v04_to_v05.py b/tests/tools/importexport/migration/test_v04_to_v05.py index ab1d5f62e9..664d1f495a 100644 --- a/tests/tools/importexport/migration/test_v04_to_v05.py +++ b/tests/tools/importexport/migration/test_v04_to_v05.py @@ -8,104 +8,18 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test export file migration from export version 0.4 to 0.5""" - -import tarfile -import zipfile - -from aiida.backends.testbase import AiidaTestCase -from aiida.common.exceptions import NotExistent -from aiida.common.folders import SandboxFolder -from aiida.common.json import load as jsonload -from aiida.tools.importexport.common.archive import extract_tar, extract_zip -from aiida.tools.importexport.migration.utils import verify_metadata_version from aiida.tools.importexport.migration.v04_to_v05 import migrate_v4_to_v5 -from tests.utils.archives import get_archive_file, get_json_files - - -class TestMigrateV04toV05(AiidaTestCase): - """Test migration of export files from export version 0.4 to 0.5""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v4_to_v5(self): - """Test function migrate_v4_to_v5""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.5 file archive - metadata_v5, data_v5 = get_json_files('export_v0.5_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v5, version='0.5') - - # Get metadata.json and data.json as dicts from v0.4 file archive - # Cannot use 'get_json_files' for 'export_v0.4_simple.aiida', - # because we need to pass the SandboxFolder to 'migrate_v4_to_v5' - dirpath_archive = get_archive_file('export_v0.4_simple.aiida', **self.core_archive) - - with SandboxFolder(sandbox_in_repo=False) as folder: - if 
zipfile.is_zipfile(dirpath_archive): - extract_zip(dirpath_archive, folder, silent=True) - elif tarfile.is_tarfile(dirpath_archive): - extract_tar(dirpath_archive, folder, silent=True) - else: - raise ValueError('invalid file format, expected either a zip archive or gzipped tarball') - - try: - with open(folder.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle: - data_v4 = jsonload(fhandle) - with open(folder.get_abs_path('metadata.json'), 'r', encoding='utf8') as fhandle: - metadata_v4 = jsonload(fhandle) - except IOError: - raise NotExistent('export archive does not contain the required file {}'.format(fhandle.filename)) - - verify_metadata_version(metadata_v4, version='0.4') - - # Migrate to v0.5 - migrate_v4_to_v5(metadata_v4, data_v4) - verify_metadata_version(metadata_v4, version='0.5') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v4.pop('aiida_version') - metadata_v5.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - # Remove also 'conversion_info' from `metadata.json` of v0.5 file archive - self.maxDiff = None # pylint: disable=invalid-name - conversion_message = 'Converted from version 0.4 to 0.5 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v4.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - metadata_v5.pop('conversion_info') - - # Assert changes were performed correctly - self.assertDictEqual( - metadata_v4, - metadata_v5, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v4, data_v5, msg='After migration, data.json should equal intended data.json from archives' - ) +from . 
import ArchiveMigrationTest - def test_migrate_v4_to_v5_complete(self): - """Test migration for file containing complete v0.4 era possibilities""" - # Get metadata.json and data.json as dicts from v0.4 file archive - metadata, data = get_json_files('export_v0.4.aiida', **self.external_archive) - verify_metadata_version(metadata, version='0.4') +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" - # Migrate to v0.5 - migrate_v4_to_v5(metadata, data) - verify_metadata_version(metadata, version='0.5') + def test_migrate_external(self): + """Test the migration on the test archive provided by the external test package.""" + metadata, data = self.migrate('export_v0.4.aiida', '0.4', '0.5', migrate_v4_to_v5) - self.maxDiff = None # pylint: disable=invalid-name # Check schema-changes removed_computer_attrs = {'transport_params'} removed_node_attrs = {'nodeversion', 'public'} diff --git a/tests/tools/importexport/migration/test_v05_to_v06.py b/tests/tools/importexport/migration/test_v05_to_v06.py index 23bea83d46..08f1490ded 100644 --- a/tests/tools/importexport/migration/test_v05_to_v06.py +++ b/tests/tools/importexport/migration/test_v05_to_v06.py @@ -8,65 +8,25 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test export file migration from export version 0.5 to 0.6""" - from aiida.backends.general.migrations.calc_state import STATE_MAPPING -from aiida.backends.testbase import AiidaTestCase from aiida.tools.importexport.migration.utils import verify_metadata_version from aiida.tools.importexport.migration.v05_to_v06 import migrate_v5_to_v6 from tests.utils.archives import get_json_files +from . import ArchiveMigrationTest -class TestMigrateV05toV06(AiidaTestCase): - """Test migration of export files from export version 0.5 to 0.6""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v5_to_v6(self): - """Test migration for file containing complete v0.5 era possibilities""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.5 file archive - metadata_v5, data_v5 = get_json_files('export_v0.5_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v5, version='0.5') +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" - # Get metadata.json and data.json as dicts from v0.6 file archive - metadata_v6, data_v6 = get_json_files('export_v0.6_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v6, version='0.6') + def test_migrate_external(self): + """Test the migration on the test archive provided by the external test package.""" + _, data = self.migrate('export_v0.5_manual.aiida', '0.5', '0.6', migrate_v5_to_v6) - # Migrate to v0.6 - migrate_v5_to_v6(metadata_v5, data_v5) - verify_metadata_version(metadata_v5, version='0.6') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v5.pop('aiida_version') - metadata_v6.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - self.maxDiff = None # pylint: disable=invalid-name - conversion_message = 'Converted from version 0.5 to 0.6 with AiiDA v{}'.format(get_version()) 
- self.assertEqual( - metadata_v5.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - metadata_v6.pop('conversion_info') - - # Assert changes were performed correctly - self.assertDictEqual( - metadata_v5, - metadata_v6, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v5, data_v6, msg='After migration, data.json should equal intended data.json from archives' - ) + # Explicitly check that conversion dictionaries were removed + illegal_data_dicts = {'node_attributes_conversion', 'node_extras_conversion'} + for dict_ in illegal_data_dicts: + self.assertNotIn(dict_, data, msg="dictionary '{}' should have been removed from data.json".format(dict_)) def test_migrate_v5_to_v6_calc_states(self): """Test the data migration of legacy `JobCalcState` attributes. @@ -141,19 +101,3 @@ def test_migrate_v5_to_v6_datetime(self): 'the archive `export_v0.5_simple.aiida` did not contain a node with the attribute ' '`scheduler_lastchecktime` which is required for this test.' ) - - def test_migrate_v5_to_v6_complete(self): - """Test migration for file containing complete v0.5 era possibilities""" - # Get metadata.json and data.json as dicts from v0.5 file archive - metadata, data = get_json_files('export_v0.5_manual.aiida', **self.external_archive) - verify_metadata_version(metadata, version='0.5') - - # Migrate to v0.6 - migrate_v5_to_v6(metadata, data) - verify_metadata_version(metadata, version='0.6') - - self.maxDiff = None # pylint: disable=invalid-name - # Explicitly check that conversion dictionaries were removed - illegal_data_dicts = {'node_attributes_conversion', 'node_extras_conversion'} - for dict_ in illegal_data_dicts: - self.assertNotIn(dict_, data, msg="dictionary '{}' should have been removed from data.json".format(dict_)) diff --git a/tests/tools/importexport/migration/test_v06_to_v07.py b/tests/tools/importexport/migration/test_v06_to_v07.py index e856b43826..34f2f10d87 100644 --- a/tests/tools/importexport/migration/test_v06_to_v07.py +++ b/tests/tools/importexport/migration/test_v06_to_v07.py @@ -8,78 +8,18 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test export file migration from export version 0.6 to 0.7""" +from aiida.tools.importexport.migration.v06_to_v07 import migrate_v6_to_v7 + +from . 
import ArchiveMigrationTest + + +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" + + def test_migrate_external(self): + """Test the migration on the test archive provided by the external test package.""" + metadata, data = self.migrate('export_v0.6_manual.aiida', '0.6', '0.7', migrate_v6_to_v7) -from aiida.backends.testbase import AiidaTestCase -from aiida.tools.importexport.migration.utils import verify_metadata_version -from aiida.tools.importexport.migration.v06_to_v07 import ( - migrate_v6_to_v7, migration_data_migration_legacy_process_attributes -) - -from tests.utils.archives import get_json_files - - -class TestMigrateV06toV07(AiidaTestCase): - """Test migration of export files from export version 0.6 to 0.7""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v6_to_v7(self): - """Test migration for file containing complete v0.6 era possibilities""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.6 file archive - metadata_v6, data_v6 = get_json_files('export_v0.6_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v6, version='0.6') - - # Get metadata.json and data.json as dicts from v0.7 file archive - metadata_v7, data_v7 = get_json_files('export_v0.7_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v7, version='0.7') - - # Migrate to v0.7 - migrate_v6_to_v7(metadata_v6, data_v6) - verify_metadata_version(metadata_v6, version='0.7') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v6.pop('aiida_version') - metadata_v7.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - self.maxDiff = None # pylint: disable=invalid-name - conversion_message = 'Converted from version 0.6 to 0.7 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v6.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - metadata_v7.pop('conversion_info') - - # Assert changes were performed correctly - self.assertDictEqual( - metadata_v6, - metadata_v7, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v6, data_v7, msg='After migration, data.json should equal intended data.json from archives' - ) - - def test_migrate_v6_to_v7_complete(self): - """Test migration for file containing complete v0.6 era possibilities""" - # Get metadata.json and data.json as dicts from v0.6 file archive - metadata, data = get_json_files('export_v0.6_manual.aiida', **self.external_archive) - verify_metadata_version(metadata, version='0.6') - - # Migrate to v0.7 - migrate_v6_to_v7(metadata, data) - verify_metadata_version(metadata, version='0.7') - - self.maxDiff = None # pylint: disable=invalid-name # Check attributes of process.* nodes illegal_attrs = {'_sealed', '_finished', '_failed', '_aborted', '_do_abort'} new_attrs = {'sealed': True} @@ -119,6 +59,7 @@ def test_migrate_v6_to_v7_complete(self): def test_migration_0040_corrupt_archive(self): """Check CorruptArchive is raised for different cases during migration 0040""" from aiida.tools.importexport.common.exceptions import CorruptArchive + from 
aiida.tools.importexport.migration.v06_to_v07 import migration_data_migration_legacy_process_attributes # data has one "valid" entry, in the form of Node . # At least it has the needed key `node_type`. @@ -180,6 +121,7 @@ def test_migration_0040_corrupt_archive(self): def test_migration_0040_no_process_state(self): """Check old ProcessNodes without a `process_state` can be migrated""" + from aiida.tools.importexport.migration.v06_to_v07 import migration_data_migration_legacy_process_attributes # data has one "new" entry, in the form of Node . # data also has one "old" entry, in form of Node . # It doesn't have a `process_state` attribute (nor a `sealed` or `_sealed`) diff --git a/tests/tools/importexport/migration/test_v07_to_v08.py b/tests/tools/importexport/migration/test_v07_to_v08.py index 2068abb895..65ca6dcbda 100644 --- a/tests/tools/importexport/migration/test_v07_to_v08.py +++ b/tests/tools/importexport/migration/test_v07_to_v08.py @@ -8,76 +8,18 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Test export file migration from export version 0.7 to 0.8""" +from aiida.tools.importexport.migration.v07_to_v08 import migrate_v7_to_v8, migration_default_link_label -from aiida.backends.testbase import AiidaTestCase -from aiida.tools.importexport.migration.utils import verify_metadata_version -from aiida.tools.importexport.migration.v07_to_v08 import (migrate_v7_to_v8, migration_default_link_label) +from . import ArchiveMigrationTest -from tests.utils.archives import get_json_files +class TestMigrate(ArchiveMigrationTest): + """Tests specific for this archive migration.""" -class TestMigrateV07toV08(AiidaTestCase): - """Test migration of export files from export version 0.7 to 0.8""" + def test_migrate_external(self): + """Test the migration on the test archive provided by the external test package.""" + _, data = self.migrate('export_v0.7_manual.aiida', '0.7', '0.8', migrate_v7_to_v8) - @classmethod - def setUpClass(cls, *args, **kwargs): - super().setUpClass(*args, **kwargs) - - # Utility helpers - cls.external_archive = {'filepath': 'archives', 'external_module': 'aiida-export-migration-tests'} - cls.core_archive = {'filepath': 'export/migrate'} - - def test_migrate_v7_to_v8(self): - """Test migration for file containing complete v0.7 era possibilities""" - from aiida import get_version - - # Get metadata.json and data.json as dicts from v0.7 file archive - metadata_v7, data_v7 = get_json_files('export_v0.7_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v7, version='0.7') - - # Get metadata.json and data.json as dicts from v0.8 file archive - metadata_v8, data_v8 = get_json_files('export_v0.8_simple.aiida', **self.core_archive) - verify_metadata_version(metadata_v8, version='0.8') - - # Migrate to v0.8 - migrate_v7_to_v8(metadata_v7, data_v7) - verify_metadata_version(metadata_v7, version='0.8') - - # Remove AiiDA version, since this may change irregardless of the migration function - metadata_v7.pop('aiida_version') - metadata_v8.pop('aiida_version') - - # Assert conversion message in `metadata.json` is correct and then remove it for later assertions - self.maxDiff = None # pylint: disable=invalid-name - conversion_message = 'Converted from version 0.7 to 0.8 with AiiDA v{}'.format(get_version()) - self.assertEqual( - metadata_v7.pop('conversion_info')[-1], - conversion_message, - msg='The conversion message after migration is wrong' - ) - 
metadata_v8.pop('conversion_info') - - # Assert changes were performed correctly - self.assertDictEqual( - metadata_v7, - metadata_v8, - msg='After migration, metadata.json should equal intended metadata.json from archives' - ) - self.assertDictEqual( - data_v7, data_v8, msg='After migration, data.json should equal intended data.json from archives' - ) - - def test_migrate_v7_to_v8_complete(self): - """Test migration for file containing complete v0.7 era possibilities""" - # Get metadata.json and data.json as dicts from v0.7 file archive - metadata, data = get_json_files('export_v0.7_manual.aiida', **self.external_archive) - verify_metadata_version(metadata, version='0.7') - - # Migrate to v0.8 - migrate_v7_to_v8(metadata, data) - verify_metadata_version(metadata, version='0.8') - - self.maxDiff = None # pylint: disable=invalid-name # Check that no links have the label '_return', since it should now be 'result' illegal_label = '_return' for link in data.get('links_uuid'): From e2c28ced85989a5d3db626f38746710e7e426b12 Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Tue, 14 Apr 2020 14:19:31 +0200 Subject: [PATCH 48/54] Add export archive migration for `Group` type strings (#3912) The `Group` entity class is now subclassable, for which the type string of existing entries in the database had to be changed through a migration. Here we add the corresponding migration for existing export archives. The only required change is to map the type string of groups. To test this migration we use the existing export archives in the module `tests/fixtures/export/migrate`. They did not contain any group entities so `export_v0.1_simple.aiida` was updated first and had four groups added, one for each of the migrated type strings. This initial archive was then migrated to each subsequent version, step by step, using the command `verdi export migrate --version`. Also fixed and migrated `tests/fixtures/graphs/graph1.aiida` which was corrupt and could not be migrated automatically, because it contained a process node with an active process state. 
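The migration itself boils down to a string substitution over the `Group` rows stored in the archive's `data.json`. A minimal sketch of the mapping applied by the new `migration_dbgroup_type_string` function added below (the sample row is made up for illustration):

```python
# Sketch of the type string mapping performed by the migration below; in an
# archive, group rows are stored under data['export_data']['Group'].
mapping = {
    'user': 'core',
    'data.upf': 'core.upf',
    'auto.import': 'core.import',
    'auto.run': 'core.auto',
}

# Made-up example row, purely for illustration
data = {'export_data': {'Group': {'1': {'type_string': 'data.upf'}}}}

for attributes in data.get('export_data', {}).get('Group', {}).values():
    if attributes['type_string'] in mapping:
        attributes['type_string'] = mapping[attributes['type_string']]

assert data['export_data']['Group']['1']['type_string'] == 'core.upf'
```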
--- aiida/tools/importexport/common/config.py | 2 +- .../tools/importexport/migration/__init__.py | 2 + .../importexport/migration/v08_to_v09.py | 58 +++++++++++++++ docs/requirements_for_rtd.txt | 2 +- requirements/requirements-py-3.5.txt | 2 +- requirements/requirements-py-3.6.txt | 2 +- requirements/requirements-py-3.7.txt | 2 +- requirements/requirements-py-3.8.txt | 2 +- setup.json | 2 +- tests/fixtures/calcjob/arithmetic.add.aiida | Bin 8777 -> 8786 bytes .../fixtures/calcjob/arithmetic.add_old.aiida | Bin 8611 -> 8620 bytes tests/fixtures/export/compare/django.aiida | Bin 1895 -> 1905 bytes .../fixtures/export/compare/sqlalchemy.aiida | Bin 1883 -> 1894 bytes .../export/migrate/export_v0.1_simple.aiida | Bin 57683 -> 56550 bytes .../export/migrate/export_v0.2_simple.aiida | Bin 57820 -> 53074 bytes .../export/migrate/export_v0.3_simple.aiida | Bin 58079 -> 53030 bytes .../export/migrate/export_v0.4_simple.aiida | Bin 58230 -> 53155 bytes .../export/migrate/export_v0.5_simple.aiida | Bin 58181 -> 53114 bytes .../export/migrate/export_v0.6_simple.aiida | Bin 57841 -> 52766 bytes .../export/migrate/export_v0.7_simple.aiida | Bin 52446 -> 52665 bytes .../export/migrate/export_v0.8_simple.aiida | Bin 52453 -> 52672 bytes .../export/migrate/export_v0.9_simple.aiida | Bin 0 -> 56628 bytes tests/fixtures/graphs/graph1.aiida | Bin 4332 -> 8262 bytes .../importexport/migration/test_v08_to_v09.py | 70 ++++++++++++++++++ 24 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 aiida/tools/importexport/migration/v08_to_v09.py create mode 100644 tests/fixtures/export/migrate/export_v0.9_simple.aiida create mode 100644 tests/tools/importexport/migration/test_v08_to_v09.py diff --git a/aiida/tools/importexport/common/config.py b/aiida/tools/importexport/common/config.py index 549c22be7d..5f5a8e0751 100644 --- a/aiida/tools/importexport/common/config.py +++ b/aiida/tools/importexport/common/config.py @@ -14,7 +14,7 @@ __all__ = ('EXPORT_VERSION',) # Current export version -EXPORT_VERSION = '0.8' +EXPORT_VERSION = '0.9' DUPL_SUFFIX = ' (Imported #{})' diff --git a/aiida/tools/importexport/migration/__init__.py b/aiida/tools/importexport/migration/__init__.py index a99ee359e7..402147ff7b 100644 --- a/aiida/tools/importexport/migration/__init__.py +++ b/aiida/tools/importexport/migration/__init__.py @@ -20,6 +20,7 @@ from .v05_to_v06 import migrate_v5_to_v6 from .v06_to_v07 import migrate_v6_to_v7 from .v07_to_v08 import migrate_v7_to_v8 +from .v08_to_v09 import migrate_v8_to_v9 __all__ = ('migrate_recursively', 'verify_metadata_version') @@ -31,6 +32,7 @@ '0.5': migrate_v5_to_v6, '0.6': migrate_v6_to_v7, '0.7': migrate_v7_to_v8, + '0.8': migrate_v8_to_v9, } diff --git a/aiida/tools/importexport/migration/v08_to_v09.py b/aiida/tools/importexport/migration/v08_to_v09.py new file mode 100644 index 0000000000..bfe1f5ea94 --- /dev/null +++ b/aiida/tools/importexport/migration/v08_to_v09.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +########################################################################### +# Copyright (c), The AiiDA team. All rights reserved. # +# This file is part of the AiiDA code. # +# # +# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # +# For further information on the license, see the LICENSE.txt file # +# For further information please visit http://www.aiida.net # +########################################################################### +"""Migration from v0.8 to v0.9, used by `verdi export migrate` command. 
+
+The migration steps are named similarly to the database migrations for Django and SQLAlchemy.
+In the description of each migration, a revision number is given, which refers to the Django migrations.
+The individual Django database migrations may be found at:
+
+    `aiida.backends.djsite.db.migrations.00XX_<migration-name>.py`
+
+Where XX are the numbers in the migrations' documentation: REV. 1.0.XX
+And migration-name is the name of the particular migration.
+The individual SQLAlchemy database migrations may be found at:
+
+    `aiida.backends.sqlalchemy.migrations.versions.<id>_<migration-name>.py`
+
+Where id is a SQLA id and migration-name is the name of the particular migration.
+"""
+# pylint: disable=invalid-name
+
+from aiida.tools.importexport.migration.utils import verify_metadata_version, update_metadata
+
+
+def migration_dbgroup_type_string(data):
+    """Apply migration 0044 - REV. 1.0.44
+
+    Rename the `type_string` values of all `Group` instances.
+    """
+    mapping = {
+        'user': 'core',
+        'data.upf': 'core.upf',
+        'auto.import': 'core.import',
+        'auto.run': 'core.auto',
+    }
+
+    for attributes in data.get('export_data', {}).get('Group', {}).values():
+        for old, new in mapping.items():
+            if attributes['type_string'] == old:
+                attributes['type_string'] = new
+
+
+def migrate_v8_to_v9(metadata, data, *args):  # pylint: disable=unused-argument
+    """Migration of export files from v0.8 to v0.9."""
+    old_version = '0.8'
+    new_version = '0.9'
+
+    verify_metadata_version(metadata, old_version)
+    update_metadata(metadata, new_version)
+
+    # Apply migrations
+    migration_dbgroup_type_string(data)
diff --git a/docs/requirements_for_rtd.txt b/docs/requirements_for_rtd.txt
index 1d2e60adb3..f518609215 100644
--- a/docs/requirements_for_rtd.txt
+++ b/docs/requirements_for_rtd.txt
@@ -1,5 +1,5 @@
 PyCifRW~=4.4
-aiida-export-migration-tests==0.8.0
+aiida-export-migration-tests==0.9.0
 aldjemy~=0.9.1
 alembic~=1.2
 ase~=3.18
diff --git a/requirements/requirements-py-3.5.txt b/requirements/requirements-py-3.5.txt
index 545e13a27f..96a42ecec3 100644
--- a/requirements/requirements-py-3.5.txt
+++ b/requirements/requirements-py-3.5.txt
@@ -1,4 +1,4 @@
-aiida-export-migration-tests==0.8.0
+aiida-export-migration-tests==0.9.0
 alabaster==0.7.12
 aldjemy==0.9.1
 alembic==1.4.1
diff --git a/requirements/requirements-py-3.6.txt b/requirements/requirements-py-3.6.txt
index 665898918e..c4e942b38e 100644
--- a/requirements/requirements-py-3.6.txt
+++ b/requirements/requirements-py-3.6.txt
@@ -1,4 +1,4 @@
-aiida-export-migration-tests==0.8.0
+aiida-export-migration-tests==0.9.0
 alabaster==0.7.12
 aldjemy==0.9.1
 alembic==1.4.1
diff --git a/requirements/requirements-py-3.7.txt b/requirements/requirements-py-3.7.txt
index 3a166e7b53..6a5fd19a03 100644
--- a/requirements/requirements-py-3.7.txt
+++ b/requirements/requirements-py-3.7.txt
@@ -1,4 +1,4 @@
-aiida-export-migration-tests==0.8.0
+aiida-export-migration-tests==0.9.0
 alabaster==0.7.12
 aldjemy==0.9.1
 alembic==1.4.1
diff --git a/requirements/requirements-py-3.8.txt b/requirements/requirements-py-3.8.txt
index a3295d91f7..4603457b4b 100644
--- a/requirements/requirements-py-3.8.txt
+++ b/requirements/requirements-py-3.8.txt
@@ -1,4 +1,4 @@
-aiida-export-migration-tests==0.8.0
+aiida-export-migration-tests==0.9.0
 alabaster==0.7.12
 aldjemy==0.9.1
 alembic==1.4.1
diff --git a/setup.json b/setup.json
index 59d707a2f1..9de32a4b12 100644
--- a/setup.json
+++ b/setup.json
@@ -88,7 +88,7 @@
         "notebook<6"
     ],
     "testing": [
-        "aiida-export-migration-tests==0.8.0",
+        "aiida-export-migration-tests==0.9.0",
"pg8000~=1.13", "pgtest~=1.3,>=1.3.1", "pytest~=5.3", diff --git a/tests/fixtures/calcjob/arithmetic.add.aiida b/tests/fixtures/calcjob/arithmetic.add.aiida index 4c0de43491913f2831935d27051ca4d13c2bdb8b..093473ca20216a4fe12012a467c9420669c8f5fe 100644 GIT binary patch delta 1951 zcmX@<1+z?+#xgaHJm)OG|+CtjFrh<(oII zH^y(XZu(-_usZqtzB!KEAvHedret}9dG43~5Lfdc*Fn$Lc50u=!}$5No`MRGRexMd zl=q5t=39HK`O5)W4X{_b8utLoAx#C{=D+ysKuWbu3S3Bf@{G?M&~v0A3yy) zk@EP5JZhXzUcf0fS(}R&9PwaIKPQ4CXp*RpMSUegz4+vNoIG&Nf|eXjkV{@h}d z@AC;jIC^|ylR5bfA)E$LvB@`O^>~oe;$)EW1zbqVPjMkBKgFd!*@l}RVX_p$WOszA z{d{I{B_c_w`pIeN&I38bTNG}x3#udRdDS5LLCWfR;mX7&pW;QBJb4K}k~`1w8w!Fl z4>(&v3G>MZ1*|4}32;G7ix7x_FeVDdLl|2GZh{$`w+s6*3UGk4qs_G|*-^mUH8YWNro6-$P+MGUYdQfJ}(y=#PG=? zJb`?SOd`yP>_7REr~yPdo0u^lYW|<>E~X2STQ4TXRKhvAKwKQomz~YYz>t@pl3J{9 zVE_zZG-H7}WDmhrfKwEv3Z|EwlkXv$DZm9Y(lSqF$ppiTuZZkYRkel|72G#+GJ z3HM}w32TTZkRkg)eErEcB?J)$@$$eG6A)XYI5kgn@_$Kt5munH7#P%nun?FQPK$zg06ZdmLI3~& delta 2016 zcmZvc2~ZPP7=Sn7DmgX;l0X&$3l$IwBtf7>T8^>)IckTaoWo3O zJ<1y_9mE0xErMEWr63}pIEbw$RY9i^N)=n`6uFC1x-Xk7k&w*J?*ISa```cm4-eP_9ul=j!Vu&lNIZ#!iJvHmQ!!dTAq(82iH;uyqsg#b(w5_ zXpnwtsCasBc*&mBf8BFOyE5Gh6O>8bRZ}VLZM%Bzj+J-H2fz30ec{(yu+$#oakN?X z(!>wF=+@mFQD`ZnpJ7Zcrl_)M!+C~N0i`8fo!PZt|J9`2cK)+SJT$70S>}01r7Sge ztSnU4GeS8p`$I#rrFCwHer)`FO_JTLF>>{HN(%K}*IYjm zo-FpC^r<;MeEp!;sO+2kEd^YB%yHqq+%8nM2|e|&WTsoR-u37~?uEeUvW5H~##G*Q zCBIJfq}4sGPVIS7y|Zz0k)yjk0rUKAq%2t;j3S7@U zVts_`snFNHPAvkJIvzkQW5pd{YlRr56UpCU3qU1PN(%8jC^!QKi$f~@w4g9c;L4L| zv907s9zTjjcJ6EH&IPA=yGhGwUL;85bB)Q42H&zNKksUNSc0c)C$Iz6Y$ak*hycW~ zbRznW{>Ep@Y~mHAo7ogS4O;WE71eJ@{~9$d6ghkU1Q!1;>hLB1puLcmfbG zD)I$G94j?*&dI!!W=0}G#*)kqfI)0$Xv0(_>N^<4Fg%YUh=X{0>O)c?MsH+*BTOmy zRLIcmgkkjOP$vctgj`ZbIHLzK0ghv&4A&`{z@8@s2XLKrtxzWhf8bmkCOF#n9RpZf zI`Q}skM>tzWXw{79u8(yl{8J9W{=T#lhz3y2IB2mU8}bMzt~glO$0}ig6G!O zE3w70!3bLl;%szcYlJ#6Xth~66Fx^==wzjd8Yq0$P7scQ8kX2hzlcOql+flwok25j zFkKxeu(i@j*JBj?%#f}oiwIJ}AuMA4NAj$_`T-EG@IsC@O5X(W26!i}YA0BZlN?|# z@*`8QON7y8fl9Q|Z8fT`0DSvXf*JBGDAbIYKh4c@+?HqyDEor9%{V$I*VIT#*h{!- zdzmed-1C4PMt=fzfneOu(qZ-F5}Bd4Ly-ESCJQx+bJ-x*j-j1rn7Ab#Si*D?=gbEf cd(FgUK#SN~lQ<)IlOWE-e;*4$F7u)EKfShiTL1t6 diff --git a/tests/fixtures/calcjob/arithmetic.add_old.aiida b/tests/fixtures/calcjob/arithmetic.add_old.aiida index 17357815a08a60d12660027f2cacaae10cae7256..bbe502e33a6d4db98d720b524515ec5c5aaca51d 100644 GIT binary patch delta 2096 zcmZuy3s4hR6kWnc@*$996EGnVt&|QHHXsC$3dB%Qm})3mTAcC^RHX!kQni!_Xa}nz z@MLQFNyi^j6exuZj?92o1wo;;YJs8PjM|}8E4H-YuYE6@?1oP=o4ot>+;h)+@7&!J zszw!89c5{SW9W0Es90Ubm06+x7N4ayIy5*Cmlyww1%@eXFw72uKGx(Ur6%Phd2P$q zW>#$KEg6w>UJ8}JEninKG#sa% zYE9YqpACQ1W|I)mkb1R=HFP=H93JiWwvMF=SEulJaU0%F@ zL`|*OVcF%CS5mLu_P|@CN4lyaET{4#v@Kyijy+Qyt1ehBHHtHMGetJPYcCq)H6k3Q%uvi5(kGwW=OM%!`sR0sFU9^JI> zLyt*LL+(?3M*q&p^cVV;$6NpX>W1sE!a4WVr4{=!o2u4g?TO*Zx*F-yI~8r$H2bB+ z-xXS2V;rks=j0_sPM>Q(pIVsQwpX<$txvAGr9Yh~OndiUMwMq4(bcd1Z_Dp@x5i6+ z?`%GGH1QzjF*!K9^Pk!5@-OW!hd^7f&3pl}Y(+eO4Kdd(Xcj!TwKG@-2Ctb0j^Mm4 z#}tUbdL{)Cfdm>a;`yb_rwkRmP9_q#2<#Pj8-jA42=oaQ zm-qq_KEzM5u{D?*2>G$l#>mz{4H^Ad4s@vm7ERQ^qDqOtGjmkE#2rMjS(GR(+lV~N zjwZQYvEL^LE_MmzAdb^z7_`1)A7EjW(pq45)xunh1gEq_7|>|RrMMTzygLo5vzQc8 z1S*-d>Pn_N;IQn;qHujU+}yMUM~1ew&%u-x20oOMs{#GEoGdI{#hu&vKq>SjS;vK9 zi!e|pa-u@%c3TX~)TU~(2?eTU!N4u)#|eN5(sBynEnghbK_ieK7U3AD5WaqkA{a}U zphW0M>+)inbQwH0k8{*X%-GBX1Fm8+quE)^&@lm17G#LaUBry*C?o)o@kj_vk%?I* zQn=WFfQ3pBPZ$eyOKF}XC?r4=-x{c0j7<^jDrStC=EcCv)oebTZNv*ewJXnL-9)J9 zoav4=Xm$~p+=FznI3`_ii{(j+>+4?~l+ER}3xvKhco64FCWD delta 2140 zcmZ4Eyx5s9z?+#xgaHKJRh9Zr4;B>^W^w%bUNAAo 
zt26nxN>SRqJ?7mvF1xLkmFSKNU12t>PF?>?`XuiHB(w4n@PM@g$)RyNxPoUm*;@z`Zd!w#zk7s&s{NZ8N17|k7I+jcI zjO_odb7M@8wQtJo=;f4DN?&T5+;m*K=4*f=`_qDEMWr3pzru?<3hybO?^?nmxb)@r zi~F>B>q~Ywl-z3-6Wf+P`@MoBmz7JZQ%GZ2fpzaIhx&V$k1dq(->E$HcDbi`I-k(I z>t`n}U(V+8;e(gq%*R=xlWv~KeQ#LVvtX^Zh;+E(A`b~zXHES2{@2&J-ZLk0D!nTJSKBucGmQ>#qaZ2D$WBE4yWuJ;qbQ68I?IzQArZYOP zHZQH4_iU?Ua%^Z2=Qe`{oA~DHr_Y~1c}kANJx1izz*Oo#c>#ymWNl7faGC&elwl0< z$@e&TU>q?)(`0>QoluSa9B_?dlQWPwf+nWu>iI;(CdUg4K#W``BsRHI*bu_mC+q`Z zfRyO*ASsF9fh!T49KoYL`4kU7T&aj*vVNK&x-}q^WqFabW%8*{?&swf1f_LwE`So| zlN0!?&@#v5@4OKZ^SALGfG~pj;=zo~I|Pm~vT=a((B>+B3r03bewxfDq6Se3ve{1r z$>y0NdOXNk52#dZvK<$a|FXD&ZsX#D=-($SHaUx1XYzfRz;5nvsD+c?awS6)z2yp+ zyo6g2VY%4kN=_sbw~7M;i4)1hcbsMrogm9+3Bi@RBP`_?)|$!9jMpch6_tQ+HQ7sA3Sv0ODGc0`6Z!N7Fq{d}?+um%rgwHNRHve(d^ksd;jQCr zkQU3y6*4>u=vl-LMd1hT$sc7MknDwIhRKPtQcONPU_<`$@#`Sm1(E-uIyD@aTp3t^ z*dA^m1H+O=0bZacM@$3#adGWwzgi6t3G z@e2);3zAYy>_Q*~lcl93V7WLND4GHh)#8?#>@5i`U_s)iVdBD*cS^$ZHz+X~3BwW- zFpa0+NS`snlh=yl@Z)lj9Kw$Z-{A%VvqVY?lIx*G8Yq=1icFT3w}u1^C}rh<`1+G4 LDvGl00q1i7x=^mZ diff --git a/tests/fixtures/export/compare/django.aiida b/tests/fixtures/export/compare/django.aiida index 633c58159e4131478eeb39bd897b2637660e9219..4b1cc81496666f3dd2b89d61245648fd2081aa0d 100644 GIT binary patch delta 765 zcmaFP_mPh;z?+#xgaHH;YC8fZ@+q)@=#55MO!XiJSHAVa2aF61v)C9Ic!7#?Q%e$4 z5=#>GvWoNb)`pzSyKNxwSJdhGp@Qu6%zQ`YCqZAP1qX6$v6#mGU{OI~FvqX&lcXY- zy}9AlaEN`j@q62Sk!QB8=(*Qino-hOmED#9Cj8^x{z`$I?A|2jdAj29Gt56`AN{xB zzF|fE*96n)XF|oK<7<+p&(uy@XMZk6_;cF~{*HeBod1;@V|2_{uRbEJbZ?(yOuXtn zp^xtlAF)=tH(z-DrFWg*zP5y+svQy(W+T~@+&G*B$ING(-z{nSIU z)3WyLQH}aDvEkI>MH+7x>i@mJ?7PN}_PEuv+D)#7Jh-EHQQLbTW2ME5itj4YCyJsY zCIq@pnWcT{j>;PE)W{0X48^T|UOp}xSN6$PnAg6E(r4WA)nv=I*J_h9*n7D@i87wg zd$Id=__R`UvorkltBhUaC&|A4$KU*F)7Px-DcW^ z{nkXTi~WD^NxgXZTL%`-G9g>zk2Nzci>>{fx=#D2XMb}SkxelPb}^bhApcfgwfAx0AU^yEPz-~{3jYtdm&vC;PCAvURe8)Bym*|0=cs delta 732 zcmey!_neO}z?+#xgaHH=Rh0TqIBCeS@)+VZ3z?^Zx_ujKhz_?4Q3=?U}5hyx@M@O@EIcE!uczO`g#Ej*9_&MoBy}I0{0Zp7cj<$Hb?EAk( z$Vks^$`g;*EgH`E=l14G{z#l{>?M6!R0JsTm9HF{O5xUCX4qU`jt{+-eVs5Vb|Nr>+5}+%j(jEPMPJ; zc49L7dGpfaZ)bO3%AD(39F=yXSxeM}W#WNFF$*q-&AUG3ll5z7%>b!65ygjoJlw1K zUvJ;N#vZOaDpiS(URo^IwUS>I6yCX+!B(uDSxVmE$Yp_F|K!1mf)k!9Ca-2u1}BTj zmsr#wj6W>y5QaOeAA~WJ)d0dc%WBMvl5{5Xvgtvj?AfF&CbKXwIZl; zGKnxF(jVALD8U2g2r#^L+%56f4Qx`3PbQycvw?8A*`=8DStp-i)1Ms1F3MKV H3epDv#w#$5 diff --git a/tests/fixtures/export/compare/sqlalchemy.aiida b/tests/fixtures/export/compare/sqlalchemy.aiida index bd3ae08f442d34d1a38b3a022b636a5f12b1a131..19b819a44f245f81d5786a6664f35070ab83e619 100644 GIT binary patch delta 772 zcmcc3_l%D(z?+#xgaHK9YC8fZ@+q)@=#55kO!XkfjH$oYK4fHIn8n7xzzbBAn_7~X zl30?cmsOmfw|0te{%r%Ew*OrZJ`1FH|Jr!BOY}wRjkU`Y4Me&NIbIyT>VECuhx+*` z$NC{)5L)2E zcBr~E+BQGZ=+?8Gys787 z*Tc4@QvCUx-S$BXKdsR4s(#`9fBQ>Yu6GNsuX^S!m915f=kev#qgt?Wq2zT|G&3(~t@rg!AA~MAWy!`GHYNqjT7Sr^c@w42xaF(KmTj;7)Go@I=$!}; zsVH;0fA?QnWO?%8KlP5K&n`_*IbQw0-Ldp$)mGP2Q_n?T`T8LL){K`T>wm1D{4VwT z=V!;al|DXq-p6UyuiD^_*3zomD+_-=d%I%CxjlAz#%~;-adFx7AGL{J@HlMW#S?Q2 z?Zi*;&WqR_v-@A|VT-~}qmO)W`G zNi0d!%PP*#TN`pZ@3w)!AJL}ghYGUQHLJLz7EE8XeNLCtf}>maFvxSs=W#jyj|<8^ zxkWab^TOmMcV|9)+XRMdq)>xl#eWrHOy8Gv2gg^Jq==*ckJ`r zqyFgd?oNJ}KQ*3D_6zgJMZI<^J+R#GbIaK#rob*Ezd2Jn+2$*UXdGm@ljO9dyWYKZi6-$(~l}$49 zjIKT3BxB=VG9lVsHEq$IE64fv%(N@boxl(kFBNtDyZYo8d{W_0vYF!M7?hN+So0z; zE5c&MIgwi5vQ@wBdFof`Ut9gyGW_R*3nq*Ae?B(H|6|*Kwbyaw_W5;L(e`iuoKcyQ zd(W(uCFj%DrOV&W-oE&Kh`Ntt!> QK{ox#-t3}m#jGH80NLO*W&i*H diff --git a/tests/fixtures/export/migrate/export_v0.1_simple.aiida b/tests/fixtures/export/migrate/export_v0.1_simple.aiida index 
0778e826d40d7fea2348cb852ab8e504bbbae31a..673c35780b3babdb2b081e44a55de71399a3f4a0 100644 GIT binary patch delta 14274 zcmb`OcRW@9AIFc8?7b>`ws2)#36YG*D3qw|kdcfs>Sk2-=pZAjGP1G?nT0Zb$j(Ss zWh7~clz!)4aqrc&^3{XtkI(bI&g=a?pU?Z8UbQ|WcvnP#&{QQLBn6OPPwdOpP=EOI zg9u;&Z0$_V9E5bVNdSW1@2K?iyaUfTT_ywY2uBD20QF<#^(0H{&BRtjoI`?EW{_ht zV3I3hR;LI6EZ~JQn8d$a#Xi(pL759ijuvZvJUwI`G0I3AGVp^e^fv|Q`@jMMoWrAG zT0sJ6AFF^^sSs!*%_Y{9fV-AxA0dR85l#y4+9gFAnd2=e`ELz@w373w*cxR(dzjF3 za-xd@037ZCfGo5&@u~cxfBDNAG4Cv4AH@y=LiFO_w}Ys**a1Lg3n?Ilk}U$p(2}hP zcuGrZ2LW)~r}PI<;YoSW&dE{G@v`HR1VG|UEg5hr z5uBZb*+^&~_(ReIu%*DkC)Ly|`XJ!54gta&5FjC6OJa%T1suHZx3YjHKif4O022ZT z*IkT>=nNv0Wdc6FSXqF4patGKb_J8QO~BpO=v@Iqz;W*_>)aKZgEzA)GzYJLS6IHY z%7w@-w$Sq9X`Q>OvFMsjw3dGmLKyeyv`+tE_5Xf6!Rj}CJV6Lh?|+*;o=jkyHWEUJ znB%0Z)5w8_%{1ab!@oD8QR8MCQ3Urbh`F7v)rb%RhPw*u^zTB%X8gMl@$dfiM{L$V zNmHDivl0IwgaqzGZJquNv~I?~fmV?HGhUoX!WgD^2Ob7`H2qN zMQLM=NG^R;^_tMqmrKhf!H`y>Rn|*`*Q1%S80R=%8!a-B^Tj}`@~Z1CVwl%)qKO!9 zx{we;bRSO2ItwYBw3$LetHH+htr9ED`%p_FA9~QGTz;w{*QIkdqNwzOapw?02Y*3LuE$AHeg3CHa4~ z;^%~)E6PVs>yQ9xXwb%4Kg`=1tpL+NWFsOkq3-U|Y=vtySW$t@pY^crts5{AQ-Sgl zfo{>ne^4O1floKr))Yie*Oibxs;Ey_U;O0d?&n1(p(bmzd3SA;f=@$>6Y&=p%ko*- z8J~5uxnM15eptuWD2jVJ!n`TgG7>_V<9;KvP9xbe{@uu|H~&5%2==cvAuO~smzIxa`i$#K z3Q7Rr5C#C`3lvi$M()f6q$aa(4>EV}0rOhn!8$w{Jy4@Rip z(a?GFoL3~ydH=(x!coZCIQC>!3&9@eZ#$*jdPRW*AqWl*kL66z*)5Hs*|liL~sR^0n>C z8j{?5A!Dceh5LAdY{s}xT#_Ce;o(sJeg)ug{8{l_m^2fxNe^P7FHBT& z8syaA&lwtT6FcA$uaO&Oe1^TxaYCo7g6JCi#3Rbv6)^Kfr?4IOLRO$vRyK=8voi@kIH_HAR8MxZ@_b>26(JSVV6}bYOXSEWY?XL|>JnTBphnm}r!u z%$DcI6)dyaQKm58|2nI4a_zPbmrJpEQNmoZ&Nw3F1{C$-w~-SY}* zp=%Cxj)z!t;6a(p=JG#xIS80#eLLD9FlKF$zOBOc{@xbuPCW5K-2^v5=YF4WG}bf$ z2A5iN?@E)H$ZAZ9*gGM7Nn9fK#Am+q%7SU;Y^i$JSa^GoYmOVtb*qgZt^z8<|YNe_59(>hT@jPdSmO{cQgpaevBszkeaNrOX)R&{;MrXL0DW&)D&%iu6(pw#Uo~6)SAT)uUF-F)~&biSr<8u_8 zyL51eU3ZIb2!av3vRhxRgof|tnbU8!@QpnYzZwx-zBO=4QL-&~hGqMVYT`k|Zra4k zL`}*7drtPV#MKeGjHKrXs}uWr8R<`4FmI5LzTtG_y0!Cd?asWh=!UE5mb&fZU88Sf z{9EhmzeK?gm}eH3vdc&gBJ5J~n&QQ_=GMtqRwhfd&u~)($cBe!^wMYSbs)$uE0SUp z{lOJk%FEdU>*0>1`s`fZJm0hL)0g|>EY3r@0#_uzCBry}#>YY`-n%_jaLrMqE8iy6 z{$*0Gdb^!6+|9te+tu2`>rn5`76R~jzuFVQDYe@${&`zXm-&fDTj>xE-nDW@W3sGt zOo1>-QI*HCcjh~3>R482S@v54&L9HuA|ugNlE46#rp?7d3^PL#v1bg z&hT?8jg9MhhiK@Ca{F&}heD04FS+~M;)d8JJCb#UDkx8=ht{Y*+-;#-7jhyum7anv zo-bl5<#Fc+cxL^P>U+=b#v+U;9{V2WE{SNZu?X2G#}4o8Qa)R3V75yi-^P;8>3r~{ z*pse`HeH6|hkafnrPFNNOFKJa2tDH&vMrD1DYuoJznfvf?SsF0jT68xLy`ZMmZ;0uR?C z-OpH6aU;o9VQ;e2DW|j(J9INTL`&57@hMgWDHM`FjABk5$x$9RmL{K2wg#`|vLKW~ zZ;o6H57ua4(`zPW^u4nb@RiN9Ep|Cx%F7xcD;+yQ`F-5cNy(r5 z?j_DE{Ks^&?LCU;s#F-?U-@ms&G`dH+V?z1HLuSriB0OOO_qH)pIJL4+)gy`%>Cp{ zCMQPEa|gsVd3vkG4ZcPFFHZBK;;*^rzRHYNQNroIj3Y!}*;??Q6P=KHSUu=;v*#Xy zuZi#x)7C)QM`l~|>|mCSA!TlQ%~sjozZ(0}Mqqo47>+UWTT;EbsilNRRpKo%!OGz4 z(Uz}U3ot~&kKTl34)Unhjpq>Cf}z)lE)WItXk~fppU=GO7o#9I_fcyPdxu=yG&yaC zaqWw7E1pxW7Ym(6U9SxeGY?VHanHXtLDWdodUsqqD`yeZ_sQ_^fTs=cYWUOL#+SPy zR5NsYI_6vVlNb?8l#RgJhhTb_ex|}H?hcJ#VH~zM$c;5Icf9c+o5;P`-SJR>^v|I$ zdFlRNPUIf&X}+mXKfQN`|9tpY>stECiKyhgECxR9$=CAEUm(?{evx}I_50WMQQ=f6 zM6vdd{`u27oz)3X-~JYSsD8Ho6k|vGX%@(o+brxS zCDV`3b!fL7j!EshD3SlV&F}H})qp#XrHRN9;dV)`1?~)iuNxUfNF^+FhFeT}ob*4a z#8)d6vymy@csoPr7H4VP^aAW76J}|h7TDny-4qE;>=(-KjMdp*-Wuv`QsqQIj5SsXZ1e*cTUD3ZFadG?Qm826jfIg>jA< zW<2ja&KVWrX@4Qx<80fg9>dqHQEM)ZZvx|nv0+>Rw?OK4*idSoQRLBV1#yqaqvCSk zm=Lk^bvip=^%7XI&9Yj1^HqfrT2&mkEZeuHVag&rKtt@Fwo29vf81M&Yn*|7pE|(;->+K?Cx_x_aYHqxK zSV-J%aDG;pydi-*zsxf#zsdjJ??#Q2q1WmWS6$<^2QNLIwLBuh>)#*cuI5QkxUD4r 
zQTa99+3{Hxb0V`D8Lvmt<;maW^IU!2u*D2A2-qp)IEU%fYS3i%vBs?>AtoP8%`OkQt^!?fJABxgEIx(n8f42a*rp#3F;FAGw}2(~{u*^e}XnA;~G_t(X+>m*jMPE?C#!%GW`-LP)Ozpr!L>IJ)9wFJW{qBt!A{GTiVe%xqYb*t< zbc_vd`KFUVX=ZA6T;ienL%tmD9u^+v8;req zul~FHi>o2rZC9__PY-_XYz3$6fYaYcXanQlH|7eYixjre6rAl*DX=Z8WJ|W_ zy*lyLf?wjb$&nYcag-MEe!i3=pt#scr>H}@V&~dpt;tS}mU(k}2bt5AO}ji3j*C~O z^)r(cRUs}G>~PNWK4maGuH&GLu;`h6HeJvpc=i?P6L+KY&-&aGZ$t&@9JAVIrmQWK zq_OQ3pRx=n;!V&T*OZ}=OqwGSzLRjYwYBv(9`+kDjUo-$L+Bj^d@BG@Ko4{_W{yVK z4|T|d!e8$?+>7(^5P0}(1h}7yi2!EkwH5M<8u%rL{CzR|qYeCzxee&zZKmr!N_F++Ac%WUGkjkHc z#A-D33n$EXWe5JypxOfY9(PbQWL5&P8VYwcu`8ejz0F#vCMq|vQJg6NU{@dvlZOgX zY!tb?0NwbYN41c`zcdGS4de(Qkmwp9wEfu0iiP%Aus;(80>ucaR_zNk3Oi?kiTX{0 z)PxLOtct>2Nu{M0LpFtr2MPtnFI1P}7pr-XmWLg_z?Ao6k;(KA@+6n!A-4@uZ%Ce` zBJ!Ub)~5u)s<0c7Dyc*3)r1|-z|^!AyjFc=({k8+Q6?d?3hOfo zJFbB#C=`W4W;Zsi%Ece43Yp?q6@_jn>f%0U(t5IJ|l>aft%}?5(Q|MG`x-nj*CQD<2%Yg8>lAf^6FR zH^GZup>iRs$-;Gcf1Mg!As9SVNMxhP<=$I_fI{`ctM)4{Wy_6kLSlrJ|D$tT3KXn| zAu}YKF2JYIMZ-{el8w6cR{=zBVenAll8qvl7r^Bf*x&G99NYa!koc;VTuF7q(~=Du}XCm;Tyy6fk+HRLVw?%Z>L713)OC@*nIvQz+8rcb(JW00;$FHtjua z#G?185X(l5`m5=6FnOp%%SMsQy^o0io0~Vk>)_vpFhU?z+m>}ttJpD@g(DaFAd>H4 zy99%BFLJA0A^JALPQ75#M8KELFi060G6l0v8g~8#lQ#Q@wtdsRQ4_V;8>n2&YL|&? z-~a2rB#Xx2p+YhnMK13Rt!MxWT>KaJW#Iw|nVs46M8Fe+QiaUXtjfY=8URROjrd(C z7JQAJ2i;}Zi5ko?@yH#dDpbB^RaIyfcDM$UWf!-=f}%F7vT$Fa6xJ2Ki$}3k7GgK6 zbp%=&<}`Nr4{hN&Qa@oq85F_!2W2u?%C;pgu%I~3y8Xvgri`U5bcsf{X=TUmB9+Om zTNQRl2eXoOD3bcBiqO7e=XEeS@9(1&A(o|gRx2m&=D>cHQk%TEWuSsQYvK@p{cj@d zS1IHv7(7&}XHDc^MC^B2$FPVX@oU~y$12+5CVko(2l^uRb6z++2Sr zhcIuh@iYK4M+BLK%n{*yLyW#x(eJIHs4fN8RwPvfEPsu?lJQ%JkQ0v@^edhd0ECN? GfBQf60uK2A delta 15483 zcmcJW2|QHo7sp4*zSAmOq{VJzX_SVPUTcL?^xCtN<*g_a*~xb8yHT_thO$J~EXlq^ zlq@NtQoRUM|9fSa>2Dd!d;2s^AD`#jx#ygFo^zi2Yanl9bLmEun&L)M78rEpT`E$V z`7--K2IGWTSsNPJ32CU)!pMIO(4RN&*sWs*{&u)R4I?4_xDf`M`BARAgobsNVL9)dIU9h6(4#1F3QKBWSO#1Ltt7_h#u96A)V#|<%mF>2c`q$wA(#q_ zTLk(X$n+4LCoJhFDE2aQJ4Dr>TO~W})XK|Ch zq2(i|Na#E}(m@M@K}&z%*q_j{CMpxyd#323uY z0rg}62@D1?7LTyhjLniVqC_U#pU#{hg~8;1GXU07h9#Z(!z~sv3@&NS5&;?pEnUtq zX~b$C(0QW{0v9~#mNe?7=ZsN%0w4GhYm_9AQAWB(!z2Nr@}<2+>th9ZOG37q(v&D~ zkvu!N%Et`nFOp{pOZv*h&x8c5W+PFlWCqKcG{1uwh|DL~ic8RZH3Kn{5_r{Y#kB~k zW(gH*t!4=oYuhyvD&T-(<$tkj=!>n3pFxa>)LQ+VUbR*~r&q1b&*@cgz&Gf>@UwyN zyq{N`gn0QZECe@RtDmvKYxOfWcx`^h2E&0E;J@%Qe0{ng5F;$SRzK5)uGPSZ`3zzt;A{1>TEtrYtQN7_ZnFyP!=S!itzKrSZ4l@QR|hxdoKBjp zs^AJ})xzCd1CAH59Pbd&7qJawUVTGfqXw>mRxZwj`Yp8xDL^%H$&MJb^ALcE+y?I0 z7dc<+5puzQ;QxB{`5haszb+X8V#Gw(ni0t8wPpk|dbJsWoAi*;a}ivqfq|wxFnYvI zc@kjsz(NwE!*Brpc8Jdb_DL{M-)l&JIRUDHEb&?156zEdHwhdaoaB}5{B4%kOL!$%NA^+6KCE==2h8O1`eKzbM`R;+Ao(ZA6ZICGn0|Ax|$#~89~bz zPMbtj(qi5buPdLiswE-Z9|^DQojF+PCj>lLG* zkxj1?Zv$8{0rIw9=UnTx18`IzsDG2T@Ez0d0dU1_3L?O#ieM5|(;(n?YqfgvX%4}JChoy8j=UAkS?r_>U-F~ z+A|@Zs_4L5*tW`xc<)5j$eS4E>pm#ZuaQ*c*7SS9A?8c_Ez|t;jL-Qs#Q7w(e3 z;6@+Kym-=3-mvVN2G6j+{6ldzhT$iqQjXg;8uEPC9Z)&y;A7zFE}UM>TCaUia6EM? zho&Szo6pZXiTna7D-Yd<*b4uo=7xWDhJgm2(R8i`bV5M4E|&Rj2Flanf_jaYCJRfv zSQHQAfeky}VEFgn`j8`fFM@GX1=VA_b368wM7F;~-@BOiwIgPW=ZLlN_DoU!V29BD z$S%q8ikmFtdQV+PH{KdxmVR|k**AZ9NK*sDl+>W)X35rZa4bDgqSDZ+lPs7cF{tG1 zXI;~feWy({1I*jlRVy+lP)xKh9Rhi|stlsSXh?gCVT~efK@RR|Qker`%{y+qYgVea zpJt@-WHR08AwYpSsO-77!0$#9x2^l=PA9w0Zr@@CSZ4pR$_ssHfmn(kTSYo9VxE<= z>e+sH`cqLCT_@VN=f{Ly!#3*>IrQ>)x-jdS1X+ju|!ySD> zDS%M?0dmZ9*|M@^j{BqM*|!xvIEk^<$-umz&2YSgk~aCSA?#!)E3WtWxG+aQ)pYz= zcLl6SDZegSWQ*nC$9p-{C}z>8>2#x1zxGE|@x3vLIp3G&_*3fc**;pzivcxC=K}<` zir;3Hchqe@jiMG+c^BXKpnS)JKU+&O(sApE=b4dLqXMS6H zV&kL7=l00Q3b-iqMaiQF4ERv$wBIe#s8luR)LuT=-IDGr%^&~pkt(|Pu#i>KZMDKW zUH@zDEjNG#|G+kry&OG(G^sUT81J4!gJ? 
z2^&g&7=-*_8`~BSq=wU7GQZvTWnY~y!!eDECp5;)GK@)%1Xl`wz4j<;*NsOU>gB0t zcPoZv_I@)E()g^n!@~htmgb{_!iJe|;k9Uh+ju1_O4Zfsy?JLU;9b<&W&ADKQSRUe z)VRI)W_HcNuW?(ng0F9B3fL9rd5gO1^&S8@pKdJ?_D%e*5kg3?J$~^PR*vHkm79{2wipXu^2l zmIjI3S62nLJVbQGiCyKrTPSHV-4w?5;%UikDLKb~G3ZuZ%Bk4GlZ|?srTcI5t2D7c zRppi^p)YXD8DZP(?cb6eRt?)6A*t*sEYQcJ_&hq3!U_nwL1sf1z;iO)jnzC2?bCS_ zIW>BMkK+t7Zj6R0RloXuv>DI2MwD%?!=TxX-nzpTR0zlEtf@dJ9*Q)QBG-nUg*_!c zrJFio8FfAKv58O43TB?O@*n%0%;Uz@H6g1PUZIEiG;#9Mj-!KlTr9YkJ(&fcY4=U z@@|OgwP&PR)^$ebuGWMirhjtjrl#H70CogT)6LgCf;XOM3|u;3Zp-BuO%vJi7SYr3 zt|%tbKE>XGwdzO~dA(NLtINs3b+3-1YCWDLTBc}bTaFv_XOCI~nSH&cBCd}U7|gXMdzA0h zOUq%KZCG z87nb$aQ_>odxadPk!}FB79vm1t#$x&Bel9xi(s5OKi{{~2D$~sruAk;T&~!Z(^{i zEY1(LYWpAE*>B`^@WX@Af7KOuZwpIGwigQx4f9hwcJGa@I-PICmKuD@?W8RedWSJ; zr-Q*ZV`ly!Wlm#2S#45wFhJYgwf0cLKu~wtF%wy(6A9umskf)btlcxrY?>(c(0&4> zVMwwKk=v|y^;cisZP8?g{+RJuGkdZ};l-DU-ZFhh7LTj38qM62ze*+cG$$d*Y)5kw z;B;)~_8w9hYpc zgQ1A4e$S(T3@lMRKLtOF)jFDXI%0cB&lF9Y*<{>dikcxVXD17vGuZ{3pBYQ*+_OZ# zQGdY044kCb5c0)*RjT`?v+a`JjAIzx=5~Dm^WCZ>x%7-(BP4y1dNMuj=z&fYFczS`5-S`;sl*UnhvGl;o_=d+A6WdAL_!I>|S>&suvC!KcF$#YG@ zU|u}I(;Gb*#7V*c zY=3NlD*GDjB;MQGyt_y8xQYL048x~grHwl&Nv2(NuN|h$EaWQt*=3(Q=(6Ql+~5IC z7Lp%tbq8LPd${IFa!ci~xwa@AA+;4@vbM_G8I(6tX9*a*w5wH)?v3e}VJp0P*%4#s zWj3v#Li)s`?+4m&Uyw|Wx68HCOiTTmzM*Lr+ZvPzg$Z>wnXW@@q;=kB$v5Y%qT)V! zmu{2Ww0(M{O!bzNkaGdDmcq@TxHSi}w^4Ru*-#Hdyhio;2R`L$g%iY(EhU$Ht7d3MbMZRh<3wxY$iiCuE5%wrY@%KrX^ITR+acejO! z*N$Fu8x%uNl=GEgOY!3G$xd(kC!VrNmIl;!|6PD6eC5s7(a%}Z@aS!vQSMm_vS$*C zqsp-l-tMh!_06v>Neuku)XekHv##-uW6X(e-Gg7HJbuEg*z1gwTu|IO?)%YsruUoZ z#P9k9J=mSqFIMyREn`Zwt;D3rW}2@1rzRApu1gJbp?FSrA)2rDp?Zcej^{6Fa~53Q z{9;7$UrIUhl*upQ->$WHM80z>G?1xcG?OWma>|-`Iaoa*aU*@Qe|lKmNhjUr<2b+F zM6*^n(xNKm7RIZV{y-VO{vNX|FUxir1@3>)NcNM@=}z#N$(*v}4)UTEQy461li+V2 zIekUe08Wd#tF!yn5zkzY_qhlsO9Y8~no6DLb=~vjPy3&#)IHJ@%q+QIy?e|$7u5+U zhA>{5dK78@aRAXGEZ;X~u9y_tWpztOyl#AN!gbz%_9kqtY|(8sqN}cm2|d`slHGKj zQjUVHmiOAoH#Y<6zZIB#jHJX)P&^TAdz|HVPP_ksksV6GB>G3`SWdm*-|bZS*Ypr> zg+F9p*!zv~(s(~l{!S@ZLt4F6)Wc@yt7h+i_0bH7Crxgg*PWezR{jDfeR2vt4l_SV z@Etug3oU%4AW05`(SlQwrIEcJ;iLqqSy+-n7Z3oT^0J|01=T!N;fLU{0)$XgB-y~S zadDN*?~x1!!y=(?#A;?1XHud-n{gKw3C)kcp^J!U=F|%_^Yr3eXrjhlSfnpLmQYiJ zoYUtJiv}lb92s;&D-0+s5K^2I7%cjFYjIn_fg2Ba1SlwwII-Mu+HV?e{Kh{=1;_6> zTDD6DNIL*&ZcwwtMaTKY?SqDI+=WG61rMKohlnW64GxR?|6?7(7!~7lK@rZJM^|An zVTh=}T(JV&%K4S%m}`!b!$!cRT#RTSE>QSd8}L9H=T%s?%exsMq}lmvHc|^oRq(PN zhs0OffXBvoNJS>dMgUaotPlw?z-Y`|phRc(8w|9v!(|2u>UCC##MjmkB5hn}k#whs z5$oW20J7^(^%SVinKKuQ3;}vdq*ojye7OyHd_Kp^cA<6$ZIyy#J4D_-w03C|#XTXU z3Vc4_m&r|B-0}4{gjS}_AOUU9^|P|wd*;SL6%X#liL?@5j6-Ops?Q7(6!qYc%Ow2r zzEt!Bk%w?NA}z$%<`7y~3gHj{;u#&Tc&@@i!s40hk1L);NqmSdp1<(9pyp>Sg^sXz zo(O=7C%R%ii)VHK5MLp_!Z5YW?D*6%69UAc@hs#c3CS^CwXfBckJEiq8c#N9z}-EQk#J z3g%Z{3zM*AWDkJ>peRXky_!b<8QzRYY26}qCKMMbqC`qWM=B1V3#yjZ;#I=R_W>dU zM{C#fi8>tyAZu6NHZETK${^s8b&HZ;I4(*=Nt}p|QZqgm)I6p&)8MweW%>vl5ntRyNNkJ*8tad&5Qnd6BJ6Y?M1i=2 zkUR>PNy}KcP%lQfgZf9~GKnZ@6w#T~iq8eLQtOvV95GNP$wjYcW%h}If$AGL(X}2I z`Nbdj$r+*yuGX)j(*fmFQ21spEV3*XvT#ORwPGvr^-+Y=tYq9A6%t$FtX(FnaCeWd zkpk6Kv$jI>=cR<-Tu^I8BokWv=LCj7CS#G)iSxFCTB{X%0Y6rRkCg)&pUi->coi`N zl}vM9z#@+)&2vHR6^={fz=LlusJ@!xX1kOTaA&kwIQE)tj<}7pnHk3<(q4Qy6)2j* zvzO%#jya>v!f}brGJG`^Ay+{m%rp;#Go;`OVmU=l{lA{Ef>oSOib!QsmH;&JMNaAJ@$jL0}a zNJtBY|Na9%PT4MZ(qP0Z!*FjmC}0{^<3%yF2IhEW*2Bp6e P`0E!p40gB>`rH2i)ZwVg diff --git a/tests/fixtures/export/migrate/export_v0.2_simple.aiida b/tests/fixtures/export/migrate/export_v0.2_simple.aiida index f7ded561205a8e6da2fdec13c818af55a7b49d57..8d8aa26fa98975e02d50f0e986c759ffae883df0 100644 GIT binary patch delta 11623 zcmb7~2RxPk7ss#lLv} 
zGk4}f+2A2;`X&h<7Lq_9#M%%DK4@cR>|)GmRWU{hlpfXkc=)h4X zJ-vc8ULWA9p(N*B{_G4NxA3_BoAly5vYDn_IkViqWqO!GY?%hX3Go_*_2978KK_zF zG5kp6e%BDh->zZZ!Fy)Ea% zqSX=VtQG>;l%_}1+$mL|W){>XdKBj3_Y_qUG73nk6?<3*(S33Jm$-%S^Z)99_2p=>4# zC{wC?Y~poSyrQZ@t~9-3xyA2OIcw6=5qtJ!?fa*-fm0m8X_KO4%}nI2flLp_e8~en zuLVTiV;7u`P-ycpe-Wc45M1yr@%9JN0O$H_Em>OJb9oBE{`D=H1aCOZ!-eSkPb%L$ zcBp(tKKv(E3dWFx;&;+gjbT!A9j{398wDR3RSb@fK2}X&kn0RT#$cim=ZP-_k@RP2 zo@S(LVpaquuwCxllBF=8Qjc77J)Led&A_3<@Y+GfRD53lkykw3SgA!(PBtE?y8^jK zue9!aI*v5&IgC$}!BeNMd&k;)S+Q^>3tcNq3wvgoRF&nJ%1n@GLTp&pu0Nior%HmA z5}$2rDf-quwyU zx_m0vT%Y|(yiZYIU5tC9)?(P@A)~e!#st&+Z&wBiR##z@oz6b`Z|pCdidzYLnpAu{ z;^mkbNU1pcc~)#f?|6!xU*Q}xX8}ut!?dH;p^Ga@@lMB&x-ZM6XOO#(1s!Fz)M?9mtnekId3I2Nj;T9Aguy{e zE#Q0e+w_*AV=`EAFwM)lc+-+P7{!g_&(fcqn;#HpCWZlQ zFyvZAn0U-jJqZ#XCGoMdFr@7^_FUwT~XQG=Qs9vT{Ua%RZN=)!@oRq>U&y( zTW{VXE!X!QnkjhI_1ahi2Ud_D6Qr2zQW$$ei#)#_sGSWtcXNJOSB1rn*y3L1oAb^B zRVf&HhkiWPlGiLqI>{9Fwzj3{A_;_>;GLBmX3VJ}uOOM5b|mUDbsi##!W`KWX>4Xa z!DrPqBCds9gF@T<&aq8?!%iWoy(%-$&}4r%lWg5YGDEf}KM9M1W_3clM@|0vjE$Sp zN3n)%p(?;0T6B$p<+Y!zLbDiKtEtVI-srb_iv!stCD&#JQ@*FG@icWs=kh3e7Jt2O zoyALkRY$k2gni1B!FAMb+^1_zK=cTd_=l->107S){mLo14*PTQ%T?|Eog(sQC$C^1 zZ*_Hc!oB=47fGoWr0dPvwYunEDz!Io+{fp?oW&gZT zQl(j}pDz5u>f(ZO^_$uFTF!fx(72thM%>KUS z=kIZ2v$-#i#C=FU`7XpfzlRx9vo@O}(&`7*^5VMWWW?u1?SF&(XRHHOL7%^#Q@+t_ zX((kAyT+lJ>EhoT0FzJZ`j?)9NkaxJXI%ryWvPM%fL?=HQ4R_?6j#4-S#VQf(z#?K zcgB-u%5dmfwS6OJV_&H{^!gbm%w<(k2ZwY;{8YN=u?|_+*!pLTb=Q(Ygz#fL)!+L% z^!p{eu!;|Ms5v$^+c3(5=jPVLq!Zp(KQ=xc_JVVL;rsL~tE(ix)mS(4m_;;sWczKW z#}cA!F91t^$CYw3sfi^=4*6~(ce=wDkByA=N+0{(N={m0M4{=((1L%F$ zTh=3Y64O{^Ho(p43bt0FU}Ei^L2Yibdv@;nWWlb0CtK9|$FV_X*MXc9yf%X6DN}k; zqquQ;B67-m-D26qyKTg$Esqqs z+A{TXL3Li3#{A4Nqbb?wn#?8lxTIzi=zxKZbsjo4T+8%&Iyc;+>V|4MKXnOtK_ZQr z3(PZs2`KKe)S>(_GB0vFx;zl_FkjcHKIT&zZaqY>cTQ~oEbh$~Uq^Wyyc&-nS_55Wl z({ehdFKl8;e69A2_>nIRdaAaChm^)H(P2T)-AJVX++xF`lIdyn^F=VMd^k=e#!OQ- z7aI7}c?`S`;huRk!dqWI%6_lC-7-$SQ+Y&@>cj(E!HU}NZV^<|zGwTj&WLRMn3!nm z49$H9C9-`klv#7$QR8isHjP7+VBA}ZN4IctQhr-K)={+U{y;fx-kpv=Y+02}d7Jbt zHFJozm<@2^bcdJ@hMsM%pG_mpT56bJrnm4$Ze-$XoQKW5U+-GJe4CslcXGk#VnsxM z5(jO8C-evd#h@BJuXFdk;ikS*4ncDY?eAh66xTwTIKTZaHsbssudd1OAui$P%H`?G zD^c`R?UQ70_-WnWfiw3A)_;$yQjG#5uS-IpP}z10fEsg-MIm=pOmcZ)(LZXG=)USy zbIeA+nRiUMoSf;Ra&|NKEkD{)c5A?2y{hq8HT^NMij`v>R;%Q12AJwd)?o?rzauoE zOQv;=W0s1HWs2$i!|!WIbhwVo{fG|R2&U3DU0?t2>-)vv_DciT&2=L!*DMK33n(q7 zemB~4y#^eHXGxlVJ@#wFVOy#of+nvG@>6-+H}+YJl=gG`a;a1;K&61+__Kn;9Is1r zBijw9lLM&MFc0WegzKT%FrB#}~6+xtiDZiQ-0W zoo##h=NM~iS)B~vQ)WB6gTaPC9(&)+Qbi$g^2h#cRUl%@aP6g*^|_i?qRORpWVZ`H ze<+_`xmz$_{6;gAZMNPpZq0M{zw(?_G5ye7D~-^c#8+b34;pfoOjm;aq>}mUt%U1+ z8kD>9^a+(sA3nj2w8}fWnpHM2ott?6>6!dAflA7pwgB_xpFoRey%x{-)AqaMtK#~O zkLb$QMtfpgl#+isP~GbAe<3_?Q^B`n7UrJE(wk&-o32&UCp<&g=42(wM)%{KhK8m$ zt3u>nw|;+#H+7c0QV6wsc+C`GO1bTQR--OL^$>r}-K?xA1qmh(;*dZu!8bjZXEZN% zXkGFSZ7qAqcf-F3h&CN#zP-L-_44DT4&i~o)rmZ>&r?cQ`s-a3ppVKE&U^O2O0E>$ zdY2pb^>#HhxTxLR_TsJiii|vMstqymPKJ(J;PZvv__&_|*W;P7OjyI)$7+QQhvo7$ zlJv?x-S21^`TFs`niiKclcw*B`-6DqbLZUrpHGS3E!Fh&pBD$3A|He*hH?|RWJ`39 zyErrl*m_hv>^mRuz14O$lkZxzNbzxg*C<_LjEdmiEt`@zEc{qJ>0CdQS{WTvODkx0v*hmn(aK)3i{W zb&2vU^xY`}6Q%DVt$wq=*zp>Yirx%UB?Yp`TuF`=Jj`H|0a(wZHu$U2TE+D+dXaVo zvP!vps#v%Hj1(GWd>36>|51i6{uvF$^7=6#5Pw_w&pIzn+E_{mz3m=+HlKF!>g9||p!2wNy5xssB$@U*=s=u_q5r+0uK|UbvrauKGvpftf$Xup%@D^c z(;}h9K1!S`olH*pHD2z*B|!7{7z^Pu^iTa8Vg(modB%oGYnC+03XEqI50x@bZX8cH zQYf~%wfH)Xc+_ioZSfFlI_x|zf}aH;&x_2?Y#tfyW_DGOOAYwv@zdYt%mwz}Ib7j3Gl>?q8>fe}e&s zTflgiusI+Q*h_y-y4}KoTk!2-A&h842+FsMAEb@QK3dsr2B19sopi|VQGQFX910f5 z7EuU99uW;6tlaNN-ke`J1`V>4kcJ7}mnS>H66%}?fq?x9PYh=8o+!@*yPF8j5I$0v 
z@E7p<+amD^dsi1u7k3vd2&%)7?QT2_A;GbSKq{IKLBJRg?ZzI)&#OF)Yme40Ygk6` zBQVlMY;chx`;C~7%iPk!+|JF|hSS~FCXf403gohR-JhjJ@WQ0mk3? z^#3L}1lE8dzRwvBRv@9>7A8ERrqI12Y6{&eBI&UGMg$hStGiWCctpoD_lgL#V018c zxwhHa|G%R^GZ2L(sfxKkcJ7tu_vYN~O<>ud!U1fsMVJIo=OC@(H~TTOmuD<5hA+CBwu z5?gG&z7pkqN;N`{aI0$tWV>Lxu=1`$`&_l){)7*01GQ~q1TQF`VSBAw&|*KU7Bqu( zM(($ufO{hD-F8p#&>W)n3e6#Euj<<$g{Zy&1||j8zn`3f(*V3kA(PVZ->B!PC^BwaBM7;|v70aSWXur+->w8GEB;*Yo-RNTLR|0-$iL%4 zX9y|bafuwjiz$K-;)2Va{dy_-!@%Vaia`#}HUWf62P|r2R7f=f%AqaMA*pR@XbXjN z3`v37Xa@y1+ais&P@=3TU&Q=>gwpmvw+W>!@UuTKD4TY;+@_kgP>_dkmqH@w8fv>& zbOp7EgWa?82(G7gINTzd4@TLc&H{ zShn0McREF1Wv)qZUAhH8jvu-*-K-m6eX4>35<38Njk*=EPlhX2VKl}1w^NYr&;{#O znlS|)AMjVc{zZ4DhTflbDX9KL>Dc}D=iD|@80OGPza{0Jp3ys1DixG-Ts+%W#!gCb zGgJ6&J`7F?Z*jBNQu={#1DC~Hpq*a86h1(2Mx4fCS1OvH5C;1Hy38d;o|q z4FQ3watnma2VF*Qx&o(GC=voi^$rMKQ*VVHWgt9(3VR2%Rpx?6o10gX?@T5L$?pKr z_4cMc7}|p@R42gI_YM$Ud~XG#vNu6cithlohN$qD13l`$M1X&E`2k8LMlNdFqg48S zr~T<%gla!PsdPgU2Q8H!kQiLmA0RQKJXB&RjlbW-g7OfW{{V@#B8Y?BteNv6Fz`%( XQcVd5{x8!IW$+82hCoX4;qU$j%oWKq delta 16415 zcmcJW2{=^m7sp3-vLt(1BeG}9QkY5q^B8yTXU;w6yytz-d%v~D62Gere2BQ!Ab*a5m@`LFp+pdSw}Y8Vm8Ffk0a__8{K@+ZSe=C>FWew>ZC z90~blRKZ#;5?x6GI%0YE{Jg(4(jv}XUpawoGgO@^=JE*rija>1)NyO|8BJ>%T*%KYRz)$*(*da7{pjT|Kt~~R`|zQO_RW2s!IeB zD?1x2M^B`^n3se7-|oNQ#LZ~PMkzTb#wgH6i1Rla<&>qC2go1mz^`#%wo(p=t)Tt8 z2Q6+aS|<>mc?o~(b`rq4sY*Po^cnZS}{g$zCaIzdw0<_=zyPq5UazX1s!WP6svRDexoE<*WA%P_IXyz55?;9Eu4v6zH=LEDwV} zOiOsjLuv7TmDgdG`Vv8CZA+9j8Yl;7KPCT1BWu8oCyhlT7b&6^NP7!AM{6gslWtCb z&z|}0#rDhkh>jT4enGI`XHNpyJ>z^0FxP;`cEQ43Q>Z~nbWpou0@5MI>M68bt}JB< zQm#OZg!y{Q)z_l+mMf4zh{ZC90p1kj4BJ19kAK(_leQ*^&q0i}%zAwuny_AV)h+Ytc)7Tq znjh6}D-W_$87kGRNlAI>zSBF}C#9j=j>%GWzhoUBU*;!!N&aD(#{T#eIvMp;efmgu zgBQ~AbkbC>k}Bad{tAxE$263kIpP#jrr`$~J$VfRkLEcPx zcm$=mPkmtc%y*(=frqx8%H(yFRCr3opgk$Q$DTPxeZoVZd^l?+`r~W=tXHq}+NauV zbfk9IjLk%ihHMPs8H~!fqO2{o^;kQ{^ZFPA^@ezLSgv`1(Hm|p+mb`X(QV_y+k)J+ zWH`E8_cTkmH@Cz4k&&cRNzdAT2qQ}8H%KOMp~3}E9%4YMYItny*yYgawzV-nBsGplzc}e})8F4fx|wxHxS3vCxzHhx*<>s9MNL~N z^Bw9DQ$NBE>8b6SU=nrW%9KCY7DZwOG&edsvbiY-i#~4ork*F#enfq1N640dgyH?c zgQ`e11-1LM!&kamxHU4PPv6*;#;kRjSa%?tUKaHt#1wyy5wDdS$- zpwb7yW+>If80pmVz_7j2M_wjfj}%kBeY-+&?%iUb-O*Zhb8o<>PY+f%o58h9BT z5a`Z!;qZTHcue|l25T_Sx9g(9im+q<7ifMM$3CNwws!*j;n znu+H3&;e32hNQ!Vv4T`06-?<>%GH}VP0t!Pg(arhj{A6$%$a=qV(ICw1`oSSJsydO zDn2eY_I>z?)hzjK4jV0lHdoJ$TKZ`eZw)0PCeL|)>wZHi+NQ`Jr@|v*=s*-3g=<}5va;XHxcbDS&1RP2fdg#MUVh%@tr66r*Krx6wU_=dfPtGl=7xtEz{8*^!8uE@ue%x%o|8-#GQtKd>KS=a$YU8Fk7*XsbbgC^t*q;)kU@HHQ#O&2iAC+1 zkBVVav21|@K2fX|g4`FX&P@lOAANk~;ENaZ9K_pWt9jKlwy-jzW^ztR-Yt4CwAE(v z%jfnxbSi<9=j6f?ZG*N=hXoGT=cRAoZe=`yF^MXnuCe}jCM71P*XX%j1H9Qyz@F?v zl~O_H4N*aP`?vHmm$M(L`CH8P<`wPm8`PS5FgB0z=n?TeJ3rP}Gx+lG=PWH+V+HBU z@1zV2pFHNCwx+5KKF5UGnjBCU)V6hN2#iB!ckSILO)be@w_Uo;WXI(L=Y7RYn^IaI zRP{FO<+~Sv40=H;8-%`^`8_~me5)+yyH5YImZ|il9)!WCZ6i}3+_l?cTWp@S{$R%3 zjrXv!_(nIkG0pu%vU7j#kM}=5yT2{@UiQU?M`6S`$XcY2bD-xfSpRbchw}u*{0k%R8M4P{H#i8@dSRrqISr9lgHOGt^Ec%~ zB=y0fa!#^~b-nC~b*&iOZe zOkNiS57(PB)m=tEQGCUJ993U`zI7Df7RtYLz(YJ!H3|7Vpp7o|iP?<(4o#LB6P<1| zVFd$s8ky@0$4=Zn|NgP`$)P#Ez2>35R8;j_%F!`o>aicNA3V7=BS;T{N*CxA$ct0f z@|f3b&CgJD{hHfYcv+yZb@K*AS246FliWKk#m&247otTerQUt3@-`Yz@!b~3U~^?SZN z28F<6Bgb2U=Og^7@+$ThShS%eC+%+ynTetYb~1M!RX*3p?`#-a@7|f;f6-HD`?#@>Ab-=N5Y(5N&JlOj z(t%lBbtZ8ahR2ek`jm~24Ei43(PcfBmwfXqqBb=*`%_osM^X3UAFnUy%dw{&4apIX z4nA?yDv|t6rWJ{Lz)6R1Wt%jD6s3_bCRj*W?^WHg;+K%nd7-x*&CBHUQfMkGT0-N_ z!@LSV&Rw!$zC{xL*T0l8%dzMtqDn%Tyes!^{mK~}D<|#hz~J+imX)k2Xk^r{m-HTO z;0^8vW^b9P_R%}%rQMQaJekJgFhug+Kyn8pT^` z!_ass+ld`Ws_RKioUb#zt7cKIW3EQsS+`B%Y@>eGBcj0K14(i@%mRqdJyjy8$;5<* 
ztTUJiE9v_$Y4=*3xoQvR5rYl!qbfM5Y)AswjB9zOE)>xd$3G|=6cfyOpz(0wLizd8 z=UNWM-rtnMsfx%w=DMwh9%IH-2I=d(2FOH1A9$SGCr}miQ6RM;w4v?8_^l}vTG5ES zU=RAuA%>XgT#gb>7e-W@ zyM8k}mtpixBPG8*2kBbMzS8h(zSD6Dc8HGtXzY@NYKwBdt6yQl(j#_c=*-ypJwhM7 z;MHCIdNl}!w#&~4HjFlK2Qv)iobt=G*A*+g~Q8M6q zwK6&C{;(G<4Tn_N{8&_-hF)keSDE!Sqc8@ap6kaBQaF0|$x#I#?|yksSTYN*XZO=6~iQ$_=Ae zSakako34ZWE4AatNZAlqN50rwsAanppiXbOpyQ;mZPU<*+pJmi>5D$6bI&pgjQGrH zXG&Aee@SzRve#zxGNhYz{)!l+-qgzjEQs*F6K&B|DelZElW=YrN3s?h3{^kK?yNxe zAdohU#e3vR1tr?g%Ttt77gV(Ln4qXISHFr(A>}^#ZzfF=k7hqSF(~X<{bM?=lK0)K z8O;&QR|f=Lxr_(m1Ll2WAe~3`oR`_~P-W+2Li<3I=V_Bp>2tQ9Ql-w>nku%9t1uB| zYE4|M?d>wJ{4Or5$;Hl$8BPhvHd`p?^0D!DW0{_1S$H}Z5@+KfJU=)yv!H|00QU>O z)=Y+h5(%iyTi#K@-J%U9hrwvT2GGIE9l5dzgw#b)i};)E>*OPQoH``vF~S=L_U0;j z3hTvdw212KQY3k0+)Ysr*quV@|6>Zpsd@;Zx9cj0_{Vb-6{q_IjcrCBlDprBcgT;W z&+<5K=km)@7@f^{lxb3Nw{EQa$q-#@0-Ah-Q*!#=VHfVPiGBP%NaxsSz3NVqRBBrF z{i5yj=PEK!yT+Nf^qKgR$rx4B%3s`Edt6WW4d=rlo#AdG%9GhcU3L-)^NEhfl~MaE z_Ybjb8?t(yX<%9rT4Tju#6#zIBJmAk{5IVUniDl}RYD+7+0vd!6v5#zff(IwU~_HEG6o_l!PzZ>DLwntG{sap)%=WLK%+<`d7{ z>dEgvVBW~$TKk>CX(06UCt{gXng=SK-$#m)y;V_n;j!!}&adwuj6Uuecr5$vcV0|G z#)kuj;iXwNWN8oVb-vag4fVxzP^D~2WpOyWV~9TTq38X#yJj5#bIgHDq#sZe;ci1H zCK1gL?T9Xo2?ff*36nct4n2NUJ9O6ni50b>qfA^;uXuB3UCc9K7v_@CM+vpgAJ2p< zcNDyv_*^{5I~r44RRt+lFd4Zcec+}UlqNbFL>m~1S68M%$pRf1IP~g>!c(ZWIuzQN zz#bSlv^~2dep^Mjwom)h&jkY}kVs1AhrL#!6@|Zk|J< zY1Idjs{=u06_yA3RfPy>iWS$zQ`)vf#cE@4A02>ClYUYQ{c;znkkSqHbDw{eZM{FG zz{+TOgS)It(ZWmSwqz>`vI@64LGM>a>j5qo)V;08xwz57z^RB~0GdgRfZ_5W2K<<$ z@djceg{x}=AH+aDSUd{-J`e*uZ15!@q(_AQ{G0NOjS?1!D`x{Aku1WXmqI@c2FOAH z)W)qD3IV`aEFPc|?!Vt4=o`O9C@70tGZa_Cwi4=${tvpSiNhNVne}TzfeN^v)?(n- z{DBrG`NNWUXA;({xKcLovC&Utp&xYsgamYM(h|^4*^3}laS4b=F~M4iD`{J4W%eQv zh_Bd4Kr3GdFM1OnZP&{3gez`aX{UC`qBlXM90BdrxU^^|K6}$0XT8EJ zfJi*SXFa}fhzhCgmczB~S#Km98!o)0c6f(N`YH|=)ZP&;T<5RC08jx(z;F#-1wJJb zJX}X3AS!B&LOtwE_`4)qIna0ktcU-k(gPPL|NZxhrO^G3yK=o^hsyr;3fY{oHT?yi90av11ZUVhYN=qrahPDD% z|F?1}lo<^I6;P%~3kwfdq=old%!S6zpVvfTG1&C_v!W2S#!~$fmkUY^*PC7&v1I@Q z=eh-KN+o2e4!H#orL8@^Rxj1F5D+H^{$VKu$_}x23>Lnc+dy8nJl=H=o29r4!j;Xh zTr31Ug0Cd;*Yw9#5P|~4pOzN-xx_JZphWQ}w9u~x3*AIOOQVuN zOI6lvC9bRx6c6H9$(RfRwU#Y}PW_=>!v+XfNw^ZInhIG6$r6_rJ%52hr+-bQl}l5_ zbZouDOOlB9bc)91f*Qs3);s7Bi`XE!sUYUM#6=QC<_LFB0~a(%RUPTNg#N*-Yru9wFiVo zFC=}$eI|w*xkA6>Y#1IwNUvXdK!^(^FYcof=!qKkDD(>{gRSlPnvMo38FaZO_-|nw K4ECe~diOu!K&7St diff --git a/tests/fixtures/export/migrate/export_v0.3_simple.aiida b/tests/fixtures/export/migrate/export_v0.3_simple.aiida index 5c2cc1f8416921ffbe347c6277fbe17865a44cbe..b35c5827766469db79d4def1e7add0b984619fa7 100644 GIT binary patch delta 11512 zcmb7~2RxN;6vwZ<_g)b~_Li&)$tr)LLUhgSO@ylyN#+%zkVM&AL@7yDNLKb=Wv|GN z|NCCu+r9aR-ab~&_c`Z0=XuWi{NC4KElzq84opWA2bUHCUK;=0(m{Ol^AkSs)zQhq z(oFRaJ5<=#&B<}vc+#o&2=&&$%hz*pIP|ev>K_}c zvE#H0uU2`sJ4W}{K#dq}B{+11XwMN-)^K94<_`mVE2 zl+c*>j*tlHTljj;S?bmp8S;f&r2_Emaal?EE()AX?*baT?$@#O=L@n_3)E^jUQ?Yk zc<%DuMIGjC)JDW$S(&>f?`;KD6kOoWYVh}tq%%+E5R%5uhIYx6aCqW0A1bvx`1xNx z$rn7{v1uG6hHa^)+_qeUk8(4-yH7BuTcuG=nKu_+taH0ObpD?esSrH_RW<{UHj1OH zb<*=uS>*>@Bv~(|tt!R~xNlTTo6#C#`Iv=p65u()#;C}yzH4`$T48o+)^RhYtv%u@ z$lwuVtf|+kYSQL?KZ8oh=dsVN4+gq7@ea``7-x5v5QQ9v6*whwBr@1yeHVps23b>C zs9rgioq1}EEbIk;6t?|}@X2ucw*i!_K9>&jax#mV1qX3B4ShG#@RS%dH~7Jnx*BD7 zQ_QotIuusj^Y(a3RJV&Hb;S$2l_Qcam70>ew4X=f;2QE(aYAnw9q%!8$aB(N!)Fnz zv-S}}$L(Ux~V-LiAc8Y!1rU5R^_$aKZ3_b9c9egxCy2^hZf;R`bQ3VG95X=`GzvmV9S zfs#bmgn4Y#>2QbX1J6s4l3j=klB(%!ay1|wu=`$qlcZgO4b~k%5ucUn*BBrd7S~~O z)Z%4b&-*sY7m|1G4L94voO;Rbg||@pP2X57%gUfJ*}QDGR8ZNTFpw|Bo*#c}2}Wes zP;0vNkM9Po-m`DmfIYv9YMP>CBt_GYn2S@Vf%IX9L~7r=@zH-rLc+SuJTs^sK)Dnz z9b%X^I$4=!DMhT#+&ndViU(~bx>I4(<9 
zGS0`fw~cgtBzSK@LMD{SqCd7(kMo1#DW0J)iD7IxIn4NFmBIk0k8Z#fi95^qw-iVt z30PBA*7Mqo%JddiLS85aPB3I;WF*fJT%eilCT2fSMmfDj-AQYGpo-|nSl;K+WcxFA z!RtX%osZLVYGX-edc!z{MAftLf+o#c+{HXTsVTH=ia=XN2btJywcWZCCx+A9MSNM@LSGr3Gk5- zOe9?SBAAFz5qIgFm#AlC#zf?#Bl+_%?&tr8scvPuoWp|OGE8FQ6q39o5}_Py`+^NG zUl2DvP>a{y{4v)B^YrWeQgCUBs;u>QD*Y&aYgJ1 zCyuUuP0cGX_AZag3;2cuS=P2zDw$YsufwgQ_*&lUA1~B>GgO;u^bJ2*akjwVNB)F| z`~6PK$DX=f-WNQ&TgSyuf1fk#2(aQlb@-exYoXp`bIJcPVL!dhQcJ?jJ6;nZ){CU>cZoGO%x|_y9ig%xg+t{9 zm0q&E9HlGsoS`z*kjgcNgbujXDp&426)q`eqd z%Q8{>LuYgB1i9V)7`In9T_eiKOr(yxJQFN8X20a_1GL@ps@<;}Sz6yh#%p8012*<6{(5#DwVU?^_i=TYHio>ET8lZD~Q?^dUi{&g)E7kuGZsh*dqNPv3<*-U>D@~0N?*4#$hg|L8KIgeZ8~LB8)qDeT#Yfd6s;Wu3rzO71pDs+R zoXHE)FD;A4%@3K(33FA?w+r`ll6=cfO_Vq@HG!>?IXS}l?yLvALsRzEil$h-N=BJv zc|~s0vv$10uyv>)U&3@$;b=?0w5cxF^LsG*2Y&6Rp0^(lBhAHW-Cj015<5=6~IE?Ri{#3i(;ikC3 zu&u4Il@mX5s_Z7MgzqpESNbnrvmHIo6EPC4xH;rmU0w`h89D$jZkkn<_F?hLqk3EC z^j^4Su+o6tt5i2??TnnWbLJ_m47PjFeq|4q7N8C`Pr{E7CD>l5zUDk+Qphimgj3SO zG_m-7T#V4uvyJOQQh!s{q*-#V@TQ-O_gYZ}J9B4;1M5;z6VdHSW7*Tm4`|D3%w5Lo zEsjmCx(KPkCh;qmh@O(VdpPTyl>d&aLJ@kMf99GIwAIt%933v(nMU!5EE_mHBPaS> zda2{s+rvw)Vs}ca=hc_*>p`b9sbsKUL8Rz${C35B9p8$AOKr-RgkCgtN5{FoUQd{;sHqbHS$5}k88Qva#74Zd4U-g`85W+5(Et4^BK z`g~!|0L;#-=XNDO-%PMG7A}qk`^0EH*RyY)LA)cI#~Qd)fpE5;g?xf_Vqes#A_cNbBn zxXsd-#ix*Wx9i&)EkAdtkGkVn%%HukJmHhT5IUicELDz5c`w%4Y&O>qYv*3}RQ@pQ zvj)$(%HQNynl!R{RPgCteO%wMjt9Ek>oR@kKy)7jtUm6HGZD(9-m1IZA4xcOMZ3FVX*xatPUqiV3Ng!`?Wrl~zQk66-W|{f8pRlqNIi zLN<7wM&BXfQnIMJLVMTx#HNIa@#&RyJ7*HOgvq(n1N7=9+#Pj9E1eqmt9V~lmJhvt z(5|FX<3_B=089H!A>$kK72a+a9HiS3ME!aoN!~i5G;GuUI{o=(Px%EVsI< zHN)Ezp(bLQoWyq_W`pbCt>KCA(FI;j`c(;*$#->4`Pwz#;un)E`&`{gbIMCgV@hV4 zd~1E{=1s)fVT;BQN_|}q^_VukkDvM0pQY4!S({?TGq~PMHno-h6Z3h0)1ppa8H3wW zHTuh&oE*z(a*(XKKX1RZTua$TqFCJQ+9(c`WYHlGw z*MTSSWpkXc2%A1}$wP;ngQYieqy^i(*k#P6Z}*T^{c|5SG&_8|W@@Fm#mO|;gx@2$ zm+=k0dvk&Hc<*29(I08G@@bU{j; zY=qr3sj70NZd^S$1->P!=BPU74jrGhHH zdW&-(9e$(CPoX=qq(9AN-DeVgPq}F})AyN0MB94mKXzeNQM2Pn+kd%fl zi|}=))EyOtv12d03<%^A2VBkISzJ2$rcQ!X=8c-yGlmJL2=zeG=QUZk@wF8qf273Z z%8^xKKQdk zB^>=qBxd$H&E4}g-+xda%>U426hax>Q@Yx2b%Kw)#1mHbj`_yYCJ|-M!rZHIis>a5 z{mRJM;wMml@>@A^D- zZG4(ZDLpd7$H>Z98Wy>A6|2`Sq5FcqV!XHCpiB*y>x#sAwSP(9!yZX(kc^ZC6ZqFW zQ_(A28f5zr$MtH+4swl{6dHV(>n0bhkl59P@QLduFG-Pl>En|Yes0pF9xH82${fxm zvpr{|7pXlwSwmQ*4?Ey97m4PmnsqdgNIu9Z=aR80unm_rz2?M5uJ=5_(=wv(S}d*x zV^18p9vtdRKQttj5(w)X&OD-AeB*~ok0G24>RU;OZ% zRcP{{By(>qe_->ZOMm3MwlqcQyFJGabGtWeaO5?u;4JGE5E)!ntYOY_qbggnIeGhB z`l^8WRK&uX$^9TPVjs96WLpkh&IFnHr90n^)9x+AMdgn7B@cGX6Gaw6T)MKFC%&cVgWo9Y$(+mbU*zx4) zmsfMKeGMmRwJ$R>$sJ}0|6mN;&$702m9^Mmw{3!HCSkTOGjq{h%+CJGRbscPBL!Te z3L(0iV<8AUuTzBe&ovbX4J1(p+Yj{|NZ?aSfDaYmLmmPb=~9H7;vQG|)3d=};Q$4f zg6@(a5Kckg9^n-9{R(I2YLEp-kc8(C;vJCK#SzMk0F~OVW9?iEV0E`I)L3{2el5W5 ziPEYhS&INVVY?gzq6S8$UZnXj3X6GrrnYTTde2Yy0BzFk5d@<2Gx}(6yKRatIzHPzj40XSZ8tDI4IbtCA z7zYKo)b|k@(2S1kwnxFxhFJCrZHPqz75x84rNgSQ&xnA&XNByf;5z#K5k44^64hQ2 zDN*eeQ63c{qJ6Xjpv8*qUOqTk>mKcZto2tr_`ex~hqWTQ(>_WDbaI*9ng%#Q59#)r zpoet7PmuQjMI-pKq!sv(M~oo>cWfqxAE(u2LGJ|C9Q{zFohr%>AHLa42G}aYt?4K* z?8|yd8O{J>zC(?x0N5RcAE9U1$K(SyBuR19lK;Qu02h0`$K(SB@M;{qU3#bt1b*$U zEXv@2D+G=rii_A-Kl{_pXn;NjjKcwRyj?<81dWcNob4P1VIT{4diSgCZ%1?#dWt+7 zXhj}CD+HP_;EE7xV`Bjows9;@gfWFF!rINl%)!Q8*zGcW zh+q|?)uP`qfmREQ5AUlZ2RAN;nU$W7FxX{la3By(L^AM~L}W!%M89>}R0p+)i-6rj z;D&&((=K8oHN}h)-Q}rmmq6a|917xn_6y)BBC*?13EU3HnD*KZfW>~AD_{m!XWnmf z1x`PryB(Ck&@N)_71~9t-*+ndoPj&F84JpHb>aM-vm=6n>ocz%- z80Ncf%*kv(aR?=V&h3WHqcDUr*cC(S4zLD{G{_ntq{6NoTFI4A7(zMx83PaN+ZX7~ z3V}(3@AZMDf{+q_MuA<2hDOqW(n4^zRVj|NN35=15RgOz{_kdiB7mJEKnqF_tKB?c 
zX7dKv!`s^vDMa?0%nU6kOweN6xmPfgk);E5$gY4aDi?63Ll6KcT!g_h$e#nl#D+rj8cgHh`&n(19Mt?eL<>c0i!n zFA;VD_1 zJ{{Dqf3X;7Ge7{;vb#busC>X%wnYXg;U;+eB5uy}Oh-lDuKA74V~$i$GceZPraEJDcmzewx}g4o#x(dz{Wd(@S7Z3Q3znNP2yi3k1-Geisc N2V;gnqKd(v{sSbBgs1=j delta 16724 zcmcJW2V7168^=$jK@%;MmWF6)-4+cB?L||&lAs_l%uu3|_%?41`$0AkgCw zp>nvne~Chm#Icct^vPiqTN@vWVZWvghug`c`sf%ykG!12DQRQiaB~ zf(@=d3GzgjF-3#_?7#49`G&`{@L>&0V!L#iyj?!ANifTH=8C14$kud z>>o{Y+yMY5mI-VI=H|u*oaWYR=0OR#GJR09A?HPM1{P?au)%4PG%Nss7|ZFfIpqq# zPojVD{8Cjn?2ZDL5WKPV0KLXvpo@bi}eta1moQ(gEV=n>AitHMM!^ptW z$lTf#&S_x}|GN(w(<2}sn5rpphJj{6tY7;e4jc6;$Tu~_e|1AnQ>GpSkGGNu(gooV z;`e?KSssD}+|gkhmEqJ%m}t^T*6sU4IRMT3bvgJ=n2b}N=%+;%G#NWjE`0b-DB@hG$R4{7laJ{K~^h|fWcAn$g4-dDC= zpZAsh>nc$Xra81e_mv4DAC>J`pM}3GmP$#&$lk=ANP$FBa5lPjlh!DKKMG-fU5TJi zplRM$CXFtf8U&lscNzQ{TnU1gE$3x7h1(gK*mGLjxc+S!K`JFFgXvPxNkpXx0wBnG zk=wk4ukY^+`7n|$AcjY1yXo>ge!J-c5->4EK@8I8{{L-?8f|AIyuOhGF}$YR^*K%U zc6|;KFyV6$gB;BM|M{E;BYAGb!vz!EuFtVzx9f9|fC-<27-V(q|IO#d0vPc*h~YNe zuFvTnZ`bE^kNw2b+` zrMKy?TO+7vyWg+C>t!6o@EdG5y(dDq>+^}we@*X_#$5n(YHD~Ux}M%(%K!4P`-T-C z4(@_}-VG$hg@Xq@@ZMi>m*+KB-Gy8qYYCJ_LT z0qupzdaNX@CU$n54lWLV+hFCKrMe9rBjuhE&If_VNC@-m2S8(Ae*lHz-^e&12cH1C z3p|-FUF&(X`TQreWH{`^%5t{l=E*GBU4VA26QM9=SO z2S*Lv|99(-Emx!MSjTyc%f#Hw#M;TglGDY?@^3!OITpUuLEweK$)OGcI(7>GW2a-? zhkxJs=Lp_ivWxXrsz17R!A`7f%D3Y`BwZvL(dCFV!2(YWV-ab1MjN~a^`7eOMiiFw z5dfN10025Q7#lbkthaRhzy>F@kU@WLYA%oIjKVwfDSvhszpB5B<;6V8(f;CUBy)R9 zr$Al50~M8(fzqCki?^8-gAk(Uh7iTq6OM}PVeYgIrl8u5eUpW>YHD`E(IpBF#~VLw zdq-z5C)*#}{Mc;mzQ^^= z-{W)F5RpC@Xl`1hGo=8(GjgEf^6SK6K4((~bT z+NkBJ#uL41!hZ9LkHn^hIV8qFt@lD!zRIYA;eUbuDG`k9tOzB^ep2+wpf%xe!V^4| zfES;3r&cn$2Y7usd+5#E^1$SdY&vW}t_N4P9V_@YWk(ynt5`v&%CqqHA!Wq3AYTCz zni5iZ?df;GP-Fd+F?mP6zq{F|i$+bYbvDS0Y&G>=HD}6H^jwYh6zL{t@(?O{#wLe^ zHSW|kWRZWVTVigKja}?JWxt+wE1k-5bxXQEgvmd zo$hN{)KbWt8k+XxLyAa4t)B_vTW(QAut*1oJ@o?8lcm3Qs!53LF_In&R~LsyXtdBb zA)nzE`}MCbJ}HX$!*6U9w7z9q~&!?nNtDuyDxfooKW*A7fAPVY5$p#k&hSWUcw)pGWTQ z@-w%Fm)b{B;n0|=@$eC&6lM#9z)zxXcO;p(|wm&=!(M58@u>IbdqVNBVkGqf-K zgL;DS-xMcV_;eIJ%utJ42)|s7(9L8?lI+iuegzft+3TQXE z1HU*Y5|KW7|G_ccq-na(Pidt;GG!;tS=8yKR|N{69h^|^o|ySD{jLt@B;9SXfb(u1 zR3b)&ZNeG_@9S;!GaRppPdkuT-y4D@rHZiRm(;bq)k~k#)vy;l&vIo)iv!|qtNrVN zp$?q+C&d&6llEe{{nZ1x7U$Z7zD2Gzz?68%hIoZ(Qsqx{GnhZhU4nNCw5V{feO^FWNi%}SN`o!HYUYm3EFl4Kk}}YX|+KfhA<+@q<1o6x4;Dh1VHm)o+DxX6Nd>r3&o{ZFdL^ zkHfa<#R{{pch8QnYk60~DD#MXkQX6f?5)~X=vTI*Ihv@b?0D?+$Ku$S#+(P7u_ z*4_GYci*I&pTNXzu19ozBntYou`cDtU-m8v2gnNOS(@f$hTq7&ZWs_#moj>37~ypA zjkQwf$tv8$*@Zkm$~!OB9uc3ps*JQw6hgkOZYXn)@XhKk10=7tE}6#j&3 zDsFq1$IRf)$vG2e9?P8~gtzgvqhB}D5AKL8zu3varhAG;V634TkKlZ3*E?Aw+YrPc z6OI1Y-9*P)hKU0?Id8IBI|NQzQ}f=ikY%@=E7JYI%%2@TXV%c3Djg9pZIR$itKm}A zz?11#)U6rsSnHAe5pP4blSMTcVH(rfVZg-G^~I=pWCy`S%zVb@Xom}(;%C_vaM za(xaK_}{B%3)TxiGb;r9;dQ#&-{%!05CaQ#UvQesz2x@$2zCGL%`bV9dxe)J4yH)w zA|QOEfXgtUB#8Bc8C{R_?APw)lad7JIy@ySF};NMN!LVRrZRv z4LEU4UXv!bwD%GAK)4fJiI$5Vb?ees_-ZnK5G1KDYVl|-h(cHr90gpGT_`NH4vIk6x z65mv&=wsxP(vc30Kio0=NzkqIS!1^8;gtA5Yh5=dOWjdX=R*W<&4rZ{oyO&Iq)U?L zU$Az+Y!g;}^>Bnk?9ra0gZ_AIPp*1~2_U)#u+7q%&tyeqewEf&ZX!)hzF`m~U+3tk zYhMwND&VQuqGs8(|3K(aM}CTt=HByIXT!U#j~L0TT9v%DN{?(0dD?QZ-q$^E@oY+0 zetw+)$AOeWEAxmhu6C_a1Ye}3%LQC=Qr*YEwS$Q>c~kEX>Ue~l_Gtql}N~Eo_cZ~ z*(15%Cq06)*jC3iELqXvqiK>up+e2ghrT?SskN;+5`9Hj z$klD~Napx?KiOK6BhPMik!R+q3q{Szv)17b>0=$WQJUecsh3yRJg<8}kn6eYou<@N zEQlQ(cz#|H*T20W^uoIkcA3KE2W;7q7Qo{yUCoxL7$(h|V%xKb%Efd92pdUTwiaoLN+Kk&pJ^tcMm$**MT5 z_H(RZym!CU@hj2sgi%eU?G9)45bj~phyepf1nmh??<5|z9V7O)jOUJv%)S{XFl*o^ z354Ij_QS&1h35XLKpYnts@qgj(%5>TiN%tc#f_cs7*6Av&x&Gc6Q79H?%+i<#7wDG z+27G?FfLP!)(RenN3oxM$h}ZjqZv$bk9$t>qSNt&m@fue)c5MM-CZJ1nNw0YOND6@ 
zI!z-E{`g^g_CJvlMAi*6>LHz5!7Lf(T+NYc%aAZB(qS-sc>eTlv5fp^itg) zJm=ldhKZ-(5hMeW>{rQ1$g+13^Z^X~;-_)R5Gq*SYA@)IT)l+{{T2KK>*}pSazK(= zoB#*90Y(G9~RQ1T0$w`6a_vj!-!I4`FI7j z2rR^{2^)|2_H#^%2NWG5hdJLis-1UEy#NEMI!fh6@HH9;qM3GPS*i|3iBrB;Re0yu zN7fvr@w&9pZ|HGg_b23QkbR!_h9W;_j)-An3Nsg8%xogZ*_9tl#@QWY)iVpu@V zROTjTIMx=orm6SjB^vT4nN3c=5AvD;d}FP!?Ooo*r1i7%jB7 zi?LiJeqW$<Gde&36U3IMk2jC68*URw1zSJ08jPbf?d!k>`wDPK1^B z3{aQ_mjux%^w7l+I+_XaT(qd?E4vp5+fPeG1IMd45OdxDpTAFU)`H}~uIbcfb{5Mk z_73HGtd?+Y_eUQzdPpxe*I4gKAA^J2i8GGzqoT~>I9x7|nhjr?v7OQHkIOmGW1Z$w z&25=#S15xR>8>%)ND*MFD3#dNbz&jIh(gQEJ?P$*4*bOo0Iyok>+5YSqF1x6KZbGZ zK0Aiy8B$H3pytnt6XX$ssT`eL;W7MPuPPW~LGYPJ0UuSX1@2IHYb!e@SdHNJwFu!|C z43B+2rTIMRv!4f9b!*6zM3+cOy5VcBny$t5_%ML)r!)C|i_+t#4MJa=HM`23{7Jtn ze$-jpk^Cf;1TF4_c)!nEe~0gE1jIEF9i1&s24qR~m556QWIo-wvrXj^1cABdL~4d{ zr>FZ!1DeKPza*`jKcfDuFJ|T^4%DL=ZJV9WAz3~R|GM|1hYJA2;5pgK#KB9~e$|bXrT1J|%iOrbg~#*+8tUX~&#$$zZle=8RJsXH~W; z^`zLyg!1Ge5wo5b_zw9Jog#DUqSe=#4lAv_UKX>{OloFyNNES2}azU5C2%<@GI)QR$7RR9)&C($k|MZvEixqW~q36eSINs;T-t^WU5M!PE zHfQ}^+vA;0*lQY&>9_jn6gs>9RrsSlS# zqlqiN)zeDI;1ne*iWZ9Jeb-mN)SDZ2`ONze;<;o*3o#yjNI6UBC<&Bgv9QT;elO8mZLBOxfbj>=R_y&A3Uy~zuSE&yox`#U@HYy+^)@y5 z{685Bb*fjdMTwiJT(lH@zqc6D^nuq>=Mz|zXhyo^aLcVV`hKg%TL*4#j)Jm22)J}D zN{C}2U30jU(bV`YQ&%NdsA3%O)y)bM0CGJaEm09$l>Gh4!5ahT3ig0k0`~{K9~iS0 z3Fo>uHoxTG7`PXxTu=bG6+dq#a{wgkE?Jb=g@g30ts!+?F%IH(zUpFG}1X2DxHe_5^CV0pHH7 z0dqZZNFX4RK7D%`yD?7%uHStKmXRiimc&;!(MvHW72-`m_fCReYEX*ZIB3-ZH)|PcBr^ z;l^D@1q2j^gpBJ+1tr@bD)(v;L=_xvtSxcmAXH|{Kvaq0Mk~E3pkX7>7cH(9Y-lK( zr_{zGpi8-$Rex11ZfQT8fXW3Wj2OtOozQ5&{p@E-3M!EG_cjawYrOiY; zN-n5eP^^i$D7~Tskf3A|gHht3N1iCg_?(JfqC(0}42H{;VKrQ6$vV*vS2`*e)Olhq zTr&&+5}OK>;WA`IVo_~KUS zsgN*qJzdd+qUtw6l_qr8(v|C39t%RxY#9jMy(-+?2powAfs#lo)|GSiZ(jjy?7(h5 z3WA!?B@3Z1ixQsqmxkUOwe`b0s)%#rQDAlzx)<%?L)QG+(yxh2mU2^W-BMH;=SE8} zB`pvCIVLR)eYj#NuENT}atkd-RB7i%TZxlbvB<-BNigVHSn4WR0UfF6a(vzs^Gh96Ab~*6ZPbL7UE`t8{KVem(NB{r; diff --git a/tests/fixtures/export/migrate/export_v0.4_simple.aiida b/tests/fixtures/export/migrate/export_v0.4_simple.aiida index c38c76a2dca65b4a8898bc6c1245fcc8d80a9564..53ac38a473f51bcbb3436f424e1ec71483bc5841 100644 GIT binary patch delta 11615 zcmb7K2O!m76n?ghN620YEqi5;%FGTKQDm0ANA;w#M?5RpB&!gq?8sJT_KuQWMnygU z-+Qm8_wo;2oA0~loO{nXzx&;1y#Xg91qY&e4hNSC1Rv^JftrYKetp6RzS=vOTe$F| zFoXz9bVf&HI~fS%X9NNr05ImJuBJR!T^#He7_T|J7x}UYBdWUuKE`!FDm(5Lb{waT zL;29Wh_a8UD-Aw}R*=0#^SdL>qt08Kq{`npAm^VZUAS!)R+HXzSd14OQ0G)tL0@AP z&pGhO*|1MpD3Jt4?)GEF!hk~4%lOS45v#wRubNDF;Rn_uJ$&xmVP_*BzceL(p-yXL zLQA6fpz=ILN3;icC9sqMnolPtB-uxXlj~JPd9A-TmRi3kPrgX8jOYiyT(AnT_>KEJgeP*N=)uXC9me7)$U6;#Kwgzr2QoBBN z^@$=QDFLlb6&W@$lT2Y4?Oi=z<_DQga<>?~A!qBDzX&l(R4H5(>|vyl=MQIWl{a$X zQ9d!lRWKv;wmXnI$$TW>8u8Owzo=kl1?~C4JFjah`k$+%wh}+{^^T7VoG`#OIJkH? 
z=LCiKnGBjU>b|67^-Z0%v^qEe%t}o`feNAhX@@)LJx3^_>Quk#YqYDyvb@xzy`o55 zK^O|LWyGZqVp2SkHdpQ+afY(7yopU-PFjXcK7Cc|G})&SN#0zWvnQ}?PErI|T{pU4 z612|ck*6&r^kC$M2xF&Ax`jUb%86z{Q(=cS2@ZYo4}zD8@;jSrG(q|E_Kk#Y0|Op< z9D=U|DCtW5ZQm^OM4sYbeqHLuE}eT&aJ+|f29j}hL?=wL;I`0#G+|B~kUWf8kfXubsWKl4-n@TAj=8xzTu^1MyfT`H*bzZ_(TPLhNkreYzh$#=Gc~Z{1nV zv6z(pc2;Nsx?ab=U`=?5zTx#UXKD5GOct&g9LuVd)C~6gRZ7@1qYjhm)TJWIAlMl0 zPezDetbGV9!!vO?VFY{3v}tKVEeIAsRM8hG<6m-+@48>dCDw%z!7+`&t_$zl>#u0o zNc`-o3b=iE<9X%{<<_e;QeVY6D(oE}==9y5O#t6~tgQF0_|>QS&6Vzu5D%Ox88RxX z3u5!5^lX+t)?AmD7huVyuTDqg@Q&iX$63A04pGM4{2n|pJ*ClXZTObhw{7kFg8IX& z3dSso(OVA)HuxnWZ*Ld>tiUV3)Vk<&g6ij`NzSK6`Wn`HU>)esww5nC1(&!~eCTN^ z-m5x#wkfP`ajt(PFBkQiRU1}=97*vZ8XYXk^NkqS?(Q{fy=z<_#>UO3oR8-}XV&h@ z?=}b_xYD`xU_9l_azG-u?WWw=5x?Ub5Xx0z%94-`yc-dkaWh*Kct5{N;H#X!`MO#c z|N8e*&TkVM@+R%iac;N=u+1NCViwDfH)R{gweq)FK1Sp2MsS6|D;G;F?EJEN)O@f9 z_4~XJ#;-Hku6nr#mUQ@1l7N|Vzq?b_JfVyS3qX=O!eSb%b1oiAJbsxc1M;lDA!PcV zroZ=-i$TOZkxfUFleRC}>YrHOC=2=G zkgH)8@%NG%yBTpP1X`=kmKbXDC3TyLADM8Aa`K{kt9L3!AZ9{%T9D?X*dsUoIisth zrTHB}kO{8o_#dLA;qfeuB~83N3#|w+>MBuno_Dv>`PTA zOqy27i!?9n&ns)s?xa)hvIw0^v0;viE=X5zF?rm1s$$aQjR2liQRu_gvnTYgWA#T+ zL`xZc_qZ09l6&o~skS;EwaV9$XDw79HGizijKdJx_*p|z4k*h9n;S9aR|8`|I1@hf zeD~a=PL#K%II-72J7)I4Wdr=w2rrVv7AuKc*oD+gvi#hZ&C!-6FE;Z1@7bNF@OP7| zHAyx0jCP^tEWx4VJH?(vG`Oi$-P~2D+J!`iz%@TRQl)}rO7)mSPz+a$)x zhzL77$YDILl^e2ATyNjG-7W;1Lw%CJa0>%6v*&D5tE67#fFNjzYYIuwUfLu-p|~CGDCkVE5tVgV zzg(j`$}e)>a6Bwiue=C5E8lY?sLbZ&_cajrv;o8qoF$~*6=OZ5s!!T9fW@{ z=L3f0HBW51Ug5DOW*T!PG$_YL_l>+M^l%f20u7hF?L8I;VrsbbPxC1MBCj(S2q8EJH|D=BocOGo66D z#|vzn$ve^>zIc0LvG=xqv)!9jC!u);r?J2}o1Y59*cl_Q*9HskW#vT0fBgD->!x6p zcJ;H$(uM2c6QQwvWYVe&lCP(nE_Ma541z_W_KJ{^0Cw(`uxtgZj|=5t6Q9eATUR_B z-i>}W^wWL5Ldtwyp2)Gv0{bRGkDVmxiqONfja1glxn5WSuaf6_a`Qc-;=3L?D1TU8 z_{2}{?%pY9kUXNAH)ocT#shP4^4bXTW~A?ZXUFKC(S{W~cTr3uE|Ds3@Qjmgi#gx? 
zr!~X3kW!}k!J~;?f<3RW=(J`D{h6t;bca2bURZ15$p*ZC+V=kHFgy3vko@|h#|&ls z(pzzY)UQ0>6uM<*d`|kPs@c|*3nAZd`VZ9z-&(GIOJrB)vBA41O9ug z<|mgAe^^MZW+T7mahhvBWI|L zV2?3K`~4@AG6&kGqQXzKBTY|EKhq;tIP_@2s!PCpPQR>OhFO4Vh%fPGORvIKrIh)} z*<9a#k}*GuRlZ7xM8$~FmIrjn#(ATVB@tpmVbOqw|7FJ5fVF<5HX6(%Bgk{`Z{k#0z3xVsFsW zvj=-C+t0+0HC>g|$+;bPNUA4%bY0Y;Y7@r>*2u5+#M=E#*OGVxv{Hh;?Z(CS30FGF zcP%HdC0A$FmFGEGxrwm)%Waj=F6E!rdaU{rYM7!pU%6&h6V=aic`!3WUd*WchZjxo4k6{E}s*2 zyusO%o0Wj!vV6w0WVhe2NRrxAbCm^CTU6AglSFKuf|`unPWtV5#hyI0A`dR|4WxK% zTuMFwv93yj7IOOdG=`KmR2 z?P$F2)T1z8fnzHyY`*yl)%PT%i?1)Ia0=C*)4n-bkDp>;gT*$+t`p^CHg%>nB;8t) zay8}itn=&YF+T&^q|c?!t5r&0*m9&JYD)Dc=cP8*M(~a{8$;^yj-hql9j;}Hx*redHLxYdw&4&jZDyNJ{3pPk;YYOJ;X>@#>UYK&$ z2p{vi*Sa(m;HzOa$5$LLsru zilK*v_(Kmgw8_V(9hf`>StdC2U_I!<&|~qslz6b9)Qkmd`)YzgCiYX|y0%yr+4{@F z3IdiAw92ZfM6+U?qpzLmYAJ`3`Z)zJwhIrrI7>9&l=S|&zIrw&{WZ&h(BvazJ z45l?~^N+9y)`^g|Ug^5|#k;kWfAzz#;gw!zz7^6HrbeGS)ovFllYYolnmOGMx6c}r zBh50sj%s8t%Il_{Cu%toKUU)!i^~>XYVa%a9~bnwvA)@9r0HMp0+DXXa+CH`bSSew z^T7VI*?EPkg30?5{C1X=rvB3jVEoLYx^^6u()%PZJ>H=gwnf^7q>o)li^N_G7OIWi zFnKe;XDPfE?1WvtDF`lxy!WWCfcSXlOflKdv5H)_e&Q3mNPnGQ=|*v8m2x=|bI;kT zrV&O)qR(5j-$Y=ISuU;B@;zD5+Fxn?{cLF*|bERW-zbw&s)># zF{h(twjaqXFRIN+9oLH)R-9d*CLv2JFxsL#6O-EU(?PZD3)Q#&SxCq26W5lj8bj7C z44^f9KR&Mw%!NRbl5MeUr^QJ$Mw62436z;RhD_)B@GTn)NcF_X!_uD(UGO5;)p~wJ zdd`cy<%2x)of8*JVhPi1OgbfaB68u=w&=j7%wPZ4IkVxX4xni~viCb&8xuWe|3NjnXz_7W#60(a zlP{M^OO`(Jd3e)DiRjKtsmk2k3q4Zz$y7GVPJCr|xG;z_wk$3o!?l1F&fUV2Rr-b5 z-zl;^>&jJ-RGJ>6G?$Nhq~+vfV=RK|x-G&AaFqKqUaC5$V4 z^h#WKaj@JQ_n;E%(kh(}66Aeh z;Hfq>a$M@2rlduGZUbA#WA!h@tZ`fSI$vha?;Xt$0NSm7|h7 z`?vea-%bJBE^N9z-_EV#20=}`c^P-{3Jwt31t10oh%pQ9V&oS@U~kC(52nFEfjLrt+;S9JO>b{u8RmHft*#9`arQ8HGu=h ztVi$Vg~kJ3e!~TU7}5k1Jxfmk2rAg!?-_i3kmu7S1{)bO#~O0 zC0bz4cj|QW0ULCj1e76xq*yWG8rD8?2j~TSV!P}akUJd{d*n{X#P8hspKJy}H7BL_ znaV)R5#6m40e6~e_izUQKx=UD{zxK>0z=FK<-^@?g#j(j@^{nx-wK1h-oq^baCoo6 z2SEW8^jO1w{Q`O_{~o({xo;1@0Kh){0#MN4{{8v|^zc)=H49)HwMO>vOKarMtnwNq zYlXi=(*b-)!exd}|3sfT#O9{ZvK$&yMFrd$eNN6;G zDVtE!N&_%)s9-jY{NBA6cnk^c*7LYQAZR234-=}V?93ejB!UeiH+I5K0kDGTlTSy?koubTFVA-DoV1-{mk7hFHL89*%IP~I`N4ihMV`bxNV`cB) zYys~r7;sA+O&)lQt-}F<&TYp+Rjf*$fN4M7PBFq50Hy(tgWo^`=*d>nVmkl9BJ&4)nAtb`CJR}Q2tJtO?6vN+X@Z&jhz^%PU&_v+>O#q|{LPGqV z3OqB=sH9X-(pS7(yJe*S3XgD6A5IWLBmD29Vw;kAE1L=$_Db4vHw&22$Q?p$F-m&u zH<)XO;8Fz5x1F_u8B8ghfF8_Fg4lK_@Ri1=hD#M5`03;CPGM4vkreo2e7}?uBn2*I z_DdnA0UVP+3mv&FnBrzT6riD*!3jcesk4KGsdUgudUOa9pnGD z;4cyMJ1nG^m?8+ByMp8bl4yrPOhtrF(uxBNiox~K$v-V&$|H1=D)e%Lzp&X%av}sHlIaiGgHPO`s&z-xC9~z;ROG5(~}0ohgo46Y6jR zdQI$A;K?a)3HA@coJvIngA!!>4aPJTF3|oVn0y2QF4)jWdj<3ZNdg4iKLiz%h6)NL z=k^;^bsAikq4~Gh4iTlp3H-l~^e8iXMP-O!!R6mSM75HRiV7tLQRa3!iJ5my8Bjpg z{X3)|^;SoYWa_?pf zvlB%k83KPK`c4-C_4;eTpV1Ql4bX&WhW08Ak?d`4i^ke%2hg@?cG>|JpmcX-09qG~ zaV!U^RsrSv53D=CSb!2nVXIf!qeIuN(vtyt~B)J_AT$<7p z!;4uyw-bso+ZZu3@X(gDy>XYYG>`Iny$!~i?s z=wxB(Ds)Vr5+LDGNI$iugWH%4{Bidr2k;0#5&*#R!`ck8RfdHG8UrDIV&J%OC@?v~ z=D$VZMiRg#A+$?5+1A(R~*TSfxgB{Uq>(s{2Ba1ed@Ym8IS2nd>m+X%Q4A;k@trM?^;MgD)QoplV+R+8x9VvS2* z+x_U)1!BZu8*N?f^c!tm?ewDSbb3w4768g7kifT6wodMDf2~`s-6T$6RxCw^P!l=M+pGX`M)Q3$^wAeis6Xoyi|xv1`>2K zuT{%+4B=3bX_b@Dg@Q!}F@8^n0BHL5BY*P<0mvaCh0X%o!}76q_DC;VEu|=e&cHuT zSbpuaGhmm5%D0_fN>D_|(#G1-(F0~L=;>hpH!BNrqnDeEsFV{O)MOyi-|fVh%&yl? 
zg!&_@zemS8)((n}q)G#s4UX2ytTXM=K9oVk-KO}vZOGe}kZofzS;VJtv`tD;bmI|5 zj6cM|bA^#HcwiIc4a>a#_qd|hiRNNB8VF_duS-^R69h5)q&M0GWs*1A1VO@P%-=xF zdggEI7=um=Z;JatjHuN{-5=h(QTK;8uXhZB<|ELm{7bVux)9eGl-Ic;m$ujxSA!Ty z(T%#g{mDjM-Tq{~u5SD3YLI}SNh9VGbaeLm+o*wXPSpcjpVtfOCLQo}2=V(tvwjth z5l8T~->xg1Q)Lh&=?oyaWPpO<2>2=J^xuP_uUsDSmaV2mCkgoa8u0xHt2An`Cau{A zVc1qf=lt5s&@E9EV_x{j(e8H`bV~#=!onL(l!=y&Cdx$1dJ_dzlRB*!vE1C6v=m3z zsQfrMX1pFd)Loz$WH4Pp?Z@fhp!H7INL zK?uz0^kH>(`KbX|A^A1J%bqP-D?rtd~73~~)aegz*^Gl+yOZ{Z-Q9h0+2Pm98sSNAChF|TftlnAwL1v5| z{(A7~f34}(40f*v$`}}Ly_HcYZL4(WrZc>CRqnibCT&gUb+bsr-P-Mglj^lEDkfU| zW;~)(W}mA`U(V^_q-q_>7s)0c8}UdEJLc(CYF2I=XseFo=bQ6I9-;I`8s%3jh%cTH zWwSP67Mv6gh%P7(cRFAZ6#PP4V;^_-eI59n3|lSjg5>W-w2q@C^=A1;NRZb=do}43 zUKw0wq;sPbew$^-8DTkeFp;DPFHmGJbG*~mIf3dVypGzM6B4F)2UPdZW|7{FwJbXw z7CPGN9JEkDEZp3l)!^uQMR~8rK0$qVq|8y*j+f8nwFXPxH+K0)Ned>ONl&E~nIe#5 zhqJv>_M6@>6nSK%*KxnBkl#hE{fcJ8?+E1T+b#@nF4gYjEwm`9IxqFLaqpIA7RcR4 z%q9lq6$7jOlbyJd_2W`DF9_&)cmCMy9r<%72l`w4Te^hjjOOvJXD@x84kB&OuhNb( zrTn@_ZYEpF#bhsWdI9zO^n$*5JmUb8%e(f#KThjJR?f_fxlK$pJw)#De>IGkE~KDD zDYToy+px7arZo+IHu#NwG`=OLkdwz{rKF1$PP$(|cyB`ah%rIvpgvAU`GP(q@`VKXOKa<*_&r90EckkNX z>jiR8g9P1P5p7AfO`p%*?c+UqEMnN3OV(ruye!^rPYB+LJo3Z)hnP5z=-#;(#TTS% zU6?sP*tX=-{r;5{e}1`3U1h&c1G84WCzERaf8EVbw~(CR;X3t-MclmHi0;h&+Ixzv zUmRYTQ@m&w(F*dULY(n=J@&DgzW-Jnqc5G%1LUJ&vR8KvpC!#*|1AeFTh#Mbfev$mZayMeUj^e1Rnagt?fjf!Ch570WoYhx$D=z}Y8dCUAp7>=o zCXMXqb}Dl5~8>bqb-*6hpW{Z{MCg*Y8)(Ub;(Ou$Mr+)-L_N zOE`O(2%}%$oo35K+8mg!lFo~sA)`-KVo!5x^%v;RGHs89?M=Je7pX^T93Zt#fg*yg ze5(9fjT|jC@XEc)Rs)DRL@yg~D(a|!D&Ap+A(lF+hvl?KC(5M1mUA93;cXXG$mgC` zd0tJX1wTaB9R0~S$&^>O>QtYj!GkhB4cXliGkw`*IU$Y=?F+=IE{vvnfk#X~oKfbR z2~psJv2eZKC$U44XPka<Rw+yhZd;?_xk7!uZscy{o&lWH<%7LmepMR9 z+7lgmAH_A55W`bD4Nn~t*vhJUQ(m;v?CKTaledd=uDbi{vrg2^m-Qp(FAtp2>6No@ zSCQV+m>nFJyO-~-kV9Woy8vJ+`nd75zU-}jzwVE9Y{Jxm{S?yF)lyG$`ZHni{U-xX zJxeO;nS+z@J`-2GUDZ9*8&;DcU2xLFt8cJco6`l+ttNGUB;EbCZ%tF?Lb$APLe^l< zb#sEl-V(a48R3IZ6$4sK#Pr4skM4@KtL*qTMep?P+)2CO{@1ZJEaT$3kz`^+=SLmg z3)$j5dOxQg`e64i^hzGXkjd4LcfGedK5lm$lJSo6hbOD=emXf`yX3X2V%rjOyg%l@ zyL!P>Tn^a3aSACr&epq26z>g%Kcg{$s4>tAs8pmx3*Gt z8oOe>tc9BfO<$08X2%ZnWCtY5grsG(q0C0<30A6p;u~4 zoXWYlQ~q2Nd|c8-VJLJr`NJv_1KCo8x96s{|I`1^GbT+#ru6<67*89=rL&dCgOo$w z?A5SiC<Q+@cs4`%qy%T5_hKlE}(`yGes!p5z>Va(9Iq`ho#rS{_E8msavK5!>A#B6ZVB>s9x&b}L+btG;9qI2-G3b)(q#>cIOOg;6j z$gYPZ+EGyY%;9sLkZ%oLl$WWp@eKaaIUt-4e<~^y+$K_^AXn4W7M#HD$ClY259^8_ z)&5M%b>+X9F7jgLhx<$1EEW@NDuzE2HCA-=M#O(`I8Y6DK?CcWoyN1i3gpbD%9Jq=Gv$$ z1vL1b=IJIe)Ft~Cel_Y=qVwSd(ndbs7X6X{b1m)jwzjH8tVWrk8b7#ZM@PO5bkcGJ z-GTd(CH3bIYmMA)3F0~08Fn*38LofysJp-Ol17uerbL2xcaL8_vZYYc!u&+bjwG9q zv9?GD*0VRO9!dOgsWAGcb|||+aPTwDVzZh>zn%o~yGMI^8ee`mbdSGN)XRH%zTF6@ z`Os$?T5fwZG%vZ={=p6Ho>9Bm94#Ks*uJC9Tyr~;J``IqRTje9CokzZF#EvaAKA1` zn2{E?Me@_d0-S|sy&-3Rvx?*+f5)xH<>CTtg zXS#_vmyfbIJoS8qXds$^3 zPS$oSU1^@)f2ZnwcWQZ)E}^DILcgISxveIx7p3KGXvEI4`tgpp9=@#{q1NDWhKt8e zAfVwR%e8H1f=B8NcB<~W^Sa}-m1tVN>DjBoqh7gOwbSL-r^rkOHoH-YQ|bw9CNKGFPD~)6KapKc{by;iZZno8)#g&#j-+ z5fSj3TdbMEKhoO<4sml{WIMq<&X%&ZJ4ILWjgwBmrI#jsTQ4)2TB%*L(|2j^v``aM z37FUaocg5MuAjjAlu$#V;Jg2hDV)l$f%CM`YI=k@R(hBd^Q6;YG?<&jb*N4mG0`*=#k6OMviFlBDXs|w*y)LOEW(*+ z_;2Z_7UW*!qw#C2-@!*yW>7No=A9C0i=|b8`Z%G-QkQ>!xZ`-O|GD`h{$~RQ{R9=w zN4XAr(Y$#5{ZSX)3yCeQ7gZv6-ML#$wp+|O^X`MiuCaLbZ<+H;_|W~FnuKWTQ7Qm{ z*3jskseG5jmDM#I+yk;E0RT#HdF^272E$%ry976!(gc5gb5s4MewE{ZFvXAUHSamp z`{O*wCCnmWC9!^0VGfTu_%(A>Qw1oN=65&b4~vI%o=Vag-xnH6H(##zwYmt8n>>v7 zCcJs^?aK>r3a{AMLfe8b3IpR*O#E}_>P_%knc(|#xM`y^#N49b4L#A+KC?4k?@pU3 z5WU;_YJT4X7LWPC3c0uNfQFYcsrML}^py^uiHyMO+8eA!KX`&n=oW*HYk)I;HBCY* zJR?RnrASBi8FjXOByq?!=0ibL+}SiHUIV&H>3Q+&@o?)}Ct63ZCZ!p>?vyi)Z4n`S 
zVp3mJ=u$Mhc&07&dP(S5S@I5?*)LYOjr_v+g{|VdqN(&8_7m>8e~r(fZQy+jIewR~ zg~O%D2rkC-6O#<&F|q?H-XVsO-S$rgry7(Zt2y4?@{*1fAkzQBz&)`afnSI$-FFYE z@GhEQe@9a)+*{Jtfd%&PnZwasb>o>&q`rwh5zQ$gta%sR><_R7wNW$wNbn?L;E@m$ zvB+>r4gc2ValxMV*7hnE(AibsWm7)HKdIxKl7V}d1(SuFl z(t~WetesinofNfMH49wxEL;KQwi)^A$9>t~mZjpk-5iIDI<>vWV|>@#_MW$mIH-%b zYLWcv*w*vxl=%G(gRp-7rY{w5hBMP#nxtl}&t2lAESxGeEU;Z_=185|V?`0vTyn#^ z?n#;TIA6f&Y38v8&ifhs8*P(>OwfEQe1P7bZsuQb%#2}l!Gw2IH?U+ z$sGT)j|&M7QOB5aH-Jzo71CPW-(H8U;LaY32aG8z;Xy!M90iCgcA_@=FclQw&5l)S zicj7$71ATZS4~}G*YAIGV3o7H$%lG2rtAe~%}Tg%5+knIx|~qI?u}!Is}hq7suNc& zUWaqBqqT(qfTV_-7%q1Lgm=QGhiiD53P~9^FsBqz=W5hXJ-!QqLuyOOkrAP!I zKzZRNh6_oA7)adoa4{1@R7eH6iQ#%fj0zV{5=flG#SixRm8xCQL~dTV+(-aWHMp7K zdP#yv$k_C79U+CNM*k?F{8L{9^^&Nz2)1xWGLSEgbJn7|H>Um(luxb#Q+$G#fsmFG z4TawR&9o0?B?^iudjzGE7*IxX$Vz=&b(APDMEJFRgPOcm@R+hLB?Lx*5=ykSf9c#P zYcU0opv-XU=HTsjp3@h2|!q@x#;3+gR5p)HavTRo=Cu^mxWwwZIp>kv@vpW}ro zrNm6p6rY7<;4z%VnJBlS>K0Q+mObUu{i5^;Mh{n>bH5 z=@T)ypoDW1`YLp-;55rD5kP=KN_0;BrLds0e$9RCwRck@LOOZVr<<}#5EZ_O(@pHi z5rX>lpUdz+h&{o<@aO;STnXLpyn#I;YGQIh_2$C-zkTwsbqv4fNf@gR+<4lWu~)EJ23VroLMx$tyE+Y<$JV}YFV zhrtR3#MFUecWlGEAW#gnlop*)e}O`8|JtpvGfFlCl~FheN^#Dp%a~kHVY-p{V{gAb z%>WQ8g`3&2C6Nf_!p*GvbR;6Ya5L-PITIBnoV2JoN2vsp3yM=$qqL4rg&iflEC5oP zqSJpJMC`h^&qA=p|2nSzg^DVwwOjP_Wh$g(MHdwgP)N9ndD9KEs8W1VvjN;?tf-D+ r0~rh5vctTWLW!{mNbwmi2mbB;osK3ExB&t=k1U>sd^6QZq diff --git a/tests/fixtures/export/migrate/export_v0.5_simple.aiida b/tests/fixtures/export/migrate/export_v0.5_simple.aiida index 541e903fcbe4f505d58fb4836a05eb3efd3ef3a7..ec56f75d2d7d0b6a76a2f000ec716bf7642cd169 100644 GIT binary patch delta 11536 zcmb7~2Ow4N8^^EA>@riRNW`^ehD-JcMKZIKRY;<`vLY)kk&zk7$SPSO*(2F|7s)Ck zWc<&$epmPAA37Pu^Ld}=dEe)K&ig&5$|~&DXV`ExC2Sl@2zaTh`=}xR^4}Lc;JdAz zshI;8%7y@GGo>TE_=*eyDbR*M4gxl&Mvg|D*B$I^r;l0L^$PuPg?kNi;UD988j>3R z0Gl()^k?C8azAs?!-_ZDtiIPB!%n?oP;fEZ{6(U$atf}V6O&Xjq{fuRYA@g9S227e z`XiU@+>sQ8rR?a=TZaQI5+%;lZ{9Ts$GK#1xxVm#Pd3%n)*z;I;Ti$~e|;q;byF)_ zTAWOZswXsfqqVlPp*Z}XV=BZFrmL8(WJyUrE>^f*H<1YgGh7zgpF92fIwWtF1>6diywYfr2 zAVe5oA=4Z}X$oW4S!g|PGv?a1$WNv#U(x2z?|v`+%A0S_q$J1K31g{L%rx7KG-=jd z9*%agh}#QvG6i~L`|}UtKky1~%goQQiB0=-~2{K$)$cB3!rDw$@B~%Af7M(r8!_Bp1Lox9~3L3}o<&?2A zHB8GBU;gDI&w-YS?#C2YD{k}H=W*k;m&M~Bwm+G0h{@?1K^~q#splB?Ja$KGK1+R& z9=m^bBCY`rT=p#Kfq)NziNVc_`W4mjzN7a{gA`=o z-@>zSYPu*j#Ovx)Ut(S9H|EZrHBm>L&3i=-Gq(E-YYC>qk9d2hTUA6FS(yuC_dk(g zXvv9wHJ0RFD@kAl;be-LU0P+^q?>hvTPh4LC|5I5w_|H1P7EGyH{db56-lZvc~irl z-}4oTiMcahuY{VSoTF<*XaaNk0^i1KL&brtP=+jApN+xiZSG$c6>OJQgGO#vSuH79 z1+BQck)2#+gtKSBT7Oa|NR`bUk*a=l9iIQtSFzAy^||vg)-x8`m&m#<%4|MK&9uA{ z@HST`#j`VDCS=n5Qh)QOR|2&!U3f%FU0mIDeuoJcy}ovk7~h@|nbVd8N^> z#B=G#$%HC=Ha>p(F+>WYu4gQ>*ZB9ilHcpQQ?OJjeHZ zaei}F<9fN6q9UA=?nw)_`=NwQhtL*2Q7(BR=l7+WP8X>7=r%8nE#@4R%XUq2cB90L zcx&YC>qZfMB)J^>P**#>KmFJXyfOYJBRSO^vf^A?LxI^d*5vB-xzb5qc+T}gj;UXI z)ge_P-1Md*@kiMk%XIk8%o!SSnrOG%jCR}0vr3m-;!LE1hs%#q2*n(|5iKcBThHAl zrtO|}=G|Ad8l{j3G zPeOqiw9Y>3(zIsNMwfz!e(2F^wsn(-H5 z_}q!J-yBUo>zc#UEL6=6^F3P^XUc+M$GRXh8b2V4*fh=Gw`{xnswNn{sN3H2~KF299wYTfd&5b5%%ga zwdf^vqUr_uOpYI^s$W?f75LrfAu3qeY#k)}RAV$u=lNM~>141)a9Q#gKAvN#eOpYN z(mL%Yb=S;UdZnRKw#ibT!}|$&Ae7zFSV%{RMS{-#_E6wDB{!;`f(as#P})+^U6MdZoKw=7(plXrcr^m1NR{QgQa10PRod92IaE8$BvxW7*DRByWCDDD~kzzuZ1Rc7Fklg`v+Sr)u!y$yQXsrPB9zZ z*^I$oHy*3i>*N0@xI(G;7PkQZov~N=$x!ZRKGUXMXj|CgZ{jMH9WVD z9~?0Ji*%>I%I4XySmTXNxrNV-*5w8)<3Mj*?G*bue1KdP-Xc!jILfS47qwyDRpfG1 z0b;9fXBDdC#xTm&c|Et{Oo8SXf&961q@1@4l7bSbU%2&*_BcAKPaCG%u~acqyin~A zYacCr5=#wx_Od8lysolRqJ)%LMZE9Q1+fYD&~-g3a$R@P59ijN9P18RyFA=|J1`HQ zeynDQA&Xcltr~8N6W`ryyr4avlpqIfGb7bc;yTw5xZyFp(UGWKYty)9$v-7}bI|wb 
diff --git a/tests/fixtures/export/migrate/export_v0.6_simple.aiida b/tests/fixtures/export/migrate/export_v0.6_simple.aiida
index 759b6f059c5cabf536d6be88f416831293f11d49..71f125c8cfb260b78c12edf63f37ad4fe3f9892c 100644
GIT binary patch
delta 11235
[base85 binary delta data omitted]
zT+E|Gs>1CTf2qCIEK2L4DB+9fVXKEyw$?`#Bx;1)nC1U;Hlhr>my@VaQ`#Qxe7z>= z;duSs&m#_$2f`TX%n7{{IwNhGyLt^E92FD^}dsvbQ z-)mhyKs}>IWh~Hi>sR5MHv_zsLy>)!C)j7P<>ivj+h_IqX%m*SblsKK|JsxS~qEX8{>vHB#XX;5Ybt3N}if}RQpic~c zWG3g5Pbbw7G)fgKEcPskc0L*Ribdq})z*yZAK>+YljB5CH^^oAsB2AS@OTX*Yv0K% z>ztG{dxk779EbKXbUeAF{&8fZ)c%1I<% zGm>J}M~7*P6WmyQ?TC1^38;A`zJCAUE$6&8$@FDe+b#OSmt{XIIK!Z}E4f&d*NY%E z3r6?XIdFq!;X0+9-x0`hsxSF8zOU^Dp<1K9Ye$jdr{&$&}b01wPUdo-Z z)hXpNqI)mkc4$IKCe2RomTjMVN=+v;vo6hw`uZ1Nb*`L%1gi#i36cvn1Q`xzi!^QD z#Xep2SiEC3|2Bb?)}-(fP2-?}IZW*EqN@kLCfRX#S#)K$go3)0i4C=9c##9r+E_m~ zftcMxRE_zXTiY2&Lz!%!`s3N`3zALnU{jr`dv)c;AwTF$gGQAEh}Bt@+K3H<*N%$w z5%m;4L>T-=aQFr>ANyctz|?W3G`ozeg<1>O`6cJfHC0n&ANjl3&moCvc9Yky;w@d6 z?u%(}gp=OAzB(^2U|&IOJ20^PXGD8o2tJfWx?t<_#qjJ!ykX)8Rx2{ni>;%i{7p=Z zab3^E;oZ=y;bHp9mL4gO?4uNv$>dtH<;v1HnniGH;_C!`$_xtPst>Umot%H6WAcWr zSzPet)X-^1ss)X+&m<43oV0q)6I6MQeeGz@bDL%O(xF#(+mAYD+OXCZ@-!`a?=Ph- zBUE|1vb-8gd`$&@@SZKr4FRWC_KUu2-5ggE*)DI{{Bvg?+ofEad~=(@0Dx_1vdnV z2ikLgFFOu_GTq80BfLbw2K?1T`oJJa;CK7jH23*t;~+WZ+Zfrha0w9*C_n03)Nn+% zgm5Uhagb5~3I3}IF5DMKp1x|oBM!36(64-$*I2KvfU+-c{7pT*B5}Fl#X3`824l7h zIB=c3kvNYR-I=1xcZYeVxy?Klu3` zvA@afz6%p6pD`_vUepF|sOo zt$!vm$@1H;j*hbit&bNX7ee~pTBi&Zvj~n3>tv9u`b{4hkWG_oanK_7d{y3G#H?#a z3?t=(UpwarXL77yRQx>`NsT!7zq}($lu%$il}n zkqnWJQuN&S4|%ibz%%NtCW;pFi*O(p+*Mj)c3{3eM7Zpp8<0{7N$!3sa>!#xn*v>a)7m)Wnm5%PS9iMheF20I}k zg$O-I$y9gMB4o^?%#QU;oE>i|avy3hXhTxy4#RN|g+vJii-6AVt5e(9Yh z%sj?&n6%2(&%dF(r?bG`yWTSI;#9~~8}Vli*CKJnPS4IFshRlG5wde0jZ9EP2~}Q7 z_52$-+4j#qg#@JD@wGg1?C!Q9-VJYt!u>x_94=vba}YRXTfk`tN62~az`~*q+6@=o znN{QdYW7G=E}NRu9{$wee0e=b9dk7iJ||!8^;ePtoE7q3UL|Hn8W(F~AF#U4iApsX zipeyJJZS5Y;wCZ0k>QVPe-{}~5l6@@_GaxQeQRsPAswj|GK#B1tJkK(Pt8#WxQ3j_ zQ72_}pX>FFzL4sAe>6^xVDe}4iU^;GxQ6PqtfvK%F{I%9j7KWmFXZXhfp9%!RAi<} zinM%TWtamp$ah-9-8Jl_){(L0yiBi84b$)ZdRmU@*3S`V_OBF(S+vRI_^**4)2*9> zrrHP|Jg5@cp{^J#*=CVulA&kwll($vX5#5i6JMzHC>L+smw8TcS99tck7=(xu=>^3 z&+2%i)Jo7q!rUnTAiQ1k?wXuVuC7pNwUxPfZq=_Uk#tOCv}5+=r^aO2Xm&UExIZl?bZ3| z^JDDYW6`_wrGo;;7W__Bzgb&tnx0=7s%T%jHXPtR@^1eJ6Wp*#IKO}d-9*1}Zp0d% zq*kp3L*alt?Fx+QlyYQant@Zp?UV}-JlXf2j5>5btkOG1fX6+8$5Ty_GHJm$zNLG3M zJ}WiML5t#XuOGeIX-`kmo_HuRzaKsvmMyg0t7_z3Z9mFg%}!Mlx;U>J(Mhu^e!6p% zup>TvXpZx3yX>>x@CbHSx=!b7(^D)*{&Xw&F(+409lhNJTReAp;_@fg>P=KlVfy~}d=r}aC4vD1;boO^Dc`#j_M1jj5$6lJ+Vm4`dOLlIH`jbvAguUi_GRmzw+3m0TAU|@8$;?AUX7f>KeU}RSK;1}%|pr0w*p%?hY*>q|F$K*@tH|64QP&WTV`S6+oLd_PhVqtg3HleHC z^49aw_i`qkkChIP${c->fp{O+HR?p2Irv*(A$az|C?$Lp(Kn56e776FZRKI^62$8j z)hxxu(DO(Z+|ePDQSCX_^)&Z03~js!?}Vsu)9Fr?KT2if1U*+MvTef&<*0q*!*~ zDhAp$>e?8SmIW7rp1t+q)*gkB<2yqji_Y3_i8-D$iM740wbNDT zB>@lErGf|P*4P7|P^=-c#{?u1zf;Hx4y8KcDDS+EfYu-+xy}6XLm&tRT7g685^Y^+ zA}9;d4U7M`xFLuYu;AEn&!GlFs59<%(*cdoW@{_` zyP|+Ez@}sUyu+qr{rqp!{l8Kn;y$0;Wu<@yE4*EVgKVfu?P3yv zo_=Dx&H+xM9OX`-$x&`zR-QfNAHkOe4Zw#CFh&lgB%XkiiznhYl@vmoiU>hWL(YWR z8*JISp=dxE1*rVBrN^=>>LfX+4GdcScVNIZqPzA0V5_@}bb))5*mhkIOqY#tr*r`e z?3viQ*-oGalGtIh0RthHa8PsgH_5Fd0euw+NoHfDtrjQOeIZy65bVNg1^LfveO*gI z)yT4d#&iYfjKF;k@Fs!Kc~4m2i2ED|tM+ zmq7Rkwhsru-GXIjqo*SPw%T%B2t?yw5(s7z(gJmoXl&<+Fx5~qz&HW8GyF5ADL8k) z9NQ4nyV~DTSCEvuyk7&y64C9BO+f3oShYjzxLCDA>-biMY8|^cJ^-g5q3sSzKsfK4 zcL?Ww^A6!Cw4lO)ZTA`sJ6c+`&NW{`>|JbcN0X&4RUtpMR6zDKfr*yQ-|HQzfHps1QaHwPZ zj@)02n-c8~b&TIFrnP~=QOEk-a_$=(>X^S|!D}IFt;)8VaLH&*}zgk>4^VE zfqMi78bAf=3cl@DC2;V<0Gd#M@ZU2J+RgtvVW25+`y8eQ-GI8oW;<8dX}zIF7ZI&O z(C)VG0qm@PQG=>v4_OJ*fJgsr0m+Sgz>@obfTRF;_W19hu#vWP1iXLm77?Tcyh!gM zEI$yC6aZBY+Q<6TVC$TXRDh}J29XWbgW+o(#1=i6VB5VmTnw>Tq8x!Tau12sC885Ui^|<5HVgOzMCKk6Q%C}dt=nmn?A&M($v$BK0f?5!5u=>}H+vJ{z4>2#yoK5%k1;5#jCBeS%UdW+xeAEojVsybT_&m$ 
zpmg3sZIaFy6f|qy7f=mQNN*uG>1Yhn;yIe6*+OAP35e>AE7^x9cbx(x_8wY#eJ*;z zqXqYF7yMi$Zil>AwqR_bhe7*C=Cj^vsV?l$WRcKH5-KpAOMdzO{+K2XaG-qCg;ktjZniiYMFxkV*KO42)O8X#M(y|J*be3aRO*g(GD0U|78dR< zFbfZ12Pco^N?)UtQ1MB6U?tgb^uu((w?LfLE2#-Q1C2SLjVMI@=5?zG1k0i>C2t4e z#eG}0YlD|Vh}GfH$3k$&I(b~cswm+kYA{N36s~#}fVkL!_YymN%G8@g7 zn7EB*3rN66g@PE7rMbwT4G(`&tdiDvaX*N$mfEQMb*ne(e%IbUE_QMw46T&i^gBt?`rGAVvzd(c~V8+MuUV15tnL>P|TTI0TN8 zAOR)BK)>$1Fkgis1X>homQ&0JgUdGP`qgKf&?FHsr-PW5b6M;BVGU#G( zxw&+#j(5HLYYPN2oT&_l^``KSK1$H=gg&FmUP=Ifu3w#8`*bq^Z3{r;hkWGgyQGni z)Da~Aj0mD8b_7?l;Pt=Akomu_8)cTr(uxu|7W(gNkZ<7D4YURsh>}Lxl~b*6ok(~z z6tJDQGHycaZ0P#+t;3qw{O2h9U)J@<;9%W&j6G!CUX^C@(>Piu`g4 zrWyqbK0tWB?Xddsl^L0LMk93Qc?$D0GQsFrP8kVBDjT)omW-`l4X2T2;ofFDJI?Q! zZyJ9-qG9qz-^1+w*k=9)Yo9G5LAT89JySW}Ms5z`(r~WfaGDU^(=WP1`|N{^K6+TW zI!W9&POVyDVn-NH;*{)(9W?cUlSayX;$c-cS$PA@5VRy)18!NLjXKJ5D^y}+D`PSI z9LiB7(7K_iHkx*vK=0NN<6#;(-#4dPB&FJvbyKMiyFPe?$`m*ms#N!MUxakl8?K{A zX~ejQxv6)h3_s60LfCiHv-Q5xZqq^DG__B8mMYF?VA`&9jrB#B2$OcjlfK&w7s>f- zMA4?i9GaGi2$0PjdlOP}#NcJ;fO#IvunkDT5g(CAMm$Iu|gFY-l@=&g$?FNd%>Z)O=^cPrUN=8$wQ&SN~D zvC;%j_uF;Yw@JBg_q{V}r;Uk2u5p0%uF?xx4{&Kt`WUFODVi9SIQrKkJ80P{~tJH5;k$DuE!cR8C``j+lClnTv zna=a%#jZ)6es`r)C08f=9R=DS+_!yi+GA5HmtTG@W-RfH{ZV88+G(0F{@ zyQr`*p5~L&51X8DGM8r(Kfc-7%U2PvYC>L07 z&vdgK>`^b=VL*WppEn>=l#Q=cl#72d;GBD~Onvu46JRc{s5E6`LiCoLaoY&uYT?Uw zC&#?ZJjH%&ui%N1f6aesM)7jn`CHk08kxtKKY586N}NP?J$m*NpNgpe+cak zX+y$(=UevD*Pr*h1X@Xd0{gSg1kRoW>j22RE>SpQ82fvQ`fCAWg=te zY3}fS2|uID3R6?Fnhr)~W+Kxadh-1I1f7vZ>OGa6EaEgp0VF+(8A5Y@*9f_{m%NlW zhCkLRo^6lscHeDgoE+L8Zg4#BFi(c0RoK-ey$Qv#J16KoD(pn|RZiFf#|CW(oqSz{ zCXOrYx|n5Ui|}_oj(q&37y-`~Io?D`zDS?NVD22@VR|Hn#n^af^HB=JNCo+YVEN%2 zS{*_LM+-*-?H3xm!#M5kAz3^=5VGyCzCbprTUmK{{DvQqF0xG{BIvu%Ij^vLQr8~B z)7chEn5vXMl-?B+90gbm*)ID|^X3P3C0S(53$k{JUM5d(#7MP)=mdB26l{nw8jes8 zXz6pYZ`pf!mw3w8_-;zARK&;UCnD!h>lwOSxsIwR&$2|AgveeY5j#*=cg*t9B@SMa z;k2A-9+v6Ign=h99~A8ZzIq%+QXtJO2qE!9A9b_EZJzpOp6Y&1SY%btQH*CT&R)WXPbCuyYTUs_lKLZt23@E)aB89-s;!yQ}}*+A_AuD^h`g9sUmn95D-rt zyqAB(OLjhMQ?^kc9Wl!b+GC0=qU6*Ah)8X!8qw^0I~QQV^xM3xi_?LGc==aD6nc3z zyc+67VM%0(xy_t(4;r76#AK^DIkM2esF&dZYgBx{`>Hri? 
zbKCWOnj#iQpLqGB;8V z@SlDa!q)*%0sqLtp%3_F5vUL=y=USmASR;+5-FEnF7aG^bf{`SnQ|zou7Cu?P2bS=Ogydcrcs!2!ItJUVlPT9RM`_NXx zQ)ZbPS?Pgr9U+9i8?0ApcCXRS8jJyGh7YyZ_w9oe3AR#lyc@w|E5(Gu6&&nbuh;U*L37 z5$0%StG4zk@z|Mw(BdvRK3=CD9(v+!wI`!>IN|>?lU(7JB%9$6l+?lDd;RN z`c8{HJXKa3@ZImE+BETVIBDDi`^OA+3b3~?>(bTx;=H8$9>vfub<{&>4?XtPIxW2GV4n*69)b8AU%br?<-%EbwS)bHVPVfR50;4i-VZp#98;%# z-^>o|Vo}YBE7${Mm~DQ2RqHX_Re!LT+@388onos2xQqM zH=%g43@abKq&Op{_LIa!JbH03h^?+tw|@7hi(f&}4=Gv4*iS-BaEAg~yRE)Mkw6Ln zln8KH2e)#Atz6ncVjyVSVHFWbnEnyRKG7|UyRSZ0y(KwHRYuI5(~{GtReP$5VTd-2 z_ZYh|1^3U3p;mI!c8#ZnLv}s9*Ldi#(C$Ln%ekorL1Rh15fghCUevQ(pRja`n?)@2 zp3QHGO{u&#AyRs8%L&#U^;Xszmjf+s=UKf~-9M!m^zzVKPOjkV13|qW#k~$i^;3Kw zX?%kvsuqt7T5R%;Ct!Qdu2N%vH?ZZxCgTQvIUSe%)t%pb?#O3z7`WE$+EpNnXEev_Ssw zHZqZF%{w2qOx!LAPvjEI?>_y(c6UVRgERe>=G^AtnR4$KdWProiYELTO7{sFPX8qQ zG|O6U*qu0Y_r~W1QYZnF$!xPZ!QD3~&2%&f2a0(YI4p#{EPOVHzgrg%dK3F7>>PqoM)n<2Fu#t}F=7F(|ds?xA* zeuRe7VW7}y9MTC}rnVAVMS->y(!C*;3x6FjbozG}Ks&YQV^R4NI0yLE`*6~{{R$k| zmH*9M+54oau(+TIZrS2>{JR=?W$U{{1we{6jC22jxU%_8QlWBM@ZS8)s39t(n8SqY zFRE;1xcF$$ZpBG1hjX~#*j!LQhre(Y(*U5*44>iR+KQs8lf*gx=w6Mji(A=ckXykC zldSO{iS5u$22IBn#;qI#-Dx3nA)y_{WGKsV=m*%MxRp??ZJ@~tn8aEKxLQ@MOe}tI z_Vud|;H0d>Ik6nExuDbze~I;g4nTo|H;hv^+y{8*QPFXD?*pMrRKy?Z^3eIUe&oy; z(2@HuQ6AW`|M|}vze2YfcV!1p!sdc1y^TeViUaz@rDIWfiUF0FAcDup8E*%Hf8G+X zXGbgRt9d)>Qvz0fZMbnG!I}I&<1E%=?{%;^jI+JiTu|V*u_$r;6CwauROV&`P}7z8 ztgj$O)Tc^(M(GD5O1BE1QEFsDM+qkhAkJNoiW!RwY69^WB_n141xf_*SWC6cs1LOG zjFJQkN^u)Lqx78x?MZ;sF3y$ zGqqT!M?oFqzn2(d>}a>*Bs|19T<5X5pb`;(;i_Q=Kw%;tYf6X%^)nHlH5JE!(!PVw znzH6Z2MH(9BF;g&kIe;DjQ9)ES5B~|O7R&bb1sx~>7PdlTcrqU5toP10^g?)P#u#; z&?y|I{m|Rcy2O>hFFYVnl zB`BzF#2E_R`R~mH)G|VUUOE<)eea^g%H=S?bte7jTgMhAu3Y?mgMf!HK?bVTQSN;tt(Bq@p{YKZLFURy)UD7vhTZW6_P9^ z$-ZR$&logc-}j#%znS}-=Q;P>bI!f@_r9N>0vdKDRXkH6kgP5twg9 z!gg^{3J@qk4g?}W+sxr^@bk8=PLA^ib55gT)cc=n?i87jj^>p{E4+hpIpO+PKA(`~*bX0^U@Hm=H|$yqOrLq{%tM3aExdMOqZ^J~3-?(RAv3c9q=-DoP&3 z5@I94HRM`~g7WfkzeddVE2tVKMBqoqZK6nA9z;U5C|9V3q(hi_n^{=~EmWPXcWfI; ziY6-QCG?{~BF$HXKa-(>j&T7f`R)U=5nI zW^;Koym;6A>y%K@)!R|qGD<9(TWqgitvc#N--7^1(2{QBEH`3Y7iyGxsbgLz*x$Xx ztJ|M-(@bB{q9VJWh0b_Ix8`QlQ_Uj$m$fjXYh(m-g*G21Fvscu|oFx|2}f$vPoKu7MBHL(eObNwO|g-D4>s$G}nZL-mx zbK2~g)Lm}QTMgxxUEH^MNufTj>QXYif1b(7qib@=vvKzlD`fUkG@89-&6)+#ZE-ssN*Pm*qpl{`}W3GVtWJB!A z4Cr-off~_yTg1-Q&EKo5-e30nEg6y3U5zA)PAkG&2EV0yZHHL%>D?9Mkf>O_^W}Fg zhP~-!3xdM(LoW3<4a_p7;O{5v&h=LDQH!>qb0QLhcA?6Mq`TdwIH~KYbU|*hMv3=b z@ye4`Z{?j^(;M0n>9dp;eYsl?XuWEKiS*zKK?WA_2x5`C7&uIt%ExG|?!%R&Z29wM zASoB5iHl`ljqT{YPt2LIR?1gH$O2>UPHA+NO+pgySxdIMv0T)w@FBlo8)yROGh_IH zTCx>otL@E=?y$~dr{2cD{*916R?-l+eS63w5 z7x{S*uQh@(Ppi-fBsCc|wY~8UuN3bR2`RtHGf_l-h-7Pm1?bY!rCiT=o*)_WYwIYk zg30JJ?vg}y5t0>1#LVW5>f`tjJWtTgL`s#DEvuLE_(ICtzO(UOmG)Tyb(!9A%{^1W z(DUl5$nb@uJ(Ckeau=?~QPK^BVF!OACF zrj+Fa!*11LdAW;;NbZnK}vKv@zw z{@}Xvo$>f$_x#qFHyfQfxo@vMl|0*}_$&=!dZ!Z5`piQyI&ck0^Mi<5FPG*w&dyn0 zNJd2LP_I8r3t8bV_k#?{MS8~sv%e}M;+Av1nOLvD808n+A(A4&FR<`iRiR>f^TVQa zzld~A$*wxPk!)HPw@Tyb!e8+yr;q;MWErlC%6EaIVKR$z)o|vP`b;yj8z}ugu2X>} zk|JM7RG`HCj9D28EOw2gu6$$P5KIa=C^$lVGHQNx&#tT^*3IvZ6;c+&Os~16Cv(4p zubkb2!b|+s=KM`MWCnhX8?g$Kv#I&?lJxRMhH82nQJ>`7E1nD6&&RHBGkwe83vqGr zim6xqA`hCzU!}A&)#|U}Y44Vp?f=5F=>D)fQ)|Ldf1_}XZ2mebV|wk;o05>*6ATH6 z6=h(@m*&Sfv;F04;^NZmLDEQ@Uul~D@3t6VQ?zTrqrDQ8a;5muuY7|}G5l~&8}AvZ z+3^?I?@!E(6RA1((S&dd7{w#EEYPJ=x#rvTD@W%Z*-G!6)XTzczfwMeiOVoaJsPPA zM6mO-_dRD{EkZMvls&e^-|!K^w%HrZWb&Gl)}J0!Z)14f>-1Ctl~EHHH4# z1b&s>F{-%5%?5#QCFxu|$f_j9x@x-JO-pw-`P3wt%Moh&OZ6XrOCWZ(`X6kWNZQHo zolX>qrKR`T0<+zg=?U8t^^)&c^>0RS-nZ$JxgY<+++3%RK55nhW~mX`&{45QM6Z*f zZ^?W*`Zqh|gY7!a+ey|Aq8g}e2wg_SOq2WNg!f2=YL#q}Z}qJK_H>GU9z0Vr(}RPd 
z`_*`5f>P5CgFoe;LoM{jduq$T^mVNz$>Yhc6W!kzo-Bnv=^@~LFySBm{<1_D;sLqg zPFZG&D+P^*4G$9nwEO$gcc(Uq^6KK~GULTO|699Y(l46Xn?9t1r|);-ml7%px~#o0 zR33l46QA5y!M#E<%<}VwM(VpM%FUN*da^cnkRh<&8xWkD*`6H2aiU%f1m~C4JBdbp z9Zu9sVn!ce%w(W?u{;oj0|BbX$5=TgYEdA#fp;M}t{={)TeMhLCf>?Wc-iwQq&ecX3AIoSem08 z_+5S2!J?nv*uJB6Lr2n;F0(7Qth3OtzfRER=i-XDmIH?`VkB0G}PHs(=lefD~sHD@(dha9x8Uc^Ghhwx(| zo$VuP(FYV^0bUnRJKBHK)*{Z~VPYk3aq$o6m0y_qC{i<1q*u7x{NShWH}pf2+Ai~ zXzE;h_rZiRZ)PQY<0O`pMc@Sv$ExSSjh)8DN4x+K4n8bU&{^})p z0*xVaz=|du%0Ud<0fQp-pkLg#BZBebBFVW`UlzSh0 z>q;#5%`#^1@xVXHfGIL^bG9w?%xz=I`!=S6B+SG$jRz-Rz$87jlGU_Gh>i>7Ms4)G4}`A_!1iRa6okeD-XRgj zl6;}L?waccopn(8F|C&ugdQ*I8=cbm#IYO@)7L?F`Q?&|?Nl|#<^|qg%x#@TZ8Aff z58I4U&eg@{0>#fs_Yo81}O+p4Qg!@G7VY3&Eh6d?dFfo6z*faPVb0#a{M&4C8; zbxE=C0>RM_(pwFW(jU($S?%_OP0;HgjJWgQ>jAfPo*^r)Mg?at&_yqk>o9Q{?@z{a zw`JdPy(JENyDoDD$y6irPTXt%I^+Dp!*YT1zgVXOw4Ker{LVAw-aq?QggeBdm#Im6 zwM+$a>b6q4{@W`jk)Ap;Dw<)C){vb1M#k3!vu%}tS;W%nL)liA8j>Hpcb|x4St3%F zdVuczN009=0~Ne5(KQF^C6AjnmRB^4m*%KDett6CCHLKudjEQ}HnJ_jihZ4yQ@GN^ z;+dQQ#rvAex}h@+qkUI)40HnDe;y`Z%|9Qs)j>FIu{&`;u3b%WdiKVwTeO?p>hY_hRdC?|AP1zL`kv##**9zgEYWcS5o4czOX!B>_*2 z?0e{xr$&UMFyrrP7HXtgJorZ>kZypXtsf7=XSL23b^sZe4vw8=q~6lyE&gWiI1 z>nY;x#5-bZoMEJJj;xUO3_f{1Zo?o?tK~2JG+%f^$l3cGO6f*^(7&~SvT;IQ&SVes zq9h}XLMBtKxwUU;NdO(2okKMdEq zIHdh$wDmiph<={f?KbCi-ESwy?oIKZlu74!K5(0&uh`98a~NoURi(n|_v=~u6@zwR z17P~ons4GQxf4q%Ps6=#&0p~#akmr=`Wd$Jp_ox$*@8)a`Gzd=Bi#2BRa7$m{eHf( zk_(A__BV%3ts*Bt7l^dKOBB$pT%#%L9r&_W6T4n!Wsj(L9-k@hDF<0~-W5-&+b)yL z$uPe%H|$&4sGdGTuH|3m@KC{6Zuy-fq^TRGL-wtyX{MN&=JBnjGw-WC6n1{=F=TEy zN4m#Y5A)g#2f3F-xdYZ^yf$S)?qyN#an>80)`&{Vqq1XPJ78Wv9DRU92q{oSNsf|V$;fsHpdvl?*=3)YZ z|I#_1f&%tbLRf7o)ivyoC8`+g4~)+8qJSv;Z-*FN74xBPiTO}><(x9`jD-~4{|5=U za_%P94H}1t{Otlm9zRD)h}}IULMe+U4%j8zkrybiI`%X(EcAj!--#IGlQSYG6o^QZ zFEP!^KXD`tP1`s@g0apca|z9W$TT@1ypf_LE7JiBUzBSjvBAJ4-5H1zN713z22d73 zurp!SBT;mm2h4yI$SI=4T9>G>z;=-a;LJ>n1u^;*Y8-t#HIBZWniG(w!O>%YP#V2I z+C3T^Ef+10mW!4XK+@u9F~AR694${aj`m{{C*YM$iuIg|4mCMj>`&Ya*@&Y#Ij}!4 zYlY&kjYQS&6we-jG1Yz?|dApn!M_=U=Y}0Zg2`IXIb7&pplzloq{b1O@+YQRh>1z-WOWb`Ff*qn#5_ zFT|+~26$1Z0jL!p=KprFAO)rs|Gb3+;)=PMF%bHK_{aTlbZTVycvAphiqBxD@h*ii zx#QMI}GfI!NdSE)M1?{J7C~yYG9x0GT_vF$V0mV-=YD| z*1)hjj4vM=0G=rT1lHjA>cn6o!1p7}q!tEdphaU6)WZK>0>HpbmL7}%cf09UU+jm5^%+%)z> z?zw~B+m8#QBya+O8s3wFq>dU7{hi@M!f`+xXCVRRk3z|7gn_MM(6|I(nneLXnn+2p zVF-&m(E>${Q1G3&1MBOC!&FJ46-Lj|m@@F9k?i0w#-6$|ji6JJhSMF?$$$~fU)_+Gm}!Lo+j5-Pm{ZW0 z0MJm56Pq&@`$79xY$U0GbQ6w0;Z&fnNdWS%ZGc(xV~kA}TVc$4yHf$7W>xGM)45h@ zz*Y;6ugEkYy!HS18pYUT4?5$)XUYx@bSLzp|E2%Y8GPx$LaXWl$%=Ulene9I2WbdH zk^}?`$N|#!qstt))CL0wWuWH}0iLzd9^D_9C@;JOjNjr!Y4{R&d`gJouT70v@2i)9 zBUFU%?_A)_4!U?efPm(&Y`g@*DkXr>io?QUl5{N-?c|_{2kSWIqksI_1{m>lCR(h5 z&E`lY41D?3fiqb;lD}iaSdmz(VAbI+tgM28*RdGn9qqwXLJrC@8|Z1u&`$(PIP+%s z?7u#6omh2vWWlOoVEr64b_wu4OA0)yK8!#09SppK!6ndlB@wXu?(~70%ohBf4axI5J+gosY_HLAlUMMx@Qby z!|6o+A|RsWPbV@r7ai^Dqg(Z_@?s9w#UjADT?!ijrrc7+=&=C$b|pGa_+i+XI)-4a RFA6Dv)=mf^TM1_2{{vOG>i+-$ delta 7061 zcmZ`-2RxMjA3tZ4y>iITNSVpVEPD%)y;ohZ)(Fw~VlSlUsA2n1S` z0D_uunk`z96R?h0tmzGhp8UP+tonm9+?Ko|rs z(2wS+^IaIS{`r*-UVd98VS7A}?hPK#!$-`bf>Ok`ii>?kWd2?DaO0!^V~(La!h1cd z<4Y=rb2>i8&tNMAoLx47XLw`cEF66cm=;n9VpvpM+n8L|_=KnV&T4qo6-`l@H!9(z ze`8i}zl7~<&X&C)W^$IaGkRTL=Da}S^C$-P2xD>_vWPevuhbj#aS4J;WVAI2(41MH(B%OctpXBd_qrEAF*nRKLAvAh)npOG9LtRxr2H({ ztR!4{i{l72@Kls<|1;?eMzics6+f3-$-3S)*Knuo?ySBC&gII${XhWaE#1*49FV3< zcv>7GR=hG7UmJP*qKhoeg`N>3?9S?Zvu-@9siB;+a@EUrP5fgl_pw{4XepPtCLKDt zdIU-yFfQ4{u19o-Q;Jh}@pJNi;lW&RyT;CR$%4JO;rwVn-d(!*LKfzmY`%{~FkVaG z+EQ#Vmb1 zM?gdgyH@6+H9K`N{LXTP83ahvHPHzOxp0Puj;uSsAGZ0bUbZ1`xM+dMJdvkPIy8?} 
zZj5T+>ev9SfNkD0habbK9JYo|Wd;#8T=jm4Izfg39!CdyW@+ z?1V3Uxz!X;W|fs)jhiQ(xSqJeybo8e4_TC=TZZRx;%&2!JER0An8z0vvb~z*TGyQR zls2mkU58COa}U-%v-@Z?W?L^_(HNGtl6~9phJ^XL`%i_`6?2;ic&T>m_}EwqSJCiJ zfXcKp`&V{@Wqdl~B^b_y@B{6GaH$ym0hQ8G%(tdRUPK;}!1gQLWA>=|e7eb+oUTRe$(ZN6>tFTXdy^4gThTjep!ywff6BHg|yL{}7= zz}<8~)(8aR>*V&MgprS9^FzVaRW{$&F?_y%y{bv_{=3S#M?Z=788Wv=>E*|is?O>X z3he3PNr`2&NQq~>dgodu-=K71zXxO?Atk+GYlyjkPfM``3$1$l!F0vn*q8q&O%q$1 z#Cy(r+c2r%fje;}!rgQ$bf5kC^#o1fBjbpVr0v=k!x%T$uE&#~710AvyT&<+JQ$z$ zh~EA+&#E<#%0RxwqhFa$>oJ z631YX&Jy2cmmdD3HjjZJ&$Cw^_{6hSzT%01!*m}NzPEr`Sv<^U2u`jfs_qNQZW6({ zX6RIG5O1HN?=WBZ+@Mz@dDM$rWm(#~mPsoeZgDMTv{RelruO?8gArnElV^(5->K@J z;*_@cBDUuQlv514ZVa*`+B$a#gK=*RO(%p+1Zdd~XA`rK&x<|#vB+(1cIkks9~Ry& z2Z!;KdH4mA+}laVzvO;?k8{9P=$=Gc?%;dMr}f<8wd7*@?0o!;v-KSl8$`pPb!m-9 zvb}oy4_kD67HxOS6Stjs*nhq)YVv!&@=a6H!n#9AtIY_4$H>+@6!aE)TZ_S5QPFxU z7AP);7dnlVhlKFB!mE|WT88NbNUI}o#tvXb7r%#ufmvy4-%1!jUuxFu4rYvcUNAOz zk}#d5d#hZTtw_i^F*H|uO{yWsgwm(Uf%j_5njProtS#6j$nE0VElIAs2y44=*IV$H zUus~`65d-qMED0(2x<%0WFI57G_&>k?KPib#GKHM?3C!k0vt1 z9P;7xKAYfkXKnoOmbF@1lvg7|Fty+VD#zxY@+G4MW9?5>7RHo_=kDr@M5*1Rr3?*4cIG(Lv^{gKJJRq=J;u_CBfxWsmzog#q zvAf$`pGL%rfn`LP=T)W0SoP&#PT!h7n3vssxwXRl)rU})_R$8x=^kx_Vv0*{Dg(sD zQe=0&5ii`-R!Mj3?6!jtgL#Up?!xrCO}~8~F1dHd)Z894(1WCN)F=qVSJ5r7zc=xb zJuvzn8SOMeuNFkpIOayA1n=}&^`7`X6kDs>Hsm)WR{#3V%#(eu{JTrOMwCAMuzC}= zQm=7RX;JV9_=5sf?~6uF@9DLrR$aa|)+Zb0^Ry44Fr|pTDL2QyE5Td>d0#U|iPQ^V zpT&ShVf$earot`I&NCMxW~kfv-q@hi;aFOCF|X*v`s08h_61% za6-88`d|A20 zAJWb%6)xFYqG35)dD-eNL?g>TdRQpL_`_@4r?IARzOc^Lk7t6buIa>)w|4F3x&QYj-k-{odQBea#}DY*vCVLGNLqdSY_X@L zwdjFlM>**ZS;%x?)km6aa~YR6Ix&!@*ewvZK*4N&g_^(E9s;XGKbjJoevvwI>?%Gl}s!9uRR?Ocn z9a*^8Y|a{qD?aNKu~ge=)oCLMwO3;az%OL8Ymy(OC;RBHvdaBZ)$&brGe>(yF37SfOxED4Ofd(%q5mM@6W zvzLWBEA!pGpGi@{%NNx2S4F3w`XjiSAi|#SG&+I3DVAMXmpSjn_c3;e?QIO_uSMy2 z#z&`6n@8RwSag(p&TG_mM)@L4+76R+5)?uZICxpQ0E7p{kfjS8#9^w$z?lYxc!5y6 z%hH^nkm_e3(ia%Aq9{D~tn?L7Hj+b)=Yh+F{MUINRyz`)X+?Kn`7Y^#2~S`l`y%E~Q>b>apxrQ;R)r=_->Z!50Z_4A( z+kka7U)jvPR1I^mvk?{S%c`FjllNBrz_F}>TcQyYn|p(Dd#skJ{_!NDTRE>_3uZ&D zeVIgSTEJuYNgJ%(cUX_iMGy+*Dq8Qqb4_B(>z!u13qvxMmnWwV4(!?<#rC;3jL^zF z9GK$of?CeJE2!Bhd&tSv@LLf6tDtdO2-sASK#D z_?k0i_tIC7PG;Y48|(YPy!+D)O`2fEFa+%v)sdkI3(`A+*fSxf_wgLg2RxXj2NV2k zqnYZG+Q3n;opXI*8QiPhU&cW4aiD#J9NKxs-{3)xu;3Xjc$SRNTF9+TAGIV{(7gGK zS_oa5C>2&)A-q$uz3kD3!=&-%KrLH?Rar`l56pGRy2~jKrvVhZ=8)9xaU*}=DLcu} zZ}!6t!}Bw)L5i89xZ2Y@l4b@vn_U6vMLp_VnGX>=@|@cbt!pv62Myyl2E)JK<;e0Y z5lH%G)8`+(2x;F{H|nsK@nd#@$zEdWJ=bdEU+cq>1yg6Oy|vn*l#~$JoI3S3YB7t; zTuC=A&vQE}ddQ#)L#nN>m5|c%ZqOEQvspuX#E%eD#Vzb{C@;RcsB}@z`QSR6Ukst2 ztyzR7G%#>u`unD=r)=n#SKdjPSYn{>HCag+w z25hvx7)G^H8_#H;pELW>>gbu6tmJOXUtBg5|K1Y9_{8ypiQ)J~si zqU=DXgOS3U_0!3LRoJ(W@GJ<3pc(J)ngt(YatNXl~bZm2SSW ze`+4a?53~c~|b~elRhM zl-Wz}fe)?Wdham(jOI28--I>%!aRN{!#jK_+fAHF7{dU8Y;iy!5CfnHFc|>orudpVgkXzt0sr;70C){=Jxt0Jd zf=bs^p2e8)IOt|g+V4d+zJ3So$KViu%)b1k7v@&Rq8`>Fz7^x7G20%brAlfDDP}m3 zxdeEJ8^JP-INX39g^qj$H=~P5k&9lQRBzcn1%QoAu{SBc@nJY2?vddC5XEoCCYN-8 z8V!+Uj|y2SD24VIoO>0sO+a#vU?GcsgKr@uBvjIon19%Bp*JysnU&;#e{#%cwge%` z467nZ@3G_+ekLP8xkK56uN7j6U2_m}ChSV32WLaRPGZ|Zk)i1$Q9B>vr+v#dYG-6sSFQrvp%7A;`!o@~xpmQX!ZR}QiZ}k`+ovpq~GiT$^ zY2xQqY_j7D@yuDCC9Jnr{=qV>=gmf!aSiswC|}HdN~OI0zBA`9BU6ouJC}tvs77gZ zbT^%IRv&*c{%k5Y-M6tc5L+K_6OczCd$Y!qnN9EWFnd^8S`BMEeZ|VvP8Y2B3yLm0 zY!&{2FXu(HdYY;)(ex0-6w~J2-tX*vM`5@T$qr2K!^1xW-Lsm}aH2%aF&6Q+XZ{4M80c7-pE06L0_P^wQyu9dP| zqM0tqUyuCePwT?KITChEZNtDvrGz7{C@KYTNtLZ=OhjNHr`2J*qQbbCeH4aF_NlJ8 znau(8W}m7Mia?ulQXy&}s`OO&)uks@q5*13&jXxgC$eb5ow8H1kvXSi_a1WqZ*zX9 z4*vKkb7GJ4(0NJDeS8!Uu<-cVZ|7-%)nhZLR-r6Eae^8!EkDwNYUz@4Z$KmCsK-$) 
zjXWTHq9>U?$_7e@MEWTm57JNR7*F>m#sE`d{{BS{5USuKM&Cs;%P>7Zj1o9;Q6Y=2$aX4@Op@WF@ z*ovUVKXqUT0X&swf4gOPB48o(PXX{fQUFm!b0PtLN(xAy+A|EEXgC~F|8VN-3F2^I z{fU5ALom((bk#?x;_ZWg70H0IYF^U+rl1)D22ea{v^TEE{X7{!R-=5lM`~j7CqIt= zc$)ob4M6G7BM10f#E&9E>iCS>^8!SfxF?lIpDO@mL5AXxYn({}Xw;xjFv`*-#jz!1 zEo@MO_#c+AD1pzJ{3lwE8juXBRRhK01t6{H8FgqW6HRN#`oj`fgbL8?grMC;xo<%Q zVC3@xb)84!1f}j7nnu}0cG#B3ZHT&FIoXhc)@2S*Kv5u|>qrlC{W!^@gIfae<)O-K?0@z4iTjy3&jydj$wS@LJz^A8h)sP)K{c<4f0t^{zMH3 zlpFK|4c_meI|PcED-stYL7rG3kklW*o3w{oV|pkLfjLlxSYsqG8w>)e`~fthI|LfO zU_z%Zx)%aI&u~ba>ph}DkTecPBuxTv@1;iLU@suxUK9=GIxfIiN&xgVpVS@f#02=& zoT^3&6A)DYf0}h3#S{8(BRAASz=F&u9=s77Agw#PX`#pwC*+yBGshzk#dv?h@IH+f zVL_|G1&V43SkP_zD>;S)+ZhCEe@y`TT`W|VV-L|las$<2^0KD%*C&wAv+j@o&hmOZ zQmq;~A)o3XVAd>@89=(8`A`s*Xk@|GJaDFP6s{D6Po(QOcQbSF?WUSOo}lq#;`L)x9PKPCZFTk)~0$fv|Bs7Hu@ zxVlyhsJEYT^~y8ko);KzKhnS64grUuX(Am*C;mkT1k8^(#8Gw99%a{-Y1&*9S+7Cl z_s_abqMnYW1O!qlNA3Ox{;Kmh diff --git a/tests/fixtures/export/migrate/export_v0.8_simple.aiida b/tests/fixtures/export/migrate/export_v0.8_simple.aiida index 250876da285055362a897f9c59a49a7bc18343a5..02562c14b260989eb0455240969ec3d0832b4c6b 100644 GIT binary patch delta 7230 zcmZ`-2|SeF*B@gES;o@Pge=J(vScSqwiH4_F}CbvEy^UamhC|rMb^qXmd2WW-z6bi zvXqcrvc5A0zv*A^e2nM0-*e9QoO{l>_s;W7JtwSAAf(dPBqX8-VLvx@g0-WlSP|H( z3Q4Pk7!?SVpa=qyVPuwYclbFwH)p4L<5}lnaoWAnD)W3(^5LAK80Gg+ZkODN*|lh? zxAW^RHekPKZsXfIMSXP<$9Jm6B0pB1(YQ6F-gVlJh%fX7o1}<5Rgluu$_2WO1Z!L0 zo5Wf`V{gOc1)ZinIcJSWnKpK9c9pa0!26vQ&mO=B7tUpv5kbEA@xawuRNs zI3!7Puyshd>D3nv#ZNnSMU9vG((86;zEG}%qKmpUOa!jTz#a>S3m25NukD1o-s(|@ zMDZh?NLj2+Zy;y&qozH?_#p+fb98B(mx9ISHZ!1So!*qzqxl0&m{bvsS+q7P8U<-$ z0zp)r>ypcfyX>hwnSdH8#{vi*Upuq4Gs(Bh2ndcJTIJLru$Is-4M3{7}1*eWf zI68jSm_@_1^0zr@mm3X-AZ3eThIeMQA7=FV9=VOs}$rNkSx1%(D$Nnd`kMs4n#it*}fOD_v((mz$c$<1PCYWD#@&yOBK`obR@6FVPWsCk$+ButK27f@SR+}Sa z!sd+YgZ{-kmR~1@^VM!dZ^>U`*V#Pt_VuchevAhMK!TPG>SuTmV+PQh$+B(p`k{fI zg+85u92*u!DpqBgz3dFPRt&1HML*WbCrYn|-MmaeT=QvxYPDO>zoJd*CVA}hW|Wwv zeV~_L#bZAPbPmVT>mS0O;dMy3O>>!0HZd9+CsK>%4GT8VN1=hxv&T3bNK2l&R}RfK zZQm|a-R;h=oOvFOkax%Ax;26SRN=?A>`5C^Q^v;HMJO8aDmJltU52m0 zR&UmMqie!oxiM$;iGaf5o^3jn)|iHCVgKGa7U#Pi6Q8{6cVsz8XtuAu`}AgZi#9{? z)grOtRk6Mahc1y=imp|WC5djm&XTu@`LD@$!{45>(Z6~Z^%-Fq(68mxyJ@Fu+SAfS z*)nS_DJi+<;_2$U>swo#7V@>^Y`N?}g?W9_)w6E%A?hXs@vD6e(6c3&QM$RWN+T1_o)s&W(ZdfF}8|Fkc!sC!eKHrem6&I1}-LM zDxI?cNxLFVU9Een?1nu)vu4EIR#gk52#&imsohaL0eRtJBh~EAenF?qkMg`-uqmA1 zg6Ri($xiG{b$515n@tWU?H19MZzN1yk<^{%Lq)0MShF=E&fFng_$a5WIn7}NTO7<& z&`f)KT=wSjD&lK=N_KNFB>&B&5u)u_hT2?nMf!&}(Ps?6QuVuRp#>~D9~DTKjp+5o zf5*qQcb{cT&WDy$@k{lSi*<=H4TL@*q8d@i^@)sIWRsh{J3A%qWN)T5t(-~F_=xbv z^UQ0-TgY8sEosS4WTwH^i>#DPpO>@lv6kNM>3WXp%NhvFqW;A4%$NX8BjDBtpNL1< z2BH!E$k9H+=1AroUDQpG^hEUJ*01-3MTD|suB!0iI`KKQpfBgrOJ?q>5XX(-}o>>Z_u?D)au8ZR%LVe!%AAdpW#!J@mm3Pn3=m} zpDN>R5%fj`+bSZnN7W$NHYfSj-D9VRZw2;R_-M!Hh30afE*<_XY2P|EKwBGPrE&$V zdc0{;RVg@p`;6cB=5<=jra0TE$;iGX&9#8os5C2Ls6E^Lm<;Fek8SQjKPZ#i6lW+u zFhTE(JdxxpJ7bGhM{f)8u}3fIi*gTMkY3gjyjN6!`0YBrQf3sOm=@T;@W}Mdy0ZQ) zGOb=-h?0_~VnL{9#nQ%SBh$tY(lBB^T~qZ60~Ot_p@1lKCu4^rC_z|$D@cT4qnE;1 zRSG$F?~050SbTwJZgcFrU+r1hrI#N|o$gS1nu0JhF9&o-d8vj!UItPEAYwMlMY;7e zv)1R65Ru!o>rYd{R(MJRAfFVY9>#`pzAiq-qv&$&MXfS(bU<92=qpJ9!G+%%%4Jg< z1B)`fqB2#5J6fDK6;j^tsMnuF{ftLD4+efyU{VuP>Hx>UeZQgtL(L*vOMndM(;8mTSEdZbD(dM|7}AGxx{@-2-& z%+=K=wpQbd5@?EOmD=7+x3_|~wNrAY_Y3c$=l#wM-EkA6U#K;T`77wOskI013d3%U zGbJEaRDo@Ox*ub#4sw~K1x1;CDNMsyCta=i-=-g`-hxh`r(o? 
z)-_bM9Voij`y%6+XcgO_Daj@+XF#KV*}vmwmc9qYO82aEI%+GRh@W^|4kl$t zCq0e`b$R-s>-6hIXxftM-7SfC{Kw#16YeC4DdT7>nz{4+1k`o@p_hp7ieI^ntYAjB zKG6_Q=F*DSTMlK^AYpz~_a*lJtIV?=ZSL!BGcysd<<$bmPJPzSS82OM^YrIx-zAY0 z!Jz)yNi)K1+lrH1+%1l64(wN#4YW!wXOWA$vk^IygiLfWdG^AfSMALTwd~b=J6Ffp z84w?3I95<`sU^hE*ta6v6hAaU*1?~?A~Yy9A6;;qO6%K7BTZ%xBER#jFAxnl^SOkq zDfd<<2&nIj&^%k*s1y8Fn99wItVm?8sbtvMu=aFUN=}rQi_|n)svZ0-iP+xky|-y9 zWv{e*@`Y#|J)_?y_{Fs-SjZ3~6Q44W4odACStG>Y1Y7YMX-`7*u-h@=nT6 z_4Pg3t0XKIlAdzxn^SrYwK5v(sxAgI)-)F;jU~B_cYa@ZwDjar7ctMh@xX`=a*`d0 zdz2>I#TkWeRCIT3d07yko!_$Gom(VJDhpzYZ!P8oUf%(ef6>X@@FN#G>D5kDM4}?( zy7tmUbz1zSSMCvd)f zjzRq$32LRVqX8Hz1*ld$2Lw5b0M!yQs6RxBDGZRXUfyT7e~5xII$-Ru_85gBP=F(b5(Kw@C`dlrA03`1rLOXvU=`P~k$Xk)^$1_5`gy#tFW%P%$6Eu4= zi41Mm=utF2IAekKk9t9-cTqMeq!?uqxLE7SfNr7ackB2WKYX>I>h#XW&&j8*p{~1X zq~;}M6Td4gTX+$03+CVPC(*WsEWj?%ZyX-(mtb$NdLyB;y|~?rX+c0lt848p+%!nn zYS9^?hHxGA-?Yg2v>F*CeR;Q)<}tdvvwHb~$h-9sL%vTCZ)^F)UGG(@YLqWauIgY> zmTK^Kt$s(VUV&QVp6!}=XVTj#l^ZEJ z5GRW}^m_do5;ksd2pZ+R(!TIJv~F%==j!QE#7h%ykDzkKdJ^}J{M$3RH4Ogs37ku3 zcX*YxkN;-(awFxgf0l6n)SjcfgcJ~QjpkJQx_S7>-D;cDSDx8$RnoL6LVIdOteyogR;prFsNH;VRlfoYC7K#wbOWS&i@-{>CnmK z`3l6m@k?)Q&elYqitkTQS~o)suUXz7O-fvKW1YX~fBMb&)~lomBB~c`Mm<~VZr=^b zJ+{!$zV?2=lz6kOuGP%=%rr65n+I|)*yLMgnIsUI#JLqhXTuLm>|$&_y`Vfn_`kH!>2p2#fp(?2EF7Idd_3oCKZr7I>OYkd? z!S0$u>phFK84q6gXL&GHT6Wf!m7%3w9A(e1$q-2k37uPg6E9&>-nvPex@1bmxlYf2 zf_h*UnPhyfwSqk@Dx0D&i&mO)e@ZI#?!ml`vy{cg1hb=mb$tkiuY82<%D+(-9t(Pp zMBI|%f0FH~vu@m8163N)O+PQ}yJ&Rtg#PEV%R#X{Z47ehOX_x$m1j53^ZjIPY0qzw z|Fm(xBZwnZ~bX?op4BVSXOR5^IPJXmU6%%a%uIxLNj|6*$=)uk3`>C zBVH|a0iAmfeD5p+WqdHPHAmW|yVq>3FKXXfnx$=<`)sm9>Axxc;q69sR7=8b&UJb& zk#bY3r;5f@AFAXGo=h_h_gvgI)(`$L+E2NfdoEHhQQbG{jba~A;!6ixyHQDPoE9j)BqM-QK|Fy0+;4uyo*q|YY{4IU3`O_Rl6LBwsy=({7c%gKB@J%D# z!0`G|x)^u(d6+M9lQ2~g+451vN;i%zhdx*Ny~w@gI5L;mhQR3W;+tWP9__>Dk4Mu5 z>$>&Lo-k%MV<^0Fcg=?FlbhZtCA|RNhP)?Kw-q^e>}i?J*UpXRgpFNixwz)x&{E9W z)IitV}wTp=Af zrRtB?yS=UzXWvil>Pt%=d`#fJNx2<$ttLt&Puyl2s*(W{sAcV#{t|<&sSKMh zf|sh-rzH3$=bSgE=Sx&V?0{CS({=M=vU1~)5zaYZPR}G%5Zyr1Pv*nw0|lnuaREpgavTtu2?Z)?#BqZ!m;n_n0yn_gJ!i$UD`V#Zez4-% zQL!D^C9$bOME*BC3`~B&NsnCrFRy&zm04rj`Cb8+vn5L1D@NO)1v5apsZzWn4sGGGh;8gaY%~ zN4`{XC^j~m7JeW+=b&^iax!p|h1{HjIslh)Q}HrHFJdkzdj4PDpPo4K4h05I97InG z)j13W3Os-wOv8jhe;mFS6#Q`#>~9X3e`$UXF)t?nI2W`v z|A$t)2*&IU0fC&HEv?)H_cxQjH3*V{okB+3q%j5r%>d-xe?#VlBYO*BOx^yr2SaIq z$U=rA0%lp7LywqN`n8V6&HgIjcm4y-Q- zioySm0_z}{4oFTB1R|;qHZd%5FBTI877nq!Dj1l99)n2&AvF|-3kr*RV^Qh9!^6sY z_AO}tQ3b4&`@}w9c{Ro1TD?;X10%4gDBzSr1$5QYvHrD#9`@gU3%W7vE9=#s+*icP zzy}Jd2UqMR&F?Me0qY4sv+iJ?2{kY<xnBe>j_|52L(IDVsJ@-tby?` zHsd-N*!CF)RRt;!87|htz?RSVHRI}GxU_d-^mr5YF^+o1!%ShS4PJi%yl6Yn!seaz zB_Q1X{}k39E5j>n`AZ;oDJJ{*4-J_RySX|7?koZT7VV(FU0^?|DTxi(BwqVDC zfPFD;w*s(Tb24ckYb!qWU*{Ag9g<^CZ)@xs|39k)OFoFVWZ$J>x{KdHp422}nS$=S8P?p2M zDzEpko%hE8+wy}Duz2!o09B~~$W>76lS;wWsIoDS2OdP2-!w<0*oC%(BVc$>kL#r? 
z6)-SK=02$*BlX`|WvhgN;aU4~&q~-nxwW45xp)kD9U%y$`3E^+_Ca*4IC8=FFfdW> zQN&^Iaod37abOM`l``h5_6I?09%dQ_|8==D>aZl3OcC0MDTl!UT3rp|Lr@Uz(|FxmCzh5Ih5IiW ztmv#k0|HITfj|^koh`x(ani}t)n(9h&UIXh;nz#l?NK<%jf8x%H8I_hcf1Qo>6F3d zk4I^%sDjWr#*xA=ZcKFB?fhUxmt56|Tls)teNQfY{{h!2aRpij)yeiedcWo?X!C>s zbAj&bQk$)FAEq=+#|(VUpQ2_d1)J>yso_yE*3LI_I46=QqqsHP8#vwOMWhBrj_CNj z$QxipRI8Duu5)QOo+NQa@MbQ`S{#8iMJ^bhQrQndy*e6n?E}iTXFB|2{$F?Rhp#-pIg-uI# zB3}4~;vEdng?`4)GyR<`Vh)(6PTM`n0uH7w#U|nQ$0~0Yybx#a6n3UFEiIF`a~R*w zpbM>L_*c`{kNgdfSZ5ax#o!{6hRJ*A`m4GGtPLbq0+QBwykC;FJ^?hltJ3 z<$e3<*n;k$x1v?WodwjOt5DaAr;gKR{SK9*HW!9 z_dLFFSB1?AoAV#aYmPJbVK9lgB_YJ_VkrL zHsyY`vLl96Vw;BK3bJXn3a8WF4!ajCSE(J}Y6V%#DJU*Fm=aG=upFL3-6{M0#d6lq z{D$Zba}95b+^FEK6_i47=k=IEsTS5*)^C2IM&g#}-VX)SkVbv$9->S0=VKX;$QXeK z-D8|(?tU2bid^|M&Z9Su%fMsn2fxx?_*8DHZ8RK?l9%~a>RU(r{U>reZN9_NuVUtz zXIxhMLJ|T&7sGCy${pM6m>RZSfnjIIc(*A7Uxk-TX1xp0D8mQ2qt+-}>-(AP!HLgk z%G-l7Yh*~znY!eg#9m1yRIv5ZTCt|ufYsP&cWN(9rPJ_ zRgWE^s%p0!4dmyeb6xsNLPCVy(dBCWwLNTNkn(WS{-3D46B{9+U>@d*&vGWnH@eT) zy3+c*kDHr3iW^KYyj-Hrn0_t1i1V2+d?+Cgy;sACF@;EVnS^k)R zft_QR`(^Z-@6S=lLipuY8j7Ec1x(iNiN0o9DQqSt0&N$mjFRN#wxZ=G?`e0RFuYhc z6Y0Fw(ihK#a(awr^IZZTJz{^8d`7RfR(-97&FV*vUB%J3BP^j)n6_dEO_I2JIcrL_J#S}$8-0Q+>L!z;)AXF z1*%DI*~#oMHyfF?@oMrgO9wT>I@b&=@=XUAf zV`7*eX?-->IjFB6ERr^_=o@+5z6;b;7ucG{%HAdwy-@b*lI`nToP4B{Pl}d#*_M+s zhToJ1O+?pz7^GKHdxU6xX`D z>S4pHsJFqA>a~8bMz}(le0`ygP0uq)TYs2NhF@flM2Pv9cMeaYEzu&OP4&~%!DZ(R zVi@Y1*RtIJN{gf9VMn4pr}NP?21XT5NpIvIq`j2?X(F<%2i*VacwS<;?q*&gf6gW^ zx3ZM&Ym`)7`1u8yfS^KF`*WTot|nyd*}_~tqe?~gF)~h5MX4Rh!pY~9fewm*=2eF@ zcc+B8&rc^?3+wal%D*astkQ=J29`}TXCltJy*G%0)kH6Ygv4@29+ztQ$!-=LDm0FS zlCXVZxTL@)LP0l-O46Zi5GgEm@&Ij_t#3MdxS=j&q{$79Q|Xs9eDr&Bw6-Qy0EJcq z1~oaKTxPI3H0v2-`}54{^`(is&^E@fC|7j2`77?dwJv)E$#q{Twa@oyP=L#7ly9Co>Z{w zoec3ur3acOJt!&nSbnLx%>BegV8oOt!fK+eaOsO?h4?a=!x^3t&$H@A71j?Wbd<#l z1Mm4?vK4a>3F7c><7Uc8AHKDd5D`3cT+4Xw)Bw`BmrNH#)w;oK5ZIPv)0`nGcq_J@ z_k#33lKqF^H_5~NgbT&a~u(xx$c z8D^LM-7!dCD`jSrnrqhau9JAAVI7CT{HA~r(tFm+)&9eKw;+vvvqq#cJt*5WD!H5^ zDO@`_I{SS|EWbV2Q1`9<*c;7ICrA5J;_VrgJLRb^_Q5YVB%I(bf`h251T|+0{4Ed?NPonac zBWw9g%0OpQq_xyJS7^)B53eS!8|#Y;Tfn&IlSN(TVAaq9mhYOq-5;$X*TqSOLM(5S zJMjnH9b^Mj?ldqDG%GBUsyK|fzqSc()9xr{rbl`5KGl%k`G8Yh4v}M zye$6U7xur{?|if4miVYcL>AT2K42H~*drlfTIGz}*VA6=ks2=AJ$hP=EV8#lpLyyn zaF|>|m8sBFTU;rj7+&*zsVp$nHtw~|HYRmLIcoOPIVX#}^HRgL#(YZ_5z1co*aNd} z8gM5sEjC?Pk`X5FwAcG?8d1+=KBUh-X0=`K>>Zz|=IJ1sUpy2$Y6IhV4METS2eG*2>~KM8}~ z-^i#I1F4^nrIY?J_tN)FqubV(ZjQzH?ZnmGvco^n9$aO2x;ZS2%KG2?vL&7msO5}z z%~=i_TeF;F9e>s_Fo&s$b^e7c{We9ZU^&8eZq0HhTvgD2=FmHs&&x@8Q!PAl zoZAw$sWr>%uV!ax>u#ZX%|Y9ZeG8F>;myBuMabQ~H@<>LQb11rl>8wUohf3aFCXJA zPpoUGpYJZK@$dX4t)LKaeCck8w>NvJuvi={`7o5q$HW?Dey`oKFQ(Q{>dTV3p1Y~g zg*4u4&o&LeSXkXVyP4RE?w%*Rw#vB^DP$40gsS>QyniVp-@TMA+B;krA_9RPkb*$i zI~fRR=Y=@9kj2Nfxr~cY-?6;Vs4I8x{wrp zvLfLW)*8WO4{KX}m-%Dc(}DK$1q>T^*SA%D?tS!=vV@uOrnuBaNb}zes82osnkUlc7xAF{~sk+z!5HL=i2=x4-Igl1;h=bMai2Ue)rv z>pR4w6O~QP;yc~sh_ajtx@7Ku&WYnwV#^TnaCe&);n%tE)2~o%<;F zC>*+o8r&S~INR>2`Y`rQ@2i*x7^jCCMdibeqV;{8o3|;Ml&j=Vx#kej8(hy!(!4*H z<<}2f5`s|!MYvG;dng-e6+}WS0)0;W|%-fgrepGK=S$XJ^ot!=$($(v2P9A1h zy)+@>)8k{CWMjmRIpV|VK|&FA+ctr&3&ExX^~ihLPT~>F<9z4Xz>xIDHS6{hZt%K@ zEEWEtEmCBc@ui8%qqqF2vfHyvGfHSvIjBR2nfDh1{)P_J(K7*Q5Sl}{YdUtq3=xI@ z10hR)fM5a&2`2RR(*MBITTA>c<=KA*6L6P+0r*KcnLltPoeAJ&fZ%Q1A29*i45IkS z=wT)xnL!FaL137G83xoIHP+^O8i7q#8iCD18i9>m`hgAq!86bq1aOTE0(eTs0h|di ze{}E&mESH2^tkkN&YY#}D{}Vp~T=UZKbRP;c@q@Z_ zCMdQ_l88S`g3D_D;e(Jh@o-V73h)+6{d?liNQpfucGm}12l=bAqEJ0j{K-;!l>f5p zu3>2qvluOY6v_B?&$Z`#Hsm-OaT!-A5Ey?BKvmP0b|hvIQQ)M~N^KNZfS`D#QdA0x zhzifRa6g~aW>@$k`Q~1%e(`HW`wn4a10*sJ%E1I!WFExB1U$(kDB(ut?-D|7MBw7~ 
z|H?;A5rI8UM5hu& zGn{%2I8nN{QcstP(Bew)`!yZlTY5|ZC;GEl;NI-vKG{JD*xvD<)}gCG1cnm-e)$KC zmcqd4bimJ2=Di?FVeHLxAdrizt(_;_26xc>ZNa{9fRMmd=x+oV3IPJj2slqe_BnSC zqkmXfLjVY&Mc9D_b|L)h8SF&|2yChUiReE-V9WPU#LfZ2PV#TW**X{}AN{`Pwo-5r z>wkdCVPGA4AeI3RM3nFC$=G%n_&!c11uT~DRqz{DHcJnTGROll^?Nw%1Hi!yz|Kn; zKK*C0Tu&K*`)Q)If20Vs z0vh2zW3o5UJ(+~<6ph5#PH_+eZo{5n-!=N$#)2o}YJ-7Ka_o}(w83`0M^XXv3Rt7_ z*stn;jJxnajv{(s{`Kz##_7$uU!wgM^T*o4l_;GP2uy|pbIM-IX!?fPok z5R(@HoUCR7Gvw^TsjH9dw-Vfjr^yAX>j`$GbS{vY2`317Z|C4ja4Q$kYf!{Hh1=op za)IIob^IK6IyvX<*LJ^M{-d_>dFbN*g?|LAk~-Kk1Y1rdAJ8rShHV)>_7o#JlleTV4Ub+L)>!hX--E3o^U9TfExfV|p^ru!8PEK`6rk^n}sDS_in zl>1A=0;?G@V2<|SyA;H}Wf(5tW?(XjHuEE{i-QC@t;1mvp>u~Su?l&`QdS1W# zde)k%qN@H=oc*1%_g80I$%unPz<~X6(~}LB`SZ=cJwbxufmvGV8`#k)$bSX{#|eti zr1@Q>;uFhI2hQUPcr}M{LlX}UzhnWih%o{MKEyuMJY7@n-n%?dR7`eLwYtE zW<7Qe8XZ=B9U2`5LskPlHWnRScKW{v!}!m_SnJpu|3fUacWXfZX&VdJxD(#@E5Y|a z?!SaZYh+??Vq|G$Yw&-wDE;%2 zO$wU9XjV_~(A&o8r8yhn_BO9{;tKnZ9$|jhwIJo`omHOd{XI7zK#_uPll|BMm!U|P0Y4(4u~&#&l6Jtic{ign zO8NPT&;~Q?2i#icY&kE%IsP9m{qGi*XoG!>dbjW_7Z{lEU$U^4ou09QzJs}eEsdSM zzJaYRt-Z7T|JBCxn_|86SV3DLjsC$%w1LCSIy1+G0vc!mq05hT1*&Ng`sK5&nD*iI zeL!`Df#)fOmY&E7j4JKgBsOb7rYR zmirxk{4~oKo?B25Fu%SzxhU`~nXe&0n8>gqBz0KG&)R7DX(2bCZc!WYWHx`4gNxlp zJ!sV)TC4e4PZS5GTxxo?@zufMQegA|J9WuK$TONfK{f55IQX4V`=gCdqe zB*{sgZ+uXP(Kx`#;^po)!P!>EZ_g-AkfXIt;1o@#%4eRaD_f-Arv z2PgN-b`XsFP;zI=$0BdlfFQ`yv0v~$uRZ&wa!rphNi_*B2`+E}SrW2nqx93fO#3n? z()@i&s;V{OUEurw0oHn75-`@hX#YTYf4qsm18i7VXsmgG{pTUxzi`Mf{==jC?-XQW zw?a$$*AE#+y8rqiBWt!wj|BGH=?x}(5&6KL0!SK+DrUpkYyD&8ge7kbe0FRZk16Hl zQ3H=nQuq;S;F%Z%ba(0rBCkC-SGy@Sy}PLxt`p^(EEv{TJZ~u!A7I1XJhG_wJjfkv zS*{C`n4~)~s^1_?u>?K$jILLtl8Z@(+IGl{>w#Eip;hOlYmRI>>%WEtJ>9rhs~;G2 zw0*mBo0lu#wOpwvFCn6(JG;6X9WVR^VIM@OP_SxRJ8!D>R`FOitFQkPGSb}7ZW;x5 zj2G-XBMg-g)Px{JficG}$@^EBD{@*n3i6}6QzUbf__UhzrE$0LZZ|`H2Ees}M;R4* zk~XFX4Nu%b*a*r7vPQTbS*oHhrhV>~T7z@*pu0029#M)4izJ|1oY%>DJ{{LwZj!HnwBK)lUYTp+c-%O zMS|0EBhO>0=W`T=Qru-f*0o-BQ@e&ohYKafUaMZVzU?MYVfFp_Tkxo|dbrOT*Ur|q z=Tz>w)nu z+9@2VdOvClH;Z?gfeUTli)p=tQM^QPH!*iDsP`0ar!Uwl*bZnJhZ&VQG7`&2x|P}Y?M^# zLR(kI-J4f-!`vO@b{lPkRjzDS>m*ffL08-4TJFt{bqk+QFNKrIB%9aCF0&ss99yMw zkqZ#ifB#1Q$b~fg&PC=cDyZ9m5F-d3zXxcrH@jFPCtol=xNL~4c5yLk-6b%)Db717 zD(r<&N*uF3gjePee3x@R@9|><`zvL_FUIbLPp~<)8d8so;5O&+u7)IsZS<<#d*^uR zwcVdSXc$*b7E38=#BrkW-<`BX2=}_e340q_A1wJ7qPlc%sbi+g)wQTw@zuuuz*=(_ z*Qdrs72G^fU<>!-LQE(dplJ7mYafieRKc~ksBY`f^y+B&mJk+Zk-Z2-H&{%w@eEK4=IaAQ zWTe5@W4BeUQteA79`nk@?#y#`OMBROhN@nTdp+G1rv5!R1IP-l^X?akb8ABHQ{w_; zv6$>Mf)@FDpY0UZmW9W|BGl~_KBYsLRx{yt!9Ecd_(fKSzTO({8;l~S4MC*>^^4>A z&a<|%%*3d$M1)3-SHxE|POlC-C#AchQDoSc2}YLQn}L+{N$)1Cn9jT37Ka&nR9}Y* zGQYT9nfCpLY!ag)aD(82MK;7P{vLIzmjr3`L5*oJEF^&cz&?FDx zuF3P^)uJB$j%mElVC~1krLv(F5$RPw@m1|^{Oq?FI`XUh#v!^wC1Ndpo>P3=Bj}77 z>Q#9i`=LA6+y3eUp)-f!o2E}&FG2hpm|`0qkvD`W=e%vhPkYy5yA&H<#(hub?I;gR zPeJ%?un^M($dKU=Cy+}S9GBWX>HR(y%`neKj9&*Ep&R+|u1!YJcn5utyovGh(S!xV zoXLye$hy&(-j(0AcmQv8=E*O+Ie%|e z?g?leIUOuOjIM?RXY=8bHN+^7{~%Xd|KV3!`YpL3=7Jm-oy)+p$ z=?q3}iOP_MVtw=)zNRMJlY^=;7L`0n=(cw)m#F+;GN(EpdQ1Ca`P+-hx|?tf`N-a! zo@H|6{*OJ3+@)Z=8FY5EufK;sEp9RE!JUybcW*yAP>|#$aIS47WVSD|gl~L=dduAl z3voZRqt@cT%0IJqCPNgw)TPIR+1~4yk}2+m+7l0Pzn9m2&?S37Khvrp4ilHcyXr+g<#1I!bE~*nVq@A4V=mG#=p`s%y4Cqq*i2K zeGQ56KajPTXX|YEFo%rEe*u#NHxnKbg7bBh!V8_+PYZtnem-#WvuxjLK`9cz8=z5n zTe|FwdO@&A;j~$e0Nbn-jk#lvRvPG#z9crM@10?9(Qj@46Qmk;k;Vt6hFnJB%)7`# z$w{vs1vZ~y0_7i>h+Tjlr$M#+D{lRRqKP#5#1_O?M_2-o|?l8c#tK-k)U8&|cTlftf!qgRCy&lJd!) zMMyn&VeWkni!piguat`KXSWMIpNB|2A%9)S**#MsX^06GVepb&Fo`(s@!w-d-!~We z6LxTb8TcpKQdNACi34l-@_||ZLvZT|P1b|zx5H;>dGzlVpEP*A14+GUzkMvCde)z3 zqw$=8Z+T3WL{~U|tm8`=8JwBQ`X? 
z7mBF9x>H}E8F0s&Im)_al`Y}Es*u$~_o!(==d4V9tVK)@%Yq!#dEhJBmFWyX+Bot> znh(J!X~>bj`S>2rZj#;)P_z$V=ld1x8fD zK~s}D>w>gBNeXjmLCp2RU+{g1F?IG+&Q!Hlw8Cp}wGh(AQo7KUS*#!%G}xyG{|;Lcz<<>_r0D%9@)4Xu@&4k>`Ukk|Pdv=}VqQ zBOODx&LK!j&}kUeriHZSpUkTF4RIRiO~HQ1bE)b#Oy27Nl6?{dvTmc4<_;6M@3kYw)ty=4nc>S{E_sN$HL@! zZsDRAlspwY*>ZEL)?Xgi&4w(}O6E3m)R9G+4QcD6Jh%R}DHKwTCgrkN!}8zMCM*py z9T!ygqHb0>pOguYN9CE`#|$%Vk5u+t1WudYjM;Nqmalv)Z$BSoDrEF5Z6Cgmym8<7 zINpAy$X3XZT;H$H-3m4}8eKj;0VRbfxW@4_A3=w@J#hR}rsO0=7hiMSn3PXHA=1~Q^k15hr+i?% ziZ=woAF*WDzD5dXem^2qPNk6)pvN^!DE7N9IpBEysUK|6M^K)S{#nhmnC*kjdamHJ z;Yd4c>g2`OBsgZFHApJKi_6gLE!S*j)IPUHd95bH^=q($>z-Z+qm)ku;0A`k6fQzP z>H({AeXesF!2@}5`uUQDmJOObNJo8{Di(?&>%Oq$2 zGSF6da^;=wdzTl~N0Ym;5yxapP2=h9U{tm%K->m{j8)KGT2O)g-6twNr_}E29lknS zoFw)pj(jNFDegGUV1i_#At$R*Py(z$RT!mA2S0{i)X^MR#jO)*->h~UU#xu6kv8Ol z_Ja5>o*1OfeeP$yZ){a1Ew!CeEjwQuO0nglT8d#9xNDH&uoLpJZf!8P6z+DY9;p;VOy{LbLx{6^wIc8mvEpd9 zj3yEF{i73ILa_!*Z@nYDdLn}_u<`*0!PvlTlPU>F(+`MT-4qz6f&>GaDm+JLg$JWJ6vZS z>6Ynm34|(bIJxoUPrb?v`z918m&UG1>ClSmb_K;7gCQxEjdRk#^yotNAOtFHIEHcN z9Er=C)8h|-Os6UAaP|i4ri`AnJlEP!pEh2T4iqY{% zGL{&&GxYKv@#ShTcu@yMC>+D<4zQYA^h|L`(?u!cq))K0Z~PDQ{9=)A5^^5yRDj${ zk{R{^R$OYT>{#u3oger)_8-0-OUj&P=ul!PT8)+6?NqC1#I{s^uPEKeYu56!>#Mbk zbVfPQKWGh=E>ZTg%X`1J|KqxiEnvElLzsu_V2Hv7;n$rmKp5cGj_^C#=?&^Xud@Aj zR9&qc?Eg{K^*>+0zfk65_$P`hZ2ZHYP5$*VAHzRUT#*#-{f-3r+s(7TM#+Se=?Pvm zOr+$4XnupHco;IjS&j+Paivpvv7%&mZPTEUVbJ=XLGBjlB?Yi|Q&2RLqzCQJ4xOVT z%Tg*(Ko`)en?4z3u8OgjvoYXquO0Q^UQ3hO56YlWn1}l~ki(1{q0&rkr6<*;Qhl}K z?4^`q0K9hP!I9n+`P`|`X&W&e}?LO9-E@#Oqw zz5dMJLh_gxO$ryN@quHjYL=|U67#C}#qP1XsE#hK+tR6GHel_pw#W;~PlI~E$f&k- z)`W8+a-fG7)m`Jf`u>i0n+F4m;o?oI^TpX$>3^-3yd_2=ezF1s>z#oC3W5+v9$8u(30% zQqId%+9LjR`;=DT7@dEk^Gqqy;E8gBXw^ec*ovF5-I))^%T>h2cU#f;#oJN( zv#M3>JgM4wBM9PUkVo_McJgq25Yfx^?sCU1{&ZG@|H)a|YWj`Jb8_b3g>fVQzPTy! 
zz&X););alwu16M?YwL?;8Dak?Qf{8j#xI(yB=l ztH3JM+0V6xY-_$d;l61*B|cbV==63xhx!Q0^c0eAUEd!+Ufk^PNX6JmCS5j6)n9gP z+2DEILeGJpL0>sJPXwmHq50JbJ!lM@D!C(D%~ERsa)M4Du?2i=9vU`0yLXhWQ0qh- zZCd)+PK~YLr9ED}Zq1%2t~U@bSv6_n$R1P7Yw_|DJ3a0(sfw)IaP4=)HW-sa+S7o+;M zQcA7Q1LKL$th*}roa;E11Ps0A(;%FvuLoIa>?$={Wo%dt^Rx#9*R~IMZ&De)YqA~i zLl+UF^z~h5r))*fDFdv43r>xvk=hxfot_g*Up+SN4aLF{dSz5l7&&cwpvh zRY9i&1m;V?#v`QH$ao!u*JYdLc}7t_C{eC)H=!4BB6(i{4n0}aWIl;F95i(adShQY z+M%6{bWh#D4cztHS>0ONFyOXgEO5e}ObrKW{FeMJ`TeJ5*w-DGIGgry=M8;{bH)@l zsszcq)qArv!#IPsUJ{9mR_fTzVLINbb+n41UX4^|TwaQ1x(9>s&Q`807S>t`zrf+NeXPp`=C%bs#9>vRMGXf6U9fxQ3 zk&c^eqKeI}Gfp*q@oW21niW=)DFn}zg{^NEFLf#I?xC3=3m=WxY7egdb*&i%UxpOY zQvIiU{PDZin_lo$HsEp%(&Mbtita0#6(K4=$ezM@i}M%8@x0QSTTM^i62Kzv#H3xl zYO<?=>{yFV6qqXfkP9uC|QRjJqxvs=Ytt`dP`ijg7dl))`6WiE!oax1J8VFVu?V9}}flTP;8&G#TA5#-#Os(Zh<+RyA{ zx7Q}=SQ?Dh_WO6lW$EumPtngmxg%UIfFu}Mrg{fcNhU-`!b?l&%osOuYz5_w!`$O+ z=P4Tte5u#<7ar$P=WaL!3NNHs=P#EA(s!_rNDcPiXg+yFFXmbBMoYFKnmwC(1z|I6 zg1=G?SP{}4p6v>>aG$V9Y0ctyF7o!zj(fzGC&roArkFQsy|zs9Sk>&!rEtnQ)u^45 z-)jNe_f%(jz3u}VUQdk(M;#g8d~04nxM&k)q*X?sLmun>OYX~_A;g0_8!YOar(Ms1 zNZQS*SCDOEMD0y4!V5`3H!GiD)_ya0JOA4adK}f}Y8nRh+o1ZUc6wdcdq4Myu!!Ip z+_GL|>c#1L2kj+%wN1mSmW4R=)cJ(+simj#)gjc-N1I% zAAFY7!B3erva8dXBKLAZvkqu4o`R&I62L2rnvs@zk*0ka#(3VHf7_kS#Os#k0~WqH zH@D}yH3K*4wI6<0kUvyZMXTWTP#=1L*S7IqzzkB|LP%pL@sF2nuiV|i zZaVovEjk(9eyo*5T9HLrgWAwN6XY!O_3cpZPkEqFybSF5+9K%md73vLUhplxVG)V% zMAb7KYIo4?n@CSrWRFlhHT7W6Ntbnx*1y+-@C>)bJU=W*yy16T6 z2jultMZcMqBCZx!N`rzl6Xd2D>vF=p-a&`RlT{vlDs5DoY=q8sSxygIJ82uLsL3AT z+gr<%AEa29jF}y^nOS)FLY~@@L!Vbl{P7SSBmCv433tibz=4KBa>~mw?e}!c<}%xw2*|&Bbk6^@xb+Zs>&(s=E22v z>0lpJaN`0JoQg>8kU;EyTr1mApOZ)@kgDdf${Ea4C-?;~zoz{d?bz31yRia#m6nm`Mq4_%+aM*mg|d_X{GY< z8b&~y<7?^0?z@(wkhSJDOEHBJH?unPYMldnDT9#C?JPSNvXAW~qKfU2FoY3vk3kcM zI+h+AH1En(k0c0~OwScIjwVR&@5LA90DE9>)HN7=#E8CG8eInmq9Qsa z5ZHUr3L{p#zpocF?5;Rh*ulfI+Bwti+8UTTG@ZLdN#d*5T7X679-;(_9yQ#sf&Ti<1gBl>vkHE3nt@x80mJ@_(&RwHwC;(++y6k?h} zRSvc8cnmrl8v4Vw28T9qJ==#wnwj6&c{o!XN3PjhdCtFX2(Nb@6D%H}Jc=1>HE-WL z0$lAe5D#FsSH~gAp;L=z{ZukXA$4r7kp>#gj!GxCh1N~oTerF(WSQVS@*8+C#|g>b z4KS=+5LA8BtW&)WBmMY$d?_^ao-A)${+WViJPhOk#|1k39qr3Jc&&KkN3*seN%`I&! 
zPvW1ti?kwC=1!vswqoN#e%H+?)W3faZgoyXAQeF{fA83f>SG;`a>ZvEz+99+5DlqN zl5dJ5A6tS5uv~nCcYS;K-0q&yH0dNX*jlKcx+{bUV!8tvq#1}hJ_nFeM;^G`9%#5{ z7V>k4@8dtQ*~A2X!_}9(9Z>Su($=UogCpKPFA~TLuz_`<{S4w%=dYz9`EG zp3|LmZ@DJXdzEe~b+}xf*=>BVO>(2&uk3xUPjZvmY`YiS>?vRCAWMm{FFlcyQXIVQi=3rKmw6fvUYvnCkcU6}=}${z4||}-#=#&^^Sb9)*nlL)3`yigFO57lVc#%ZB)*_Q26a zOKeTP48{#7+oT-oIyS}79ve4gxQCJ-7!K=5+HP;sW(e&eOzZjWnxk<<3rLm6Oqtpe z_~Y?ghw|b5QnW{39jSnv;V1M+~gV>v@|Niilv_gML`R32bZ7-J?j5xUZUs z;3pd<)J^i`FLq%@vgqDLi4jdGLyy1MU4qH@6QuhL@A&yc8?Ow zhsLCAZE{BHRd(E<`$cM8OFFU3$CD69i4JHt;+jJ=X?)!#X5eqHS3N)gEl$|{43TEXpf zbZPI|QzKpQN*w||*`iW^*Y0Bq-e%9vlE%3iL8p0N?=tdfAE z$vS13jK>r-#2hj&1ffvlk{vD2hdy~`uQ=WrkQV+MUf(WBIh*}SC`_;3OVA1S2UhL_|EcBsg!c#AH2-Ku<{D&BrimNvYC?O z73^Hk+!%^{2BW*Y967t5d3*iV`Q=-sl(~5@EZ65Gsg1pChPMaTr@9|W4G$O?HnjBV zO-v1O$rfBZi_cZ?KWr!?x_iETZ{S$=YR?tw7`9=B!GZc~2%82ba3^Htw zXxyA1zSw!avrpca?EF0ZTLPZ}o0y>6+$+}>XAf~?z{nbUg%WS?9??Lkp4VN zF3wx{7FSu$^LCRdzOMZRuOhB%8+%*$@%5oCdbU=%1fOm-V?`<;p>oLktSeXJ8G^v# z!g#S3#-?*Q8++p&TQf^577qVkowGRwr6+`;})o!&@LzhhcJ6u9&QCHLNVuDKH!z?lyxG!ORI`h?2d$*^nUu1)E){RNBVR zPfG+dmn)}rfoFZW9j|$+0<3oI&Dw90VXWYrUViwWsa(4v1(ONJDEs&QdMKZU)T|nr z39tRN{LEm=zpC(itK$Ju>P!(`j>+IKeH2-a?@(Ce3WMZ~6^2nh?tY!^HJK>^%sCV- zhj%1f2rUgp$9-KIZ0hlVYj|{=XMOJ<#&E8(qggNWf7#uxGd(t)#4jfvy*4gZ78Ibv?uv$^QS$?_xyyj=19j7h+LRtUt(&AYeraW2 z08%U4>D=|NETo)emhh_NzUS-*N0#xfV-;?wwv3uKcuna9g8ou|f;omosNYKu%I*mS z2!be>gA*Oy3SLIpFF~>hTZf~~Sd2k5Jl|Y2qjH5utJp|?3YCnf0Cgtj&JzgCFI9#? zB6t9Pzw1M4T+*h&@egW?aBDkt!1<=ZZYU+bT7RU6Np$^PT53{}y)o86`H7!2d*blF z=7fw#+JLut^}P$WiQF5b#bHBP51YNza2cM}3sBMp!zx&c^CZd&vpfUIH+YPv`~@0A zb#8$QXu+B-%<6R0)-_#e_2@vU__T6L$AHgHIkb*=osk6d=|wBcpPwr<#+MqmodN=K zFsjn0^{Pga)?h@}t(qjL9`3O07&j#XS4ZacSm6@4DGR&91auAH&qBeKAQRI2(D8f; zgABb69k@bMnoXS~I5Vn>q%3by!zYL9UAV+BmS>6s9 zqFj5g&8l`e?m-{%+%XHHZhmKGcox3xOZ@IlS2ttn_Pl!x=+5 z(MTZpc+6)7Sh4_sCvr>>r=BP022Ht6sb@MVYW0+>BBoru|6mBL067LZ=|=#8De@UH zMd~oLQ=I;Lz^Lrs`e)2VxaBF~J2p*n6i8qQswWg)RzXvgV-j_x?~bSujXp-yzd=HW zGPvCJ38-;~E(y3{WDuyYLIE_Hi3}?IKDma^NDluKs|ulyC;(N+q2uA8mcm879yJ5AcoJG=nd9teqQO_n#9ZNMT0Gd zUoDA>D%HC0bNHrZw3G;zc)}lu7T=q7WXC}p68l)}URI&2ThAWiu3ZmqA%?^xr3aup zv0RABPtLSmcHJ9a&awmC<2B;}I#envF>ccZs0jdhZWMeAA=pR`X$tZv? 
z5#G#I(Lca6$RobPSD7A1)duakQWjAh7wM5uk(sU99wT1%t-ZL)m@uuON{kue6ZRM$ z9rLlq{;WA76-L~`CXQPrbQq66lm!UrFI^Jh<@>usuNGV7$Z?MRAd3lOa_%?khU1hl zO)181qnY%**}JO=tJRLB)vzGvi3~@f)KP8$RsKl0ci!b+JJjt zDsW5QZ##Y$Jeg&5%bzB#ibWKWUB_(5i^`llUxCO{O=JjGa-hdBz5K(2=&wi-N3AIJ zwY(KhA1}f%s6qpMJFPIYLTI13wMCLYj$>e>o3c8W5lO0>@|Y-XMLIEr)XXFwZ6ra@ z?`Q&y#fS+C%VxQx#axMcFe52J=^1MsjK|R#;9^@zcel$p7;d02Mh?>?4cB@VbIKe%;ArxNJ4)IW=^_aV zu>y(k?u(b1c*AX>p*$x}lex@|i+8Es)08U)YwN(e@);&Ga>Ok-AhtFp)wZ1xiwc7G&{0_v&acu+RQb`C7p)}L&U1mLh7 zfERnF`6@kvTjeBkoLowa%R9f(Ak%uh<*@b)#jdT@Ab;FBjLjQ-%r!vdus*8x=rTqe zj9hp8z}t?`<+$ukI4pqh?u$QqVtE)`EXf#cm<6+A5-}%P_{a*UvGG=v(u*}}h{!vc zMlLFue^1jtVJoam`MBKzW2W#;2<~RxCHx7YHwZ*XUZ1-xX&(F=?Tb8k*}geT3bd4B zX+xM0(9)*gOo1Fj=oSL1P@%t?^6P>aTiAQ0WMUx4-r-2O_sH8c9Nk|Gj8DHVjr`EO z+HH8q__H<HKY#Hfwf#-N&r~0lri#|s7NQiZKVlP9Twj!W@AmF2 zL01qwgp2jG&;n30J6t+w7CrvbM^G`s5v)&3K}M$|;Qkx7YdFX2h;~=dMFg6#%eX^3 z2Md&obPF^19MGoRMfywRNL7j-d&yURl&%f+v|w^_4PN7e*JmH3w(m*B@4PAX4Y!XOCAlOr1Lm%kKrdi@)_=C9fF>TT-{6 za=$#aeW&w$Hs>V{_1Vx`^Xt8#*LhX!taJx$uOGC5@6z8Xk7)%xm@(X?b@;4N;Ggy` zviLC#Swk+jxqmlL#Y;e1RTM|mbK44pM}*EY$r+?D4Q~2nlEH$nKH#l=mTlPgK0ptE z9z#xKV@xXC9hf3u!2-nDdD-a&A7`7wDe%_nIp1-d|88So)(OwfAsoPZ6x`gV{v#(W z6TJ)R=vN`7Kv%6IUmrH@hkHvqY1o{0jWhr3XOwg<`$tw7@)Wr7bT%Y^y*cMR9y9b7 zjb1G7l}%3jwFLeEl4clIEVh0o6uGuH!k$Qcv|^uF7)3FbTA0+KLxVdkWg>Wgm8h_F zI7T=2EI{xs4%~NFK1B|WnC8dveZO^xWQ4H{`DX{49$mEN^z=8b^TZQU6d=Xd6OcbG6Dc}1|rGA@bmjFSZj_|}wgi|FTt=>i~ zAKnX^-VN@#T?t-e3wHbw4^umbgjME@p4CJ#TPBe2J{dS};OZ6?+1@t|cX)6}Pim z?Gez^q0Xm;3W@)#17QxevGVx%BC?*o7KD^l68zOaaykl$bAVMY2MVIp_=DM$Vcw~y zl%k67)DEjg+wj}za*S0juT%#)Xx%k6#?A9#4^=g9@_u41*elNI{Gl?^3qixfU9-|j zy0F5W4vwtFHeG3u7m42wXRj!5O(S5c3!zTbwF`my;@$a$ z8=$PQxzZrD5B9oBgEu#b8L@+?QA*2dXuQ%Zv@rFFRE>^e*^p(~vh&#?(_=$_l&S%; z@JzpPL-L|?#%;8|!0Yt_*5YM#k|%|i3`9q{%G=4eH754ajWy~F+=yTjl`j0*+LJJ- zo47^#%;@Bhel&}h&S{lhjrU}VR~ObJb=RxG8Krye3g%^T(K+M_W@Wag;6<-yFG`i9a92`&%&#V%g-TH_#+W`HpjTCjoWW`O3!2P=#(Q4*|VXdwk za{GarhuHFu2D!Sc9uqXYj2kLIUCwyb@YUrL2(5ine^v=CP|veSNtyn(v_DMs{3mea z$kTag07TUj-ZoN@)-A7$4hK}Ll>GW}!G7(@14kwteoaJAk4$LnNeS$Khb9r;i#h@gZ)=*>G@FWhtZPgf`0@-5s_hJ zGev5Nb)~s08eh(k=J4^tnkMv+k%By0z96(-N`BU``ih{I(11h&!;q6c7~De*TTAV$ zk^qD^R7o-z&CT(6!LcHYGY_(bIM81OY*_A`+Y>Sme;A(WBs<)*x^vnsmE%6Np?+)d zRF&l6>7P8LyIkC@OZ05I0OP-21w2R?H4hX?+}`rtKz&?^nnN0URU>_lU^?eKiROLgN{k7oCEJ;I=l$xV`o7eFMz0h zcFhJreL4({R?G(l03CClEUOz;Gy7y1ecq*Ra>_9`9gX;+)4t1l*CoCpaZFR}UE`AZ z$E{*yg=o5=5c$Un{GMDSB*KWMjABMAq)N#Ij&i`q3KTM`fWB4Vu`8AiwXGdaJ9^MH zrKT9;C~@<9y2I1IHVrB9-uw9G3Y} zRmKREcNluEOYqTkIrZQ4fK8=k9nQMAT;U$yFhX|4(SF3DNIpa~k9Dh^N}CkPs-}|} z7vBKm7#X8!L~1CsV7#-HzSVn<;T`4>tJgDoC-$2c<3Jhibcqoi*Jdv% zsdS8<)Bm{_uZg+)x^uz;%;u!mvUlPXZav4fT`F*Q%0Jf+VOY&ba~Ny|;1DZyJndSI zU$kZDn{P_l+$Gxo(heto&?y-_H}2RhCnQ+v0E|RKzW})`VxZc2$d@#`>Q+9hHc938 zbMoqeM5K^UGa5v1>ojSXjI{Ok*86CG$g^Jzxs+ZxVDav|l|D8A&zg)HCS!&E-hB@( zuiLzQDkF&d%wg7$w=aEJhsfLhbpNU}7UszFvfB{_y^1;5E65RoIZ1cg`YakBL;ZGN zZztS;^u@k^(Gqz4Q#hk3fbNx((njgZ5L~W4IQVS*a_2Q#N8`d=PUXx zk@;|88S*WkR-A^+{hG{^=I>!z%0tUia{?J#8`#jS)K3A+yJds2nx;M!t-lMh^J=gW zhR5*$eH={k-ls&M=ylq7c_p4X{GaxIqO83{@3s$SOF~ne@X+6{QM056P7CzvsF~A? 
z8nTlnUMK@dH@y3B9R`n7fEFu}Cqz-HeJA8gk>)h*_@ogIhZhENM=_-mh=nFxH9_1# zH?2w0<$DD+_3@_TN={jBL#t($Q|$3B2DOy2VW`~;_2ak#S`af3Cf0$8kiryLjr)q{IEgA)YoT*;xD!4nEKPg{9)XrDF+3;H0iY*p$FZn^3UN!}ck= z+e<%_s8-BrQ=fhBcdGjc1zIW}#hGz#E~pDrFxM~mCD_GoM602pVkHRNz6a0qY_}aI zPWF1-fAl?-?!Jv4mTFgtnAWC@!_Q@$z(_nN`rK63Z7Z=ZJ?~ttJoHVb>u9JN)g70d(NFO?!GKmykKT7`XMc!lDo znhfI{gUV(r$QOJBjL&lz@&}tD`}%vom8r#j>pEqnNouY>12@4fAM%{20I|K@-`^ps zlmP*;jmzJ>!ze$#n38rBJS`rk@n*6_@`40`II0_X#F&JLVrfcseJw< z3FN`~xuqnx6&AdlIuhhAm_DMO3 z&7=rZq_Urc)III?N=Q-~T6ci99)Y8j;&4BrOinJ3!kbT`)R0&a`A+h`)Z zVFTzO(hx~FGIw>Yj}^a2r+){}A28fu*^^f@qF5p|Z@+Y(o$BYijG-5Fak?|JM)6&} z&ay=BzC`=gG3{ml@|zpW)gaH3QCzom6>pkc@_Q*a6jQjvZdKhBunGHs*60n_ew4Ou z5U4qw%Yxx)Dw|E*89Q7g6DL+tRv!L|4u^Fj&jN;W+!5VHjmV~fZ~6CJp5nH-)aEc0 z6yXg`;dzebb^3DgIs1r@v>bX|{NqmhP)szCpTG4r1$DbRcyP%vMS#zh!X8In0{G**h}k%`qbm^Gwj<( zWv_#kG<^M)WjvnPtj$qJ8&J4x+DV+KrmDs4XD6;CjC%8s94jeE{?LJ!8@3bJq6A?Y z?#XId6qFZK4}!{z<%!G(ov5Qo^o~N|785^oq`3#L8-UAvB6~n;8s%h|-Wzr0PuQ8& z4%B8%6Bb!88~?#C($hB_H`Tu2cO(;6SmIW$rJ$K9#Mqgoezt7e98L3JAIH}zV8@g_ zVESw8ZlRO_WbL%SxAkDthaE|TY!&pFEtGs#D~%>~!xnFvaLW+CTgjGz3L1bNxl6fG z&QYl~#d7joeZ~<>S+FU{4%brb$%|1|JS!_-00lJPwwIIr*V;vs*2(QS4oymq}rP;6Z-OEj;{P@TvWB87T{p_ zbQl50dj(Op=CGcB3@Q!!y6Y-x_TY5FZ?dT?B|QTX+Jl%&=|1^c@i>L*^I#XM3O~`k z52mk?ez2Soi4?$fSv{gr5sKjI=fYKnyXJ_1u*`JpE923l-`M|P>??rc{FXg&4HDcX zcyO0Ng9Q%|+}+(LKyZR<2=4A~g9ZyOgS!NGci7>7?|u9BZEe-oRGqP!ufNl$Pj{d0 z{`Cagewl4Pl)26N73Y9(zw?(WqxE|IaQ3#N(RoUU_QE6;O-OBldwOYy(7g>lGc8Te z1%)qq{95JfKJ=2ywylDpmz0BE31Pfv!-uWrGJRe@V?pg0?C%+oma*|NezwT!^t{Z7 zHU@}XT%3AhSXBNn4A7`<(FK0N`6@o9orIsTsPu*VbqO@56&+SMuDF!Ak*>RHm>F5b zNp)avc8{sU54breF%MwNF7K;RN>_m=h2}y1>dxwf^EWgClwPVnoM^+6bOV@Q<$vbo zlJHX)B$R&sQ%Q9_g;Jv@%B;lD4qgHJ6@@A?EB(PKU@^X~l&4>zk0^xm0Dq$#O)#?2 zpO-g&$-Zr5$VV=Kf&UtGB`4s$s`Hzzf+(^{@94MUCuqnSS>Z+1roUFyA)*DWa$_mMWBfdE{lhKZ>9o+tO7_q$QPGVrV z4AR$%^vIIgBncIpG=32nF^Fi{y&8>BN*uG_CQIUA5hvdvlP!R}mB)aw20bCA6p1=v z$W+Fs##FDLC%HDRrg->BdhZ#n)n-^R_5mNf8%Ah;#s_{_Zu0jE1#yZ6{;|J5;1BGL8})y|JR-u@DrZ9b5bAio3%tq3Dr`J~$Cjw?^}!4cc=r%wB%_ zZS25)Ba_t67;jLy-paH>V=4LhQ}nY&&gxr01?O|WQ)=+obnW+%V1~t6al;2X8C>^s z;qTSnG^Jz;!hyL<|Q`iGi;^&|8y(Dq8(v3!8IL;EeK5AIlzZT1`-7ry| z9$P!mM^uM9%T-R(^gsd{f)}OOecZn8&-_|u$RAQ}S7U`Qv0w*=V7Q#r^yk(SZJ!lh zKC>!u>OXCa_1%6g*s|r^Xx$Z%wTlIa2v^O4im=t*f+)qC0+7JUJKSSGS>%5DVX%0W z3e4a1(;sB*JLy%hjflWVK^lSlTYE8#t^Ib&JXen^zo%XmmO@B%8W`632E|V6#VJDp zQX?jn9*u|9*f^_^kJ)wsqty7+RE|aTNY^rl5Q7dK2>F7&=jRNtu>k>jyW|LLrW^{=brl2mR)K zhT3oFB>J6-#gMWtP;QA-FlM<_)6}-H4;w_7If5>c1@%+S{RPDL%@Eq6;oR?MPS&NJ zsM3{?9Oy{b8}h3UkDCANDRD2WJai_tY$AA`1UkOpk}C1(>k;YA#oNcMF0+U)Z*VxC zY&mw{2Tjem3PU?zmB>m!dOy{5Uv9;mSL$z5A*W~hzw-$>I_w$ks`$8U{Z}F&RxH0r zmviIxFVFmH=&_IL(qn-l3n)XrgKSNGj<3of$vzaWt?ZvT9z4G1k>A?ymruXa*-mBGX=Vy`6V*tP@+*3CE( z4r6aDs~#M=ImYq$E~A0mZy8@6f4t?vwRSp-fnmEwrq=kcQyJ?cId=V@7Po)bMGKnE znfxDh(OqTb9@roPL1Ba%;c;KIiBA>hcm4Vw%WBqy;@JE9v$v>ZF#U^*C^#wcYPcP_{rD~tH=Eg`f zIMA_W|F3FjW$aYVG0yL~ zt{F*CSWYgc^l4}~Aw29s26 zyX98~?c3AVzZcvl*(kg|ZQw6HG-dfPEe$@QW-@24WjmVF_zR^;-}-Y7sD|Q8`SNwf z^ccQhp}XaDccmYl$od7cORpez-y>&7YefD4BWYf)vDevQ%(2~PQUpKz%09>2%eP?B zQvd3I(EUH+iMCJW|1F*fL4TyetLMnR$}c)>!Sk>cJNexcE0MU@EPMX^ruKt_Um<&K zY6YBzVmA@b(d+a;|NM9Q_3u7-%0M@cbfkt8oF*6h1 z8G?fOmpEzaDamhD&z{%rnOdvlp21r<-vWx>$>*4>I~_kh2A_hu0}nx*-~1P7k&fw+ zPhcI`JwA}Ci-{SO8;?F=mwAdC;p*jrg7&he5OTlEF4_E`aAy|jhrQTp!KF4bl-~Ft ziEmC&$oGrSV21Z4nH&^9xp=Ty!f~k$b#}}-1dOTkxoeNlL^8uHYCdC7C>1S;^ z?fpNIjNm*o!oIOWhWTF7e%i{?`1Ri6A3qc1@nu;QBNr`__WN7uBPJIXxq$+Mo&AGXtd8BlYTU(>|ynNf53U(*(>V(}#yUeo$!r-kgdM30aqSgfoB z)l=K;^A~Inc6;bqlU`Igbi(u`M&!(mX*A4Ov2u&4bxNg_^*TFD;pcdpkByzVwefV< 
zrW+%SPSIJLr$<@SYJcgbYf&4BCkPW6KRkr*2PV%NS($uoX}#k4bsg7YU1a|2x=lED zNw=1vY;ZJP7|ViQ1-fwnDld}_0 zzD=myu=*yhWx>8hs9YLjVVg~2e9iQ#jm)+n%dqk2+#96|5b*_lFzEL0rmKZ2y)=f* z5RI*~7=7phft)j4It@G1s719Oat;_d$8}$#f$uApnH+g+S-Ez4VR1egiZxdmUJ;Sm zwD>)R7bhXQF)~!Sz7SdO5G!8=Uo2sv2jxaRwHf6C77?WQDWC7g!4NqyBdO+l2C38L zzH<6@0u$^%DRl}Cr&tb>L0fid6!B@J3w{qbFO>Q9@TzkDu@FG2e7inT!NpCbY7jtH zeTf3Uh(8GaFYt3PtLl5?ZYPT^lIe+ZnYjl36DH`1(J{)o-re%ta&`BwTFUMo;prNQ zl6n_I68{mK5o8pZF$MkXO6oxWJuY=kM#EB$?oFl2xHq}PBy7E@eERi)eVyVtl z$};L&n}6I4y44)P4eoEwItBR5f3)XTtloO2|7<=4iOcAE3^l(`5R)g|WrsEgm2>_n zs1c|=P_CBV{TOt7K>2k}UPcHdfHJf>(Pf{dv%IV?{!}6{HloFMdYv~v~g5&nrVayaWH0NN! zIKxcOV>~1$ynKdotn=@o?-Y@N!jS2O!5_u4v>1?U$?%M08KI*pUw+@I2f9yQ&t0)jof`@n2$P#Mz7E`ypNhvtLsH;}2p(rL)_2x2^KfjtNCFDFPC7)|q zglZuQ)SaQoH!V`n6a6;tJaIN$njwy5x1A~&I-RoPggZtVMmD$L99R_kR1!?J6kSB5 zcn|ZfayGN9{%hF<`yMH@X6Vjb-(1de^8?c%eDKE*_2S$MRXdJJeG9qK1Me@c71Th4 z{Yi^SZe&9=fvSP_MLc2j{5$ph=K9PC7FPdJO)_bIM~tB1bYPO1V>DUa@V6l9*||E@ zLn0X<4nvsPUbi_sy@BCm^z;@~K>~uRK-d*rgC($15;j;(DNs4(BS;1V7XAbGsB;6( z;g)_wfP@V+06ol{?Yw;Wktvm(9}>j}495$j#tdRo3>TY`U26~E-YPqc9)1w_3p=`F zwI3`&AI8#5vaAh)0TcZTZmcxKcfeg4kCah7{ zio7<^%=o6h#4=FM)>F8+G}DhZ^W8;nTLM-+)rPB{%9^Ab;HRpqb&iK#yG(+^n$Z1o%(3(aC9 z<_^5-c@ms)R;Ue|pR@}+pGpdU9aEZ%`HcSkQYyz}a5z72rDv8)r2(l9 zWp)9(6-IdiajYeOx4XD5LCTS`W;=*23?M+Bx{_qaP!wr~f~d+KD~zEXE`l#S*|tMU zhhyKFH&}81u90n`#|`{FO#CS`X7F)jZUOfG=Y<>a_`=QI{+?{V`=4*3$u_++R?4Kg zf$5`iW~Qr@P5Shr@OVenh7Jff;rK^?73UuUDynqW9$Gi8Xgd*{*GXZX7cBIJCpw2% zwZRpxM2oGk6@M&=v;%G8FI;U{&?>Zkp($aMaMkak6Htw3^IJPdK~NADLnPcspSJeJ z@YkZQ_^8M?jR~>T@C*$I^$t5OCfziOHNx1M1m-f>ZYKWL<`<^KS9Muw=u`tLM!Q1}sS6-Er87PqjPN&*!j|o` zqF}?tjoEb8MX_Ee{=@#U&lKlk@wwQW@#J($Ov zy3Tiz4&K4ozc=Udve0lzHDU3xXuA5sNKZ#Eah%LV?;%B0HK;Q){|=4_t~JlA_I8v5 zy4p}WfYhte7Y$sgr@noHg>mVcugHS@SjR)Fl3us3;#wXNW}8w6DWppFIt|^ui6sy+ak;!Q zrBCvM(hF*wyvWFeH^!*-0(sd()%Y$~S$UyyYZB39eWfk(7#bthw7Zq4^VN1v@WP#f zS%JLSR68AnWw45v!>~&|vXC=-@AatlJ_ID9o7a?RQ=fj=p1M~&<~7k}9Acs5>!X6C z4xVcE?Hsb#OR1t_tdWmN3X2xFbUeGtP6fr0T|cTddhd8Y?A83F2~}Nu%R1rjkF->Z z)}c+(x<$Cc-UrCdVh=N$>J&L1n(WTmX}Q$17Bvq~!^K_qIzx8W?+Qj>qW%?;ApJ*H zlyE2gk;o z5{Uq}S0mhKCGyai{ssq*#h_jBHEGxqk2)7vsB!<2ZX7jh z5uhXY$qc86G91Qhv^5%T^gk-d8fl%pSG<~SWKj*);t`Cx?EgU}@van{YHqz+ zco68I-Hvx44m<6>6C;Ch2=J(E!>ZOaXoMrY%RQ|JSCN|m7SOAz02JS0rfIcdP=h@) z1UV^6zyPSz@xdVeaN ze7`dLH`YtE7lQTjjUF+(cpZAE5C+nK8{1@iJ=UnjZ|!GerI@^a+ru@Y&`fWQ-bW&Z{_gE-5UnG1eSMeuJ-d zg!OhP=#BV;1^iv4hm$yjw22spx<0Xme#{eR3zeud2N;fYlhDj<9{px-j3>)9*3s;5 z&{RcUtZQ{*PK~M&f>>`EL>j$Q{hB5P-P3q`-e|IlU%Nh<6#B#Q@Os)o6#cjdKam(n zYR~lHjNcjC>I>uGx$o*@Y%bMrE*<#6i@@By$JvHe=@$FgEL(@(ShHbh5lj}Fb}U%! 
zcD1)_-pR4-;+s))M6A0d#AxMaVSExhz*vy zyh{LxQ z*lG05dT{s$G#(ubg-HyK4~VWB15aw)(5P3$Ar+sHx9efCj_)*fv&jVAh^`C|i9WZ6 ziuQ{368XWu7nhQdH~)jV`_-DKoR7|kQ&YyHF5dn$G*sSEBLO*D*HLl&3`M?!Fkh3{ zF`S$4YBqjybl1Xs$JmLc`DE9aoAqFUX^J*d(TLNn>V`dnYxGw2xB=*N#W&g~jzn7J zv`8w&A2f09q6i2b{J=~+K0evAr)!%;rr}hLax=wP{TUkd+dgmYud(LV)Y94;Obhyr z1FVrg@yN%ks6Ez{G^F{5kele7wQ=Wg+$eO{Lw9+LU{AdJkIq2z{6bXn4XTihM8bc))N$oYNU zDb+t-27S>#{QFjcFw#LCj+5X^1#53=+|>#JhL#1J8r-^#Lo5I?hj zW}kbPe}_^)@PPv03>7H=gGIst6SO#tX!;YnmgdO6YZZ@Kstz6M3R#FvliKxZ=3#VSO2I>dWA)F#(_iBNdO zUk)MNnsc^lSn|rQEI{uw`Y&_?T9e0f9CYcJ+5SM45nedA`gmic1I#WMF;b@L40yLa zA@l$Yio11r2mrw{#b1{^HcHz3B*28EN1#8a1~0f?;Q0<=T|Oud4Y|qeI;$g|v{}Ai zegXStS-b2RZYs*ASU$#`kn@|I@J1~mCk^b2zF!l5Tl@8B$lfu$Lm|HtoP&A7&U<0S z7&1pFLUalY-X>y_9o!9+P%pK;$p^NorNr9yvK364_AR1ILst;-W-Y@KjZ!)Uc`)>O zddB`oeCW6k0K>HbKto2<+JO`pV!mz$LT62T0$2LLwigoeEmRH<7B0LC*r!Z>d7DK& zdLg;}Dk0MEIZjx60FpryDX$*846hl6nhW@6^G&{(OJ!loB38BD)aXe$dM4RO8x=9` z03?{>s_O9Rl<76_+YOWh{%{7ngK;417r3LSI~WJ+Q)1m1aW|n)H*9M6eo(8Fh-XvZ zHX_T6I7R2>eFjL3bIkT~*fEOlwInWU2fe!f))ok|5~m@)W2N!}{uzf&YA$Ugb`!^_ z(^8MgJ7W4eaJS0KhodkBOA@XI?CS3zi3ymyG}c%8>@h*#7cd~^SB(rrTI}BFG$MTo z#5|y7tgcImpM#4uPZGNAwhBm*361YgsfA#{SiIq+c{rK!Qp`-wAOWg!`@Ni=>R9#4 z>36#J^(BqNQNln-a}~h<$iq=SY@IMH(9-w6)-iJ#^lK%V4j4>4F|77q>eWhu?tD|) z_)}3&|E%rV34cd)$mkq@-Ej$hvK}`3Cf5mKz7>;pN`Q5$BOUciF z%PZ}?7G!$O2?+7Xx{dm*NI)IoOF+%N{r;11J&vA*9J8Z8?-&KXNJL4^RdCnc>OQpp zxh*Fjv5szgR+OFXER(w97cmS?1t8a$dg#napGw9bbpoS8(gj0Rd%ZP*9!v5^P5oi3 zqADNBm=t%@Km~qa{W#kqt`N~u%5qc)wa=$OUbnF6c5gmyX*rE%4+7+c)MwWb_3uP{ zyX+gwOy#8+D^Bk^cW&`r?tg>x9w&U{GwaZIz*qHZO4PIor?jPDy|mwqxu~x=lntzg z)L?(bZK{gJ$(eb%7hcG{hJE<7MOH_?_XYlg6wAk#ME4f)swKW-dRqkOPPeOAjJx*ubhdOK^iVxBzu=zA<9$l&!uP0s z@^H1grF@5CeDAMfxuvZ>oGmKAwZqeS9l;n}=t9mf=Mrqy>(r-55{A9d2;XEONJw$1 zdDTx%)kUthx53LS%#`mQV;n+s!Dew-$9mwrWO4Da5HI)`^^_h{C#psb*6PSxIW7 znjCu-TBvRkPTg9;pyT!|+DH z3LY-d^0=|9W#)zvM47IGD_mK(0{qcy_vx)aNH59=&KbJKb9dU6c6m3x$_VL%oeqJ*6$`h=jJ}_ghZ)CF{O%}_*rBs5VPoL=bf051%@-SDMg@#HBm@H%UWAu=A+6qq z4`cTDSM9)SN~eY{r+#UufcqkaBwxkXUEzhktk;L}0(&-0^f?*B^LOV|KCg(87K-D$ zSGTU$o-p9XqDIx6ZAK1?Rr%1pM)YH@KQv!*rS~PfUuSdtkx+SHQ5%(}6OBfY{Kd8| z%!l$#-z|KbHsn!&RGEx2REC!Ft38iIZ$N-TftNeeLF`50zC%0HWZcQBZ|EK6yMLp1 z0wcLm{PB8n;DfkDHDTAvtExjnEIhWvVRZ;?&5#WxO$G05&XThdNcH_Ic?Wq4hsRxm z#IK^6URMuEF68Iy3F zT8nf;p7q7eAukORu~hM}NQnH^g0{L*_FLgR$f$fFg03loFs=xBwb3vF%5$ z4lD9B6pBa3%uxPu&eS=7`0x_=A>=dc8`4vjz}F7O*Jn)iw9uRY(6P6Hwc?0V`G!9; z&cfzRI+?i3`P8IA1l`UZp!`UxqZamC#ezO@;p@(wig?2Zj3zX@QnG;#b;7tDLJ}DB zmhjOlC+7apL1J_Hy6$r2sN~Qs&0j049I#9e-~ziz4-13t572tRgzEMR2?QjY3{IO2 zO50B)1{(-ZTcJzxnpNV}_)AMFJ-Q##P@M=mBQz=CG-`~ArFFM31yOc({EVFMCK}=F zt=iSl`~5xt$n{A?^(?aeZSlYUUUn@KG)ovINgP0EOw9$`=rtq6H^=o0ym3b z0JZ)#4GF0p0xWaOw-BX9BrdzzNc~~c5{J4}D_J(m72xtGo{dL(H+W7E|0@gV-{jxa zl1?IS6cYD&euHJ*ZepqM`ev!f^ko}On7nU^(PgVfrHi4yh(hTCbsor zoI+?`iiYb&=eaDD3zfhH4RF3Pi(M+s^xK)uHfmI;T|DNkhWXz9VhnjV2m`8XxZ{x^ zw$qxTo?MuMW4RipCHClc?W~lr!mP&@-@&6rO%(CRg{9v-p8_UpNuk?^~Jk~L93sc4H+83biAqb3QHdsTNDNUDl49qko)b=*$ zYt~sFce!X=^`-OuE|R|J$?P+a`#A{%(jD(-ftE03UemdAhe5pG*k-h=2ApSZC*Vkq z?(#YMsOEi&)c4%X1n=wDKyEZtY=W>PrKD zf?BN)6JmYroeG!JCmVq}ACYPcPNv!V$`^5ds3u@_?QAj5180PqO;Bn;PFCk@ z>lGmj1ia6!UbrWY32MQm&sBP_nE<)V+D_tXfH5KmDu>PD8mWBX*-G!lc+1lVOBeh{Mz-bSY0d!hFvXp(Jhj?;ytqC%Tb12wjidL~}0OwNLxQgcE7 z5Iukp_2?Rdc$)yumn=qD30{b~Tlr(^?wwo~T}4_T)g%AWyJ%RnbhL7ah2-pR@qFCz z&VKrdv$_QaLKzBl_#53D+K==Ol!MGgSlzkQ=sF8l-UH-R723cIHwe*g#3xqP2o5*4 z7fa1a{6mrlvZRFP3&x^ck)v`@xg*$J9_<&ThP<{%{V2fiJvP#-lpLz$-n@=Cg!v-@ zcJI6q=k@U2DeAgBes!J5COHK5qMJ6dd~Y&i03t((wiM6~wVD^^*qGmpq?GIveK!Xg 
zWverG)s{Qr@w-P=rlibr`{_B%8<`_O1Q=W+2MvIOEETHv+;9mBkXD%E%XE-*A04z<;;68J2+_&MYSkMfD0g`G=vD<0<0DZ^$#rP5o^WM^WTQ$b z2(KYq=#6=l#=FIjw$LCd_i9@3wFgKQ@n6cM+$PaxORB>RJ_omSKeY?_iS3J^+-9pj z4!>J#KM#Zih^$7n5*~e?v_|#z70O*>&?7sd`OfG#1m$yS)k7t!%!SgHqHVCw0CQdT zsZ=282xKS1-~4otu=Gy}X{dU~*3^w&F4c8{b-wccKf9#)z&Bn?znbS_#+hzZaP5{^ z$;n~#8fN(7H<8{}@*5Ll<{8qi{8rur%s^M4<{Mxgf<{~kMRAxapiOwyGr#z-ZufQn zWEe|Ao;z4s7O-dc^Lmree`XB&cqDjDuzmiIM$lRC-qE+5Qoz=Ih zuZ)UV;H)N*wsT;{5BdWU*38DdiHi!NOm~G#uD2a33|X8MWXRSKP4t4ft2F3Pm6GU5 z^W>Ls)$2xyEP~4DH$PWnPx$zkqAn2Z(UtqpYC^mF>0!GQeS;*7*D_`ZOq7m1?TPDh}a4a$#MKeK+R1gob|!9aBZB zYFSTQ<{DR3kD*O~FR{Hr|3$(?iR?Q{s~j%Z>Hc|*hxOiyu;tUSO|$3ciL>j&HOrQJ6mV#0t0|C!>O@I$+Y z)S$Wvc54KaLgkLj>s@3>=}*@De#e9|uLlp=g%F;4=*?RlokrmT7PA7DWLs2LveW46 zqWgKUtH;jnp;F%|&$}KFj>5#Km#bm&%sEwvD$kRwoj)y75$9ML!5#s76^nRK{vqYJ za&3ffHN)9Tdz4tA4c`GSNpn${cN_5d_saGBwlDXizl1x}tMV@LBRzI7TuuEvz>R`K z)G=qDz07mNjq12G!>hbwb={`gaT;8=An)cf==?jyI>3w3*{-t^!NxST=x2?pNDtgR z^crLpA$M;xUvi48i|-EgLzvTT(Bt6SMP4wWX1(;0-|5@dZLNm&DT-K;@8Lz7C%sej z$ug^dW2&BSCJ+w^qSE`p5?V@SkG8Pm5ub*Y!Q8$Cca@|i`>Bq2ca4CuNp4PXE5d(- zp{!eRWlc!`D6w1BKwckCaAr>BmQ^=|ZjoKGX(a!*N9h%G%FMutw&#C|4+zsex?tlJ z;RxN^L!q!H9Kdn_Zy#kOhHc3aO*;hCW~UP(gZvqR=2~%Lc3)J$M3z=%LB0*iK+Ot8 zDSzF%W+5)(Kb)l+P@G)zfj$!KvJdhu+VfyCDSG1S`*~PEUvQo8{+BeD0zK&zxDm8* z0NOC&Rq=US_`E*!s}+KKMRqB#5$`vvtJK0()cL{&sJD%JAgt4ceg}C}gA|XZ=i=YFKMPlg$XW@N zka~swc@wG!GTc8$&>+UR+<(j#g!)`C=^nu62*)H58h`+BQ@XOOfgoptng#x2%eE{b zEd(ZR&EHH5Bp?Snr!mE?z8;dfHN)&o`d>^x`8_|gp#EzB!gw@qk1js_TBWU2c{{t>bV$ds>p=>bHU82|U<|pXUPlw8qe4VT#0Vg%GFf8};H-QdL*7}|l*$SvB%N>ahV>?}u z-w%PpG89vpR&nb6V{aA?wgdJ^DkdW@P%i`7#biDz@|4S)K%>tCqPCbS2ZrRkAZ{6~ zg)+nffs(Bg4Pcg3%Zg}wXb-A!&qW#F@CMKN?5h<*M(uiL5r^ijsJjRm-2-ud_=jfz zGvMcL?3SXwYSGnOM1MRC>JV``h_=1M#|LyN;sXpCGPxn5w}RyECdF*%G3s5^q2Wl_ zVS|_nLqQZ4-2X=Tfh`6Ta~7#Zp|iUxf!)2hdHQ8|em}F8UWi{pc64Eg#_$?I5Na zfpGNb^xrqawyxwgQ@&tYkI=;yhQh)1iIAu`ztn;ZMut>S83GQ1QXZgRKETB+|I-93 z%}&!vI`5mG9KTj;k$DJIMkALfyoahjd=I$Bk$=-_{E(fascpE6fd{zu!CMD}>*a3% zK=0NXV5<;CrT)l^{{_AOU0!}VeR~T9YMk#DMD_MSy*x^;CZ=4Qe8KsImZ8k8Y)?iP1dc;R1EShzYQqYWWLsQS83qjeVHmAFE+nQe8&uQ_!tyggYLo|ND=}#Or)C z{k#IXi8(w_iemYmCS>E{nnwZLd~9cfP=3n)O{I6a?H=Y*pRz`)A{B{@FavDa+o_ZC z4WNB_4>`(|Lh*Ll!JU+)JRqXnMnSYWPZYs+jdL1@JJGICfB1?ad@csz6knUsILWYb z)Mb-J9I0ffx1prIW)EVen-d|-o8dg6t#(n{ZD%(w($|E469Xyq+)3l2cbT(N!wtxG zx1N!4DPQl<4QE3LCEV}+F$TY?mHX-I>N`z}bB@zJ+L1~tRqwTbv-<&b<1po$eCd=Cer3&i% zt*BjQ%nFcHgRqW580&fl?3=xd+@et+RzWI;bPvS14p>|xNXO}+vvz=C$ID#=)|_uq zm)43}-h&4IMbEYJ6PLBLo7SU7$u}x z?kW{}bNuI1)ZDP zA6b5<*bz5VYlqAcvfAi?ih0jBJ)&|Ht#ua=LO3CWS4v2+OmynZjnk^!e9dDAZNF|$ zxeWDL?&UC2?Qz|{-LB#stIh=lEfM1?K*|pTLUqX;rg?zY9SFd(FV1XZy0Vo^!C+!6 z1&nN`?7((aN*`VEkV%J;9#`qGBR2c1Jr0b`EL%VyIUijuGx_O7XRLbR{o_ryve~4U zkQ&#S7rJLRq|u6g#Y7?l*zv$LhYpnzQP1qaR3Au5PzufM1=Ht4uPI=uIgDfW+E0X} ztY*dmP-nu1c&ury6^j_Wwuo{c3d2z5uRmf!@m3#bYN@&C*hD(Qy0;tc?pNwT10pk4 zTVVd>!M{23AQ=c>g8A*RT^1V;N3CLRZQhk2&+CZq!kVG%_w~obe}e4#le)=j{g~U4G_*e&)0#JeZM0ceFv+hglX^*J>Bo^e=|B&m9p+rDF#!^dJ{O z``-2PT=IIR@=>dBNTu#xGrFm zD?hE@F5g4@5u(iXpxry{Kh?2AJa$-IFUn>+zsJQE;&Oom^^71<0#8|d?}@Tg-z)D4 z_}~!lRg=5RSg8c%!C20yv2?8;@~TJ>{RE=S;yfS(EE+T!qdUTu=_8k+d)b)pl?#~i zq8fT|Lneijgdp4{#k`O3%)T8!#DmxH zB8doRF0Wo;(!?#CR~xJgdeM$;+NOJE7uP3fR>J22CHNsMg-x7j7WVWHhTl}j)3omF zyNBd-#oGuyNi<^#L!WS*=T- zocAb9)>D{=6?V8}7QXlwYC6i2|3;ECQ&b%o?n47lH@1Bev|AJMk=F&(f4YEP+fv>H=J-50W?LD0B?jo@+>M0lD(0~@qopQ=X%$*IW)>#P2PNA7 z4HGs@T{QaK2lha6YQZq2zKgTR49UevF4QC(p=St88TLggN><-EhxIAx`Q^UaGg9VD zz^jloH9%*(J259$Q&K12K`STAJ2eg+^h);X4cj_FELSF(ZaHRoVgO}_ z?N5;>Xo*(l{so&Pnb1O2#M#_`J4?3cRTzJHoGbNie*GelKJ+i#K$B++W+Nk8{qK_9 
z@lPU=1bk=D^S0jd>X&da+&y>}wZAom3zGX$6E}sc{ec|YT_Lc<=(c>-8(ks9A`0Ch z#2Ap_-yhY3V2DrDEm3%uf4mKzkbz|0F&*u87zh|}4c`Tk&^+aPHu?7FJ{6r-ZTYZ_ z8XowU6fNQ=1DignvRz9_K!$#C$j~(nNCjRguShL=8Q%CWc!U%h-bDTQuOSrjiXqJh zJ=uqOM3{U;nC)?F0%xS-{p5d!bI4oavzvXyT}i_pf3&yX_q_f7-vi|LkWY&odV>F7 z-!d#YZmsyjzfETUYs+{1e^V;5?TNaThqX%l^@Ac2(pn~s`jz!}ww)8ctM}I& z{I;);Rn7*AlfP`TS)2BT*(ZL{f#)PDS=YLnonQ1SG!nLfnQ4=W%V6CT%U`0o>OL}Y z#s16uVb#_7Z8&9dmd-nI3I6fL?z%#smF*4%uzpg1j*yt6m(-BVy$8y@*q)cCUDTcM zSr!Dzz*f|M4Z*1(dKj|0W_^5o^aL3e=IdY#T=Jqzt{TpLqFm^h06H8L%gNYG&n4+%CPqN**)rd8>Pze>=X6y zgsZ>|{Q*Lyrej|KQbyUEjAQrs*maLj*J4 zSmA!Nn%&l9H2IYF*G=T`(jYw5y84yN%i%s4_2h+fCD-{|hf+ro=gNo4&x$^Hm=MJ1 zAWH_hp8(W`XJeL*o125nO>UD0DArc4}foAgj{rgMQ z(=AbdtLMCe`fztl1HrLD<30=T!tCRUd8c@UCw^Y2Yf3hs0{%HyL?aF%W^QnK` z+BA>qB!3?yFXML~gj!mA)ndND`fM2cDP{3$4NdYffeBvg%HaYhn1`dXF;H2(WmyR| zEgn5_Zj65U*Jh}Yx2!ct3`r7+2n#v;z?tcz4qXt#alQw%X>Dz@fu&Ol_=5Ebr6&h` z>wYNt-1(MZtR)`xs8l30u!zGAh)Ud&O$_mL<=Ef|u z814`cu#ECX`!r&V%w=_+aLRhPHRfbIv3snoH(0+v)AOV~V{YkkO2_;;88NgcY47EU zfzfBG29#uEYR}bEf9Jn@>0|0muDLqAZ0e-V_wHV} z5u)r{MRrNE+L!DkEhG`LXHwQkktHE(wonO4DM|J{LfLu0c%GPky~Wi3_x`6+^O5Vi z@B3Wm+~=I{x#yIDuxGJx2AP~fW6~uPrQL@Yhpl?4M+~BC`yvzji{nNP$=uob%qHLN zjM4|E0snl`w?ZE-kCbGY*~NHj4lfLO^%&#^-x#V2;M%K{!8N{ZVQ4P4p?-8}q3yNj z=n~uieyJn%_nY@5F~7Z}kp{HSdR03*?GNw}ya3@rx35lJJqSsHt!llgDCn1N7Gtpp~om5%Kg@U8~IdHL+OePg2QSRH+mQcyp-5Cyta@RJM5CP@km&!0 zJ74WhI-+BW!g9$J$yVI?d6!%3wP?t1#rboHwO$KAK#A>6YrS?A1ty7vfN`QI7#IeZ z6a%Aha1-N-Obbd1*tLi7LvPNb~X31S6B z5{<%w5jY887>B~3U=$Pw2V-yuEEejb2x1dl|KA7#iXyTp*8~AT zz{H3|Srf!6iX;vL14}?e(O?)14h17I7;!KhjY7dC&^RapO{}qNg4iIH)ym2($&<$*528P$c8^eJfR43`DW+e@~+Lg5^R)%O@KSY<4Hjv^*`pdv_qLX z+1Oi)SUTF-ejm=+uLS-$oI8hT9;<-R`7Wt7Ll@Z7@cF!FdY>H+7vKF{CFXi?y7~L_ z)zrp@A^EUotGTb}IUO~dK-w4Q-6`F!zl@`#>p^cj=~I~Sf#a&R5R2O@h@c|g`7{Xy zEhPDcHpey==4bX=Ds*u<5q0uy%~MtXx6p zL47+M=u1tea1&d;I30al!9WfbZP{_72N~sOpqH9{zb4?4Gq=vO-p>ni)EWFd1%9gc z8j@3nn(}z=+L$kPc)EW!ORLKteAhIdIjzNUHm%xP;2|_hdknwWzV|5m^ZfG=0SV;$ zcb6uPb}Bu8l5ZxJNS=d6uou zb1j^KQc5LS_GA$;W(v)yNBwip z+bsNVK8!6TA)J!pbdHPM09v>LNTq+?!f2#5+QJrR_uCe}bwb9j6~ee6bnz*-vf7@n z&7$&=3?tVye0j#YbfC!!7n82-P+U4Ia39>s${BOnt*B7>qtqdy>fZS8TTa_DUf()seDwrDd@5k2^{t7X&4c}+aYfG6ff!GLvq6B z$Yev#v% zh~}p><9V^&?YyVS5!o&34%$qVjfyH#ypEA!Q5nZt-TE#BxWFB!hPv%uYr}Ky-Fwp`=jrHB}P zcNuG=l6l*{3>=N$jy;1*LXQIqeravxoyL)!J^|&+{0p0oGAiBvs0U&81X;kl3fwU&!>uOSIKXBoC9u>-^7@+ ze$KeIydaA7|2Q@L!ANcIh;_!;5g9cV;z#O#c>imGTT3;fHp;S7z%n5Z7>tyNcffQd z#W^~oY%H8a9L;~3Wv;8MlZi4`kIpb>$qWl*q;setDOlpozMj%{&1r1I+*EE09<^3f zp*`i>J#`-PVq9rBef!z%TYZF0cYikJ$_p3mGrWNIrK6aiJlp#MG*WKRx>GO1DxK|e z2N~1zZKPzbf|>OeFH+m8^2nSXE6XG^@EkiKXw7(OtG)`}^&EKlc`##+@R8zyyUx_J$ehkdp^}2vLGSXzw@{v;V@zbD_XtNQ->}T@ zZqY*`F#3)DkGG|~LXa1sS!21oe#<}w`{`hcrzxlSP5DBq&0FD zyh$sf)X7a|d`9X<`n3`j*m=h-YX?h09A}cdam{@CBU#rre=GTj+O652B84NQyYT{+ zEg#2|wVVag89169ta)tY?o>B~+`sj@yvQh^*ljqe9{Uobdj{u~cQoeyq%vMn=#!*X z;CprMZ8Vw3e04uCE-A=Hr6s1^xMs1{JnO_H72m~z z7>VDGPd`s3YYQL%0z&M{WNiVoiULO?kYE@Bf&ex)B_zQ}xELBN0f%64FpLBg4k30w zXKewrL7Km^0NM=Ke{Hb-d26oinPJ8MSP%dNM3P99H9@SRAh2)*7y*Mqz%Z;B28@JB z!oW~*xCBB1i;{q1&_vR#31WjZe?<_R;rg!$Vl{i?a6!kpZ;XDHU;u$)j(|YRg1{i1 zkZX&8IAF&v;XilnMvRB-K1eVveJn*~nbLen&k0v3X5DZ9plj%B=)u&HZ}#S-7en_O zo2BP8R|L~8l(t`gUBah!s#Ud~%aUBkKZZvVD$8(1zGniyeflQO!pnq0Gwf+oRX1F# zdg+jz+NFEugSscks$SD%4eS$rvkRlZ{`A3}5JavxoJRibRi@_w(pZuDp+%K8q`jLk4m#&77Y^3E_m^E!QbIjqbg&FFy5AGH_p@$ zKcpMd>ntwJn9Vf2J&9k*PkeYbnRT!2-N%)=!agT=DL=i>WUi=|og@yvf=!SZml^cH zUjZi}EuXTN9eN|&>MVW5H;6?!LoNB9$j!U9&yXKF__D4BpRD`pm)OC`TQ^m9I;87L zF7P3uG>uWL0xrk?KN1n;+ae6Y6mU57lA7LCamK0p?d->B=^5@ z%*kaH5)?=sXi`u+dGk7Xc+_+*wS#*ItM=9jCQ<1CZlR~Vdz!IocINYzmDE`sg*zmT 
z!a*>5;QG>-8|IZNyVx(L#lx+6-f7xD@t94l8b`nNXBsw-XY&rzEI-bBwac8(;oRG? zGnme9(JU3i&^cM9JzBGTsVS4Ty5Y`@VR+CNy~+WBc0E>;c$w!Ty8gZ{56`~z<()?Rs?6lHHl873Qayo;V6G@*%zuE`X~SoykMEV_J= zEjOL_OUj_=dt?P3X1O7J`S0DrYfY6O=v2}=R6TE2puE_S zRWcAABx`%mEKRAbU9>q7_w1%H$HUjp)4I*6Om{pjA7jlu9~W`^$v0`CDhr(g}u)n#G`05p&&RZkg{fK5sQ`n`SMwO&e?L>1&5@l9G~3 z_AU-yi(ch#IxYDK#V=&bRKbc5v^-UmBe6r=FDkp?39D0tAL2EMDT z><$%Xys1Ap zr@zO!Cx0mCum-q3=sN38MsQb6Jl?LF2Qk#(bbUaXx0NK+iig0|OeG25w#-=fZ_cSkuC;p1w_mB2r-4NN~h{5Ii^Q*BcK+CO^_ z?~%#`cIWM5`lqN0{i(G;396w6)cuTeSN1>iP|zadsj<>xwirZdXyVMnt~VG2m~7Fz zWpx?f$IdWZb%-?Bmiw)Yism$9igS_k1ZSyu=6=ULUqR@yt7#g|k6X{_o-a+lwaZLX zcgog;yk1wUx%j1Xftr|1f%af7pF6+yd#kr5x@zV8>8b~Yij@!R3E`^`YmN?*nAZg9 zS-k5eu?|!Z9W|Y1XkeJ++n?hp{$~EM$RM{}B*|nYJZlhnk8cqMdDogy6m-J2Wnfp@ z;lX~(#b~k^p&6ve(S8z*Gl%%VF-=oB*}|QB6T{NDzmPMK!@RH7W!d)M6E5xNNv5{Q z_TW97nSjmYYt+R%p51vgc;qm$M6e*iv)jR{FHIl{txmNsl0}j_2D~dQmYzp?JdC`Y zw^jW*r*X1ksJveFf#&UcN}-V~25FbawD2X8+6S6A@4`V419{b>td5%-u-8bJ`gieN4rRnnSs< zYKHmb5h|;Al_zRdp-rvc%slx5N0A8(sz;t<*&AXaqozFh_p~{9TD#r0V87k|ZYeob zgj?%%IA157fvZMqo^^#IH)9e$9!H`U7(4CU4!Mp-Hi`P~Gfp=Tr>QjLWFPb4qxFeo z(0Qd9B96K&8Tolac5AtADPk5wf*PPVhgdXA40{h$i$qO z|CUOt34B(>K_9-lFjUtyw>>jwweK``dz;_-z+F5=QfHfTF4$2!Qc?18u^N(pjOT&_ z>FgD8NUFst7HnVKq=h|;o75q*6RGwh>1SI!d|mb#zZ#z{dH=2|%|57ZCed)QjQVQ^ zqlBq0xuH?B<>eY_*T(%_r_M3ZJNSDj+V;n6;V~k8)>KwW!{Cf)9PJx1^^(UgSsZOr z$G2Y}K0vmh`uQT|L;d@He92|FspfkJeck)N-qLP4cf=_2l06qaXWG?UW3r0*N&|ht zC){XXP4gC=i!qAy&RjTi(0d$Qn@!~sJ#?YE~n- z5AV*!N`&llzrXinvMvQj0plgOcjXrxL!mo;K%aQ#yIyfVS+iNY5L(OIq3#CHwLV-@ z4>?zZbhnP-`50NRg#qb>1GgUWEYTfhSEsoq zca9`9E!IqV311wK^^orhE6nL_mwItjEc>20kQv12p?7OKux+zs*90BF(Cc*1=2@w% z#pkYm_!83N-A?PPDz+nY_gmbodF^<4@tvae9Kh>htjq7+#)Zh9wqg}MmF z5-fXRaent?UiYq(riRaq4@~P--1j1(7~8KqFGq3XniP)I$a~L@$FoNtB-+V(y{I>D z3!m|^+V*YV-G)=+TjpQ!3&-VaqlvCCh(aUv~;-?t3MC?2OChi#0r(T+zyb>8Eb%q?iv=6R+6AtCCwqam?vom z^=&AJu^qC!CyAFkl zww9^9B)=DTyS~!cKt$dRSxDQ8@f~`MbJh5~?_GPam#6EvlBA;Kc|LN{lWku-Pi12w zN;9*9JYUeY!i0lWmuiQ=%GFQm9z9A-(=_Wf?v)Cfii}R~^C-&9kWUqxh9}8Njvijf z(zEiYso>muzQ^x~vANmlnUZLuIomWAD=L|XT*uDKcx*XsAiMQ#qV@Y`UWLizq1@~N z)&sfN?m59*k7=85Z;WvU4+c)(fVoV&%cV+$m6eXDpBItQrFj%@lFpNR={QI3K;hiT zjdWD5ydBHF-YL&2q|5wb%GS}NBH;xUCUhvWg5H*gPH#U29PX?Obqn)i*>m@@XGozW zIdf-|W=Y^uF4wMGwbiM9^tiL4)wa^7bGD|a=WK8H!}0`}Bk|cua^BW0w2uy5#Y=Cg zt|-1q2hVjB6!6hy2@df^OIFDD$4|{z<4q?Fgj>A2rN328gnk&g*5LB-Tl!I%yG+=6 znNvY@`lV0MMw9nd0$x87{%-dWUe2I;vr zBsLQ)HbJUc%ahdcizUwAqJ6vLmX$ zd(S!i(f2cG{i&Gvf%Ejm>ADH{Q8vBWIWvpOOm|EAB`%$W8>1DcG!JmHD(MyOp9A-v z4v4o+)sEgF>z4k&U(jazOvSVQ9@^Mf(|qC%rrSNESzNDUdRw}=tUOCF2aFM>hS_ZI zG%hd69?#T*l+<8MO*5-|7q`$z3})@|tBtsHSFu7K$`!6pA$NKIUl{zOrTy(9rzjp6I_#4Pv#r=CqnkIazip^8BYBRoqTbVPKoL_`vbh91{?Fu|O z*UqgO+#Y)>ruckH&^OnT4{U5xA>- zaTJ=xf)h#mm8rU`boD!fUSFE*ZmSioaq~FDjR|Qd{~Ccf%&r4%n4VJYTRK#Vm{DzG z3B=DzTy-l-g+1inpE2!x73*U2VA1#R0?&YsXGcwU-9x6c{$bH=8QDtl;4K5DTtZ5Y z44?bC7t;k5EJlYYJ@lg#7O>vq*^6U^>OFT7#wSyp4nDhc@8l&lvb+SZi7{Ei{mZ=6 z-ig=1{$&*<2t*GoJZ-Q}NW#VE^7BQ3|6F`VgsTHjpy;Yg{Xb1GPA;^h_g^sa|LDR_ zDWA3D%7xtz6tEn0=ciO7M`}zyJgwnjt>T(VQAq`VI))aG*pkT*C{mK$iJCC*L{ca- zOXr!|mLK@+7HGFyjX93mb%%7hi*XSVvDkZpEkf1MH%B|4>Fdkx+SVf%?h8wVE95nrTHi=UG4d2{q}oxUbXLSP*RxG0wmQ<2)>hwL%vOTMs!_jx=bgM)&bW5WW0wW) zzJ2i*+#1TOAKm}PT|_Q5fmd*;cDD}TG*ZUimLGTZg*~hweZ6N@dX7edi>i&ON0-po1zX=^JI64f*+LjbwL! 
z;Fk!rg{$oRnEPeh%ufGNXYbQeR?HmV-c|L6hESi^ABMlWEyLq=7IkGn=49mqCYNHS z!lNy9YQuYxc|uvOK5glhpP5`rnF^1!RHzN}AcOa1nzjMbAR*mN_VaBjO3)7A>B91h z5j1jm`EOf04Av3Y1fl?u{p1<+udt{Iu;jjDpCw@!$&nDTg zc-Nj4t$#!){SPld2uivbLlp1&4YOZa@N;DmszD1lDzWN0bos2;=HUp{5WXI6WfObz zh=gi*{{tc%D<%%cLE*r`AvgvDMu{PDV3a5v4nv5+VOWXf6O5auBviv;;BdkBm;g@v zmhvyDA)P4Rzo`Zk@K|lt^Ocj;n-3(R8t$xz!!2*OZyu3Q4MRU5A|-)g0ggaQfMF;Y z1S}~D963cxNTS3=aS${V^*6=_ZHjd<0i5_PP>Enw=1HpgO|t+5VZM<)!{!9m2y&FR<1 zY@}IWbi^355|`zZw7=>AYq5YL?3kAiz5Qpir2Gl}#+zj$kthKkr)|DjRwA){`g>!M z*mwQ@wVy{qbn73`Z#)ufjS|;tF!GKb1aeGVK8dnASqIt(nAE9kBgT~FRUjB=VU7KL zFZp?M{M$kU|Tpb7Ob!8+n5D}HF)bSc>4zy{7{kiD^q^1Wx}qJ9Pw{3fQm0-dKzKZ z7+4RtQdzfo0|~pvfyi|R0*D)^vfDf*Vb_Sev*5(a_<#elAdv%FZ{>L`_P+Ye$g9xj?)*F;` z`%i=NLoL{^4Ep(a|L;V9RU&0^L>shPfAlK@)|zO21|{6Fk%<2z1O6ja|1@slE>jafrjBeY(wU|`c=b+csCg<}6vF|;vT2n%P{+me(-v@L68 zG`}+9=iP;H?p>SwuMt2g0WsN-Fu&5Jtb;!MZatKp>Y3G=BI8 literal 0 HcmV?d00001 diff --git a/tests/fixtures/graphs/graph1.aiida b/tests/fixtures/graphs/graph1.aiida index 77198fe69cdd34155b775b415ece900460114c2e..56164574e3d03e53b01f21d3555a8c1ab3a89ed8 100644 GIT binary patch literal 8262 zcmbVS2{@E%8@4Ov*ejKglOjH5U#Kimp(H|s;+utJttOI_GL%9RDrqW&N|r*FL}`(| zNsBE}Vk*BoWG*{~0@6T(U3&xkU$|HWbZnSk$#UM0 zZn4WdSnGWly(pP549*`0;R?G>v~JoWcJ_FL=ET7rPrqv2+(1m|v68)E*>c~x>&I*R z{5top&U;>40uF9Fm*->M5uCRDkJ4{OG}wh`y?sn%(V2nlb|VL~&1lEPuQh_scN#no zdvT;(Zk_)8^_|p;{_+=jA5Bj0`-t1rmWxZ%>ulbCXw`w1$hHy;SU7NhLXURK=)oe3WZujdd{r$Fjd4E`8yK0F+KPVAqf}l8Ooi4t!z*wqljI7qE)KL4VKZ9K#mzIIn?B#NLiFTAtW>xa%rt-|B|7G24@X2O3~EY*V-_~X z59?T4JSuu0o2ah0>cEkkNp2z8=EH7EpjnB5lBZfhIN3M3aGiHOc)`QX0`8NETyzc( z81;0nPuKRM?=sVQw#6ZqvA03#n&Uk;BTF;QFLbF2dW+ZDRX4a>n@Vs~lGf}hD@z17YN z#n+b>b)9R>levyw6?5gcO?4w1Q(wtd-5!t%3i^StrvxkJ`qXWA_Wv9lHqyAGuQ911 z3Rd>mVayRz{-S1iLYmh_3vJAKZAHo7Z&jst&v{H=N|otV(Sx0uL#o&szTGt4b1U^x zoYin<{~41^_rhhLR^3i0&**vfszQ!S5*M}@#nCSSj^K1TyRd&IzAD8Vhud`vRhqfxVYQ+=pBBK=+Q4}vo z+-=sOyL)dp6${d>C3(MX+iI4w!u(RkuKdVemfuilPVuEE=@Klp;^(}JYqGAm+a7H+ zQHttW-Ni)Jr>gQzI8=+qjKKJqqCWJoZs%g6Rv*1GbAY%d2zE`sJgCiVRO^Y zM+e2~)SNtE9exA)xRvKKSRwC{YV^l@ZiKjBGn`oS$9R&9r?*d@Txx)Id+D(10Z*%l z1LiVD>lY=hP1yEEWBhtPuSDmVUWa!*uxtXa& z)xAP$CmAQ&--+~FZq^nXYDtWj*D}!|_NVGhWYZp*4j(E`}MBjH3# zm70(YZTtI-P4m~mVtfM^C(bK5E#LT+^`@T|7Gts}Jbv}(Z!P1^akfs))jn(Nd)+-N_vuREKx@K4I`GRr37e}9@W&bL89dwO6_q)p-k&eeJZp97O z9)I7A$?}hKxGHU2dKX`>zo){lT$=G9^?CeaeDJXXchJ+R|7F#f(0ToRd-Wt{H{O8BfNeoec9c5yuWJ2 zhVp_B{quJ2B!5(a{kpJZ3_d#ecJLd^L|Lrqwx-Rc091Iio%!MLZK_^=Rle{e=}zMkV)6 zIC!YN3{=~@K>p~#LsblzeyGmsBP_adrSqe`YW?|-_Ey~-@_AFs=-Uy~XXI1f9r<>Z zkIj!axT50zFN_<-gf|0Q4x_)NfD9X*cZ4Tl?HMz@e^BX{X5_aO<8k7d9F%IA%|i4ph&5GZuTO>!HR(HU8(4+{;*6 zVVR9++SZb_cFn;%gRtvfe@m1;D{sC3V=Oa1=VWrl+TcF3p1HbqV50$3%n_R~^6Vpm zX|cgp^^#uNA#!i4>t^7JM3=Fet8Le3GbzbW5?8|`RhD@gOC$}IDJ}1LTIV}>(Qfk! zpY%ny6x2fwUrKyGLQ}rBIoh5io9%?Q`Ou9?6~Au7`bx#nr6NHe<7%NT}^jMWB9TQY6pniAKd-1@t>)KJE6 z59R@|-(^L=<}_Z;uVPxZxvSncgzbPaA6B|} zxvwN%9hvakasSLZR+LmkzNk?WLcRyPyR!55XXf}$Nz3@2E5w12Mb|H*D^u2QiSbDw zwfhb!g~o*taq+&a98U*Er*9a%;LGPT6-x)&#NXyi2VITXqZs7s5$x{lKI7ne=XoJG3kl08_XkpOUP96K)@o&2r7!o*5@f?P@V{gRFFlmE0`>7`FNR_j>M736o5h|AOJLo z!~g_54hR1AVZCSgFluwvqgfJg;d1bcrGutJuPCt^Ahg=JR~iHyJlXfh59;63qV z07=H6Nn{clkHwRPQ1L`Sq=GDhz5B_+&Iz7~=}06AO9sePG?^`egaq(#3<@A1uxK({ z4g!r5M#U2WkqWX1_Spmq8<{6!I+6e)umA>*VDAYy9%SzcJQ@IzI5--_;qX+nu+xMm z0wNV;5$ux_C9IHnBBmp;1SAzeQ{n7Afu~Ra5(dFOr^r|o9*3jgP#{`JIpMgc6GW;# zV6eNlf8ahEmvs6~M9#{@RtYwl82j7w|0kvT1=2zcX!~jPH4-9%?^E7MPNEu8A+FU_ zBngjz17s`#4WK=#Q~;!sL4blLfJhXIghwHvC473&f2D$)aQ5Yf6L$Ra5o)|2gghnC=!m5@h0wUF4B1FYh#09@#V^=NvQ4kq0oBYXn-NwQE z?-y>9k&{nG_%ik<*SXu-@WW=>{3*;7te5@vj?eV-p#NOLNgFx+OwRHi`_I3bO3sM? 
z24ityQzjECOiqR~+LY;p3d3+=QzjHD%#911GNn*q5nR}mNrei_=E9~-D^%E19&GXm zf(rYS3!5^vP+=OfoCHjnT*0u(!;x1d>$os>>oH||1tTZ7C=VGp0|~(d3#OjjvOMbY z*;4=O2Idzn%l>nEYj8}m;F_xMFwKIg`9&X|E%hu#Gr9a6Q!RLA{Gz|lKteFt zf~omMTmHt$#OzGBV1D*o|5iz-x%?axE*Qx#`tA%Q1XC`UnqRbx97pu*OuArxe$g&m zevWAujN}&`IRgp7#0#e87yV?m)Uzya4SCL@a!kJ9nemIZnt_C1`UO+-i%yy?^(;ld zLxm7cHm2SuTjqUf2aT*(OXslg5KMTk5|z4V?M^@3w(6Oh$O#|33(~i4Z1< ze@7`Oa#!oGhJrKCf7&VjbvBxT{MW8P*rfQ2>&R@WXIWe?xcr<31ghwX8Au2XiC}7e z(bh_wOw6u95zNn@=@c$Mr(qF{)GQo!?km$UtD&bYKTv; zZCDOo+MF_1`LDkfcxJEKokZxY;Jyfk4hN5UfC29soK?}HWFGx}xsX+b{jr=iKCe6J z_9c6j0o}Jc_G|c@>EDWR!)#pjoNWUpxD4iYj3-~svveIyEq*%OEKNv%{uw!mNJ=4Q zFIUB3(U#e72u>=O;*7%~LaVlxdmfhBy?!trStxRMN2okjU@SQqD0Cc(r~;(K4I5D) z{(NeLVqf@ByIi)`#z3hq@$y~iY&MpXH+`sULc-!PEp?lkA#N?gE!9W2Q?(6_R^QV# zOis+{1?=gyi=ezG-UCNYGhWmZ^wT{XQKJ(gGfl@yC1@<3k}TIQBA(?JjvDM?>8Yz8 ze?|}@OJ$*?+4Z9eCG?&P+L-i~u#X#qp7eKh5SHyaG;BO)>rU$mn#;w>UFFG)A_;S_ zv(M&)#AlrH7ZdL>(dz4_eyN<8LA0R~(A5)*DvV&)qPur$_Ah`-AG>SsT6lM)NpSCbqaeM4D$99MKYg ztjJ?qqam2Ub<8Bc!?GZB%dTdL-h9}*xz(}2oR;;v*7AiY578vNOh-{%pEyf4lDSCm zBJJ^8-fE`C?e^CVINGfv3&JpqcZzDg#>3e1?)W6mcq0r)EB@}CN~h+^ujhZt|Nc^Y z(-o4cz%?mvPcwtQHlwJ?-j4KA2yB&*Kqfh2`GZ9xs#=r+qfVyn|DEGsmh9r^j~6>w zJQ$NNvk9pySDy!W!y~FTvsm1jb9q^LHlkMEAK=H0cF2te zC}GH~Jtk|-g#|7ivxAJNSsQI0u>h7ZKpqN^>3itTN^Q?YH0JES76;Q~?Y#N;$a}?H zUoJwT3(UUYa*rKM32zI6ZsX0Sgitgl4CVadkf~m6e#FZHwaD+$Nj0xMfGJs)`=e;uM{w|QM4LofI-7!fb} zw8{2MILN7d`^8Iq5(uDCACt6euW?cZ1(eZ-davWp z<34o7KDP-|2JI&8PA*H{dz3-vEID%9g$i$7ZjhMNRsa{&E~6s0arWt9=dxsacDRlF z;;k73HzF}oAT>=ef(mQjA>_rC4U~O8E2=@$asxJ2*0MpMbCj1wy091>FL7*ODSCjc zw*4Y^XtNl>25aoDHF};mB^Zk{gb@1}W`hPwiXax4D9pg1L z$w7VKqv6!4Cigq&Yb#TXD?Emv%kcHSjhV<|bg{SpH>A;aoAO#$%9VXJVy*14A9Jijlc`iIzk;rT*SodWl>8 zxo%h7Qd9RT&2gNiR(twnqUa4I~S#K8}S^x#O@AC9{K`rFWg8 zw~z4J`?5Z^tDydrBHDc|v9RDJ9(68(LzFB6;)WvVv5p_1nA7)+3)uZl(H8XycPor1 z$H%OOfj32pwCd&Sw&M#1=X`GN8hDTj%R$G0#<3@!w)AH6*cI|LgMp6D)>V@?<{f(+ zLv@m4e>MtbX{5r~>1RL8U2s0fiG^<|(iT}^bS{MlDAw?CwbIc_3)#GGdRW_y=cqdb z`}JPz77S~?=q$X0)GlfJm}x2gCFu~?c!;VI7#`+Xmx*`czw{Y?!qj)^&s}OtV^H6S z3IMcFhzfLh?f!*b zW%$P-2lNUpMgN@}{R8HJW>CLaNHP8*c(O`Cs(f&P|4*bSTo`8Je>U;pqFfH6gKwlj)NRrG&`CP;ZQz{0wav-DD_v?_RHCl<{liX zVp4!sG05*X`ifZ6#3P`5K56J88_TcS3 Date: Sun, 12 Apr 2020 19:38:56 +0200 Subject: [PATCH 49/54] Fix bug in `QueryBuilder` when joining `Group` on tuple of classes The recent addition of support for subclassing of `Group` entities to the `QueryBuilder` introduced a bug when joining a tuple of entities to an appended `Group` clause, e.g.: builder = QueryBuilder().append(Group, tag='group) builder.append((orm.Float, orm.Int), with_group='group') This would cause an exception in `QueryBuilder._get_connecting_node` that would assume that `self._path[index]['entity_type']` would always be a string, but it can be a tuple as well. 
---
 aiida/orm/querybuilder.py |  8 +++++---
 tests/orm/test_groups.py  | 19 +++++++++++++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/aiida/orm/querybuilder.py b/aiida/orm/querybuilder.py
index 701bdf89ed..8a599522f6 100644
--- a/aiida/orm/querybuilder.py
+++ b/aiida/orm/querybuilder.py
@@ -1723,12 +1723,14 @@ def _get_connecting_node(self, index, joining_keyword=None, joining_value=None,
         :param joining_value: the tag of the nodes to be joined
         """
         # Set the calling entity - to allow for the correct join relation to be set
-        if self._path[index]['entity_type'].startswith(GROUP_ENTITY_TYPE_PREFIX):
+        entity_type = self._path[index]['entity_type']
+
+        if isinstance(entity_type, str) and entity_type.startswith(GROUP_ENTITY_TYPE_PREFIX):
             calling_entity = 'group'
-        elif self._path[index]['entity_type'] not in ['computer', 'user', 'comment', 'log']:
+        elif entity_type not in ['computer', 'user', 'comment', 'log']:
             calling_entity = 'node'
         else:
-            calling_entity = self._path[index]['entity_type']
+            calling_entity = entity_type

         if joining_keyword == 'direction':
             if joining_value > 0:
diff --git a/tests/orm/test_groups.py b/tests/orm/test_groups.py
index 46f4ddf232..0741ab023c 100644
--- a/tests/orm/test_groups.py
+++ b/tests/orm/test_groups.py
@@ -359,6 +359,25 @@ def test_querying():
         assert orm.QueryBuilder().append(orm.Group).count() == 3
         assert orm.QueryBuilder().append(orm.Group, filters={'type_string': 'custom.group'}).count() == 1

+    @staticmethod
+    def test_querying_node_subclasses():
+        """Test querying for the nodes a group contains using multiple node classes."""
+        group = orm.Group(label='group').store()
+        data_int = orm.Int().store()
+        data_str = orm.Str().store()
+        data_bool = orm.Bool().store()
+
+        group.add_nodes([data_int, data_str, data_bool])
+
+        builder = orm.QueryBuilder().append(orm.Group, tag='group')
+        builder.append((orm.Int, orm.Str), with_group='group', project='id')
+        results = [entry[0] for entry in builder.iterall()]
+
+        assert len(results) == 2
+        assert data_int.pk in results
+        assert data_str.pk in results
+        assert data_bool.pk not in results
+
     @staticmethod
     def test_query_with_group():
         """Docs."""

From a1bef8ee454d03fa5e3ab4fde2130490d2c78687 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Sun, 12 Apr 2020 19:44:08 +0200
Subject: [PATCH 50/54] Deprecate `--group-type` option in favor of
 `--type-string`

The `--group-type` option in `verdi group list` and `verdi group path ls` has
been deprecated and superseded by `-T/--type-string`. The recently added
support for subclassing of the `Group` ORM class makes it useful to query for
specific subclasses based on the type strings of groups. Since type strings
can be hierarchical, the new option supports the SQL wildcards `%` and `_` to
perform `like` matching instead of exact matching.

Note that this new filtering functionality is only available for `verdi group
list` and not for `verdi group path ls`, as the `GroupPath` utility does not
support subclassing yet.
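As an illustrative sketch (hypothetical invocations, not part of this patch),
the new option can be used as follows, where `%` matches any number of
characters and `_` exactly one:

    verdi group list -T core          # exact match on type string `core`
    verdi group list -T 'core.%'      # like-match: all `core.` subclasses
    verdi group list -T 'core.aut_'   # like-match: e.g. `core.auto`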
---
 aiida/cmdline/commands/cmd_group.py      | 131 +++++++++++++----------
 aiida/cmdline/params/options/__init__.py |  18 +++-
 2 files changed, 87 insertions(+), 62 deletions(-)

diff --git a/aiida/cmdline/commands/cmd_group.py b/aiida/cmdline/commands/cmd_group.py
index e48c361b33..11ca95e4bf 100644
--- a/aiida/cmdline/commands/cmd_group.py
+++ b/aiida/cmdline/commands/cmd_group.py
@@ -8,7 +8,6 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """`verdi group` commands"""
-
 import click

 from aiida.common.exceptions import UniquenessError
@@ -179,74 +178,89 @@ def group_show(group, raw, limit, uuid):


 @verdi_group.command('list')
-@options.ALL_USERS(help='Show groups for all users, rather than only for the current user')
-@click.option(
-    '-u',
-    '--user',
-    'user_email',
-    type=click.STRING,
-    help='Add a filter to show only groups belonging to a specific user'
-)
-@click.option('-a', '--all-types', is_flag=True, default=False, help='Show groups of all types')
+@options.ALL_USERS(help='Show groups for all users, rather than only for the current user.')
+@options.USER(help='Add a filter to show only groups belonging to a specific user.')
+@options.ALL(help='Show groups of all types.')
 @click.option(
     '-t',
     '--type',
     'group_type',
-    default='core',
+    default=None,
     help='Show groups of a specific type, instead of user-defined groups. Start with semicolon if you want to '
-    'specify aiida-internal type'
+    'specify aiida-internal type. [deprecated: use `--type-string` instead. Will be removed in 2.0.0]'
 )
+@options.TYPE_STRING()
 @click.option(
-    '-d', '--with-description', 'with_description', is_flag=True, default=False, help='Show also the group description'
+    '-d',
+    '--with-description',
+    'with_description',
+    is_flag=True,
+    default=False,
+    help='Show also the group description.'
 )
-@click.option('-C', '--count', is_flag=True, default=False, help='Show also the number of nodes in the group')
-@options.PAST_DAYS(help='add a filter to show only groups created in the past N days', default=None)
+@click.option('-C', '--count', is_flag=True, default=False, help='Show also the number of nodes in the group.')
+@options.PAST_DAYS(help='Add a filter to show only groups created in the past N days.', default=None)
 @click.option(
     '-s',
     '--startswith',
     type=click.STRING,
     default=None,
-    help='add a filter to show only groups for which the name begins with STRING'
+    help='Add a filter to show only groups for which the label begins with STRING.'
 )
 @click.option(
     '-e',
     '--endswith',
     type=click.STRING,
     default=None,
-    help='add a filter to show only groups for which the name ends with STRING'
+    help='Add a filter to show only groups for which the label ends with STRING.'
 )
 @click.option(
     '-c',
     '--contains',
     type=click.STRING,
     default=None,
-    help='add a filter to show only groups for which the name contains STRING'
+    help='Add a filter to show only groups for which the label contains STRING.'
) @options.ORDER_BY(type=click.Choice(['id', 'label', 'ctime']), default='id') @options.ORDER_DIRECTION() -@options.NODE(help='Show only the groups that contain the node') +@options.NODE(help='Show only the groups that contain the node.') @with_dbenv() def group_list( - all_users, user_email, all_types, group_type, with_description, count, past_days, startswith, endswith, contains, - order_by, order_dir, node + all_users, user, all_entries, group_type, type_string, with_description, count, past_days, startswith, endswith, + contains, order_by, order_dir, node ): """Show a list of existing groups.""" - # pylint: disable=too-many-branches,too-many-arguments, too-many-locals + # pylint: disable=too-many-branches,too-many-arguments,too-many-locals,too-many-statements import datetime - from aiida.common.escaping import escape_for_sql_like - from aiida.common import timezone - from aiida.orm import Group - from aiida.orm import QueryBuilder - from aiida.orm import User + import warnings from aiida import orm + from aiida.common import timezone + from aiida.common.escaping import escape_for_sql_like + from aiida.common.warnings import AiidaDeprecationWarning from tabulate import tabulate - query = QueryBuilder() + builder = orm.QueryBuilder() filters = {} - # Specify group types - if not all_types: - filters = {'type_string': {'==': group_type}} + if group_type is not None: + warnings.warn('`--group-type` is deprecated, use `--type-string` instead', AiidaDeprecationWarning) # pylint: disable=no-member + + if type_string is not None: + raise click.BadOptionUsage('group-type', 'cannot use `--group-type` and `--type-string` at the same time.') + else: + type_string = group_type + + # Have to specify the default for `type_string` here instead of directly in the option otherwise it will always + # raise above if the user specifies just the `--group-type` option. Once that option is removed, the default can + # be moved to the option itself. 
+ if type_string is None: + type_string = 'core' + + if not all_entries: + if '%' in type_string or '_' in type_string: + filters['type_string'] = {'like': type_string} + else: + filters['type_string'] = type_string # Creation time if past_days: @@ -261,26 +275,25 @@ def group_list( if contains: filters['or'].append({'label': {'like': '%{}%'.format(escape_for_sql_like(contains))}}) - query.append(Group, filters=filters, tag='group', project='*') + builder.append(orm.Group, filters=filters, tag='group', project='*') # Query groups that belong to specific user - if user_email: - user = user_email + if user: + user_email = user.email else: # By default: only groups of this user - user = orm.User.objects.get_default().email + user_email = orm.User.objects.get_default().email # Query groups that belong to all users if not all_users: - query.append(User, filters={'email': {'==': user}}, with_group='group') + builder.append(orm.User, filters={'email': {'==': user_email}}, with_group='group') # Query groups that contain a particular node if node: - from aiida.orm import Node - query.append(Node, filters={'id': {'==': node.id}}, with_group='group') + builder.append(orm.Node, filters={'id': {'==': node.id}}, with_group='group') - query.order_by({Group: {order_by: order_dir}}) - result = query.all() + builder.order_by({orm.Group: {order_by: order_dir}}) + result = builder.all() projection_lambdas = { 'pk': lambda group: str(group.pk), @@ -306,9 +319,13 @@ def group_list( for group in result: table.append([projection_lambdas[field](group[0]) for field in projection_fields]) - if not all_types: - echo.echo_info('If you want to see the groups of all types, please add -a/--all-types option') - echo.echo(tabulate(table, headers=projection_header)) + if not all_entries: + echo.echo_info('to show groups of all types, use the `-a/--all` option.') + + if not table: + echo.echo_info('no groups found matching the specified criteria.') + else: + echo.echo(tabulate(table, headers=projection_header)) @verdi_group.command('create') @@ -356,36 +373,34 @@ def verdi_group_path(): @verdi_group_path.command('ls') @click.argument('path', type=click.STRING, required=False) -@click.option('-R', '--recursive', is_flag=True, default=False, help='Recursively list sub-paths encountered') -@click.option('-l', '--long', 'as_table', is_flag=True, default=False, help='List as a table, with sub-group count') +@options.TYPE_STRING(default='core', help='Filter to only include groups of this type string.') +@click.option('-R', '--recursive', is_flag=True, default=False, help='Recursively list sub-paths encountered.') +@click.option('-l', '--long', 'as_table', is_flag=True, default=False, help='List as a table, with sub-group count.') @click.option( - '-d', '--with-description', 'with_description', is_flag=True, default=False, help='Show also the group description' + '-d', + '--with-description', + 'with_description', + is_flag=True, + default=False, + help='Show also the group description.' ) @click.option( '--no-virtual', 'no_virtual', is_flag=True, default=False, - help='Only show paths that fully correspond to an existing group' -) -@click.option( - '-t', - '--type', - 'group_type', - default='core', - help='Show groups of a specific type, instead of user-defined groups. Start with semicolumn if you want to ' - 'specify aiida-internal type' + help='Only show paths that fully correspond to an existing group.' 
 )
 @click.option('--no-warn', is_flag=True, default=False, help='Do not issue a warning if any paths are invalid.')
 @with_dbenv()
-def group_path_ls(path, recursive, as_table, no_virtual, group_type, with_description, no_warn):
-    # pylint: disable=too-many-arguments
+def group_path_ls(path, type_string, recursive, as_table, no_virtual, with_description, no_warn):
+    # pylint: disable=too-many-arguments,too-many-branches
     """Show a list of existing group paths."""
     from aiida.plugins import GroupFactory
     from aiida.tools.groups.paths import GroupPath, InvalidPath

     try:
-        path = GroupPath(path or '', cls=GroupFactory(group_type), warn_invalid_child=not no_warn)
+        path = GroupPath(path or '', cls=GroupFactory(type_string), warn_invalid_child=not no_warn)
     except InvalidPath as err:
         echo.echo_critical(str(err))
diff --git a/aiida/cmdline/params/options/__init__.py b/aiida/cmdline/params/options/__init__.py
index 9d7f77927e..708930028f 100644
--- a/aiida/cmdline/params/options/__init__.py
+++ b/aiida/cmdline/params/options/__init__.py
@@ -27,10 +27,10 @@
     'EXPORT_FORMAT', 'ARCHIVE_FORMAT', 'NON_INTERACTIVE', 'DRY_RUN', 'USER_EMAIL', 'USER_FIRST_NAME', 'USER_LAST_NAME',
     'USER_INSTITUTION', 'DB_BACKEND', 'DB_ENGINE', 'DB_HOST', 'DB_PORT', 'DB_USERNAME', 'DB_PASSWORD', 'DB_NAME',
     'REPOSITORY_PATH', 'PROFILE_ONLY_CONFIG', 'PROFILE_SET_DEFAULT', 'PREPEND_TEXT', 'APPEND_TEXT', 'LABEL',
-    'DESCRIPTION', 'INPUT_PLUGIN', 'CALC_JOB_STATE', 'PROCESS_STATE', 'EXIT_STATUS', 'FAILED', 'LIMIT', 'PROJECT',
-    'ORDER_BY', 'PAST_DAYS', 'OLDER_THAN', 'ALL', 'ALL_STATES', 'ALL_USERS', 'GROUP_CLEAR', 'RAW', 'HOSTNAME',
-    'TRANSPORT', 'SCHEDULER', 'USER', 'PORT', 'FREQUENCY', 'VERBOSE', 'TIMEOUT', 'FORMULA_MODE', 'TRAJECTORY_INDEX',
-    'WITH_ELEMENTS', 'WITH_ELEMENTS_EXCLUSIVE'
+    'DESCRIPTION', 'INPUT_PLUGIN', 'CALC_JOB_STATE', 'PROCESS_STATE', 'PROCESS_LABEL', 'TYPE_STRING', 'EXIT_STATUS',
+    'FAILED', 'LIMIT', 'PROJECT', 'ORDER_BY', 'PAST_DAYS', 'OLDER_THAN', 'ALL', 'ALL_STATES', 'ALL_USERS',
+    'GROUP_CLEAR', 'RAW', 'HOSTNAME', 'TRANSPORT', 'SCHEDULER', 'USER', 'PORT', 'FREQUENCY', 'VERBOSE', 'TIMEOUT',
+    'FORMULA_MODE', 'TRAJECTORY_INDEX', 'WITH_ELEMENTS', 'WITH_ELEMENTS_EXCLUSIVE'
 )

 TRAVERSAL_RULE_HELP_STRING = {
@@ -333,6 +333,16 @@ def decorator(command):
     help='Only include entries whose process label matches this filter.'
 )

+TYPE_STRING = OverridableOption(
+    '-T',
+    '--type-string',
+    'type_string',
+    type=click.STRING,
+    required=False,
+    help='Only include entries whose type string matches this filter. Can include `_` to match a single arbitrary '
+    'character or `%` to match any number of characters.'
+)
+
 EXIT_STATUS = OverridableOption(
     '-E', '--exit-status', 'exit_status', type=click.INT, help='Only include entries with this exit status.'
 )

From 5479bbcd4647b5aa29a8ef60063136cf5bc11795 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Sun, 12 Apr 2020 19:52:50 +0200
Subject: [PATCH 51/54] Add support for subclassing to `GroupParamType` CLI
 param type

This will allow options and arguments that are supposed to match `Group`
instances to narrow the scope of subclasses that are to be matched. For
example, an option or argument with the following type:

    GroupParamType(sub_classes=('aiida.groups:core.auto',))

will only match group instances that are a subclass of `AutoGroup`. Any other
`Group` subclasses will not be matched.
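A minimal sketch (hypothetical command and option names, not part of this
patch) of how a CLI command could use the narrowed parameter type:

    import click

    from aiida.cmdline.params.types import GroupParamType

    @click.command()
    @click.option('--group', type=GroupParamType(sub_classes=('aiida.groups:core.auto',)))
    def inspect(group):
        """Accept identifiers that resolve to `AutoGroup` instances only."""
        click.echo(group.label)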
--- aiida/cmdline/params/types/group.py | 43 +++-- tests/cmdline/params/types/test_group.py | 192 ++++++++++++++--------- 2 files changed, 150 insertions(+), 85 deletions(-) diff --git a/aiida/cmdline/params/types/group.py b/aiida/cmdline/params/types/group.py index 6150f6d062..9541d3e7c2 100644 --- a/aiida/cmdline/params/types/group.py +++ b/aiida/cmdline/params/types/group.py @@ -7,12 +7,10 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### -""" -Module for custom click param type group -""" - +"""Module for custom click param type group.""" import click +from aiida.common.lang import type_check from aiida.cmdline.utils.decorators import with_dbenv from .identifier import IdentifierParamType @@ -23,29 +21,50 @@ class GroupParamType(IdentifierParamType): name = 'Group' - def __init__(self, create_if_not_exist=False): + def __init__(self, create_if_not_exist=False, sub_classes=('aiida.groups:core',)): + """Construct the parameter type. + + The `sub_classes` argument can be used to narrow the set of subclasses of `Group` that should be matched. By + default all subclasses of `Group` will be matched, otherwise it is restricted to the subclasses that correspond + to the entry point names in the tuple of `sub_classes`. + + To prevent having to load the database environment at import time, the actual loading of the entry points is + deferred until the call to `convert` is made. This is to keep the command line autocompletion light and + responsive. The entry point strings will be validated, however, to see if they correspond to known entry points. + + :param create_if_not_exist: boolean, if True, will create the group if it does not yet exist. By default the + group created will be of class `Group`, unless another subclass is specified through `sub_classes`. Note + that in this case, only a single entry point name can be specified + :param sub_classes: a tuple of entry point strings from the `aiida.groups` entry point group. + """ + type_check(sub_classes, tuple, allow_none=True) + + if create_if_not_exist and len(sub_classes) > 1: + raise ValueError('`sub_classes` can at most contain one entry point if `create_if_not_exist=True`') + self._create_if_not_exist = create_if_not_exist - super().__init__() + super().__init__(sub_classes=sub_classes) @property def orm_class_loader(self): - """ - Return the orm entity loader class, which should be a subclass of OrmEntityLoader. This class is supposed - to be used to load the entity for a given identifier + """Return the orm entity loader class, which should be a subclass of `OrmEntityLoader`. + + This class is supposed to be used to load the entity for a given identifier. - :return: the orm entity loader class for this ParamType + :return: the orm entity loader class for this `ParamType` """ from aiida.orm.utils.loaders import GroupEntityLoader return GroupEntityLoader @with_dbenv() def convert(self, value, param, ctx): - from aiida.orm import Group try: group = super().convert(value, param, ctx) except click.BadParameter: if self._create_if_not_exist: - group = Group(label=value) + # The particular subclass to load will be stored in `_sub_classes` as loaded by `convert` of the super. 
+ cls = self._sub_classes[0] + group = cls(label=value) else: raise diff --git a/tests/cmdline/params/types/test_group.py b/tests/cmdline/params/types/test_group.py index d40385682a..1fca1786e9 100644 --- a/tests/cmdline/params/types/test_group.py +++ b/tests/cmdline/params/types/test_group.py @@ -7,81 +7,127 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=redefined-outer-name,unused-variable,unused-argument """Tests for the `GroupParamType`.""" -from aiida.backends.testbase import AiidaTestCase +import click +import pytest + from aiida.cmdline.params.types import GroupParamType -from aiida.orm import Group +from aiida.orm import Group, AutoGroup, ImportGroup from aiida.orm.utils.loaders import OrmEntityLoader -class TestGroupParamType(AiidaTestCase): - """Tests for the `GroupParamType`.""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - """ - Create some groups to test the GroupParamType parameter type for the command line infrastructure - We create an initial group with a random name and then on purpose create two groups with a name - that matches exactly the ID and UUID, respectively, of the first one. This allows us to test - the rules implemented to solve ambiguities that arise when determing the identifier type - """ - super().setUpClass(*args, **kwargs) - - cls.param = GroupParamType() - cls.entity_01 = Group(label='group_01').store() - cls.entity_02 = Group(label=str(cls.entity_01.pk)).store() - cls.entity_03 = Group(label=str(cls.entity_01.uuid)).store() - - def test_get_by_id(self): - """ - Verify that using the ID will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.pk) - result = self.param.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_get_by_uuid(self): - """ - Verify that using the UUID will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.uuid) - result = self.param.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_get_by_label(self): - """ - Verify that using the LABEL will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.label) - result = self.param.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_ambiguous_label_pk(self): - """ - Situation: LABEL of entity_02 is exactly equal to ID of entity_01 - - Verify that using an ambiguous identifier gives precedence to the ID interpretation - Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL - """ - identifier = '{}'.format(self.entity_02.label) - result = self.param.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - identifier = '{}{}'.format(self.entity_02.label, OrmEntityLoader.label_ambiguity_breaker) - result = self.param.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_02.uuid) - - def test_ambiguous_label_uuid(self): - """ - Situation: LABEL of entity_03 is exactly equal to UUID of entity_01 - - Verify that using an ambiguous identifier gives precedence to the UUID interpretation - Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL - """ - identifier = '{}'.format(self.entity_03.label) - result = self.param.convert(identifier, None, None) - 
self.assertEqual(result.uuid, self.entity_01.uuid)
-
-        identifier = '{}{}'.format(self.entity_03.label, OrmEntityLoader.label_ambiguity_breaker)
-        result = self.param.convert(identifier, None, None)
-        self.assertEqual(result.uuid, self.entity_03.uuid)
+@pytest.fixture
+def parameter_type():
+    """Return an instance of the `GroupParamType`."""
+    return GroupParamType()
+
+
+@pytest.fixture
+def setup_groups(clear_database_before_test):
+    """Create some groups to test the `GroupParamType` parameter type for the command line infrastructure.
+
+    We create an initial group with a random name and then on purpose create two groups with a name that matches
+    exactly the ID and UUID, respectively, of the first one. This allows us to test the rules implemented to solve
+    ambiguities that arise when determining the identifier type.
+    """
+    entity_01 = Group(label='group_01').store()
+    entity_02 = AutoGroup(label=str(entity_01.pk)).store()
+    entity_03 = ImportGroup(label=str(entity_01.uuid)).store()
+    return entity_01, entity_02, entity_03
+
+
+def test_get_by_id(setup_groups, parameter_type):
+    """Verify that using the ID will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_groups
+    identifier = '{}'.format(entity_01.pk)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_get_by_uuid(setup_groups, parameter_type):
+    """Verify that using the UUID will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_groups
+    identifier = '{}'.format(entity_01.uuid)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_get_by_label(setup_groups, parameter_type):
+    """Verify that using the LABEL will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_groups
+    identifier = '{}'.format(entity_01.label)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_ambiguous_label_pk(setup_groups, parameter_type):
+    """Situation: LABEL of entity_02 is exactly equal to ID of entity_01.
+
+    Verify that using an ambiguous identifier gives precedence to the ID interpretation. Appending the special
+    ambiguity breaker character will force the identifier to be treated as a LABEL.
+    """
+    entity_01, entity_02, entity_03 = setup_groups
+    identifier = '{}'.format(entity_02.label)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+    identifier = '{}{}'.format(entity_02.label, OrmEntityLoader.label_ambiguity_breaker)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_02.uuid
+
+
+def test_ambiguous_label_uuid(setup_groups, parameter_type):
+    """Situation: LABEL of entity_03 is exactly equal to UUID of entity_01.
+
+    Verify that using an ambiguous identifier gives precedence to the UUID interpretation. Appending the special
+    ambiguity breaker character will force the identifier to be treated as a LABEL.
+ """ + entity_01, entity_02, entity_03 = setup_groups + identifier = '{}'.format(entity_03.label) + result = parameter_type.convert(identifier, None, None) + assert result.uuid == entity_01.uuid + + identifier = '{}{}'.format(entity_03.label, OrmEntityLoader.label_ambiguity_breaker) + result = parameter_type.convert(identifier, None, None) + assert result.uuid == entity_03.uuid + + +def test_create_if_not_exist(setup_groups): + """Test the `create_if_not_exist` constructor argument.""" + label = 'non-existing-label-01' + parameter_type = GroupParamType(create_if_not_exist=True) + result = parameter_type.convert(label, None, None) + assert isinstance(result, Group) + + label = 'non-existing-label-02' + parameter_type = GroupParamType(create_if_not_exist=True, sub_classes=('aiida.groups:core.auto',)) + result = parameter_type.convert(label, None, None) + assert isinstance(result, AutoGroup) + + # Specifying more than one subclass when `create_if_not_exist=True` is not allowed. + with pytest.raises(ValueError): + GroupParamType(create_if_not_exist=True, sub_classes=('aiida.groups:core.auto', 'aiida.groups:core.import')) + + +@pytest.mark.parametrize(('sub_classes', 'expected'), ( + (None, (True, True, True)), + (('aiida.groups:core.auto',), (False, True, False)), + (('aiida.groups:core.auto', 'aiida.groups:core.import'), (False, True, True)), +)) +def test_sub_classes(setup_groups, sub_classes, expected): + """Test the `sub_classes` constructor argument.""" + entity_01, entity_02, entity_03 = setup_groups + parameter_type = GroupParamType(sub_classes=sub_classes) + + results = [] + + for group in [entity_01, entity_02, entity_03]: + try: + parameter_type.convert(str(group.pk), None, None) + except click.BadParameter: + results.append(False) + else: + results.append(True) + + assert tuple(results) == expected From 789fdee461cf5f4f5fa50e914b96d9d9c30f8d3e Mon Sep 17 00:00:00 2001 From: Sebastiaan Huber Date: Sun, 12 Apr 2020 20:42:30 +0200 Subject: [PATCH 52/54] Add auto-complete support for `CodeParamType` and `GroupParamType` This will enable auto-completion for existing `Code` and `Group` instances by their label for commands that have options or arguments with the corresponding parameter type. 
--- aiida/cmdline/params/types/choice.py | 1 - aiida/cmdline/params/types/code.py | 11 +- aiida/cmdline/params/types/group.py | 12 +- aiida/orm/utils/loaders.py | 8 +- tests/cmdline/params/types/test_code.py | 212 ++++++++++++----------- tests/cmdline/params/types/test_group.py | 12 ++ 6 files changed, 152 insertions(+), 104 deletions(-) diff --git a/aiida/cmdline/params/types/choice.py b/aiida/cmdline/params/types/choice.py index 47cc63b06f..b1ccce62e0 100644 --- a/aiida/cmdline/params/types/choice.py +++ b/aiida/cmdline/params/types/choice.py @@ -43,7 +43,6 @@ def _click_choice(self): """ if self.__click_choice is None: self.__click_choice = click.Choice(self._get_choices()) - # self._get_choices = None return self.__click_choice @property diff --git a/aiida/cmdline/params/types/code.py b/aiida/cmdline/params/types/code.py index 1266d96b6c..da1c6753bc 100644 --- a/aiida/cmdline/params/types/code.py +++ b/aiida/cmdline/params/types/code.py @@ -8,8 +8,9 @@ # For further information please visit http://www.aiida.net # ########################################################################### """Module to define the custom click type for code.""" - import click + +from aiida.cmdline.utils import decorators from .identifier import IdentifierParamType @@ -40,6 +41,14 @@ def orm_class_loader(self): from aiida.orm.utils.loaders import CodeEntityLoader return CodeEntityLoader + @decorators.with_dbenv() + def complete(self, ctx, incomplete): # pylint: disable=unused-argument + """Return possible completions based on an incomplete value. + + :returns: list of tuples of valid entry points (matching incomplete) and a description + """ + return [(option, '') for option, in self.orm_class_loader.get_options(incomplete, project='label')] + def convert(self, value, param, ctx): code = super().convert(value, param, ctx) diff --git a/aiida/cmdline/params/types/group.py b/aiida/cmdline/params/types/group.py index 9541d3e7c2..0645ac6e65 100644 --- a/aiida/cmdline/params/types/group.py +++ b/aiida/cmdline/params/types/group.py @@ -11,7 +11,7 @@ import click from aiida.common.lang import type_check -from aiida.cmdline.utils.decorators import with_dbenv +from aiida.cmdline.utils import decorators from .identifier import IdentifierParamType @@ -56,7 +56,15 @@ def orm_class_loader(self): from aiida.orm.utils.loaders import GroupEntityLoader return GroupEntityLoader - @with_dbenv() + @decorators.with_dbenv() + def complete(self, ctx, incomplete): # pylint: disable=unused-argument + """Return possible completions based on an incomplete value. 
+ + :returns: list of tuples of valid entry points (matching incomplete) and a description + """ + return [(option, '') for option, in self.orm_class_loader.get_options(incomplete, project='label')] + + @decorators.with_dbenv() def convert(self, value, param, ctx): try: group = super().convert(value, param, ctx) diff --git a/aiida/orm/utils/loaders.py b/aiida/orm/utils/loaders.py index 5e73ff46e1..ecf08e6215 100644 --- a/aiida/orm/utils/loaders.py +++ b/aiida/orm/utils/loaders.py @@ -456,15 +456,19 @@ def _get_query_builder_label_identifier(cls, identifier, classes, operator='==', :raises ValueError: if the identifier is invalid :raises aiida.common.NotExistent: if the orm base class does not support a LABEL like identifier """ + from aiida.common.escaping import escape_for_sql_like from aiida.orm import Computer try: - label, _, machinename = identifier.partition('@') + identifier, _, machinename = identifier.partition('@') except AttributeError: raise ValueError('the identifier needs to be a string') + if operator == 'like': + identifier = escape_for_sql_like(identifier) + '%' + builder = QueryBuilder() - builder.append(cls=classes, tag='code', project=project, filters={'label': {'==': label}}) + builder.append(cls=classes, tag='code', project=project, filters={'label': {operator: identifier}}) if machinename: builder.append(Computer, filters={'name': {'==': machinename}}, with_node='code') diff --git a/tests/cmdline/params/types/test_code.py b/tests/cmdline/params/types/test_code.py index f96d9ecf05..a2464f64d7 100644 --- a/tests/cmdline/params/types/test_code.py +++ b/tests/cmdline/params/types/test_code.py @@ -7,109 +7,125 @@ # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### +# pylint: disable=redefined-outer-name,unused-variable,unused-argument """Tests for the `CodeParamType`.""" - import click +import pytest -from aiida.backends.testbase import AiidaTestCase from aiida.cmdline.params.types import CodeParamType from aiida.orm import Code from aiida.orm.utils.loaders import OrmEntityLoader -class TestCodeParamType(AiidaTestCase): - """Tests for the `CodeParamType`.""" - - @classmethod - def setUpClass(cls, *args, **kwargs): - """ - Create some code to test the CodeParamType parameter type for the command line infrastructure - We create an initial code with a random name and then on purpose create two code with a name - that matches exactly the ID and UUID, respectively, of the first one. 
This allows us to test - the rules implemented to solve ambiguities that arise when determing the identifier type - """ - super().setUpClass(*args, **kwargs) - - cls.param_base = CodeParamType() - cls.param_entry_point = CodeParamType(entry_point='arithmetic.add') - cls.entity_01 = Code(remote_computer_exec=(cls.computer, '/bin/true')).store() - cls.entity_02 = Code(remote_computer_exec=(cls.computer, '/bin/true'), - input_plugin_name='arithmetic.add').store() - cls.entity_03 = Code(remote_computer_exec=(cls.computer, '/bin/true'), - input_plugin_name='templatereplacer').store() - - cls.entity_01.label = 'computer_01' - cls.entity_02.label = str(cls.entity_01.pk) - cls.entity_03.label = str(cls.entity_01.uuid) - - def test_get_by_id(self): - """ - Verify that using the ID will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.pk) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_get_by_uuid(self): - """ - Verify that using the UUID will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.uuid) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_get_by_label(self): - """ - Verify that using the LABEL will retrieve the correct entity - """ - identifier = '{}'.format(self.entity_01.label) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_get_by_fullname(self): - """ - Verify that using the LABEL@machinename will retrieve the correct entity - """ - identifier = '{}@{}'.format(self.entity_01.label, self.computer.name) # pylint: disable=no-member - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - def test_ambiguous_label_pk(self): - """ - Situation: LABEL of entity_02 is exactly equal to ID of entity_01 - - Verify that using an ambiguous identifier gives precedence to the ID interpretation - Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL - """ - identifier = '{}'.format(self.entity_02.label) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - identifier = '{}{}'.format(self.entity_02.label, OrmEntityLoader.label_ambiguity_breaker) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_02.uuid) - - def test_ambiguous_label_uuid(self): - """ - Situation: LABEL of entity_03 is exactly equal to UUID of entity_01 - - Verify that using an ambiguous identifier gives precedence to the UUID interpretation - Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL - """ - identifier = '{}'.format(self.entity_03.label) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_01.uuid) - - identifier = '{}{}'.format(self.entity_03.label, OrmEntityLoader.label_ambiguity_breaker) - result = self.param_base.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_03.uuid) - - def test_entry_point_validation(self): - """Verify that when an `entry_point` is defined in the constructor, it is respected in the validation.""" - identifier = '{}'.format(self.entity_02.pk) - result = self.param_entry_point.convert(identifier, None, None) - self.assertEqual(result.uuid, self.entity_02.uuid) - - with 
self.assertRaises(click.BadParameter):
-            identifier = '{}'.format(self.entity_03.pk)
-            result = self.param_entry_point.convert(identifier, None, None)
+@pytest.fixture
+def parameter_type():
+    """Return an instance of the `CodeParamType`."""
+    return CodeParamType()
+
+
+@pytest.fixture
+def setup_codes(clear_database_before_test, aiida_localhost):
+    """Create some `Code` instances to test the `CodeParamType` parameter type for the command line infrastructure.
+
+    We create an initial code with a random name and then on purpose create two codes with a name that matches exactly
+    the ID and UUID, respectively, of the first one. This allows us to test the rules implemented to solve ambiguities
+    that arise when determining the identifier type.
+    """
+    entity_01 = Code(remote_computer_exec=(aiida_localhost, '/bin/true')).store()
+    entity_02 = Code(remote_computer_exec=(aiida_localhost, '/bin/true'), input_plugin_name='arithmetic.add').store()
+    entity_03 = Code(remote_computer_exec=(aiida_localhost, '/bin/true'), input_plugin_name='templatereplacer').store()
+
+    entity_01.label = 'computer_01'
+    entity_02.label = str(entity_01.pk)
+    entity_03.label = str(entity_01.uuid)
+
+    return entity_01, entity_02, entity_03
+
+
+def test_get_by_id(setup_codes, parameter_type):
+    """Verify that using the ID will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}'.format(entity_01.pk)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_get_by_uuid(setup_codes, parameter_type):
+    """Verify that using the UUID will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}'.format(entity_01.uuid)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_get_by_label(setup_codes, parameter_type):
+    """Verify that using the LABEL will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}'.format(entity_01.label)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_get_by_fullname(setup_codes, parameter_type):
+    """Verify that using the LABEL@machinename will retrieve the correct entity."""
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}@{}'.format(entity_01.label, entity_01.computer.name)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+
+def test_ambiguous_label_pk(setup_codes, parameter_type):
+    """Situation: LABEL of entity_02 is exactly equal to ID of entity_01.
+
+    Verify that using an ambiguous identifier gives precedence to the ID interpretation.
+    Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL.
+    """
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}'.format(entity_02.label)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+    identifier = '{}{}'.format(entity_02.label, OrmEntityLoader.label_ambiguity_breaker)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_02.uuid
+
+
+def test_ambiguous_label_uuid(setup_codes, parameter_type):
+    """Situation: LABEL of entity_03 is exactly equal to UUID of entity_01.
+
+    Verify that using an ambiguous identifier gives precedence to the UUID interpretation.
+    Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL.
+    """
+    entity_01, entity_02, entity_03 = setup_codes
+    identifier = '{}'.format(entity_03.label)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_01.uuid
+
+    identifier = '{}{}'.format(entity_03.label, OrmEntityLoader.label_ambiguity_breaker)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_03.uuid
+
+
+def test_entry_point_validation(setup_codes):
+    """Verify that when an `entry_point` is defined in the constructor, it is respected in the validation."""
+    entity_01, entity_02, entity_03 = setup_codes
+    parameter_type = CodeParamType(entry_point='arithmetic.add')
+    identifier = '{}'.format(entity_02.pk)
+    result = parameter_type.convert(identifier, None, None)
+    assert result.uuid == entity_02.uuid
+
+    with pytest.raises(click.BadParameter):
+        identifier = '{}'.format(entity_03.pk)
+        result = parameter_type.convert(identifier, None, None)
+
+
+def test_complete(setup_codes, parameter_type, aiida_localhost):
+    """Test the `complete` method that provides auto-complete functionality."""
+    entity_01, entity_02, entity_03 = setup_codes
+    entity_04 = Code(label='xavier', remote_computer_exec=(aiida_localhost, '/bin/true')).store()
+
+    options = [item[0] for item in parameter_type.complete(None, '')]
+    assert sorted(options) == sorted([entity_01.label, entity_02.label, entity_03.label, entity_04.label])
+
+    options = [item[0] for item in parameter_type.complete(None, 'xa')]
+    assert sorted(options) == sorted([entity_04.label])
diff --git a/tests/cmdline/params/types/test_group.py b/tests/cmdline/params/types/test_group.py
index 1fca1786e9..722c2d3fe7 100644
--- a/tests/cmdline/params/types/test_group.py
+++ b/tests/cmdline/params/types/test_group.py
@@ -131,3 +131,15 @@ def test_sub_classes(setup_groups, sub_classes, expected):
             results.append(True)

     assert tuple(results) == expected
+
+
+def test_complete(setup_groups, parameter_type):
+    """Test the `complete` method that provides auto-complete functionality."""
+    entity_01, entity_02, entity_03 = setup_groups
+    entity_04 = Group(label='xavier').store()
+
+    options = [item[0] for item in parameter_type.complete(None, '')]
+    assert sorted(options) == sorted([entity_01.label, entity_02.label, entity_03.label, entity_04.label])
+
+    options = [item[0] for item in parameter_type.complete(None, 'xa')]
+    assert sorted(options) == sorted([entity_04.label])

From 0e2a00cea1e97b89701662e9ff28459c8e4d83e7 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Wed, 15 Apr 2020 17:43:54 +0200
Subject: [PATCH 53/54] Do not ignore `type_string` in constructor of `Group`
 (#3935)

Doing so would actually break backwards compatibility. Code that creates
groups with explicit custom type strings would no longer be able to query for
them, as the type string was silently converted to `core`. Even though
accepting the passed `type_string` will cause warnings when loading such
groups from the database, that is preferable to breaking existing code.
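A minimal sketch (hypothetical type string, mirroring the test added below)
of the behavior this restores:

    from aiida import orm

    # Emits a deprecation warning, but the custom type string is kept ...
    group = orm.Group(label='potcars', type_string='data.potcar').store()
    assert group.type_string == 'data.potcar'

    # ... so existing code can still query by it.
    orm.QueryBuilder().append(orm.Group, filters={'type_string': 'data.potcar'}).one()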
---
 aiida/orm/groups.py          |  7 ++++++-
 tests/orm/test_autogroups.py |  6 +++---
 tests/orm/test_groups.py     | 27 +++++++++++++++++++++++++++
 3 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/aiida/orm/groups.py b/aiida/orm/groups.py
index f2c726c0f2..7d43cbd4be 100644
--- a/aiida/orm/groups.py
+++ b/aiida/orm/groups.py
@@ -142,9 +142,14 @@ def __init__(self, label=None, user=None, description='', type_string=None, back
             raise ValueError('Group label must be provided')

         if type_string is not None:
-            message = '`type_string` is deprecated because it is determined automatically, using default `core`'
+            message = '`type_string` is deprecated because it is determined automatically'
             warnings.warn(message)  # pylint: disable=no-member

+        # If `type_string` is explicitly defined, override the automatically determined `self._type_string`. This is
+        # necessary for backwards compatibility.
+        if type_string is not None:
+            self._type_string = type_string
+
         type_string = self._type_string

         backend = backend or get_manager().get_backend()
diff --git a/tests/orm/test_autogroups.py b/tests/orm/test_autogroups.py
index 9deed78bed..23ca495af9 100644
--- a/tests/orm/test_autogroups.py
+++ b/tests/orm/test_autogroups.py
@@ -57,7 +57,7 @@ def test_get_or_create(self):
         )

         # I create a group with a large integer suffix (9)
-        AutoGroup(label='{}_9'.format(label_prefix), type_string='auto.run').store()
+        AutoGroup(label='{}_9'.format(label_prefix)).store()
         # The next autogroup should become number 10
         autogroup = Autogroup()
         autogroup.set_group_label_prefix(label_prefix)
@@ -69,7 +69,7 @@ def test_get_or_create(self):
         )

         # I create a group with a non-integer suffix (15a), it should be ignored
-        AutoGroup(label='{}_15b'.format(label_prefix), type_string='auto.run').store()
+        AutoGroup(label='{}_15b'.format(label_prefix)).store()
         # The next autogroup should become number 11
         autogroup = Autogroup()
         autogroup.set_group_label_prefix(label_prefix)
@@ -86,7 +86,7 @@ def test_get_or_create_invalid_prefix(self):
         label_prefix = 'new_test_prefix_TestAutogroup'
         # I create a group with the same prefix, but followed by non-underscore
         # characters. These should be ignored in the logic.
-        AutoGroup(label='{}xx'.format(label_prefix), type_string='auto.run').store()
+        AutoGroup(label='{}xx'.format(label_prefix)).store()

         # Check that there are no groups to begin with
         queryb = QueryBuilder().append(AutoGroup, filters={'label': label_prefix})
diff --git a/tests/orm/test_groups.py b/tests/orm/test_groups.py
index 0741ab023c..e598983697 100644
--- a/tests/orm/test_groups.py
+++ b/tests/orm/test_groups.py
@@ -342,6 +342,33 @@ def test_loading_unregistered():

         assert isinstance(loaded, orm.Group)

+    @staticmethod
+    def test_explicit_type_string():
+        """Test that passing explicit `type_string` to `Group` constructor is still possible despite being deprecated.
+
+        Both constructing a group with an explicit `type_string` and loading a group with an unregistered
+        type string should emit a warning, but both should remain possible.
+        """
+        type_string = 'data.potcar'  # An unregistered custom type string
+
+        with pytest.warns(UserWarning):
+            group = orm.Group(label='group', type_string=type_string)
+
+        group.store()
+        assert group.type_string == type_string
+
+        with pytest.warns(UserWarning):
+            loaded = orm.Group.get(label=group.label, type_string=type_string)
+
+        assert isinstance(loaded, orm.Group)
+        assert loaded.pk == group.pk
+        assert loaded.type_string == group.type_string
+
+        queried = orm.QueryBuilder().append(orm.Group, filters={'id': group.pk, 'type_string': type_string}).one()[0]
+        assert isinstance(queried, orm.Group)
+        assert queried.pk == group.pk
+        assert queried.type_string == group.type_string
+
     @staticmethod
     def test_querying():
         """Test querying for groups with and without subclassing."""

From 8ad0f24e6d246dab77dabf785d31d00c5bc6920f Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Tue, 14 Apr 2020 17:05:37 +0200
Subject: [PATCH 54/54] Release v1.2.0

---
 AUTHORS.txt                                    |  1 +
 CHANGELOG.md                                   | 43 +++++++++++++++++++
 aiida/__init__.py                              |  2 +-
 .../bf591f31dd12_dbgroup_type_string.py        |  8 ++++
 aiida/tools/groups/__init__.py                 |  9 ++++
 setup.json                                     |  4 +-
 tests/common/test_escaping.py                  |  8 ++++
 tests/engine/processes/text_exit_code.py       |  8 ++++
 .../importexport/migration/test_migrations.py  |  8 ++++
 9 files changed, 88 insertions(+), 3 deletions(-)

diff --git a/AUTHORS.txt b/AUTHORS.txt
index af2a1eb8ae..ac97a4387b 100644
--- a/AUTHORS.txt
+++ b/AUTHORS.txt
@@ -52,6 +52,7 @@ and the following people for code contributions, bug fixes, improvements of the
 * Marco Dorigo
 * Y.-W. Fang
 * Marco Gibertini
+* Davide Grassano
 * Daniel Hollas
 * Eric Hontz
 * Jianxing Huang
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9ca0d93b2..9933ae460c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,48 @@
 # Changelog
 
+## v1.2.0
+
+### Features
+- `ExitCode`: make the exit message parameterizable through templates [[#3824]](https://github.com/aiidateam/aiida-core/pull/3824)
+- `GroupPath`: a utility to work with virtual `Group` hierarchies [[#3613]](https://github.com/aiidateam/aiida-core/pull/3613)
+- Make `Group` subclassable through entry points [[#3882]](https://github.com/aiidateam/aiida-core/pull/3882)[[#3903]](https://github.com/aiidateam/aiida-core/pull/3903)[[#3926]](https://github.com/aiidateam/aiida-core/pull/3926)
+- Add auto-complete support for `CodeParamType` and `GroupParamType` [[#3926]](https://github.com/aiidateam/aiida-core/pull/3926)
+- Add export archive migration for `Group` type strings [[#3912]](https://github.com/aiidateam/aiida-core/pull/3912)
+- Add the `-v/--version` option to `verdi export migrate` [[#3910]](https://github.com/aiidateam/aiida-core/pull/3910)
+- Add the `-l/--limit` option to `verdi group show` [[#3857]](https://github.com/aiidateam/aiida-core/pull/3857)
+- Add the `--order-by/--order-direction` options to `verdi group list` [[#3858]](https://github.com/aiidateam/aiida-core/pull/3858)
+- Add `prepend_text` and `append_text` to `aiida_local_code_factory` pytest fixture [[#3831]](https://github.com/aiidateam/aiida-core/pull/3831)
+- REST API: make it easier to call `run_api` in wsgi scripts [[#3875]](https://github.com/aiidateam/aiida-core/pull/3875)
+- Plot bands with only one kpoint [[#3798]](https://github.com/aiidateam/aiida-core/pull/3798)
+
+### Bug fixes
+- Improved validation for CLI parameters [[#3894]](https://github.com/aiidateam/aiida-core/pull/3894)
+- Ensure unicity when creating instances of `Autogroup` [[#3650]](https://github.com/aiidateam/aiida-core/pull/3650)
+- Prevent nodes without registered entry points from being stored [[#3886]](https://github.com/aiidateam/aiida-core/pull/3886)
+- Fix the `RotatingFileHandler` configuration of the daemon logger [[#3891]](https://github.com/aiidateam/aiida-core/pull/3891)
+- Ensure log messages are not duplicated in daemon log file [[#3890]](https://github.com/aiidateam/aiida-core/pull/3890)
+- Convert argument to `str` in `aiida.common.escaping.escape_for_bash` [[#3873]](https://github.com/aiidateam/aiida-core/pull/3873)
+- Remove the return statement of `RemoteData.getfile()` [[#3742]](https://github.com/aiidateam/aiida-core/pull/3742)
+- Support for `BandsData` nodes without `StructureData` ancestors [[#3817]](https://github.com/aiidateam/aiida-core/pull/3817)
+
+### Deprecations
+- Deprecate `--group-type` option in favor of `--type-string` for `verdi group list` [[#3926]](https://github.com/aiidateam/aiida-core/pull/3926)
+
+### Documentation
+- Docs: link to documentation of other libraries via `intersphinx` mapping [[#3876]](https://github.com/aiidateam/aiida-core/pull/3876)
+- Docs: remove extra `advanced_plotting` from install instructions [[#3860]](https://github.com/aiidateam/aiida-core/pull/3860)
+- Docs: consistent use of "plugin" vs "plugin package" terminology [[#3799]](https://github.com/aiidateam/aiida-core/pull/3799)
+
+### Developers
+- Deduplicate code for tests of archive migration code [[#3924]](https://github.com/aiidateam/aiida-core/pull/3924)
+- CI: use GitHub Actions services for PostgreSQL and RabbitMQ [[#3901]](https://github.com/aiidateam/aiida-core/pull/3901)
+- Move `aiida.manage.external.pgsu` to external package `pgsu` [[#3892]](https://github.com/aiidateam/aiida-core/pull/3892)
+- Cleanup the top-level directory of the repository [[#3738]](https://github.com/aiidateam/aiida-core/pull/3738)
+- Remove unused `orm.implementation.utils` module [[#3877]](https://github.com/aiidateam/aiida-core/pull/3877)
+- Revise dependency management workflow [[#3771]](https://github.com/aiidateam/aiida-core/pull/3771)
+- Re-add support for coverage reports through codecov.io [[#3618]](https://github.com/aiidateam/aiida-core/pull/3618)
+
+
 ## v1.1.1
 
 ### Changes
diff --git a/aiida/__init__.py b/aiida/__init__.py
index a6a582127c..bbaea6a112 100644
--- a/aiida/__init__.py
+++ b/aiida/__init__.py
@@ -32,7 +32,7 @@
     'For further information please visit http://www.aiida.net/. All rights reserved.'
 )
 __license__ = 'MIT license, see LICENSE.txt file.'
-__version__ = '1.1.1'
+__version__ = '1.2.0'
 __authors__ = 'The AiiDA team.'
 __paper__ = (
     'G. Pizzi, A. Cepellotti, R. Sabatini, N. Marzari, and B. Kozinsky,'
diff --git a/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py b/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py
index 8231d8ebb7..626b561c12 100644
--- a/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py
+++ b/aiida/backends/sqlalchemy/migrations/versions/bf591f31dd12_dbgroup_type_string.py
@@ -1,4 +1,12 @@
 # -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file        #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
 """Migration after the `Group` class became pluggable and so the group `type_string` changed.
 
 Revision ID: bf591f31dd12
diff --git a/aiida/tools/groups/__init__.py b/aiida/tools/groups/__init__.py
index 7d429eeab7..19e936839b 100644
--- a/aiida/tools/groups/__init__.py
+++ b/aiida/tools/groups/__init__.py
@@ -1,3 +1,12 @@
+# -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file        #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
 # This file is part of the AiiDA code.                                    #
 #                                                                         #
 # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
diff --git a/setup.json b/setup.json
index 9de32a4b12..1160389e90 100644
--- a/setup.json
+++ b/setup.json
@@ -1,6 +1,6 @@
 {
     "name": "aiida-core",
-    "version": "1.1.1",
+    "version": "1.2.0",
     "url": "http://www.aiida.net/",
     "license": "MIT License",
     "author": "The AiiDA team",
@@ -210,4 +210,4 @@
         ],
         "aiida.workflows": []
     }
-}
+}
\ No newline at end of file
diff --git a/tests/common/test_escaping.py b/tests/common/test_escaping.py
index a66070b603..80bdb377c6 100644
--- a/tests/common/test_escaping.py
+++ b/tests/common/test_escaping.py
@@ -1,4 +1,12 @@
 # -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file        #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
 """Tests for the :mod:`aiida.common.escaping`."""
 from aiida.common.escaping import escape_for_bash
diff --git a/tests/engine/processes/text_exit_code.py b/tests/engine/processes/text_exit_code.py
index 2dbdd7abce..2371d7fb2b 100644
--- a/tests/engine/processes/text_exit_code.py
+++ b/tests/engine/processes/text_exit_code.py
@@ -1,4 +1,12 @@
 # -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file        #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
 """Tests for `aiida.engine.processes.exit_code.ExitCode`."""
 import pytest
diff --git a/tests/tools/importexport/migration/test_migrations.py b/tests/tools/importexport/migration/test_migrations.py
index 75da75a855..fc2546d259 100644
--- a/tests/tools/importexport/migration/test_migrations.py
+++ b/tests/tools/importexport/migration/test_migrations.py
@@ -1,4 +1,12 @@
 # -*- coding: utf-8 -*-
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved.                     #
+# This file is part of the AiiDA code.                                    #
+#                                                                         #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file        #
+# For further information please visit http://www.aiida.net               #
+###########################################################################
 # pylint: disable=redefined-outer-name
"""Test the export archive migrations on the archives included in `tests/fixtures/export/migrate`."""
 import copy
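A final note on the `ExitCode` templating feature listed in the changelog above
([[#3824]](https://github.com/aiidateam/aiida-core/pull/3824)), which the
`text_exit_code.py` tests exercise. A minimal sketch, assuming the `format`
method introduced by that PR (the status code and message template here are
illustrative only):

    from aiida.engine import ExitCode

    # An exit code whose message is a template with a named placeholder.
    template = ExitCode(450, 'the parameter {parameter} is invalid')

    # `format` substitutes the placeholder and returns a new `ExitCode`
    # with the same status and a concrete message.
    concrete = template.format(parameter='some_key')
    assert concrete.status == 450
    assert concrete.message == 'the parameter some_key is invalid'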