From 64eceed8ed18888d8a72f3649a72b3c391fb7b37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A5l=20R=C3=B8nning?= Date: Tue, 18 Jun 2024 17:41:48 +0200 Subject: [PATCH 01/10] Removed misleading info (#669) --- cognite_toolkit/cognite_modules/README.md | 13 +------------ cognite_toolkit/custom_modules/README.md | 9 +-------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/cognite_toolkit/cognite_modules/README.md b/cognite_toolkit/cognite_modules/README.md index fae569d64..ceca20ce6 100644 --- a/cognite_toolkit/cognite_modules/README.md +++ b/cognite_toolkit/cognite_modules/README.md @@ -3,16 +3,5 @@ Modules in this folder come bundled with the `cdf-tk` tool. They are managed from a [public repository](https://github.com/cognitedata/toolkit). -The modules prefixed by `cdf_` are managed and supported by Cognite. You should put your own modules in -the custom_modules directory. - -The modules are grouped into sub-directories: - -* **common**: these modules are CDF project wide and are not specific to any particular solution. -* **examples**: these modules are meant to be copied to `custom_moudles`, renamed, and used as a starting point - for your own modules. -* ****: e.g. apm and infield. These modules are specific to a particular solution. Typically, - a solution like infield consists of multiple modules. - -See the [module and package documentation](https://developer.cognite.com/sdks/toolkit/references/module_reference) for +See the [documentation](https://docs.cognite.com/cdf/deploy/cdf_toolkit/references/module_reference) for an introduction. diff --git a/cognite_toolkit/custom_modules/README.md b/cognite_toolkit/custom_modules/README.md index 99f0388d1..c5b4925db 100644 --- a/cognite_toolkit/custom_modules/README.md +++ b/cognite_toolkit/custom_modules/README.md @@ -1,10 +1,3 @@ # local_modules directory -You are free to add your own modules to this directory as long as you don't use the `cdf_` prefix. -Each module should have a default.config.yaml file that contains variables that are used in the module. The -sub-directories in each module correspond to the different resources in CDF. See the [my_example_module](my_example_module/README.md) -for an example of a module. Run the command `cdf-tk init --upgrade` to add the variables from the default.config.yaml -into the `config.yaml` file in the root of your project directory. You can then override these default values in that -`config.yaml` file. - -See the [module and package documentation](../docs/overview.md) for an introduction. +See the [documentation](https://docs.cognite.com/cdf/deploy/cdf_toolkit/guides/build_modules) for an introduction. 
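PATCH 02 below marks two DMS container integration tests as flaky. The `flaky` marker comes from the `pytest-rerunfailures` plugin: `reruns` bounds the number of retries, `reruns_delay` waits between attempts, and `only_rerun` restricts retries to the listed exception names. A minimal, self-contained sketch of the same mechanism (the test body is hypothetical, standing in for an eventually consistent backend call):

    import random

    import pytest

    # Rerun up to 3 times with a 10 second pause between attempts, but only
    # when the failure is an AssertionError; other exceptions fail immediately.
    @pytest.mark.flaky(reruns=3, reruns_delay=10, only_rerun=["AssertionError"])
    def test_eventually_consistent_count() -> None:
        assert random.random() < 0.5

Restricting `only_rerun` to `AssertionError` keeps genuine errors such as a `CogniteAPIError` failing fast instead of being retried.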
From a575e148d57c662cbfc763332cf35d0d9d4f2963 Mon Sep 17 00:00:00 2001 From: Anders Albert <60234212+doctrino@users.noreply.github.com> Date: Wed, 19 Jun 2024 09:19:32 +0200 Subject: [PATCH 02/10] =?UTF-8?q?[CDF-21830]=F0=9F=A4=95=20Added=20flaky?= =?UTF-8?q?=20decorator=20on=20flaky=20test=20(#670)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit tests: Added flaky decorator on flaky test --- .../test_loaders/test_resource_container_loaders.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/tests_integration/test_loaders/test_resource_container_loaders.py b/tests/tests_integration/test_loaders/test_resource_container_loaders.py index f757df620..62bba6d86 100644 --- a/tests/tests_integration/test_loaders/test_resource_container_loaders.py +++ b/tests/tests_integration/test_loaders/test_resource_container_loaders.py @@ -79,6 +79,8 @@ def edge_container(cognite_client: CogniteClient, integration_space: dm.Space) - class TestContainerLoader: + # The DMS service is fairly unstable, so we need to rerun the tests if they fail. + @pytest.mark.flaky(reruns=3, reruns_delay=10, only_rerun=["AssertionError"]) def test_populate_count_drop_data_node_container( self, node_container: dm.Container, cognite_client: CogniteClient ) -> None: @@ -114,6 +116,8 @@ def test_populate_count_drop_data_node_container( finally: loader.drop_data(container_id) + # The DMS service is fairly unstable, so we need to rerun the tests if they fail. + @pytest.mark.flaky(reruns=3, reruns_delay=10, only_rerun=["AssertionError"]) def test_populate_count_drop_data_edge_container( self, edge_container: dm.Container, cognite_client: CogniteClient ) -> None: From 9750188034ebd7141c90bc78da98b9bc93d060e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A5l=20R=C3=B8nning?= Date: Wed, 19 Jun 2024 13:23:01 +0200 Subject: [PATCH 03/10] Fine-tuned selection logic (#672) --- .vscode/launch.json | 3 +- .../empty/empty_module/default.config.yaml | 1 - .../prototypes/_packages/empty/manifest.yaml | 2 +- .../empty/my_module/default.config.yaml | 1 + .../_cdf_tk/prototypes/commands/modules.py | 36 +++++++++---------- 5 files changed, 22 insertions(+), 21 deletions(-) delete mode 100644 cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml create mode 100644 cognite_toolkit/_cdf_tk/prototypes/_packages/empty/my_module/default.config.yaml diff --git a/.vscode/launch.json b/.vscode/launch.json index 9c4c0680b..be8a923a4 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -2,6 +2,7 @@ // Use IntelliSense to learn about possible attributes. // Hover to view descriptions of existing attributes. // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 +{ "version": "0.2.0", "configurations": [ { @@ -158,7 +159,7 @@ "type": "debugpy", "request": "launch", "program": "./cdf-tk-dev.py", - "args": ["modules", "init", "new_project"], + "args": ["modules", "init"], "console": "integratedTerminal", "justMyCode": false }, diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml deleted file mode 100644 index 02e61d5d0..000000000 --- a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml +++ /dev/null @@ -1 +0,0 @@ -my_variable: "Hello, World!" 
\ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml index 472aa02c5..72f651236 100644 --- a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml @@ -1,4 +1,4 @@ title: "Empty: I want to create my own modules" modules: { - "empty_module": {"items": {}}, + "my_module": {"items": {}}, } \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/my_module/default.config.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/my_module/default.config.yaml new file mode 100644 index 000000000..fac6b3410 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/my_module/default.config.yaml @@ -0,0 +1 @@ +my_variable: "" \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py index c11c9f296..27942c6f6 100644 --- a/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py +++ b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py @@ -204,7 +204,7 @@ def init(self, init_dir: Optional[str] = None, arg_package: Optional[str] = None print("\n") if len(available) > 0: - if not questionary.confirm("Would you like to add more?", default=False).ask(): + if not questionary.confirm("Would you like to change the selection?", default=False).ask(): break package_id = questionary.select( @@ -215,24 +215,24 @@ def init(self, init_dir: Optional[str] = None, arg_package: Optional[str] = None style=custom_style_fancy, ).ask() - selection = questionary.checkbox( - f"Which modules in {package_id} would you like to include?", - instruction="Use arrow up/down, press space to select item(s) and enter to save", - choices=[ - questionary.Choice( - value.get("title", key), key, checked=True if key in selected.get(package_id, {}) else False - ) - for key, value in available[package_id].get("modules", {}).items() - ], - qmark=INDENT, - pointer=POINTER, - style=custom_style_fancy, - ).ask() - - if len(selection) > 0: - selected[package_id] = selection + if len(available[package_id].get("modules", {}).items()) > 1: + selection = questionary.checkbox( + f"Which modules in {package_id} would you like to include?", + instruction="Use arrow up/down, press space to select item(s) and enter to save", + choices=[ + questionary.Choice( + value.get("title", key), key, checked=True if key in selected.get(package_id, {}) else False + ) + for key, value in available[package_id].get("modules", {}).items() + ], + qmark=INDENT, + pointer=POINTER, + style=custom_style_fancy, + ).ask() else: - selected[package_id] = available[package_id].get("modules", {}).keys() + selection = list(available[package_id].get("modules", {}).keys()) + + selected[package_id] = selection if not questionary.confirm("Would you like to continue with creation?", default=True).ask(): print("Exiting...") From cb7212379b9634614d3c885baf0dbeb26202987d Mon Sep 17 00:00:00 2001 From: Anders Albert <60234212+doctrino@users.noreply.github.com> Date: Wed, 19 Jun 2024 14:02:27 +0200 Subject: [PATCH 04/10] =?UTF-8?q?[CDF-21817]=20=F0=9F=98=88Check=20Toolkit?= =?UTF-8?q?=20Group=20Correctly=20setup=20(#666)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor; missing files * refactor: cache token inspect, remove unused method * refactor: Removed unused parameters * refactor: verify 
authorization * tests: updated tests to match refactoring * refactor: removed unused function * refactor; removed verify space not necessary * feat: consistent implementation of verify methods * tests: updated tests * refactor: add action for each verify * tests: ensure URLs are working * style: improved docs --- cognite_toolkit/_cdf_tk/commands/auth.py | 34 ++- cognite_toolkit/_cdf_tk/commands/clean.py | 2 +- cognite_toolkit/_cdf_tk/commands/deploy.py | 5 +- cognite_toolkit/_cdf_tk/commands/describe.py | 14 +- cognite_toolkit/_cdf_tk/commands/dump.py | 5 +- cognite_toolkit/_cdf_tk/constants.py | 6 + .../_cdf_tk/loaders/_resource_loaders.py | 83 +++--- cognite_toolkit/_cdf_tk/utils.py | 236 +++++++----------- tests/tests_unit/conftest.py | 3 +- .../test_cdf_tk/test_commands/test_auth.py | 2 +- .../test_commands/test_describe.py | 3 +- .../test_cdf_tk/test_commands/test_run.py | 12 +- .../test_loaders/test_base_loaders.py | 6 +- .../test_loaders/test_data_model_loader.py | 3 +- .../test_loaders/test_data_set_loader.py | 3 +- .../test_extraction_pipeline_loader.py | 9 +- .../test_loaders/test_function_loader.py | 6 +- .../test_loaders/test_time_series_loader.py | 3 +- .../test_loaders/test_view_loader.py | 3 +- tests/tests_unit/test_cdf_tk/test_utils.py | 3 +- .../tests_unit/test_cdf_tk/tests_constants.py | 12 + 21 files changed, 219 insertions(+), 234 deletions(-) create mode 100644 tests/tests_unit/test_cdf_tk/tests_constants.py diff --git a/cognite_toolkit/_cdf_tk/commands/auth.py b/cognite_toolkit/_cdf_tk/commands/auth.py index af2739bc7..044440bda 100644 --- a/cognite_toolkit/_cdf_tk/commands/auth.py +++ b/cognite_toolkit/_cdf_tk/commands/auth.py @@ -24,6 +24,8 @@ from cognite.client.data_classes.capabilities import ( Capability, FunctionsAcl, + GroupsAcl, + ProjectsAcl, ) from cognite.client.data_classes.iam import Group, GroupList, GroupWrite, TokenInspection from cognite.client.exceptions import CogniteAPIError @@ -231,14 +233,20 @@ def check_has_group_access(self, ToolGlobals: CDFToolConfig) -> None: "(projectsAcl: LIST, READ and groupsAcl: LIST, READ, CREATE, UPDATE, DELETE)..." 
) try: - ToolGlobals.verify_client( - capabilities={ - "projectsAcl": [ - "LIST", - "READ", - ], - "groupsAcl": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"], - } + ToolGlobals.verify_authorization( + [ + ProjectsAcl([ProjectsAcl.Action.List, ProjectsAcl.Action.Read], ProjectsAcl.Scope.All()), + GroupsAcl( + [ + GroupsAcl.Action.Read, + GroupsAcl.Action.List, + GroupsAcl.Action.Create, + GroupsAcl.Action.Update, + GroupsAcl.Action.Delete, + ], + GroupsAcl.Scope.All(), + ), + ] ) print(" [bold green]OK[/]") except Exception: @@ -250,11 +258,11 @@ def check_has_group_access(self, ToolGlobals: CDFToolConfig) -> None: ) print("Checking basic group read access rights (projectsAcl: LIST, READ and groupsAcl: LIST, READ)...") try: - ToolGlobals.verify_client( - capabilities={ - "projectsAcl": ["LIST", "READ"], - "groupsAcl": ["LIST", "READ"], - } + ToolGlobals.verify_authorization( + capabilities=[ + ProjectsAcl([ProjectsAcl.Action.List, ProjectsAcl.Action.Read], ProjectsAcl.Scope.All()), + GroupsAcl([GroupsAcl.Action.Read, GroupsAcl.Action.List], GroupsAcl.Scope.All()), + ] ) print(" [bold green]OK[/] - can continue with checks.") except Exception: diff --git a/cognite_toolkit/_cdf_tk/commands/clean.py b/cognite_toolkit/_cdf_tk/commands/clean.py index 2666948b6..9ebb17eea 100644 --- a/cognite_toolkit/_cdf_tk/commands/clean.py +++ b/cognite_toolkit/_cdf_tk/commands/clean.py @@ -79,7 +79,7 @@ def clean_resources( capabilities = loader.get_required_capability(loaded_resources) if capabilities: - ToolGlobals.verify_capabilities(capabilities) + ToolGlobals.verify_authorization(capabilities, action=f"clean {loader.display_name}") nr_of_items = len(loaded_resources) if nr_of_items == 0: diff --git a/cognite_toolkit/_cdf_tk/commands/deploy.py b/cognite_toolkit/_cdf_tk/commands/deploy.py index 3ebdb6aea..71a6521ed 100644 --- a/cognite_toolkit/_cdf_tk/commands/deploy.py +++ b/cognite_toolkit/_cdf_tk/commands/deploy.py @@ -190,6 +190,9 @@ def _deploy_resources( verbose: bool = False, ) -> ResourceDeployResult | None: filepaths = loader.find_files() + if not filepaths: + self.warn(LowSeverityWarning(f"No {loader.display_name} files found. 
Skipping...")) + return None def sort_key(p: Path) -> int: if result := re.findall(r"^(\d+)", p.stem): @@ -214,7 +217,7 @@ def sort_key(p: Path) -> int: capabilities = loader.get_required_capability(loaded_resources) if capabilities: - ToolGlobals.verify_capabilities(capabilities) + ToolGlobals.verify_authorization(capabilities, action=f"deploy {loader.display_name}") nr_of_items = len(loaded_resources) if nr_of_items == 0: diff --git a/cognite_toolkit/_cdf_tk/commands/describe.py b/cognite_toolkit/_cdf_tk/commands/describe.py index d7f5b739e..9e298431a 100644 --- a/cognite_toolkit/_cdf_tk/commands/describe.py +++ b/cognite_toolkit/_cdf_tk/commands/describe.py @@ -3,6 +3,7 @@ import datetime from cognite.client.data_classes.aggregations import Count +from cognite.client.data_classes.capabilities import DataModelInstancesAcl, DataModelsAcl from cognite.client.data_classes.data_modeling import ( DirectRelation, DirectRelationReference, @@ -29,11 +30,14 @@ def execute(self, ToolGlobals: CDFToolConfig, space_name: str, model_name: str | else: print(f"Describing data model {model_name} in space {space_name} in project {ToolGlobals.project}...") print("Verifying access rights...") - client = ToolGlobals.verify_client( - capabilities={ - "dataModelsAcl": ["READ", "WRITE"], - "dataModelInstancesAcl": ["READ", "WRITE"], - } + client = ToolGlobals.verify_authorization( + [ + DataModelsAcl([DataModelsAcl.Action.Read, DataModelsAcl.Action.Write], DataModelsAcl.Scope.All()), + DataModelInstancesAcl( + [DataModelInstancesAcl.Action.Read, DataModelInstancesAcl.Action.Write], + DataModelInstancesAcl.Scope.All(), + ), + ] ) table = Table(title=f"Space {space_name}") table.add_column("Info", justify="left") diff --git a/cognite_toolkit/_cdf_tk/commands/dump.py b/cognite_toolkit/_cdf_tk/commands/dump.py index 1803d7444..f9bb8571f 100644 --- a/cognite_toolkit/_cdf_tk/commands/dump.py +++ b/cognite_toolkit/_cdf_tk/commands/dump.py @@ -4,6 +4,7 @@ import yaml from cognite.client import data_modeling as dm +from cognite.client.data_classes.capabilities import DataModelsAcl from cognite.client.data_classes.data_modeling import DataModelId from rich import print from rich.panel import Panel @@ -26,7 +27,9 @@ def execute( ) -> None: print(f"Dumping {data_model_id} from project {ToolGlobals.project}...") print("Verifying access rights...") - client = ToolGlobals.verify_client(capabilities={"dataModelsAcl": ["READ", "WRITE"]}) + client = ToolGlobals.verify_authorization( + DataModelsAcl([DataModelsAcl.Action.Read], DataModelsAcl.Scope.All()), + ) data_models = client.data_modeling.data_models.retrieve(data_model_id, inline_views=True) if not data_models: diff --git a/cognite_toolkit/_cdf_tk/constants.py b/cognite_toolkit/_cdf_tk/constants.py index 6e5f72332..0aa3cf1f6 100644 --- a/cognite_toolkit/_cdf_tk/constants.py +++ b/cognite_toolkit/_cdf_tk/constants.py @@ -38,3 +38,9 @@ COGNITE_MODULES_PATH = ROOT_PATH / COGNITE_MODULES SUPPORT_MODULE_UPGRADE_FROM_VERSION = "0.1.0" + + +class URL: + configure_access = "https://docs.cognite.com/cdf/deploy/cdf_deploy/cdf_deploy_access_management" + auth_toolkit = "https://docs.cognite.com/cdf/deploy/cdf_toolkit/guides/configure_deploy_modules#configure-the-cdf-toolkit-authentication" + docs = "https://docs.cognite.com/" diff --git a/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py b/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py index 5e492bcc4..7f1f546ac 100644 --- a/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py +++ 
b/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py @@ -18,7 +18,7 @@ import re from abc import ABC from collections import defaultdict -from collections.abc import Hashable, Iterable, Sequence, Sized +from collections.abc import Callable, Hashable, Iterable, Sequence, Sized from functools import lru_cache from numbers import Number from pathlib import Path @@ -306,8 +306,9 @@ def _substitute_scope_ids(group: dict, ToolGlobals: CDFToolConfig, skip_validati for acl, values in capability.items(): scope = values.get("scope", {}) - for scope_name, verify_method in [ - ("datasetScope", ToolGlobals.verify_dataset), + verify_method: Callable[[str, bool, str], int] + for scope_name, verify_method, action in [ + ("datasetScope", ToolGlobals.verify_dataset, "replace datasetExternalId with dataSetId in group"), ( "idScope", ( @@ -315,12 +316,17 @@ def _substitute_scope_ids(group: dict, ToolGlobals: CDFToolConfig, skip_validati if acl == "extractionPipelinesAcl" else ToolGlobals.verify_dataset ), + "replace extractionPipelineExternalId with extractionPipelineId in group", + ), + ( + "extractionPipelineScope", + ToolGlobals.verify_extraction_pipeline, + "replace extractionPipelineExternalId with extractionPipelineId in group", ), - ("extractionPipelineScope", ToolGlobals.verify_extraction_pipeline), ]: if ids := scope.get(scope_name, {}).get("ids", []): values["scope"][scope_name]["ids"] = [ - verify_method(ext_id, skip_validation) if isinstance(ext_id, str) else ext_id + verify_method(ext_id, skip_validation, action) if isinstance(ext_id, str) else ext_id for ext_id in ids ] return group @@ -732,7 +738,11 @@ def load_resource( for item in items: if "dataSetExternalId" in item: ds_external_id = item.pop("dataSetExternalId") - item["dataSetId"] = ToolGlobals.verify_dataset(ds_external_id, skip_validation=skip_validation) + item["dataSetId"] = ToolGlobals.verify_dataset( + ds_external_id, + skip_validation=skip_validation, + action="replace dataSetExternalId with dataSetId in label", + ) loaded = LabelDefinitionWriteList.load(items) return loaded[0] if isinstance(raw_yaml, dict) else loaded @@ -794,7 +804,9 @@ def load_resource( self.extra_configs[func["externalId"]] = {} if func.get("dataSetExternalId") is not None: self.extra_configs[func["externalId"]]["dataSetId"] = ToolGlobals.verify_dataset( - func.get("dataSetExternalId", ""), skip_validation=skip_validation + func.get("dataSetExternalId", ""), + skip_validation=skip_validation, + action="replace datasetExternalId with dataSetId in function", ) if "fileId" not in func: # The fileID is required for the function to be created, but in the `.create` method @@ -1390,11 +1402,15 @@ def load_resource(self, filepath: Path, ToolGlobals: CDFToolConfig, skip_validat for resource in resources: if resource.get("dataSetExternalId") is not None: ds_external_id = resource.pop("dataSetExternalId") - resource["dataSetId"] = ToolGlobals.verify_dataset(ds_external_id, skip_validation) + resource["dataSetId"] = ToolGlobals.verify_dataset( + ds_external_id, skip_validation, action="replace dataSetExternalId with dataSetId in time series" + ) if "securityCategoryNames" in resource: if security_categories_names := resource.pop("securityCategoryNames", []): security_categories = ToolGlobals.verify_security_categories( - security_categories_names, skip_validation + security_categories_names, + skip_validation, + action="replace securityCategoryNames with securityCategoryIDs in time series", ) resource["securityCategories"] = security_categories if 
resource.get("securityCategories") is None: @@ -1717,7 +1733,9 @@ def load_resource( ) if resource.get("dataSetExternalId") is not None: ds_external_id = resource.pop("dataSetExternalId") - resource["dataSetId"] = ToolGlobals.verify_dataset(ds_external_id, skip_validation) + resource["dataSetId"] = ToolGlobals.verify_dataset( + ds_external_id, skip_validation, action="replace dataSetExternalId with dataSetId in transformation" + ) if resource.get("conflictMode") is None: # Todo; Bug SDK missing default value resource["conflictMode"] = "upsert" @@ -2108,7 +2126,11 @@ def load_resource( for resource in resources: if resource.get("dataSetExternalId") is not None: ds_external_id = resource.pop("dataSetExternalId") - resource["dataSetId"] = ToolGlobals.verify_dataset(ds_external_id, skip_validation) + resource["dataSetId"] = ToolGlobals.verify_dataset( + ds_external_id, + skip_validation, + action="replace datasetExternalId with dataSetId in extraction pipeline", + ) if resource.get("createdBy") is None: # Todo; Bug SDK missing default value (this will be set on the server-side if missing) resource["createdBy"] = "unknown" @@ -2360,11 +2382,15 @@ def load_resource( ) if resource.get("dataSetExternalId") is not None: ds_external_id = resource.pop("dataSetExternalId") - resource["dataSetId"] = ToolGlobals.verify_dataset(ds_external_id, skip_validation) + resource["dataSetId"] = ToolGlobals.verify_dataset( + ds_external_id, skip_validation, action="replace dataSetExternalId with dataSetId in file metadata" + ) if "securityCategoryNames" in resource: if security_categories_names := resource.pop("securityCategoryNames", []): security_categories = ToolGlobals.verify_security_categories( - security_categories_names, skip_validation + security_categories_names, + skip_validation, + action="replace securityCategoryNames with securityCategoriesIDs in file metadata", ) resource["securityCategories"] = security_categories @@ -2421,7 +2447,11 @@ def load_resource( raise FileNotFoundError(f"Could not find file {meta.name} referenced in filepath {filepath.name}") if isinstance(meta.data_set_id, str): # Replace external_id with internal id - meta.data_set_id = ToolGlobals.verify_dataset(meta.data_set_id, skip_validation) + meta.data_set_id = ToolGlobals.verify_dataset( + meta.data_set_id, + skip_validation, + action="replace dataSetExternalId with dataSetId in file metadata", + ) return files_metadata def create(self, items: FileMetadataWriteList) -> FileMetadataList: @@ -2666,8 +2696,6 @@ def load_resource( if "list" not in type_: type_["list"] = False items = ContainerApplyList.load(raw_yaml) - if not skip_validation: - ToolGlobals.verify_spaces(list({item.space for item in items})) for item in items: # Todo Bug in SDK, not setting defaults on load for prop_name in item.properties.keys(): @@ -2861,15 +2889,6 @@ def get_dependent_items(cls, item: dict) -> Iterable[tuple[type[ResourceLoader], elif source.get("type") == "container" and _in_dict(("space", "externalId"), source): yield ContainerLoader, ContainerId(source["space"], source["externalId"]) - def load_resource( - self, filepath: Path, ToolGlobals: CDFToolConfig, skip_validation: bool - ) -> ViewApply | ViewApplyList | None: - loaded = super().load_resource(filepath, ToolGlobals, skip_validation) - if not skip_validation: - items = loaded if isinstance(loaded, ViewApplyList) else [loaded] - ToolGlobals.verify_spaces(list({item.space for item in items})) - return loaded - def are_equal(self, local: ViewApply, cdf_resource: View) -> bool: local_dumped = 
local.dump() cdf_resource_dumped = cdf_resource.as_write().dump() @@ -3049,15 +3068,6 @@ def get_dependent_items(cls, item: dict) -> Iterable[tuple[type[ResourceLoader], if _in_dict(("space", "externalId"), view): yield ViewLoader, ViewId(view["space"], view["externalId"], view.get("version")) - def load_resource( - self, filepath: Path, ToolGlobals: CDFToolConfig, skip_validation: bool - ) -> DataModelApply | DataModelApplyList | None: - loaded = super().load_resource(filepath, ToolGlobals, skip_validation) - if not skip_validation: - items = loaded if isinstance(loaded, DataModelApplyList) else [loaded] - ToolGlobals.verify_spaces(list({item.space for item in items})) - return loaded - def are_equal(self, local: DataModelApply, cdf_resource: DataModel) -> bool: local_dumped = local.dump() cdf_resource_dumped = cdf_resource.as_write().dump() @@ -3178,10 +3188,7 @@ def are_equal(self, local: NodeApply, cdf_resource: Node) -> bool: def load_resource(self, filepath: Path, ToolGlobals: CDFToolConfig, skip_validation: bool) -> NodeApplyListWithCall: raw = load_yaml_inject_variables(filepath, ToolGlobals.environment_variables()) - loaded = NodeApplyListWithCall._load(raw, cognite_client=self.client) - if not skip_validation: - ToolGlobals.verify_spaces(list({item.space for item in loaded})) - return loaded + return NodeApplyListWithCall._load(raw, cognite_client=self.client) def dump_resource( self, resource: NodeApply, source_file: Path, local_resource: NodeApply diff --git a/cognite_toolkit/_cdf_tk/utils.py b/cognite_toolkit/_cdf_tk/utils.py index 3d92cef9e..5982c45ce 100644 --- a/cognite_toolkit/_cdf_tk/utils.py +++ b/cognite_toolkit/_cdf_tk/utils.py @@ -36,16 +36,24 @@ from cognite.client.config import global_config from cognite.client.credentials import CredentialProvider, OAuthClientCredentials, OAuthInteractive, Token from cognite.client.data_classes import CreatedSession -from cognite.client.data_classes.capabilities import Capability, SecurityCategoriesAcl +from cognite.client.data_classes.capabilities import ( + Capability, + DataSetsAcl, + ExtractionPipelinesAcl, + SecurityCategoriesAcl, +) from cognite.client.data_classes.data_modeling import View, ViewId -from cognite.client.exceptions import CogniteAPIError, CogniteAuthError +from cognite.client.data_classes.iam import TokenInspection +from cognite.client.exceptions import CogniteAPIError from cognite.client.testing import CogniteClientMock from rich import print from rich.prompt import Confirm, Prompt -from cognite_toolkit._cdf_tk.constants import _RUNNING_IN_BROWSER, ROOT_MODULES +from cognite_toolkit._cdf_tk.constants import _RUNNING_IN_BROWSER, ROOT_MODULES, URL from cognite_toolkit._cdf_tk.exceptions import ( AuthenticationError, + AuthorizationError, + ResourceRetrievalError, ToolkitError, ToolkitResourceMissingError, ToolkitYAMLFormatError, @@ -377,7 +385,9 @@ class CDFToolConfig: class _Cache: existing_spaces: set[str] = field(default_factory=set) data_set_id_by_external_id: dict[str, int] = field(default_factory=dict) + extraction_pipeline_id_by_external_id: dict[str, int] = field(default_factory=dict) security_categories_by_name: dict[str, int] = field(default_factory=dict) + token_inspect: TokenInspection | None = None def __init__( self, @@ -498,19 +508,6 @@ def initialize_from_auth_variables(self, auth: AuthVariables) -> None: ) self._update_environment_variables() - def reinitialize_client(self) -> None: - """Reinitialize the client with the current configuration.""" - if self._client is None or 
self._credentials_provider is None or self._cdf_url is None or self._project is None: - raise ValueError("Client is not initialized.") - self._client = CogniteClient( - ClientConfig( - client_name=self._client_name, - base_url=self._cdf_url, - project=self._project, - credentials=self._credentials_provider, - ) - ) - def _update_environment_variables(self) -> None: """This updates the cache environment variables with the auth variables. @@ -611,76 +608,51 @@ def environ(self, attr: str, default: str | None = None, fail: bool = True) -> s self._environ[attr] = var return var - def verify_client( - self, - capabilities: dict[str, list[str]] | None = None, - data_set_id: int = 0, - space_id: str | None = None, + @property + def _token_inspection(self) -> TokenInspection: + if self._cache.token_inspect is None: + try: + self._cache.token_inspect = self.client.iam.token.inspect() + except CogniteAPIError as e: + raise AuthorizationError( + f"Don't seem to have any access rights. {e}\n" + f"Please visit [link={URL.configure_access}]the documentation[/link] " + f"and ensure you have configured your access correctly." + ) from e + return self._cache.token_inspect + + def verify_authorization( + self, capabilities: Capability | Sequence[Capability], action: str | None = None ) -> CogniteClient: """Verify that the client has correct credentials and required access rights - Supply requirement CDF ACLs to verify if you have correct access - capabilities = { - "filesAcl": ["READ", "WRITE"], - "datasetsAcl": ["READ", "WRITE"] - } - The data_set_id will be used when verifying that the client has access to the dataset. - This approach can be reused for any usage of the Cognite Python SDK. - Args: - capabilities (dict[list], optional): access capabilities to verify - data_set_id (int): id of dataset that access should be granted to - space_id (str): id of space that access should be granted to + capabilities (Capability | Sequence[Capability]): access capabilities to verify + action (str, optional): What you are trying to do. It is used with the error message Defaults to None. - Yields: + Returns: CogniteClient: Verified client with access rights - Re-raises underlying SDK exception """ - capabilities = capabilities or {} - try: - # Using the token/inspect endpoint to check if the client has access to the project. - # The response also includes access rights, which can be used to check if the client has the - # correct access for what you want to do. - resp = self.client.iam.token.inspect() - if resp is None or len(resp.capabilities.data) == 0: - raise CogniteAuthError("Don't have any access rights. Check credentials.") - except Exception as e: - raise e - scope: dict[str, dict[str, Any]] = {} - if data_set_id > 0: - scope["dataSetScope"] = {"ids": [data_set_id]} - if space_id is not None: - scope["spaceScope"] = {"ids": [space_id]} - if space_id is None and data_set_id == 0: - scope["all"] = {} - try: - caps = [ - Capability.load( - { - cap: { - "actions": actions, - "scope": scope, - }, - } - ) - for cap, actions in capabilities.items() - ] - except Exception: - raise ValueError(f"Failed to load capabilities from {capabilities}. 
Wrong syntax?") - comp = self.client.iam.compare_capabilities(resp.capabilities, caps) - if len(comp) > 0: - print(f"Missing necessary CDF access capabilities: {comp}") - raise CogniteAuthError("Don't have correct access rights.") - return self.client - - def verify_capabilities(self, capability: Capability | Sequence[Capability]) -> CogniteClient: - missing_capabilities = self.client.iam.verify_capabilities(capability) - if len(missing_capabilities) > 0: - print(f"Missing necessary CDF access capabilities: {missing_capabilities}") + token_inspect = self._token_inspection + missing_capabilities = self.client.iam.compare_capabilities(token_inspect.capabilities, capabilities) + if missing_capabilities: + missing = " - \n".join(repr(c) for c in missing_capabilities) + first_sentence = "Don't have correct access rights" + if action: + first_sentence += f" to {action}." + else: + first_sentence += "." + raise AuthorizationError( + f"{first_sentence} Missing:\n{missing}\n" + f"Please [blue][link={URL.auth_toolkit}]click here[/link][/blue] to visit the documentation " + "and ensure that you have setup authentication for the CDF toolkit correctly." + ) return self.client - def verify_dataset(self, data_set_external_id: str, skip_validation: bool = False) -> int: + def verify_dataset( + self, data_set_external_id: str, skip_validation: bool = False, action: str | None = None + ) -> int: """Verify that the configured data set exists and is accessible Args: @@ -688,33 +660,41 @@ def verify_dataset(self, data_set_external_id: str, skip_validation: bool = Fals skip_validation (bool): Skip validation of the data set. If this is set, the function will not check for access rights to the data set and return -1 if the dataset does not exist or you don't have access rights to it. Defaults to False. + action (str, optional): What you are trying to do. It is used with the error message Defaults to None. + Returns: data_set_id (int) - Re-raises underlying SDK exception """ if data_set_external_id in self._cache.data_set_id_by_external_id: return self._cache.data_set_id_by_external_id[data_set_external_id] + if skip_validation: + return -1 + + self.verify_authorization( + DataSetsAcl( + [DataSetsAcl.Action.Read], + ExtractionPipelinesAcl.Scope.All(), + ), + action=action, + ) try: data_set = self.client.data_sets.retrieve(external_id=data_set_external_id) except CogniteAPIError as e: - if skip_validation: - return -1 - raise CogniteAuthError("Don't have correct access rights. Need READ and WRITE on datasetsAcl.") from e - except Exception as e: - if skip_validation: - return -1 - raise e + raise ResourceRetrievalError(f"Failed to retrieve data set {data_set_external_id}: {e}") + if data_set is not None and data_set.id is not None: self._cache.data_set_id_by_external_id[data_set_external_id] = data_set.id return data_set.id - if skip_validation: - return -1 - raise ValueError( - f"Data set {data_set_external_id} does not exist, you need to create it first. Do this by adding a config file to the data_sets folder." + raise ToolkitResourceMissingError( + f"Data set {data_set_external_id} does not exist, you need to create it first. 
" + f"Do this by adding a config file to the data_sets folder.", + data_set_external_id, ) - def verify_extraction_pipeline(self, external_id: str, skip_validation: bool = False) -> int: + def verify_extraction_pipeline( + self, external_id: str, skip_validation: bool = False, action: str | None = None + ) -> int: """Verify that the configured extraction pipeline exists and is accessible Args: @@ -722,70 +702,45 @@ def verify_extraction_pipeline(self, external_id: str, skip_validation: bool = F skip_validation (bool): Skip validation of the extraction pipeline. If this is set, the function will not check for access rights to the extraction pipeline and return -1 if the extraction pipeline does not exist or you don't have access rights to it. Defaults to False. + action (str, optional): What you are trying to do. It is used with the error message Defaults to None. + Yields: extraction pipeline id (int) - Re-raises underlying SDK exception """ - if not skip_validation: - self.verify_client(capabilities={"extractionPipelinesAcl": ["READ"]}) + if external_id in self._cache.extraction_pipeline_id_by_external_id: + return self._cache.extraction_pipeline_id_by_external_id[external_id] + if skip_validation: + return -1 + + self.verify_authorization( + ExtractionPipelinesAcl([ExtractionPipelinesAcl.Action.Read], ExtractionPipelinesAcl.Scope.All()), action + ) try: pipeline = self.client.extraction_pipelines.retrieve(external_id=external_id) except CogniteAPIError as e: - if skip_validation: - return -1 - raise CogniteAuthError("Don't have correct access rights. Need READ on extractionPipelinesAcl.") from e - except Exception as e: - if skip_validation: - return -1 - raise e + raise ResourceRetrievalError(f"Failed to retrieve extraction pipeline {external_id}: {e}") if pipeline is not None and pipeline.id is not None: + self._cache.extraction_pipeline_id_by_external_id[external_id] = pipeline.id return pipeline.id - if not skip_validation: - print( - f" [bold yellow]WARNING[/] Extraction pipeline {external_id} does not exist. It may have been deleted, or not been part of the module." - ) - return -1 - - def verify_spaces(self, space: str | list[str]) -> list[str]: - """Verify that the configured space exists and is accessible - - Args: - space (str): External id of the space to verify - - Yields: - spaces (str) - Re-raises underlying SDK exception - """ - if isinstance(space, str): - spaces = [space] - else: - spaces = space - - if all([s in self._cache.existing_spaces for s in spaces]): - return spaces - - self.verify_client(capabilities={"dataModelsAcl": ["READ"]}) - try: - existing = self.client.data_modeling.spaces.retrieve(spaces) - except CogniteAPIError as e: - raise CogniteAuthError("Don't have correct access rights. Need READ on dataModelsAcl.") from e - - if missing := (set(spaces) - set(existing.as_ids())): - raise ValueError( - f"Space {missing} does not exist, you need to create it first. Do this by adding a config file to the data model folder." - ) - self._cache.existing_spaces.update([space.space for space in existing]) - return [space.space for space in existing] + raise ToolkitResourceMissingError( + "Extraction pipeline does not exist. You need to create it first.", external_id + ) @overload - def verify_security_categories(self, names: str, skip_validation: bool = False) -> int: ... + def verify_security_categories( + self, names: str, skip_validation: bool = False, action: str | None = None + ) -> int: ... 
@overload - def verify_security_categories(self, names: list[str], skip_validation: bool = False) -> list[int]: ... + def verify_security_categories( + self, names: list[str], skip_validation: bool = False, action: str | None = None + ) -> list[int]: ... - def verify_security_categories(self, names: str | list[str], skip_validation: bool = False) -> int | list[int]: + def verify_security_categories( + self, names: str | list[str], skip_validation: bool = False, action: str | None = None + ) -> int | list[int]: if skip_validation: return [-1 for _ in range(len(names))] if isinstance(names, list) else -1 if isinstance(names, str) and names in self._cache.security_categories_by_name: @@ -798,7 +753,10 @@ def verify_security_categories(self, names: str | list[str], skip_validation: bo } if len(existing_by_name) == len(names): return [existing_by_name[name] for name in names] - self.verify_client(capabilities={SecurityCategoriesAcl._capability_name: ["LIST"]}) + + self.verify_authorization( + SecurityCategoriesAcl([SecurityCategoriesAcl.Action.List], SecurityCategoriesAcl.Scope.All()), action + ) all_security_categories = self.client.iam.security_categories.list(limit=-1) self._cache.security_categories_by_name.update( diff --git a/tests/tests_unit/conftest.py b/tests/tests_unit/conftest.py index 58ff62e6f..6942f5c8b 100644 --- a/tests/tests_unit/conftest.py +++ b/tests/tests_unit/conftest.py @@ -103,8 +103,7 @@ def cdf_tool_config(cognite_client_approval: ApprovalCogniteClient, monkeypatch: real_config = CDFToolConfig(cluster="bluefield", project="pytest-project") # Build must always be executed from root of the project cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cdf_tool.failed = False cdf_tool.environment_variables.side_effect = real_config.environment_variables diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py b/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py index 8e8173b4f..9a72feef0 100644 --- a/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py @@ -174,7 +174,7 @@ def test_auth_verify_no_capabilities( def mock_verify_client(*args, **kwargs): raise Exception("No capabilities") - cdf_tool_config.verify_client.side_effect = mock_verify_client + cdf_tool_config.verify_authorization.side_effect = mock_verify_client cmd = AuthCommand(print_warning=False) with pytest.raises(AuthorizationError) as e: cmd.check_auth(cdf_tool_config, admin_group_file=Path(AUTH_DATA / "rw-group.yaml")) diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_describe.py b/tests/tests_unit/test_cdf_tk/test_commands/test_describe.py index f2406831d..521584e4d 100644 --- a/tests/tests_unit/test_cdf_tk/test_commands/test_describe.py +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_describe.py @@ -31,8 +31,7 @@ def test_describe_datamodel( cdf_tool = MagicMock(spec=CDFToolConfig) cdf_tool.project = "test" cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client space_loader = 
SpaceLoader.create_loader(cdf_tool, None) data_model_loader = DataModelLoader.create_loader(cdf_tool, None) diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_run.py b/tests/tests_unit/test_cdf_tk/test_commands/test_run.py index d527c61fa..dd2a16812 100644 --- a/tests/tests_unit/test_cdf_tk/test_commands/test_run.py +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_run.py @@ -13,8 +13,7 @@ def test_get_oneshot_session(cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client session = get_oneshot_session(cdf_tool.client) assert session.id == 5192234284402249 assert session.nonce == "QhlCnImCBwBNc72N" @@ -25,8 +24,7 @@ def test_get_oneshot_session(cognite_client_approval: ApprovalCogniteClient): def test_run_transformation(cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client transformation = Transformation( name="Test transformation", external_id="test", @@ -40,8 +38,7 @@ def test_run_transformation(cognite_client_approval: ApprovalCogniteClient): def test_run_function(cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client function = Function( id=1234567890, name="Test function", @@ -69,8 +66,7 @@ def test_run_function(cognite_client_approval: ApprovalCogniteClient): def test_run_local_function(cognite_client_approval: ApprovalCogniteClient) -> None: cdf_tool = MagicMock(spec=CDFToolConfig) cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client function = Function( id=1234567890, name="Test function", diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py index f72078dbd..2effaa569 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py @@ -66,8 +66,7 @@ def test_loader_class( data_regression: DataRegressionFixture, ): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cdf_tool.data_set_id = 999 @@ -91,8 +90,7 @@ def test_deploy_resource_order(self, cognite_client_approval: ApprovalCogniteCli ) expected_order = 
["MyView", "MyOtherView"] cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cmd = DeployCommand(print_warning=False) diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py index 1e1da433e..6e4005478 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py @@ -11,8 +11,7 @@ class TestDataModelLoader: def test_update_data_model_random_view_order(self, cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cdf_data_model = dm.DataModel( space="sp_space", diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py index 4fd7a0967..6f8dfbe32 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py @@ -12,8 +12,7 @@ class TestDataSetsLoader: def test_upsert_data_set(self, cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client loader = DataSetsLoader.create_loader(cdf_tool, None) diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py index ee7b3a194..7e6c8c293 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py @@ -38,8 +38,7 @@ def test_load_extraction_pipeline_upsert_create_one( self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch ): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cognite_client_approval.append( @@ -54,8 +53,7 @@ def test_load_extraction_pipeline_upsert_update_one( self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch ): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cognite_client_approval.append( @@ -83,8 +81,7 @@ def 
test_load_extraction_pipeline_delete_one( self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch ): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client cognite_client_approval.append( diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py index 12775d32a..a3bc9deb3 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py @@ -11,8 +11,7 @@ class TestFunctionLoader: def test_load_functions(self, cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client loader = FunctionLoader.create_loader(cdf_tool, None) loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_functions.yaml", cdf_tool, skip_validation=False) @@ -20,8 +19,7 @@ def test_load_functions(self, cognite_client_approval: ApprovalCogniteClient): def test_load_function(self, cognite_client_approval: ApprovalCogniteClient): cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client loader = FunctionLoader.create_loader(cdf_tool, None) loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_function.yaml", cdf_tool, skip_validation=False) diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py index c5d34d182..1594b25fd 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py @@ -2,7 +2,6 @@ import yaml from _pytest.monkeypatch import MonkeyPatch -from cognite.client.data_classes import DataSet from cognite_toolkit._cdf_tk.loaders import TimeSeriesLoader from cognite_toolkit._cdf_tk.utils import CDFToolConfig @@ -42,7 +41,7 @@ def test_load_skip_validation_with_preexisting_dataset( cdf_tool_config_real: CDFToolConfig, monkeypatch: MonkeyPatch, ) -> None: - cognite_client_approval.append(DataSet, DataSet(id=12345, external_id="ds_timeseries_oid")) + cdf_tool_config_real._cache.data_set_id_by_external_id["ds_timeseries_oid"] = 12345 loader = TimeSeriesLoader(cognite_client_approval.mock_client, None) mock_read_yaml_file({"timeseries.yaml": yaml.safe_load(self.timeseries_yaml)}, monkeypatch) diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py index 26de1d664..36aa634af 100644 --- a/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py @@ -63,8 +63,7 @@ def test_valid_spec(self, item: dict): def test_update_view_with_interface(self, cognite_client_approval: ApprovalCogniteClient): cdf_tool = 
MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.verify_authorization.return_value = cognite_client_approval.mock_client cdf_tool.client = cognite_client_approval.mock_client prop1 = dm.MappedProperty( dm.ContainerId(space="sp_space", external_id="container_id"), diff --git a/tests/tests_unit/test_cdf_tk/test_utils.py b/tests/tests_unit/test_cdf_tk/test_utils.py index dda6d7baa..82e7c76f6 100644 --- a/tests/tests_unit/test_cdf_tk/test_utils.py +++ b/tests/tests_unit/test_cdf_tk/test_utils.py @@ -10,7 +10,7 @@ import pytest import yaml -from cognite.client._api.iam import TokenAPI, TokenInspection +from cognite.client._api.iam import IAMAPI, TokenAPI, TokenInspection from cognite.client.credentials import OAuthClientCredentials, OAuthInteractive from cognite.client.data_classes.capabilities import ( DataSetsAcl, @@ -62,6 +62,7 @@ def test_dataset_create(): with patch.object(CDFToolConfig, "__init__", mocked_init): instance = CDFToolConfig() instance._client.config.project = "cdf-project-templates" + instance._client.iam.compare_capabilities = IAMAPI.compare_capabilities instance._client.iam.token.inspect = Mock( spec=TokenAPI.inspect, return_value=TokenInspection( diff --git a/tests/tests_unit/test_cdf_tk/tests_constants.py b/tests/tests_unit/test_cdf_tk/tests_constants.py new file mode 100644 index 000000000..0caca2904 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/tests_constants.py @@ -0,0 +1,12 @@ +import pytest +import requests + +from cognite_toolkit._cdf_tk.constants import URL + + +@pytest.mark.parametrize( + "url, name", + [pytest.param(url, id=name) for name, url in vars(URL).items() if not name.startswith("_")], +) +def test_url_returns_200(url: str, name: str) -> None: + assert requests.get(url).status_code == 200, f"Failed to get a 200 response from the URL.{name}." 
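PATCH 04 above replaces the dict-based `verify_client(capabilities={...})` checks with a single `verify_authorization` entry point that takes typed `Capability` objects from the Cognite SDK and caches the `token/inspect` result between calls. A minimal sketch of the new calling convention, assuming a configured `CDFToolConfig` instance; `TimeSeriesAcl` is just an arbitrary example capability, and the `action` string is free-form text used only in the error message:

    from cognite.client.data_classes.capabilities import TimeSeriesAcl

    from cognite_toolkit._cdf_tk.utils import CDFToolConfig

    # Assumes CDF auth environment variables are configured.
    ToolGlobals = CDFToolConfig()

    # Raises AuthorizationError (with a link to the Toolkit auth documentation)
    # if the token lacks the capability; otherwise returns the verified client.
    client = ToolGlobals.verify_authorization(
        TimeSeriesAcl([TimeSeriesAcl.Action.Read], TimeSeriesAcl.Scope.All()),
        action="read time series",
    )

Because the inspection result is cached on `CDFToolConfig._Cache`, a deploy run that verifies many resource types costs a single `token/inspect` request instead of one per loader.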
From 8ef9e9c9eb0203cc6747b4839e93516cede119fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A5l=20R=C3=B8nning?= Date: Wed, 19 Jun 2024 14:47:49 +0200 Subject: [PATCH 05/10] =?UTF-8?q?=F0=9F=90=9F=20=20Fixed=20outdated=20help?= =?UTF-8?q?=20text=20(#675)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed outdated help text Co-authored-by: Anders Albert <60234212+doctrino@users.noreply.github.com> --- cognite_toolkit/_cdf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cognite_toolkit/_cdf.py b/cognite_toolkit/_cdf.py index ddfc33340..ddf62f6e6 100755 --- a/cognite_toolkit/_cdf.py +++ b/cognite_toolkit/_cdf.py @@ -869,7 +869,9 @@ def feature_flag_main(ctx: typer.Context) -> None: "\nDo not enable a flag unless you are familiar with what it does.[/]" ) ) - print("Use [bold yellow]cdf-tk feature list[/] or [bold yellow]cdf-tk feature --[flag] --enabled=True|False[/]") + print( + "Use [bold yellow]cdf-tk features list[/] or [bold yellow]cdf-tk features set --enabled/--disabled[/]" + ) return None From 064d4b853e9bbbfe3947d7c4f353b8654e56c6f8 Mon Sep 17 00:00:00 2001 From: Anders Albert <60234212+doctrino@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:31:47 +0200 Subject: [PATCH 06/10] =?UTF-8?q?[CDF-21820]=20=F0=9F=A7=B9Cleanup=20tests?= =?UTF-8?q?=5Fmigrations=20(#673)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor; remove unused * refactor: renaming to module upgrade * refactor; fix * docs; updated docs * style: typo * refactor: upgrade contribution guideline * refactor: fix a few bugs in the run check script * refactor: delete .venv after project init is created --- .gitignore | 4 +- CONTRIBUTING.md | 4 +- module_upgrade/README.md | 20 ++ .../__init__.py | 0 module_upgrade/calculate_hashes.py | 20 ++ .../run_check.py | 42 +++- tests_migrations/README.md | 69 ------ tests_migrations/calculate_hashes.py | 16 -- tests_migrations/constants.py | 46 ---- tests_migrations/migrations.py | 223 ------------------ tests_migrations/tests_migrations.py | 101 -------- 11 files changed, 76 insertions(+), 469 deletions(-) create mode 100644 module_upgrade/README.md rename {tests_migrations => module_upgrade}/__init__.py (100%) create mode 100644 module_upgrade/calculate_hashes.py rename tests_migrations/run_module_upgrade.py => module_upgrade/run_check.py (85%) delete mode 100644 tests_migrations/README.md delete mode 100644 tests_migrations/calculate_hashes.py delete mode 100644 tests_migrations/constants.py delete mode 100644 tests_migrations/migrations.py delete mode 100644 tests_migrations/tests_migrations.py diff --git a/.gitignore b/.gitignore index 53680d236..bb0c502bf 100644 --- a/.gitignore +++ b/.gitignore @@ -286,7 +286,7 @@ my_project/ # Hide environments used for migration testing **/.venv* -tests_migrations/tmp* +module_upgrade/tmp* cognite_toolkit/config.local.yaml **/functions/**/common @@ -294,4 +294,4 @@ cognite_toolkit/config.local.yaml .venv.* build.* cognite_toolkit/.env.* -tests_migrations/project_inits \ No newline at end of file +module_upgrade/project_inits diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d672f5b35..6e5a4be81 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -108,8 +108,8 @@ To release a new version of the `cdf-tk` tool and the templates, you need to do - `tests/tests_unit/test_cdf_tk/project_for_test/cognite_modules/_system.yaml` 4. Run `poetry lock` to update the `poetry.lock` file. 5. 
Run `pytest tests` locally to ensure that tests pass.
-  6. Follow the guide in [tests_migration](tests_migration/README.md) do detect breaking changes, and
-     update the migration instructions in `cognite_toolkit/templates/_migration.py` if necessary.
+  6. Run `python module_upgrade/run_check.py` to ensure that the `cdf-tk modules upgrade` command works as expected
+     against previous versions. See [Module Upgrade](module_upgrade/README.md) for more information.
 2. Get approval to squash merge the branch into `main`:
    1. Verify that all Github actions pass.
 3. Create a release branch: `release-x.y.z` from `main`:
diff --git a/module_upgrade/README.md b/module_upgrade/README.md
new file mode 100644
index 000000000..72f57a916
--- /dev/null
+++ b/module_upgrade/README.md
@@ -0,0 +1,20 @@
+# Module Upgrade
+
+This directory contains the script `run_check.py`, which checks for breaking changes in the package
+and verifies that the `cdf-tk module upgrade` command works as expected.
+
+## Motivation
+
+This could have been part of the test suite, but it is not, for two reasons:
+
+* It needs a project init for each version of the package, each `~25MB` in size, which is too large to
+  include in the test suite.
+* Running the check is time-consuming, and it only needs to be run before a new release.
+
+## Workflow
+
+1. The constant `cognite_toolkit/_cdf_tk/constants.py:SUPPORT_MODULE_UPGRADE_FROM_VERSION` controls the
+   earliest version that the `cdf-tk module upgrade` command should support.
+2. Run `python module_upgrade/run_check.py` to check that the `cdf-tk module upgrade` command works as expected.
+   If any exceptions are raised, update the `_changes.py` file in the `modules` commands so that the
+   `cdf-tk module upgrade` command succeeds.
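The `SUPPORT_MODULE_UPGRADE_FROM_VERSION` constant referenced in the workflow above feeds `get_versions_since` in `run_check.py` (shown in the next diff). As a rough sketch of what such a helper can look like, with a made-up version list and cutoff (this is not the toolkit's actual implementation):

```python
# Illustrative only: keep the published releases at or above the support cutoff.
# The real helper lives in module_upgrade/run_check.py; inputs here are made up.
from packaging.version import Version

def get_versions_since(minimum: str, published: list[str]) -> list[Version]:
    cutoff = Version(minimum)
    # packaging.version orders pre-releases correctly, e.g. 0.2.0a1 < 0.2.0b1 < 0.2.0.
    return sorted(v for v in map(Version, published) if v >= cutoff)

print(get_versions_since("0.1.4", ["0.1.2", "0.1.4", "0.2.0a1", "0.2.0", "0.2.1"]))
# -> [<Version('0.1.4')>, <Version('0.2.0a1')>, <Version('0.2.0')>, <Version('0.2.1')>]
```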
diff --git a/tests_migrations/__init__.py b/module_upgrade/__init__.py similarity index 100% rename from tests_migrations/__init__.py rename to module_upgrade/__init__.py diff --git a/module_upgrade/calculate_hashes.py b/module_upgrade/calculate_hashes.py new file mode 100644 index 000000000..c0a089307 --- /dev/null +++ b/module_upgrade/calculate_hashes.py @@ -0,0 +1,20 @@ +from pathlib import Path + +from cognite_toolkit._cdf_tk.utils import calculate_directory_hash + +TEST_DIR_ROOT = Path(__file__).resolve().parent +PROJECT_INIT_DIR = TEST_DIR_ROOT / "project_inits" + +# Todo this file can be deleted when we go to 0.3.0alpha and remove +# the old manual migration, cognite_toolkit/_cdf_tk/_migration.yaml + + +def calculate_hashes(): + for directory in PROJECT_INIT_DIR.iterdir(): + version = directory.name.split("_")[1] + version_hash = calculate_directory_hash(directory / "cognite_modules") + print(f"Cognite Module Hash for version {version!r}: {version_hash!r}") + + +if __name__ == "__main__": + calculate_hashes() diff --git a/tests_migrations/run_module_upgrade.py b/module_upgrade/run_check.py similarity index 85% rename from tests_migrations/run_module_upgrade.py rename to module_upgrade/run_check.py index c0f1c6da6..5dba0a2fa 100644 --- a/tests_migrations/run_module_upgrade.py +++ b/module_upgrade/run_check.py @@ -36,7 +36,7 @@ def run() -> None: versions = get_versions_since(SUPPORT_MODULE_UPGRADE_FROM_VERSION) for version in versions: create_project_init(str(version)) - + return print( Panel( "All projects inits created successfully.", @@ -85,13 +85,24 @@ def create_project_init(version: str) -> None: if (TEST_DIR_ROOT / environment_directory).exists(): print(f"Environment for version {version} already exists") else: - print(f"Creating environment for version {version}") - subprocess.run(["python", "-m", "venv", environment_directory]) - if platform.system() == "Windows": - subprocess.run([f"{environment_directory}/Scripts/pip", "install", f"cognite-toolkit=={version}"]) - else: - subprocess.run([f"{environment_directory}/bin/pip", "install", f"cognite-toolkit=={version}"]) - print(f"Environment for version {version} created") + with chdir(TEST_DIR_ROOT): + print(f"Creating environment for version {version}") + create_venv = subprocess.run(["python", "-m", "venv", environment_directory]) + if create_venv.returncode != 0: + raise ValueError(f"Failed to create environment for version {version}") + + if platform.system() == "Windows": + install_toolkit = subprocess.run( + [f"{environment_directory}/Scripts/pip", "install", f"cognite-toolkit=={version}"] + ) + else: + install_toolkit = subprocess.run( + [f"{environment_directory}/bin/pip", "install", f"cognite-toolkit=={version}"] + ) + + if install_toolkit.returncode != 0: + raise ValueError(f"Failed to install toolkit version {version}") + print(f"Environment for version {version} created") modified_env_variables = os.environ.copy() repo_root = TEST_DIR_ROOT.parent @@ -103,8 +114,19 @@ def create_project_init(version: str) -> None: old_version_script_dir = Path(f"{environment_directory}/Scripts/") else: old_version_script_dir = Path(f"{environment_directory}/bin/") - cmd = [str(old_version_script_dir / "cdf-tk"), "init", str(project_init), "--clean"] - _ = subprocess.run(cmd, capture_output=True, shell=True, env=modified_env_variables) + with chdir(TEST_DIR_ROOT): + cmd = [ + str(old_version_script_dir / "cdf-tk"), + "init", + f"{PROJECT_INIT_DIR.name}/{project_init.name}", + "--clean", + ] + output = subprocess.run(cmd, 
capture_output=True, shell=True, env=modified_env_variables) + + if output.returncode != 0: + print(output.stderr.decode()) + raise ValueError(f"Failed to create project init for version {version}.") + print(f"Project init for version {version} created.") shutil.rmtree(environment_directory) diff --git a/tests_migrations/README.md b/tests_migrations/README.md deleted file mode 100644 index c1c56e522..000000000 --- a/tests_migrations/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Migration Tests - -This directory contains tests for comparing a project setup with the previous version of the packages -with the current version of the package. - -## Motivation - -This is used to have a machine check for breaking changes in the package. The results of this check is -used to update `cognite_toolkit/cdf_tk/templates/_migrations.yaml` with instructions on how to migrate from -the previous version of the package to the current version of the package. - -## Workflow - -1. Update `tests_migrations/constants.py` with the previous versions to test against. -2. Follow the instructions in Section [Setup and Running Tests](#setup-and-running-tests) to create the virtual - environments and running the tests. -3. If the tests pass, no further action is needed. -4. If the tests fail, update `tests_migrations/migrations.py:get_migration` function with programmatic instructions on - how to migrate from the previous version of the package to the current version of the package. -5. Then, verify that the tests are now passing. -6. Then update `cognite_toolkit/cdf_tk/templates/_migrations.yaml` with instructions on how to migrate from the - previous version of the package to the current version of the package. - -## Setup and Running Tests - -**Caveat** These tests are only run with `Windows` and `PyCharm`, there might be differences in how these tests -need to be run on other platforms. - -To run the tests you need to setup virtual environments for each version of the package that you want to test. -This can be done by running the `create_environments.py` file in this directory. - -```bash -python tests_migrations/create_environments.py -``` - -This will create a virtual environment. This is done as a separate step than running the tests as it is expensive, -and should thus be an explicit step. - -Then, you can run the tests by running the `tests_migrations.py` file in this directory. - -```bash -pytest tests_migrations/tests_migrations.py -``` - - After running this file you folder structure should look like this: - -```bash -tests_migrations - ┣ 📂.venv_0.1.0b1 - Virtual environment for version 0.1.0b1 - ┣ 📂.venv_0.1.0b2 - ┣ 📂.venv_0.1.0b3 - ┣ 📂.venv_0.1.0b4 - ┣ 📂.venv_0.1.0b5 - ┣ 📂.venv_0.1.0b6 - ┣ 📂build - (created by running tests_migrations.py) Build directory for the modules - ┣ 📂tmp-project - (created by running tests_migrations.py) Temporary project directory - ┣ 📜constants - Contains which previous versions to tests against - ┣ 📜create_environments.py - Creates virtual environments for each version to test against - ┣ 📜tests_migrations.py - Runs the tests - ┗ 📜README.md - This file. -``` - -## Tests - -### tests_init_migrate_build_deploy - -This tests runs the `init`, `build`, `deploy` command in previous versions of the package and -then runs the `build`, `deploy` in the current version. All it ensures is that the commands returns exit status 0. -There is no check that the deploy command stays consistent between versions. 
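The `create_project_init` changes above boil down to a venv-per-version pattern: create a throwaway virtual environment, pip-install a pinned `cognite-toolkit` into it, and run that version's `cdf-tk init` from its scripts directory. A condensed sketch of that pattern, using `check=True` in place of the manual return-code checks (the environment path and version are illustrative):

```python
# Condensed sketch of the venv-per-version pattern in create_project_init above.
# check=True raises CalledProcessError instead of the manual returncode checks
# used in run_check.py; the environment path and version below are illustrative.
import platform
import subprocess
from pathlib import Path

def make_pinned_env(env_dir: Path, version: str) -> Path:
    subprocess.run(["python", "-m", "venv", str(env_dir)], check=True)
    scripts = env_dir / ("Scripts" if platform.system() == "Windows" else "bin")
    subprocess.run([str(scripts / "pip"), "install", f"cognite-toolkit=={version}"], check=True)
    return scripts  # contains the pinned cdf-tk executable

# Example: old_cdf_tk = make_pinned_env(Path(".venv_0.2.1"), "0.2.1") / "cdf-tk"
```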
diff --git a/tests_migrations/calculate_hashes.py b/tests_migrations/calculate_hashes.py deleted file mode 100644 index e39372e90..000000000 --- a/tests_migrations/calculate_hashes.py +++ /dev/null @@ -1,16 +0,0 @@ -from constants import PROJECT_INIT_DIR, SUPPORTED_TOOLKIT_VERSIONS - -from cognite_toolkit._cdf_tk.utils import calculate_directory_hash - - -def calculate_hashes(): - exclude_prefixes = set() - for version in SUPPORTED_TOOLKIT_VERSIONS: - project_init = PROJECT_INIT_DIR / f"project_{version}" - - version_hash = calculate_directory_hash(project_init / "cognite_modules", exclude_prefixes=exclude_prefixes) - print(f"Cognite Module Hash for version {version!r}: {version_hash!r}") - - -if __name__ == "__main__": - calculate_hashes() diff --git a/tests_migrations/constants.py b/tests_migrations/constants.py deleted file mode 100644 index 5c521887f..000000000 --- a/tests_migrations/constants.py +++ /dev/null @@ -1,46 +0,0 @@ -import contextlib -import os -from collections.abc import Iterator -from pathlib import Path - -TEST_DIR_ROOT = Path(__file__).resolve().parent -PROJECT_INIT_DIR = TEST_DIR_ROOT / "project_inits" -PROJECT_INIT_DIR.mkdir(exist_ok=True) - -SUPPORTED_TOOLKIT_VERSIONS = [ - "0.1.0", - "0.1.1", - "0.1.2", - "0.1.3", - "0.1.4", - "0.2.0a1", - "0.2.0a2", - "0.2.0a3", - "0.2.0a4", - "0.2.0a5", - "0.2.0b1", - "0.2.0b2", - "0.2.0b3", - "0.2.0b4", - "0.2.0", - "0.2.1", -] - - -@contextlib.contextmanager -def chdir(new_dir: Path) -> Iterator[None]: - """ - Change directory to new_dir and return to the original directory when exiting the context. - - Args: - new_dir: The new directory to change to. - - """ - current_working_dir = Path.cwd() - os.chdir(new_dir) - - try: - yield - - finally: - os.chdir(current_working_dir) diff --git a/tests_migrations/migrations.py b/tests_migrations/migrations.py deleted file mode 100644 index 249a55241..000000000 --- a/tests_migrations/migrations.py +++ /dev/null @@ -1,223 +0,0 @@ -import re -import shutil -from collections.abc import Callable, Iterable -from pathlib import Path -from typing import Any - -import yaml -from packaging import version - -from cognite_toolkit._cdf_tk.utils import iterate_modules -from cognite_toolkit._version import __version__ - - -def modify_environment_to_run_all_modules(project_path: Path) -> None: - """Modify the environment to run all modules.""" - environments_path = project_path / "environments.yaml" - if environments_path.exists(): - # This is a older version version - environments = yaml.safe_load(environments_path.read_text()) - - modules = [module_path.name for module_path, _ in iterate_modules(project_path)] - - for env_name, env_config in environments.items(): - if env_name == "__system": - continue - env_config["deploy"] = modules - environments_path.write_text(yaml.dump(environments)) - return - config_dev_file = project_path / "config.dev.yaml" - if not config_dev_file.exists(): - raise FileNotFoundError(f"Could not find config.dev.yaml in {project_path}") - config_dev = yaml.safe_load(config_dev_file.read_text()) - config_dev["environment"]["selected_modules_and_packages"] = [ - # The 'cdf_functions_dummy' module uses the common functions code, which is no longer available - # so simply skipping it. 
- module_path.name - for module_path, _ in iterate_modules(project_path) - if module_path.name != "cdf_functions_dummy" - ] - config_dev_file.write_text(yaml.dump(config_dev)) - - -def get_migration(previous_version_str: str, current_version: str) -> Callable[[Path], None]: - previous_version = version.parse(previous_version_str) - changes = Changes() - if previous_version < version.parse("0.2.0b5"): - changes.append(_rename_function_external_dataset_id) - if previous_version < version.parse("0.2.0b4"): - changes.append(_move_common_functions_code) - changes.append(_fix_pump_view_external_id) - - if previous_version < version.parse("0.2.0a3"): - changes.append(_move_system_yaml_to_root) - changes.append(_rename_modules_section_to_variables_in_config_yamls) - - if version.parse("0.1.0b7") <= previous_version: - changes.append(_update_system_yaml) - - if previous_version <= version.parse("0.1.0b4"): - changes.append(_add_name_to_file_configs) - changes.append(_add_ignore_null_fields_to_transformation_configs) - - if previous_version <= version.parse("0.1.0b6"): - changes.append(_to_config_env_yaml) - - return changes - - -class Changes: - def __init__(self) -> None: - self._changes: list[Callable[[Path], None]] = [] - - def append(self, change: Callable[[Path], None]) -> None: - self._changes.append(change) - - def __call__(self, project_path: Path) -> None: - for change in self._changes: - change(project_path) - - -def _rename_function_external_dataset_id(project_path: Path) -> None: - for resource_yaml in project_path.glob("*.yaml"): - if resource_yaml.parent == "functions": - content = resource_yaml.read_text() - content = content.replace("externalDataSetId", "dataSetExternalId") - resource_yaml.write_text(content) - - -def _rename_modules_section_to_variables_in_config_yamls(project_path: Path) -> None: - for config_yaml in project_path.glob("config.*.yaml"): - data = yaml.safe_load(config_yaml.read_text()) - if "modules" in data: - data["variables"] = data.pop("modules") - config_yaml.write_text(yaml.dump(data)) - - -def _move_common_functions_code(project_path: Path) -> None: - # It is complex to move the common functions code, so we will just remove - # the one module that uses it - cdf_functions_dummy = project_path / "cognite_modules" / "examples" / "cdf_functions_dummy" - - if not cdf_functions_dummy.exists(): - return - shutil.rmtree(cdf_functions_dummy) - - -def _fix_pump_view_external_id(project_path: Path) -> None: - pump_view = ( - project_path - / "cognite_modules" - / "experimental" - / "example_pump_data_model" - / "data_models" - / "4.Pump.view.yaml" - ) - if not pump_view.exists(): - raise FileNotFoundError(f"Could not find Pump.view.yaml in {project_path}") - - pump_view.write_text(pump_view.read_text().replace("external_id", "externalId")) - - -def _move_system_yaml_to_root(project_path: Path) -> None: - system_yaml = project_path / "cognite_modules" / "_system.yaml" - if not system_yaml.exists(): - raise FileNotFoundError(f"Could not find _system.yaml in {project_path}") - system_yaml.rename(project_path / "_system.yaml") - - -def _update_system_yaml(project_path: Path) -> None: - old_system_yaml = project_path / "cognite_modules" / "_system.yaml" - new_system_yaml = project_path / "_system.yaml" - if not old_system_yaml.exists() and not new_system_yaml.exists(): - raise FileNotFoundError(f"Could not find _system.yaml in {project_path}") - system_yaml = old_system_yaml if old_system_yaml.exists() else new_system_yaml - data = yaml.safe_load(system_yaml.read_text()) - 
data["cdf_toolkit_version"] = __version__ - system_yaml.write_text(yaml.dump(data)) - - -def _add_name_to_file_configs(project_path: Path) -> None: - # Added required field 'name' to files - for file_yaml in _config_yaml_from_folder_name(project_path, "files"): - if file_yaml.suffix != ".yaml": - continue - data = yaml.safe_load(file_yaml.read_text().replace("{{", "").replace("}}", "")) - for entry in data: - if "name" not in entry: - entry["name"] = entry["externalId"] - file_yaml.write_text(yaml.dump(data)) - - -def _add_ignore_null_fields_to_transformation_configs(project_path: Path) -> None: - for transformation_yaml in _config_yaml_from_folder_name(project_path, "transformations"): - if transformation_yaml.suffix != ".yaml" or transformation_yaml.name.endswith(".schedule.yaml"): - continue - data = yaml.safe_load(transformation_yaml.read_text().replace("{{", "").replace("}}", "")) - if isinstance(data, list): - for entry in data: - if "ignoreNullFields" not in entry: - entry["ignoreNullFields"] = False - elif isinstance(data, dict): - if "ignoreNullFields" not in data: - data["ignoreNullFields"] = False - transformation_yaml.write_text(yaml.dump(data)) - - -def _to_config_env_yaml(project_path: Path) -> None: - """Change introduced in b7""" - default_packages_path = project_path / "cognite_modules" / "default.packages.yaml" - environments_path = project_path / "environments.yaml" - config_path = project_path / "config.yaml" - try: - default_packages: dict[str, Any] = yaml.safe_load(default_packages_path.read_text()) - environments: dict[str, Any] = yaml.safe_load(environments_path.read_text()) - config_yaml: dict[str, Any] = yaml.safe_load(config_path.read_text()) - except FileNotFoundError: - raise FileNotFoundError( - "Could not find one of the required files: default.packages.yaml, environments.yaml, config.yaml" - ) - # Create _system.yaml - system_yaml = default_packages.copy() - system_yaml["cdf_toolkit_version"] = __version__ - (project_path / "cognite_modules" / "_system.yaml").write_text(yaml.dump(system_yaml)) - # Create config.[env].yaml - for env_name, env_config in environments.items(): - if env_name == "__system": - continue - env_config["name"] = env_name - env_config["selected_modules_and_packages"] = env_config.pop("deploy") - config_env_yaml = { - "environment": env_config, - "modules": config_yaml, - } - (project_path / f"config.{env_name}.yaml").write_text(yaml.dump(config_env_yaml)) - # Delete - default_packages_path.unlink() - environments_path.unlink() - config_path.unlink() - # Delete all default files - for file in project_path.glob("**/default.*"): - file.unlink() - - -def _config_yaml_from_folder_name(project: Path, folder_name: str) -> Iterable[Path]: - for module_name, module_files in iterate_modules(project): - for module_file in module_files: - if module_file.parent.name == folder_name: - yield module_file - - -def _version_str_to_tuple(version_str: str) -> tuple[int | str, ...]: - """Small helper function to convert version strings to tuples. 
- >>> _version_str_to_tuple("0.1.0b1") - (0, 1, 0, 'b', 1) - >>> _version_str_to_tuple("0.1.0") - (0, 1, 0) - >>> _version_str_to_tuple("0.1.0-rc1") - (0, 1, 0, 'rc', 1) - """ - version_str = version_str.replace("-", "") - version_str = re.sub(r"([a-z]+)", r".\1.", version_str) - - return tuple(int(x) if x.isdigit() else x for x in version_str.split(".")) diff --git a/tests_migrations/tests_migrations.py b/tests_migrations/tests_migrations.py deleted file mode 100644 index 2f66022ff..000000000 --- a/tests_migrations/tests_migrations.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import platform -import shutil -import subprocess -from collections.abc import Iterable -from pathlib import Path - -import pytest -from packaging import version as version_package - -from cognite_toolkit._version import __version__ -from tests_migrations.constants import SUPPORTED_TOOLKIT_VERSIONS, TEST_DIR_ROOT, chdir -from tests_migrations.migrations import get_migration, modify_environment_to_run_all_modules - - -def cdf_tk_cmd_all_versions() -> Iterable[tuple[Path, str]]: - for version in SUPPORTED_TOOLKIT_VERSIONS: - environment_directory = f".venv{version}" - if (TEST_DIR_ROOT / environment_directory).exists(): - if platform.system() == "Windows": - yield pytest.param(Path(f"{environment_directory}/Scripts/"), version, id=f"cdf-tk-{version}") - else: - yield pytest.param(Path(f"{environment_directory}/bin/"), version, id=f"cdf-tk-{version}") - else: - pytest.skip("Environment for version {version} does not exist, run 'create_environments.py' to create it.") - - -@pytest.fixture(scope="function") -def local_tmp_project_path() -> Path: - project_path = TEST_DIR_ROOT / "tmp-project" - if project_path.exists(): - shutil.rmtree(project_path) - project_path.mkdir(exist_ok=True) - yield project_path - - -@pytest.fixture(scope="function") -def local_build_path() -> Path: - build_path = TEST_DIR_ROOT / "build" - if build_path.exists(): - shutil.rmtree(build_path) - - build_path.mkdir(exist_ok=True) - # This is a small hack to get 0.1.0b1-4 working - (build_path / "file.txt").touch(exist_ok=True) - yield build_path - - -@pytest.mark.parametrize("old_version_script_dir, old_version", list(cdf_tk_cmd_all_versions())) -def tests_init_migrate_build_deploy( - old_version_script_dir: Path, old_version: str, local_tmp_project_path: Path, local_build_path: Path -) -> None: - project_name = local_tmp_project_path.name - build_name = local_build_path.name - - modified_env_variables = os.environ.copy() - repo_root = TEST_DIR_ROOT.parent - if "PYTHONPATH" in modified_env_variables: - # Need to remove the repo root from PYTHONPATH to avoid importing the wrong version of the toolkit - # (This is typically set by the IDE, for example, PyCharm sets it when running tests). - modified_env_variables["PYTHONPATH"] = modified_env_variables["PYTHONPATH"].replace(str(repo_root), "") - previous_version = str(old_version_script_dir / "cdf-tk") - - with chdir(TEST_DIR_ROOT): - is_upgrade = True - for cmd in [ - [previous_version, "--version"], - [previous_version, "init", project_name, "--clean"], - [previous_version, "build", project_name, "--env", "dev"] - + (["--clean"] if version_package.parse(old_version) < version_package.parse("0.2.0a3") else []), - [previous_version, "deploy", "--env", "dev", "--dry-run"], - # This runs the cdf-tk command from the cognite_toolkit package in the ROOT of the repo. 
-            ["cdf-tk", "--version"],
-            ["cdf-tk", "build", project_name, "--env", "dev", "--build-dir", build_name],
-            ["cdf-tk", "deploy", "--env", "dev", "--dry-run"],
-        ]:
-            if cmd[0] == "cdf-tk" and is_upgrade:
-                migration = get_migration(old_version, __version__)
-                migration(local_tmp_project_path)
-                is_upgrade = False
-
-            if cmd[:2] == [previous_version, "build"]:
-                # This is to ensure that we test all modules.
-                modify_environment_to_run_all_modules(local_tmp_project_path)
-
-            kwargs = dict(env=modified_env_variables) if cmd[0] == previous_version else dict()
-            output = subprocess.run(cmd, capture_output=True, shell=True, **kwargs)
-
-            messaged = output.stderr.decode() or output.stdout.decode()
-            assert output.returncode == 0, f"Failed to run {cmd[0]}: {messaged}"
-
-            if cmd[-1] == "--version":
-                # This is to check that we use the expected version of the toolkit.
-                stdout = output.stdout.decode("utf-8").strip()
-                print(f"cmd: {cmd}")
-                print(f"output: {output}")
-                print(f"stdout: {stdout}")
-                expected_version = __version__ if cmd[0] == "cdf-tk" else old_version
-                assert stdout.startswith(
-                    f"CDF-Toolkit version: {expected_version}"
-                ), f"Failed to setup the correct environment for {expected_version}"

From 752a40281b15cf4c6776da00bafa3ceffc2654cd Mon Sep 17 00:00:00 2001
From: Anders Albert <60234212+doctrino@users.noreply.github.com>
Date: Wed, 19 Jun 2024 16:26:43 +0200
Subject: [PATCH 07/10] =?UTF-8?q?[CDF-21796]=20=F0=9F=A4=A8=20Improved=20c?=
 =?UTF-8?q?df-tk=20build=20if=20`.env`=20is=20missing=20(#674)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* build; gitignore

* fix: Required type to be dev, staging or prod

* refactor: include type in build panel

* refactor: improved messages

* refactor: improved message

* build; changelog

---
 CHANGELOG.cdf-tk.md                       | 12 +++
 cognite_toolkit/_api/data_classes.py      |  2 +-
 cognite_toolkit/_api/modules_api.py       |  4 +-
 cognite_toolkit/_cdf_tk/commands/build.py |  5 +-
 .../_cdf_tk/data_classes/_config_yaml.py   | 85 ++++++++++++------
 5 files changed, 73 insertions(+), 35 deletions(-)

diff --git a/CHANGELOG.cdf-tk.md b/CHANGELOG.cdf-tk.md
index e0fe638e3..b0b53198b 100644
--- a/CHANGELOG.cdf-tk.md
+++ b/CHANGELOG.cdf-tk.md
@@ -15,6 +15,18 @@ Changes are grouped as follows:
 - `Fixed` for any bug fixes.
 - `Security` in case of vulnerabilities.

+## TBD
+
+### Improved
+
+- When running `cdf-tk build` with the `CDF_PROJECT` environment variable missing, the user now gets a more
+  informative error message.
+
+### Fixed
+
+- The variable `type` in the `environment` section of the `config.[env].yaml` now raises an error if it is not
+  set to `dev`, `staging`, or `prod`.
+ ## [0.2.2] - 2024-06-18 ### Improved diff --git a/cognite_toolkit/_api/data_classes.py b/cognite_toolkit/_api/data_classes.py index 66a975e0f..9c5b77771 100644 --- a/cognite_toolkit/_api/data_classes.py +++ b/cognite_toolkit/_api/data_classes.py @@ -20,7 +20,7 @@ _DUMMY_ENVIRONMENT = Environment( name="not used", project="not used", - build_type="not used", + build_type="dev", selected=[], ) diff --git a/cognite_toolkit/_api/modules_api.py b/cognite_toolkit/_api/modules_api.py index 1d0aa49cf..6b951e819 100644 --- a/cognite_toolkit/_api/modules_api.py +++ b/cognite_toolkit/_api/modules_api.py @@ -5,7 +5,7 @@ import tempfile from collections.abc import Sequence from pathlib import Path -from typing import Any, overload +from typing import Any, Literal, cast, overload from unittest.mock import MagicMock import typer @@ -78,7 +78,7 @@ def _build(self, modules: Sequence[ModuleMeta], verbose: bool) -> None: environment=Environment( name=self._build_env, project=self._project_name, - build_type=self._build_env, + build_type=cast(Literal["dev"], self._build_env), selected=[module.name for module in modules], ), filepath=Path(""), diff --git a/cognite_toolkit/_cdf_tk/commands/build.py b/cognite_toolkit/_cdf_tk/commands/build.py index c6b2706f7..05b76bb6c 100644 --- a/cognite_toolkit/_cdf_tk/commands/build.py +++ b/cognite_toolkit/_cdf_tk/commands/build.py @@ -94,7 +94,7 @@ def execute(self, verbose: bool, source_path: Path, build_dir: Path, build_env_n print( Panel( f"Building {directory_name}:\n - Toolkit Version '{__version__!s}'\n" - f" - Environment {build_env_name!r}\n" + f" - Environment name {build_env_name!r}, type {config.environment.build_type!r}.\n" f" - Config '{config.filepath!s}'" f"\n{module_locations}", expand=False, @@ -136,7 +136,8 @@ def build_config( else: build_dir.mkdir(exist_ok=True) - config.validate_environment() + if issue := config.validate_environment(): + self.warn(issue) module_parts_by_name: dict[str, list[tuple[str, ...]]] = defaultdict(list) available_modules: set[str | tuple[str, ...]] = set() diff --git a/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py b/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py index cd1746428..0f19ed2a8 100644 --- a/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py +++ b/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py @@ -25,8 +25,10 @@ from cognite_toolkit._cdf_tk.loaders import LOADER_BY_FOLDER_NAME from cognite_toolkit._cdf_tk.tk_warnings import ( FileReadWarning, + MediumSeverityWarning, MissingFileWarning, SourceFileModifiedWarning, + ToolkitWarning, WarningList, ) from cognite_toolkit._cdf_tk.utils import YAMLComment, YAMLWithComments, calculate_str_or_file_hash, flatten_dict @@ -39,31 +41,41 @@ class Environment: name: str project: str - build_type: str + build_type: Literal["dev", "staging", "prod"] selected: list[str | tuple[str, ...]] + def __post_init__(self) -> None: + if self.build_type not in {"dev", "staging", "prod"}: + raise ToolkitEnvError( + f"Invalid type {self.build_type} in {self.name!s}. Must be one of 'dev', 'staging', 'prod'." 
+ ) + @classmethod def load(cls, data: dict[str, Any], build_name: str) -> Environment: _deprecation_selected(data) - try: - return Environment( - name=build_name, - project=data["project"], - build_type=data["type"], - selected=[ - tuple([part for part in selected.split(MODULE_PATH_SEP) if part]) - if MODULE_PATH_SEP in selected - else selected - for selected in data["selected"] or [] - ], + if missing := {"name", "project", "type", "selected"} - set(data.keys()): + raise ToolkitEnvError( + f"Environment section is missing one or more required fields: {missing} in {BuildConfigYAML._file_name(build_name)!s}" ) - except KeyError: + build_type = data["type"] + if build_type not in {"dev", "staging", "prod"}: raise ToolkitEnvError( - "Environment section is missing one or more required fields: 'name', 'project', 'type', or " - f"'selected' in {BuildConfigYAML._file_name(build_name)!s}" + f"Invalid type {build_type} in {BuildConfigYAML._file_name(build_name)!s}. Must be one of 'dev', 'staging', 'prod'." ) + return Environment( + name=build_name, + project=data["project"], + build_type=build_type, + selected=[ + tuple([part for part in selected.split(MODULE_PATH_SEP) if part]) + if MODULE_PATH_SEP in selected + else selected + for selected in data["selected"] or [] + ], + ) + def dump(self) -> dict[str, Any]: return { "name": self.name, @@ -110,24 +122,37 @@ def set_environment_variables(self) -> None: os.environ["CDF_ENVIRON"] = self.environment.name os.environ["CDF_BUILD_TYPE"] = self.environment.build_type - def validate_environment(self) -> None: + def validate_environment(self) -> ToolkitWarning | None: if _RUNNING_IN_BROWSER: return None - env_name, env_project = self.environment.name, self.environment.project + project = self.environment.project + project_env = os.environ.get("CDF_PROJECT") + if project_env == project: + return None + + build_type = self.environment.build_type + env_name = self.environment.name file_name = self._file_name(env_name) - if (project_env := os.environ.get("CDF_PROJECT", "")) != env_project: - if env_name in {"dev", "local", "demo"}: - print( - f" [bold yellow]WARNING:[/] Project name mismatch (CDF_PROJECT) between {file_name!s} " - f"({env_project}) and what is defined in environment ({project_env}). " - f"Environment is {env_name}, continuing (would have STOPPED for staging and prod)..." - ) - else: - raise ToolkitEnvError( - f"Project name mismatch (CDF_PROJECT) between {file_name!s} ({env_project}) and what is " - f"defined in environment ({project_env=} != {env_project=})." - ) - return None + missing_message = ( + "No 'CDF_PROJECT' environment variable set. This is expected to match the project " + f"set in environment section of {file_name!r}.\nThis is required for " + "building configurations for staging and prod environments to ensure that you do " + "not accidentally deploy to the wrong project." + ) + mismatch_message = ( + f"Project name mismatch between project set in the environment section of {file_name!r} and the " + f"environment variable 'CDF_PROJECT', {project} ≠ {project_env}.\nThis is required for " + "building configurations for staging and prod environments to ensure that you do not " + "accidentally deploy to the wrong project." 
+        )
+        if build_type != "dev" and project_env is None:
+            raise ToolkitEnvError(missing_message)
+        elif build_type != "dev":
+            raise ToolkitEnvError(mismatch_message)
+        elif build_type == "dev" and project_env is None:
+            return MediumSeverityWarning(missing_message)
+        else:
+            return MediumSeverityWarning(mismatch_message)

     @classmethod
     def load(cls, data: dict[str, Any], build_env_name: str, filepath: Path) -> BuildConfigYAML:

From 2d4313c51b647563e39dd8146b6b1dd52e8f9bcc Mon Sep 17 00:00:00 2001
From: Anders Albert <60234212+doctrino@users.noreply.github.com>
Date: Thu, 20 Jun 2024 10:35:44 +0200
Subject: [PATCH 08/10] =?UTF-8?q?[CDF-21862]=20=F0=9F=98=8FImport=20transf?=
 =?UTF-8?q?ormation-cli=20(#677)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* refactor: setup shell for new import command

* refactor: allow - for _

* refactor: setup default args

* docs; where to go

* tests: added test data

* tests: added basic test of transformation cli

* refactor; implemented conversion of manifest

* refactor; table

* refactor; finished implementation

* tests: Finished test data

* refactor: moved get client to init

* tests: Finished test

* build; changelog

* style

* refactor: renaming variables

---
 CHANGELOG.cdf-tk.md                           |   7 +
 cognite_toolkit/_cdf.py                       |   5 +
 .../_cdf_tk/commands/featureflag.py           |   3 +-
 cognite_toolkit/_cdf_tk/exceptions.py         |   4 +
 .../_cdf_tk/prototypes/commands/import_.py    | 217 ++++++++++++++++++
 .../_cdf_tk/prototypes/import_app.py          |  42 ++++
 tests/tests_unit/data/__init__.py             |   1 +
 .../manifest.Notification.yaml                |   2 +
 .../transformation_cli/manifest.Schedule.yaml |   3 +
 .../manifest.Transformation.yaml              |  31 +++
 .../data/transformation_cli/manifest.yaml     |  71 ++++++
 .../test_cdf_tk/test_commands/test_import.py  |  24 ++
 12 files changed, 409 insertions(+), 1 deletion(-)
 create mode 100644 cognite_toolkit/_cdf_tk/prototypes/commands/import_.py
 create mode 100644 cognite_toolkit/_cdf_tk/prototypes/import_app.py
 create mode 100644 tests/tests_unit/data/transformation_cli/manifest.Notification.yaml
 create mode 100644 tests/tests_unit/data/transformation_cli/manifest.Schedule.yaml
 create mode 100644 tests/tests_unit/data/transformation_cli/manifest.Transformation.yaml
 create mode 100644 tests/tests_unit/data/transformation_cli/manifest.yaml
 create mode 100644 tests/tests_unit/test_cdf_tk/test_commands/test_import.py

diff --git a/CHANGELOG.cdf-tk.md b/CHANGELOG.cdf-tk.md
index b0b53198b..a8c287346 100644
--- a/CHANGELOG.cdf-tk.md
+++ b/CHANGELOG.cdf-tk.md
@@ -27,6 +27,13 @@ Changes are grouped as follows:
 - The variable `type` in the `environment` section of the `config.[env].yaml` now raises an error if it is not
   set to `dev`, `staging`, or `prod`.

+### Added
+
+- The preview feature `IMPORT_CMD` was added. This enables you to import a `transformation-cli` manifest into
+  resource configuration files compatible with the `cognite-toolkit`. Activate by running
+  `cdf-tk features set IMPORT_CMD --enable`, and deactivate by running `cdf-tk features set IMPORT_CMD --disable`.
+  Run `cdf-tk import transformation-cli --help` for more information about the import command.
+
 ## [0.2.2] - 2024-06-18

 ### Improved
diff --git a/cognite_toolkit/_cdf.py b/cognite_toolkit/_cdf.py
index ddf62f6e6..9768c32f3 100755
--- a/cognite_toolkit/_cdf.py
+++ b/cognite_toolkit/_cdf.py
@@ -107,6 +107,11 @@ def app() -> NoReturn:
         else:
             _app.command("init")(main_init)

+        if FeatureFlag.is_enabled(Flags.IMPORT_CMD):
+            from cognite_toolkit._cdf_tk.prototypes.import_app import import_app
+
+            _app.add_typer(import_app, name="import")
+
         _app()
     except ToolkitError as err:
         print(f"  [bold red]ERROR ([/][red]{type(err).__name__}[/][bold red]):[/] {err}")
diff --git a/cognite_toolkit/_cdf_tk/commands/featureflag.py b/cognite_toolkit/_cdf_tk/commands/featureflag.py
index 6c8f14673..e90e8255f 100644
--- a/cognite_toolkit/_cdf_tk/commands/featureflag.py
+++ b/cognite_toolkit/_cdf_tk/commands/featureflag.py
@@ -17,6 +17,7 @@ class Flags(Enum):
     MODULES_CMD: ClassVar[dict[str, Any]] = {"visible": True, "description": "Enables the modules management subapp"}
     INTERNAL: ClassVar[dict[str, Any]] = {"visible": False, "description": "Does nothing"}
+    IMPORT_CMD: ClassVar[dict[str, Any]] = {"visible": True, "description": "Enables the import sub application"}


 class FeatureFlag:
@@ -61,7 +62,7 @@ def is_enabled(flag: str | Flags) -> bool:
     @lru_cache
     def to_flag(flag: str) -> Flags | None:
         try:
-            return Flags[flag.upper()]
+            return Flags[flag.upper().replace("-", "_")]
         except KeyError:
             return None

diff --git a/cognite_toolkit/_cdf_tk/exceptions.py b/cognite_toolkit/_cdf_tk/exceptions.py
index 42e340d1c..1380b0ebc 100644
--- a/cognite_toolkit/_cdf_tk/exceptions.py
+++ b/cognite_toolkit/_cdf_tk/exceptions.py
@@ -124,6 +124,10 @@ def __repr__(self) -> str:
         return str(self)


+class ToolkitValueError(ValueError, ToolkitError):
+    pass
+
+
 class ToolkitRequiredValueError(ToolkitError, ValueError):
     pass

diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/import_.py b/cognite_toolkit/_cdf_tk/prototypes/commands/import_.py
new file mode 100644
index 000000000..52b6650eb
--- /dev/null
+++ b/cognite_toolkit/_cdf_tk/prototypes/commands/import_.py
@@ -0,0 +1,217 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from collections.abc import Callable
+from pathlib import Path
+from typing import Any
+
+import yaml
+from cognite.client import CogniteClient
+from rich import print
+from rich.table import Table
+
+from cognite_toolkit._cdf_tk.commands._base import ToolkitCommand
+from cognite_toolkit._cdf_tk.exceptions import AuthenticationError, ToolkitValueError
+from cognite_toolkit._cdf_tk.tk_warnings import LowSeverityWarning
+from cognite_toolkit._cdf_tk.utils import read_yaml_file
+
+
+class ImportTransformationCLI(ToolkitCommand):
+    def __init__(
+        self,
+        get_client: Callable[[], CogniteClient] | None = None,
+        print_warning: bool = True,
+        user_command: str | None = None,
+        skip_tracking: bool = False,
+    ):
+        super().__init__(print_warning, user_command, skip_tracking)
+        self._dataset_external_id_by_id: dict[int, str] = {}
+        # We only initialize the client if we need to look up dataset ids.
+ self._client: CogniteClient | None = None + self._get_client = get_client + + def execute( + self, + source: Path, + destination: Path, + overwrite: bool, + flatten: bool, + verbose: bool = False, + ) -> None: + # Manifest files are documented at + # https://cognite-transformations-cli.readthedocs-hosted.com/en/latest/quickstart.html#transformation-manifest + if source.is_file() and source.suffix in {".yaml", ".yml"}: + yaml_files = [source] + elif source.is_file(): + raise ToolkitValueError(f"File {source} is not a YAML file.") + elif source.is_dir(): + yaml_files = list(source.rglob("*.yaml")) + list(source.rglob("*.yml")) + else: + raise ToolkitValueError(f"Source {source} is not a file or directory.") + + if not yaml_files: + self.warn(LowSeverityWarning("No YAML files found in the source directory.")) + return None + + count_by_resource_type: dict[str, int] = defaultdict(int) + for yaml_file in yaml_files: + data = self._load_file(yaml_file) + if data is None: + continue + + # The convert schedule and notifications pop off the schedule and notifications + # keys from the transformation + schedule = self._convert_schedule(data, data["externalId"], yaml_file) + notifications = self._convert_notifications(data, data["externalId"], yaml_file) + transformation, source_query_path = self._convert_transformation(data, yaml_file) + + if flatten: + destination_folder = destination + else: + destination_folder = destination / yaml_file.relative_to(source).parent + destination_folder.mkdir(parents=True, exist_ok=True) + + destination_transformation = destination_folder / f"{yaml_file.stem}.Transformation.yaml" + if not overwrite and destination_transformation.exists(): + self.warn(LowSeverityWarning(f"File already exists at {destination_transformation}. Skipping.")) + continue + destination_transformation.write_text(yaml.safe_dump(transformation)) + if source_query_path is not None: + destination_query_path = destination_folder / f"{destination_transformation.stem}.sql" + destination_query_path.write_text(source_query_path.read_text()) + + if schedule is not None: + destination_schedule = destination_folder / f"{yaml_file.stem}.Schedule.yaml" + destination_schedule.write_text(yaml.safe_dump(schedule)) + if notifications: + destination_notification = destination_folder / f"{yaml_file.stem}.Notification.yaml" + destination_notification.write_text(yaml.safe_dump(notifications)) + if verbose: + print(f"Imported {yaml_file} to {destination_folder}.") + count_by_resource_type["transformation"] += 1 + count_by_resource_type["schedule"] += 1 if schedule is not None else 0 + count_by_resource_type["notification"] += len(notifications) + + print(f"Finished importing from {source} to {destination}.") + table = Table(title="Import transformation-cli Summary") + table.add_column("Resource Type", justify="right", style="cyan") + table.add_column("Count", justify="right", style="magenta") + for resource_type, count in count_by_resource_type.items(): + table.add_row(resource_type, str(count)) + print(table) + + def _load_file(self, yaml_file: Path) -> dict[str, Any] | None: + content = read_yaml_file(yaml_file, expected_output="dict") + required_keys = {"externalId", "name", "destination", "query"} + if missing_keys := required_keys - content.keys(): + self.warn( + LowSeverityWarning( + f"Missing required keys {missing_keys} in {yaml_file}. Likely not a Transformation manifest. Skipping." 
+                )
+            )
+            return None
+        return content
+
+    def _convert_transformation(
+        self, transformation: dict[str, Any], source_file: Path
+    ) -> tuple[dict[str, Any], Path | None]:
+        if "shared" in transformation:
+            transformation["isPublic"] = transformation.pop("shared")
+        if "action" in transformation:
+            transformation["conflictMode"] = transformation.pop("action")
+        if "ignoreNullFields" not in transformation:
+            # This is required by the API, but the transformation-cli sets it to true by default.
+            transformation["ignoreNullFields"] = True
+        source_query_path: Path | None = None
+        if isinstance(transformation["query"], dict):
+            query = transformation.pop("query")
+            if "file" in query:
+                source_query_path = Path(query.pop("file"))
+
+        if "dataSetId" in transformation:
+            if "dataSetExternalId" in transformation:
+                self.warn(LowSeverityWarning(f"Both dataSetId and dataSetExternalId are present in {source_file}."))
+            else:
+                data_set_id = transformation.pop("dataSetId")
+                if (data_set_external_id := self._lookup_dataset(data_set_id)) is None:
+                    self.warn(
+                        LowSeverityWarning(f"Failed to find DataSet with id {data_set_id} in CDF.")
+                    )
+                else:
+                    transformation["dataSetExternalId"] = data_set_external_id
+
+        if "authentication" in transformation:
+            authentication = transformation["authentication"]
+            if not isinstance(authentication, dict):
+                self.warn(LowSeverityWarning(f"Invalid authentication format in {source_file}."))
+            else:
+                if "tokenUrl" in authentication:
+                    authentication["tokenUri"] = authentication.pop("tokenUrl")
+
+                if "read" in authentication or "write" in authentication:
+                    # Separate read/write credentials replace the single authentication block.
+                    transformation.pop("authentication")
+
+                    read = authentication.pop("read", None)
+                    if read and isinstance(read, dict):
+                        if "tokenUrl" in read:
+                            read["tokenUri"] = read.pop("tokenUrl")
+                        transformation["sourceOidcCredentials"] = read
+
+                    write = authentication.pop("write", None)
+                    if write and isinstance(write, dict):
+                        if "tokenUrl" in write:
+                            write["tokenUri"] = write.pop("tokenUrl")
+                        transformation["destinationOidcCredentials"] = write
+        return transformation, source_query_path
+
+    def _convert_notifications(
+        self, transformation: dict[str, Any], external_id: str, source_file: Path
+    ) -> list[dict[str, Any]]:
+        notifications = []
+        if "notifications" in transformation:
+            notifications_raw = transformation.pop("notifications")
+            if isinstance(notifications_raw, list) and all(isinstance(n, str) for n in notifications_raw):
+                notifications = [
+                    {"destination": email, "transformationExternalId": external_id} for email in notifications_raw
+                ]
+            else:
+                self.warn(LowSeverityWarning(f"Invalid notifications format in {source_file}."))
+        return notifications
+
+    def _convert_schedule(
+        self, transformation: dict[str, Any], external_id: str, source_file: Path
+    ) -> dict[str, Any] | None:
+        schedule: dict[str, Any] | None = None
+        if "schedule" in transformation:
+            schedule_raw = transformation.pop("schedule")
+            if isinstance(schedule_raw, str):
+                schedule = {"interval": schedule_raw}
+            elif isinstance(schedule_raw, dict):
+                schedule = schedule_raw
+            else:
+                self.warn(LowSeverityWarning(f"Invalid schedule format in {source_file}."))
+        if isinstance(schedule, dict):
+            schedule["externalId"] = external_id
+        return schedule
+
+    def _lookup_dataset(self, dataset_id: int) -> str | None:
+        if dataset_id in self._dataset_external_id_by_id:
+            return self._dataset_external_id_by_id[dataset_id]
+        dataset = self.client.data_sets.retrieve(id=dataset_id)
+        if dataset is None or 
dataset.external_id is None: + return None + self._dataset_external_id_by_id[dataset.id] = dataset.external_id + return dataset.external_id + + @property + def client(self) -> CogniteClient: + if self._client is None: + if self._get_client is None: + raise AuthenticationError( + "No Cognite Client available. Are you missing a .env file?" + "\nThis is required to look up dataset ids in " + "the transformation-cli manifest(s)." + ) + self._client = self._get_client() + return self._client diff --git a/cognite_toolkit/_cdf_tk/prototypes/import_app.py b/cognite_toolkit/_cdf_tk/prototypes/import_app.py new file mode 100644 index 000000000..32db69311 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/import_app.py @@ -0,0 +1,42 @@ +from pathlib import Path + +import typer +from cognite.client import CogniteClient + +from cognite_toolkit._cdf_tk.utils import CDFToolConfig + +from .commands.import_ import ImportTransformationCLI + +import_app = typer.Typer( + pretty_exceptions_short=False, pretty_exceptions_show_locals=False, pretty_exceptions_enable=False +) + + +@import_app.callback(invoke_without_command=True) +def import_main(ctx: typer.Context) -> None: + """PREVIEW FEATURE Import resources into Cognite-Toolkit.""" + if ctx.invoked_subcommand is None: + print("Use [bold yellow]cdf-tk import --help[/] for more information.") + return None + + +@import_app.command("transformation-cli") +def transformation_cli( + ctx: typer.Context, + source: Path = typer.Argument(..., help="Path to the transformation CLI manifest directory or files."), + destination: Path = typer.Argument(..., help="Path to the destination directory."), + overwrite: bool = typer.Option(False, help="Overwrite existing files."), + flatten: bool = typer.Option(False, help="Flatten the directory structure."), +) -> None: + """Import transformation CLI manifests into Cognite-Toolkit modules.""" + + # We are lazy loading the client as we only need it if we need to look up dataset ids. + # This is to ensure the command can be executed without a client if the user does not need to look up dataset ids. 
+ # (which is likely 99% of the time) + def get_client() -> CogniteClient: + config = CDFToolConfig.from_context(ctx) + return config.client + + cmd = ImportTransformationCLI(print_warning=True, get_client=get_client) + + cmd.execute(source, destination, overwrite, flatten, verbose=ctx.obj.verbose) diff --git a/tests/tests_unit/data/__init__.py b/tests/tests_unit/data/__init__.py index 3909cb9a6..0f9379d23 100644 --- a/tests/tests_unit/data/__init__.py +++ b/tests/tests_unit/data/__init__.py @@ -9,3 +9,4 @@ PYTEST_PROJECT = DATA_FOLDER / "project_for_test" LOAD_DATA = DATA_FOLDER / "load_data" RUN_DATA = DATA_FOLDER / "run_data" +TRANSFORMATION_CLI = DATA_FOLDER / "transformation_cli" diff --git a/tests/tests_unit/data/transformation_cli/manifest.Notification.yaml b/tests/tests_unit/data/transformation_cli/manifest.Notification.yaml new file mode 100644 index 000000000..1ff9eebaa --- /dev/null +++ b/tests/tests_unit/data/transformation_cli/manifest.Notification.yaml @@ -0,0 +1,2 @@ +- destination: example@cognite.com + transformationExternalId: tr_workorder_oid_workmate_infield_sync_workorders_to_apm_activities diff --git a/tests/tests_unit/data/transformation_cli/manifest.Schedule.yaml b/tests/tests_unit/data/transformation_cli/manifest.Schedule.yaml new file mode 100644 index 000000000..de47105e9 --- /dev/null +++ b/tests/tests_unit/data/transformation_cli/manifest.Schedule.yaml @@ -0,0 +1,3 @@ +externalId: tr_workorder_oid_workmate_infield_sync_workorders_to_apm_activities +interval: 7 * * * * +isPaused: false diff --git a/tests/tests_unit/data/transformation_cli/manifest.Transformation.yaml b/tests/tests_unit/data/transformation_cli/manifest.Transformation.yaml new file mode 100644 index 000000000..163b157ee --- /dev/null +++ b/tests/tests_unit/data/transformation_cli/manifest.Transformation.yaml @@ -0,0 +1,31 @@ +authentication: + audience: ${AUDIENCE} + cdfProjectName: ${CDF_PROJECT_NAME} + clientId: ${CLIENT_ID} + clientSecret: ${CLIENT_SECRET} + scopes: + - ${SCOPES} + tokenUri: ${TOKEN_URL} +conflictMode: upsert +dataSetExternalId: ds_transformations_oid +destination: + dataModel: + destinationType: APM_Activity + externalId: APM_SourceData + space: APM_SourceData + version: '1' + instanceSpace: sp_asset_oid_source + type: instances +externalId: tr_workorder_oid_workmate_infield_sync_workorders_to_apm_activities +ignoreNullFields: true +isPublic: true +name: workorder:oid:workmate:infield:sync_workorders_to_apm_activities +query: " select\n cast(`externalId` as STRING) as externalId,\n cast(`description`\ + \ as STRING) as description,\n cast(`key` as STRING) as id,\n cast(`status`\ + \ as STRING) as status,\n /* cast(`startTime` as TIMESTAMP) as startTime,\n \ + \ cast(`endTime` as TIMESTAMP) as endTime,\n NOTE!!! The below two datas just\ + \ updates all workorders to be from now \n and into the future. 
This is done\ + \ for the sake of the demo data.\n */\n cast(current_date() as TIMESTAMP)\ + \ as startTime,\n cast(date_add(current_date(), 7) as TIMESTAMP) as endTime,\n\ + \ cast(`title` as STRING) as title,\n 'WMT:VAL' as rootLocation,\n 'workmate'\ + \ as source\n from\n `workorder_oid_workmate`.`workorders`;\n" diff --git a/tests/tests_unit/data/transformation_cli/manifest.yaml b/tests/tests_unit/data/transformation_cli/manifest.yaml new file mode 100644 index 000000000..6d0f9ebbd --- /dev/null +++ b/tests/tests_unit/data/transformation_cli/manifest.yaml @@ -0,0 +1,71 @@ +# Manifest file downloaded from fusion +externalId: tr_workorder_oid_workmate_infield_sync_workorders_to_apm_activities +name: workorder:oid:workmate:infield:sync_workorders_to_apm_activities +query: |2 + select + cast(`externalId` as STRING) as externalId, + cast(`description` as STRING) as description, + cast(`key` as STRING) as id, + cast(`status` as STRING) as status, + /* cast(`startTime` as TIMESTAMP) as startTime, + cast(`endTime` as TIMESTAMP) as endTime, + NOTE!!! The below two datas just updates all workorders to be from now + and into the future. This is done for the sake of the demo data. + */ + cast(current_date() as TIMESTAMP) as startTime, + cast(date_add(current_date(), 7) as TIMESTAMP) as endTime, + cast(`title` as STRING) as title, + 'WMT:VAL' as rootLocation, + 'workmate' as source + from + `workorder_oid_workmate`.`workorders`; +destination: + dataModel: + space: APM_SourceData + externalId: APM_SourceData + version: "1" + destinationType: APM_Activity + instanceSpace: sp_asset_oid_source + type: instances +ignoreNullFields: true +shared: true +action: upsert +schedule: + interval: 7 * * * * + isPaused: false +notifications: + - example@cognite.com +dataSetExternalId: ds_transformations_oid +authentication: + clientId: ${CLIENT_ID} + clientSecret: ${CLIENT_SECRET} + tokenUrl: ${TOKEN_URL} + # Optional: If idP requires providing the scopes + cdfProjectName: ${CDF_PROJECT_NAME} + scopes: + - ${SCOPES} + # Optional: If idP requires providing the audience + audience: ${AUDIENCE} +# Specify credentials separately like this: +# authentication: +# read: +# clientId: ${READ_CLIENT_ID} +# clientSecret: ${READ_CLIENT_SECRET} +# tokenUrl: ${READ_TOKEN_URL} +# cdfProjectName: ${READ_CDF_PROJECT_NAME} +# # Optional: If idP requires providing the scopes +# scopes: +# - ${READ_SCOPES} +# # Optional: If idP requires providing the audience +# audience: ${READ_CDF_AUDIENCE} +# write: +# clientId: ${WRITE_CLIENT_ID} +# clientSecret: ${WRITE_CLIENT_SECRET} +# tokenUrl: ${WRITE_TOKEN_URL} +# cdfProjectName: ${WRITE_CDF_PROJECT_NAME} +# # Optional: If idP requires providing the scopes +# scopes: +# - ${WRITE_SCOPES} +# # Optional: If idP requires providing the audience +# audience: ${WRITE_CDF_AUDIENCE} +# Or together like this: diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_import.py b/tests/tests_unit/test_cdf_tk/test_commands/test_import.py new file mode 100644 index 000000000..99c7be1b8 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_import.py @@ -0,0 +1,24 @@ +from pathlib import Path + +from cognite_toolkit._cdf_tk.prototypes.commands.import_ import ImportTransformationCLI +from tests.tests_unit.data import TRANSFORMATION_CLI + + +class TestImportTransformationCLI: + def test_import_transformation_cli(self, tmp_path: Path) -> None: + source_name = "manifest" + transformation, schedule, notification = ( + f"{source_name}.Transformation", + f"{source_name}.Schedule", + 
f"{source_name}.Notification", + ) + expected = {transformation, schedule, notification} + cmd = ImportTransformationCLI(print_warning=False) + cmd.execute(TRANSFORMATION_CLI / f"{source_name}.yaml", tmp_path, False, False, verbose=False) + + files_by_name = {file.stem: file for file in tmp_path.rglob("*")} + assert len(files_by_name) == len(expected) + missing = expected - set(files_by_name) + assert not missing, f"Missing files: {missing}" + for name in expected: + assert (tmp_path / f"{name}.yaml").read_text() == (TRANSFORMATION_CLI / f"{name}.yaml").read_text() From 49beb496ea7b9739f3c527b762b124cd5b6ec5ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A5l=20R=C3=B8nning?= Date: Thu, 20 Jun 2024 10:42:14 +0200 Subject: [PATCH 09/10] =?UTF-8?q?=F0=9F=A7=AF=20made=20init=20.=20safer=20?= =?UTF-8?q?(#676)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * made init . safer * Update cognite_toolkit/_cdf_tk/prototypes/commands/modules.py Co-authored-by: Anders Albert <60234212+doctrino@users.noreply.github.com> * Update cognite_toolkit/_cdf_tk/prototypes/commands/modules.py Co-authored-by: Anders Albert <60234212+doctrino@users.noreply.github.com> --------- Co-authored-by: Anders Albert <60234212+doctrino@users.noreply.github.com> --- .../_cdf_tk/prototypes/commands/modules.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py index 27942c6f6..ebd66c8df 100644 --- a/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py +++ b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py @@ -87,12 +87,12 @@ def _build_tree(self, item: dict | list, tree: Tree) -> None: def _create( self, init_dir: str, selected: dict[str, dict[str, Any]], environments: list[str], mode: str | None ) -> None: + modules_root_dir = Path(init_dir) / ALT_CUSTOM_MODULES if mode == "overwrite": - print(f"{INDENT}[yellow]Clearing directory[/]") - if Path.is_dir(Path(init_dir)): - shutil.rmtree(init_dir) + if modules_root_dir.is_dir(): + print(f"{INDENT}[yellow]Clearing directory[/]") + shutil.rmtree(modules_root_dir) - modules_root_dir = Path(init_dir) / ALT_CUSTOM_MODULES modules_root_dir.mkdir(parents=True, exist_ok=True) for package, modules in selected.items(): @@ -167,9 +167,10 @@ def init(self, init_dir: Optional[str] = None, arg_package: Optional[str] = None if not init_dir or init_dir.strip() == "": raise ToolkitRequiredValueError("You must provide a directory name.") - if (Path(init_dir) / ALT_CUSTOM_MODULES).is_dir(): + modules_root_dir = Path(init_dir) / ALT_CUSTOM_MODULES + if modules_root_dir.is_dir(): mode = questionary.select( - f"Directory {init_dir}/modules already exists. What would you like to do?", + f"Directory {modules_root_dir} already exists. 
What would you like to do?", choices=[ questionary.Choice("Abort", "abort"), questionary.Choice("Overwrite (clean existing)", "overwrite"), @@ -204,7 +205,7 @@ def init(self, init_dir: Optional[str] = None, arg_package: Optional[str] = None print("\n") if len(available) > 0: - if not questionary.confirm("Would you like to change the selection?", default=False).ask(): + if not questionary.confirm("Would you like to make changes to the selection?", default=False).ask(): break package_id = questionary.select( From 2bb0e855b5aee5f901acecbb0022847b6edc36ca Mon Sep 17 00:00:00 2001 From: Anders Albert <60234212+doctrino@users.noreply.github.com> Date: Thu, 20 Jun 2024 10:50:31 +0200 Subject: [PATCH 10/10] =?UTF-8?q?=F0=9F=9A=80Prepare=200.2.3=20(#678)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * build; bump * refactor: update migration * refactor; fix --- CHANGELOG.cdf-tk.md | 2 +- CHANGELOG.templates.md | 4 ++++ cognite_toolkit/_cdf_tk/_migration.yaml | 7 ++++++- cognite_toolkit/_system.yaml | 2 +- cognite_toolkit/_version.py | 2 +- module_upgrade/run_check.py | 2 +- pyproject.toml | 2 +- tests/tests_unit/data/project_for_test/_system.yaml | 2 +- .../data/project_no_cognite_modules/_system.yaml | 2 +- tests/tests_unit/data/run_data/_system.yaml | 2 +- 10 files changed, 18 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.cdf-tk.md b/CHANGELOG.cdf-tk.md index a8c287346..d8dd8cab7 100644 --- a/CHANGELOG.cdf-tk.md +++ b/CHANGELOG.cdf-tk.md @@ -15,7 +15,7 @@ Changes are grouped as follows: - `Fixed` for any bug fixes. - `Security` in case of vulnerabilities. -## TBD +## [0.2.3] - 2024-06-20 ### Improved diff --git a/CHANGELOG.templates.md b/CHANGELOG.templates.md index b2b16cfd8..b220e43c0 100644 --- a/CHANGELOG.templates.md +++ b/CHANGELOG.templates.md @@ -15,6 +15,10 @@ Changes are grouped as follows: - `Fixed` for any bug fixes. - `Security` in case of vulnerabilities. +## [0.2.3] - 2024-06-20 + +No changes to templates. + ## [0.2.2] - 2024-06-18 No changes to templates. diff --git a/cognite_toolkit/_cdf_tk/_migration.yaml b/cognite_toolkit/_cdf_tk/_migration.yaml index 392a41814..466d2c860 100644 --- a/cognite_toolkit/_cdf_tk/_migration.yaml +++ b/cognite_toolkit/_cdf_tk/_migration.yaml @@ -1,8 +1,13 @@ -- version: 0.2.2 +- version: 0.2.3 cognite_modules: {} resources: {} tool: {} cognite_modules_hash: "" +- version: 0.2.2 + cognite_modules: {} + resources: {} + tool: {} + cognite_modules_hash: "e3ef5a27540847e955f04c3dad4e296f5d6f646008a7a5490cddf81e79772c1d" - version: 0.2.1 cognite_modules: {} resources: {} diff --git a/cognite_toolkit/_system.yaml b/cognite_toolkit/_system.yaml index 2f0c2e9a3..edbf33728 100644 --- a/cognite_toolkit/_system.yaml +++ b/cognite_toolkit/_system.yaml @@ -25,4 +25,4 @@ packages: - example_pump_data_model # This part is used by cdf-toolkit to keep track of the version and help you upgrade. 
-cdf_toolkit_version: 0.2.2 \ No newline at end of file +cdf_toolkit_version: 0.2.3 \ No newline at end of file diff --git a/cognite_toolkit/_version.py b/cognite_toolkit/_version.py index 7977696f4..09fdb50f6 100644 --- a/cognite_toolkit/_version.py +++ b/cognite_toolkit/_version.py @@ -1 +1 @@ -__version__ = "0.2.2" +__version__ = "0.2.3" diff --git a/module_upgrade/run_check.py b/module_upgrade/run_check.py index 5dba0a2fa..37560841f 100644 --- a/module_upgrade/run_check.py +++ b/module_upgrade/run_check.py @@ -36,7 +36,7 @@ def run() -> None: versions = get_versions_since(SUPPORT_MODULE_UPGRADE_FROM_VERSION) for version in versions: create_project_init(str(version)) - return + print( Panel( "All projects inits created successfully.", diff --git a/pyproject.toml b/pyproject.toml index bf3a2756b..6c1bf83ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "cognite_toolkit" -version = "0.2.2" +version = "0.2.3" description = "Official Cognite Data Fusion tool for project templates and configuration deployment" authors = ["Cognite AS "] license = "Apache-2" diff --git a/tests/tests_unit/data/project_for_test/_system.yaml b/tests/tests_unit/data/project_for_test/_system.yaml index bffdedf6e..f30a7e65a 100644 --- a/tests/tests_unit/data/project_for_test/_system.yaml +++ b/tests/tests_unit/data/project_for_test/_system.yaml @@ -4,4 +4,4 @@ packages: - child_module # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.2 +cdf_toolkit_version: 0.2.3 diff --git a/tests/tests_unit/data/project_no_cognite_modules/_system.yaml b/tests/tests_unit/data/project_no_cognite_modules/_system.yaml index 4504ff1f0..b10cf2b0d 100644 --- a/tests/tests_unit/data/project_no_cognite_modules/_system.yaml +++ b/tests/tests_unit/data/project_no_cognite_modules/_system.yaml @@ -3,4 +3,4 @@ packages: {} # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.2 +cdf_toolkit_version: 0.2.3 diff --git a/tests/tests_unit/data/run_data/_system.yaml b/tests/tests_unit/data/run_data/_system.yaml index e8b52203e..e7d3c04ac 100644 --- a/tests/tests_unit/data/run_data/_system.yaml +++ b/tests/tests_unit/data/run_data/_system.yaml @@ -25,4 +25,4 @@ packages: - example_pump_data_model # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.2 +cdf_toolkit_version: 0.2.3
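To close the series: the release patch above pins `0.2.3` in `pyproject.toml`, `cognite_toolkit/_version.py`, `cognite_toolkit/_system.yaml`, and the test fixtures' `_system.yaml` files. A small, hypothetical consistency check in that spirit (not part of the repository) could guard against missing one of them:

```python
# Hypothetical helper (not in the repository): verify that every _system.yaml pins
# the same cdf_toolkit_version as cognite_toolkit/_version.py, mirroring the files
# touched by the release patch above. Run from the repository root.
import re
from pathlib import Path

from cognite_toolkit._version import __version__

for system_yaml in Path(".").rglob("_system.yaml"):
    match = re.search(r"^cdf_toolkit_version:\s*(\S+)", system_yaml.read_text(), re.MULTILINE)
    pinned = match.group(1) if match else None
    assert pinned == __version__, f"{system_yaml} pins {pinned!r}, expected {__version__!r}"
print(f"All _system.yaml files pin {__version__}")
```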