diff --git a/.gitmodules b/.gitmodules
index 6ebd274..5d89a1b 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,6 @@
 	path = assets/processes
 	url = https://github.com/Open-EO/openeo-processes
 	branch = add-tests
+[submodule "assets/openeo-api"]
+	path = assets/openeo-api
+	url = git@github.com:Open-EO/openeo-api.git
diff --git a/README.md b/README.md
index bba4a6d..ebbe2ea 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,17 @@ focussing on a specific API aspect to test or verify
     --html=reports/process-metadata.html
   ```
 - **WP4 General openEO API compliance validation** (lead implementation partner: EODC)
-  - TODO: [Open-EO/openeo-test-suite#20](https://github.com/Open-EO/openeo-test-suite/issues/20)
+  - Main location: [`src/openeo_test_suite/tests/general`](./src/openeo_test_suite/tests/general)
+  - Provides tests to validate the general openEO API compliance of a back-end.
+  - The back-end is checked against the openEO API specification defined in [openeo-api](https://github.com/Open-EO/openeo-api/).
+  - Some tests might run for a long time (as they run process graphs on the back-end);
+    these can be skipped by adding `-m "not longrunning"` to the pytest command.
+  - Usage example of just running these tests against a desired openEO back-end URL:
+    ```bash
+    pytest src/openeo_test_suite/tests/general \
+      -U https://openeo.example \
+      --html=reports/general.html
+    ```
 - **WP5 Individual process testing** (lead implementation partner: M. Mohr)
   - Main location: [`src/openeo_test_suite/tests/processes/processing`](./src/openeo_test_suite/tests/processes/processing)
   - Provides tests to validate individual openEO processes,
@@ -377,7 +387,7 @@ Some general guidelines:
 - Validation of process metadata:
   add new tests to `src/openeo_test_suite/tests/processes/metadata`.
 - General openEO API compliance validation:
-  - TODO: [Open-EO/openeo-test-suite#20](https://github.com/Open-EO/openeo-test-suite/issues/20)
+  add new tests to `src/openeo_test_suite/tests/general`.
 - Individual process testing:
   - new input-output pairs for existing or new processes:
     add them in the [openeo-processes](https://github.com/Open-EO/openeo-processes) project
diff --git a/assets/openeo-api b/assets/openeo-api
new file mode 160000
index 0000000..c5a45b4
--- /dev/null
+++ b/assets/openeo-api
@@ -0,0 +1 @@
+Subproject commit c5a45b4647b06e313a4f099e9119bfa3cca5c6a3
diff --git a/pyproject.toml b/pyproject.toml
index 1b7c967..49b067e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,6 +23,7 @@ dependencies = [
     "pytest-html>=4.1.0",
     "stac-validator>=3.3.0",
     "pytest-metadata>=3.0.0",
+    "openapi-core>=0.18.2",
 ]
 classifiers = [
     "Programming Language :: Python :: 3",
diff --git a/src/openeo_test_suite/lib/compliance_util.py b/src/openeo_test_suite/lib/compliance_util.py
new file mode 100644
index 0000000..5eab468
--- /dev/null
+++ b/src/openeo_test_suite/lib/compliance_util.py
@@ -0,0 +1,487 @@
+from pathlib import Path
+import pathlib
+from typing import Iterator, Union
+from openapi_core import Spec
+import yaml
+import json
+import os
+import uuid
+import requests
+from requests import Request, Session
+import time
+import logging
+import openeo_test_suite
+
+from openeo_test_suite.lib.backend_under_test import (
+    get_backend_url,
+    get_backend_under_test,
+)
+
+from openapi_core import validate_response
+from openapi_core.contrib.requests import RequestsOpenAPIRequest
+from openapi_core.contrib.requests import RequestsOpenAPIResponse
+from openapi_core import V31ResponseValidator
+
+
+def test_endpoint(
+    base_url: str,
+    endpoint_path: str,
+    test_name: str,
+    spec: Spec,
+    payload: dict = None,
+    bearer_token: str = None,
+    method: str = "GET",
+    expected_status_codes: Union[list[int], int] = [200],
+    return_response: bool = False,
+):
+    full_endpoint_url = f"{base_url}{endpoint_path}"
+    session = Session()
+    headers = {"Content-Type": "application/json"} if payload else {}
+
+    if bearer_token:
+        headers["Authorization"] = bearer_token
+
+    response = session.request(
+        method=method.upper(),
+        url=full_endpoint_url,
+        json=payload,
+        headers=headers,
+    )
+
+    openapi_request = RequestsOpenAPIRequest(
+        Request(method.upper(), full_endpoint_url, json=payload, headers=headers)
+    )
+    openapi_response = RequestsOpenAPIResponse(response)
+
+    try:
+        if check_status_code(expected_status_codes, openapi_response.status_code):
+            validate_response(
+                openapi_request, openapi_response, spec=spec, cls=V31ResponseValidator
+            )
+        else:
+            raise UnexpectedStatusCodeException(
+                endpoint=full_endpoint_url,
+                expected_status_code=expected_status_codes,
+                actual_status_code=openapi_response.status_code,
+                auth=(bearer_token is not None),
+            )
+    except Exception as e:
+        print_test_results(e, endpoint_path=endpoint_path, test_name=test_name)
+        if return_response:
+            return check_test_results(e), response
+        else:
+            return check_test_results(e)
+    else:
+        if return_response:
+            return "", response
+        else:
+            return ""
+
+
+def check_status_code(
+    expected_status_codes: Union[list[int], int], actual_status_code: int
+):
+    if isinstance(expected_status_codes, int):
+        return actual_status_code == expected_status_codes
+    return actual_status_code in expected_status_codes
+
+
+class UnexpectedStatusCodeException(Exception):
+    def __init__(self, endpoint, expected_status_code, actual_status_code, auth):
+        self.endpoint = endpoint
+        self.expected_status_code = expected_status_code
+        self.actual_status_code = actual_status_code
+        self.auth = auth
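+        # The auth flag records whether a bearer token was sent, so the fail log
+        # can distinguish authentication problems from missing resources.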
+        self.message = f'Unexpected status code for endpoint "{endpoint}": Expected {expected_status_code}, but received {actual_status_code}.'
+        super().__init__(self.message)
+
+
+def get_batch_job_status(base_url: str, bearer_token: str, job_id: str):
+    return json.loads(
+        requests.get(
+            f"{base_url}/jobs/{job_id}", headers={"Authorization": f"{bearer_token}"}
+        ).content
+    )["status"]
+
+
+def wait_job_statuses(
+    base_url: str,
+    bearer_token: str,
+    job_ids: list[str],
+    job_statuses: list[str],
+    timeout: int = 10,
+):
+    """
+    Waits for the status of each job to reach one of job_statuses, or times out after {timeout} seconds.
+
+    returns True if all jobs have reached a desired status
+    returns False if the timeout has been reached
+    """
+    end_time = time.time() + timeout
+    while time.time() < end_time:
+        if all(
+            get_batch_job_status(
+                base_url=base_url, bearer_token=bearer_token, job_id=job_id
+            )
+            in job_statuses
+            for job_id in job_ids
+        ):
+            return True
+        time.sleep(1)
+        logging.info("Waiting on jobs to reach desired status..")
+    logging.warning("Jobs failed to reach desired state, timeout has been reached.")
+    return False
+
+
+def print_test_results(e: Exception, endpoint_path: str, test_name: str = "?"):
+    """
+    prints the results of an openapi-core validation test
+
+    e: the exception that was raised as a part of the response validation test
+    test_name: the name of the test that ought to be displayed
+    """
+    print("")
+    print("------------------------------------------")
+    print(
+        f'Validation Errors from path: "{endpoint_path}" Path description: {test_name}'
+    )
+
+    # message is important
+    if type(e) is UnexpectedStatusCodeException:
+        print("")
+        print(e.message)
+        if e.auth and (e.actual_status_code == 500):
+            print(
+                "Bearer token is invalid or no longer valid, or the endpoint expects an id and the item does not exist."
+            )
+        elif e.actual_status_code == 500:
+            print("Endpoint expects an id and the item does not exist.")
+        elif e.actual_status_code == 404 or e.actual_status_code == 501:
+            print("Endpoint is not implemented; this is only an error if the endpoint is REQUIRED.")
+        elif e.actual_status_code == 410:
+            print(
+                "Endpoint reports the requested resource as gone. Logs are not provided while a job is queued or created."
+            )
+        else:
+            print(f"Some other unexpected error code: {e.actual_status_code}")
+    # json_path and message are important
+    elif hasattr(e.__cause__, "schema_errors"):
+        errors = e.__cause__.schema_errors
+        for error in errors:
+            print("")
+            print(error.message)
+            print(error.json_path)
+    else:
+        print(e)
+    print("------------------------------------------")
+    print("")
+
+
+def check_test_results(e: Exception):
+    """
+    Collects the results of an openapi-core validation test and returns them as a fail log string.
+
+    e: the exception that was raised as a part of the response validation test
+    """
+
+    # message is important
+
+    fail_log = ""
+    if type(e) is UnexpectedStatusCodeException:
+        if e.auth and (e.actual_status_code == 500):
+            fail_log = "Bearer token is invalid or no longer valid, or the endpoint expects an id and the item does not exist."
+        elif e.actual_status_code == 500:
+            fail_log = "Endpoint expects an id and the item does not exist."
+        elif e.actual_status_code == 404 or e.actual_status_code == 501:
+            fail_log = "Endpoint is not implemented; this is only an error if the endpoint is REQUIRED."
+        elif e.actual_status_code == 410:
+            fail_log = "Endpoint reports the requested resource as gone. Logs are not provided while a job is queued or created."
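+        # Fall back to a generic message for any other unexpected status code.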
+        else:
+            fail_log = f"Some other unexpected error code: {e.actual_status_code}"
+    # json_path and message are important
+    elif hasattr(e.__cause__, "schema_errors"):
+        errors = e.__cause__.schema_errors
+        for error in errors:
+            fail_log += f"Message: {error.message} Json_path: {error.json_path} \n"
+    else:
+        fail_log = str(e)
+
+    return fail_log
+
+
+# The server field in the spec has to be adjusted so that validation does not fail on the server url.
+def adjust_spec(path_to_file: str, endpoint: str, domain: str):
+    data = adjust_server(path_to_file=path_to_file, endpoint=endpoint)
+    data = adjust_server_in_well_known(data=data, endpoint=domain)
+    return Spec.from_dict(data, validator=None)
+
+
+def adjust_server(path_to_file, endpoint):
+    with open(path_to_file, "r") as file:
+        data = yaml.safe_load(file)
+
+    if "servers" in data and isinstance(data["servers"], list):
+        for server in data["servers"]:
+            if "url" in server and isinstance(server["url"], str):
+                server["url"] = endpoint
+    return data
+
+
+def adjust_server_in_well_known(data, endpoint):
+    data["paths"]["/.well-known/openeo"]["get"]["servers"][0]["url"] = endpoint
+    return data
+
+
+def validate_uri(value):
+    if not isinstance(value, str):
+        return False
+    if value.startswith("http://") or value.startswith("https://"):
+        return True
+    return False
+
+
+extra_format_validators = {
+    "uri": validate_uri,
+}
+
+
+def unmarshal_commonmark(value):
+    return value
+
+
+extra_format_unmarshallers = {
+    "commonmark": unmarshal_commonmark,
+}
+
+
+def _guess_root():
+    project_root = Path(openeo_test_suite.__file__).parents[2]
+    candidates = [
+        project_root / "assets/openeo-api",
+        Path("./assets/openeo-api"),
+        Path("./openeo-test-suite/assets/openeo-api"),
+    ]
+    for candidate in candidates:
+        if candidate.exists() and candidate.is_dir():
+            return candidate
+    raise ValueError(
+        f"Could not find a valid openeo-api root directory (tried {candidates})"
+    )
+
+
+def get_examples_path():
+    return (
+        _guess_root().parents[2]
+        / "src"
+        / "openeo_test_suite"
+        / "tests"
+        / "general"
+        / "payload_examples"
+    )
+
+
+def get_spec_path():
+    return _guess_root() / "openapi.yaml"
+
+
+def load_payloads_from_directory(directory_path: str) -> Iterator[str]:
+    for file_path in pathlib.Path(directory_path).glob("*.json"):
+        with open(file_path, "r") as file:
+            try:
+                # Load the JSON data from the file
+                data = json.load(file)
+                yield json.dumps(data)
+            except json.JSONDecodeError:
+                logging.error(f"Error decoding JSON in file: {file_path}")
+            except Exception as e:
+                logging.error(f"Error reading file: {file_path} - {str(e)}")
+
+
+def set_uuid_in_job(json_data):
+    if isinstance(json_data, str):
+        json_data = json.loads(json_data)
+    # Generate a new UUID
+    new_id = str(uuid.uuid4().hex)
+    # Set the process 'id' field to the generated UUID
+    json_data["process"]["id"] = new_id
+    # Return the modified JSON object
+    return new_id, json.dumps(json_data)
+
+
+def delete_id_resource(
+    base_url: str, endpoint_path: str, bearer_token: str, ids: list[str]
+):
+    for id in ids:
+        try:
+            requests.delete(
+                f"{base_url}/{endpoint_path}/{id}",
+                headers={"Authorization": f"{bearer_token}"},
+            )
+        except Exception as e:
+            logging.error(f"Failed to delete resource with id {id}: {e}")
+
+
+def put_process_graphs(base_url: str, bearer_token: str):  # TODO id and so forth
+    directory_path = get_examples_path()
+    examples_directory = "put_process_graphs"
+
+    created_udp_ids = []
+    payloads = load_payloads_from_directory(
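+        # The example payloads ship with the test suite under payload_examples/put_process_graphs/.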
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    try:
+        for payload in payloads:
+            id = str(uuid.uuid4().hex)
+            created_udp_ids.append(id)
+            requests.put(
+                f"{base_url}/process_graphs/{id}",
+                data=payload,
+                headers={
+                    "Content-Type": "application/json",
+                    "Authorization": f"{bearer_token}",
+                },
+            )
+    except Exception as e:
+        print(f"Failed to create process graph: {e}")
+    return created_udp_ids
+
+
+def set_uuid_in_udp(json_data):
+    if isinstance(json_data, str):
+        json_data = json.loads(json_data)
+    # Generate a new UUID
+    new_id = str(uuid.uuid4().hex)
+    # Set the 'id' field to the generated UUID
+    json_data["id"] = new_id
+    # Return the modified JSON object
+    return new_id, json.dumps(json_data)
+
+
+def post_jobs(base_url: str, bearer_token: str):
+    endpoint_path = "jobs"
+    directory_path = get_examples_path()
+    examples_directory = "post_jobs"
+
+    created_batch_job_ids = []
+
+    payloads = load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+    full_endpoint_url = f"{base_url}{endpoint_path}"
+
+    for payload in payloads:
+        _, payload = set_uuid_in_job(payload)
+
+        response = requests.post(
+            full_endpoint_url,
+            data=payload,
+            headers={
+                "Content-Type": "application/json",
+                "Authorization": f"{bearer_token}",
+            },
+        )
+        created_batch_job_ids.append(response.headers["OpenEO-Identifier"])
+
+    return created_batch_job_ids
+
+
+def post_start_jobs(base_url: str, bearer_token: str):
+    created_batch_job_ids = post_jobs(base_url=base_url, bearer_token=bearer_token)
+
+    endpoint_path = "jobs"
+    endpoint_path_extra = "results"
+
+    for job_id in created_batch_job_ids:
+        full_endpoint_url = f"{base_url}/{endpoint_path}/{job_id}/{endpoint_path_extra}"
+        requests.post(full_endpoint_url, headers={"Authorization": f"{bearer_token}"})
+
+    wait_job_statuses(
+        base_url=base_url,
+        bearer_token=bearer_token,
+        job_ids=created_batch_job_ids,
+        job_statuses=["running"],
+        timeout=10,
+    )
+    return created_batch_job_ids
+
+
+def cancel_delete_jobs(base_url: str, bearer_token: str, job_ids: list[str]):
+    """
+    Cancels and deletes all jobs with the given ids.
+    """
+
+    endpoint_path = "jobs"
+
+    for job_id in job_ids:
+        full_endpoint_url = f"{base_url}/{endpoint_path}/{job_id}"
+        requests.delete(full_endpoint_url, headers={"Authorization": f"{bearer_token}"})
+
+
+def process_list_generator(filename: str):
+    with open(filename, "r") as file:
+        data = json.load(file)
+        for item in data:
+            yield item
+
+
+def get_process_list(base_url: str):
+    endpoint_path = "processes"
+
+    full_endpoint_url = f"{base_url}/{endpoint_path}"
+
+    return json.loads(requests.get(full_endpoint_url).content)["processes"]
+
+
+def get_access_token(pytestconfig):
+    backend = get_backend_under_test()
+
+    capmanager = pytestconfig.pluginmanager.getplugin("capturemanager")
+    with capmanager.global_and_fixture_disabled():
+        backend.connection.authenticate_oidc()
+    if hasattr(backend.connection.auth, "bearer"):
+        return backend.connection.auth.bearer
+    return None
+
+
+from urllib.parse import urlparse, urlunparse
+
+
+def get_domain(request):
+    url = get_backend_url(request.config)
+    parsed_url = urlparse(url)
+    # Reconstruct the URL with scheme and netloc only.
+    return urlunparse((parsed_url.scheme, parsed_url.netloc, "", "", "", ""))
+
+
+def get_version(request):
+    url = get_backend_url(request.config)
+    parsed_url = urlparse(url)
+    # Split the path and look for a segment starting with 'openeo' that carries a version number.
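+    # e.g. for "https://openeo.example/openeo/1.2" the path splits into the segments "openeo" and "1.2" (illustrative URL).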
+ for segment in parsed_url.path.split("/"): + if segment.startswith("openeo") and len(segment.split("/")) > 1: + return segment + "/" + return "" + + +def get_base_url(request): + url = get_backend_url(request.config) + parsed_url = urlparse(url) + # If the scheme is missing, add 'https://'. + if not parsed_url.scheme: + url = "https://" + url + # If the path is missing or doesn't contain 'openeo', query the '.well-known' endpoint. this is a failsafe. + if not parsed_url.path or "openeo" not in parsed_url.path: + requests_response = requests.get(url + "/.well-known/openeo") + data = json.loads(requests_response.content) + url = data["versions"][-1]["url"] + return url diff --git a/src/openeo_test_suite/tests/general/payload_examples/empty_payload/empty_payload.json b/src/openeo_test_suite/tests/general/payload_examples/empty_payload/empty_payload.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/empty_payload/empty_payload.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/mock_processes/mock_processes_0.json b/src/openeo_test_suite/tests/general/payload_examples/mock_processes/mock_processes_0.json new file mode 100644 index 0000000..e5d41d0 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/mock_processes/mock_processes_0.json @@ -0,0 +1,133 @@ +{ + "processes": [ + { + "id": "apply", + "summary": "Apply a process to each pixel", + "description": "Applies a *unary* process to each pixel value in the data cube (i.e. a local operation). A unary process takes a single value and returns a single value, for example ``abs()`` or ``linear_scale_range()``.", + "categories": [ + "cubes" + ], + "parameters": [ + { + "name": "data", + "description": "A data cube.", + "schema": { + "type": "object", + "subtype": "datacube" + } + }, + { + "name": "process", + "description": "A unary process to be applied on each value, may consist of multiple sub-processes.", + "schema": { + "type": "object", + "subtype": "process-graph", + "parameters": [ + { + "name": "x", + "description": "The value to process.", + "schema": { + "description": "Any data type." + } + } + ] + } + } + ], + "returns": { + "description": "A data cube with the newly computed values. The resolution, cardinality and the number of dimensions are the same as for the original data cube.", + "schema": { + "type": "object", + "subtype": "datacube" + } + } + }, + { + "id": "multiply", + "summary": "Multiplication of two numbers", + "description": "Multiplies the two numbers `x` and `y` (*x * y*) and returns the computed product.\n\nNo-data values are taken into account so that `null` is returned if any element is such a value.\n\nThe computations follow [IEEE Standard 754](https://ieeexplore.ieee.org/document/8766229) whenever the processing environment supports it.", + "categories": [ + "math" + ], + "parameters": [ + { + "name": "x", + "description": "The multiplier.", + "schema": { + "type": [ + "number", + "null" + ] + } + }, + { + "name": "y", + "description": "The multiplicand.", + "schema": { + "type": [ + "number", + "null" + ] + } + } + ], + "returns": { + "description": "The computed product of the two numbers.", + "schema": { + "type": [ + "number", + "null" + ] + } + }, + "exceptions": { + "MultiplicandMissing": { + "message": "Multiplication requires at least two numbers." 
+ } + }, + "examples": [ + { + "arguments": { + "x": 5, + "y": 2.5 + }, + "returns": 12.5 + }, + { + "arguments": { + "x": -2, + "y": -4 + }, + "returns": 8 + }, + { + "arguments": { + "x": 1, + "y": null + }, + "returns": null + } + ], + "links": [ + { + "rel": "about", + "href": "http://mathworld.wolfram.com/Product.html", + "title": "Product explained by Wolfram MathWorld" + }, + { + "rel": "about", + "href": "https://ieeexplore.ieee.org/document/8766229", + "title": "IEEE Standard 754-2019 for Floating-Point Arithmetic" + } + ] + } + ], + "links": [ + { + "rel": "alternate", + "href": "https://openeo.example/processes", + "type": "text/html", + "title": "HTML version of the processes" + } + ] + } \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/patch_jobs/patch_jobs_1.json b/src/openeo_test_suite/tests/general/payload_examples/patch_jobs/patch_jobs_1.json new file mode 100644 index 0000000..60c0119 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/patch_jobs/patch_jobs_1.json @@ -0,0 +1,292 @@ +{ + "title": "NDVI based on Sentinel 2", + "description": "Deriving minimum NDVI measurements over pixel time series of Sentinel 2", + "process": { + "id": "ndvi", + "summary": "string", + "description": "string", + "parameters": [ + { + "schema": { + "subtype": "string", + "deprecated": false, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com", + "type": "array", + "pattern": "/regex/", + "enum": [ + null + ], + "minimum": 0, + "maximum": 0, + "minItems": 0, + "maxItems": 0, + "items": [ + {} + ], + "property1": null, + "property2": null + }, + "name": "string", + "description": "string", + "optional": false, + "deprecated": false, + "experimental": false, + "default": null + } + ], + "returns": { + "description": "string", + "schema": { + "subtype": "string", + "deprecated": false, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com", + "type": "array", + "pattern": "/regex/", + "enum": [ + null + ], + "minimum": 0, + "maximum": 0, + "minItems": 0, + "maxItems": 0, + "items": [ + {} + ], + "property1": null, + "property2": null + } + }, + "categories": [ + "string" + ], + "deprecated": false, + "experimental": false, + "exceptions": { + "Error Code1": { + "description": "string", + "message": "The value specified for the process argument '{argument}' in process '{process}' is invalid: {reason}", + "http": 400 + }, + "Error Code2": { + "description": "string", + "message": "The value specified for the process argument '{argument}' in process '{process}' is invalid: {reason}", + "http": 400 + } + }, + "examples": [ + { + "title": "string", + "description": "string", + "arguments": { + "property1": { + "from_parameter": null, + "from_node": null, + "process_graph": null + }, + "property2": { + "from_parameter": null, + "from_node": null, + "process_graph": null + } + }, + "returns": null + } + ], + "links": [ + { + "rel": "related", + "href": "https://openeo.example", + "type": "text/html", + "title": "openEO" + } + ], + "process_graph": { + "dc": { + "process_id": "load_collection", + "arguments": { + "id": "Sentinel-2", + "spatial_extent": { + "west": 16.1, + "east": 16.6, + "north": 48.6, + "south": 47.2 + }, + "temporal_extent": [ + "2018-01-01", + "2018-02-01" + ] + } + }, + "bands": { + "process_id": "filter_bands", + "description": "Filter and order the bands. 
The order is important for the following reduce operation.", + "arguments": { + "data": { + "from_node": "dc" + }, + "bands": [ + "B08", + "B04", + "B02" + ] + } + }, + "evi": { + "process_id": "reduce", + "description": "Compute the EVI. Formula: 2.5 * (NIR - RED) / (1 + NIR + 6*RED + -7.5*BLUE)", + "arguments": { + "data": { + "from_node": "bands" + }, + "dimension": "bands", + "reducer": { + "process_graph": { + "nir": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 0 + } + }, + "red": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 1 + } + }, + "blue": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 2 + } + }, + "sub": { + "process_id": "subtract", + "arguments": { + "data": [ + { + "from_node": "nir" + }, + { + "from_node": "red" + } + ] + } + }, + "p1": { + "process_id": "product", + "arguments": { + "data": [ + 6, + { + "from_node": "red" + } + ] + } + }, + "p2": { + "process_id": "product", + "arguments": { + "data": [ + -7.5, + { + "from_node": "blue" + } + ] + } + }, + "sum": { + "process_id": "sum", + "arguments": { + "data": [ + 1, + { + "from_node": "nir" + }, + { + "from_node": "p1" + }, + { + "from_node": "p2" + } + ] + } + }, + "div": { + "process_id": "divide", + "arguments": { + "data": [ + { + "from_node": "sub" + }, + { + "from_node": "sum" + } + ] + } + }, + "p3": { + "process_id": "product", + "arguments": { + "data": [ + 2.5, + { + "from_node": "div" + } + ] + }, + "result": true + } + } + } + } + }, + "mintime": { + "process_id": "reduce", + "description": "Compute a minimum time composite by reducing the temporal dimension", + "arguments": { + "data": { + "from_node": "evi" + }, + "dimension": "temporal", + "reducer": { + "process_graph": { + "min": { + "process_id": "min", + "arguments": { + "data": { + "from_parameter": "data" + } + }, + "result": true + } + } + } + } + }, + "save": { + "process_id": "save_result", + "arguments": { + "data": { + "from_node": "mintime" + }, + "format": "GTiff" + }, + "result": true + } + } + }, + "plan": "free", + "budget": 100, + "log_level": "warning" +} \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/post_jobs/post_jobs_1.json b/src/openeo_test_suite/tests/general/payload_examples/post_jobs/post_jobs_1.json new file mode 100644 index 0000000..63caf63 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/post_jobs/post_jobs_1.json @@ -0,0 +1,41 @@ +{ + "title": "Custom Test Process", + "description": "Loads and Saves austrian ground motion data", + "process": { + "process_graph": { + "load1": { + "process_id": "load_collection", + "arguments": { + "bands": [ + "B01" + ], + "properties": {}, + "id": "CGLS_SSM_1KM", + "spatial_extent": { + "west": 16.186110851391813, + "east": 16.576456845030226, + "south": 48.08764096726651, + "north": 48.29291292355549 + }, + "temporal_extent": [ + "2020-01-01T00:00:00Z", + "2020-12-13T00:00:00Z" + ] + } + }, + "save2": { + "process_id": "save_result", + "arguments": { + "data": { + "from_node": "load1" + }, + "format": "NETCDF" + }, + "result": true + } + }, + "parameters": [] + }, + "plan": "free", + "budget": 100 + } \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/post_result/post_result_1.json b/src/openeo_test_suite/tests/general/payload_examples/post_result/post_result_1.json new file mode 100644 index 
0000000..b2febe3 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/post_result/post_result_1.json @@ -0,0 +1,292 @@ +{ + "process": { + "id": "ndvi", + "summary": "string", + "description": "string", + "parameters": [ + { + "schema": { + "subtype": "string", + "deprecated": false, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com", + "type": "array", + "pattern": "/regex/", + "enum": [ + null + ], + "minimum": 0, + "maximum": 0, + "minItems": 0, + "maxItems": 0, + "items": [ + {} + ], + "property1": null, + "property2": null + }, + "name": "string", + "description": "string", + "optional": false, + "deprecated": false, + "experimental": false, + "default": null + } + ], + "returns": { + "description": "string", + "schema": { + "subtype": "string", + "deprecated": false, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com", + "type": "array", + "pattern": "/regex/", + "enum": [ + null + ], + "minimum": 0, + "maximum": 0, + "minItems": 0, + "maxItems": 0, + "items": [ + {} + ], + "property1": null, + "property2": null + } + }, + "categories": [ + "string" + ], + "deprecated": false, + "experimental": false, + "exceptions": { + "Error Code1": { + "description": "string", + "message": "The value specified for the process argument '{argument}' in process '{process}' is invalid: {reason}", + "http": 400 + }, + "Error Code2": { + "description": "string", + "message": "The value specified for the process argument '{argument}' in process '{process}' is invalid: {reason}", + "http": 400 + } + }, + "examples": [ + { + "title": "string", + "description": "string", + "arguments": { + "property1": { + "from_parameter": null, + "from_node": null, + "process_graph": null + }, + "property2": { + "from_parameter": null, + "from_node": null, + "process_graph": null + } + }, + "returns": null + } + ], + "links": [ + { + "rel": "related", + "href": "https://openeo.example", + "type": "text/html", + "title": "openEO" + } + ], + "process_graph": { + "dc": { + "process_id": "load_collection", + "arguments": { + "id": "Sentinel-2", + "spatial_extent": { + "west": 16.1, + "east": 16.6, + "north": 48.6, + "south": 47.2 + }, + "temporal_extent": [ + "2018-01-01", + "2018-02-01" + ] + } + }, + "bands": { + "process_id": "filter_bands", + "description": "Filter and order the bands. The order is important for the following reduce operation.", + "arguments": { + "data": { + "from_node": "dc" + }, + "bands": [ + "B08", + "B04", + "B02" + ] + } + }, + "evi": { + "process_id": "reduce", + "description": "Compute the EVI. 
Formula: 2.5 * (NIR - RED) / (1 + NIR + 6*RED + -7.5*BLUE)", + "arguments": { + "data": { + "from_node": "bands" + }, + "dimension": "bands", + "reducer": { + "process_graph": { + "nir": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 0 + } + }, + "red": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 1 + } + }, + "blue": { + "process_id": "array_element", + "arguments": { + "data": { + "from_parameter": "data" + }, + "index": 2 + } + }, + "sub": { + "process_id": "subtract", + "arguments": { + "data": [ + { + "from_node": "nir" + }, + { + "from_node": "red" + } + ] + } + }, + "p1": { + "process_id": "product", + "arguments": { + "data": [ + 6, + { + "from_node": "red" + } + ] + } + }, + "p2": { + "process_id": "product", + "arguments": { + "data": [ + -7.5, + { + "from_node": "blue" + } + ] + } + }, + "sum": { + "process_id": "sum", + "arguments": { + "data": [ + 1, + { + "from_node": "nir" + }, + { + "from_node": "p1" + }, + { + "from_node": "p2" + } + ] + } + }, + "div": { + "process_id": "divide", + "arguments": { + "data": [ + { + "from_node": "sub" + }, + { + "from_node": "sum" + } + ] + } + }, + "p3": { + "process_id": "product", + "arguments": { + "data": [ + 2.5, + { + "from_node": "div" + } + ] + }, + "result": true + } + } + } + } + }, + "mintime": { + "process_id": "reduce", + "description": "Compute a minimum time composite by reducing the temporal dimension", + "arguments": { + "data": { + "from_node": "evi" + }, + "dimension": "temporal", + "reducer": { + "process_graph": { + "min": { + "process_id": "min", + "arguments": { + "data": { + "from_parameter": "data" + } + }, + "result": true + } + } + } + } + }, + "save": { + "process_id": "save_result", + "arguments": { + "data": { + "from_node": "mintime" + }, + "format": "GTiff" + }, + "result": true + } + } + }, + "budget": 100, + "plan": "free", + "log_level": "warning", + "property1": null, + "property2": null +} \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/post_validation/post_validation_1.json b/src/openeo_test_suite/tests/general/payload_examples/post_validation/post_validation_1.json new file mode 100644 index 0000000..fa29b57 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/post_validation/post_validation_1.json @@ -0,0 +1,103 @@ +{ + "id": "evi", + "summary": "Enhanced Vegetation Index", + "description": "Computes the Enhanced Vegetation Index (EVI). 
It is computed with the following formula: `2.5 * (NIR - RED) / (1 + NIR + 6*RED + -7.5*BLUE)`.", + "parameters": [ + { + "name": "red", + "description": "Value from the red band.", + "schema": { + "type": "number" + } + }, + { + "name": "blue", + "description": "Value from the blue band.", + "schema": { + "type": "number" + } + }, + { + "name": "nir", + "description": "Value from the near infrared band.", + "schema": { + "type": "number" + } + } + ], + "returns": { + "description": "Computed EVI.", + "schema": { + "type": "number" + } + }, + "process_graph": { + "sub": { + "process_id": "subtract", + "arguments": { + "x": { + "from_parameter": "nir" + }, + "y": { + "from_parameter": "red" + } + } + }, + "p1": { + "process_id": "multiply", + "arguments": { + "x": 6, + "y": { + "from_parameter": "red" + } + } + }, + "p2": { + "process_id": "multiply", + "arguments": { + "x": -7.5, + "y": { + "from_parameter": "blue" + } + } + }, + "sum": { + "process_id": "sum", + "arguments": { + "data": [ + 1, + { + "from_parameter": "nir" + }, + { + "from_node": "p1" + }, + { + "from_node": "p2" + } + ] + } + }, + "div": { + "process_id": "divide", + "arguments": { + "x": { + "from_node": "sub" + }, + "y": { + "from_node": "sum" + } + } + }, + "p3": { + "process_id": "multiply", + "arguments": { + "x": 2.5, + "y": { + "from_node": "div" + } + }, + "result": true + } + } + } \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/payload_examples/put_process_graphs/put_process_graphs_1.json b/src/openeo_test_suite/tests/general/payload_examples/put_process_graphs/put_process_graphs_1.json new file mode 100644 index 0000000..d7a61f2 --- /dev/null +++ b/src/openeo_test_suite/tests/general/payload_examples/put_process_graphs/put_process_graphs_1.json @@ -0,0 +1,103 @@ +{ + "id": "evi", + "summary": "Enhanced Vegetation Index", + "description": "Computes the Enhanced Vegetation Index (EVI). 
It is computed with the following formula: `2.5 * (NIR - RED) / (1 + NIR + 6*RED + -7.5*BLUE)`.", + "parameters": [ + { + "name": "red", + "description": "Value from the red band.", + "schema": { + "type": "number" + } + }, + { + "name": "blue", + "description": "Value from the blue band.", + "schema": { + "type": "number" + } + }, + { + "name": "nir", + "description": "Value from the near infrared band.", + "schema": { + "type": "number" + } + } + ], + "returns": { + "description": "Computed EVI.", + "schema": { + "type": "number" + } + }, + "process_graph": { + "sub": { + "process_id": "subtract", + "arguments": { + "x": { + "from_parameter": "nir" + }, + "y": { + "from_parameter": "red" + } + } + }, + "p1": { + "process_id": "multiply", + "arguments": { + "x": 6, + "y": { + "from_parameter": "red" + } + } + }, + "p2": { + "process_id": "multiply", + "arguments": { + "x": -7.5, + "y": { + "from_parameter": "blue" + } + } + }, + "sum": { + "process_id": "sum", + "arguments": { + "data": [ + 1, + { + "from_parameter": "nir" + }, + { + "from_node": "p1" + }, + { + "from_node": "p2" + } + ] + } + }, + "div": { + "process_id": "divide", + "arguments": { + "x": { + "from_node": "sub" + }, + "y": { + "from_node": "sum" + } + } + }, + "p3": { + "process_id": "multiply", + "arguments": { + "x": 2.5, + "y": { + "from_node": "div" + } + }, + "result": true + } + } +} \ No newline at end of file diff --git a/src/openeo_test_suite/tests/general/test_compliance.py b/src/openeo_test_suite/tests/general/test_compliance.py new file mode 100644 index 0000000..b424c71 --- /dev/null +++ b/src/openeo_test_suite/tests/general/test_compliance.py @@ -0,0 +1,1467 @@ +from openapi_core import Spec +import pytest +import requests +import openeo_test_suite.lib.compliance_util as conformance_util +import uuid + + +@pytest.fixture(scope="session") +def base_url(request): + return conformance_util.get_base_url(request=request) + + +@pytest.fixture(scope="session") +def domain(request): + return conformance_util.get_domain(request=request) + + +@pytest.fixture(scope="session") +def spec(request): + return conformance_util.adjust_spec( + conformance_util.get_spec_path(), + conformance_util.get_base_url(request), + conformance_util.get_domain(request), + ) + + +@pytest.fixture(scope="session") +def bearer_token(pytestconfig): + bearer_token = conformance_util.get_access_token(pytestconfig) + return f"Bearer {bearer_token}" + + +def test_GET_backend_info(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "" + test_name = "Backend Info" + + # Run through all the generic GET endpoints and test their response to a proper request. + + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_well_known(domain: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: Change server in spec + testing: test response by API for GET requests + cleanup: Change server back potentially + """ + + endpoint_path = "/.well-known/openeo" + test_name = "Well known" + + # Run through all the generic GET endpoints and test their response to a proper request. 
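+    # Note: the well-known document is served from the domain root, not from the versioned base URL.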
+ + fail_log = conformance_util.test_endpoint( + base_url=domain, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_file_formats(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "file_formats" + test_name = "File formats" + + # Run through all the generic GET endpoints and test their response to a proper request. + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_conformance(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "conformance" + test_name = "Conformance" + + # Run through all the generic GET endpoints and test their response to a proper request. + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_udf_runtimes(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "udf_runtimes" + test_name = "UDF runtimes" + + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_service_types(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "service_types" + test_name = "Service Types" + + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_credentials_oidc(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "credentials/oidc" + test_name = "OpenID Connect authentication" + + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_collections(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "collections" + test_name = "Basic metadata for all collections" + + fail_log = conformance_util.test_endpoint( + base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec + ) + + assert fail_log == "" + + +def test_GET_processes(base_url: str, spec: Spec, bearer_token: str): + """ + tests all the generic GET endpoints that require neither setup nor cleanup + + setup: None + testing: test response by API for GET requests + cleanup: None + + """ + endpoint_path = "processes" + test_name = "List of Predefined Processes" + + fail_log = 
conformance_util.test_endpoint(
+        base_url=base_url, endpoint_path=endpoint_path, test_name=test_name, spec=spec
+    )
+
+    assert fail_log == ""
+
+
+def test_GET_me(base_url: str, spec: Spec, bearer_token: str):
+    """
+    tests the generic GET endpoint for user information; it requires a bearer token but neither setup nor cleanup
+
+    setup: None
+    testing: test response by API for GET requests
+    cleanup: None
+
+    """
+    endpoint_path = "me"
+    test_name = "Information about logged in user"
+
+    fail_log = conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+    )
+
+    assert fail_log == ""
+
+
+def test_GET_collections_collection_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    setup: collect list of collection ids
+    testing: test response by API for GET requests of all the collection ids
+    cleanup: None
+
+    """
+    fail_log = ""
+
+    collection_ids = [
+        collection["id"]
+        for collection in requests.get(f"{base_url}collections").json()["collections"]
+    ]
+
+    # prepare list of endpoints
+    special_GET_endpoints_no_auth = [
+        (f"collections/{collection_id}", f"Test for collections/{collection_id}")
+        for collection_id in collection_ids
+    ]
+
+    # Run through all the special GET endpoints and test their response to a proper request.
+    for path, test_name in special_GET_endpoints_no_auth:
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url, endpoint_path=path, test_name=test_name, spec=spec
+        )
+
+    assert fail_log == ""
+
+
+def test_GET_process_graphs(base_url: str, spec: Spec, bearer_token: str):
+    """
+    setup: submit valid user defined processes
+    testing: test response format of submitted user defined processes
+    cleanup: delete submitted user defined processes
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "process_graphs"
+    test_name = "List all user-defined processes"
+
+    created_udp_ids = conformance_util.put_process_graphs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+    fail_log += conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+    )
+
+    # CLEANUP
+
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_udp_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_GET_process_graphs_process_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    setup: submit user defined processes, gather list of user defined processes
+    testing: test each individual metadata response for submitted user-defined processes
+    cleanup: delete user defined processes
+    """
+    # SETUP
+    fail_log = ""
+
+    endpoint_path = "process_graphs"
+
+    created_udp_ids = conformance_util.put_process_graphs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # prepare list of endpoints
+    process_GET_endpoints_auth = [
+        (f"{endpoint_path}/{process_id}", f"Test for {endpoint_path}/{process_id}")
+        for process_id in created_udp_ids
+    ]
+
+    # TESTING
+    for prepared_endpoint_path, test_name in process_GET_endpoints_auth:
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=test_name,
+            spec=spec,
+            bearer_token=bearer_token,
+        )
+
+    # CLEANUP
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_udp_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_PUT_process_graphs_process_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: load payloads
+    TESTING: PUT UDPs
+    CLEANUP: DELETE UDPs
+    """
+
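+    # A fresh UUID is used for each stored process, so repeated runs do not collide
+    # with user-defined processes left over from earlier sessions.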
+    fail_log = ""
+
+    # SETUP
+    endpoint_path = "process_graphs"
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "put_process_graphs"
+
+    test_name = "Store a user-defined process"
+
+    created_udp_ids = []
+    payloads = conformance_util.load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    # TESTING
+    for payload in payloads:
+        id = str(uuid.uuid4())
+        created_udp_ids.append(id)
+        prepared_endpoint_path = f"{endpoint_path}/{id}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {id}",
+            spec=spec,
+            payload=payload,
+            bearer_token=bearer_token,
+            method="PUT",
+        )
+
+    # CLEANUP
+
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_udp_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_DELETE_process_graphs_process_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: PUT UDPs
+    TESTING: DELETE UDPs
+    CLEANUP: None
+    """
+    fail_log = ""
+
+    # SETUP
+    endpoint_path = "process_graphs"
+
+    created_udp_ids = conformance_util.put_process_graphs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    process_graphs_DELETE_endpoints = [
+        (f"{endpoint_path}/{process_id}", f"Test for {endpoint_path}/{process_id}")
+        for process_id in created_udp_ids
+    ]
+
+    # TESTING
+    for prepared_endpoint_path, test_name in process_graphs_DELETE_endpoints:
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=test_name,
+            spec=spec,
+            bearer_token=bearer_token,
+            method="DELETE",
+            expected_status_codes=204,
+        )
+
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_GET_jobs(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: post jobs
+    TESTING: GET jobs
+    CLEANUP: DELETE jobs
+    """
+    fail_log = ""
+
+    # SETUP
+    endpoint_path = "jobs"
+    test_name = "List all batch jobs"
+
+    created_batch_job_ids = conformance_util.post_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+
+    fail_log += conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=f"{test_name}",
+        spec=spec,
+        bearer_token=bearer_token,
+        method="GET",
+    )
+
+    # CLEANUP
+
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_batch_job_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_POST_jobs(base_url: str, spec: Spec, bearer_token: str):
+    """
+    setup: prepare batch job payloads
+    testing: test posting prepared batch jobs to endpoint
+    cleanup: delete posted batch jobs
+    """
+    fail_log = ""
+    # SETUP
+    endpoint_path = "jobs"
+    test_name = "Creates a new batch processing task"
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "post_jobs"
+
+    created_batch_job_ids = []
+
+    payloads = conformance_util.load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    # TESTING
+    for payload in payloads:
+        _, payload = conformance_util.set_uuid_in_job(payload)
+
+        fail_log_entry, response = conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=endpoint_path,
+            test_name=test_name,
+            spec=spec,
+            bearer_token=bearer_token,
+            payload=payload,
+            method="POST",
+            expected_status_codes=201,
+            return_response=True,
+        )
+
+        fail_log += fail_log_entry
+        created_batch_job_ids.append(response.headers["OpenEO-Identifier"])
+
+    # CLEANUP
+
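+    # Best-effort cleanup: delete_id_resource swallows per-id errors, so one failed deletion does not abort the rest.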
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_batch_job_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_GET_jobs_job_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: post jobs
+    TESTING: GET job metadata of posted jobs
+    CLEANUP: Delete jobs
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "jobs"
+    test_name = "Full metadata for a batch job"
+
+    created_batch_job_ids = conformance_util.post_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+
+    for job_id in created_batch_job_ids:
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            method="GET",
+        )
+
+    # CLEANUP
+
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_batch_job_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_PATCH_jobs_job_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: POST jobs, prepare payloads
+    TESTING: PATCH jobs
+    CLEANUP: DELETE jobs
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "jobs"
+    test_name = "Modify a batch job"
+
+    created_batch_job_ids = conformance_util.post_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "patch_jobs"
+
+    payloads = conformance_util.load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    # TESTING
+
+    for job_id, payload in zip(created_batch_job_ids, payloads):
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            payload=payload,
+            method="PATCH",
+            expected_status_codes=204,
+        )
+
+    # CLEANUP
+
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_batch_job_ids,
+    )
+
+    assert fail_log == ""
+
+
+def test_DELETE_jobs_job_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    setup: Post jobs
+    testing: Delete posted jobs
+    cleanup: None
+    """
+    fail_log = ""
+    # SETUP
+    endpoint_path = "jobs"
+    test_name = "Delete specific batch job"
+
+    created_job_ids = conformance_util.post_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+
+    for job_id in created_job_ids:
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            method="DELETE",
+            expected_status_codes=204,
+        )
+
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_POST_jobs_job_id_results(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: post jobs
+    TESTING: start batch jobs
+    CLEANUP: delete jobs (best effort)
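+    (cleanup is wrapped in try/except, since deleting a job that was just started may fail on some back-ends)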
+ """ + # SETUP + fail_log = "" + + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = "Start processing a batch job" + + created_batch_job_ids = conformance_util.post_jobs( + base_url=base_url, bearer_token=bearer_token + ) + + # TESTING + + for job_id in created_batch_job_ids: + prepared_endpoint_path = f"{endpoint_path}/{job_id}/{endpoint_path_extra}" + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} {job_id}", + spec=spec, + bearer_token=bearer_token, + method="POST", + expected_status_codes=202, + ) + + # CLEANUP + + try: + conformance_util.delete_id_resource( + base_url=base_url, + endpoint_path=endpoint_path, + bearer_token=bearer_token, + ids=created_batch_job_ids, + ) + except Exception as e: + print(e) + + assert fail_log == "" + + +@pytest.mark.vv +@pytest.mark.longrunning +def test_GET_jobs_job_id_results(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: POST jobs, START jobs (POST jobs/job_id/results), Wait for jobs to be finished + TESTING: GET job results + CLEANUP: DELETE Jobs + """ + fail_log = "" + # SETUP + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = "Download results for a completed batch job" + + created_batch_job_ids = conformance_util.post_start_jobs( + base_url=base_url, bearer_token=bearer_token + ) + + # TESTING + + conformance_util.wait_job_statuses( + base_url=base_url, + bearer_token=bearer_token, + job_ids=created_batch_job_ids, + job_statuses=["finished"], + timeout=160, + ) + + for job_id in created_batch_job_ids: + prepared_endpoint_path = f"{endpoint_path}/{job_id}/{endpoint_path_extra}" + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} {job_id}", + spec=spec, + bearer_token=bearer_token, + method="GET", + ) + + # CLEANUP + + conformance_util.delete_id_resource( + base_url=base_url, + endpoint_path=endpoint_path, + bearer_token=bearer_token, + ids=created_batch_job_ids, + ) + + assert fail_log == "" + + +def test_DELETE_jobs_job_id_results(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: POST jobs, Start processing jobs + TESTING: DELETE job id results: Cancel processing jobs + CLEANUP: Delete jobs + """ + fail_log = "" + # SETUP + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = "Cancel processing a batch job" + + created_batch_job_ids = conformance_util.post_start_jobs( + base_url=base_url, bearer_token=bearer_token + ) + + conformance_util.wait_job_statuses( + base_url=base_url, + bearer_token=bearer_token, + job_ids=created_batch_job_ids, + job_statuses=["queued", "running"], + timeout=120, + ) + + # TESTING + + for job_id in created_batch_job_ids: + prepared_endpoint_path = f"{endpoint_path}/{job_id}/{endpoint_path_extra}" + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} {job_id}", + spec=spec, + bearer_token=bearer_token, + method="DELETE", + expected_status_codes=204, + ) + + # CLEANUP + + conformance_util.delete_id_resource( + base_url=base_url, + endpoint_path=endpoint_path, + bearer_token=bearer_token, + ids=created_batch_job_ids, + ) + + assert fail_log == "" + + +def test_GET_jobs_job_id_estimate(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: POST jobs, start jobs + TESTING: GET estimates for jobs + CLEANUP: Cancel and delete jobs + """ + + fail_log = "" + + # SETUP + endpoint_path = "jobs" + 
+    endpoint_path_extra = "estimate"
+    test_name = "Get an estimate for a running job"
+
+    created_batch_job_ids = conformance_util.post_start_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+
+    for job_id in created_batch_job_ids:
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}/{endpoint_path_extra}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            method="GET",
+        )
+
+    # CLEANUP
+
+    conformance_util.cancel_delete_jobs(
+        base_url=base_url, bearer_token=bearer_token, job_ids=created_batch_job_ids
+    )
+
+    assert fail_log == ""
+
+
+def test_GET_jobs_job_id_logs(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: POST and start jobs
+    TESTING: GET logs for batch jobs
+    CLEANUP: cancel and delete jobs
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "jobs"
+    endpoint_path_extra = "logs"
+    test_name = "Logs for a batch job"
+
+    created_batch_job_ids = conformance_util.post_start_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    # TESTING
+
+    for job_id in created_batch_job_ids:
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}/{endpoint_path_extra}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            method="GET",
+        )
+    # CLEANUP
+
+    conformance_util.cancel_delete_jobs(
+        base_url=base_url, bearer_token=bearer_token, job_ids=created_batch_job_ids
+    )
+
+    assert fail_log == ""
+
+
+@pytest.mark.longrunning
+def test_POST_result(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: gather payloads
+    TESTING: POST payloads to result
+    CLEANUP: None
+    """
+    fail_log = ""
+
+    # SETUP
+
+    endpoint_path = "result"
+    test_name = "Process and download data synchronously"
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "post_result"
+
+    payloads = conformance_util.load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    # TESTING
+
+    for payload in payloads:
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=endpoint_path,
+            test_name=test_name,
+            spec=spec,
+            bearer_token=bearer_token,
+            payload=payload,
+            method="POST",
+            expected_status_codes=201,
+        )
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_POST_validation(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: load payloads
+    TESTING: POST payloads for validation
+    CLEANUP: None
+    """
+
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "validation"
+    test_name = "Validate a user-defined process (graph)"
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "post_validation"
+
+    payloads = conformance_util.load_payloads_from_directory(
+        directory_path=f"{directory_path}/{examples_directory}"
+    )
+
+    # TESTING
+
+    for payload in payloads:
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=endpoint_path,
+            test_name=test_name,
+            spec=spec,
+            bearer_token=bearer_token,
+            payload=payload,
+            method="POST",
+        )
+
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_none_PUT_process_graphs_process_id(
+    base_url: str, spec: Spec, bearer_token: str
+):
+    """
+    SETUP: None
+    TESTING: PUT a user-defined process without a payload
+    CLEANUP: None
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "process_graphs"
+
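+    # No payload is sent on purpose; the back-end is expected to answer with a 4xx status code.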
test_name = "Store a user-defined process (NEGATIVE)" + + # TESTING + + id = str(uuid.uuid4()) + prepared_endpoint_path = f"{endpoint_path}/{id}" + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} {id}", + spec=spec, + payload=None, + bearer_token=bearer_token, + method="PUT", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + + assert fail_log == "" + + +def test_negative_DELETE_process_graphs_process_id( + base_url: str, spec: Spec, bearer_token: str +): + """ + SETUP: None + TESTING: try DELETE non-existant UDPs + CLEANUP: None + """ + fail_log = "" + # SETUP + + endpoint = "process_graphs" + prepared_endpoint_path = f"{endpoint}/thisiddoesnotexist" + test_name = "Negative delete process graphs process id" + + # TESTING + + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=test_name, + spec=spec, + bearer_token=bearer_token, + method="DELETE", + expected_status_codes=range(400, 501), + ) + + # CLEANUP + assert fail_log == "" + + +def test_none_POST_jobs(base_url: str, spec: Spec, bearer_token: str): + """ + setup: prepare empty payloads + testing: test posting empty payloads endpoint + cleanup: None + """ + # SETUP + endpoint_path = "jobs" + test_name = "Creates a new batch processing task (NEGATIVE)" + + # TESTING + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=endpoint_path, + test_name=test_name, + spec=spec, + bearer_token=bearer_token, + payload=None, + method="POST", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + assert fail_log == "" + + +def test_none_PATCH_jobs_job_id(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: POST jobs, prepare payloads + TESTING: PATCH jobs with empty payload + CLEANUP: DELETE jobs + """ + + # SETUP + fail_log = "" + endpoint_path = "jobs" + test_name = "Negative Modify a batch job" + + created_batch_job_ids = conformance_util.post_jobs( + base_url=base_url, bearer_token=bearer_token + ) + + # TESTING + for job_id in created_batch_job_ids: + prepared_endpoint_path = f"{endpoint_path}/{job_id}" + fail_log += conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} {job_id}", + spec=spec, + bearer_token=bearer_token, + payload=None, + method="PATCH", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + conformance_util.delete_id_resource( + base_url=base_url, + endpoint_path=endpoint_path, + bearer_token=bearer_token, + ids=created_batch_job_ids, + ) + + assert fail_log == "" + + +def test_negative_DELETE_jobs_job_id(base_url: str, spec: Spec, bearer_token: str): + """ + setup: None + testing: Delete non-existent posted jobs + cleanup: None + """ + # SETUP + endpoint_path = "jobs" + test_name = "Delete specific batch job (NEGATIVE)" + + # TESTING + prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist" + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name} for non-existant id", + spec=spec, + bearer_token=bearer_token, + method="DELETE", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + assert fail_log == "" + + +def test_negative_POST_jobs_job_id_results( + base_url: str, spec: Spec, bearer_token: str +): + """ + SETUP: None + TESTING: start batch jobs that doesn't exist + CLEANUP: None + """ + # SETUP + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = 
"Start processing a batch job that doesn't exist" + + # TESTING + prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist/{endpoint_path_extra}" + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name}", + spec=spec, + bearer_token=bearer_token, + method="POST", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + assert fail_log == "" + + +def test_negative_GET_jobs_job_id_results(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: POST jobs, START jobs (POST jobs/job_id/results), Wait for jobs to be finished + TESTING: GET job results + CLEANUP: DELETE Jobs + """ + # SETUP + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = "Download results for a completed batch job that doesn't exist" + + # TESTING + prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist/{endpoint_path_extra}" + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name}", + spec=spec, + bearer_token=bearer_token, + method="GET", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + assert fail_log == "" + + +def test_negative_DELETE_jobs_job_id_results( + base_url: str, spec: Spec, bearer_token: str +): + """ + SETUP: + TESTING: DELETE job id results: Cancel processing jobs + CLEANUP: None + """ + # SETUP + endpoint_path = "jobs" + endpoint_path_extra = "results" + test_name = "Cancel processing a batch job that doesn't exist" + + # TESTING + prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist/{endpoint_path_extra}" + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name}", + spec=spec, + bearer_token=bearer_token, + method="DELETE", + expected_status_codes=range(400, 500), + ) + + # CLEANUP + assert fail_log == "" + + +def test_negative_GET_jobs_job_id_logs(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: None + TESTING: GET logs for batch jobs that don't exist + CLEANUP: + """ + # SETUP + + endpoint_path = "jobs" + endpoint_path_extra = "logs" + test_name = "Logs for a batch job" + + # TESTING + + prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist/{endpoint_path_extra}" + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=prepared_endpoint_path, + test_name=f"{test_name}", + spec=spec, + bearer_token=bearer_token, + method="GET", + expected_status_codes=range(400, 500), + ) + # CLEANUP + + assert fail_log == "" + + +@pytest.mark.longrunning +def test_none_POST_result(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: gather payloads + TESTING: POST empty payloads to result + CLEANUP: None + """ + # SETUP + + endpoint_path = "result" + test_name = "Process and download data synchronously" + + # TESTING + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=endpoint_path, + test_name=test_name, + spec=spec, + bearer_token=bearer_token, + payload=None, + method="POST", + expected_status_codes=range(400, 500), + ) + # CLEANUP + + assert fail_log == "" + + +def test_none_POST_validation(base_url: str, spec: Spec, bearer_token: str): + """ + SETUP: load empty payloads + TESTING: POST empty payloads for validation + CLEANUP: None + """ + + # SETUP + endpoint_path = "validation" + test_name = "Validate a user-defined process (graph)" + + # TESTING + fail_log = conformance_util.test_endpoint( + base_url=base_url, + endpoint_path=endpoint_path, + 
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+        payload=None,
+        method="POST",
+        expected_status_codes=range(400, 500),
+    )
+
+    # CLEANUP
+    assert fail_log == ""
+
+
+def test_empty_PUT_process_graphs_process_id(
+    base_url: str, spec: Spec, bearer_token: str
+):
+    """
+    SETUP: load an empty payload
+    TESTING: PUT a user-defined process with an empty payload
+    CLEANUP: None
+    """
+    fail_log = ""
+    # SETUP
+
+    endpoint_path = "process_graphs"
+
+    test_name = "Store a user-defined process (EMPTY)"
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "empty_payload"
+
+    payload = next(
+        conformance_util.load_payloads_from_directory(
+            directory_path=f"{directory_path}/{examples_directory}"
+        )
+    )
+
+    # TESTING
+
+    id = str(uuid.uuid4())
+    prepared_endpoint_path = f"{endpoint_path}/{id}"
+    fail_log += conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=prepared_endpoint_path,
+        test_name=f"{test_name} {id}",
+        spec=spec,
+        payload=payload,
+        bearer_token=bearer_token,
+        method="PUT",
+        expected_status_codes=range(400, 501),
+    )
+
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_empty_POST_jobs(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: load an empty payload
+    TESTING: POST jobs with an empty payload
+    CLEANUP: None
+    """
+    # SETUP
+    endpoint_path = "jobs"
+    test_name = "Creates a new batch processing task (EMPTY)"
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "empty_payload"
+
+    payload = next(
+        conformance_util.load_payloads_from_directory(
+            directory_path=f"{directory_path}/{examples_directory}"
+        )
+    )
+
+    # TESTING
+    fail_log = conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+        payload=payload,
+        method="POST",
+        expected_status_codes=range(400, 501),
+    )
+
+    # CLEANUP
+    assert fail_log == ""
+
+
+def test_empty_PATCH_jobs_job_id(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: POST jobs, load an empty payload
+    TESTING: PATCH jobs with an empty payload
+    CLEANUP: DELETE jobs
+    """
+
+    # SETUP
+    fail_log = ""
+    endpoint_path = "jobs"
+    test_name = "Modify a batch job (EMPTY)"
+
+    created_batch_job_ids = conformance_util.post_jobs(
+        base_url=base_url, bearer_token=bearer_token
+    )
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "empty_payload"
+
+    payload = next(
+        conformance_util.load_payloads_from_directory(
+            directory_path=f"{directory_path}/{examples_directory}"
+        )
+    )
+
+    # TESTING
+    for job_id in created_batch_job_ids:
+        prepared_endpoint_path = f"{endpoint_path}/{job_id}"
+        fail_log += conformance_util.test_endpoint(
+            base_url=base_url,
+            endpoint_path=prepared_endpoint_path,
+            test_name=f"{test_name} {job_id}",
+            spec=spec,
+            bearer_token=bearer_token,
+            payload=payload,
+            method="PATCH",
+            expected_status_codes=204,
+        )
+
+    # CLEANUP
+    conformance_util.delete_id_resource(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        bearer_token=bearer_token,
+        ids=created_batch_job_ids,
+    )
+
+    assert fail_log == ""
+
+
+@pytest.mark.longrunning
+def test_empty_POST_result(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: load an empty payload
+    TESTING: POST an empty payload to result
+    CLEANUP: None
+    """
+    # SETUP
+
+    endpoint_path = "result"
+    test_name = "Process and download data synchronously (EMPTY)"
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "empty_payload"
+
+    payload = next(
+        conformance_util.load_payloads_from_directory(
+            directory_path=f"{directory_path}/{examples_directory}"
+        )
+    )
+    # TESTING
+    fail_log = conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+        payload=payload,
+        method="POST",
+        expected_status_codes=range(400, 500),
+    )
+    # CLEANUP
+
+    assert fail_log == ""
+
+
+def test_empty_POST_validation(base_url: str, spec: Spec, bearer_token: str):
+    """
+    SETUP: load an empty payload
+    TESTING: POST an empty payload for validation
+    CLEANUP: None
+    """
+
+    # SETUP
+    endpoint_path = "validation"
+    test_name = "Validate a user-defined process (graph) (EMPTY)"
+
+    directory_path = conformance_util.get_examples_path()
+    examples_directory = "empty_payload"
+
+    payload = next(
+        conformance_util.load_payloads_from_directory(
+            directory_path=f"{directory_path}/{examples_directory}"
+        )
+    )
+
+    # TESTING
+    fail_log = conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=endpoint_path,
+        test_name=test_name,
+        spec=spec,
+        bearer_token=bearer_token,
+        payload=payload,
+        method="POST",
+        expected_status_codes=200,
+    )
+
+    # CLEANUP
+    assert fail_log == ""
+
+
+# endregion
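+
+
+# Editor's sketch (illustrative only, not exercised by the suite): the negative
+# tests above all repeat one pattern -- request a resource id that does not
+# exist and accept any 4xx client error. A hypothetical shared helper for that
+# pattern could look like the following; the name `_check_missing_resource` and
+# its parameters are assumptions, not part of this patch. It delegates to
+# `conformance_util.test_endpoint` exactly as the tests above do.
+def _check_missing_resource(
+    base_url: str,
+    spec: Spec,
+    bearer_token: str,
+    endpoint_path: str,
+    method: str,
+    endpoint_path_extra: str = "",
+):
+    # Build e.g. "jobs/thisiddoesnotexist/results" from the path parts.
+    prepared_endpoint_path = f"{endpoint_path}/thisiddoesnotexist"
+    if endpoint_path_extra:
+        prepared_endpoint_path += f"/{endpoint_path_extra}"
+    # Any 4xx status passes; everything else is reported in the fail log.
+    return conformance_util.test_endpoint(
+        base_url=base_url,
+        endpoint_path=prepared_endpoint_path,
+        test_name=f"{method} {prepared_endpoint_path} (NEGATIVE)",
+        spec=spec,
+        bearer_token=bearer_token,
+        method=method,
+        expected_status_codes=range(400, 500),
+    )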