Update onnx and Werkzeug versions in environments for fixing vulnerabilities #3598

Open · wants to merge 1 commit into base: main

Commit 6ab4094: Update onnx and Werkzeug versions in environments for fixing vulnerab…
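
The page does not render the diff itself, but since the change is a pair of version bumps, a quick sanity check inside the rebuilt images is to compare the installed packages against the new pins. The minimum versions below are placeholders, not values taken from commit 6ab4094; this is a sketch only:

    from importlib.metadata import version
    from packaging.version import Version

    # Placeholder minimums -- substitute the pins actually used in the PR.
    expected = {"onnx": "1.16.2", "Werkzeug": "3.0.6"}

    for package, minimum in expected.items():
        installed = Version(version(package))
        status = "ok" if installed >= Version(minimum) else "still below the pin"
        print(f"{package}: installed {installed}, expected >= {minimum} ({status})")
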
GitHub Actions / Test Results for assets-test failed Nov 13, 2024 in 0s

2 fail in 27m 8s

2 tests   0 ✅  27m 8s ⏱️
2 suites  0 💤
2 files    2 ❌

Results for commit 6ab4094.

Annotations

Check warning on line 0 in environment/ai-ml-automl-dnn.tests.automl_sample_test

test_azure_ai_ml_automl (environment/ai-ml-automl-dnn.tests.automl_sample_test) failed

pytest-reports/environment/ai-ml-automl-dnn.xml [took 12m 33s]
Raw output
azure.ai.ml.exceptions.JobException: Exception : 
 {
    "error": {
        "code": "UserError",
        "message": "Image build failed. For more details, check log file azureml-logs/20_image_build_log.txt.",
        "message_format": "Image build failed. For more details, check log file {ArtifactPath}.",
        "message_parameters": {
            "ArtifactPath": "azureml-logs/20_image_build_log.txt"
        },
        "details": [],
        "inner_error": {
            "code": "BadArgument",
            "inner_error": {
                "code": "ImageBuildFailure"
            }
        }
    },
    "correlation": {
        "operation": "50b6366d6212ae5898680c78abb03c99",
        "request": "f0c0c5ee19c9f390"
    },
    "environment": "eastus",
    "location": "eastus",
    "time": "2024-11-13T07:13:22.846957Z",
    "component_name": "RunHistory"
}
def test_azure_ai_ml_automl():
        """Tests a sample job using ai-ml-automl-dnn as the environment."""
        this_dir = Path(__file__).parent
    
        subscription_id = os.environ.get("subscription_id")
        resource_group = os.environ.get("resource_group")
        workspace_name = os.environ.get("workspace")
    
        ml_client = MLClient(
            AzureCliCredential(), subscription_id, resource_group, workspace_name
        )
    
        env_name = "ai-ml-automl-dnn"
    
        env_docker_context = Environment(
            build=BuildContext(path=this_dir / BUILD_CONTEXT),
            name="ai-ml-automl-dnn",
            description="ai-ml-automl-dnn environment created from a Docker context.",
        )
        ml_client.environments.create_or_update(env_docker_context)
    
        # create the command
        job = command(
            code=this_dir / JOB_SOURCE_CODE,  # local path where the code is stored
            command="python main.py --diabetes-csv ${{inputs.diabetes}}",
            inputs={
                "diabetes": Input(
                    type="uri_file",
                    path="https://azuremlexamples.blob.core.windows.net/datasets/diabetes.csv",
                )
            },
            environment=f"{env_name}@latest",
            compute=os.environ.get("cpu_cluster"),
            display_name="sklearn-diabetes-example",
            description="A test run of the ai-ml-automl-dnn curated environment",
            experiment_name="sklearnExperiment"
        )
    
        returned_job = ml_client.create_or_update(job)
        assert returned_job is not None
    
        # Poll until final status is reached or timed out
        timeout = time.time() + (TIMEOUT_MINUTES * 60)
        while time.time() <= timeout:
            job = ml_client.jobs.get(returned_job.name)
            status = job.status
            if status in [JobStatus.COMPLETED, JobStatus.FAILED]:
                break
            time.sleep(30)  # sleep 30 seconds
        else:
            # Timeout
            ml_client.jobs.cancel(returned_job.name)
            raise Exception(f"Test aborted because the job took longer than {TIMEOUT_MINUTES} minutes. "
                            f"Last status was {status}.")
    
        if status == JobStatus.FAILED:
            ml_client.jobs.download(returned_job.name)
            if STD_LOG.exists():
                print(f"*** BEGIN {STD_LOG} ***")
                with open(STD_LOG, "r") as f:
                    print(f.read(), end="")
                print(f"*** END {STD_LOG} ***")
            else:
>               ml_client.jobs.stream(returned_job.name)

tests/automl_sample_test.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/share/miniconda/envs/isolated_1731481244107/lib/python3.13/site-packages/azure/core/tracing/decorator.py:105: in wrapper_use_tracer
    return func(*args, **kwargs)
/usr/share/miniconda/envs/isolated_1731481244107/lib/python3.13/site-packages/azure/ai/ml/operations/_job_operations.py:617: in stream
    self._stream_logs_until_completion(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

run_operations = <azure.ai.ml.operations._run_operations.RunOperations object at 0x7f8ef6d402f0>
job_resource = <azure.ai.ml._restclient.v2022_10_01_preview.models._models_py3.JobBase object at 0x7f8ef4331cd0>
datastore_operations = <azure.ai.ml.operations._datastore_operations.DatastoreOperations object at 0x7f8ef6d0fa10>
raise_exception_on_failed_job = True

    def stream_logs_until_completion(
        run_operations: RunOperations,
        job_resource: JobBaseData,
        datastore_operations: DatastoreOperations = None,
        raise_exception_on_failed_job=True,
        *,
        requests_pipeline: HttpPipeline
    ) -> None:
        """Stream the experiment run output to the specified file handle. By
        default the the file handle points to stdout.
    
        :param run_operations: The run history operations class.
        :type run_operations: RunOperations
        :param job_resource: The job to stream
        :type job_resource: JobBaseData
        :param datastore_operations: Optional, the datastore operations class, used to get logs from datastore
        :type datastore_operations: Optional[DatastoreOperations]
        :param raise_exception_on_failed_job: Should this method fail if job fails
        :type raise_exception_on_failed_job: Boolean
        :return:
        :rtype: None
        """
        job_type = job_resource.properties.job_type
        job_name = job_resource.name
        studio_endpoint = job_resource.properties.services.get("Studio", None)
        studio_endpoint = studio_endpoint.endpoint if studio_endpoint else None
        file_handle = sys.stdout
        ds_properties = None
        prefix = None
        if (
            hasattr(job_resource.properties, "outputs")
            and job_resource.properties.job_type != RestJobType.AUTO_ML
            and datastore_operations
        ):
            # Get default output location
    
            default_output = (
                job_resource.properties.outputs.get("default", None) if job_resource.properties.outputs else None
            )
            is_uri_folder = default_output and default_output.job_output_type == DataType.URI_FOLDER
            if is_uri_folder:
                output_uri = default_output.uri
                # Parse the uri format
                output_uri = output_uri.split("datastores/")[1]
                datastore_name, prefix = output_uri.split("/", 1)
                ds_properties = get_datastore_info(datastore_operations, datastore_name)
    
        try:
            file_handle.write("RunId: {}\n".format(job_name))
            file_handle.write("Web View: {}\n".format(studio_endpoint))
    
            _current_details: RunDetails = run_operations.get_run_details(job_name)
    
            processed_logs = {}
    
            poll_start_time = time.time()
            pipeline_with_retries = create_requests_pipeline_with_retry(requests_pipeline=requests_pipeline)
            while (
                _current_details.status in RunHistoryConstants.IN_PROGRESS_STATUSES
                or _current_details.status == JobStatus.FINALIZING
            ):
                file_handle.flush()
                time.sleep(_wait_before_polling(time.time() - poll_start_time))
                _current_details: RunDetails = run_operations.get_run_details(job_name)  # TODO use FileWatcher
                if job_type.lower() in JobType.PIPELINE:
                    legacy_folder_name = "/logs/azureml/"
                else:
                    legacy_folder_name = "/azureml-logs/"
                _current_logs_dict = (
                    list_logs_in_datastore(
                        ds_properties,
                        prefix=prefix,
                        legacy_log_folder_name=legacy_folder_name,
                    )
                    if ds_properties is not None
                    else _current_details.log_files
                )
                # Get the list of new logs available after filtering out the processed ones
                available_logs = _get_sorted_filtered_logs(_current_logs_dict, job_type, processed_logs)
                content = ""
                for current_log in available_logs:
                    content = download_text_from_url(
                        _current_logs_dict[current_log],
                        pipeline_with_retries,
                        timeout=RunHistoryConstants._DEFAULT_GET_CONTENT_TIMEOUT,
                    )
    
                    _incremental_print(content, processed_logs, current_log, file_handle)
    
                # TODO: Temporary solution to wait for all the logs to be printed in the finalizing state.
                if (
                    _current_details.status not in RunHistoryConstants.IN_PROGRESS_STATUSES
                    and _current_details.status == JobStatus.FINALIZING
                    and "The activity completed successfully. Finalizing run..." in content
                ):
                    break
    
            file_handle.write("\n")
            file_handle.write("Execution Summary\n")
            file_handle.write("=================\n")
            file_handle.write("RunId: {}\n".format(job_name))
            file_handle.write("Web View: {}\n".format(studio_endpoint))
    
            warnings = _current_details.warnings
            if warnings:
                messages = [x.message for x in warnings if x.message]
                if len(messages) > 0:
                    file_handle.write("\nWarnings:\n")
                    for message in messages:
                        file_handle.write(message + "\n")
                    file_handle.write("\n")
    
            if _current_details.status == JobStatus.FAILED:
                error = (
                    _current_details.error.as_dict()
                    if _current_details.error
                    else "Detailed error not set on the Run. Please check the logs for details."
                )
                # If we are raising the error later on, so we don't double print.
                if not raise_exception_on_failed_job:
                    file_handle.write("\nError:\n")
                    file_handle.write(json.dumps(error, indent=4))
                    file_handle.write("\n")
                else:
>                   raise JobException(
                        message="Exception : \n {} ".format(json.dumps(error, indent=4)),
                        target=ErrorTarget.JOB,
                        no_personal_data_message="Exception raised on failed job.",
                        error_category=ErrorCategory.SYSTEM_ERROR,
                    )
E                   azure.ai.ml.exceptions.JobException: Exception : 
E                    {
E                       "error": {
E                           "code": "UserError",
E                           "message": "Image build failed. For more details, check log file azureml-logs/20_image_build_log.txt.",
E                           "message_format": "Image build failed. For more details, check log file {ArtifactPath}.",
E                           "message_parameters": {
E                               "ArtifactPath": "azureml-logs/20_image_build_log.txt"
E                           },
E                           "details": [],
E                           "inner_error": {
E                               "code": "BadArgument",
E                               "inner_error": {
E                                   "code": "ImageBuildFailure"
E                               }
E                           }
E                       },
E                       "correlation": {
E                           "operation": "50b6366d6212ae5898680c78abb03c99",
E                           "request": "f0c0c5ee19c9f390"
E                       },
E                       "environment": "eastus",
E                       "location": "eastus",
E                       "time": "2024-11-13T07:13:22.846957Z",
E                       "component_name": "RunHistory"
E                   }

/usr/share/miniconda/envs/isolated_1731481244107/lib/python3.13/site-packages/azure/ai/ml/operations/_job_ops_helper.py:297: JobException
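
Both failures are the same ImageBuildFailure raised before the job ever starts, so the streamed run output contains nothing actionable; the useful detail is in the image build log named in the error (azureml-logs/20_image_build_log.txt). A minimal sketch for pulling that log locally with the same SDK calls the test already uses; the workspace coordinates, run name, and downloaded folder layout are assumptions to fill in, not values from this run:

    from pathlib import Path
    from azure.ai.ml import MLClient
    from azure.identity import AzureCliCredential

    # Assumed workspace coordinates -- substitute the values the CI run used.
    ml_client = MLClient(
        AzureCliCredential(), "<subscription_id>", "<resource_group>", "<workspace>"
    )

    run_name = "<failed_run_name>"  # the RunId printed at the top of the streamed output
    ml_client.jobs.download(name=run_name, download_path="./job_logs", all=True)

    # The build log typically lands under the downloaded artifacts; adjust if your layout differs.
    build_log = Path("./job_logs/artifacts/azureml-logs/20_image_build_log.txt")
    if build_log.exists():
        print(build_log.read_text())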

Check warning on line 0 in environment/ai-ml-automl.tests.automl_sample_test

test_azure_ai_ml_automl (environment/ai-ml-automl.tests.automl_sample_test) failed

pytest-reports/environment/ai-ml-automl.xml [took 14m 33s]
Raw output
azure.ai.ml.exceptions.JobException: Exception : 
 {
    "error": {
        "code": "UserError",
        "message": "Image build failed. For more details, check log file azureml-logs/20_image_build_log.txt.",
        "message_format": "Image build failed. For more details, check log file {ArtifactPath}.",
        "message_parameters": {
            "ArtifactPath": "azureml-logs/20_image_build_log.txt"
        },
        "details": [],
        "inner_error": {
            "code": "BadArgument",
            "inner_error": {
                "code": "ImageBuildFailure"
            }
        }
    },
    "correlation": {
        "operation": "fb09625a5c1b573892fd734fc7f8c24a",
        "request": "b06a5fb9c2d60b63"
    },
    "environment": "eastus",
    "location": "eastus",
    "time": "2024-11-13T07:15:00.136091Z",
    "component_name": "RunHistory"
}
def test_azure_ai_ml_automl():
        """Tests a sample job using ai-ml-automl as the environment."""
        this_dir = Path(__file__).parent
    
        subscription_id = os.environ.get("subscription_id")
        resource_group = os.environ.get("resource_group")
        workspace_name = os.environ.get("workspace")
    
        ml_client = MLClient(
            AzureCliCredential(), subscription_id, resource_group, workspace_name
        )
    
        env_name = "ai-ml-automl"
    
        env_docker_context = Environment(
            build=BuildContext(path=this_dir / BUILD_CONTEXT),
            name="ai-ml-automl",
            description="ai-ml-automl environment created from a Docker context.",
        )
        ml_client.environments.create_or_update(env_docker_context)
    
        # create the command
        job = command(
            code=this_dir / JOB_SOURCE_CODE,  # local path where the code is stored
            command="python main.py --diabetes-csv ${{inputs.diabetes}}",
            inputs={
                "diabetes": Input(
                    type="uri_file",
                    path="https://azuremlexamples.blob.core.windows.net/datasets/diabetes.csv",
                )
            },
            environment=f"{env_name}@latest",
            compute=os.environ.get("cpu_cluster"),
            display_name="sklearn-diabetes-example",
            description="A test run of the ai-ml-automl curated environment",
            experiment_name="sklearnExperiment"
        )
    
        returned_job = ml_client.create_or_update(job)
        assert returned_job is not None
    
        # Poll until final status is reached or timed out
        timeout = time.time() + (TIMEOUT_MINUTES * 60)
        while time.time() <= timeout:
            job = ml_client.jobs.get(returned_job.name)
            status = job.status
            if status in [JobStatus.COMPLETED, JobStatus.FAILED]:
                break
            time.sleep(30)  # sleep 30 seconds
        else:
            # Timeout
            ml_client.jobs.cancel(returned_job.name)
            raise Exception(f"Test aborted because the job took longer than {TIMEOUT_MINUTES} minutes. "
                            f"Last status was {status}.")
    
        if status == JobStatus.FAILED:
            ml_client.jobs.download(returned_job.name)
            if STD_LOG.exists():
                print(f"*** BEGIN {STD_LOG} ***")
                with open(STD_LOG, "r") as f:
                    print(f.read(), end="")
                print(f"*** END {STD_LOG} ***")
            else:
>               ml_client.jobs.stream(returned_job.name)

tests/automl_sample_test.py:85: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/share/miniconda/envs/isolated_1731481244880/lib/python3.13/site-packages/azure/core/tracing/decorator.py:105: in wrapper_use_tracer
    return func(*args, **kwargs)
/usr/share/miniconda/envs/isolated_1731481244880/lib/python3.13/site-packages/azure/ai/ml/operations/_job_operations.py:617: in stream
    self._stream_logs_until_completion(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

run_operations = <azure.ai.ml.operations._run_operations.RunOperations object at 0x7f154c13a660>
job_resource = <azure.ai.ml._restclient.v2022_10_01_preview.models._models_py3.JobBase object at 0x7f1549723a50>
datastore_operations = <azure.ai.ml.operations._datastore_operations.DatastoreOperations object at 0x7f154c107a10>
raise_exception_on_failed_job = True

    def stream_logs_until_completion(
        run_operations: RunOperations,
        job_resource: JobBaseData,
        datastore_operations: DatastoreOperations = None,
        raise_exception_on_failed_job=True,
        *,
        requests_pipeline: HttpPipeline
    ) -> None:
        """Stream the experiment run output to the specified file handle. By
        default the the file handle points to stdout.
    
        :param run_operations: The run history operations class.
        :type run_operations: RunOperations
        :param job_resource: The job to stream
        :type job_resource: JobBaseData
        :param datastore_operations: Optional, the datastore operations class, used to get logs from datastore
        :type datastore_operations: Optional[DatastoreOperations]
        :param raise_exception_on_failed_job: Should this method fail if job fails
        :type raise_exception_on_failed_job: Boolean
        :return:
        :rtype: None
        """
        job_type = job_resource.properties.job_type
        job_name = job_resource.name
        studio_endpoint = job_resource.properties.services.get("Studio", None)
        studio_endpoint = studio_endpoint.endpoint if studio_endpoint else None
        file_handle = sys.stdout
        ds_properties = None
        prefix = None
        if (
            hasattr(job_resource.properties, "outputs")
            and job_resource.properties.job_type != RestJobType.AUTO_ML
            and datastore_operations
        ):
            # Get default output location
    
            default_output = (
                job_resource.properties.outputs.get("default", None) if job_resource.properties.outputs else None
            )
            is_uri_folder = default_output and default_output.job_output_type == DataType.URI_FOLDER
            if is_uri_folder:
                output_uri = default_output.uri
                # Parse the uri format
                output_uri = output_uri.split("datastores/")[1]
                datastore_name, prefix = output_uri.split("/", 1)
                ds_properties = get_datastore_info(datastore_operations, datastore_name)
    
        try:
            file_handle.write("RunId: {}\n".format(job_name))
            file_handle.write("Web View: {}\n".format(studio_endpoint))
    
            _current_details: RunDetails = run_operations.get_run_details(job_name)
    
            processed_logs = {}
    
            poll_start_time = time.time()
            pipeline_with_retries = create_requests_pipeline_with_retry(requests_pipeline=requests_pipeline)
            while (
                _current_details.status in RunHistoryConstants.IN_PROGRESS_STATUSES
                or _current_details.status == JobStatus.FINALIZING
            ):
                file_handle.flush()
                time.sleep(_wait_before_polling(time.time() - poll_start_time))
                _current_details: RunDetails = run_operations.get_run_details(job_name)  # TODO use FileWatcher
                if job_type.lower() in JobType.PIPELINE:
                    legacy_folder_name = "/logs/azureml/"
                else:
                    legacy_folder_name = "/azureml-logs/"
                _current_logs_dict = (
                    list_logs_in_datastore(
                        ds_properties,
                        prefix=prefix,
                        legacy_log_folder_name=legacy_folder_name,
                    )
                    if ds_properties is not None
                    else _current_details.log_files
                )
                # Get the list of new logs available after filtering out the processed ones
                available_logs = _get_sorted_filtered_logs(_current_logs_dict, job_type, processed_logs)
                content = ""
                for current_log in available_logs:
                    content = download_text_from_url(
                        _current_logs_dict[current_log],
                        pipeline_with_retries,
                        timeout=RunHistoryConstants._DEFAULT_GET_CONTENT_TIMEOUT,
                    )
    
                    _incremental_print(content, processed_logs, current_log, file_handle)
    
                # TODO: Temporary solution to wait for all the logs to be printed in the finalizing state.
                if (
                    _current_details.status not in RunHistoryConstants.IN_PROGRESS_STATUSES
                    and _current_details.status == JobStatus.FINALIZING
                    and "The activity completed successfully. Finalizing run..." in content
                ):
                    break
    
            file_handle.write("\n")
            file_handle.write("Execution Summary\n")
            file_handle.write("=================\n")
            file_handle.write("RunId: {}\n".format(job_name))
            file_handle.write("Web View: {}\n".format(studio_endpoint))
    
            warnings = _current_details.warnings
            if warnings:
                messages = [x.message for x in warnings if x.message]
                if len(messages) > 0:
                    file_handle.write("\nWarnings:\n")
                    for message in messages:
                        file_handle.write(message + "\n")
                    file_handle.write("\n")
    
            if _current_details.status == JobStatus.FAILED:
                error = (
                    _current_details.error.as_dict()
                    if _current_details.error
                    else "Detailed error not set on the Run. Please check the logs for details."
                )
                # If we are raising the error later on, so we don't double print.
                if not raise_exception_on_failed_job:
                    file_handle.write("\nError:\n")
                    file_handle.write(json.dumps(error, indent=4))
                    file_handle.write("\n")
                else:
>                   raise JobException(
                        message="Exception : \n {} ".format(json.dumps(error, indent=4)),
                        target=ErrorTarget.JOB,
                        no_personal_data_message="Exception raised on failed job.",
                        error_category=ErrorCategory.SYSTEM_ERROR,
                    )
E                   azure.ai.ml.exceptions.JobException: Exception : 
E                    {
E                       "error": {
E                           "code": "UserError",
E                           "message": "Image build failed. For more details, check log file azureml-logs/20_image_build_log.txt.",
E                           "message_format": "Image build failed. For more details, check log file {ArtifactPath}.",
E                           "message_parameters": {
E                               "ArtifactPath": "azureml-logs/20_image_build_log.txt"
E                           },
E                           "details": [],
E                           "inner_error": {
E                               "code": "BadArgument",
E                               "inner_error": {
E                                   "code": "ImageBuildFailure"
E                               }
E                           }
E                       },
E                       "correlation": {
E                           "operation": "fb09625a5c1b573892fd734fc7f8c24a",
E                           "request": "b06a5fb9c2d60b63"
E                       },
E                       "environment": "eastus",
E                       "location": "eastus",
E                       "time": "2024-11-13T07:15:00.136091Z",
E                       "component_name": "RunHistory"
E                   }

/usr/share/miniconda/envs/isolated_1731481244880/lib/python3.13/site-packages/azure/ai/ml/operations/_job_ops_helper.py:297: JobException
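
Both environments are built from a Docker context (the BuildContext(path=this_dir / BUILD_CONTEXT) call in each test), so the fastest way to reproduce an ImageBuildFailure without a workspace is to build the same context locally and watch where dependency resolution breaks after the onnx/Werkzeug bumps. The context paths below are guesses at the repository layout, not paths confirmed by this log:

    import subprocess
    from pathlib import Path

    # Hypothetical locations of the two Docker contexts (whatever BUILD_CONTEXT resolves to).
    contexts = [
        Path("assets/training/general/environments/ai-ml-automl/context"),
        Path("assets/training/general/environments/ai-ml-automl-dnn/context"),
    ]

    for context in contexts:
        tag = f"local/{context.parent.name}:pr3598"
        print(f"--- building {context} as {tag} ---")
        # Mirrors the service-side image build; a non-zero exit here corresponds to ImageBuildFailure.
        result = subprocess.run(["docker", "build", "-t", tag, str(context)], text=True)
        if result.returncode != 0:
            print(f"Build failed for {context}; check the pip/conda output above for conflicting pins.")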