diff --git a/assets/training/model_management/components/condition_block/asset.yaml b/assets/training/model_management/components/condition_block/asset.yaml new file mode 100644 index 0000000000..ea30e0bd5b --- /dev/null +++ b/assets/training/model_management/components/condition_block/asset.yaml @@ -0,0 +1,3 @@ +type: component +spec: spec.yaml +categories: ["Models"] diff --git a/assets/training/model_management/components/condition_block/spec.yaml b/assets/training/model_management/components/condition_block/spec.yaml new file mode 100644 index 0000000000..02a4786d13 --- /dev/null +++ b/assets/training/model_management/components/condition_block/spec.yaml @@ -0,0 +1,34 @@ +$schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json + +name: condition_block +version: 0.0.1 +type: command + +is_deterministic: True + +display_name: Condition Block +description: Component takes a condition and executes the command based on the condition. + +environment: azureml://registries/azureml/environments/model-management/versions/34 + +code: ../../src/ +command: | + set -ex + IFS=',' read -ra input_args <<< "${{inputs.args}}" + echo "Running condition block ..." + python -u run_condition_block.py --condition ${{inputs.condition}} --input-args "${{inputs.args}}" + echo "Completed condition block ... " + +inputs: + condition: + type: string + description: Input condition. + optional: false + + args: + type: string + description: | + Input arguments. + Arguments expressed as below. Do not use quotes for passing. 
+ eg: a==1.0, b==1 + optional: false \ No newline at end of file diff --git a/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml b/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml index 7d2a12753c..db8f0d7af2 100644 --- a/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml +++ b/assets/training/model_management/components/convert_model_to_mlflow/spec.yaml @@ -1,7 +1,7 @@ $schema: https://azuremlschemas.azureedge.net/latest/commandComponent.schema.json name: convert_model_to_mlflow -version: 0.0.33 +version: 0.0.34 type: command is_deterministic: True @@ -19,7 +19,7 @@ command: | pip_pkg_str="${pip_pkgs[*]}" if [[ -n "$pip_pkg_str" ]]; then echo "Installing $pip_pkg_str"; pip install $pip_pkg_str; echo "pip installation completed. For any installation error please check above logs"; fi; echo "Running model conversion ... " - python -u run_model_preprocess.py $[[--model-id ${{inputs.model_id}}]] $[[--task-name ${{inputs.task_name}}]] $[[--model-download-metadata ${{inputs.model_download_metadata}}]] $[[--license-file-path ${{inputs.license_file_path}}]] $[[--hf-config-args "${{inputs.hf_config_args}}"]] $[[--hf-tokenizer-args "${{inputs.hf_tokenizer_args}}"]] $[[--hf-model-args "${{inputs.hf_model_args}}"]] $[[--hf-pipeline-args "${{inputs.hf_pipeline_args}}"]] $[[--hf-config-class ${{inputs.hf_config_class}}]] $[[--hf-model-class ${{inputs.hf_model_class}}]] $[[--hf-tokenizer-class ${{inputs.hf_tokenizer_class}}]] $[[--hf-use-experimental-features ${{inputs.hf_use_experimental_features}}]] $[[--extra-pip-requirements "${{inputs.extra_pip_requirements}}"]] $[[--inference-base-image "${{inputs.inference_base_image}}"]] --vllm-enabled ${{inputs.vllm_enabled}} --model-framework ${{inputs.model_framework}} --model-path ${{inputs.model_path}} --mlflow-model-output-dir ${{outputs.mlflow_model_folder}} --model-flavor ${{inputs.model_flavor}} + python -u run_model_preprocess.py $[[--model-id 
${{inputs.model_id}}]] $[[--task-name ${{inputs.task_name}}]] $[[--model-download-metadata ${{inputs.model_download_metadata}}]] $[[--license-file-path ${{inputs.license_file_path}}]] $[[--hf-config-args "${{inputs.hf_config_args}}"]] $[[--hf-tokenizer-args "${{inputs.hf_tokenizer_args}}"]] $[[--hf-model-args "${{inputs.hf_model_args}}"]] $[[--hf-pipeline-args "${{inputs.hf_pipeline_args}}"]] $[[--hf-config-class ${{inputs.hf_config_class}}]] $[[--hf-model-class ${{inputs.hf_model_class}}]] $[[--hf-tokenizer-class ${{inputs.hf_tokenizer_class}}]] $[[--hf-use-experimental-features ${{inputs.hf_use_experimental_features}}]] $[[--extra-pip-requirements "${{inputs.extra_pip_requirements}}"]] $[[--inference-base-image "${{inputs.inference_base_image}}"]] --vllm-enabled ${{inputs.vllm_enabled}} --model-framework ${{inputs.model_framework}} $[[--model-path "${{inputs.model_path}}"]] $[[--model-path-mmd "${{inputs.model_path_mmd}}"]] --mlflow-model-output-dir ${{outputs.mlflow_model_folder}} --model-flavor ${{inputs.model_flavor}} echo "Completed model conversion ... " inputs: @@ -156,7 +156,13 @@ inputs: type: uri_folder description: Path to the model. mode: ro_mount - optional: false + optional: true + + model_path_mmd: + type: uri_folder + description: Path to the MMD model. 
+ mode: ro_mount + optional: true license_file_path: type: uri_file diff --git a/assets/training/model_management/components/import_model/spec.yaml b/assets/training/model_management/components/import_model/spec.yaml index 2ff88ce5f8..c4244504b2 100644 --- a/assets/training/model_management/components/import_model/spec.yaml +++ b/assets/training/model_management/components/import_model/spec.yaml @@ -4,7 +4,7 @@ type: pipeline name: import_model display_name: Import model description: Import a model into a workspace or a registry -version: 0.0.39 +version: 0.0.40 # Pipeline inputs inputs: @@ -270,8 +270,23 @@ jobs: model_output: type: uri_folder + mmdetection_image_objectdetection_instancesegmentation_model_import: + component: azureml:mmdetection_image_objectdetection_instancesegmentation_model_import/versions/0.0.19 + compute: ${{parent.inputs.compute}} + resources: + instance_type: '${{parent.inputs.instance_type}}' + identity: + type: user_identity + inputs: + model_family: 'MmDetectionImage' + model_name: ${{parent.inputs.model_id}} + download_from_source: False + outputs: + output_dir: + type: uri_folder + convert_model_to_mlflow: - component: azureml:convert_model_to_mlflow:0.0.33 + component: azureml:convert_model_to_mlflow:0.0.34 compute: ${{parent.inputs.compute}} resources: instance_type: '${{parent.inputs.instance_type}}' @@ -284,6 +299,7 @@ jobs: license_file_path: ${{parent.inputs.license_file_path}} model_framework: ${{parent.inputs.model_framework}} model_download_metadata: ${{parent.jobs.download_model.outputs.model_download_metadata}} + model_path_mmd: ${{parent.jobs.mmdetection_image_objectdetection_instancesegmentation_model_import.outputs.output_dir}} model_path: ${{parent.jobs.download_model.outputs.model_output}} hf_config_args: ${{parent.inputs.hf_config_args}} hf_tokenizer_args: ${{parent.inputs.hf_tokenizer_args}} diff --git a/assets/training/model_management/src/run_condition_block.py 
b/assets/training/model_management/src/run_condition_block.py new file mode 100644 index 0000000000..9366112381 --- /dev/null +++ b/assets/training/model_management/src/run_condition_block.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Run condition block module.""" + +import argparse +import os +import json +import shutil +from azureml.model.mgmt.config import AppName, ModelFramework +from azureml.model.mgmt.processors.transformers.config import HF_CONF +from azureml.model.mgmt.processors.preprocess import run_preprocess, check_for_py_files +from azureml.model.mgmt.processors.transformers.config import SupportedTasks as TransformersSupportedTasks +from azureml.model.mgmt.processors.pyfunc.config import SupportedTasks as PyFuncSupportedTasks +from azureml.model.mgmt.utils.exceptions import swallow_all_exceptions, UnsupportedTaskType +from azureml._common.exceptions import AzureMLException +from azureml._common._error_definition.azureml_error import AzureMLError +from azureml.model.mgmt.utils.logging_utils import custom_dimensions, get_logger +from pathlib import Path +from tempfile import TemporaryDirectory + + +logger = get_logger(__name__) +custom_dimensions.app_name = AppName.CONVERT_MODEL_TO_MLFLOW + + +def _get_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--condition", type=str, required=True, help="Condition") + parser.add_argument( + "--input-args", + type=str, + required=True, + help="Input args", + ) + return parser + + +@swallow_all_exceptions(logger) +def run(): + """Evaluate the condition against the provided input args.""" + parser = _get_parser() + args, _ = parser.parse_known_args() + + input_args = args.input_args + condition = args.condition + input_args = json.loads(input_args) + result = None + try: + result = eval(condition, input_args)  # NOTE(review): eval on pipeline-supplied input; ensure 'condition' is trusted/validated + except Exception as e: + logger.error(f"Error evaluating condition: {e}") + result = False + logger.info(f"Condition evaluated to: {result}") + + +if __name__ == "__main__": + run() diff --git 
a/assets/training/model_management/src/run_model_preprocess.py b/assets/training/model_management/src/run_model_preprocess.py index 6dfebedfb2..3c8efaafc1 100644 --- a/assets/training/model_management/src/run_model_preprocess.py +++ b/assets/training/model_management/src/run_model_preprocess.py @@ -73,7 +73,8 @@ def _get_parser(): required=False, help="Model download details", ) - parser.add_argument("--model-path", type=Path, required=True, help="Model input path") + parser.add_argument("--model-path", type=Path, required=False, help="Model input path") + parser.add_argument("--model-path-mmd", type=Path, required=False, help="MMD Model input path") parser.add_argument("--license-file-path", type=Path, required=False, help="License file path") parser.add_argument( "--mlflow-model-output-dir", @@ -107,7 +108,7 @@ def run(): inference_base_image = args.inference_base_image model_download_metadata_path = args.model_download_metadata - model_path = args.model_path + model_path = args.model_path_mmd if model_framework == 'MMLab' else args.model_path mlflow_model_output_dir = args.mlflow_model_output_dir license_file_path = args.license_file_path TRUST_CODE_KEY = "trust_remote_code=True"