diff --git a/.ci/jenkins/ci.groovy b/.ci/jenkins/ci.groovy index 40fbe206..21398fc9 100644 --- a/.ci/jenkins/ci.groovy +++ b/.ci/jenkins/ci.groovy @@ -24,7 +24,6 @@ catch (Exception e) { [ sku : "skx", backends : ["cpu", "interpreter"] ], [ sku : "clx", backends : ["cpu", "interpreter"] ], [ sku : "bdw", backends : ["cpu", "interpreter"] ] - // [ sku: "iris", backend : "igpu" ] ] } echo "BACKEND_SKU_CONFIGURATIONS=${BACKEND_SKU_CONFIGURATIONS}" @@ -32,11 +31,11 @@ echo "BACKEND_SKU_CONFIGURATIONS=${BACKEND_SKU_CONFIGURATIONS}" // --- CI constants --- NGRAPH_ONNX_REPO_ADDRESS="git@github.com:NervanaSystems/ngraph-onnx.git" NGRAPH_REPO_ADDRESS="git@github.com:NervanaSystems/ngraph.git" -DLDT_REPO_ADDRESS = "git@gitlab-icv.inn.intel.com:inference-engine/dldt.git" +OPENVINO_REPO_ADDRESS = "git@github.com:openvinotoolkit/openvino.git" CI_LABELS = "ngraph_onnx && ci" CI_DIR = "ngraph-onnx/.ci/jenkins" -DOCKER_CONTAINER_NAME = "jenkins_ngraph-onnx_ci" +DOCKER_CONTAINER_PREFIX = "jenkins_ngraph-onnx_ci" JENKINS_GITHUB_CREDENTIAL_ID = "7157091e-bc04-42f0-99fd-dc4da2922a55" JENKINS_GITLAB_CREDENTIAL_ID = "1caab8d7-1d0c-4b8a-9438-b65336862ead" @@ -51,6 +50,7 @@ CONFIGURATION_WORKFLOW = { configuration -> timeout(activity: true, time: 60) { WORKDIR = "${WORKSPACE}/${BUILD_NUMBER}" DOCKER_HOME = "/home/${USER}" + DOCKER_CONTAINER_NAME="${DOCKER_CONTAINER_PREFIX}_${EXECUTOR_NUMBER}" try { stage("Clone repositories") { dir (WORKDIR) { @@ -60,11 +60,11 @@ CONFIGURATION_WORKFLOW = { configuration -> gitClone("Clone ngraph", NGRAPH_REPO_ADDRESS, configuration.ngraphBranch) } dir (WORKDIR) { - gitClone("Clone dldt", DLDT_REPO_ADDRESS, configuration.dldtBranch) + gitClone("Clone openvino", OPENVINO_REPO_ADDRESS, configuration.openvinoBranch) } - gitSubmoduleUpdate("dldt") + gitSubmoduleUpdate("openvino") } - String imageName = "${DOCKER_REGISTRY}/aibt/aibt/ngraph_cpp/${configuration.os}/ubuntu_18_04" + String imageName = 
"${DOCKER_REGISTRY}/aibt/aibt/ngraph_cpp/${configuration.os}/ubuntu_18_04_test" stage("Prepare Docker image") { pullDockerImage(imageName) appendUserToDockerImage(imageName) @@ -73,22 +73,7 @@ CONFIGURATION_WORKFLOW = { configuration -> runDockerContainer(imageName) } stage("Prepare environment") { - prepareEnvironment(configuration.backends, configuration.ngraphBranch) - } - for (backend in configuration.backends) { - try { - stage("Run ${backend} tests") { - runToxTests(backend) - } - } - catch(e) { - // If cause of exception was job abortion - throw exception - if ("$e".contains("143")) { - throw e - } else { - currentBuild.result = "FAILURE" - } - } + prepareEnvironment() } } catch(e) { @@ -172,7 +157,7 @@ def runDockerContainer(String imageName) { mkdir -p ${HOME}/ONNX_CI docker run -id --privileged \ --user ${USER} \ - --name ${DOCKER_CONTAINER_NAME} \ + --name ${DOCKER_CONTAINER_NAME} \ --volume ${WORKDIR}:/logs \ --volume ${HOME}/ONNX_CI/onnx_models/.onnx:${dockerOnnxModels} \ --volume ${HOME}/ONNX_CI/cache:${dockerCache} \ @@ -181,20 +166,17 @@ def runDockerContainer(String imageName) { """ } -def prepareEnvironment(List backends, String ngraph_branch) { - String backendsString = backends.join(",") +def prepareEnvironment() { sh """ docker exec ${DOCKER_CONTAINER_NAME} bash -c "${DOCKER_HOME}/${CI_DIR}/prepare_environment.sh \ - --build-dir=${DOCKER_HOME} \ - --backends=${backendsString} \ - --ngraph-branch=${ngraph_branch}" + --build-dir=${DOCKER_HOME} """ } def runToxTests(String backend) { String toxEnvVar = "TOX_INSTALL_NGRAPH_FROM=\${NGRAPH_WHL}" String backendEnvVar = "NGRAPH_BACKEND=${backend.toUpperCase()}" - String libraryVar = (backend == "ie") ? 
"LD_LIBRARY_PATH=${DOCKER_HOME}/dldt_dist/deployment_tools/inference_engine/external/tbb/lib:${DOCKER_HOME}/dldt_dist/deployment_tools/inference_engine/lib/intel64:${DOCKER_HOME}/dldt_dist/deployment_tools/inference_engine/external/mkltiny_lnx/lib:${DOCKER_HOME}/dldt_dist/deployment_tools/ngraph/lib" : "LD_LIBRARY_PATH=" + String libraryVar = (backend == "ie") ? "LD_LIBRARY_PATH=${DOCKER_HOME}/openvino_dist/deployment_tools/inference_engine/external/tbb/lib:${DOCKER_HOME}/openvino_dist/deployment_tools/inference_engine/lib/intel64:${DOCKER_HOME}/openvino_dist/deployment_tools/inference_engine/external/mkltiny_lnx/lib:${DOCKER_HOME}/openvino_dist/deployment_tools/ngraph/lib" : "LD_LIBRARY_PATH=" if (backend == "ie") { @@ -219,7 +201,7 @@ def cleanup() { deleteDir() } -def getConfigurationsMap(String dockerfilesPath, String ngraphOnnxBranch, String ngraphBranch, String dldtBranch) { +def getConfigurationsMap(String dockerfilesPath, String ngraphOnnxBranch, String ngraphBranch, String openvinoBranch) { def configurationsMap = [:] def osImages = sh (script: "find ${dockerfilesPath} -maxdepth 1 -name '*.dockerfile' -printf '%f\n'", returnStdout: true).trim().replaceAll(".dockerfile","").split("\n") as List @@ -230,7 +212,7 @@ def getConfigurationsMap(String dockerfilesPath, String ngraphOnnxBranch, String configuration.os = os configuration.ngraphOnnxBranch = ngraphOnnxBranch configuration.ngraphBranch = ngraphBranch - configuration.dldtBranch = dldtBranch + configuration.openvinoBranch = openvinoBranch String backendLabels = configuration.backends.join(" && ") configuration.label = "${backendLabels} && ${configuration.sku} && ${CI_LABELS}" configuration.name = "${configuration.sku}-${configuration.os}" diff --git a/.ci/jenkins/dockerfiles/ubuntu_18_04.dockerfile b/.ci/jenkins/dockerfiles/ubuntu_18_04.dockerfile index 8f1a90a4..666cd5d9 100644 --- a/.ci/jenkins/dockerfiles/ubuntu_18_04.dockerfile +++ b/.ci/jenkins/dockerfiles/ubuntu_18_04.dockerfile @@ -55,7 +55,8 @@ 
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.d RUN pip3 install --upgrade pip==19.0.3 \ setuptools==41.0.0 \ - wheel==0.33.1 + wheel==0.33.1 \ + cython # ONNX dependencies RUN apt-get -y --no-install-recommends install \ diff --git a/.ci/jenkins/prepare_environment.sh b/.ci/jenkins/prepare_environment.sh index 40f5da81..675b1f2c 100755 --- a/.ci/jenkins/prepare_environment.sh +++ b/.ci/jenkins/prepare_environment.sh @@ -19,77 +19,58 @@ set -x set -e -function build_ngraph() { +function build_open_vino() { set -x local directory="$1" - local backends="$2" - CMAKE_ARGS="-DNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_WARNINGS_AS_ERRORS=TRUE -DCMAKE_BUILD_TYPE=Release -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX=${directory}/ngraph_dist" - cd "${directory}/ngraph" + CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Debug \ + -DENABLE_VALIDATION_SET=OFF \ + -DENABLE_VPU=OFF \ + -DENABLE_DLIA=OFF \ + -DENABLE_GNA=OFF \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_TESTS=OFF \ + -DENABLE_BEH_TESTS=OFF \ + -DENABLE_FUNCTIONAL_TESTS=OFF \ + -DENABLE_MKL_DNN=ON \ + -DENABLE_CLDNN=OFF \ + -DENABLE_PROFILING_ITT=OFF \ + -DENABLE_SAMPLES=OFF \ + -DENABLE_SPEECH_DEMO=OFF \ + -DENABLE_PYTHON=ON \ + -DPYTHON_EXECUTABLE=`which python3` \ + -DNGRAPH_ONNX_IMPORT_ENABLE=ON \ + -DNGRAPH_IE_ENABLE=ON \ + -DNGRAPH_INTERPRETER_ENABLE=ON \ + -DNGRAPH_DEBUG_ENABLE=OFF \ + -DNGRAPH_DYNAMIC_COMPONENTS_ENABLE=ON \ + -DCMAKE_INSTALL_PREFIX=${directory}/openvino_dist" - # CMAKE args for nGraph backends - if [[ ${backends} == *"igpu"* ]]; then - echo "Building nGraph for Intel GPU." - CMAKE_ARGS="${CMAKE_ARGS} -DNGRAPH_INTERPRETER_ENABLE=TRUE" - fi - if [[ ${backends} == *"interpreter"* ]]; then - echo "Building nGraph for INTERPRETER backend." - CMAKE_ARGS="${CMAKE_ARGS} -DNGRAPH_INTELGPU_ENABLE=TRUE" - fi - - cd "${directory}/ngraph" + cd "${directory}/openvino" + mkdir -p ./build cd ./build cmake ${CMAKE_ARGS} .. 
|| return 1 make -j $(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l) || return 1 make install || return 1 - cd "${directory}/ngraph/python" + + cd "${directory}/openvino/ngraph/python" if [ ! -d ./pybind11 ]; then git clone --recursive https://github.com/pybind/pybind11.git fi - rm -f "${directory}/ngraph/python/dist/ngraph*.whl" - rm -rf "${directory}/ngraph/python/*.so ${directory}/ngraph/python/build" - export PYBIND_HEADERS_PATH="${directory}/ngraph/python/pybind11" - export NGRAPH_CPP_BUILD_PATH="${directory}/ngraph_dist" + virtualenv -p `which python3` venv + . venv/bin/activate + rm -f "${directory}/openvino/ngraph/python/dist/ngraph*.whl" + rm -rf "${directory}/openvino/ngraph/python/*.so ${directory}/openvino/ngraph/python/build" + export PYBIND_HEADERS_PATH="${directory}/openvino/ngraph/python/pybind11" + export NGRAPH_CPP_BUILD_PATH="${directory}/openvino_dist" export NGRAPH_ONNX_IMPORT_ENABLE="TRUE" - python3 setup.py bdist_wheel || return 1 - # Clean build artifacts - rm -rf "${directory}/ngraph_dist" - return 0 -} - -function build_dldt() { - set -x - local directory="$1" - local ngraph_branch="$2" - CMAKE_ARGS="-DNGRAPH_LIBRARY_OUTPUT_DIRECTORY=${directory}/dldt_dist \ - -DNGRAPH_COMPONENT_PREFIX=deployment_tools/ngraph/ -DNGRAPH_USE_PREBUILT_LLVM=TRUE \ - -DNGRAPH_TOOLS_ENABLE=TRUE -DNGRAPH_WARNINGS_AS_ERRORS=TRUE -DNGRAPH_UNIT_TEST_ENABLE=FALSE \ - -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=OFF -DENABLE_RPATH=ON -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ - -DENABLE_PERFORMANCE_TESTS=ON -DENABLE_TESTS=ON -DNGRAPH_DEBUG_ENABLE=OFF \ - -DENABLE_SAMPLES=OFF -DENABLE_FUNCTIONAL_TESTS=ON -DENABLE_MODELS=OFF -DENABLE_PRIVATE_MODELS=OFF \ - -DENABLE_GNA=OFF -DENABLE_VPU=OFF -DENABLE_SANITIZER=OFF -DENABLE_MYRIAD=OFF -DENABLE_MKL_DNN=ON \ - -DENABLE_CLDNN=OFF -DENABLE_VALIDATION_SET=OFF -DPYTHON_EXECUTABLE=`which python` \ - -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_UNIT_TEST_OPENVINO_ENABLE=TRUE -DNGRAPH_IE_ENABLE=ON \ - 
-DCMAKE_INSTALL_PREFIX=${directory}/dldt_dist -DNGRAPH_DYNAMIC_COMPONENTS_ENABLE=ON" - cd "${directory}/dldt/ngraph" - git checkout "${ngraph_branch}" - - cd "${directory}/dldt" - - mkdir -p ./build - cd ./build - git lfs install - cmake ${CMAKE_ARGS} .. || return 1 - make -j $(lscpu --parse=CORE | grep -v '#' | sort | uniq | wc -l) || return 1 - make install || return 1 + mv "${directory}/ngraph-onnx/.ci/jenkins/setup.py" . + python3 setup.py develop || return 1 return 0 } function main() { - # By default copy stored nGraph master and use it to build PR branch - BACKENDS="cpu" - - NUM_PARAMETERS="3" + NUM_PARAMETERS="1" if [ $# -lt "${NUM_PARAMETERS}" ]; then echo "ERROR: Expected at least ${NUM_PARAMETERS} parameter got $#" exit 1 @@ -99,15 +80,9 @@ function main() { for i in "$@" do case $i in - --backends=*) - BACKENDS="${i//${PATTERN}/}" - ;; --build-dir=*) BUILD_DIR="${i//${PATTERN}/}" ;; - --ngraph-branch=*) - NGRAPH_BRANCH="${i//${PATTERN}/}" - ;; *) echo "Parameter $i not recognized." exit 1 @@ -115,11 +90,9 @@ function main() { esac done - BUILD_NGRAPH_CALL="build_ngraph \"${BUILD_DIR}\" \"${BACKENDS}\"" - BUILD_DLDT_CALL="build_dldt \"${BUILD_DIR}\" \"${NGRAPH_BRANCH}\"" + BUILD_OV_CALL="build_open_vino \"${BUILD_DIR}\"" - eval "${BUILD_NGRAPH_CALL}" - # eval "${BUILD_DLDT_CALL}" + eval "${BUILD_OV_CALL}" } diff --git a/.ci/jenkins/setup.py b/.ci/jenkins/setup.py new file mode 100644 index 00000000..3a3b4440 --- /dev/null +++ b/.ci/jenkins/setup.py @@ -0,0 +1,387 @@ +# ****************************************************************************** +# Copyright 2017-2020 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ****************************************************************************** + +import distutils.ccompiler +import os +import re +import sys + +import setuptools +from setuptools import Extension, setup +from setuptools.command.build_ext import build_ext + +__version__ = os.environ.get("NGRAPH_VERSION", "0.0.0-dev") +PYNGRAPH_ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) +PYNGRAPH_SRC_DIR = os.path.join(PYNGRAPH_ROOT_DIR, "src") +NGRAPH_DEFAULT_INSTALL_DIR = os.environ.get("HOME") +NGRAPH_ONNX_IMPORT_ENABLE = os.environ.get("NGRAPH_ONNX_IMPORT_ENABLE") +NGRAPH_PYTHON_DEBUG = os.environ.get("NGRAPH_PYTHON_DEBUG") + + +def find_ngraph_dist_dir(): + """Return location of compiled ngraph library home.""" + if os.environ.get("NGRAPH_CPP_BUILD_PATH"): + ngraph_dist_dir = os.environ.get("NGRAPH_CPP_BUILD_PATH") + else: + ngraph_dist_dir = os.path.join(NGRAPH_DEFAULT_INSTALL_DIR, "ngraph_dist") + + found = os.path.exists(os.path.join(ngraph_dist_dir, "include/ngraph")) + if not found: + print( + "Cannot find nGraph library in {} make sure that " + "NGRAPH_CPP_BUILD_PATH is set correctly".format(ngraph_dist_dir) + ) + sys.exit(1) + else: + print("nGraph library found in {}".format(ngraph_dist_dir)) + return ngraph_dist_dir + + +def find_pybind_headers_dir(): + """Return location of pybind11 headers.""" + if os.environ.get("PYBIND_HEADERS_PATH"): + pybind_headers_dir = os.environ.get("PYBIND_HEADERS_PATH") + else: + pybind_headers_dir = os.path.join(PYNGRAPH_ROOT_DIR, "pybind11") + + found = os.path.exists(os.path.join(pybind_headers_dir, 
"include/pybind11")) + if not found: + print( + "Cannot find pybind11 library in {} make sure that " + "PYBIND_HEADERS_PATH is set correctly".format(pybind_headers_dir) + ) + sys.exit(1) + else: + print("pybind11 library found in {}".format(pybind_headers_dir)) + return pybind_headers_dir + + +NGRAPH_CPP_DIST_DIR = find_ngraph_dist_dir() +PYBIND11_INCLUDE_DIR = find_pybind_headers_dir() + "/include" +NGRAPH_CPP_INCLUDE_DIR = NGRAPH_CPP_DIST_DIR + "/include" +if os.path.exists(os.path.join(NGRAPH_CPP_DIST_DIR, "lib")): + NGRAPH_CPP_LIBRARY_DIR = os.path.join(NGRAPH_CPP_DIST_DIR, "lib") +elif os.path.exists(os.path.join(NGRAPH_CPP_DIST_DIR, "lib64")): + NGRAPH_CPP_LIBRARY_DIR = os.path.join(NGRAPH_CPP_DIST_DIR, "lib64") +else: + print( + "Cannot find library directory in {}, make sure that nGraph is installed " + "correctly".format(NGRAPH_CPP_DIST_DIR) + ) + sys.exit(1) + +if sys.platform == "win32": + NGRAPH_CPP_DIST_DIR = os.path.normpath(NGRAPH_CPP_DIST_DIR) + PYBIND11_INCLUDE_DIR = os.path.normpath(PYBIND11_INCLUDE_DIR) + NGRAPH_CPP_INCLUDE_DIR = os.path.normpath(NGRAPH_CPP_INCLUDE_DIR) + NGRAPH_CPP_LIBRARY_DIR = os.path.normpath(NGRAPH_CPP_LIBRARY_DIR) + +NGRAPH_CPP_LIBRARY_NAME = "ngraph" +"""For some platforms OpenVINO adds 'd' suffix to library names in debug configuration""" +if len([fn for fn in os.listdir(NGRAPH_CPP_LIBRARY_DIR) if re.search("ngraphd", fn)]): + NGRAPH_CPP_LIBRARY_NAME = "ngraphd" + +ONNX_IMPORTER_CPP_LIBRARY_NAME = "onnx_importer" +if len([fn for fn in os.listdir(NGRAPH_CPP_LIBRARY_DIR) if re.search("onnx_importerd", fn)]): + ONNX_IMPORTER_CPP_LIBRARY_NAME = "onnx_importerd" + + +def parallelCCompile( + self, + sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None, +): + """Build sources in parallel. + + Reference link: + http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils + Monkey-patch for parallel compilation. 
+ """ + # those lines are copied from distutils.ccompiler.CCompiler directly + macros, objects, extra_postargs, pp_opts, build = self._setup_compile( + output_dir, macros, include_dirs, sources, depends, extra_postargs + ) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + + if NGRAPH_PYTHON_DEBUG in ["TRUE", "ON", True]: + try: + # pybind11 is much more verbose without -DNDEBUG + self.compiler.remove("-DNDEBUG") + self.compiler.remove("-O2") + self.compiler_so.remove("-DNDEBUG") + self.compiler_so.remove("-O2") + except (AttributeError, ValueError): + pass + # parallel code + import multiprocessing.pool + + def _single_compile(obj): + try: + src, ext = build[obj] + except KeyError: + return + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + + # convert to list, imap is evaluated on-demand + pool = multiprocessing.pool.ThreadPool() + list(pool.imap(_single_compile, objects)) + return objects + + +distutils.ccompiler.CCompiler.compile = parallelCCompile + + +def has_flag(compiler, flagname): + """Check whether a flag is supported by the specified compiler. + + As of Python 3.6, CCompiler has a `has_flag` method. 
+ cf http://bugs.python.org/issue26689 + """ + import tempfile + + with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f: + f.write("int main (int argc, char **argv) { return 0; }") + try: + compiler.compile([f.name], extra_postargs=[flagname]) + except setuptools.distutils.errors.CompileError: + return False + return True + + +def cpp_flag(compiler): + """Check and return the -std=c++11 compiler flag.""" + if sys.platform == "win32": + return "" # C++11 is on by default in MSVC + elif has_flag(compiler, "-std=c++11"): + return "-std=c++11" + else: + raise RuntimeError("Unsupported compiler -- C++11 support is needed!") + + +sources = [ + "pyngraph/axis_set.cpp", + "pyngraph/axis_vector.cpp", + "pyngraph/coordinate.cpp", + "pyngraph/coordinate_diff.cpp", + "pyngraph/dimension.cpp", + "pyngraph/function.cpp", + "pyngraph/node.cpp", + "pyngraph/node_factory.cpp", + "pyngraph/ops/constant.cpp", + "pyngraph/ops/get_output_element.cpp", + "pyngraph/ops/op.cpp", + "pyngraph/ops/parameter.cpp", + "pyngraph/ops/regmodule_pyngraph_op.cpp", + "pyngraph/ops/result.cpp", + "pyngraph/ops/util/arithmetic_reduction.cpp", + "pyngraph/ops/util/binary_elementwise_arithmetic.cpp", + "pyngraph/ops/util/binary_elementwise_comparison.cpp", + "pyngraph/ops/util/binary_elementwise_logical.cpp", + "pyngraph/ops/util/index_reduction.cpp", + "pyngraph/ops/util/op_annotations.cpp", + "pyngraph/ops/util/regmodule_pyngraph_op_util.cpp", + "pyngraph/ops/util/unary_elementwise_arithmetic.cpp", + "pyngraph/passes/manager.cpp", + "pyngraph/passes/regmodule_pyngraph_passes.cpp", + "pyngraph/partial_shape.cpp", + "pyngraph/pyngraph.cpp", + "pyngraph/serializer.cpp", + "pyngraph/shape.cpp", + "pyngraph/strides.cpp", + "pyngraph/types/element_type.cpp", + "pyngraph/types/regmodule_pyngraph_types.cpp", + "pyngraph/util.cpp", +] + +packages = [ + "ngraph", + "ngraph.utils", + "ngraph.impl", + "ngraph.impl.op", + "ngraph.impl.op.util", + "ngraph.impl.passes", +] + +sources = [PYNGRAPH_SRC_DIR + "/" + 
source for source in sources] + +include_dirs = [PYNGRAPH_SRC_DIR, NGRAPH_CPP_INCLUDE_DIR, PYBIND11_INCLUDE_DIR] + +library_dirs = [NGRAPH_CPP_LIBRARY_DIR] + +libraries = [NGRAPH_CPP_LIBRARY_NAME, ONNX_IMPORTER_CPP_LIBRARY_NAME] + +extra_compile_args = [] +if NGRAPH_ONNX_IMPORT_ENABLE in ["TRUE", "ON", True]: + extra_compile_args.append("-DNGRAPH_ONNX_IMPORT_ENABLE") + +extra_link_args = [] + +data_files = [ + ( + "lib", + [ + os.path.join(NGRAPH_CPP_LIBRARY_DIR, library) + for library in os.listdir(NGRAPH_CPP_LIBRARY_DIR) + ], + ), + ( + "licenses", + [ + os.path.join(NGRAPH_CPP_DIST_DIR, "licenses", license) + for license in os.listdir(os.path.join(NGRAPH_CPP_DIST_DIR, "licenses")) + ], + ), + ("", [os.path.join(NGRAPH_CPP_DIST_DIR, "LICENSE")],), +] + +if NGRAPH_ONNX_IMPORT_ENABLE in ["TRUE", "ON", True]: + onnx_sources = [ + "pyngraph/onnx_import/onnx_import.cpp", + ] + onnx_sources = [PYNGRAPH_SRC_DIR + "/" + source for source in onnx_sources] + sources = sources + onnx_sources + + packages.append("ngraph.impl.onnx_import") + +ext_modules = [ + Extension( + "_pyngraph", + sources=sources, + include_dirs=include_dirs, + define_macros=[("VERSION_INFO", __version__)], + library_dirs=library_dirs, + libraries=libraries, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + language="c++", + ), +] + + +def add_platform_specific_link_args(link_args): + """Add linker flags specific for the OS detected during the build.""" + if sys.platform.startswith("linux"): + link_args += ["-Wl,-rpath,$ORIGIN/../.."] + link_args += ["-z", "noexecstack"] + link_args += ["-z", "relro"] + link_args += ["-z", "now"] + elif sys.platform == "darwin": + link_args += ["-Wl,-rpath,@loader_path/../.."] + link_args += ["-stdlib=libc++"] + elif sys.platform == "win32": + link_args += ["/LTCG"] + + +class BuildExt(build_ext): + """A custom build extension for adding compiler-specific options.""" + + def _add_extra_compile_arg(self, flag, compile_args): + """Return True 
if successfully added given flag to compiler args.""" + if has_flag(self.compiler, flag): + compile_args += [flag] + return True + return False + + def _add_debug_or_release_flags(self): + """Return compiler flags for Release and Debug build types.""" + if NGRAPH_PYTHON_DEBUG in ["TRUE", "ON", True]: + if sys.platform == "win32": + return ["/Od", "/Zi", "/RTC1"] + else: + return ["-O0", "-g"] + else: + if sys.platform == "win32": + return ["/O2"] + else: + return ["-O2", "-D_FORTIFY_SOURCE=2"] + + def _add_win_compiler_flags(self, ext): + self._add_extra_compile_arg("/GL", ext.extra_compile_args) # Whole Program Optimization + self._add_extra_compile_arg("/analyze", ext.extra_compile_args) + + def _add_unix_compiler_flags(self, ext): + if not self._add_extra_compile_arg("-fstack-protector-strong", ext.extra_compile_args): + self._add_extra_compile_arg("-fstack-protector", ext.extra_compile_args) + + self._add_extra_compile_arg("-fvisibility=hidden", ext.extra_compile_args) + self._add_extra_compile_arg("-flto", ext.extra_compile_args) + self._add_extra_compile_arg("-fPIC", ext.extra_compile_args) + + ext.extra_compile_args += ["-Wformat", "-Wformat-security"] + + def _customize_compiler_flags(self): + """Modify standard compiler flags.""" + try: + # -Wstrict-prototypes is not a valid option for c++ + self.compiler.compiler_so.remove("-Wstrict-prototypes") + if NGRAPH_PYTHON_DEBUG in ["TRUE", "ON", True]: + # pybind11 is much more verbose without -DNDEBUG + self.compiler.compiler_so.remove("-DNDEBUG") + self.compiler.compiler_so.remove("-O2") + except (AttributeError, ValueError): + pass + + def build_extensions(self): + """Build extension providing extra compiler flags.""" + self._customize_compiler_flags() + for ext in self.extensions: + ext.extra_compile_args += [cpp_flag(self.compiler)] + + if sys.platform == "win32": + self._add_win_compiler_flags(ext) + else: + self._add_unix_compiler_flags(ext) + + add_platform_specific_link_args(ext.extra_link_args) + + 
ext.extra_compile_args += self._add_debug_or_release_flags() + + if sys.platform == "darwin": + ext.extra_compile_args += ["-stdlib=libc++"] + + build_ext.build_extensions(self) + + +with open(os.path.join(PYNGRAPH_ROOT_DIR, "requirements.txt")) as req: + requirements = req.read().splitlines() + setup_requires = [item for item in requirements if item.strip().startswith("numpy")] + +setup( + name="ngraph-core", + description="nGraph - Intel's graph compiler and runtime for Neural Networks", + version=__version__, + author="Intel Corporation", + author_email="intelnervana@intel.com", + url="https://github.com/NervanaSystems/ngraph/", + license="License :: OSI Approved :: Apache Software License", + long_description=open(os.path.join(PYNGRAPH_ROOT_DIR, "README.md")).read(), + long_description_content_type="text/markdown", + ext_modules=ext_modules, + package_dir={'': 'src'}, + packages=packages, + cmdclass={"build_ext": BuildExt}, + data_files=data_files, + setup_requires=setup_requires, + install_requires=requirements, + zip_safe=False, + extras_require={}, +) \ No newline at end of file diff --git a/.ci/watchdog/Jenkinsfile b/.ci/watchdog/Jenkinsfile index b01408ec..d3227b71 100644 --- a/.ci/watchdog/Jenkinsfile +++ b/.ci/watchdog/Jenkinsfile @@ -54,6 +54,7 @@ timeout(30) export PYTHONHTTPSVERIFY=0 python ${WATCHDOG_ROOT}/Main.py \ --slack-token=${SLACK_TOKEN_FILE} \ + --msteams-url=${MSTEAMS_URL_FILE} \ --github-token=${GITHUB_TOKEN_FILE} \ --github-org=${GITHUB_ORG} \ --github-project=${GITHUB_PROJECT} \ @@ -61,7 +62,9 @@ timeout(30) --jenkins-server=${JENKINS_SERVER} \ --jenkins-user=${JENKINS_USER} \ --ci-job=${CI_JOB_NAME} \ - --watchdog-job=${WATCHDOG_JOB_NAME} + --watchdog-job=${WATCHDOG_JOB_NAME} \ + --slack-enabled=${SLACK_ENABLED} \ + --ms-teams-enabled=${MS_TEAMS_ENABLED} """ } } diff --git a/.ci/watchdog/MSTeamsCommunicator.py b/.ci/watchdog/MSTeamsCommunicator.py new file mode 100644 index 00000000..a509787b --- /dev/null +++ 
b/.ci/watchdog/MSTeamsCommunicator.py @@ -0,0 +1,143 @@ +#!/usr/bin/python3 + +# INTEL CONFIDENTIAL +# Copyright 2018-2020 Intel Corporation +# The source code contained or described herein and all documents related to the +# source code ("Material") are owned by Intel Corporation or its suppliers or +# licensors. Title to the Material remains with Intel Corporation or its +# suppliers and licensors. The Material may contain trade secrets and proprietary +# and confidential information of Intel Corporation and its suppliers and +# licensors, and is protected by worldwide copyright and trade secret laws and +# treaty provisions. No part of the Material may be used, copied, reproduced, +# modified, published, uploaded, posted, transmitted, distributed, or disclosed +# in any way without Intel's prior express written permission. +# No license under any patent, copyright, trade secret or other intellectual +# property right is granted to or conferred upon you by disclosure or delivery of +# the Materials, either expressly, by implication, inducement, estoppel or +# otherwise. Any license under such intellectual property rights must be express +# and approved by Intel in writing. +# Include any supplier copyright notices as supplier requires Intel to use. +# Include supplier trademarks or logos as supplier requires Intel to use, +# preceded by an asterisk. An asterisked footnote can be added as follows: +# *Third Party trademarks are the property of their respective owners. +# Unless otherwise agreed by Intel in writing, you may not remove or alter +# this notice or any other notice embedded in Materials by Intel or Intel's +# suppliers or licensors in any way. +import requests + + +class MSTeamsCommunicator: + """Class communicating with MSTeams using Incoming Webhook. + + The purpose of this class is to use MSTeams API to send message. 
+ Docs for used API, including wrapped methods can be found at: + https://docs.microsoft.com/en-us/outlook/actionable-messages/send-via-connectors + """ + + def __init__(self, _ci_alerts_channel_url): + self._ci_alerts_channel_url = _ci_alerts_channel_url + self._queued_messages = { + self._ci_alerts_channel_url: [], + } + + @property + def messages(self): + """ + Get list of queued messages. + + :return: List of queued messages + :return type: List[String] + """ + return self._queued_messages.values() + + def queue_message(self, message): + """ + Queue message to be sent later. + + :param message: Message content + :type message: String + """ + self._queued_messages[self._ci_alerts_channel_url].append(message) + + def _parse_text(self, message): + """ + Parse text to display as alert. + + :param message: Unparsed message content + :type message: String + """ + message_split = message.split('\n') + title = message_split[2] + log_url = message_split[-1] + text = message_split[3] + header = message_split[0].split(' - ') + header_formatted = '{} - [Watchdog Log]({})'.format(header[0], header[1]) + text_formatted = '{}: ***{}***'.format(text.split(':', 1)[0], text.split(':', 1)[1]) + + return title, log_url, '{}\n\n{}'.format(header_formatted, text_formatted) + + def _json_request_content(self, title, log_url, text_formatted): + """ + Create final json request to send message to MS Teams channel. 
+ + :param title: Title of alert + :param log_url: URL to Watchdog log + :param text_formatted: General content of alert - finally formatted + :type title: String + :type log_url: String + :type text_formatted: String + """ + data = { + '@context': 'https://schema.org/extensions', + '@type': 'MessageCard', + 'themeColor': '0072C6', + 'title': title, + 'text': text_formatted, + 'potentialAction': + [ + { + '@type': 'OpenUri', + 'name': 'Open PR', + 'targets': + [ + { + 'os': 'default', + 'uri': log_url, + }, + ], + }, + ], + } + return data + + def _send_to_channel(self, message, channel_url): + """ + Send MSTeams message to specified channel. + + :param message: Message content + :type message: String + :param channel_url: Channel url + :type channel_url: String + """ + title, log_url, text_formatted = self._parse_text(message) + data = self._json_request_content(title, log_url, text_formatted) + + try: + requests.post(url=channel_url, json=data) + except Exception as ex: + raise Exception('!!CRITICAL!! MSTeamsCommunicator: Could not send message ' + 'due to {}'.format(ex)) + + def send_message(self, message, quiet=False): + """ + Send queued messages as single communication. 
+ + :param message: Final message's content + :param quiet: Flag for disabling sending report through MS Teams + :type message: String + :type quiet: Boolean + """ + for channel, message_queue in self._queued_messages.items(): + final_message = message + '\n\n' + '\n'.join(message_queue) + if not quiet and message_queue: + self._send_to_channel(final_message, channel) diff --git a/.ci/watchdog/Main.py b/.ci/watchdog/Main.py index 50352c15..ee7e16f6 100644 --- a/.ci/watchdog/Main.py +++ b/.ci/watchdog/Main.py @@ -29,6 +29,7 @@ from Watchdog import Watchdog DEFAULT_SLACK_TOKEN_FILE = '/home/lab_nerval/tokens/slack_token' +DEFAULT_MSTEAMS_URL_FILE = '/home/lab_nerval/tokens/msteams_url' DEFAULT_GITHUB_TOKEN_FILE = '/home/lab_nerval/tokens/github_token' DEFAULT_GITHUB_ORGANIZATION = 'NervanaSystems' DEFAULT_GITHUB_PROJECT = 'ngraph-onnx' @@ -53,12 +54,15 @@ def main(args): jenkins_user = args.jenkins_user.strip() jenkins_token = open(args.jenkins_token).read().replace('\n', '').strip() slack_token = open(args.slack_token).read().replace('\n', '').strip() + msteams_url = open(args.msteams_url).read().replace('\n', '').strip() github_token = open(args.github_token).read().replace('\n', '').strip() github_org = args.github_org github_project = args.github_project ci_job = args.ci_job.strip() watchdog_job = args.watchdog_job.strip() quiet = args.quiet + slack_enabled = args.slack_enabled + ms_teams_enabled = args.ms_teams_enabled wd = Watchdog(jenkins_token=jenkins_token, jenkins_server=jenkins_server, @@ -67,8 +71,11 @@ def main(args): git_org=github_org, git_project=github_project, slack_token=slack_token, + msteams_url=msteams_url, ci_job_name=ci_job, - watchdog_job_name=watchdog_job) + watchdog_job_name=watchdog_job, + slack_enabled=slack_enabled, + ms_teams_enabled=ms_teams_enabled) wd.run(quiet=quiet) return 0 @@ -80,6 +87,9 @@ def main(args): parser.add_argument('--slack-token', help='Path to Slack user token to communicate messages.', 
default=DEFAULT_SLACK_TOKEN_FILE, action='store', required=False) + parser.add_argument('--msteams-url', help='Path to MS Teams channel url to communicate messages.', + default=DEFAULT_MSTEAMS_URL_FILE, action='store', required=False) + parser.add_argument('--github-token', help='Path to GitHub user token to access repo.', default=DEFAULT_GITHUB_TOKEN_FILE, action='store', required=False) @@ -106,6 +116,10 @@ def main(args): parser.add_argument('--quiet', help="Quiet mode - doesn\'t send message to slack channel.", action='store_true', required=False) + parser.add_argument('--slack-enabled', type=int, help='Enable watchdog on Slack', + default=0, action='store', required=False) + parser.add_argument('--ms-teams-enabled', type=int, help='Enable watchdog on MS Teams', + default=1, action='store', required=False) args = parser.parse_args() sys.exit(main(args)) diff --git a/.ci/watchdog/Watchdog.py b/.ci/watchdog/Watchdog.py index cdba9567..5ba4a99f 100644 --- a/.ci/watchdog/Watchdog.py +++ b/.ci/watchdog/Watchdog.py @@ -29,6 +29,7 @@ import re import logging from SlackCommunicator import SlackCommunicator +from MSTeamsCommunicator import MSTeamsCommunicator from JenkinsWrapper import JenkinsWrapper from jenkins import NotFoundException from GitWrapper import GitWrapper, GitWrapperError @@ -58,29 +59,36 @@ class Watchdog: NervanaSystems/ngraph-onnx repository. Then it connects to specified Jenkins server to check CI jobs associated with every PR. Watchdog verifies time durations for Jenkins initial response, job queue and execution against time treshold constants. Every fail - is logged and reported through Slack App on channel **ngraph-onnx-ci-alerts**. + is logged and reported through Slack and MS Teams communicators. 
:param jenkins_token: Token used for Jenkins :param jenkins_server: Jenkins server address :param jenkins_user: Username used to connect to Jenkins :param git_token: Token used to connect to GitHub :param slack_token: Token used to connect to Slack App + :param msteams_url: URL used to connect to MS Teams channel :param ci_job_name: nGraph-ONNX CI job name used in Jenkins :param watchdog_job_name: Watchdog job name used in Jenkins + :param slack_enabled: Enable watchdog on Slack + :param ms_teams_enabled: Enable watchdog on MS Teams :type jenkins_token: String :type jenkins_server: String :type jenkins_user: String :type git_token: String :type slack_token: String + :type msteams_url: String :type ci_job_name: String :type watchdog_job_name: String + :type slack_enabled: Integer + :type ms_teams_enabled: Integer .. note:: Watchdog and nGraph-ONNX CI job must be placed on the same Jenkins server. """ def __init__(self, jenkins_token, jenkins_server, jenkins_user, git_token, git_org, - git_project, slack_token, ci_job_name, watchdog_job_name): + git_project, slack_token, msteams_url, ci_job_name, watchdog_job_name, + slack_enabled, ms_teams_enabled): self._config_path = os.path.join(_WATCHDOG_DIR, '{}/.{}_ci_watchdog.json'.format(_WATCHDOG_DIR, git_project)) # Jenkins Wrapper object for CI job self._jenkins = JenkinsWrapper(jenkins_token, @@ -90,6 +98,8 @@ def __init__(self, jenkins_token, jenkins_server, jenkins_user, git_token, git_o self._git = GitWrapper(git_token, repository=git_org, project=git_project) # Create Slack api object self._slack_app = SlackCommunicator(slack_token=slack_token) + # Create MS Teams api object + self._msteams_hook = MSTeamsCommunicator(msteams_url) self._ci_job_name = ci_job_name self._watchdog_job_name = watchdog_job_name # Read config file @@ -97,13 +107,15 @@ def __init__(self, jenkins_token, jenkins_server, jenkins_user, git_token, git_o # Time at Watchdog initiation self._now_time = datetime.datetime.now() self._current_prs = {} + 
self._slack_enabled = slack_enabled + self._ms_teams_enabled = ms_teams_enabled def run(self, quiet=False): """Run main watchdog logic. Retrieve list of pull requests and pass it to the method responsible for checking them. - :param quiet: Flag for disabling sending report through Slack + :param quiet: Flag for disabling sending report through communicator :type quiet: Boolean """ try: @@ -372,6 +384,9 @@ def _interpret_status(self, status, pr): # CI build in progress - verify timeouts for build queue and duration elif any(phrase in status.description for phrase in pending_statuses): self._check_in_progress(pr, build_number) + else: + message = 'ONNX CI job for PR# {}: unrecognized status: {}'.format(pr.number, status.description) + self._queue_message(message, message_severity='error', pr=pr) except Exception: # Log Watchdog internal error in case any status can't be properly verified message = 'Failed to verify status "{}" for PR# {}'.format(status.description, pr.number) @@ -397,7 +412,7 @@ def _retrieve_build_number(self, url): raise def _queue_message(self, message, message_severity='info', pr=None): - """Add a message to message queue in Slack App object. + """Add a message to message queue in communicator object. The queued message is constructed based on message string passed as a method argument and message header. Message header is mapped to message severity @@ -424,7 +439,10 @@ def _queue_message(self, message, message_severity='info', pr=None): message = message + '\n' + pr.html_url send = message_header + '\n' + message - self._slack_app.queue_message(send, internal_error=internal) + if self._slack_enabled: + self._slack_app.queue_message(send, internal_error=internal) + if self._ms_teams_enabled: + self._msteams_hook.queue_message(send) def _check_finished(self, pr, build_number): """Verify if finished build output contains expected string for either fail or success. 
@@ -441,19 +459,20 @@ def _check_finished(self, pr, build_number): build_output = self._jenkins.get_build_console_output(project_name_full, build_number) if _CI_BUILD_FAIL_MESSAGE not in build_output \ and _CI_BUILD_SUCCESS_MESSAGE not in build_output: - message = ('ONNX CI job for PR #{} finished but no tests success or fail ' + message = ('ONNX CI job for PR #{}: finished but no tests success or fail ' 'confirmation is present in console output!'.format(pr_number)) self._queue_message(message, message_severity='error', pr=pr) def _send_message(self, quiet=False): - """Send messages queued in Slack App object to designated Slack channel. + """Send messages queued in Slack and MS Teams objects to designated channel. Queued messages are being sent as a single communication. - :param quiet: Flag for disabling sending report through Slack + :param quiet: Flag for disabling sending report through communicator :type quiet: Boolean """ - if any(messages for messages in self._slack_app.messages): + if any(messages for messages in self._slack_app.messages) or \ + any(messages for messages in self._msteams_hook.messages): try: watchdog_build = self._jenkins.get_job_info(self._watchdog_job_name)['lastBuild'] watchdog_build_number = watchdog_build['number'] @@ -463,7 +482,11 @@ def _send_message(self, quiet=False): watchdog_build_link = self._jenkins.jenkins_server send = self._watchdog_job_name + '- build ' + str( watchdog_build_number) + ' - ' + watchdog_build_link - self._slack_app.send_message(send, quiet=quiet) + + if self._slack_enabled: + self._slack_app.send_message(send, quiet=quiet) + if self._ms_teams_enabled: + self._msteams_hook.send_message(send, quiet=quiet) else: log.info('Nothing to report.') diff --git a/.ci/watchdog/requirements.txt b/.ci/watchdog/requirements.txt index 285e0c78..8a9dbddd 100644 --- a/.ci/watchdog/requirements.txt +++ b/.ci/watchdog/requirements.txt @@ -1,5 +1,6 @@ -python-jenkins==1.5.0 +python-jenkins==1.7.0 retrying==1.3.3 
slackclient==2.5.0 -pygithub==1.46 +pygithub==1.51 timeout-decorator==0.4.1 +requests==2.23.0 diff --git a/.mergify.yml b/.mergify.yml index 8bbb379f..4f15e5b8 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -5,7 +5,6 @@ pull_request_rules: - "#review-requested=0" - "#changes-requested-reviews-by=0" - "#commented-reviews-by=0" - - status-success=continuous-integration/travis-ci/pr - status-success=nGraph-ONNX Jenkins CI (IGK) - base=master - label!=WIP diff --git a/requirements.txt b/requirements.txt index ebc4932c..fde2b22b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -numpy==1.18.1 +numpy==1.18.4 onnx==1.6.0 -cachetools==4.0.0 +cachetools==4.1.0 diff --git a/requirements_test.txt b/requirements_test.txt index b2971068..6c9878b4 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -1,12 +1,11 @@ -zipp==0.5.0 -pytest==5.3.5 +pytest==5.4.2 pytest-timeout==1.3.4 -tox==3.14.3 -flake8==3.7.9 +tox==3.15.1 +flake8==3.8.2 flake8-commas==2.0.0 flake8-comprehensions==3.2.2 flake8-docstrings==1.5.0 -flake8-quotes==2.1.1 -mypy==0.761 +flake8-quotes==3.2.0 +mypy==0.770 pydocstyle==5.0.2 retrying==1.3.3 diff --git a/tests/test_backend.py b/tests/test_backend.py index 07e07f71..ffaa1ef3 100644 --- a/tests/test_backend.py +++ b/tests/test_backend.py @@ -28,6 +28,9 @@ import tests.utils +tests_xfail_custom = [] +skip_tests_custom = [] + def expect_fail(test_case_path): # type: (str) -> None """Mark the test as expected to fail.""" @@ -50,45 +53,92 @@ def expect_fail(test_case_path): # type: (str) -> None # import all test cases at global scope to make them visible to python.unittest backend_test = onnx.backend.test.BackendTest(ng_backend, __name__) -# MaxPool Indices -> NGRAPH-3131 -backend_test.exclude('test_maxpool_with_argmax') - -# RNN -> NC-323 -backend_test.exclude('test_simple_rnn') -backend_test.exclude('test_rnn') -backend_test.exclude('test_operator_rnn') - -# GRU -> NGONNX-325 -backend_test.exclude('test_gru') - -# Big model tests 
(see test_zoo_models.py): -backend_test.exclude('test_bvlc_alexnet') -backend_test.exclude('test_densenet121') -backend_test.exclude('test_inception_v1') -backend_test.exclude('test_inception_v2') -backend_test.exclude('test_resnet50') -backend_test.exclude('test_shufflenet') -backend_test.exclude('test_squeezenet') -backend_test.exclude('test_vgg19') -backend_test.exclude('test_zfnet512') - -# Support for ONNX Sequence type - NGONNX-789 -backend_test.exclude('test_sequence_model') - - -# Tests which fail on the CPU backend -> NC-330 -backend_test.exclude('test_Conv3d_dilated') -backend_test.exclude('test_Conv3d_dilated_strided') +skip_tests_general = [ + # Big model tests (see test_zoo_models.py): + 'test_bvlc_alexnet', + 'test_densenet121', + 'test_inception_v1', + 'test_inception_v2', + 'test_resnet50', + 'test_shufflenet', + 'test_squeezenet', + 'test_vgg19', + 'test_zfnet512', +] if selected_backend_name == 'IE:CPU': - # segfaults - backend_test.exclude('test_greater_cpu') - backend_test.exclude('test_greater_bcast_cpu') - backend_test.exclude('test_hardmax_axis_0_cpu') - backend_test.exclude('test_less_cpu') - backend_test.exclude('test_less_bcast_cpu') - backend_test.exclude('test_log_cpu') - backend_test.exclude('test_basic_conv_with_padding_cpu') + skip_tests_custom = [ + # Segmentation faults + 'test_and3d_cpu', + 'test_and_bcast4v3d_cpu', + 'test_and_bcast4v4d_cpu', + 'test_and_bcast4v2d_cpu', + 'test_argmax_keepdims_random_cpu', + 'test_argmax_negative_axis_keepdims_random_cpu', + 'test_argmax_no_keepdims_example_cpu', + 'test_batchnorm_epsilon_cpu', + 'test_clip_default_max_cpu', + 'test_xor3d_cpu', + 'test_basic_conv_without_padding_cpu', + 'test_greater_bcast_cpu', + 'test_conv_with_strides_no_padding_cpu', + 'test_hardmax_one_hot_cpu', + 'test_clip_default_min_cpu', + 'test_conv_with_strides_padding_cpu', + 'test_clip_cpu', + 'test_not_3d_cpu', + 'test_or_bcast4v3d_cpu', + 'test_xor4d_cpu', + 'test_xor_bcast4v3d_cpu', + 
'test_conv_with_strides_and_asymmetric_padding_cpu', + 'test_basic_conv_with_padding_cpu', + 'test_convtranspose_with_kernel_cpu', + 'test_or3d_cpu', + 'test_or_bcast4v4d_cpu', + 'test_xor_bcast3v1d_cpu', + 'test_pow_bcast_array_cpu', + 'test_instancenorm_epsilon_cpu', + 'test_xor_bcast4v2d_cpu', + 'test_pow_bcast_scalar_cpu', + 'test_reshape_zero_and_negative_dim_cpu', + 'test_bvlc_alexnet_opset7_cpu', + 'test_less_bcast_cpu', + 'test_less_cpu', + 'test_xor_bcast3v2d_cpu', + 'test_sinh_cpu', + 'test_Embedding_sparse_cpu', + 'test_not_4d_cpu', + 'test_matmul_2d_cpu', + 'test_argmin_no_keepdims_random_cpu', + 'test_batchnorm_example_cpu', + 'test_clip_splitbounds_cpu', + 'test_and_bcast3v2d_cpu', + 'test_and4d_cpu', + 'test_argmax_default_axis_example_cpu', + 'test_and_bcast3v1d_cpu', + 'test_hardmax_axis_0_cpu', + 'test_greater_cpu', + 'test_or2d_cpu', + 'test_and2d_cpu', + 'test_or_bcast3v1d_cpu', + 'test_or4d_cpu', + 'test_or_bcast4v2d_cpu', + 'test_range_float_type_positive_delta_cpu', + 'test_onehot_negative_indices_cpu', + 'test_pow_cpu', + 'test_or_bcast3v2d_cpu', + 'test_xor_bcast4v4d_cpu', + 'test_onehot_with_axis_cpu', + 'test_expand_shape_model1_cpu', + 'test_onehot_with_negative_axis_cpu', + 'test_onehot_without_axis_cpu', + 'test_operator_exp_cpu', + ] + +skip_tests = skip_tests_general + skip_tests_custom + +for test in skip_tests: + backend_test.exclude(test) # NOTE: ALL backend_test.exclude CALLS MUST BE PERFORMED BEFORE THE CALL TO globals().update @@ -98,478 +148,509 @@ def expect_fail(test_case_path): # type: (str) -> None OnnxBackendPyTorchConvertedModelTest = None globals().update(backend_test.enable_report().test_cases) -# Dynamic Expand -> NGONNX-367 -expect_fail('OnnxBackendNodeModelTest.test_expand_dim_changed_cpu') -expect_fail('OnnxBackendNodeModelTest.test_expand_dim_unchanged_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_expand_shape_model1_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_expand_shape_model2_cpu') 
-expect_fail('OnnxBackendSimpleModelTest.test_expand_shape_model3_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_expand_shape_model4_cpu') - -# Dynamic Reshape -> NGONNX-357 -expect_fail('OnnxBackendNodeModelTest.test_reshape_extended_dims_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_negative_dim_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_one_dim_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_reduced_dims_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_negative_extended_dims_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_reordered_all_dims_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_reordered_last_dims_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_zero_and_negative_dim_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reshape_zero_dim_cpu') - -# Dynamic Tile -> NGONNX-368 -expect_fail('OnnxBackendNodeModelTest.test_tile_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tile_precomputed_cpu') -expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_repeat_cpu') -expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_repeat_dim_overflow_cpu') - -# Cast (support for String type) -expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT_to_STRING_cpu') -expect_fail('OnnxBackendNodeModelTest.test_cast_STRING_to_FLOAT_cpu') - -# Scan -> NGONNX-433 -expect_fail('OnnxBackendNodeModelTest.test_scan9_sum_cpu') -expect_fail('OnnxBackendNodeModelTest.test_scan_sum_cpu') - -# Compress -> NGONNX-438 -expect_fail('OnnxBackendNodeModelTest.test_compress_default_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_compress_0_cpu') -expect_fail('OnnxBackendNodeModelTest.test_compress_1_cpu') -expect_fail('OnnxBackendNodeModelTest.test_compress_negative_axis_cpu') - -# Isnan -> NGONNX-440 -expect_fail('OnnxBackendNodeModelTest.test_isnan_cpu') - -# Constant of Shape -> NGONNX-445 -expect_fail('OnnxBackendNodeModelTest.test_constantofshape_float_ones_cpu') 
-expect_fail('OnnxBackendNodeModelTest.test_constantofshape_int_zeros_cpu') - -# Scatter -> NGONNX-446 -expect_fail('OnnxBackendNodeModelTest.test_scatter_with_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_scatter_without_axis_cpu') - -# Max unpool -> NGONNX-447 -expect_fail('OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu') -expect_fail('OnnxBackendNodeModelTest.test_maxunpool_export_without_output_shape_cpu') - -# OneHot -> NGONNX-486 -expect_fail('OnnxBackendNodeModelTest.test_onehot_with_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_onehot_without_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu') -expect_fail('OnnxBackendNodeModelTest.test_onehot_with_negative_axis_cpu') - -# TF id vectorizer -> NGONNX-523 -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu') -expect_fail('OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu') - -# Non zero -> NGONNX-472 -expect_fail('OnnxBackendNodeModelTest.test_nonzero_example_cpu') - -# Quantized NGONNX-595 -# Scale / zero point not a scalar -expect_fail('OnnxBackendNodeModelTest.test_qlinearconv_cpu') -expect_fail('OnnxBackendNodeModelTest.test_qlinearmatmul_2D_cpu') -expect_fail('OnnxBackendNodeModelTest.test_qlinearmatmul_3D_cpu') -expect_fail('OnnxBackendNodeModelTest.test_matmulinteger_cpu') - -# IsInf - NGONNX-528 -expect_fail('OnnxBackendNodeModelTest.test_isinf_cpu') -expect_fail('OnnxBackendNodeModelTest.test_isinf_negative_cpu') 
-expect_fail('OnnxBackendNodeModelTest.test_isinf_positive_cpu') - -# Pooling ops NGONNX-597 -expect_fail('OnnxBackendNodeModelTest.test_maxpool_2d_ceil_cpu') -expect_fail('OnnxBackendNodeModelTest.test_maxpool_2d_dilations_cpu') -expect_fail('OnnxBackendNodeModelTest.test_averagepool_2d_ceil_cpu') - -# Modulus - NGONNX-527 -# fmod=0 is not supported -expect_fail('OnnxBackendNodeModelTest.test_mod_broadcast_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_int16_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_int32_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_int64_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_int8_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_uint16_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_uint32_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_uint64_cpu') -expect_fail('OnnxBackendNodeModelTest.test_mod_uint8_cpu') -# float16 is not supported for Sign operator -expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu') - -# NonMaxSuppression - NGONNX-526 -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_center_point_box_format_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_flipped_coordinates_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_identical_boxes_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_limit_output_size_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_single_box_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_two_batches_cpu') -expect_fail('OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu') - -# Dynamic Slice NGONNX-522, 599 -expect_fail('OnnxBackendNodeModelTest.test_slice_cpu') 
-expect_fail('OnnxBackendNodeModelTest.test_slice_default_axes_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_default_steps_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_neg_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_neg_steps_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu') -expect_fail('OnnxBackendNodeModelTest.test_slice_negative_axes_cpu') - -# StrNormalizer NGONNX-600 -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_lower_cpu') -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_nochangecase_cpu') -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_upper_cpu') -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_export_monday_empty_output_cpu') -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_export_monday_insensintive_upper_twodim_cpu') -expect_fail('OnnxBackendNodeModelTest.test_strnormalizer_nostopwords_nochangecase_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_lower_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_nochangecase_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_upper_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_monday_empty_output_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_monday_insensintive_upper_twodim_cpu') -expect_fail('OnnxBackendSimpleModelTest.test_strnorm_model_nostopwords_nochangecase_cpu') - -# RoiAlign - NGONNX-601 -expect_fail('OnnxBackendNodeModelTest.test_roialign_cpu') - -# Upsample - NGONNX-781 -expect_fail('OnnxBackendNodeModelTest.test_upsample_nearest_cpu') - -# BitShift - NGONNX-752 -expect_fail('OnnxBackendNodeModelTest.test_bitshift_left_uint16_cpu') 
-expect_fail('OnnxBackendNodeModelTest.test_bitshift_left_uint32_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_left_uint64_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_left_uint8_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_right_uint16_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_right_uint32_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_right_uint64_cpu') -expect_fail('OnnxBackendNodeModelTest.test_bitshift_right_uint8_cpu') - -# Det - NGONNX-754 -expect_fail('OnnxBackendNodeModelTest.test_det_2d_cpu') -expect_fail('OnnxBackendNodeModelTest.test_det_nd_cpu') - -# GatherElements, ScatterElements - NGONNX-757 -expect_fail('OnnxBackendNodeModelTest.test_gather_elements_0_cpu') -expect_fail('OnnxBackendNodeModelTest.test_gather_elements_1_cpu') -expect_fail('OnnxBackendNodeModelTest.test_gather_elements_negative_indices_cpu') -expect_fail('OnnxBackendNodeModelTest.test_scatter_elements_with_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu') -expect_fail('OnnxBackendNodeModelTest.test_scatter_elements_without_axis_cpu') - -# GatherND - NGONNX-758 -expect_fail('OnnxBackendNodeModelTest.test_gathernd_example_int32_cpu') - -# Resize - NGONNX-782 -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_align_corners_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_align_corners_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_scales_nearest_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu') 
-expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_align_corners_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_asymmetric_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_align_corners_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_sizes_cubic_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_floor_align_corners_cpu') -expect_fail('OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu') - -# Tests pass on backend with support for nGraph v1 opset. 
-expect_fail('OnnxBackendNodeModelTest.test_constant_pad_cpu') -expect_fail('OnnxBackendNodeModelTest.test_edge_pad_cpu') -expect_fail('OnnxBackendNodeModelTest.test_reflect_pad_cpu') - -# DynamicQuantizeLinear - NGONNX-786 -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu') -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_expanded_cpu') -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_cpu') -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_expanded_cpu') -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_cpu') -expect_fail('OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_expanded_cpu') - -# Range op - NGONNX-787 -expect_fail('OnnxBackendNodeModelTest.test_range_float_type_positive_delta_cpu') -expect_fail('OnnxBackendNodeModelTest.test_range_float_type_positive_delta_expanded_cpu') -expect_fail('OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_cpu') -expect_fail('OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_expanded_cpu') - -# Unique op - NGONNX-761 -expect_fail('OnnxBackendNodeModelTest.test_unique_not_sorted_without_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_unique_sorted_with_axis_3d_cpu') -expect_fail('OnnxBackendNodeModelTest.test_unique_sorted_with_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_unique_sorted_with_negative_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_unique_sorted_without_axis_cpu') - -# Round - NGONNX-760 -expect_fail('OnnxBackendNodeModelTest.test_round_cpu') - -# Operations not supported by nGraph Backends -expect_fail('OnnxBackendNodeModelTest.test_top_k_cpu') -expect_fail('OnnxBackendNodeModelTest.test_top_k_negative_axis_cpu') -expect_fail('OnnxBackendNodeModelTest.test_top_k_smallest_cpu') - -# Tests which fail on the INTELGPU backend +general_tests_xfail = [ + # MaxPool Indices -> NGRAPH-3131 + 
'OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_strides_cpu', + 'OnnxBackendNodeModelTest.test_maxpool_with_argmax_2d_precomputed_pads_cpu', + + # RNN -> NC-323 + 'OnnxBackendNodeModelTest.test_rnn_seq_length_cpu', + 'OnnxBackendNodeModelTest.test_simple_rnn_defaults_cpu', + 'OnnxBackendNodeModelTest.test_simple_rnn_with_initial_bias_cpu', + + # GRU -> NGONNX-325 + 'OnnxBackendNodeModelTest.test_gru_defaults_cpu', + 'OnnxBackendNodeModelTest.test_gru_seq_length_cpu', + 'OnnxBackendNodeModelTest.test_gru_with_initial_bias_cpu', + + # Support for ONNX Sequence type - NGONNX-789 + 'OnnxBackendSimpleModelTest.test_sequence_model1_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model2_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model3_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model4_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model5_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model6_cpu', + 'OnnxBackendSimpleModelTest.test_sequence_model7_cpu', + + # Dynamic Expand -> NGONNX-367 + 'OnnxBackendNodeModelTest.test_expand_dim_changed_cpu', + 'OnnxBackendNodeModelTest.test_expand_dim_unchanged_cpu', + 'OnnxBackendSimpleModelTest.test_expand_shape_model1_cpu', + 'OnnxBackendSimpleModelTest.test_expand_shape_model2_cpu', + 'OnnxBackendSimpleModelTest.test_expand_shape_model3_cpu', + 'OnnxBackendSimpleModelTest.test_expand_shape_model4_cpu', + + # Dynamic Reshape -> NGONNX-357 + 'OnnxBackendNodeModelTest.test_reshape_extended_dims_cpu', + 'OnnxBackendNodeModelTest.test_reshape_negative_dim_cpu', + 'OnnxBackendNodeModelTest.test_reshape_one_dim_cpu', + 'OnnxBackendNodeModelTest.test_reshape_reduced_dims_cpu', + 'OnnxBackendNodeModelTest.test_reshape_negative_extended_dims_cpu', + 'OnnxBackendNodeModelTest.test_reshape_reordered_all_dims_cpu', + 'OnnxBackendNodeModelTest.test_reshape_reordered_last_dims_cpu', + 'OnnxBackendNodeModelTest.test_reshape_zero_and_negative_dim_cpu', + 'OnnxBackendNodeModelTest.test_reshape_zero_dim_cpu', + 
+ + # Dynamic Tile -> NGONNX-368 + 'OnnxBackendNodeModelTest.test_tile_cpu', + 'OnnxBackendNodeModelTest.test_tile_precomputed_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_repeat_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_repeat_dim_overflow_cpu', + + # Cast (support for String type) + 'OnnxBackendNodeModelTest.test_cast_FLOAT_to_STRING_cpu', + 'OnnxBackendNodeModelTest.test_cast_STRING_to_FLOAT_cpu', + + # Scan -> NGONNX-433 + 'OnnxBackendNodeModelTest.test_scan9_sum_cpu', + 'OnnxBackendNodeModelTest.test_scan_sum_cpu', + + # Compress -> NGONNX-438 + 'OnnxBackendNodeModelTest.test_compress_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_compress_0_cpu', + 'OnnxBackendNodeModelTest.test_compress_1_cpu', + 'OnnxBackendNodeModelTest.test_compress_negative_axis_cpu', + + # Isnan -> NGONNX-440 + 'OnnxBackendNodeModelTest.test_isnan_cpu', + + # Constant of Shape -> NGONNX-445 + 'OnnxBackendNodeModelTest.test_constantofshape_float_ones_cpu', + 'OnnxBackendNodeModelTest.test_constantofshape_int_zeros_cpu', + + # Scatter -> NGONNX-446 + 'OnnxBackendNodeModelTest.test_scatter_with_axis_cpu', + 'OnnxBackendNodeModelTest.test_scatter_without_axis_cpu', + + # Max unpool -> NGONNX-447 + 'OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu', + 'OnnxBackendNodeModelTest.test_maxunpool_export_without_output_shape_cpu', + + # OneHot -> NGONNX-486 + 'OnnxBackendNodeModelTest.test_onehot_with_axis_cpu', + 'OnnxBackendNodeModelTest.test_onehot_without_axis_cpu', + 'OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu', + 'OnnxBackendNodeModelTest.test_onehot_with_negative_axis_cpu', + + # TF id vectorizer -> NGONNX-523 + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu', + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu', + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu', + 
'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_only_bigrams_skip0_cpu', + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu', + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_onlybigrams_skip5_cpu', + 'OnnxBackendNodeModelTest.test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu', + + # Non zero -> NGONNX-472 + 'OnnxBackendNodeModelTest.test_nonzero_example_cpu', + + # Quantized NGONNX-595 + # Scale / zero point not a scalar + 'OnnxBackendNodeModelTest.test_qlinearconv_cpu', + 'OnnxBackendNodeModelTest.test_qlinearmatmul_2D_cpu', + 'OnnxBackendNodeModelTest.test_qlinearmatmul_3D_cpu', + 'OnnxBackendNodeModelTest.test_matmulinteger_cpu', + + # IsInf - NGONNX-528 + 'OnnxBackendNodeModelTest.test_isinf_cpu', + 'OnnxBackendNodeModelTest.test_isinf_negative_cpu', + 'OnnxBackendNodeModelTest.test_isinf_positive_cpu', + + # Pooling ops NGONNX-597 + 'OnnxBackendNodeModelTest.test_maxpool_2d_ceil_cpu', + 'OnnxBackendNodeModelTest.test_maxpool_2d_dilations_cpu', + 'OnnxBackendNodeModelTest.test_averagepool_2d_ceil_cpu', + + # Modulus - NGONNX-527 + # fmod=0 is not supported + 'OnnxBackendNodeModelTest.test_mod_broadcast_cpu', + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_int16_cpu', + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_int32_cpu', + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_int64_cpu', + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_int8_cpu', + 'OnnxBackendNodeModelTest.test_mod_uint16_cpu', + 'OnnxBackendNodeModelTest.test_mod_uint32_cpu', + 'OnnxBackendNodeModelTest.test_mod_uint64_cpu', + 'OnnxBackendNodeModelTest.test_mod_uint8_cpu', + + # float16 is not supported for Sign operator + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_float16_cpu', + + # NonMaxSuppression - NGONNX-526 + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_center_point_box_format_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_flipped_coordinates_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_identical_boxes_cpu', + 
'OnnxBackendNodeModelTest.test_nonmaxsuppression_limit_output_size_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_single_box_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_suppress_by_IOU_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_two_batches_cpu', + 'OnnxBackendNodeModelTest.test_nonmaxsuppression_two_classes_cpu', + + # Dynamic Slice NGONNX-522, 599 + 'OnnxBackendNodeModelTest.test_slice_cpu', + 'OnnxBackendNodeModelTest.test_slice_default_axes_cpu', + 'OnnxBackendNodeModelTest.test_slice_default_steps_cpu', + 'OnnxBackendNodeModelTest.test_slice_end_out_of_bounds_cpu', + 'OnnxBackendNodeModelTest.test_slice_neg_cpu', + 'OnnxBackendNodeModelTest.test_slice_neg_steps_cpu', + 'OnnxBackendNodeModelTest.test_slice_start_out_of_bounds_cpu', + 'OnnxBackendNodeModelTest.test_slice_negative_axes_cpu', + + # StrNormalizer NGONNX-600 + 'OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_lower_cpu', + 'OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_nochangecase_cpu', + 'OnnxBackendNodeModelTest.test_strnormalizer_export_monday_casesensintive_upper_cpu', + 'OnnxBackendNodeModelTest.test_strnormalizer_export_monday_empty_output_cpu', + 'OnnxBackendNodeModelTest.test_strnormalizer_export_monday_insensintive_upper_twodim_cpu', + 'OnnxBackendNodeModelTest.test_strnormalizer_nostopwords_nochangecase_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_lower_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_nochangecase_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_monday_casesensintive_upper_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_monday_empty_output_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_monday_insensintive_upper_twodim_cpu', + 'OnnxBackendSimpleModelTest.test_strnorm_model_nostopwords_nochangecase_cpu', + + # RoiAlign - 
NGONNX-601 + 'OnnxBackendNodeModelTest.test_roialign_cpu', + + # Upsample - NGONNX-781 + 'OnnxBackendNodeModelTest.test_upsample_nearest_cpu', + + # BitShift - NGONNX-752 + 'OnnxBackendNodeModelTest.test_bitshift_left_uint16_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_left_uint32_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_left_uint64_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_left_uint8_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_right_uint16_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_right_uint32_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_right_uint64_cpu', + 'OnnxBackendNodeModelTest.test_bitshift_right_uint8_cpu', + + # Det - NGONNX-754 + 'OnnxBackendNodeModelTest.test_det_2d_cpu', + 'OnnxBackendNodeModelTest.test_det_nd_cpu', + + # GatherElements, ScatterElements - NGONNX-757 + 'OnnxBackendNodeModelTest.test_gather_elements_0_cpu', + 'OnnxBackendNodeModelTest.test_gather_elements_1_cpu', + 'OnnxBackendNodeModelTest.test_gather_elements_negative_indices_cpu', + 'OnnxBackendNodeModelTest.test_scatter_elements_with_axis_cpu', + 'OnnxBackendNodeModelTest.test_scatter_elements_with_negative_indices_cpu', + 'OnnxBackendNodeModelTest.test_scatter_elements_without_axis_cpu', + + # GatherND - NGONNX-758 + 'OnnxBackendNodeModelTest.test_gathernd_example_int32_cpu', + + # Resize - NGONNX-782 + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_align_corners_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_cubic_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_align_corners_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_linear_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_scales_nearest_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_sizes_cubic_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu', + 
'OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_cpu', + 'OnnxBackendNodeModelTest.test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu', + 'OnnxBackendNodeModelTest.test_resize_tf_crop_and_resize_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_align_corners_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_asymmetric_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_cubic_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_align_corners_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_linear_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_scales_nearest_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_sizes_cubic_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_floor_align_corners_cpu', + 'OnnxBackendNodeModelTest.test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu', + + # Tests pass on backend with support for nGraph v1 opset. 
+ 'OnnxBackendNodeModelTest.test_constant_pad_cpu', + 'OnnxBackendNodeModelTest.test_edge_pad_cpu', + 'OnnxBackendNodeModelTest.test_reflect_pad_cpu', + + # DynamicQuantizeLinear - NGONNX-786 + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_cpu', + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_expanded_cpu', + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_cpu', + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_max_adjusted_expanded_cpu', + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_cpu', + 'OnnxBackendNodeModelTest.test_dynamicquantizelinear_min_adjusted_expanded_cpu', + + # Range op - NGONNX-787 + 'OnnxBackendNodeModelTest.test_range_float_type_positive_delta_cpu', + 'OnnxBackendNodeModelTest.test_range_float_type_positive_delta_expanded_cpu', + 'OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_cpu', + 'OnnxBackendNodeModelTest.test_range_int32_type_negative_delta_expanded_cpu', + + # Unique op - NGONNX-761 + 'OnnxBackendNodeModelTest.test_unique_not_sorted_without_axis_cpu', + 'OnnxBackendNodeModelTest.test_unique_sorted_with_axis_3d_cpu', + 'OnnxBackendNodeModelTest.test_unique_sorted_with_axis_cpu', + 'OnnxBackendNodeModelTest.test_unique_sorted_with_negative_axis_cpu', + 'OnnxBackendNodeModelTest.test_unique_sorted_without_axis_cpu', + + # Operations not supported by nGraph Backends + 'OnnxBackendNodeModelTest.test_top_k_cpu', + 'OnnxBackendNodeModelTest.test_top_k_negative_axis_cpu', + 'OnnxBackendNodeModelTest.test_top_k_smallest_cpu', +] + if selected_backend_name == 'INTELGPU': - expect_fail('OnnxBackendNodeModelTest.test_edge_pad_cpu') - expect_fail('OnnxBackendNodeModelTest.test_erf_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gemm_broadcast_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_0_cpu') - 
expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_2_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_default_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_one_hot_cpu') - expect_fail('OnnxBackendNodeModelTest.test_maxpool_2d_same_upper_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reflect_pad_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_ReflectionPad2d_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_ReplicationPad2d_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_pad_cpu') + tests_xfail_custom = [ + 'OnnxBackendNodeModelTest.test_edge_pad_cpu', + 'OnnxBackendNodeModelTest.test_erf_cpu', + 'OnnxBackendNodeModelTest.test_gather_0_cpu', + 'OnnxBackendNodeModelTest.test_gather_1_cpu', + 'OnnxBackendNodeModelTest.test_gemm_broadcast_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_2_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_example_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_one_hot_cpu', + 'OnnxBackendNodeModelTest.test_maxpool_2d_same_upper_cpu', + 'OnnxBackendNodeModelTest.test_reflect_pad_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_ReflectionPad2d_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_ReplicationPad2d_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_pad_cpu', + ] # Tests which fail or are very slow on the INTERPRETER backend if selected_backend_name == 
'INTERPRETER': - # Cast -> NGONNX-764 - expect_fail('OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu') + tests_xfail_custom = [ + # Cast -> NGONNX-764 + 'OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu', + ] if selected_backend_name == 'CPU': - # Cast -> NGONNX-764 - expect_fail('OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu') + tests_xfail_custom = [ + # Cast -> NGONNX-764 + 'OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu', + ] if selected_backend_name == 'PlaidML': - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_ReflectionPad2d_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_ReplicationPad2d_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_pad_cpu') - expect_fail('OnnxBackendNodeModelTest.test_clip_default_inbounds_cpu') - expect_fail('OnnxBackendNodeModelTest.test_clip_default_max_cpu') - expect_fail('OnnxBackendNodeModelTest.test_clip_default_min_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_output_shape_cpu') - 
expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_edge_pad_cpu') - expect_fail('OnnxBackendNodeModelTest.test_erf_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_axis_2_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_default_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_hardmax_one_hot_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reflect_pad_cpu') - # Test which fail on PlaidML with INTELGPU - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_pow_cpu') + tests_xfail_custom = [ + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_ReflectionPad2d_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_ReplicationPad2d_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_pad_cpu', + 'OnnxBackendNodeModelTest.test_clip_default_inbounds_cpu', + 'OnnxBackendNodeModelTest.test_clip_default_max_cpu', + 'OnnxBackendNodeModelTest.test_clip_default_min_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_output_shape_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_cpu', + 
'OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu', + 'OnnxBackendNodeModelTest.test_edge_pad_cpu', + 'OnnxBackendNodeModelTest.test_erf_cpu', + 'OnnxBackendNodeModelTest.test_gather_0_cpu', + 'OnnxBackendNodeModelTest.test_gather_1_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_axis_2_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_example_cpu', + 'OnnxBackendNodeModelTest.test_hardmax_one_hot_cpu', + 'OnnxBackendNodeModelTest.test_reflect_pad_cpu', + + # Test which fail on PlaidML with INTELGPU + 'OnnxBackendPyTorchOperatorModelTest.test_operator_pow_cpu', + ] + if selected_backend_name == 'IE:CPU': - # Unsupported primitive of type: Sqrt - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_default_axes_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_default_axes_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_do_not_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_keep_dims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reduce_l2_negative_axes_keep_dims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_sqrt_cpu') - expect_fail('OnnxBackendNodeModelTest.test_sqrt_example_cpu') - 
expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_sqrt_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_symbolic_override_cpu') - - # [NOT_IMPLEMENTED] Input image format BOOL is not supported yet... - expect_fail('OnnxBackendNodeModelTest.test_and2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and_bcast3v1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and_bcast3v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and_bcast4v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and_bcast4v3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_and_bcast4v4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_not_2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_not_3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_not_4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or_bcast3v1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or_bcast3v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or_bcast4v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or_bcast4v3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_or_bcast4v4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_where_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_where_long_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor4d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor_bcast3v1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor_bcast3v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor_bcast4v2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_xor_bcast4v3d_cpu') - 
expect_fail('OnnxBackendNodeModelTest.test_xor_bcast4v4d_cpu') - - # Pooling layer. Unsupported mode. Only 4D and 5D blobs are supported as input. - expect_fail('OnnxBackendNodeModelTest.test_averagepool_1d_default_cpu') - expect_fail('OnnxBackendNodeModelTest.test_maxpool_1d_default_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_maxpool_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_MaxPool1d_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_MaxPool1d_stride_cpu') - - # Layer y input port 1 is not connected to any data - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_3d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_dilations_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_kernel_shape_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_output_shape_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_pad_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_pads_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convtranspose_with_kernel_cpu') - expect_fail('OnnxBackendNodeModelTest.test_prelu_broadcast_cpu') - expect_fail('OnnxBackendNodeModelTest.test_prelu_example_cpu') - - # Cannot cast ngraph node y to CNNLayer! - expect_fail('OnnxBackendNodeModelTest.test_basic_convinteger_cpu') - expect_fail('OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu') - expect_fail('OnnxBackendNodeModelTest.test_dequantizelinear_cpu') - expect_fail('OnnxBackendNodeModelTest.test_quantizelinear_cpu') - expect_fail('OnnxBackendNodeModelTest.test_scatternd_cpu') - - # Incorrect precision f64! 
- expect_fail('OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT_to_DOUBLE_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu') - expect_fail('OnnxBackendNodeModelTest.test_mod_mixed_sign_float64_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_broadcast_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu') - - # Unsupported primitive of type: Ceiling name: y - expect_fail('OnnxBackendNodeModelTest.test_ceil_cpu') - expect_fail('OnnxBackendNodeModelTest.test_ceil_example_cpu') - - # Can't convert dims 0 to Layout! 
- expect_fail('OnnxBackendNodeModelTest.test_pow_bcast_scalar_cpu') - - # RuntimeError: data [] doesn't exist - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu') - expect_fail('OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu') - expect_fail('OnnxBackendNodeModelTest.test_constant_cpu') - expect_fail('OnnxBackendNodeModelTest.test_dropout_default_cpu') - expect_fail('OnnxBackendNodeModelTest.test_dropout_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_eyelike_populate_off_main_diagonal_cpu') - expect_fail('OnnxBackendNodeModelTest.test_eyelike_without_dtype_cpu') - expect_fail('OnnxBackendNodeModelTest.test_identity_cpu') - expect_fail('OnnxBackendNodeModelTest.test_max_one_input_cpu') - expect_fail('OnnxBackendNodeModelTest.test_mean_one_input_cpu') - expect_fail('OnnxBackendNodeModelTest.test_min_one_input_cpu') - expect_fail('OnnxBackendNodeModelTest.test_shape_cpu') - expect_fail('OnnxBackendNodeModelTest.test_shape_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_size_cpu') - expect_fail('OnnxBackendNodeModelTest.test_size_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_sum_one_input_cpu') - - # RuntimeError: [PARAMETER_MISMATCH] Failed to set Blob with precision FP32 - expect_fail('OnnxBackendNodeModelTest.test_equal_bcast_cpu') - expect_fail('OnnxBackendNodeModelTest.test_equal_cpu') - - # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... 
- expect_fail('OnnxBackendNodeModelTest.test_gather_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_gather_negative_indices_cpu') - expect_fail('OnnxBackendNodeModelTest.test_mod_int64_fmod_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reversesequence_batch_cpu') - expect_fail('OnnxBackendNodeModelTest.test_reversesequence_time_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_non_float_params_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu') - - # RuntimeError: Cannot cast ngraph node LSTMSequence to CNNLayer! - expect_fail('OnnxBackendNodeModelTest.test_lstm_defaults_cpu') - expect_fail('OnnxBackendNodeModelTest.test_lstm_with_initial_bias_cpu') - expect_fail('OnnxBackendNodeModelTest.test_lstm_with_peepholes_cpu') - - # RuntimeError: Cannot cast ngraph node output to CNNLayer! - expect_fail('OnnxBackendNodeModelTest.test_gathernd_example_float32_cpu') - - # AssertionError: result mismatch - expect_fail('OnnxBackendNodeModelTest.test_argmax_default_axis_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_default_axis_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_no_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmax_no_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_default_axis_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_default_axis_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_keepdims_example_cpu') - 
expect_fail('OnnxBackendNodeModelTest.test_argmin_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_no_keepdims_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_argmin_no_keepdims_random_cpu') - expect_fail('OnnxBackendNodeModelTest.test_elu_example_cpu') - expect_fail('OnnxBackendNodeModelTest.test_logsoftmax_axis_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_logsoftmax_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_logsoftmax_default_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_mvn_cpu') - expect_fail('OnnxBackendNodeModelTest.test_softmax_axis_0_cpu') - expect_fail('OnnxBackendNodeModelTest.test_softmax_axis_1_cpu') - expect_fail('OnnxBackendNodeModelTest.test_softmax_default_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_equal_parts_1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_equal_parts_2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_equal_parts_default_axis_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_variable_parts_1d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_variable_parts_2d_cpu') - expect_fail('OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_chunk_cpu') - expect_fail('OnnxBackendPyTorchOperatorModelTest.test_operator_symbolic_override_nested_cpu') - - # RuntimeError: Node Split contains empty child edge for index 0 - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_GLU_cpu') - expect_fail('OnnxBackendPyTorchConvertedModelTest.test_GLU_dim_cpu') - - # RuntimeError: invalid next size (fast) - expect_fail('OnnxBackendNodeModelTest.test_basic_conv_with_padding_cpu') + tests_xfail_custom = [ + # [NOT_IMPLEMENTED] Input image format BOOL is not supported yet... 
+ 'OnnxBackendNodeModelTest.test_and2d_cpu', + 'OnnxBackendNodeModelTest.test_and3d_cpu', + 'OnnxBackendNodeModelTest.test_and4d_cpu', + 'OnnxBackendNodeModelTest.test_and_bcast3v1d_cpu', + 'OnnxBackendNodeModelTest.test_and_bcast3v2d_cpu', + 'OnnxBackendNodeModelTest.test_and_bcast4v2d_cpu', + 'OnnxBackendNodeModelTest.test_and_bcast4v3d_cpu', + 'OnnxBackendNodeModelTest.test_and_bcast4v4d_cpu', + 'OnnxBackendNodeModelTest.test_not_2d_cpu', + 'OnnxBackendNodeModelTest.test_not_3d_cpu', + 'OnnxBackendNodeModelTest.test_not_4d_cpu', + 'OnnxBackendNodeModelTest.test_or2d_cpu', + 'OnnxBackendNodeModelTest.test_or3d_cpu', + 'OnnxBackendNodeModelTest.test_or4d_cpu', + 'OnnxBackendNodeModelTest.test_or_bcast3v1d_cpu', + 'OnnxBackendNodeModelTest.test_or_bcast3v2d_cpu', + 'OnnxBackendNodeModelTest.test_or_bcast4v2d_cpu', + 'OnnxBackendNodeModelTest.test_or_bcast4v3d_cpu', + 'OnnxBackendNodeModelTest.test_or_bcast4v4d_cpu', + 'OnnxBackendNodeModelTest.test_where_long_example_cpu', + 'OnnxBackendNodeModelTest.test_xor2d_cpu', + 'OnnxBackendNodeModelTest.test_xor3d_cpu', + 'OnnxBackendNodeModelTest.test_xor4d_cpu', + 'OnnxBackendNodeModelTest.test_xor_bcast3v1d_cpu', + 'OnnxBackendNodeModelTest.test_xor_bcast3v2d_cpu', + 'OnnxBackendNodeModelTest.test_xor_bcast4v2d_cpu', + 'OnnxBackendNodeModelTest.test_xor_bcast4v3d_cpu', + 'OnnxBackendNodeModelTest.test_xor_bcast4v4d_cpu', + + # Pooling layer. Unsupported mode. Only 4D and 5D blobs are supported as input. 
+ 'OnnxBackendNodeModelTest.test_averagepool_1d_default_cpu', + 'OnnxBackendNodeModelTest.test_maxpool_1d_default_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_maxpool_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_MaxPool1d_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_MaxPool1d_stride_cpu', + + # Layer y input port 1 is not connected to any data + 'OnnxBackendNodeModelTest.test_convtranspose_1d_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_3d_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_dilations_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_kernel_shape_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_output_shape_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_pad_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_pads_cpu', + 'OnnxBackendNodeModelTest.test_convtranspose_with_kernel_cpu', + 'OnnxBackendNodeModelTest.test_prelu_broadcast_cpu', + 'OnnxBackendNodeModelTest.test_prelu_example_cpu', + + # Cannot cast ngraph node y to CNNLayer! + 'OnnxBackendNodeModelTest.test_basic_convinteger_cpu', + 'OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu', + 'OnnxBackendNodeModelTest.test_dequantizelinear_cpu', + 'OnnxBackendNodeModelTest.test_quantizelinear_cpu', + 'OnnxBackendNodeModelTest.test_scatternd_cpu', + + # Incorrect precision f64! 
+ 'OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_cast_DOUBLE_to_FLOAT_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_DOUBLE_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT_to_DOUBLE_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_exclusive_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_reverse_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_1d_reverse_exclusive_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_cumsum_2d_negative_axis_cpu', + 'OnnxBackendNodeModelTest.test_eyelike_with_dtype_cpu', + 'OnnxBackendNodeModelTest.test_mod_mixed_sign_float64_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_add_broadcast_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_broadcast_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_right_broadcast_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_add_size1_singleton_broadcast_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_addconstant_cpu', + + # Unsupported primitive of type: Ceiling name: y + 'OnnxBackendNodeModelTest.test_ceil_cpu', + 'OnnxBackendNodeModelTest.test_ceil_example_cpu', + + # Can't convert dims 0 to Layout! 
+ 'OnnxBackendNodeModelTest.test_pow_bcast_scalar_cpu', + + # RuntimeError: data [] doesn't exist + 'OnnxBackendNodeModelTest.test_cast_FLOAT16_to_FLOAT_cpu', + 'OnnxBackendNodeModelTest.test_cast_FLOAT_to_FLOAT16_cpu', + 'OnnxBackendNodeModelTest.test_constant_cpu', + 'OnnxBackendNodeModelTest.test_dropout_default_cpu', + 'OnnxBackendNodeModelTest.test_dropout_random_cpu', + 'OnnxBackendNodeModelTest.test_eyelike_populate_off_main_diagonal_cpu', + 'OnnxBackendNodeModelTest.test_eyelike_without_dtype_cpu', + 'OnnxBackendNodeModelTest.test_identity_cpu', + 'OnnxBackendNodeModelTest.test_max_one_input_cpu', + 'OnnxBackendNodeModelTest.test_mean_one_input_cpu', + 'OnnxBackendNodeModelTest.test_min_one_input_cpu', + 'OnnxBackendNodeModelTest.test_shape_cpu', + 'OnnxBackendNodeModelTest.test_shape_example_cpu', + 'OnnxBackendNodeModelTest.test_size_cpu', + 'OnnxBackendNodeModelTest.test_size_example_cpu', + 'OnnxBackendNodeModelTest.test_sum_one_input_cpu', + + # RuntimeError: [PARAMETER_MISMATCH] Failed to set Blob with precision FP32 + 'OnnxBackendNodeModelTest.test_equal_bcast_cpu', + 'OnnxBackendNodeModelTest.test_equal_cpu', + + # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... + 'OnnxBackendNodeModelTest.test_gather_0_cpu', + 'OnnxBackendNodeModelTest.test_gather_1_cpu', + 'OnnxBackendNodeModelTest.test_gather_negative_indices_cpu', + 'OnnxBackendNodeModelTest.test_mod_int64_fmod_cpu', + 'OnnxBackendNodeModelTest.test_reversesequence_batch_cpu', + 'OnnxBackendNodeModelTest.test_reversesequence_time_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_non_float_params_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_Embedding_sparse_cpu', + + # RuntimeError: Cannot cast ngraph node LSTMSequence to CNNLayer! 
+ 'OnnxBackendNodeModelTest.test_lstm_defaults_cpu', + 'OnnxBackendNodeModelTest.test_lstm_with_initial_bias_cpu', + 'OnnxBackendNodeModelTest.test_lstm_with_peepholes_cpu', + + # RuntimeError: Cannot cast ngraph node output to CNNLayer! + 'OnnxBackendNodeModelTest.test_gathernd_example_float32_cpu', + + # AssertionError: result mismatch + 'OnnxBackendNodeModelTest.test_argmax_default_axis_example_cpu', + 'OnnxBackendNodeModelTest.test_argmax_default_axis_random_cpu', + 'OnnxBackendNodeModelTest.test_argmax_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmax_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmax_negative_axis_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_argmax_no_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmax_no_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_argmin_default_axis_example_cpu', + 'OnnxBackendNodeModelTest.test_argmin_default_axis_random_cpu', + 'OnnxBackendNodeModelTest.test_argmin_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmin_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmin_negative_axis_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_argmin_no_keepdims_example_cpu', + 'OnnxBackendNodeModelTest.test_argmin_no_keepdims_random_cpu', + 'OnnxBackendNodeModelTest.test_elu_example_cpu', + 'OnnxBackendNodeModelTest.test_logsoftmax_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_logsoftmax_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_logsoftmax_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_mvn_cpu', + 'OnnxBackendNodeModelTest.test_softmax_axis_0_cpu', + 'OnnxBackendNodeModelTest.test_softmax_axis_1_cpu', + 'OnnxBackendNodeModelTest.test_softmax_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_split_equal_parts_1d_cpu', + 'OnnxBackendNodeModelTest.test_split_equal_parts_2d_cpu', + 
'OnnxBackendNodeModelTest.test_split_equal_parts_default_axis_cpu', + 'OnnxBackendNodeModelTest.test_split_variable_parts_1d_cpu', + 'OnnxBackendNodeModelTest.test_split_variable_parts_2d_cpu', + 'OnnxBackendNodeModelTest.test_split_variable_parts_default_axis_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_chunk_cpu', + 'OnnxBackendPyTorchOperatorModelTest.test_operator_symbolic_override_nested_cpu', + 'OnnxBackendNodeModelTest.test_clip_example_cpu', + 'OnnxBackendNodeModelTest.test_clip_inbounds_cpu', + 'OnnxBackendNodeModelTest.test_clip_outbounds_cpu', + 'OnnxBackendNodeModelTest.test_instancenorm_example_cpu', + + # RuntimeError: Node Split contains empty child edge for index 0 + 'OnnxBackendPyTorchConvertedModelTest.test_GLU_cpu', + 'OnnxBackendPyTorchConvertedModelTest.test_GLU_dim_cpu', + + # RuntimeError: invalid next size (fast) + 'OnnxBackendNodeModelTest.test_basic_conv_with_padding_cpu', + + # RuntimeError: Detected op not belonging to opset1 + 'OnnxBackendNodeModelTest.test_round_cpu', + ] + +tests_xfail = general_tests_xfail + tests_xfail_custom + +for test_name in tests_xfail: + expect_fail('{}'.format(test_name)) diff --git a/tests/test_ngraph_backend.py b/tests/test_ngraph_backend.py index 2fe32356..d5c98c19 100644 --- a/tests/test_ngraph_backend.py +++ b/tests/test_ngraph_backend.py @@ -24,10 +24,10 @@ from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from ngraph_onnx.onnx_importer.backend import NgraphBackend -import tests.utils +from tests.utils import BACKEND_NAME # Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones. 
-selected_backend_name = tests.utils.BACKEND_NAME +selected_backend_name = BACKEND_NAME @pytest.fixture() diff --git a/tests/test_ops_batchnorm.py b/tests/test_ops_batchnorm.py index e6d2b080..8240ac48 100644 --- a/tests/test_ops_batchnorm.py +++ b/tests/test_ops_batchnorm.py @@ -15,6 +15,7 @@ # ****************************************************************************** import onnx +import pytest import numpy as np @@ -26,6 +27,7 @@ def make_batch_norm_node(**node_attributes): outputs=['Y'], **node_attributes) +@pytest.mark.skip_on_ie def test_batch_norm_test_node(): data = np.arange(48).reshape((1, 3, 4, 4)).astype(np.float32) scale = np.ones((3,)).astype(np.float32) # Gamma diff --git a/tests/test_ops_binary.py b/tests/test_ops_binary.py index 8d878da0..111cdc2a 100644 --- a/tests/test_ops_binary.py +++ b/tests/test_ops_binary.py @@ -22,7 +22,7 @@ import pytest from onnx.helper import make_tensor_value_info, make_graph, make_model -from tests.utils import run_model +from tests.utils import run_model, xfail_test def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **node_attributes): @@ -41,7 +41,7 @@ def import_and_compute(op_type, input_data_left, input_data_right, opset=7, **no return run_model(model, inputs)[0] -@pytest.mark.skip_on_ie # RuntimeError: Can't convert dims 0 to Layout! +@xfail_test("IE:CPU", reason="RuntimeError: Can't convert dims 0 to Layout") # noqa: Q000 Remove bad quotes def test_add_opset4(): assert np.array_equal(import_and_compute('Add', 1, 2, opset=4), np.array(3, dtype=np.float32)) @@ -107,7 +107,6 @@ def test_add_opset7(left_shape, right_shape): left_input + right_input) -@pytest.mark.skip_on_ie # RuntimeError: Can't convert dims 0 to Layout! def test_sub(): assert np.array_equal(import_and_compute('Sub', 20, 1), np.array(19, dtype=np.float32)) @@ -122,7 +121,6 @@ def test_sub(): np.array([[-6, -6, -6], [-3, -3, -3]], dtype=np.float32)) -@pytest.mark.skip_on_ie # RuntimeError: Can't convert dims 0 to Layout! 
def test_mul(): assert np.array_equal(import_and_compute('Mul', 2, 3), np.array(6, dtype=np.float32)) @@ -137,7 +135,6 @@ def test_mul(): np.array([[7, 16, 27], [28, 40, 54]], dtype=np.float32)) -@pytest.mark.skip_on_ie # RuntimeError: Can't convert dims 0 to Layout! def test_div(): assert np.array_equal(import_and_compute('Div', 6, 3), np.array(2, dtype=np.float32)) diff --git a/tests/test_ops_convpool.py b/tests/test_ops_convpool.py index 4d00543f..948736b4 100644 --- a/tests/test_ops_convpool.py +++ b/tests/test_ops_convpool.py @@ -22,7 +22,7 @@ import numpy as np from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from ngraph_onnx.onnx_importer.importer import import_onnx_model -from tests.utils import run_model, run_node, get_node_model, get_runtime +from tests.utils import run_model, run_node, get_node_model, get_runtime, xfail_test @pytest.fixture @@ -59,7 +59,6 @@ def import_and_compute_conv(x, weights, transpose=False, **attributes): return computation(x, weights)[0] -@pytest.mark.skip_on_ie # RuntimeError: Layer Y input port 1 is not connected to any data def test_2d_conv(): # x should have shape N(batch) x C x H x W input_x = np.array([ @@ -125,7 +124,6 @@ def test_2d_conv(): dtype=np.float32)) -@pytest.mark.skip_on_ie # RuntimeError: Layer Y input port 1 is not connected to any data def test_3d_conv(): # x should have shape N(batch) x C x H x W x D input_x = np.array([ @@ -170,7 +168,7 @@ def test_3d_conv(): dtype=np.float32)) -@pytest.mark.skip_on_ie # RuntimeError: Layer Y input port 1 is not connected to any data +@xfail_test('IE:CPU', reason='RuntimeError: Layer Y input port 1 is not connected to any data') def test_2d_conv_transpose(): # x should have shape N(batch) x C x H x W input_x = np.array( @@ -207,7 +205,7 @@ def test_2d_conv_transpose(): dtype=np.float32)) -@pytest.mark.xfail(reason='NGONNX-498') +@pytest.mark.xfail(reason='NGONNX-498', strict=True) def test_pad_opset_1(): x = np.ones((2, 2), dtype=np.float32) 
y = np.pad(x, pad_width=1, mode='constant') @@ -259,7 +257,7 @@ def test_pad_opset_2(): # Error of validate layer: B with type: Pad. Cannot parse parameter pads_begin # from IR for layer B. Value -1,0 cannot be casted to int. -@pytest.mark.skip_on_ie +@xfail_test('IE:CPU', reason='RuntimeError: Cannot parse parameter pads_begin from IR for layer B. Value -1,0 cannot be casted to int') def test_pad_negative_values_begin(): x = np.ones((2, 2), dtype=np.float32) @@ -276,7 +274,7 @@ def test_pad_negative_values_begin(): # Error of validate layer: B with type: Pad. Cannot parse parameter pads_begin # from IR for layer B. Value -1,0 cannot be casted to int. -@pytest.mark.skip_on_ie +@xfail_test('IE:CPU', reason='RuntimeError: Cannot parse parameter pads_begin from IR for layer B. Value -1,0 cannot be casted to int') def test_pad_negative_values_end(): x = np.ones((2, 2), dtype=np.float32) diff --git a/tests/test_ops_logical.py b/tests/test_ops_logical.py index 547df9da..bcdf4ae6 100644 --- a/tests/test_ops_logical.py +++ b/tests/test_ops_logical.py @@ -20,12 +20,11 @@ import onnx import pytest -from tests.utils import run_node +from tests.utils import run_node, xfail_test -# [NOT_IMPLEMENTED] Input image format BOOL is not supported yet... # [PARAMETER_MISMATCH] Failed to set Blob with precision FP32 -@pytest.mark.skip_on_ie +@xfail_test('IE:CPU', reason='Input image format BOOL is not supported yet') @pytest.mark.parametrize('onnx_op, numpy_func, data_type', [ ('And', np.logical_and, np.bool), ('Or', np.logical_or, np.bool), @@ -50,7 +49,7 @@ def test_logical(onnx_op, numpy_func, data_type): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format BOOL is not supported yet...
+@xfail_test('IE:CPU', reason='Input image format BOOL is not supported yet') def test_logical_not(): input_data = np.array([[False, True, True], [False, True, False], [False, False, True]]) expected_output = np.logical_not(input_data) diff --git a/tests/test_ops_matmul.py b/tests/test_ops_matmul.py index a83dc057..72904dec 100644 --- a/tests/test_ops_matmul.py +++ b/tests/test_ops_matmul.py @@ -16,14 +16,14 @@ from __future__ import print_function, division -import pytest - import onnx +import pytest + import numpy as np from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from ngraph_onnx.onnx_importer.importer import import_onnx_model -from tests.utils import get_runtime +from tests.utils import get_runtime, xfail_test def make_onnx_model_for_matmul_op(input_left, input_right): @@ -95,8 +95,7 @@ def import_and_compute_gemm(input_a, input_b, input_c, **kwargs): return computation(input_a, input_b, input_c)[0] -# Error of validate layer: Z with type: Gemm. Gemm input shapes must have at least 2 dimensions -@pytest.mark.skip_on_ie +@xfail_test('IE:CPU', reason='Error: Z with type: Gemm. Gemm input shapes must have at least 2 dimensions') def test_op_matmul(): # vector @ vector data = ([1, 2], [1, 3]) @@ -136,7 +135,8 @@ def test_op_matmul_3d(): assert np.array_equal(import_and_compute_matmul(*data), np.matmul(*data)) -@pytest.mark.xfail(reason='NGONNX-494') +@xfail_test('IE:CPU', reason='NGONNX-494, RuntimeError: Error of validate layer: \ + MatMul_11405 with type: Gemm. Gemm input shapes must have at least 2 dimensions') def test_gemm(): data = ([1, 2], [1, 3], [1, 4]) assert np.array_equal(import_and_compute_gemm(*data), numpy_gemm(*data)) @@ -156,7 +156,8 @@ def test_gemm(): assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) -@pytest.mark.xfail(reason='NGONNX-494') +@xfail_test('IE:CPU', reason='NGONNX-494, RuntimeError: Error of validate layer: \ + MatMul_13893 with type: Gemm. 
Gemm input shapes must have at least 2 dimensions') def test_gemm_transpositions(): data = ([1, 2], [1, 3], [1, 4]) kwargs = {'trans_a': True, 'trans_b': True} @@ -175,7 +176,8 @@ def test_gemm_transpositions(): assert np.array_equal(import_and_compute_gemm(*data, **kwargs), numpy_gemm(*data, **kwargs)) -@pytest.mark.xfail(reason='NGONNX-494') +@pytest.mark.xfail(reason='NGONNX-494, ValueError: shapes (4,1,1) and (4,) not aligned: 1 (dim 2) != 4 (dim 0)', + strict=True) def test_gemm_flatten(): # input_a.shape is (4,1,1) data = ([[[1]], [[2]], [[3]], [[4]]], [1, 3, 5, 7], [1, 4]) diff --git a/tests/test_ops_nonlinear.py b/tests/test_ops_nonlinear.py index 7e2c3766..8f4e3d91 100644 --- a/tests/test_ops_nonlinear.py +++ b/tests/test_ops_nonlinear.py @@ -19,7 +19,7 @@ import onnx import pytest -from tests.utils import run_node +from tests.utils import run_node, xfail_test def import_and_compute(op_type, input_data, **node_attrs): @@ -72,7 +72,8 @@ def leaky_relu(x, alpha=0.01): assert_onnx_import_equals_callable('LeakyRelu', leaky_relu, [[-3, -2, -1], [1, 2, 3]]) -@pytest.mark.skip_on_ie # RuntimeError: Layer y input port 1 is not connected to any data +@xfail_test('IE:CPU', reason='RuntimeError: Error of validate layer: y with type: PReLU. 
\ + Number of inputs (2) is not equal to expected ones: 1') @pytest.mark.parametrize('x,slope', [ ([-2, -1., 0., 1., 2.], 0.5), ([0.], 1), @@ -104,8 +105,7 @@ def selu(x, alpha=1.67326319217681884765625, gamma=1.05070102214813232421875): assert_onnx_import_equals_callable('Selu', selu, [-2, -1., 0., 1., 2.], gamma=0.5, alpha=0.5) -# AssertionError: resutl mismatch -@pytest.mark.skip_on_ie +@xfail_test('IE:CPU', reason='AssertionError: Results mismatch') def test_elu(): # f(x) = alpha * (exp(x) - 1) for x < 0, f(x) = x for x >= 0 def elu(x, alpha=1): diff --git a/tests/test_ops_reduction.py b/tests/test_ops_reduction.py index d40a062b..fb778a6b 100644 --- a/tests/test_ops_reduction.py +++ b/tests/test_ops_reduction.py @@ -20,7 +20,7 @@ import numpy as np import pytest -from tests.utils import run_node +from tests.utils import run_node, xfail_test def import_and_compute(op_type, input_data, **node_attrs): @@ -217,7 +217,6 @@ def test_reduce_l1_default_axes(): assert np.allclose(expected, ng_result) -@pytest.mark.skip_on_ie # RuntimeError: Unsupported primitive of type: Sqrt @pytest.mark.parametrize('reduction_axes', [ (0,), (0, 2), @@ -243,7 +242,6 @@ def test_reduce_l2(reduction_axes): assert np.allclose(expected, ng_result) -@pytest.mark.skip_on_ie # RuntimeError: Unsupported primitive of type: Sqrt def test_reduce_l2_default_axes(): shape = [2, 4, 3, 2] np.random.seed(133391) @@ -377,7 +375,7 @@ def test_reduce_sum_square_default_axes(): assert np.allclose(expected, ng_result) -@pytest.mark.skip_on_ie # AssertionError: result mismatch +@xfail_test('IE:CPU', reason="RuntimeError: data [y] doesn't exist") def test_reduce_argmin(): def argmin(ndarray, axis, keepdims=False): res = np.argmin(ndarray, axis=axis) @@ -401,7 +399,7 @@ def argmin(ndarray, axis, keepdims=False): argmin(data, keepdims=False, axis=2)) -@pytest.mark.skip_on_ie # AssertionError: result mismatch +@xfail_test('IE:CPU', reason="RuntimeError: data [y] doesn't exist") def test_reduce_argmax(): def 
argmax(ndarray, axis, keepdims=False): res = np.argmax(ndarray, axis=axis) diff --git a/tests/test_ops_unary.py b/tests/test_ops_unary.py index 21bce26d..ad935dd3 100644 --- a/tests/test_ops_unary.py +++ b/tests/test_ops_unary.py @@ -21,13 +21,13 @@ import onnx.mapping import numpy as np -from tests.utils import run_model, run_node, get_node_model, get_runtime +from tests.utils import run_model, run_node, get_node_model, get_runtime, xfail_test from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from ngraph_onnx.onnx_importer.importer import import_onnx_model from ngraph.exceptions import NgraphTypeError -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') @pytest.mark.parametrize('input_data', [ np.array([-4, 0, 5, -10]), np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]]), @@ -40,7 +40,6 @@ def test_abs(input_data): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # Unsupported primitive of type: Sqrt @pytest.mark.parametrize('input_data', [ np.array([4, 0, 5, 10]), np.array([[4, 0, 5, 10], [4, 0, 5, 10]]), @@ -80,7 +79,7 @@ def test_log(input_data): assert np.allclose(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') @pytest.mark.parametrize('input_data', [ np.array([-4, 0, 5, -10]), np.array([[-4, 0, 5, -10], [-4, 0, 5, -10]]), @@ -93,7 +92,7 @@ def test_neg(input_data): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # Incorrect precision f64! 
+@xfail_test('IE:CPU', reason='RuntimeError: Incorrect precision f64') @pytest.mark.parametrize('input_data', [ np.array([-4.2, 0.43, 5.99, -10.01]), np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), @@ -106,7 +105,7 @@ def test_floor(input_data): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # Incorrect precision f64! +@xfail_test('IE:CPU', reason='RuntimeError: Incorrect precision f64') @pytest.mark.parametrize('input_data', [ np.array([-4.2, 0, 5.99, -10.01]), np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), @@ -149,7 +148,7 @@ def test_clip_default(): assert np.allclose(result, [expected]) -@pytest.mark.skip_on_ie # Incorrect precision f64! +@xfail_test('IE:CPU', reason='RuntimeError: Incorrect precision f64') @pytest.mark.parametrize('input_data', [ np.array([-4.2, 1, 5.99, -10.01]), np.array([[-4.5, 0.99, 5.01, -10.00], [-4.5, 0.5, 5.1, 10.01]]), @@ -162,7 +161,6 @@ def test_reciprocal(input_data): assert np.allclose(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # Result mismatch @pytest.mark.skip_on_intelgpu @pytest.mark.skip_on_plaidml @pytest.mark.parametrize('axis, dim1, dim2', [ @@ -182,7 +180,6 @@ def hardmax_2d(data): assert np.allclose(ng_results, [expected]) -@pytest.mark.skip_on_ie # Result mismatch @pytest.mark.skip_on_intelgpu @pytest.mark.skip_on_plaidml def test_hardmax_special_cases(): @@ -237,7 +234,7 @@ def hardsigmoid(data, alpha=float(0.2), beta=float(0.5)): assert np.allclose(ng_results, [expected]) -@pytest.mark.skip_on_ie # Result mismatch +@xfail_test('IE:CPU', reason='AssertionError: Result mismatch') def test_softmax(): def softmax_2d(x): max_x = np.max(x, axis=1).reshape((-1, 1)) @@ -277,7 +274,7 @@ def softmax_2d(x): ng_results = run_node(node, [data]) -@pytest.mark.skip_on_ie # Result mismatch +@xfail_test('IE:CPU', reason='AssertionError: Result mismatch') def test_logsoftmax(): def logsoftmax_2d(x): max_x = np.max(x, axis=1).reshape((-1, 1)) @@ -338,7
+335,7 @@ def softsign(x): assert np.allclose(ng_results, [expected]) -@pytest.mark.skip_on_ie # RuntimeError: data [y] doesn't exist +@xfail_test('IE:CPU', reason="RuntimeError: data [y] doesn't exist") def test_identity(): np.random.seed(133391) shape = [2, 4] @@ -366,7 +363,7 @@ def test_identity(): assert np.array_equal(ng_results[0], expected_result) -@pytest.mark.skip_on_ie # RuntimeError: data [B] doesn't exist +@xfail_test('IE:CPU', reason="RuntimeError: data [B] doesn't exist") @pytest.mark.parametrize('val_type, input_data', [ (np.dtype(bool), np.zeros((2, 2), dtype=int)), ]) @@ -379,10 +376,13 @@ def test_cast_to_bool(val_type, input_data): assert np.allclose(result, expected) -@pytest.mark.skip_on_ie # data [B] doesn't exist, Incorrect precision f64! @pytest.mark.parametrize('val_type, range_start, range_end, in_dtype', [ - (np.dtype(np.float32), -8, 8, np.dtype(np.int32)), - (np.dtype(np.float64), -16383, 16383, np.dtype(np.int64)), + pytest.param(np.dtype(np.float32), -8, 8, np.dtype(np.int32), + marks=xfail_test('IE:CPU', + reason="RuntimeError: data [values] doesn't exist")), + pytest.param(np.dtype(np.float64), -16383, 16383, np.dtype(np.int64), + marks=xfail_test('IE:CPU', + reason='RuntimeError: Incorrect precision f64')), ]) def test_cast_to_float(val_type, range_start, range_end, in_dtype): np.random.seed(133391) @@ -395,12 +395,14 @@ def test_cast_to_float(val_type, range_start, range_end, in_dtype): assert np.allclose(result, expected) -@pytest.mark.skip_on_ie # RuntimeError: data [B] doesn't exist @pytest.mark.parametrize('val_type', [ - np.dtype(np.int8), - np.dtype(np.int16), + pytest.param(np.dtype(np.int8), + marks=xfail_test('IE:CPU', reason="RuntimeError: data [B] doesn't exist")), + pytest.param(np.dtype(np.int16), + marks=xfail_test('IE:CPU', reason="RuntimeError: data [B] doesn't exist")), np.dtype(np.int32), - np.dtype(np.int64), + pytest.param(np.dtype(np.int64), + marks=xfail_test('IE:CPU', reason="RuntimeError: Check 
'm_data.size() <= bytes'")), ]) def test_cast_to_int(val_type): np.random.seed(133391) @@ -413,12 +415,19 @@ def test_cast_to_int(val_type): assert np.allclose(result, expected) -@pytest.mark.skip_on_ie # RuntimeError: data [B] doesn't exist @pytest.mark.parametrize('val_type', [ - np.dtype(np.uint8), - np.dtype(np.uint16), - np.dtype(np.uint32), - np.dtype(np.uint64), + pytest.param(np.dtype(np.uint8), + marks=xfail_test('IE:CPU', + reason="RuntimeError: data [values] doesn't exist")), + pytest.param(np.dtype(np.uint16), + marks=xfail_test('IE:CPU', + reason="RuntimeError: data [values] doesn't exist")), + pytest.param(np.dtype(np.uint32), + marks=xfail_test('IE:CPU', + reason='RuntimeError: Incorrect precision u32')), + pytest.param(np.dtype(np.uint64), + marks=xfail_test('IE:CPU', + reason="RuntimeError: data [values] doesn't exist")), ]) def test_cast_to_uint(val_type): np.random.seed(133391) @@ -485,10 +494,13 @@ def test_cast_errors(): import_onnx_model(model) -@pytest.mark.skip_on_ie # RuntimeError: data [values] doesn't exist, Incorrect precision f64! 
@pytest.mark.parametrize('value_type', [ - np.float32, - np.float64, + pytest.param(np.float32, + marks=xfail_test('IE:CPU', + reason="RuntimeError: data [values] doesn't exist")), + pytest.param(np.float64, + marks=xfail_test('IE:CPU', + reason='RuntimeError: Incorrect precision f64')), ]) def test_constant(value_type): values = np.random.randn(5, 5).astype(value_type) @@ -507,7 +519,7 @@ def test_constant(value_type): # See https://github.com/onnx/onnx/issues/1190 -@pytest.mark.xfail(reason='ONNX#1190 numpy.float16 not supported by ONNX make_node.') +@pytest.mark.xfail(reason='ONNX#1190 numpy.float16 not supported by ONNX make_node', strict=True) def test_constant_err(): values = np.random.randn(5, 5).astype(np.float16) node = onnx.helper.make_node( diff --git a/tests/test_ops_variadic.py b/tests/test_ops_variadic.py index 900a6eb3..5d14d591 100644 --- a/tests/test_ops_variadic.py +++ b/tests/test_ops_variadic.py @@ -23,10 +23,10 @@ import onnx -from tests.utils import run_node +from tests.utils import run_node, xfail_test -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') @pytest.mark.parametrize('onnx_op,numpy_func', [ ('Sum', np.add), ('Min', np.minimum), @@ -41,7 +41,7 @@ def test_variadic(onnx_op, numpy_func): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... 
+@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') def test_mean(): data = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])] node = onnx.helper.make_node('Mean', inputs=['data_0', 'data_1', 'data_2'], outputs=['y']) diff --git a/tests/test_reshape.py b/tests/test_reshape.py index bd754dd8..a4245557 100644 --- a/tests/test_reshape.py +++ b/tests/test_reshape.py @@ -20,12 +20,12 @@ import onnx import pytest -from tests.utils import all_arrays_equal, run_model, run_node, get_node_model, get_runtime +from tests.utils import all_arrays_equal, run_model, run_node, get_node_model, get_runtime, xfail_test from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from ngraph_onnx.onnx_importer.importer import import_onnx_model -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') def test_reshape(): input_data = np.arange(2560).reshape(16, 4, 4, 10) reshape_node = onnx.helper.make_node('Reshape', inputs=['x'], outputs=['y'], shape=(256, 10)) @@ -70,7 +70,7 @@ def test_reshape_opset5(): assert np.array_equal(ng_results[0], expected_output) -@pytest.mark.xfail(reason='NGONNX-357 Dynamic reshape not supported.') +@pytest.mark.xfail(reason='NGONNX-357 Dynamic reshape not supported', strict=True) def test_reshape_opset5_param_err(): original_shape = [2, 3, 4] output_shape = np.array([4, 2, 3], dtype=np.int64) @@ -80,7 +80,7 @@ def test_reshape_opset5_param_err(): assert ng_result[0].shape == output_shape -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... 
+@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') @pytest.mark.parametrize('axis,expected_output', [ (0, np.arange(120).reshape(1, 120)), (1, np.arange(120).reshape(2, 60)), @@ -103,7 +103,7 @@ def test_flatten_exception(): run_node(node, [data]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') def test_transpose(): data = np.arange(120).reshape(2, 3, 4, 5) @@ -118,7 +118,7 @@ def test_transpose(): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # RuntimeError: B has zero dimension that is not allowable +@xfail_test('IE:CPU', reason='RuntimeError: B has zero dimension that is not allowable') def test_slice_opset1(): data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) @@ -167,7 +167,7 @@ def test_slice_opset1(): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') def test_concat(): a = np.array([[1, 2], [3, 4]]) b = np.array([[5, 6]]) @@ -212,7 +212,7 @@ def test_concat(): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... +@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') def test_squeeze(): data = np.arange(6).reshape(1, 2, 3, 1) expected_output = data.reshape(2, 3) @@ -246,7 +246,7 @@ def test_unsqueeze(): assert np.array_equal(ng_results, [expected_output]) -@pytest.mark.skip_on_ie # [NOT_IMPLEMENTED] Input image format I64 is not supported yet... 
+@xfail_test('IE:CPU', reason='Input image format I64 is not supported yet') @pytest.mark.parametrize('node, expected_output', [ # Split into 2 equal parts along axis=0 (onnx.helper.make_node('Split', inputs=['x'], outputs=['y', 'z'], axis=0), diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py index ffbd964f..678d7a8a 100644 --- a/tests/utils/__init__.py +++ b/tests/utils/__init__.py @@ -20,6 +20,7 @@ import numpy as np import ngraph as ng +import pytest from onnx.helper import make_node, make_graph, make_tensor_value_info, make_model from typing import List, Dict, Text, Any, Optional, Iterable @@ -35,6 +36,10 @@ BACKEND_NAME = None +def xfail_test(*backends, reason='Mark the test as expected to fail'): + return pytest.mark.xfail(condition=BACKEND_NAME in backends, reason=reason, strict=True) + + def get_runtime(): return ng.runtime(backend_name=BACKEND_NAME) diff --git a/tox.ini b/tox.ini index 7cebdcde..c10d6bbc 100644 --- a/tox.ini +++ b/tox.ini @@ -32,7 +32,7 @@ commands= mkdir -p .tox/mypy-imports ln -sf {envsitepackagesdir}/ngraph .tox/mypy-imports/ngraph mypy --config-file=tox.ini {posargs:ngraph_onnx/} - pytest {posargs:tests/} --backend={env:NGRAPH_BACKEND:CPU} -v -s -k 'not _cuda' + pytest {posargs:tests/} --backend={env:NGRAPH_BACKEND:CPU} -v -s -k 'not _cuda' {env:PYTEST_ARGS:} [pytest] timeout = 300