From 5f8f1a96797cfce64032492151b01cf0e1c97f06 Mon Sep 17 00:00:00 2001 From: Ryan OShea <86965113+ArmRyan@users.noreply.github.com> Date: Wed, 16 Oct 2024 13:20:55 +0200 Subject: [PATCH] Minimum and Maximum s8 operator support (#148) * Adds Minimum and Maximum functions for s8 * Adds Refactored Unit Tests for Minimum and Maximum s8 * Fix small issue with build_and_run_tests.sh Change-Id: I38333a14888b59293dcafa633105ec65c2d582a1 --------- Signed-off-by: Ryan O'Shea --- ARM.CMSIS-NN.pdsc | 2 + Include/arm_nnfunctions.h | 56 ++- Include/arm_nnsupportfunctions.h | 24 +- README.md | 2 + Source/BasicMathFunctions/arm_maximum_s8.c | 263 +++++++++++++ Source/BasicMathFunctions/arm_minimum_s8.c | 262 +++++++++++++ Tests/UnitTest/CMakeLists.txt | 1 + Tests/UnitTest/README.md | 4 + .../Lib/op_maximum_minimum.py | 110 ++++++ Tests/UnitTest/RefactoredTestGen/Lib/test.py | 5 +- .../UnitTest/RefactoredTestGen/test_plan.json | 202 ++++++++++ .../config_data.h | 25 ++ .../input_tensor_1.h | 19 + .../input_tensor_2.h | 13 + .../maximum_broadcast_batch_int8/output.h | 19 + .../maximum_broadcast_batch_int8/test_data.h | 4 + .../maximum_broadcast_ch_int8/config_data.h | 25 ++ .../input_tensor_1.h | 7 + .../input_tensor_2.h | 27 ++ .../maximum_broadcast_ch_int8/output.h | 26 ++ .../maximum_broadcast_ch_int8/test_data.h | 4 + .../config_data.h | 25 ++ .../input_tensor_1.h | 9 + .../input_tensor_2.h | 20 + .../maximum_broadcast_height_int8/output.h | 20 + .../maximum_broadcast_height_int8/test_data.h | 4 + .../config_data.h | 25 ++ .../input_tensor_1.h | 13 + .../input_tensor_2.h | 8 + .../maximum_broadcast_width_int8/output.h | 15 + .../maximum_broadcast_width_int8/test_data.h | 4 + .../maximum_no_broadcast_int8/config_data.h | 25 ++ .../input_tensor_1.h | 17 + .../input_tensor_2.h | 17 + .../maximum_no_broadcast_int8/output.h | 18 + .../maximum_no_broadcast_int8/test_data.h | 4 + .../maximum_scalar_1_int8/config_data.h | 25 ++ .../maximum_scalar_1_int8/input_tensor_1.h | 6 + .../maximum_scalar_1_int8/input_tensor_2.h | 14 + .../TestData/maximum_scalar_1_int8/output.h | 15 + .../maximum_scalar_1_int8/test_data.h | 4 + .../maximum_scalar_2_int8/config_data.h | 25 ++ .../maximum_scalar_2_int8/input_tensor_1.h | 14 + .../maximum_scalar_2_int8/input_tensor_2.h | 6 + .../TestData/maximum_scalar_2_int8/output.h | 15 + .../maximum_scalar_2_int8/test_data.h | 4 + .../config_data.h | 25 ++ .../input_tensor_1.h | 19 + .../input_tensor_2.h | 12 + .../minimum_broadcast_batch_int8/output.h | 22 ++ .../minimum_broadcast_batch_int8/test_data.h | 4 + .../minimum_broadcast_ch_int8/config_data.h | 25 ++ .../input_tensor_1.h | 7 + .../input_tensor_2.h | 27 ++ .../minimum_broadcast_ch_int8/output.h | 29 ++ .../minimum_broadcast_ch_int8/test_data.h | 4 + .../config_data.h | 25 ++ .../input_tensor_1.h | 10 + .../input_tensor_2.h | 20 + .../minimum_broadcast_height_int8/output.h | 23 ++ .../minimum_broadcast_height_int8/test_data.h | 4 + .../config_data.h | 25 ++ .../input_tensor_1.h | 14 + .../input_tensor_2.h | 8 + .../minimum_broadcast_width_int8/output.h | 16 + .../minimum_broadcast_width_int8/test_data.h | 4 + .../minimum_no_broadcast_int8/config_data.h | 25 ++ .../input_tensor_1.h | 17 + .../input_tensor_2.h | 17 + .../minimum_no_broadcast_int8/output.h | 20 + .../minimum_no_broadcast_int8/test_data.h | 4 + .../minimum_scalar_1_int8/config_data.h | 25 ++ .../minimum_scalar_1_int8/input_tensor_1.h | 6 + .../minimum_scalar_1_int8/input_tensor_2.h | 14 + .../TestData/minimum_scalar_1_int8/output.h | 16 + 
.../minimum_scalar_1_int8/test_data.h | 4 + .../minimum_scalar_2_int8/config_data.h | 25 ++ .../minimum_scalar_2_int8/input_tensor_1.h | 14 + .../minimum_scalar_2_int8/input_tensor_2.h | 6 + .../TestData/minimum_scalar_2_int8/output.h | 16 + .../minimum_scalar_2_int8/test_data.h | 4 + .../CMakeLists.txt | 25 ++ .../Unity/unity_test_arm_maximum_minimum_s8.c | 62 +++ .../test_arm_maximum_s8.c | 365 ++++++++++++++++++ .../test_arm_minimum_s8.c | 365 ++++++++++++++++++ Tests/UnitTest/build_and_run_tests.sh | 3 +- 86 files changed, 2802 insertions(+), 6 deletions(-) create mode 100644 Source/BasicMathFunctions/arm_maximum_s8.c create mode 100644 Source/BasicMathFunctions/arm_minimum_s8.c create mode 100644 Tests/UnitTest/RefactoredTestGen/Lib/op_maximum_minimum.py create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/output.h create mode 100644 
Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_1.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/config_data.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_1.h create mode 100644 
Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_2.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/output.h create mode 100644 Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/test_data.h create mode 100644 Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/CMakeLists.txt create mode 100644 Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/Unity/unity_test_arm_maximum_minimum_s8.c create mode 100644 Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_maximum_s8.c create mode 100644 Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_minimum_s8.c diff --git a/ARM.CMSIS-NN.pdsc b/ARM.CMSIS-NN.pdsc index 30e2010b..48cc75c2 100644 --- a/ARM.CMSIS-NN.pdsc +++ b/ARM.CMSIS-NN.pdsc @@ -96,6 +96,8 @@ + + diff --git a/Include/arm_nnfunctions.h b/Include/arm_nnfunctions.h index 017dd0af..26b61422 100644 --- a/Include/arm_nnfunctions.h +++ b/Include/arm_nnfunctions.h @@ -21,8 +21,8 @@ * Title: arm_nnfunctions.h * Description: Public header file for CMSIS NN Library * - * $Date: 5 Sep 2024 - * $Revision: V.17.0.0 + * $Date: 08 October 2024 + * $Revision: V.17.1.0 * * Target : Arm(R) M-Profile Architecture * -------------------------------------------------------------------- */ @@ -2780,6 +2780,58 @@ arm_cmsis_nn_status arm_batch_matmul_s16(const cmsis_nn_context *ctx, const cmsis_nn_dims *output_dims, int16_t *output); +/** + * @brief Elementwise binary minimum with 8bit data. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] input_1_data Pointer to input1 tensor + * @param[in] input_1_dims Input1 tensor dimensions + * @param[in] input_2_data Pointer to input2 tensor + * @param[in] input_2_dims Input2 tensor dimensions + * @param[out] output_data Pointer to the output tensor + * @param[in] output_dims Output tensor dimensions + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_minimum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims); + +/** + * @brief Elementwise binary maximum with 8bit data. + * + * @param[in] ctx Temporary scratch buffer + * The caller is expected to clear the buffer, if applicable, for security reasons. + * @param[in] input_1_data Pointer to input1 tensor + * @param[in] input_1_dims Input1 tensor dimensions + * @param[in] input_2_data Pointer to input2 tensor + * @param[in] input_2_dims Input2 tensor dimensions + * @param[out] output_data Pointer to the output tensor + * @param[in] output_dims Output tensor dimensions + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. 
Supported framework: TensorFlow Lite Micro + * + */ +arm_cmsis_nn_status arm_maximum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims); + #ifdef __cplusplus } #endif diff --git a/Include/arm_nnsupportfunctions.h b/Include/arm_nnsupportfunctions.h index 12d3d20c..86f53baf 100644 --- a/Include/arm_nnsupportfunctions.h +++ b/Include/arm_nnsupportfunctions.h @@ -21,8 +21,8 @@ * Title: arm_nnsupportfunctions.h * Description: Public header file of support functions for CMSIS NN Library * - * $Date: 12 Jul 2024 - * $Revision: V.22.3.0 + * $Date: 08 October 2024 + * $Revision: V.22.4.0 * * Target : Arm(R) M-Profile Architecture * -------------------------------------------------------------------- */ @@ -2073,6 +2073,26 @@ arm_cmsis_nn_status arm_elementwise_mul_acc_s16(const int16_t *input_1_vect, const int32_t out_activation_max, const int32_t block_size); +/** + * @brief Check if a broadcast is required between 2 cmsis_nn_dims. + * @param[in] shape_1 pointer to input tensor 1 + * @param[in] shape_2 pointer to input tensor 2 + * @return The function returns 1 if a broadcast is required, or 0 if not. + * + * @details Compares each dimension and returns 1 if any dimension does not match. + * This function does not check that broadcast rules are met. + */ +__STATIC_FORCEINLINE int32_t arm_check_broadcast_required(const cmsis_nn_dims *shape_1, const cmsis_nn_dims *shape_2) +{ + if ((shape_1->n != shape_2->n) || (shape_1->h != shape_2->h) || (shape_1->w != shape_2->w) || + (shape_1->c != shape_2->c)) + { + return 1; + } + + return 0; +} + #ifdef __cplusplus } #endif diff --git a/README.md b/README.md index 790c9d84..eb059b0e 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,8 @@ Examples are Cortex-M55 or Cortex-M85 configured with MVE. | Fully Connected | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | Batch Matmul | Yes | Yes | No | Yes | Yes | No | Yes | Yes | No | | Add | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A | +| Minimum | Yes | No | N/A | No | No | N/A | Yes | No | N/A | +| Maximum | Yes | No | N/A | No | No | N/A | Yes | No | N/A | | Mul | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A | | MaxPooling | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A | | AvgPooling | Yes | Yes | N/A | Yes | Yes | N/A | Yes | Yes | N/A | diff --git a/Source/BasicMathFunctions/arm_maximum_s8.c b/Source/BasicMathFunctions/arm_maximum_s8.c new file mode 100644 index 00000000..83eac081 --- /dev/null +++ b/Source/BasicMathFunctions/arm_maximum_s8.c @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_maximum_s8 + * Description: Minimum and Maximum + * + * $Date: 08 October 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup minimumMaximum + * @{ + */ + +static arm_cmsis_nn_status +arm_max_no_broadcast_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec1 = vldrbq_z_s8(input_1, p); + input_1 += 16; + int8x16_t vec2 = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vmaxq_s8(vec1, vec2), p); + output += 16; + flat_size -= 16; + } +#else + while (flat_size > 0) + { + int8_t in1 = *input_1++; + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in1 : in2; + --flat_size; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +static arm_cmsis_nn_status +arm_max_scalar_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + int8x16_t scalar_vec = vdupq_n_s8(*input_1); + + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + int8x16_t vec = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vmaxq_s8(scalar_vec, vec), p); + output += 16; + flat_size -= 16; + } +#else + int8_t in1 = *input_1; + while (flat_size > 0) + { + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in1 : in2; + --flat_size; + } +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/* + * s8 maximum + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_maximum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims) +{ + (void)ctx; + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t output_width = output_dims->w; + + const int32_t input_1_batch = input_1_dims->n; + const int32_t input_1_height = input_1_dims->h; + const int32_t input_1_width = input_1_dims->w; + const int32_t input_1_channels = input_1_dims->c; + + const int32_t input_2_batch = input_2_dims->n; + const int32_t input_2_height = input_2_dims->h; + const int32_t input_2_width = input_2_dims->w; + const int32_t input_2_channels = input_2_dims->c; + + int32_t flat_size_1 = input_1_batch * input_1_height * input_1_width * input_1_channels; + int32_t flat_size_2 = input_2_batch * input_2_height * input_2_width * input_2_channels; + + if (arm_check_broadcast_required(input_1_dims, input_2_dims)) + { + if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_data, input_2_data, output_data, flat_size_2); + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_data, input_1_data, output_data, flat_size_1); + } + else + { + int32_t width_1_diff = input_1_width >= input_2_width ? 0 : input_1_channels; + int32_t width_2_diff = input_2_width >= input_1_width ? 0 : input_2_channels; + + int32_t height_1_diff = + input_1_height >= input_2_height ? 
width_1_diff : -input_1_width * (input_1_channels - width_1_diff); + int32_t height_2_diff = + input_2_height >= input_1_height ? width_2_diff : -input_2_width * (input_2_channels - width_2_diff); + + int32_t batch_1_diff = + input_1_batch >= input_2_batch ? input_1_channels * input_1_width * input_1_height : 0; + int32_t batch_2_diff = + input_2_batch >= input_1_batch ? input_2_channels * input_2_width * input_2_height : 0; + + for (int32_t i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + const int8_t *input_1_ptr = input_1_data; + const int8_t *input_2_ptr = input_2_data; + flat_size_1 = input_1_height * input_1_width * input_1_channels; + flat_size_2 = input_2_height * input_2_width * input_2_channels; + if (input_1_height == input_2_height && input_1_width == input_2_width && + input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else + { + flat_size_1 = input_1_width * input_1_channels; + flat_size_2 = input_2_width * input_2_channels; + for (int32_t i_out_height = 0; i_out_height < output_height; i_out_height++) + { + if (input_1_width == input_2_width && input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + input_1_ptr += flat_size_1; + input_2_ptr += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + ++input_1_ptr; + input_2_ptr += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + ++input_2_ptr; + input_1_ptr += flat_size_1; + } + else + { + for (int32_t i_out_width = 0; i_out_width < output_width; i_out_width++) + { + if (input_1_channels == input_2_channels) + { + arm_max_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr += input_1_channels; + } + else if (input_1_channels == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_1_ptr, input_2_ptr, output_data, input_2_channels); + output_data += input_2_channels; + input_1_ptr++; + input_2_ptr += input_2_channels; + } + else if (input_2_channels == 1) + { + // arm_max_scalar expects the tensor with the scalar value to be provided first + arm_max_scalar_s8(input_2_ptr, input_1_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr++; + } + input_1_ptr -= width_1_diff; + input_2_ptr -= width_2_diff; + } + } + input_1_ptr += height_1_diff; + input_2_ptr += height_2_diff; + } + } + input_1_data += batch_1_diff; + input_2_data += batch_2_diff; + } + } + } + else + { + 
arm_max_no_broadcast_s8(input_1_data, input_2_data, output_data, flat_size_1); + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen group + */ diff --git a/Source/BasicMathFunctions/arm_minimum_s8.c b/Source/BasicMathFunctions/arm_minimum_s8.c new file mode 100644 index 00000000..1ab7c295 --- /dev/null +++ b/Source/BasicMathFunctions/arm_minimum_s8.c @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_minimum_s8 + * Description: Minimum and Maximum + * + * $Date: 08 October 2024 + * $Revision: V.1.0.0 + * + * Target : Arm(R) M-Profile Architecture + * + * -------------------------------------------------------------------- */ + +#include "arm_nnfunctions.h" +#include "arm_nnsupportfunctions.h" + +/** + * @ingroup Public + */ + +/** + * @addtogroup minimumMaximum + * @{ + */ + +static arm_cmsis_nn_status +arm_min_no_broadcast_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec1 = vldrbq_z_s8(input_1, p); + input_1 += 16; + int8x16_t vec2 = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vminq_s8(vec1, vec2), p); + output += 16; + flat_size -= 16; + } +#else + while (flat_size > 0) + { + int8_t in1 = *input_1++; + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in2 : in1; + --flat_size; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +static arm_cmsis_nn_status +arm_min_scalar_s8(const int8_t *input_1, const int8_t *input_2, int8_t *output, int32_t flat_size) +{ +#if defined(ARM_MATH_MVEI) + int8x16_t scalar_vec = vdupq_n_s8(*input_1); + + while (flat_size > 0) + { + mve_pred16_t p = vctp8q(flat_size); + + int8x16_t vec = vldrbq_z_s8(input_2, p); + input_2 += 16; + + vstrbq_p_s8(output, vminq_s8(scalar_vec, vec), p); + output += 16; + flat_size -= 16; + } +#else + int8_t in1 = *input_1; + while (flat_size > 0) + { + int8_t in2 = *input_2++; + *output++ = in1 >= in2 ? in2 : in1; + --flat_size; + } +#endif + return ARM_CMSIS_NN_SUCCESS; +} + +/* + * s8 minimum + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_minimum_s8(const cmsis_nn_context *ctx, + const int8_t *input_1_data, + const cmsis_nn_dims *input_1_dims, + const int8_t *input_2_data, + const cmsis_nn_dims *input_2_dims, + int8_t *output_data, + const cmsis_nn_dims *output_dims) +{ + (void)ctx; + const int32_t output_batch = output_dims->n; + const int32_t output_height = output_dims->h; + const int32_t output_width = output_dims->w; + + const int32_t input_1_batch = input_1_dims->n; + const int32_t input_1_height = input_1_dims->h; + const int32_t input_1_width = input_1_dims->w; + const int32_t input_1_channels = input_1_dims->c; + + const int32_t input_2_batch = input_2_dims->n; + const int32_t input_2_height = input_2_dims->h; + const int32_t input_2_width = input_2_dims->w; + const int32_t input_2_channels = input_2_dims->c; + + int32_t flat_size_1 = input_1_batch * input_1_height * input_1_width * input_1_channels; + int32_t flat_size_2 = input_2_batch * input_2_height * input_2_width * input_2_channels; + + if (arm_check_broadcast_required(input_1_dims, input_2_dims)) + { + if (flat_size_1 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_data, input_2_data, output_data, flat_size_2); + } + else if (flat_size_2 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_data, input_1_data, output_data, flat_size_1); + } + else + { + int32_t width_1_diff = input_1_width >= input_2_width ? 0 : input_1_channels; + int32_t width_2_diff = input_2_width >= input_1_width ? 0 : input_2_channels; + + int32_t height_1_diff = + input_1_height >= input_2_height ? width_1_diff : -input_1_width * (input_1_channels - width_1_diff); + int32_t height_2_diff = + input_2_height >= input_1_height ? width_2_diff : -input_2_width * (input_2_channels - width_2_diff); + + int32_t batch_1_diff = + input_1_batch >= input_2_batch ? input_1_channels * input_1_width * input_1_height : 0; + int32_t batch_2_diff = + input_2_batch >= input_1_batch ? 
input_2_channels * input_2_width * input_2_height : 0; + + for (int32_t i_out_batch = 0; i_out_batch < output_batch; i_out_batch++) + { + const int8_t *input_1_ptr = input_1_data; + const int8_t *input_2_ptr = input_2_data; + flat_size_1 = input_1_height * input_1_width * input_1_channels; + flat_size_2 = input_2_height * input_2_width * input_2_channels; + if (input_1_height == input_2_height && input_1_width == input_2_width && + input_1_channels == input_2_channels) + { + arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else if (flat_size_1 == 1) + { + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + } + else if (flat_size_2 == 1) + { + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + } + else + { + flat_size_1 = input_1_width * input_1_channels; + flat_size_2 = input_2_width * input_2_channels; + for (int32_t i_out_height = 0; i_out_height < output_height; i_out_height++) + { + if (input_1_width == input_2_width && input_1_channels == input_2_channels) + { + arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, flat_size_1); + output_data += flat_size_1; + input_1_ptr += flat_size_1; + input_2_ptr += flat_size_1; + } + else if (flat_size_1 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, flat_size_2); + output_data += flat_size_2; + ++input_1_ptr; + input_2_ptr += flat_size_2; + } + else if (flat_size_2 == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, flat_size_1); + output_data += flat_size_1; + ++input_2_ptr; + input_1_ptr += flat_size_1; + } + else + { + for (int32_t i_out_width = 0; i_out_width < output_width; i_out_width++) + { + if (input_1_channels == input_2_channels) + { + arm_min_no_broadcast_s8(input_1_ptr, input_2_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr += input_1_channels; + } + else if (input_1_channels == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_1_ptr, input_2_ptr, output_data, input_2_channels); + output_data += input_2_channels; + input_1_ptr++; + input_2_ptr += input_2_channels; + } + else if (input_2_channels == 1) + { + // arm_min_scalar expects the tensor with the scalar value to be provided first + arm_min_scalar_s8(input_2_ptr, input_1_ptr, output_data, input_1_channels); + output_data += input_1_channels; + input_1_ptr += input_1_channels; + input_2_ptr++; + } + input_1_ptr -= width_1_diff; + input_2_ptr -= width_2_diff; + } + } + input_1_ptr += height_1_diff; + input_2_ptr += height_2_diff; + } + } + input_1_data += batch_1_diff; + input_2_data += batch_2_diff; + } + } + } + else + { + arm_min_no_broadcast_s8(input_1_data, input_2_data, output_data, flat_size_1); + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of Doxygen group + */ diff --git a/Tests/UnitTest/CMakeLists.txt b/Tests/UnitTest/CMakeLists.txt index d51c362f..a333bcc4 100644 --- a/Tests/UnitTest/CMakeLists.txt +++ b/Tests/UnitTest/CMakeLists.txt @@ -97,6 +97,7 @@ add_subdirectory(TestCases/test_arm_fully_connected_s8) add_subdirectory(TestCases/test_arm_fully_connected_s4) add_subdirectory(TestCases/test_arm_grouped_convolve_s8) 
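As a rough usage sketch (not part of the patch): both new kernels take NHWC dimensions via cmsis_nn_dims and accept the common ctx argument for API consistency, though as shown above they do not use it; the shapes and values below are illustrative only.

// Minimal, assumed usage sketch for the new s8 maximum/minimum kernels.
// cmsis_nn_dims is NHWC ordered (n, h, w, c); input_2 is broadcast over h and w here.
#include <stddef.h>
#include "arm_nnfunctions.h"

void maximum_minimum_usage_sketch(void)
{
    const cmsis_nn_dims input_1_dims = {.n = 1, .h = 2, .w = 2, .c = 3};
    const cmsis_nn_dims input_2_dims = {.n = 1, .h = 1, .w = 1, .c = 3};
    const cmsis_nn_dims output_dims = {.n = 1, .h = 2, .w = 2, .c = 3};

    const int8_t input_1[12] = {5, -3, 7, 0, 1, -8, 127, -128, 4, 9, -2, 6};
    const int8_t input_2[3] = {0, 0, 0};
    int8_t output[12];

    // ctx is part of the common CMSIS-NN signature; these kernels ignore it.
    const cmsis_nn_context ctx = {.buf = NULL, .size = 0};

    arm_cmsis_nn_status status =
        arm_maximum_s8(&ctx, input_1, &input_1_dims, input_2, &input_2_dims, output, &output_dims);
    if (status == ARM_CMSIS_NN_SUCCESS)
    {
        // output now holds max(input_1, broadcast(input_2)) element by element.
    }

    // arm_minimum_s8 has the same call shape.
    (void)arm_minimum_s8(&ctx, input_1, &input_1_dims, input_2, &input_2_dims, output, &output_dims);
}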
add_subdirectory(TestCases/test_arm_lstm_unidirectional_s8) +add_subdirectory(TestCases/test_arm_maximum_minimum_s8) add_subdirectory(TestCases/test_arm_max_pool_s16) add_subdirectory(TestCases/test_arm_max_pool_s8) add_subdirectory(TestCases/test_arm_softmax_s16) diff --git a/Tests/UnitTest/README.md b/Tests/UnitTest/README.md index 71fdaa55..01ff3eb1 100644 --- a/Tests/UnitTest/README.md +++ b/Tests/UnitTest/README.md @@ -150,6 +150,8 @@ Operator bit-exactness compability: | add | x | x | | mul | x | x | | batch matmul | x | x | +| minimum | x | x | +| maximum | x | x | ### Refactoring of generate_test_data.py Test data generation is in progress of incrementally moving over to the cleaned up scripts placed in `RefactoredTestGen`. @@ -176,6 +178,8 @@ Current progress: | add | x | | | mul | x | | | batch matmul | | x | +| minimum | | x | +| maximum | | x | ## Overview of the Folders diff --git a/Tests/UnitTest/RefactoredTestGen/Lib/op_maximum_minimum.py b/Tests/UnitTest/RefactoredTestGen/Lib/op_maximum_minimum.py new file mode 100644 index 00000000..b855c8ee --- /dev/null +++ b/Tests/UnitTest/RefactoredTestGen/Lib/op_maximum_minimum.py @@ -0,0 +1,110 @@ +# SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import Lib.op_utils +import tensorflow as tf +import math +import numpy as np + +from tensorflow.lite.python.interpreter import Interpreter +from tensorflow.lite.python.interpreter import OpResolverType +import tf_keras as keras + +class Op_maximum_minimum(Lib.op_utils.Op_type): + + def get_shapes(params): + shapes = {} + shapes["input_tensor_1"] = (params["batch_1"], params["height_1"], params["width_1"], params["channel_1"]) + shapes["input_tensor_2"] = (params["batch_2"], params["height_2"], params["width_2"], params["channel_2"]) + shapes["representational_dataset"] = (params["batch_1"], params["height_1"], params["width_1"], params["channel_1"]) + shapes["representational_dataset2"] = (params["batch_2"], params["height_2"], params["width_2"], params["channel_2"]) + shapes["different_in_shapes"]=True + + return shapes + + def generate_keras_model(shapes, params): + tf.keras.backend.clear_session() + layer_type = None + if params['layer_type'] == "minimum": + layer_type = tf.minimum + elif params['layer_type'] == "maximum": + layer_type = tf.maximum + + input_1_shape = (params["batch_1"], params["height_1"], params["width_1"], params["channel_1"]) + input_2_shape = (params["batch_2"], params["height_2"], params["width_2"], params["channel_2"]) + input_1 = keras.layers.Input(batch_input_shape=input_1_shape) + input_2 = keras.layers.Input(batch_input_shape=input_2_shape) + + layer = layer_type(input_1, input_2) + model = keras.Model([input_1, input_2], [layer]) + + return model + + def generate_data_tflite(tflite_fname, params): + tensors = {} + effective_scales = {} + scales = {} + generated_params = {} + aliases = {} + + # To be removed + aliases["output_multiplier"] = "output_mult" + aliases["output"] = "output_ref" + + interpreter = Interpreter(str(tflite_fname), experimental_op_resolver_type=OpResolverType.BUILTIN_REF) + interpreter.allocate_tensors() + tensor_details = interpreter.get_tensor_details() + + input_1 = tensor_details[0] + input_2 = tensor_details[1] + + input_details = interpreter.get_input_details() + (scales["scale_1"], scales["zero_point_1"]) = input_details[0]['quantization'] + (scales["scale_2"], scales["zero_point2"]) = input_details[1]['quantization'] + + output_details = interpreter.get_output_details() + (scales["output_scale"], scales["output_zero_point"]) = output_details[0]['quantization'] + + minval = Lib.op_utils.get_dtype_min(params["input_data_type"]) + maxval = Lib.op_utils.get_dtype_max(params["input_data_type"]) + + n_output = output_details[0]['shape'][0] + h_output = output_details[0]['shape'][1] + w_output = output_details[0]['shape'][2] + c_output = output_details[0]['shape'][3] + + generated_params["dst_size"] = n_output * h_output * w_output * c_output + generated_params["output_batch"] = n_output + generated_params["output_height"] = h_output + generated_params["output_width"] = w_output + generated_params["output_channel"] = c_output + generated_params["input_1_offset"] = -input_1['quantization_parameters']['zero_points'][0] + generated_params["input_2_offset"] = -input_2['quantization_parameters']['zero_points'][0] + generated_params["output_offset"] = output_details[0]['quantization'][1] + + def quantize_scale(scales): + effective_output_scale = scales["scale_1"] * scales["scale_2"] / scales["output_scale"] + + significand, shift = math.frexp(effective_output_scale) + significand_q31 = round(significand * (1 << 31)) + return significand_q31, shift + + mult, shift = quantize_scale(scales) + generated_params["output_multiplier"] = mult + 
generated_params["output_shift"] = shift + + return Lib.op_utils.Generated_data(generated_params, tensors, scales, effective_scales, aliases) + diff --git a/Tests/UnitTest/RefactoredTestGen/Lib/test.py b/Tests/UnitTest/RefactoredTestGen/Lib/test.py index 0a8b7082..e1a72cc2 100644 --- a/Tests/UnitTest/RefactoredTestGen/Lib/test.py +++ b/Tests/UnitTest/RefactoredTestGen/Lib/test.py @@ -20,6 +20,7 @@ import Lib.op_batch_matmul import Lib.op_fully_connected import Lib.op_pooling +import Lib.op_maximum_minimum import tensorflow as tf import numpy as np from tensorflow.lite.python.interpreter import Interpreter @@ -183,8 +184,10 @@ def get_op_type(op_type_string): return Lib.op_batch_matmul.Op_batch_matmul elif op_type_string == "fully_connected": return Lib.op_fully_connected.Op_fully_connected - if op_type_string == "avgpool" or op_type_string == "maxpool": + elif op_type_string == "avgpool" or op_type_string == "maxpool": return Lib.op_pooling.Op_pooling + elif op_type_string == "maximum_minimum": + return Lib.op_maximum_minimum.Op_maximum_minimum else: raise ValueError(f"Unknown op type '{op_type_string}'") diff --git a/Tests/UnitTest/RefactoredTestGen/test_plan.json b/Tests/UnitTest/RefactoredTestGen/test_plan.json index d3d8359d..dd269014 100644 --- a/Tests/UnitTest/RefactoredTestGen/test_plan.json +++ b/Tests/UnitTest/RefactoredTestGen/test_plan.json @@ -1152,5 +1152,207 @@ "activation_min": -30000 } ] +}, +{ + "suite_name" : "test_arm_minimum_s8", + "op_type" : "maximum_minimum", + "layer_type": "minimum", + "input_data_type": "int8_t", + "interpreter": "tensorflow", + "tflite_generator": "keras", + "tests" : [ + {"name" : "minimum_scalar_1_int8", + "batch_1" : 1, + "height_1" : 1, + "width_1" : 1, + "channel_1" : 1, + "batch_2": 1, + "height_2": 2, + "width_2": 4, + "channel_2": 19, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_scalar_2_int8", + "batch_1" : 1, + "height_1" : 2, + "width_1" : 4, + "channel_1" : 19, + "batch_2": 1, + "height_2": 1, + "width_2": 1, + "channel_2": 1, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_no_broadcast_int8", + "batch_1" : 2, + "height_1" : 2, + "width_1" : 3, + "channel_1" : 18, + "batch_2": 2, + "height_2": 2, + "width_2": 3, + "channel_2": 18, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_broadcast_batch_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 6, + "channel_1" : 21, + "batch_2": 1, + "height_2": 1, + "width_2": 6, + "channel_2": 21, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_broadcast_height_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 2, + "channel_1" : 17, + "batch_2": 2, + "height_2": 4, + "width_2": 2, + "channel_2": 17, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_broadcast_width_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 4, + "channel_1" : 19, + "batch_2": 2, + "height_2": 1, + "width_2": 1, + "channel_2": 19, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "minimum_broadcast_ch_int8", + "batch_1" : 2, + "height_1" : 2, + "width_1" : 4, + "channel_1" : 1, + "batch_2": 2, + "height_2": 2, + "width_2": 4, + "channel_2": 24, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + } + ] +}, +{ + "suite_name" : "test_arm_maximum_s8", + "op_type" : "maximum_minimum", + "layer_type": "maximum", + "input_data_type": 
"int8_t", + "interpreter": "tensorflow", + "tflite_generator": "keras", + "tests" : [ + {"name" : "maximum_scalar_1_int8", + "batch_1" : 1, + "height_1" : 1, + "width_1" : 1, + "channel_1" : 1, + "batch_2": 1, + "height_2": 2, + "width_2": 4, + "channel_2": 19, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_scalar_2_int8", + "batch_1" : 1, + "height_1" : 2, + "width_1" : 4, + "channel_1" : 19, + "batch_2": 1, + "height_2": 1, + "width_2": 1, + "channel_2": 1, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_no_broadcast_int8", + "batch_1" : 2, + "height_1" : 2, + "width_1" : 3, + "channel_1" : 18, + "batch_2": 2, + "height_2": 2, + "width_2": 3, + "channel_2": 18, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_broadcast_batch_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 6, + "channel_1" : 21, + "batch_2": 1, + "height_2": 1, + "width_2": 6, + "channel_2": 21, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_broadcast_height_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 2, + "channel_1" : 17, + "batch_2": 2, + "height_2": 4, + "width_2": 2, + "channel_2": 17, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_broadcast_width_int8", + "batch_1" : 2, + "height_1" : 1, + "width_1" : 4, + "channel_1" : 19, + "batch_2": 2, + "height_2": 1, + "width_2": 1, + "channel_2": 19, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + }, + {"name" : "maximum_broadcast_ch_int8", + "batch_1" : 2, + "height_1" : 2, + "width_1" : 4, + "channel_1" : 1, + "batch_2": 2, + "height_2": 2, + "width_2": 4, + "channel_2": 24, + "pad" : "SAME", + "activation_max": 127, + "activation_min": -128 + } + ] } ] diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/config_data.h new file mode 100644 index 00000000..d6699070 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MAXIMUM_BROADCAST_BATCH_INT8_LAYER_TYPE maximum +#define MAXIMUM_BROADCAST_BATCH_INT8_BATCH_1 2 +#define MAXIMUM_BROADCAST_BATCH_INT8_HEIGHT_1 1 +#define MAXIMUM_BROADCAST_BATCH_INT8_WIDTH_1 6 +#define MAXIMUM_BROADCAST_BATCH_INT8_CHANNEL_1 21 +#define MAXIMUM_BROADCAST_BATCH_INT8_BATCH_2 1 +#define MAXIMUM_BROADCAST_BATCH_INT8_HEIGHT_2 1 +#define MAXIMUM_BROADCAST_BATCH_INT8_WIDTH_2 6 +#define MAXIMUM_BROADCAST_BATCH_INT8_CHANNEL_2 21 +#define MAXIMUM_BROADCAST_BATCH_INT8_PAD SAME +#define MAXIMUM_BROADCAST_BATCH_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_BROADCAST_BATCH_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_BROADCAST_BATCH_INT8_DST_SIZE 252 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_BATCH 2 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_HEIGHT 1 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_WIDTH 6 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_CHANNEL 21 +#define MAXIMUM_BROADCAST_BATCH_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_BROADCAST_BATCH_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_MULTIPLIER 1077923840 +#define MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_1.h new file mode 100644 index 00000000..da728b6d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_1.h @@ -0,0 +1,19 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_batch_int8_input_tensor_1[252] = { + 23, 99, -38, 4, -115, 106, 72, 7, -27, -115, 73, 63, -4, 115, 90, 113, 45, -116, -31, 126, + -22, -32, 52, -53, -90, -92, 17, -5, 91, -31, 52, -73, -5, -106, 2, -24, -35, 79, 49, 31, + 6, -76, 5, 91, -60, 112, -5, 95, -61, -43, 7, -92, 95, 77, 87, 15, 83, -124, 102, -30, + -34, -9, -78, -35, 51, -84, -91, 11, 123, -66, -57, 32, -87, 115, -61, -49, -26, 42, 11, -74, + 13, -125, 114, -42, -95, 28, -47, -2, 2, 79, -98, 61, -29, 115, 61, 53, 34, 66, 15, 80, + 102, -37, 30, 103, 119, -117, 54, -86, 31, -83, 66, -75, 40, 15, 73, -4, -71, -120, 2, -23, + 28, 52, 72, -119, 24, -3, 45, 31, 39, 113, -101, -92, -65, -19, -80, 89, 22, -76, 109, -49, + 29, 63, 93, 69, 19, 104, -88, 2, 33, 105, -56, 99, -119, -123, 21, -81, 118, 123, 80, -31, + 115, 117, -33, 64, -117, -99, -119, -64, 5, 127, 46, -80, -54, 63, 63, -33, 84, -28, -41, 26, + 120, 52, 8, 118, -68, -90, -58, 55, 47, -75, -115, 83, -69, 86, -40, 31, -54, -84, 51, 108, + -27, -2, 81, -103, -54, 10, 114, 55, -79, 75, -40, 106, -96, -13, 49, -64, 2, 108, -48, -118, + -3, 75, 31, 24, -9, 75, -107, 126, -50, -34, 120, 127, -16, -16, -53, 68, -54, 59, -58, 104, + -121, -62, 10, -26, 101, -43, -99, -33, -36, 21, 90, -81}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_2.h new file mode 100644 index 00000000..8eb7a1b5 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/input_tensor_2.h @@ -0,0 +1,13 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_batch_int8_input_tensor_2[126] = { + 58, -74, 110, 112, -66, -86, -5, -63, -4, 82, -92, 27, 37, 26, 1, 7, -117, -106, + 10, 43, -118, 107, 42, -114, -96, 96, 42, 124, 67, 68, -75, 122, 73, -27, -100, -78, + 4, 122, 86, 23, 90, -14, -122, 67, 14, 66, -113, 78, -45, 55, 48, 42, -92, 25, + 31, 64, -123, 23, -38, 81, -24, -105, -105, -125, 32, -30, -95, -44, -45, -80, -102, -42, + -99, -4, -90, 31, 112, -64, -114, -92, -107, 41, -18, 37, 1, 100, -25, -100, 73, 127, + -109, 53, 63, -29, 92, -83, 5, -86, -81, -36, -93, 104, 86, -32, 99, 41, -46, 96, + -126, -121, 121, 26, -57, -107, -49, 18, 51, 27, 14, -56, 88, 92, -55, -121, -29, 0}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/output.h new file mode 100644 index 00000000..80f08c72 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/output.h @@ -0,0 +1,19 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_batch_int8_output[252] = { + 58, 99, 110, 112, -66, 106, 72, 7, -4, 82, 73, 63, 37, 115, 90, 113, 45, -106, 10, 126, -22, 107, 52, + -53, -90, 96, 42, 124, 91, 68, 52, 122, 73, -27, 2, -24, 4, 122, 86, 31, 90, -14, 5, 91, 14, 112, + -5, 95, -45, 55, 48, 42, 95, 77, 87, 64, 83, 23, 102, 81, -24, -9, -78, -35, 51, -30, -91, 11, 123, + -66, -57, 32, -87, 115, -61, 31, 112, 42, 11, -74, 13, 41, 114, 37, 1, 100, -25, -2, 73, 127, -98, 61, + 63, 115, 92, 53, 34, 66, 15, 80, 102, 104, 86, 103, 119, 41, 54, 96, 31, -83, 121, 26, 40, 15, 73, + 18, 51, 27, 14, -23, 88, 92, 72, -119, 24, 0, 58, 31, 110, 113, -66, -86, -5, -19, -4, 89, 22, 27, + 109, 26, 29, 63, 93, 69, 19, 104, -88, 107, 42, 105, -56, 99, 42, 124, 67, 68, 118, 123, 80, -27, 115, + 117, 4, 122, 86, 23, 90, -14, 5, 127, 46, 66, -54, 78, 63, 55, 84, 42, -41, 26, 120, 64, 8, 118, + -38, 81, -24, 55, 47, -75, 32, 83, -69, 86, -40, 31, -54, -42, 51, 108, -27, 31, 112, -64, -54, 10, 114, + 55, -18, 75, 1, 106, -25, -13, 73, 127, 2, 108, 63, -29, 92, 75, 31, 24, -9, 75, -93, 126, 86, -32, + 120, 127, -16, 96, -53, 68, 121, 59, -57, 104, -49, 18, 51, 27, 101, -43, 88, 92, -36, 21, 90, 0}; + +const int8_t *const maximum_broadcast_batch_int8_output_ref = maximum_broadcast_batch_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_batch_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/config_data.h new file mode 100644 index 00000000..edea1db0 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MAXIMUM_BROADCAST_CH_INT8_LAYER_TYPE maximum +#define MAXIMUM_BROADCAST_CH_INT8_BATCH_1 2 +#define MAXIMUM_BROADCAST_CH_INT8_HEIGHT_1 2 +#define MAXIMUM_BROADCAST_CH_INT8_WIDTH_1 4 +#define MAXIMUM_BROADCAST_CH_INT8_CHANNEL_1 1 +#define MAXIMUM_BROADCAST_CH_INT8_BATCH_2 2 +#define MAXIMUM_BROADCAST_CH_INT8_HEIGHT_2 2 +#define MAXIMUM_BROADCAST_CH_INT8_WIDTH_2 4 +#define MAXIMUM_BROADCAST_CH_INT8_CHANNEL_2 24 +#define MAXIMUM_BROADCAST_CH_INT8_PAD SAME +#define MAXIMUM_BROADCAST_CH_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_BROADCAST_CH_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_BROADCAST_CH_INT8_DST_SIZE 384 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_BATCH 2 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_HEIGHT 2 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_WIDTH 4 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_CHANNEL 24 +#define MAXIMUM_BROADCAST_CH_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_BROADCAST_CH_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_MULTIPLIER 1077949184 +#define MAXIMUM_BROADCAST_CH_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_1.h new file mode 100644 index 00000000..835d0e5e --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_1.h @@ -0,0 +1,7 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_ch_int8_input_tensor_1[16] = + {103, -55, -5, -80, -62, 5, 64, -54, -126, -120, -118, 1, -40, -38, -57, 64}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_2.h new file mode 100644 index 00000000..ee0b2974 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/input_tensor_2.h @@ -0,0 +1,27 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_ch_int8_input_tensor_2[384] = { + -81, -124, -123, 90, 87, 22, -80, 115, 98, 27, -124, -79, -107, 75, 96, -48, 86, -99, 116, + 113, -110, -123, 90, -91, -128, -126, -33, 91, -51, -103, -38, -60, 91, 12, -100, 27, -98, -42, + 103, 10, 120, -63, 78, 123, -109, -122, -74, -69, 32, 16, 97, -97, -126, 76, -64, -112, 0, + -79, -6, -59, -97, 55, 27, 99, -106, 56, 59, 57, 25, 118, -4, 123, -72, 13, -37, -20, + 94, 13, -101, -23, -71, -89, 66, -101, -19, 114, -80, -106, 108, -102, 15, 98, -100, -116, 7, + -98, 58, -77, -106, 113, 63, 75, 83, -105, -109, 120, 13, 4, 67, 20, 38, 12, -100, -95, + -40, -71, -59, 99, -68, -117, -8, -29, 28, 69, -111, -107, -112, 97, -121, 95, -46, 97, -34, + -30, 7, -100, -58, -71, -75, -64, 84, 39, 123, 1, -98, -62, -74, 113, 111, -97, -29, 2, + 44, -7, -57, -19, -61, -118, 47, -84, 78, 5, -77, -35, -126, -3, 22, -116, -73, -56, 38, + -94, 53, 112, -84, -17, -86, 113, -53, -80, -50, 92, 90, -126, -22, -100, 93, -50, 102, 108, + -88, -1, 87, -44, -2, 75, 52, 33, 34, -63, 37, 40, -73, 118, -36, 52, 55, 32, -124, + -34, -16, -88, -75, 38, 40, -93, -23, -126, -64, -104, -79, -68, 116, 71, 60, 101, -39, 65, + 65, -94, 120, 63, -83, 10, -73, 97, -103, 22, 123, -61, -55, 85, -56, -62, 101, 52, 13, + 44, 45, -10, -38, -109, 75, 39, 101, -16, 125, 14, -22, 39, -109, -84, 11, -58, -123, 65, + 13, -77, -106, 75, 84, -90, -97, -32, 100, 60, 16, -6, -79, 68, 124, 17, -16, 44, -103, + -58, 13, -39, -31, 59, -52, 120, -19, 117, 26, 93, -38, 52, -105, -44, 26, -45, -76, -30, + -23, -126, 41, 39, -36, 59, 71, -53, 85, 78, -100, -21, 58, 101, -2, 113, -97, -90, 21, + -85, -81, 74, 48, 6, 8, -81, -92, 57, 112, 56, 107, -110, -39, 75, 80, -45, -74, 88, + 12, -93, -68, 20, -78, -15, -95, -7, -58, 30, -47, -39, 82, -115, 12, -114, -4, 27, 45, + -82, 103, 51, 100, -118, -7, 68, -48, -3, 65, -40, 1, 120, 40, 30, 19, 81, -39, -49, + 25, 103, -48, -102}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/output.h new file mode 100644 index 00000000..b99f853a --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/output.h @@ -0,0 +1,26 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_ch_int8_output[384] = { + 103, 103, 103, 103, 103, 103, 103, 115, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 116, 113, 103, 103, + 103, 103, -55, -55, -33, 91, -51, -55, -38, -55, 91, 12, -55, 27, -55, -42, 103, 10, 120, -55, 78, 123, + -55, -55, -55, -55, 32, 16, 97, -5, -5, 76, -5, -5, 0, -5, -5, -5, -5, 55, 27, 99, -5, 56, + 59, 57, 25, 118, -4, 123, -72, 13, -37, -20, 94, 13, -80, -23, -71, -80, 66, -80, -19, 114, -80, -80, + 108, -80, 15, 98, -80, -80, 7, -80, 58, -62, -62, 113, 63, 75, 83, -62, -62, 120, 13, 4, 67, 20, + 38, 12, -62, -62, -40, -62, -59, 99, -62, -62, 5, 5, 28, 69, 5, 5, 5, 97, 5, 95, 5, 97, + 5, 5, 7, 5, 5, 5, 5, 5, 84, 39, 123, 5, 64, 64, 64, 113, 111, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 64, 78, 64, 64, 64, 64, 64, 64, 64, -54, -54, 38, -54, 53, 112, -54, -17, + -54, 113, -53, -54, -50, 92, 90, -54, -22, -54, 93, -50, 102, 108, -54, -1, 87, -44, -2, 75, 52, 33, + 34, -63, 37, 40, -73, 118, -36, 52, 55, 32, -124, -34, -16, -88, -75, 38, 40, -93, -23, -120, -64, -104, + -79, -68, 116, 71, 60, 101, -39, 65, 65, -94, 120, 63, -83, 10, -73, 97, -103, 22, 123, -61, -55, 85, + -56, -62, 101, 52, 13, 44, 45, -10, -38, -109, 75, 39, 101, -16, 125, 14, -22, 39, -109, -84, 11, -58, + 1, 65, 13, 1, 1, 75, 84, 1, 1, 1, 100, 60, 16, 1, 1, 68, 124, 17, 1, 44, 1, 1, + 13, 1, -31, 59, -40, 120, -19, 117, 26, 93, -38, 52, -40, -40, 26, -40, -40, -30, -23, -40, 41, 39, + -36, 59, 71, -40, 85, 78, -38, -21, 58, 101, -2, 113, -38, -38, 21, -38, -38, 74, 48, 6, 8, -38, + -38, 57, 112, 56, 107, -38, -39, 75, 80, -45, -57, 88, 12, -57, -57, 20, -57, -15, -57, -7, -57, 30, + -47, -39, 82, -57, 12, -57, -4, 27, 64, 64, 103, 64, 100, 64, 64, 68, 64, 64, 65, 64, 64, 120, + 64, 64, 64, 81, 64, 64, 64, 103, 64, 64}; + +const int8_t *const maximum_broadcast_ch_int8_output_ref = maximum_broadcast_ch_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_ch_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/config_data.h new file mode 100644 index 00000000..40a0d3a7 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MAXIMUM_BROADCAST_HEIGHT_INT8_LAYER_TYPE maximum +#define MAXIMUM_BROADCAST_HEIGHT_INT8_BATCH_1 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_1 1 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_WIDTH_1 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_1 17 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_BATCH_2 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_2 4 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_WIDTH_2 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_2 17 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_PAD SAME +#define MAXIMUM_BROADCAST_HEIGHT_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE 272 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_BATCH 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_HEIGHT 4 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_WIDTH 2 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_CHANNEL 17 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_MULTIPLIER 1077934720 +#define MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_1.h new file mode 100644 index 00000000..23abfe8d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_1.h @@ -0,0 +1,9 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_height_int8_input_tensor_1[68] = { + 8, -126, -63, 80, 119, 62, -53, 61, -98, -16, 119, 30, -75, 24, 119, 43, 75, 47, 64, 15, 37, 7, 33, + -24, 23, 62, 46, 89, 88, -62, 82, -41, 17, -110, 0, -26, 16, 23, 47, 118, 7, -77, 61, -114, -81, 76, + -89, 45, 39, -82, 121, -94, -62, 65, -30, -74, -115, -24, -60, 56, 0, 38, -91, 49, -80, -101, -13, -51}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_2.h new file mode 100644 index 00000000..96fa0cdf --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/input_tensor_2.h @@ -0,0 +1,20 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_height_int8_input_tensor_2[272] = { + 29, -93, 116, -92, 43, 3, -107, -1, -127, -22, 10, -23, 46, 30, -12, 120, 13, 104, 30, -110, + -67, 111, 10, -55, -100, 111, 57, 112, 40, -64, 66, 17, 106, 112, -26, 92, -115, 3, -91, 21, + 0, 7, 11, -60, 126, -75, -42, 118, 3, -61, -110, -80, 70, 15, 5, -53, -25, -57, -42, 104, + 15, 32, -51, 70, 105, 22, 23, -107, 103, -14, -98, 124, -83, 31, 45, 113, 64, 11, 16, 18, + 4, 91, 63, -67, 26, 20, -112, -3, -57, -117, -45, -101, -71, 35, 51, -47, -40, 102, 83, 41, + -56, 28, 101, 91, 52, 86, -97, -79, 37, -114, -84, 37, -100, 85, -43, 99, 4, -71, -127, 117, + 46, -125, 65, 76, 105, 47, -54, 12, 73, -93, -69, 31, -106, 35, -77, -64, -110, 45, 64, 124, + -92, -94, -96, -77, 86, 70, 61, -78, 41, 43, -105, -100, -69, -94, 95, 86, -45, -74, 20, 91, + 115, 93, -75, -25, -81, 46, -108, 55, -33, 76, -5, 52, 50, 91, -100, 60, -30, -87, -111, 10, + -33, -120, 85, -107, -46, 65, -14, 112, -24, 10, 91, 2, 28, -78, 23, 111, -4, -55, 46, 55, + 12, -103, -29, 60, -56, 100, -66, 22, -10, -58, 9, -89, -32, 26, -109, 90, 68, 55, -18, 95, + -55, 54, -27, -86, 31, 43, 4, 53, 70, 76, -88, -70, 16, -60, 95, -6, 0, -58, 71, -11, + 126, 78, -111, 51, 126, -75, -68, -27, 48, -72, 41, 3, 98, 118, 110, 77, -100, -84, -77, 81, + 17, -124, -13, 71, 73, -20, 29, -31, -126, -100, 13, -69}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/output.h new file mode 100644 index 00000000..ba0b9a9d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/output.h @@ -0,0 +1,20 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_height_int8_output[272] = { + 29, -93, 116, 80, 119, 62, -53, 61, -98, -16, 119, 30, 46, 30, 119, 120, 75, 104, 64, 15, 37, 111, 33, + -24, 23, 111, 57, 112, 88, -62, 82, 17, 106, 112, 8, 92, -63, 80, 119, 62, 0, 61, 11, -16, 126, 30, + -42, 118, 119, 43, 75, 47, 70, 15, 37, 7, 33, -24, 23, 104, 46, 89, 88, 70, 105, 22, 23, -107, 103, + -14, -63, 124, 119, 62, 45, 113, 64, 11, 119, 30, 4, 91, 119, 43, 75, 47, 64, 15, 37, 7, 33, -24, + 23, 62, 51, 89, 88, 102, 83, 41, 17, 28, 101, 91, 52, 86, 119, 62, 37, 61, -84, 37, 119, 85, -43, + 99, 119, 43, 75, 117, 64, 15, 65, 76, 105, 47, 23, 62, 73, 89, 88, 31, 82, 35, 17, -64, 0, 45, + 64, 124, 47, 118, 7, -77, 86, 70, 61, 76, 41, 45, 39, -82, 121, -94, 95, 86, -30, -74, 20, 91, 115, + 93, 0, 38, -81, 49, -80, 55, -13, 76, 0, 52, 50, 91, 47, 118, 7, -77, 61, 10, -33, 76, 85, 45, + 39, 65, 121, 112, -24, 65, 91, 2, 28, -24, 23, 111, 0, 38, 46, 55, 12, -101, -13, 60, 0, 100, 16, + 23, 47, 118, 9, -77, 61, 26, -81, 90, 68, 55, 39, 95, 121, 54, -27, 65, 31, 43, 4, 53, 70, 76, + 0, 38, 16, 49, 95, -6, 0, -51, 71, -11, 126, 78, 47, 118, 126, -75, 61, -27, 48, 76, 41, 45, 98, + 118, 121, 77, -62, 65, -30, 81, 17, -24, -13, 71, 73, 38, 29, 49, -80, -100, 13, -51}; + +const int8_t *const maximum_broadcast_height_int8_output_ref = maximum_broadcast_height_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_height_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/config_data.h new file mode 100644 index 00000000..7cbd49ed --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MAXIMUM_BROADCAST_WIDTH_INT8_LAYER_TYPE maximum +#define MAXIMUM_BROADCAST_WIDTH_INT8_BATCH_1 2 +#define MAXIMUM_BROADCAST_WIDTH_INT8_HEIGHT_1 1 +#define MAXIMUM_BROADCAST_WIDTH_INT8_WIDTH_1 4 +#define MAXIMUM_BROADCAST_WIDTH_INT8_CHANNEL_1 19 +#define MAXIMUM_BROADCAST_WIDTH_INT8_BATCH_2 2 +#define MAXIMUM_BROADCAST_WIDTH_INT8_HEIGHT_2 1 +#define MAXIMUM_BROADCAST_WIDTH_INT8_WIDTH_2 1 +#define MAXIMUM_BROADCAST_WIDTH_INT8_CHANNEL_2 19 +#define MAXIMUM_BROADCAST_WIDTH_INT8_PAD SAME +#define MAXIMUM_BROADCAST_WIDTH_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_BROADCAST_WIDTH_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_BROADCAST_WIDTH_INT8_DST_SIZE 152 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_BATCH 2 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_HEIGHT 1 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_WIDTH 4 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_CHANNEL 19 +#define MAXIMUM_BROADCAST_WIDTH_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_BROADCAST_WIDTH_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_MULTIPLIER 1077921152 +#define MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_1.h new file mode 100644 index 00000000..6d1e3951 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_1.h @@ -0,0 +1,13 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_width_int8_input_tensor_1[152] = { + 2, -93, 7, -17, -24, -68, 105, -91, 106, 87, -14, -60, -54, 41, -63, 45, -45, 38, -52, 49, 5, -3, + -69, -55, -75, -84, 31, -3, -20, 113, 67, -120, -98, 29, 92, 72, 51, 47, -16, 12, -88, -12, 76, -128, + 50, -101, -69, -119, 45, 60, 23, -100, -19, -113, 18, -44, 70, 44, 89, 80, 11, -26, -39, -105, -85, -16, + 59, 56, 50, -89, -23, -37, 31, 33, 27, -117, -22, 107, 106, -41, 42, -16, 89, 68, -94, 92, -15, 40, + 35, -113, -31, 100, -10, 87, 95, 22, 127, 71, -79, -34, -13, 35, -49, 40, -15, 72, 105, -29, -76, 81, + -40, 13, 9, -40, 63, 69, -83, -125, -6, 94, -14, 57, -29, 61, 53, 56, -42, 56, 32, -20, -20, 74, + 79, 55, -78, -24, -20, -13, -77, 118, 8, 123, 102, -52, 80, 83, 10, 120, 115, 120, -67, -126}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_2.h new file mode 100644 index 00000000..0e17f528 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/input_tensor_2.h @@ -0,0 +1,8 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_broadcast_width_int8_input_tensor_2[38] = { + -28, 109, 14, 85, -82, 126, -52, -115, 84, 47, 127, 95, -96, -83, -110, -72, 43, -55, 28, + 79, -111, 68, 102, -8, 114, -109, 27, 30, 98, 38, 126, -104, 98, 103, -60, -57, -28, 2}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/output.h new file mode 100644 index 00000000..4f36d226 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/output.h @@ -0,0 +1,15 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_broadcast_width_int8_output[152] = { + 2, 109, 14, 85, -24, 126, 105, -91, 106, 87, 127, 95, -54, 41, -63, 45, 43, 38, 28, 49, 109, 14, + 85, -55, 126, -52, 31, 84, 47, 127, 95, -96, -83, 29, 92, 72, 51, 47, -16, 109, 14, 85, 76, 126, + 50, -101, 84, 47, 127, 95, 23, -83, -19, -72, 43, -44, 70, 44, 109, 80, 85, -26, 126, -52, -85, 84, + 59, 127, 95, -89, -23, -37, 31, 43, 27, 28, 79, 107, 106, 102, 42, 114, 89, 68, 30, 98, 38, 126, + 35, 98, 103, 100, -10, 87, 95, 79, 127, 71, 102, -8, 114, 35, 27, 40, 98, 72, 126, -29, 98, 103, + -40, 13, 9, 2, 79, 69, 68, 102, -6, 114, -14, 57, 30, 98, 53, 126, -42, 98, 103, -20, -20, 74, + 79, 79, -78, 68, 102, -8, 114, 118, 27, 123, 102, 38, 126, 83, 98, 120, 115, 120, -28, 2}; + +const int8_t *const maximum_broadcast_width_int8_output_ref = maximum_broadcast_width_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_broadcast_width_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/config_data.h new file mode 100644 index 00000000..7cb84458 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MAXIMUM_NO_BROADCAST_INT8_LAYER_TYPE maximum +#define MAXIMUM_NO_BROADCAST_INT8_BATCH_1 2 +#define MAXIMUM_NO_BROADCAST_INT8_HEIGHT_1 2 +#define MAXIMUM_NO_BROADCAST_INT8_WIDTH_1 3 +#define MAXIMUM_NO_BROADCAST_INT8_CHANNEL_1 18 +#define MAXIMUM_NO_BROADCAST_INT8_BATCH_2 2 +#define MAXIMUM_NO_BROADCAST_INT8_HEIGHT_2 2 +#define MAXIMUM_NO_BROADCAST_INT8_WIDTH_2 3 +#define MAXIMUM_NO_BROADCAST_INT8_CHANNEL_2 18 +#define MAXIMUM_NO_BROADCAST_INT8_PAD SAME +#define MAXIMUM_NO_BROADCAST_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_NO_BROADCAST_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_NO_BROADCAST_INT8_DST_SIZE 216 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_BATCH 2 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_HEIGHT 2 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_WIDTH 3 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_CHANNEL 18 +#define MAXIMUM_NO_BROADCAST_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_NO_BROADCAST_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_MULTIPLIER 1077942400 +#define MAXIMUM_NO_BROADCAST_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_1.h new file mode 100644 index 00000000..13192159 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_1.h @@ -0,0 +1,17 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_no_broadcast_int8_input_tensor_1[216] = { + -91, -37, -118, -9, 122, 3, -64, -40, 94, -85, 14, -50, -79, 103, 105, -27, -50, 43, 62, -88, + 33, 46, -119, -27, 114, -23, -15, -99, -124, 60, 56, 90, 70, -34, -9, 78, -10, 32, 37, 15, + -95, 87, 125, -118, 12, 63, 59, 33, 84, 15, 127, -55, -45, -61, -51, 13, -2, 64, 105, 50, + 101, 96, 101, -58, 56, -123, 5, -31, 90, -100, 57, -80, 68, 115, -107, -3, 44, 11, -119, -93, + -74, 24, 9, 93, -114, -59, 105, 103, -127, 54, 125, -127, -88, 53, -37, -84, 108, -31, 101, -86, + -18, 31, -46, 62, -73, 70, -30, -69, 71, -108, -94, 77, -39, -46, -81, -46, 45, 42, 79, 28, + -93, 12, 34, -44, 0, -22, 123, 110, -24, 78, -123, 13, -72, 23, -52, -93, 94, 89, 62, 27, + 98, 108, -62, -26, 19, -13, -2, 69, 12, -7, -117, -55, 25, -126, -53, 90, -89, 84, -65, 89, + -43, -121, 74, -3, -89, -78, 44, 43, 35, 56, -52, -70, -99, 19, 108, -93, 55, 95, 42, -92, + -71, -84, 66, 93, 30, 78, -16, 101, -112, 60, 36, -100, 4, -120, 26, 105, -15, -47, -49, -120, + -39, 37, -97, 111, -20, 40, 16, -2, -65, -91, 62, 65, -125, 89, -113, 35}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_2.h new file mode 100644 index 00000000..4387b2e0 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/input_tensor_2.h @@ -0,0 +1,17 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_no_broadcast_int8_input_tensor_2[216] = { + 96, 66, 89, 67, -72, 100, 90, -45, 50, -51, 106, 26, 97, 95, -92, 11, 54, -84, 2, -94, + 41, -87, -64, -128, -128, 70, 109, 62, 116, 38, 78, -85, -33, 33, 103, -84, -112, -65, 10, -11, + -66, 69, 79, -77, 57, -63, 37, 36, -104, -32, -56, -69, -72, 48, 16, 3, -92, 117, 43, -85, + 1, -89, 63, 78, 26, -125, 109, 86, 41, 58, 45, 70, -65, 58, -118, -105, -44, 2, -41, -128, + 122, 3, -54, -78, -120, 105, -46, -19, 90, -20, 19, 55, -15, 53, 70, -72, -74, 93, -53, 11, + 99, -40, 44, -104, -87, 124, -96, -110, 77, 73, 116, 18, 116, 113, -38, 14, 34, -68, 27, -21, + -94, -63, -29, -40, -110, 80, 84, -53, -97, 103, -4, 8, -16, 27, -25, -126, -111, -10, -117, -111, + 4, 106, 59, 55, 48, 83, 118, -114, -98, -26, -18, 65, -43, 46, 28, 35, 62, 110, -118, 26, + -43, 30, -124, -44, 90, -12, 14, 100, -85, 57, 69, 87, -17, 119, -60, -34, -19, 1, 90, -21, + 54, 109, -44, 40, 95, -20, 93, 81, 78, -8, -23, 18, 81, -17, 3, 113, -51, 120, -68, 63, + 89, 102, -42, 45, -93, 23, -29, 57, 117, -11, 24, -116, -4, -61, 46, 22}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/output.h new file mode 100644 index 00000000..222ce483 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/output.h @@ -0,0 +1,18 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_no_broadcast_int8_output[216] = { + 96, 66, 89, 67, 122, 100, 90, -40, 94, -51, 106, 26, 97, 103, 105, 11, 54, 43, 62, -88, 41, 46, + -64, -27, 114, 70, 109, 62, 116, 60, 78, 90, 70, 33, 103, 78, -10, 32, 37, 15, -66, 87, 125, -77, + 57, 63, 59, 36, 84, 15, 127, -55, -45, 48, 16, 13, -2, 117, 105, 50, 101, 96, 101, 78, 56, -123, + 109, 86, 90, 58, 57, 70, 68, 115, -107, -3, 44, 11, -41, -93, 122, 24, 9, 93, -114, 105, 105, 103, + 90, 54, 125, 55, -15, 53, 70, -72, 108, 93, 101, 11, 99, 31, 44, 62, -73, 124, -30, -69, 77, 73, + 116, 77, 116, 113, -38, 14, 45, 42, 79, 28, -93, 12, 34, -40, 0, 80, 123, 110, -24, 103, -4, 13, + -16, 27, -25, -93, 94, 89, 62, 27, 98, 108, 59, 55, 48, 83, 118, 69, 12, -7, -18, 65, 25, 46, + 28, 90, 62, 110, -65, 89, -43, 30, 74, -3, 90, -12, 44, 100, 35, 57, 69, 87, -17, 119, 108, -34, + 55, 95, 90, -21, 54, 109, 66, 93, 95, 78, 93, 101, 78, 60, 36, 18, 81, -17, 26, 113, -15, 120, + -49, 63, 89, 102, -42, 111, -20, 40, 16, 57, 117, -11, 62, 65, -4, 89, 46, 35}; + +const int8_t *const maximum_no_broadcast_int8_output_ref = maximum_no_broadcast_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_no_broadcast_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/config_data.h new file mode 100644 index 00000000..0bab46c4 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 
3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#define MAXIMUM_SCALAR_1_INT8_LAYER_TYPE maximum +#define MAXIMUM_SCALAR_1_INT8_BATCH_1 1 +#define MAXIMUM_SCALAR_1_INT8_HEIGHT_1 1 +#define MAXIMUM_SCALAR_1_INT8_WIDTH_1 1 +#define MAXIMUM_SCALAR_1_INT8_CHANNEL_1 1 +#define MAXIMUM_SCALAR_1_INT8_BATCH_2 1 +#define MAXIMUM_SCALAR_1_INT8_HEIGHT_2 2 +#define MAXIMUM_SCALAR_1_INT8_WIDTH_2 4 +#define MAXIMUM_SCALAR_1_INT8_CHANNEL_2 19 +#define MAXIMUM_SCALAR_1_INT8_PAD SAME +#define MAXIMUM_SCALAR_1_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_SCALAR_1_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_SCALAR_1_INT8_DST_SIZE 152 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_BATCH 1 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_HEIGHT 2 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_WIDTH 4 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_CHANNEL 19 +#define MAXIMUM_SCALAR_1_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_SCALAR_1_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_MULTIPLIER 1077632000 +#define MAXIMUM_SCALAR_1_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_1.h new file mode 100644 index 00000000..0bacd143 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_1.h @@ -0,0 +1,6 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_scalar_1_int8_input_tensor_1[1] = {-87}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_2.h new file mode 100644 index 00000000..6fd1d883 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/input_tensor_2.h @@ -0,0 +1,14 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_scalar_1_int8_input_tensor_2[152] = { + 103, -67, -80, -9, -25, 113, 125, 6, -111, -106, 101, 98, -71, -41, -86, -125, 46, 4, 42, + 124, -125, -62, 89, -104, -45, 67, -38, 61, -59, -29, -111, -1, -65, 106, 54, -104, 69, -32, + -7, -11, 101, -93, 34, 50, -99, -59, 4, 116, -91, 20, -45, 7, -47, -26, 53, 108, -27, + -43, 50, -40, -17, -85, 47, -31, -102, 89, 64, -4, 6, -1, -94, 57, -70, 23, 30, -32, + 64, 12, 88, -52, -3, 127, 23, 7, 36, -95, -114, 21, 64, 114, 72, -122, 108, -126, -38, + -71, 35, -101, -78, -58, 25, -88, -123, 86, 59, -45, -78, 73, 88, -89, 37, -66, 81, -83, + -102, 64, -6, -37, 29, -100, -93, -61, 2, 39, -111, 121, -36, -1, 106, -2, 100, -77, -17, + 85, 44, 38, 19, 31, 57, -76, 73, -81, 124, 53, -30, 101, -28, -87, 38, -114, 13, 77}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/output.h new file mode 100644 index 00000000..01e1411f --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/output.h @@ -0,0 +1,15 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_scalar_1_int8_output[152] = { + 103, -67, -80, -9, -25, 113, 125, 6, -87, -87, 101, 98, -71, -41, -86, -87, 46, 4, 42, 124, -87, -62, + 89, -87, -45, 67, -38, 61, -59, -29, -87, -1, -65, 106, 54, -87, 69, -32, -7, -11, 101, -87, 34, 50, + -87, -59, 4, 116, -87, 20, -45, 7, -47, -26, 53, 108, -27, -43, 50, -40, -17, -85, 47, -31, -87, 89, + 64, -4, 6, -1, -87, 57, -70, 23, 30, -32, 64, 12, 88, -52, -3, 127, 23, 7, 36, -87, -87, 21, + 64, 114, 72, -87, 108, -87, -38, -71, 35, -87, -78, -58, 25, -87, -87, 86, 59, -45, -78, 73, 88, -87, + 37, -66, 81, -83, -87, 64, -6, -37, 29, -87, -87, -61, 2, 39, -87, 121, -36, -1, 106, -2, 100, -77, + -17, 85, 44, 38, 19, 31, 57, -76, 73, -81, 124, 53, -30, 101, -28, -87, 38, -87, 13, 77}; + +const int8_t *const maximum_scalar_1_int8_output_ref = maximum_scalar_1_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_1_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/config_data.h new file mode 100644 index 00000000..6c6117dc --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#define MAXIMUM_SCALAR_2_INT8_LAYER_TYPE maximum +#define MAXIMUM_SCALAR_2_INT8_BATCH_1 1 +#define MAXIMUM_SCALAR_2_INT8_HEIGHT_1 2 +#define MAXIMUM_SCALAR_2_INT8_WIDTH_1 4 +#define MAXIMUM_SCALAR_2_INT8_CHANNEL_1 19 +#define MAXIMUM_SCALAR_2_INT8_BATCH_2 1 +#define MAXIMUM_SCALAR_2_INT8_HEIGHT_2 1 +#define MAXIMUM_SCALAR_2_INT8_WIDTH_2 1 +#define MAXIMUM_SCALAR_2_INT8_CHANNEL_2 1 +#define MAXIMUM_SCALAR_2_INT8_PAD SAME +#define MAXIMUM_SCALAR_2_INT8_ACTIVATION_MAX 127 +#define MAXIMUM_SCALAR_2_INT8_ACTIVATION_MIN -128 +#define MAXIMUM_SCALAR_2_INT8_DST_SIZE 152 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_BATCH 1 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_HEIGHT 2 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_WIDTH 4 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_CHANNEL 19 +#define MAXIMUM_SCALAR_2_INT8_INPUT_1_OFFSET 128 +#define MAXIMUM_SCALAR_2_INT8_INPUT_2_OFFSET 128 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_OFFSET -128 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_MULTIPLIER 1077922304 +#define MAXIMUM_SCALAR_2_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_1.h new file mode 100644 index 00000000..27ca538d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_1.h @@ -0,0 +1,14 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t maximum_scalar_2_int8_input_tensor_1[152] = { + 62, -97, -75, -101, 59, 85, -61, -23, -9, 124, 11, -43, 28, -32, 47, -77, -71, 60, 95, + 25, 74, 5, 102, 95, 110, 71, 98, -57, 44, 88, -124, 121, 5, 16, -4, 30, 43, 67, + 99, -83, 78, -70, 97, -38, 68, 51, 32, 61, -116, -59, 62, 108, -107, 80, -111, -39, -101, + 21, 7, 30, -27, -73, 122, 13, 50, 100, 8, -59, 113, -35, 38, -19, -31, -107, -88, -70, + 76, 31, -78, 69, 75, 76, 83, 14, 26, 48, -7, 89, -31, -13, -119, 113, -43, 80, -51, + -112, 106, 5, -80, -47, 14, -1, 112, -99, 32, -43, -58, 37, -1, 40, -30, 101, 4, 57, + 127, -60, -118, 110, -109, -49, -98, 43, 117, -10, -64, -91, 12, -60, 40, -30, -119, 52, 121, + -26, 42, 42, -9, -2, -123, -102, -56, -8, 14, 20, -82, 93, -26, 20, -64, 89, -9, -119}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_2.h new file mode 100644 index 00000000..0feab046 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/input_tensor_2.h @@ -0,0 +1,6 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_scalar_2_int8_input_tensor_2[1] = {-36}; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/output.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/output.h new file mode 100644 index 00000000..1d9d8926 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/output.h @@ -0,0 +1,15 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t maximum_scalar_2_int8_output[152] = { + 62, -36, -36, -36, 59, 85, -36, -23, -9, 124, 11, -36, 28, -32, 47, -36, -36, 60, 95, 25, 74, 5, + 102, 95, 110, 71, 98, -36, 44, 88, -36, 121, 5, 16, -4, 30, 43, 67, 99, -36, 78, -36, 97, -36, + 68, 51, 32, 61, -36, -36, 62, 108, -36, 80, -36, -36, -36, 21, 7, 30, -27, -36, 122, 13, 50, 100, + 8, -36, 113, -35, 38, -19, -31, -36, -36, -36, 76, 31, -36, 69, 75, 76, 83, 14, 26, 48, -7, 89, + -31, -13, -36, 113, -36, 80, -36, -36, 106, 5, -36, -36, 14, -1, 112, -36, 32, -36, -36, 37, -1, 40, + -30, 101, 4, 57, 127, -36, -36, 110, -36, -36, -36, 43, 117, -10, -36, -36, 12, -36, 40, -30, -36, 52, + 121, -26, 42, 42, -9, -2, -36, -36, -36, -8, 14, 20, -36, 93, -26, 20, -36, 89, -9, -36}; + +const int8_t *const maximum_scalar_2_int8_output_ref = maximum_scalar_2_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/maximum_scalar_2_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/config_data.h new file mode 100644 index 00000000..d0c92536 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). 
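/*
 * Illustrative cross-check (not part of the generated patch): because the second
 * tensor of the maximum_scalar_2_int8 case above holds a single value, the
 * reference output is simply the element-wise int8 maximum of input_tensor_1 and
 * that broadcast scalar. The include path and helper name below are hypothetical;
 * only the array and macro names come from the headers in this patch.
 */
#include <assert.h>
#include <stdint.h>
#include "TestData/maximum_scalar_2_int8/test_data.h" /* hypothetical include path */

static void check_maximum_scalar_2_int8(void)
{
    const int8_t scalar = maximum_scalar_2_int8_input_tensor_2[0]; /* -36, broadcast over every element */
    for (int i = 0; i < MAXIMUM_SCALAR_2_INT8_DST_SIZE; i++)
    {
        const int8_t a = maximum_scalar_2_int8_input_tensor_1[i];
        const int8_t expected = (a > scalar) ? a : scalar;
        assert(expected == maximum_scalar_2_int8_output_ref[i]);
    }
}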
+// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#define MINIMUM_BROADCAST_BATCH_INT8_LAYER_TYPE minimum +#define MINIMUM_BROADCAST_BATCH_INT8_BATCH_1 2 +#define MINIMUM_BROADCAST_BATCH_INT8_HEIGHT_1 1 +#define MINIMUM_BROADCAST_BATCH_INT8_WIDTH_1 6 +#define MINIMUM_BROADCAST_BATCH_INT8_CHANNEL_1 21 +#define MINIMUM_BROADCAST_BATCH_INT8_BATCH_2 1 +#define MINIMUM_BROADCAST_BATCH_INT8_HEIGHT_2 1 +#define MINIMUM_BROADCAST_BATCH_INT8_WIDTH_2 6 +#define MINIMUM_BROADCAST_BATCH_INT8_CHANNEL_2 21 +#define MINIMUM_BROADCAST_BATCH_INT8_PAD SAME +#define MINIMUM_BROADCAST_BATCH_INT8_ACTIVATION_MAX 127 +#define MINIMUM_BROADCAST_BATCH_INT8_ACTIVATION_MIN -128 +#define MINIMUM_BROADCAST_BATCH_INT8_DST_SIZE 252 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_BATCH 2 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_HEIGHT 1 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_WIDTH 6 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_CHANNEL 21 +#define MINIMUM_BROADCAST_BATCH_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_BROADCAST_BATCH_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_MULTIPLIER 2143453696 +#define MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_SHIFT -8 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_1.h new file mode 100644 index 00000000..e6804807 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_1.h @@ -0,0 +1,19 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_batch_int8_input_tensor_1[252] = { + 112, -49, 74, -2, -52, -51, -31, -48, 110, 123, 49, -123, -13, 82, 117, -94, 40, 83, 52, -61, + 29, 24, -84, 34, -62, 107, -49, -101, 13, -67, 8, -95, 26, -98, 76, 97, -32, -76, -4, 45, + -80, -19, -75, -53, 31, -29, -79, -78, 17, -6, 85, 79, 56, 35, -65, -83, 51, 64, 1, -7, + 78, 36, -31, 27, -88, -106, 91, -84, -59, 77, -50, 125, 53, 30, 94, -76, 74, 82, -25, 102, + -23, 99, 45, 29, 77, -93, 110, -23, -68, 46, -60, 3, 54, -37, -48, -37, -1, -103, 69, -113, + -122, -28, -125, -48, 12, -123, -98, -39, 98, 40, -49, 53, 55, -115, 118, -36, 67, 83, 107, -49, + 107, 120, 16, 101, 63, -87, -109, -50, -28, 47, 113, 91, -77, -92, 40, 49, -87, -99, -33, -63, + -8, -122, 59, -125, 42, 110, -24, 39, 55, 65, -23, -110, -31, 31, -93, 95, 105, -75, -70, 69, + -102, 44, 103, 77, 98, 19, -83, -59, 127, -17, 126, -44, -102, -21, -42, 27, 33, -54, 26, -62, + -103, 14, -96, -120, -11, 96, -5, 119, 53, 39, -6, -7, 113, -28, -109, 9, -4, -111, 75, 32, + 41, -13, -50, -81, 91, -116, 11, 94, 95, -41, 53, -75, -7, 83, 34, 66, -126, 77, -33, 62, + -48, -14, 45, 14, -63, -64, -128, 117, -2, -28, 106, -82, 43, -115, -70, -118, 92, 14, -11, 78, + -10, -67, -86, 31, 40, -48, -98, 95, 80, -114, -117, 66}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_2.h new file mode 100644 index 00000000..a0d1a040 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/input_tensor_2.h @@ -0,0 +1,12 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). 
+// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_batch_int8_input_tensor_2[126] = { + -74, 59, 17, 52, -84, 51, -47, -78, 44, 12, 92, 82, -127, 40, 62, -99, -83, -99, -111, -9, 45, + -76, 50, -67, 108, -33, -60, 61, 17, 13, 49, -48, -125, 14, 4, -37, 51, 101, -68, -126, 111, 28, + 31, 35, -64, -5, -94, -80, -67, -49, 50, -85, 65, -20, 110, 82, 55, -119, -24, -52, 43, -53, -54, + 1, 106, -52, -10, -67, 100, 77, 109, 8, -11, -108, -125, 21, -94, 109, -34, -89, 74, -12, -70, 74, + 82, 42, -27, 125, -99, -70, 19, 40, 94, 10, -90, -97, 24, -119, -46, 20, 112, 35, 77, 3, 50, + 123, 75, -38, 43, -53, 113, 97, -33, -116, 57, 0, 100, 101, -67, 83, 58, -73, -56, -28, 9, 115}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/output.h new file mode 100644 index 00000000..33de9a84 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/output.h @@ -0,0 +1,22 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_batch_int8_output[252] = { + -74, -49, 17, -2, -84, -51, -47, -78, 44, 12, 49, -123, -127, 40, 62, -99, -83, -99, + -111, -61, 29, -76, -84, -67, -62, -33, -60, -101, 13, -67, 8, -95, -125, -98, 4, -37, + -32, -76, -68, -126, -80, -19, -75, -53, -64, -29, -94, -80, -67, -49, 50, -85, 56, -20, + -65, -83, 51, -119, -24, -52, 43, -53, -54, 1, -88, -106, -10, -84, -59, 77, -50, 8, + -11, -108, -125, -76, -94, 82, -34, -89, -23, -12, -70, 29, 77, -93, -27, -23, -99, -70, + -60, 3, 54, -37, -90, -97, -1, -119, -46, -113, -122, -28, -125, -48, 12, -123, -98, -39, + 43, -53, -49, 53, -33, -116, 57, -36, 67, 83, -67, -49, 58, -73, -56, -28, 9, -87, + -109, -50, -28, 47, -84, 51, -77, -92, 40, 12, -87, -99, -127, -63, -8, -122, -83, -125, + -111, -9, -24, -76, 50, -67, -23, -110, -60, 31, -93, 13, 49, -75, -125, 14, -102, -37, + 51, 77, -68, -126, -83, -59, 31, -17, -64, -44, -102, -80, -67, -49, 33, -85, 26, -62, + -103, 14, -96, -120, -24, -52, -5, -53, -54, 1, -6, -52, -10, -67, -109, 9, -4, -111, + -11, -108, -125, -13, -94, -81, -34, -116, 11, -12, -70, -41, 53, -75, -27, 83, -99, -70, + -126, 40, -33, 10, -90, -97, 24, -119, -63, -64, -128, 35, -2, -28, 50, -82, 43, -115, + -70, -118, 92, 14, -33, -116, -10, -67, -86, 31, -67, -48, -98, -73, -56, -114, -117, 66}; + +const int8_t *const minimum_broadcast_batch_int8_output_ref = minimum_broadcast_batch_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_batch_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/config_data.h new file mode 100644 index 00000000..8188019a --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). 
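/*
 * Illustrative cross-check (not part of the generated patch): in the
 * minimum_broadcast_batch_int8 case above only the batch dimension differs
 * (2 vs 1), so the second tensor is reused for both batches and the reference
 * output is the element-wise int8 minimum with input_tensor_2 indexed modulo the
 * size of one batch. Include path and helper name are hypothetical; array and
 * macro names come from the headers above.
 */
#include <assert.h>
#include <stdint.h>
#include "TestData/minimum_broadcast_batch_int8/test_data.h" /* hypothetical include path */

static void check_minimum_broadcast_batch_int8(void)
{
    const int single_batch = MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_HEIGHT *
                             MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_WIDTH *
                             MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_CHANNEL; /* 1 * 6 * 21 = 126 */
    for (int i = 0; i < MINIMUM_BROADCAST_BATCH_INT8_DST_SIZE; i++)
    {
        const int8_t a = minimum_broadcast_batch_int8_input_tensor_1[i];
        const int8_t b = minimum_broadcast_batch_int8_input_tensor_2[i % single_batch]; /* batch dim of 1 is broadcast */
        const int8_t expected = (a < b) ? a : b;
        assert(expected == minimum_broadcast_batch_int8_output_ref[i]);
    }
}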
+// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#define MINIMUM_BROADCAST_CH_INT8_LAYER_TYPE minimum +#define MINIMUM_BROADCAST_CH_INT8_BATCH_1 2 +#define MINIMUM_BROADCAST_CH_INT8_HEIGHT_1 2 +#define MINIMUM_BROADCAST_CH_INT8_WIDTH_1 4 +#define MINIMUM_BROADCAST_CH_INT8_CHANNEL_1 1 +#define MINIMUM_BROADCAST_CH_INT8_BATCH_2 2 +#define MINIMUM_BROADCAST_CH_INT8_HEIGHT_2 2 +#define MINIMUM_BROADCAST_CH_INT8_WIDTH_2 4 +#define MINIMUM_BROADCAST_CH_INT8_CHANNEL_2 24 +#define MINIMUM_BROADCAST_CH_INT8_PAD SAME +#define MINIMUM_BROADCAST_CH_INT8_ACTIVATION_MAX 127 +#define MINIMUM_BROADCAST_CH_INT8_ACTIVATION_MIN -128 +#define MINIMUM_BROADCAST_CH_INT8_DST_SIZE 384 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_BATCH 2 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_HEIGHT 2 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_WIDTH 4 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_CHANNEL 24 +#define MINIMUM_BROADCAST_CH_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_BROADCAST_CH_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_MULTIPLIER 2147253632 +#define MINIMUM_BROADCAST_CH_INT8_OUTPUT_SHIFT -8 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_1.h new file mode 100644 index 00000000..f205ab9a --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_1.h @@ -0,0 +1,7 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_ch_int8_input_tensor_1[16] = + {-47, -29, 1, -80, 106, 86, -13, -14, 119, 82, 62, 0, -17, 91, -120, -35}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_2.h new file mode 100644 index 00000000..0d57c883 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/input_tensor_2.h @@ -0,0 +1,27 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_broadcast_ch_int8_input_tensor_2[384] = { + -40, 69, -38, -80, 43, -28, -114, 47, -16, -85, -30, 7, 51, -64, -127, -48, -55, -111, -68, + 36, 120, -83, 11, 26, -72, -121, 45, -46, 100, -2, -82, -10, -66, 83, 43, 39, -55, 7, + -75, 42, -80, -87, 77, 57, 42, 110, -12, -92, 24, -108, -114, 110, 90, -109, -24, 4, 4, + 4, -49, -1, -88, -51, 5, 71, 2, 69, 40, -104, 123, -48, -8, -12, 91, -96, -78, -105, + -49, 121, 44, 69, 67, 69, 76, -104, -121, -59, 60, 22, -108, -84, 32, 111, 62, -104, 3, + -102, -109, -3, -103, -79, 52, -12, 46, 55, -41, 15, -2, -109, -128, 66, 57, -44, 112, 5, + 38, -2, -18, 37, 73, -82, -12, -109, 20, -43, 74, 57, -49, -92, 113, 103, 65, 99, -30, + 120, 96, 62, 53, -109, 38, -86, 30, 11, 37, -125, 5, 88, 81, -61, 115, 10, 19, 33, + 19, -126, -75, -8, -92, 52, -40, -1, -42, 15, -90, 99, -68, 67, -8, 20, -46, -36, 44, + -94, -120, -98, 0, 16, -95, 85, -116, -114, 126, 14, -34, -102, 122, -82, 7, -127, 98, -20, + 71, 9, -113, -88, 7, 61, 58, -125, -121, 29, 28, 18, 99, -111, -85, -17, -127, 81, 0, + -32, 112, -3, 46, 61, -68, -29, 2, -56, 81, -37, -96, 31, -122, 80, -39, -76, 48, -2, + 20, 46, 28, -3, -35, 86, 55, 124, 21, 97, 88, -18, -46, 43, 66, 111, -85, 103, -92, + 5, -72, -15, -110, -29, 39, -40, 122, -33, 121, -21, -41, -97, 59, 120, -4, 35, 111, 82, + 33, 65, 88, -114, 44, -115, 34, 113, 30, -4, -96, 29, -89, -21, -37, -122, 79, 19, -21, + -94, -94, -5, 0, 11, 52, -77, -52, -29, 57, 126, -48, 111, -124, -8, 90, -43, 42, 84, + 81, 106, -118, -53, 71, -8, 51, -101, -106, -3, -26, 7, 93, 90, 5, -13, 79, -9, 119, + -67, -18, -112, 115, -100, 22, 29, 61, -79, 35, -117, -9, -28, 120, -96, -64, 42, -67, 120, + 77, -39, 64, 123, 121, 31, -110, 126, -13, 18, 106, 11, 108, -28, -65, 64, -4, -45, 33, + -72, -51, -22, 15, -88, 19, 120, 58, -84, 86, -3, -47, -27, 79, -64, -71, 27, -46, 25, + 81, -38, 30, -43}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/output.h new file mode 100644 index 00000000..955a03f9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/output.h @@ -0,0 +1,29 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_broadcast_ch_int8_output[384] = { + -47, -47, -47, -80, -47, -47, -114, -47, -47, -85, -47, -47, -47, -64, -127, -48, -55, -111, -68, + -47, -47, -83, -47, -47, -72, -121, -29, -46, -29, -29, -82, -29, -66, -29, -29, -29, -55, -29, + -75, -29, -80, -87, -29, -29, -29, -29, -29, -92, 1, -108, -114, 1, 1, -109, -24, 1, 1, + 1, -49, -1, -88, -51, 1, 1, 1, 1, 1, -104, 1, -48, -8, -12, -80, -96, -80, -105, + -80, -80, -80, -80, -80, -80, -80, -104, -121, -80, -80, -80, -108, -84, -80, -80, -80, -104, -80, + -102, -109, -3, -103, -79, 52, -12, 46, 55, -41, 15, -2, -109, -128, 66, 57, -44, 106, 5, + 38, -2, -18, 37, 73, -82, -12, -109, 20, -43, 74, 57, -49, -92, 86, 86, 65, 86, -30, + 86, 86, 62, 53, -109, 38, -86, 30, 11, 37, -125, -13, -13, -13, -61, -13, -13, -13, -13, + -13, -126, -75, -13, -92, -13, -40, -13, -42, -13, -90, -13, -68, -13, -13, -13, -46, -36, -14, + -94, -120, -98, -14, -14, -95, -14, -116, -114, -14, -14, -34, -102, -14, -82, -14, -127, -14, -20, + -14, -14, -113, -88, 7, 61, 58, -125, -121, 29, 28, 18, 99, -111, -85, -17, -127, 81, 0, + -32, 112, -3, 46, 61, -68, -29, 2, -56, 81, -37, -96, 31, -122, 80, -39, -76, 48, -2, + 20, 46, 28, -3, -35, 82, 55, 82, 21, 82, 82, -18, -46, 43, 62, 62, -85, 62, -92, + 5, -72, -15, -110, -29, 39, -40, 62, -33, 62, -21, -41, -97, 59, 62, -4, 35, 0, 0, + 0, 0, 0, -114, 0, -115, 0, 0, 0, -4, -96, 0, -89, -21, -37, -122, 0, 0, -21, + -94, -94, -5, -17, -17, -17, -77, -52, -29, -17, -17, -48, -17, -124, -17, -17, -43, -17, -17, + -17, -17, -118, -53, -17, -17, -17, -101, -106, -3, -26, 7, 91, 90, 5, -13, 79, -9, 91, + -67, -18, -112, 91, -100, 22, 29, 61, -79, 35, -117, -9, -28, -120, -120, -120, -120, -120, -120, + -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -120, -35, + -72, -51, -35, -35, -88, -35, -35, -35, -84, -35, -35, -47, -35, -35, -64, -71, -35, -46, -35, + -35, -38, -35, -43}; + +const int8_t *const minimum_broadcast_ch_int8_output_ref = minimum_broadcast_ch_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_ch_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/config_data.h new file mode 100644 index 00000000..7f62998a --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
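/*
 * Illustrative cross-check (not part of the generated patch): in the
 * minimum_broadcast_ch_int8 case above the first tensor has a single channel, so
 * one value of input_tensor_1 is compared against each group of OUTPUT_CHANNEL
 * values of input_tensor_2. Include path and helper name are hypothetical; array
 * and macro names come from the headers above.
 */
#include <assert.h>
#include <stdint.h>
#include "TestData/minimum_broadcast_ch_int8/test_data.h" /* hypothetical include path */

static void check_minimum_broadcast_ch_int8(void)
{
    const int channels = MINIMUM_BROADCAST_CH_INT8_OUTPUT_CHANNEL; /* 24 */
    for (int i = 0; i < MINIMUM_BROADCAST_CH_INT8_DST_SIZE; i++)
    {
        const int8_t a = minimum_broadcast_ch_int8_input_tensor_1[i / channels]; /* channel dim of 1 is broadcast */
        const int8_t b = minimum_broadcast_ch_int8_input_tensor_2[i];
        const int8_t expected = (a < b) ? a : b;
        assert(expected == minimum_broadcast_ch_int8_output_ref[i]);
    }
}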
+#pragma once +#define MINIMUM_BROADCAST_HEIGHT_INT8_LAYER_TYPE minimum +#define MINIMUM_BROADCAST_HEIGHT_INT8_BATCH_1 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_1 1 +#define MINIMUM_BROADCAST_HEIGHT_INT8_WIDTH_1 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_1 17 +#define MINIMUM_BROADCAST_HEIGHT_INT8_BATCH_2 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_2 4 +#define MINIMUM_BROADCAST_HEIGHT_INT8_WIDTH_2 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_2 17 +#define MINIMUM_BROADCAST_HEIGHT_INT8_PAD SAME +#define MINIMUM_BROADCAST_HEIGHT_INT8_ACTIVATION_MAX 127 +#define MINIMUM_BROADCAST_HEIGHT_INT8_ACTIVATION_MIN -128 +#define MINIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE 272 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_BATCH 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_HEIGHT 4 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_WIDTH 2 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_CHANNEL 17 +#define MINIMUM_BROADCAST_HEIGHT_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_BROADCAST_HEIGHT_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_MULTIPLIER 1073769216 +#define MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_1.h new file mode 100644 index 00000000..40fb6633 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_1.h @@ -0,0 +1,10 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_height_int8_input_tensor_1[68] = { + 63, 105, 65, 29, -46, 25, 104, 115, 120, -111, 50, -38, -75, -125, 61, -120, -33, + -30, -103, -4, -66, -7, -39, -3, -21, -45, 44, 38, 89, -118, 95, -98, -9, 124, + 90, 109, 25, 62, -74, 116, 119, -102, 72, -103, 59, -70, 69, -50, -29, 0, 116, + 39, -1, -91, 116, -84, -70, -119, -22, -61, 69, 101, -125, 43, 60, -15, -70, -103}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_2.h new file mode 100644 index 00000000..cfdfd972 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/input_tensor_2.h @@ -0,0 +1,20 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_broadcast_height_int8_input_tensor_2[272] = { + -16, 69, -83, 103, -17, 123, 75, -115, -30, -12, -77, -19, -49, 97, 25, -81, 29, -63, 55, 22, + 12, 49, 95, 54, -51, -1, -86, 38, 71, -99, 55, 78, -18, -66, 59, 95, 76, 75, -106, -46, + 25, 82, 63, -25, 92, 116, 99, -92, 32, 122, -103, 97, 107, -32, 60, 8, -50, 77, -36, 81, + -61, 11, 87, 6, -58, 42, -18, 26, 54, 108, -57, 23, -11, -24, 54, -125, -115, 75, -79, 9, + 22, -66, 92, 124, 19, -115, -74, -48, 34, 34, 3, -21, -50, 70, 55, 53, 8, 50, 0, -102, + 33, 110, -75, 67, -50, -6, -88, 23, -43, 78, 125, 44, -16, -109, -122, -89, 28, 50, -27, -91, + -88, 105, 6, -30, -114, -8, -76, -10, 76, 117, -34, -123, -50, 120, 6, 61, 126, -31, -73, 115, + 51, -32, -105, -48, 61, 97, 4, 13, 58, -19, 45, -113, 35, 30, -12, -68, -92, -126, -29, 96, + -44, 101, 49, -126, -6, 48, -94, -97, 65, 39, 68, -1, 68, 120, -42, 99, -13, 53, -56, -15, + -5, -65, -91, -79, -69, 119, 80, 16, -43, -102, -24, 32, 35, -12, -127, 14, -85, -38, -56, -119, + 62, -5, -30, -21, -38, 19, -93, 45, -34, 88, 96, -83, 45, -27, 25, -93, 60, 20, -4, -90, + 41, -69, -110, -64, -122, -96, -32, -34, 12, 12, 18, 34, -103, 16, -126, -51, 59, 97, -43, 113, + 100, 106, -60, -119, 42, 49, -29, 33, -81, -85, -61, -127, 116, -93, 53, 49, 70, 85, -72, 122, + 75, 25, 111, -100, 77, 14, -66, 49, 73, -32, 60, -37}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/output.h new file mode 100644 index 00000000..413532ed --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/output.h @@ -0,0 +1,23 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_broadcast_height_int8_output[272] = { + -16, 69, -83, 29, -46, 25, 75, -115, -30, -111, -77, -38, -75, -125, 25, -120, -33, -63, -103, + -4, -66, -7, -39, -3, -51, -45, -86, 38, 71, -118, 55, -98, -18, -66, 59, 95, 65, 29, + -106, -46, 25, 82, 63, -111, 50, -38, -75, -125, 32, -120, -103, -30, -103, -32, -66, -7, -50, + -3, -36, -45, -61, 11, 87, -118, -58, -98, -18, 26, 54, 105, -57, 23, -46, -24, 54, -125, + -115, -111, -79, -38, -75, -125, 61, -120, -33, -115, -103, -48, -66, -7, -39, -21, -50, -45, 44, + 38, 8, -118, 0, -102, -9, 110, -75, 67, -50, -6, -88, 23, -43, 78, 120, -111, -16, -109, + -122, -125, 28, -120, -33, -91, -103, -4, -66, -30, -114, -8, -76, -45, 44, 38, -34, -123, -50, + -98, -9, 61, 90, -31, -73, 62, -74, -32, -105, -102, 61, -103, 4, -70, 58, -50, -29, -113, + 35, 30, -12, -91, -92, -126, -70, -119, -44, -61, 49, -126, -125, 43, -94, -97, -70, -103, 68, + -1, 25, 62, -74, 99, -13, -102, -56, -103, -5, -70, -91, -79, -69, 0, 80, 16, -43, -102, + -24, -84, -70, -119, -127, -61, -85, -38, -125, -119, 60, -15, -70, -103, -38, 19, -93, 45, -74, + 88, 96, -102, 45, -103, 25, -93, 60, -50, -29, -90, 41, -69, -110, -91, -122, -96, -70, -119, + -22, -61, 18, 34, -125, 16, -126, -51, -70, -103, -43, 109, 25, 62, -74, -119, 42, -102, -29, + -103, -81, -85, -61, -127, -29, -93, 53, 39, -1, -91, -72, -84, -70, -119, -22, -100, 69, 14, + -125, 43, 60, -32, -70, -103}; + +const int8_t *const minimum_broadcast_height_int8_output_ref = minimum_broadcast_height_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_height_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/config_data.h new file mode 100644 index 00000000..61eef9d7 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MINIMUM_BROADCAST_WIDTH_INT8_LAYER_TYPE minimum +#define MINIMUM_BROADCAST_WIDTH_INT8_BATCH_1 2 +#define MINIMUM_BROADCAST_WIDTH_INT8_HEIGHT_1 1 +#define MINIMUM_BROADCAST_WIDTH_INT8_WIDTH_1 4 +#define MINIMUM_BROADCAST_WIDTH_INT8_CHANNEL_1 19 +#define MINIMUM_BROADCAST_WIDTH_INT8_BATCH_2 2 +#define MINIMUM_BROADCAST_WIDTH_INT8_HEIGHT_2 1 +#define MINIMUM_BROADCAST_WIDTH_INT8_WIDTH_2 1 +#define MINIMUM_BROADCAST_WIDTH_INT8_CHANNEL_2 19 +#define MINIMUM_BROADCAST_WIDTH_INT8_PAD SAME +#define MINIMUM_BROADCAST_WIDTH_INT8_ACTIVATION_MAX 127 +#define MINIMUM_BROADCAST_WIDTH_INT8_ACTIVATION_MIN -128 +#define MINIMUM_BROADCAST_WIDTH_INT8_DST_SIZE 152 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_BATCH 2 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_HEIGHT 1 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_WIDTH 4 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_CHANNEL 19 +#define MINIMUM_BROADCAST_WIDTH_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_BROADCAST_WIDTH_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_MULTIPLIER 1075080320 +#define MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_1.h new file mode 100644 index 00000000..4e5ab3b2 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_1.h @@ -0,0 +1,14 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_width_int8_input_tensor_1[152] = { + 8, -36, 88, 97, -30, -6, -25, -42, 9, -81, 107, 76, -90, -26, -110, -113, 7, -79, 30, + -113, 100, 31, -79, 45, 8, -105, -15, 51, -8, 90, -82, -57, 119, -77, 117, 118, -125, -124, + -96, -30, -76, -87, 47, 84, -31, -99, 121, -121, 34, 38, 124, -127, 43, -71, -50, 90, -105, + 99, -40, 38, 97, -79, -72, 47, 63, -79, 27, -126, -41, -93, 56, -44, -77, 71, 96, -67, + 78, -111, -56, -112, 70, -18, 24, 103, -25, 91, 84, -81, 29, 122, 104, 39, 2, 39, 35, + -112, -105, 5, 79, -83, 123, -47, 40, -48, 37, -54, -64, 111, -14, -94, -116, -82, -52, -49, + -69, 119, -88, -50, 88, 33, 117, 38, -58, -76, 104, 90, -28, -24, 109, 1, 38, -2, 13, + -20, 62, 111, 21, -103, -56, 97, -58, 102, 21, 117, -47, 99, -48, 127, 32, 2, -5, -13}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_2.h new file mode 100644 index 00000000..28799330 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/input_tensor_2.h @@ -0,0 +1,8 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_broadcast_width_int8_input_tensor_2[38] = { + 121, -52, -113, 126, -122, 105, -54, 50, -87, -81, -20, -121, 73, 86, 119, -72, 67, -101, 52, + -125, 110, -114, -81, -112, 63, -105, -97, -120, 101, -23, 21, 57, 87, -87, 21, -19, -7, -37}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/output.h new file mode 100644 index 00000000..f3e1ff9d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/output.h @@ -0,0 +1,16 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_broadcast_width_int8_output[152] = { + 8, -52, -113, 97, -122, -6, -54, -42, -87, -81, -20, -121, -90, -26, -110, -113, 7, -101, 30, + -113, -52, -113, -79, -122, 8, -105, -15, -87, -81, -20, -121, -57, 86, -77, -72, 67, -125, -124, + -96, -52, -113, -87, -122, 84, -54, -99, -87, -121, -20, -121, 73, -127, 43, -72, -50, -101, -105, + 99, -52, -113, 97, -122, -72, -54, 50, -87, -81, -126, -121, -93, 56, -44, -77, 67, -101, -67, + -125, -111, -114, -112, -112, -18, -105, -97, -120, 91, -23, -81, 29, 87, -87, 21, -19, -7, -37, + -125, -105, -114, -81, -112, 63, -105, -97, -120, 37, -54, -64, 57, -14, -94, -116, -82, -52, -49, + -125, 110, -114, -81, -112, 33, -105, -97, -120, -76, -23, 21, -28, -24, -87, 1, -19, -7, -37, + -125, 62, -114, -81, -112, -56, -105, -97, -120, 21, -23, -47, 57, -48, -87, 21, -19, -7, -37}; + +const int8_t *const minimum_broadcast_width_int8_output_ref = minimum_broadcast_width_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_broadcast_width_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/config_data.h new file mode 100644 index 00000000..fddbe741 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
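/*
 * Illustrative cross-check (not part of the generated patch): in the
 * minimum_broadcast_width_int8 case above the second tensor has width 1, so each
 * output element selects its value from input_tensor_2 using only its batch and
 * channel indices. Include path and helper name are hypothetical; array and macro
 * names come from the headers above.
 */
#include <assert.h>
#include <stdint.h>
#include "TestData/minimum_broadcast_width_int8/test_data.h" /* hypothetical include path */

static void check_minimum_broadcast_width_int8(void)
{
    const int width = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_WIDTH;      /* 4 */
    const int channels = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_CHANNEL; /* 19 */
    for (int i = 0; i < MINIMUM_BROADCAST_WIDTH_INT8_DST_SIZE; i++)
    {
        const int batch = i / (width * channels);
        const int channel = i % channels;
        const int8_t a = minimum_broadcast_width_int8_input_tensor_1[i];
        const int8_t b = minimum_broadcast_width_int8_input_tensor_2[batch * channels + channel]; /* width dim of 1 is broadcast */
        const int8_t expected = (a < b) ? a : b;
        assert(expected == minimum_broadcast_width_int8_output_ref[i]);
    }
}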
+#pragma once +#define MINIMUM_NO_BROADCAST_INT8_LAYER_TYPE minimum +#define MINIMUM_NO_BROADCAST_INT8_BATCH_1 2 +#define MINIMUM_NO_BROADCAST_INT8_HEIGHT_1 2 +#define MINIMUM_NO_BROADCAST_INT8_WIDTH_1 3 +#define MINIMUM_NO_BROADCAST_INT8_CHANNEL_1 18 +#define MINIMUM_NO_BROADCAST_INT8_BATCH_2 2 +#define MINIMUM_NO_BROADCAST_INT8_HEIGHT_2 2 +#define MINIMUM_NO_BROADCAST_INT8_WIDTH_2 3 +#define MINIMUM_NO_BROADCAST_INT8_CHANNEL_2 18 +#define MINIMUM_NO_BROADCAST_INT8_PAD SAME +#define MINIMUM_NO_BROADCAST_INT8_ACTIVATION_MAX 127 +#define MINIMUM_NO_BROADCAST_INT8_ACTIVATION_MIN -128 +#define MINIMUM_NO_BROADCAST_INT8_DST_SIZE 216 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_BATCH 2 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_HEIGHT 2 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_WIDTH 3 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_CHANNEL 18 +#define MINIMUM_NO_BROADCAST_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_NO_BROADCAST_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_MULTIPLIER 2144448512 +#define MINIMUM_NO_BROADCAST_INT8_OUTPUT_SHIFT -8 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_1.h new file mode 100644 index 00000000..0a315f45 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_1.h @@ -0,0 +1,17 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_no_broadcast_int8_input_tensor_1[216] = { + 30, 85, 72, 55, 11, 110, 35, 95, -108, -63, -119, -58, 32, 29, -123, -92, -120, -99, 17, 37, + 113, 71, 47, -56, 111, 73, 64, 71, 38, -27, 61, -120, 96, -92, -23, 45, -120, -17, 10, 57, + -13, 78, 120, 17, -39, -58, -28, 7, 9, -1, 21, 59, 96, 84, 110, -12, -49, -22, -96, -23, + -110, -104, -21, 6, 86, -7, -6, 111, 80, -109, 74, 102, 2, -79, 45, -106, 14, 43, 74, 33, + -35, 122, 101, 34, -122, -1, -100, 49, -89, 112, 13, -55, 97, -97, 28, -51, -89, -103, -7, 39, + -56, 84, -29, -95, -93, 14, 51, -9, 28, -57, -15, -5, 49, -33, 15, 36, 86, -26, 67, 113, + 80, -99, 83, -107, 126, 27, 61, -82, 103, -44, -58, -85, 57, -62, 37, 67, 47, 77, 30, 111, + 15, 100, -18, 117, -118, -114, -80, 76, 69, 27, -43, 59, 79, 46, 46, -30, -33, -10, -66, 119, + -96, 86, 14, -110, -53, 94, -122, 85, -73, 117, 43, -33, -27, -12, -116, -120, -83, -101, 100, 77, + 19, -83, -120, 72, 5, 47, 31, -35, -33, 76, 122, -52, -107, 115, 72, 93, 26, -52, -26, -113, + 69, 106, 94, -2, -36, -15, 71, 62, 47, -8, -16, -45, 121, 75, 42, 39}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_2.h new file mode 100644 index 00000000..2257d66f --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/input_tensor_2.h @@ -0,0 +1,17 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_no_broadcast_int8_input_tensor_2[216] = { + -11, 9, 102, 126, -51, 11, 68, -37, 126, 81, -40, -41, 125, -21, -111, -61, -84, -40, -53, 44, + -57, -12, 37, -90, -22, 71, -108, -58, -55, 127, 39, 19, 127, 71, -84, -12, 53, 49, 47, -126, + -48, 16, -111, -55, 59, 99, -29, -124, -79, 122, 110, 97, -96, 99, -70, 32, 61, -94, 98, -63, + 30, 112, 104, 65, 43, -6, 19, 121, 70, -112, -70, -52, 56, 74, -61, 102, -13, 114, 7, -115, + 95, 47, -126, 88, 24, 36, -109, -15, -67, 22, -12, -26, 35, 101, -69, 89, 51, -78, -114, 17, + 25, -57, -44, 83, 100, -51, -29, 42, 81, 92, 108, -61, 7, -96, 93, -89, 117, 66, -29, -28, + -10, -68, -75, 41, -84, 68, -115, -69, -18, -18, 28, 66, 78, -80, -72, -116, -5, 74, -74, -67, + 82, -1, -51, 93, 59, 125, -50, 80, 37, 117, -110, 88, -46, 77, 99, -49, 43, 78, -46, -75, + 78, -107, 39, -95, 6, -79, 115, -13, 55, -82, -119, -18, 78, -34, 95, 47, 123, -95, 69, 88, + 73, 28, 67, -15, 71, -53, 6, 14, -15, 47, 123, -33, 121, -6, 89, -102, -81, 97, -121, -16, + -104, 80, 32, -4, -59, -74, 113, -81, -108, -105, 23, -12, 68, -85, -63, -15}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/output.h new file mode 100644 index 00000000..2e67c5d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/output.h @@ -0,0 +1,20 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_no_broadcast_int8_output[216] = { + -11, 9, 72, 55, -51, 11, 35, -37, -108, -63, -119, -58, 32, -21, -123, -92, -120, -99, + -53, 37, -57, -12, 37, -90, -22, 71, -108, -58, -55, -27, 39, -120, 96, -92, -84, -12, + -120, -17, 10, -126, -48, 16, -111, -55, -39, -58, -29, -124, -79, -1, 21, 59, -96, 84, + -70, -12, -49, -94, -96, -63, -110, -104, -21, 6, 43, -7, -6, 111, 70, -112, -70, -52, + 2, -79, -61, -106, -13, 43, 7, -115, -35, 47, -126, 34, -122, -1, -109, -15, -89, 22, + -12, -55, 35, -97, -69, -51, -89, -103, -114, 17, -56, -57, -44, -95, -93, -51, -29, -9, + 28, -57, -15, -61, 7, -96, 15, -89, 86, -26, -29, -28, -10, -99, -75, -107, -84, 27, + -115, -82, -18, -44, -58, -85, 57, -80, -72, -116, -5, 74, -74, -67, 15, -1, -51, 93, + -118, -114, -80, 76, 37, 27, -110, 59, -46, 46, 46, -49, -33, -10, -66, -75, -96, -107, + 14, -110, -53, -79, -122, -13, -73, -82, -119, -33, -27, -34, -116, -120, -83, -101, 69, 77, + 19, -83, -120, -15, 5, -53, 6, -35, -33, 47, 122, -52, -107, -6, 72, -102, -81, -52, + -121, -113, -104, 80, 32, -4, -59, -74, 71, -81, -108, -105, -16, -45, 68, -85, -63, -15}; + +const int8_t *const minimum_no_broadcast_int8_output_ref = minimum_no_broadcast_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_no_broadcast_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/config_data.h new file mode 100644 index 00000000..26b74c5d --- /dev/null +++ 
b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#define MINIMUM_SCALAR_1_INT8_LAYER_TYPE minimum +#define MINIMUM_SCALAR_1_INT8_BATCH_1 1 +#define MINIMUM_SCALAR_1_INT8_HEIGHT_1 1 +#define MINIMUM_SCALAR_1_INT8_WIDTH_1 1 +#define MINIMUM_SCALAR_1_INT8_CHANNEL_1 1 +#define MINIMUM_SCALAR_1_INT8_BATCH_2 1 +#define MINIMUM_SCALAR_1_INT8_HEIGHT_2 2 +#define MINIMUM_SCALAR_1_INT8_WIDTH_2 4 +#define MINIMUM_SCALAR_1_INT8_CHANNEL_2 19 +#define MINIMUM_SCALAR_1_INT8_PAD SAME +#define MINIMUM_SCALAR_1_INT8_ACTIVATION_MAX 127 +#define MINIMUM_SCALAR_1_INT8_ACTIVATION_MIN -128 +#define MINIMUM_SCALAR_1_INT8_DST_SIZE 152 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_BATCH 1 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_HEIGHT 2 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_WIDTH 4 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_CHANNEL 19 +#define MINIMUM_SCALAR_1_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_SCALAR_1_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_MULTIPLIER 2133420544 +#define MINIMUM_SCALAR_1_INT8_OUTPUT_SHIFT -8 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_1.h new file mode 100644 index 00000000..9efc1656 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_1.h @@ -0,0 +1,6 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_scalar_1_int8_input_tensor_1[1] = {-107}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_2.h new file mode 100644 index 00000000..f11cd6d6 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/input_tensor_2.h @@ -0,0 +1,14 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_scalar_1_int8_input_tensor_2[152] = { + -95, -93, -55, -56, -1, -44, -35, 54, 86, -16, 108, -125, 51, -1, -81, -17, 3, 77, -90, + -107, -124, -57, -79, 65, 8, -5, 104, -13, 64, -88, 30, 107, 32, -14, 71, 109, -41, 108, + -44, 8, -93, 36, 42, 26, -78, 6, 116, -101, -54, 46, -124, 74, -97, -102, -3, 41, 119, + -49, -116, -90, 71, 76, -75, 37, 112, -31, -51, -41, 68, -31, -36, -128, -79, -126, 6, -23, + -13, -92, -85, 80, -5, 126, -53, 110, -110, -108, -59, 54, 16, -19, 102, -81, -119, -107, 96, + 81, 78, -51, -35, -127, 38, 84, 16, -16, -67, -10, 11, 72, -118, 52, 54, -51, -122, 20, + 27, 26, -127, -80, -125, -82, -13, 97, -69, -119, 113, 57, -86, -124, -11, 121, 8, -82, -29, + -90, 115, 21, 20, -125, -18, -99, -93, 70, -47, 32, -99, -16, 105, 37, -83, -8, -60, -36}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/output.h new file mode 100644 index 00000000..3bed4f1f --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/output.h @@ -0,0 +1,16 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_scalar_1_int8_output[152] = { + -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -125, -107, -107, -107, -107, -107, -107, -107, + -107, -124, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, + -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -124, -107, -107, -107, -107, -107, -107, + -107, -116, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -128, -107, -126, -107, -107, + -107, -107, -107, -107, -107, -107, -107, -107, -110, -108, -107, -107, -107, -107, -107, -107, -119, -107, -107, + -107, -107, -107, -107, -127, -107, -107, -107, -107, -107, -107, -107, -107, -118, -107, -107, -107, -122, -107, + -107, -107, -127, -107, -125, -107, -107, -107, -107, -119, -107, -107, -107, -124, -107, -107, -107, -107, -107, + -107, -107, -107, -107, -125, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107, -107}; + +const int8_t *const minimum_scalar_1_int8_output_ref = minimum_scalar_1_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_1_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/config_data.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/config_data.h new file mode 100644 index 00000000..d6160086 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/config_data.h @@ -0,0 +1,25 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#define MINIMUM_SCALAR_2_INT8_LAYER_TYPE minimum +#define MINIMUM_SCALAR_2_INT8_BATCH_1 1 +#define MINIMUM_SCALAR_2_INT8_HEIGHT_1 2 +#define MINIMUM_SCALAR_2_INT8_WIDTH_1 4 +#define MINIMUM_SCALAR_2_INT8_CHANNEL_1 19 +#define MINIMUM_SCALAR_2_INT8_BATCH_2 1 +#define MINIMUM_SCALAR_2_INT8_HEIGHT_2 1 +#define MINIMUM_SCALAR_2_INT8_WIDTH_2 1 +#define MINIMUM_SCALAR_2_INT8_CHANNEL_2 1 +#define MINIMUM_SCALAR_2_INT8_PAD SAME +#define MINIMUM_SCALAR_2_INT8_ACTIVATION_MAX 127 +#define MINIMUM_SCALAR_2_INT8_ACTIVATION_MIN -128 +#define MINIMUM_SCALAR_2_INT8_DST_SIZE 152 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_BATCH 1 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_HEIGHT 2 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_WIDTH 4 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_CHANNEL 19 +#define MINIMUM_SCALAR_2_INT8_INPUT_1_OFFSET 128 +#define MINIMUM_SCALAR_2_INT8_INPUT_2_OFFSET 128 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_OFFSET -128 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_MULTIPLIER 1074442112 +#define MINIMUM_SCALAR_2_INT8_OUTPUT_SHIFT -7 diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_1.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_1.h new file mode 100644 index 00000000..93927437 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_1.h @@ -0,0 +1,14 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_scalar_2_int8_input_tensor_1[152] = { + -14, -18, -76, -105, -46, -62, -88, -40, -115, 55, 27, -71, 100, -30, 28, 26, -120, 116, 125, + -46, 53, 89, -63, -54, -62, -68, -27, 34, -92, -75, -57, 57, -122, 43, 35, -80, -58, 25, + -37, 68, 86, -21, 81, 122, 51, 71, -7, -128, -70, -61, 125, -65, -52, -117, -24, 42, 73, + 106, 69, -102, -89, 68, -16, 102, -21, -32, 58, -19, -110, -21, -7, -65, 76, -26, 87, -102, + 61, -1, -113, 84, -124, 103, 119, -15, 59, 65, 50, -1, 11, -31, 6, 34, -123, 75, 87, + -1, -32, -61, -98, -79, 105, 93, 34, -79, 97, 118, 14, 101, 108, -27, -119, 33, -44, 60, + -8, -113, -25, 91, 12, -76, 49, 82, 86, 3, 48, 38, 56, 44, -122, 71, 17, -92, 93, + 103, -4, 83, 80, 125, 52, -118, 58, -12, 33, -114, 3, -126, -4, 67, 80, 119, -40, -108}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_2.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_2.h new file mode 100644 index 00000000..9824328a --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/input_tensor_2.h @@ -0,0 +1,6 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. +#pragma once +#include + +const int8_t minimum_scalar_2_int8_input_tensor_2[1] = {-49}; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/output.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/output.h new file mode 100644 index 00000000..2216c82d --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/output.h @@ -0,0 +1,16 @@ +// Generated by generate_test_data.py using tensorflow version 2.17.0 (Keras version 3.5.0). +// Interpreter from tensorflow version 2.17.0 and revision v2.17.0-rc1-2-gad6d8cc177d. 
+#pragma once +#include + +const int8_t minimum_scalar_2_int8_output[152] = { + -49, -49, -76, -105, -49, -62, -88, -49, -115, -49, -49, -71, -49, -49, -49, -49, -120, -49, -49, + -49, -49, -49, -63, -54, -62, -68, -49, -49, -92, -75, -57, -49, -122, -49, -49, -80, -58, -49, + -49, -49, -49, -49, -49, -49, -49, -49, -49, -128, -70, -61, -49, -65, -52, -117, -49, -49, -49, + -49, -49, -102, -89, -49, -49, -49, -49, -49, -49, -49, -110, -49, -49, -65, -49, -49, -49, -102, + -49, -49, -113, -49, -124, -49, -49, -49, -49, -49, -49, -49, -49, -49, -49, -49, -123, -49, -49, + -49, -49, -61, -98, -79, -49, -49, -49, -79, -49, -49, -49, -49, -49, -49, -119, -49, -49, -49, + -49, -113, -49, -49, -49, -76, -49, -49, -49, -49, -49, -49, -49, -49, -122, -49, -49, -92, -49, + -49, -49, -49, -49, -49, -49, -118, -49, -49, -49, -114, -49, -126, -49, -49, -49, -49, -49, -108}; + +const int8_t *const minimum_scalar_2_int8_output_ref = minimum_scalar_2_int8_output; diff --git a/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/test_data.h b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/test_data.h new file mode 100644 index 00000000..490761d9 --- /dev/null +++ b/Tests/UnitTest/TestCases/TestData/minimum_scalar_2_int8/test_data.h @@ -0,0 +1,4 @@ +#include "config_data.h" +#include "input_tensor_1.h" +#include "input_tensor_2.h" +#include "output.h" diff --git a/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/CMakeLists.txt b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/CMakeLists.txt new file mode 100644 index 00000000..f7bacf10 --- /dev/null +++ b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/CMakeLists.txt @@ -0,0 +1,25 @@ +# +# SPDX-FileCopyrightText: Copyright 2023-2024 Arm Limited and/or its affiliates +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +add_cmsis_nn_unit_test_executable(test_arm_maximum_minimum_s8) + +target_sources(test_arm_maximum_minimum_s8 PRIVATE + Unity/unity_test_arm_maximum_minimum_s8.c + Unity/TestRunner/unity_test_arm_maximum_minimum_s8_runner.c) + + diff --git a/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/Unity/unity_test_arm_maximum_minimum_s8.c b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/Unity/unity_test_arm_maximum_minimum_s8.c new file mode 100644 index 00000000..ce78d7d6 --- /dev/null +++ b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/Unity/unity_test_arm_maximum_minimum_s8.c @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2021, 2023-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include "../test_arm_maximum_s8.c" +#include "../test_arm_minimum_s8.c" +#include "unity.h" + +#ifdef USING_FVP_CORSTONE_300 +extern void uart_init(void); +#endif + +/* This function is called from the autogenerated file. + * The name must be exactly like this + */ +void setUp(void) +{ /* This is run before EACH TEST */ +#ifdef USING_FVP_CORSTONE_300 + uart_init(); +#endif +} + +/* This function is called from the autogenerated file. + * The name must be exactly like this + */ +void tearDown(void) {} + +void test_arm_minimum_scalar_1_int8(void) { minimum_scalar_1_int8(); } +void test_arm_minimum_scalar_2_int8(void) { minimum_scalar_2_int8(); } +void test_arm_minimum_no_broadcast_int8(void) { minimum_no_broadcast_int8(); } +void test_arm_minimum_broadcast_batch_int8(void) { minimum_broadcast_batch_int8(); } +void test_arm_minimum_broadcast_height_int8(void) { minimum_broadcast_height_int8(); } +void test_arm_minimum_broadcast_width_int8(void) { minimum_broadcast_width_int8(); } +void test_arm_minimum_broadcast_ch_int8(void) { minimum_broadcast_ch_int8(); } + +void test_arm_maximum_scalar_1_int8(void) { maximum_scalar_1_int8(); } +void test_arm_maximum_scalar_2_int8(void) { maximum_scalar_2_int8(); } +void test_arm_maximum_no_broadcast_int8(void) { maximum_no_broadcast_int8(); } +void test_arm_maximum_broadcast_batch_int8(void) { maximum_broadcast_batch_int8(); } +void test_arm_maximum_broadcast_height_int8(void) { maximum_broadcast_height_int8(); } +void test_arm_maximum_broadcast_width_int8(void) { maximum_broadcast_width_int8(); } +void test_arm_maximum_broadcast_ch_int8(void) { maximum_broadcast_ch_int8(); } diff --git a/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_maximum_s8.c b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_maximum_s8.c new file mode 100644 index 00000000..37d6610c --- /dev/null +++ b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_maximum_s8.c @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "stdio.h" +#include +#include +#include + +#include "../TestData/maximum_broadcast_batch_int8/test_data.h" +#include "../TestData/maximum_broadcast_ch_int8/test_data.h" +#include "../TestData/maximum_broadcast_height_int8/test_data.h" +#include "../TestData/maximum_broadcast_width_int8/test_data.h" +#include "../TestData/maximum_no_broadcast_int8/test_data.h" +#include "../TestData/maximum_scalar_1_int8/test_data.h" +#include "../TestData/maximum_scalar_2_int8/test_data.h" + +#include "../Utils/validate.h" + +void maximum_scalar_1_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MAXIMUM_SCALAR_1_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_scalar_1_int8_input_tensor_1; + const int8_t *input_2_data = maximum_scalar_1_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_scalar_1_int8_output; + + input_1_dims.n = MAXIMUM_SCALAR_1_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_SCALAR_1_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_SCALAR_1_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_SCALAR_1_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_SCALAR_1_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_SCALAR_1_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_SCALAR_1_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_SCALAR_1_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_SCALAR_1_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_SCALAR_1_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_SCALAR_1_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_SCALAR_1_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_SCALAR_1_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_scalar_2_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MAXIMUM_SCALAR_2_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_scalar_2_int8_input_tensor_1; + const int8_t *input_2_data = maximum_scalar_2_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_scalar_2_int8_output; + + input_1_dims.n = MAXIMUM_SCALAR_2_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_SCALAR_2_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_SCALAR_2_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_SCALAR_2_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_SCALAR_2_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_SCALAR_2_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_SCALAR_2_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_SCALAR_2_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_SCALAR_2_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_SCALAR_2_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_SCALAR_2_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_SCALAR_2_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_SCALAR_2_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_no_broadcast_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MAXIMUM_NO_BROADCAST_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_no_broadcast_int8_input_tensor_1; + const int8_t *input_2_data = maximum_no_broadcast_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_no_broadcast_int8_output; + + input_1_dims.n = MAXIMUM_NO_BROADCAST_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_NO_BROADCAST_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_NO_BROADCAST_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_NO_BROADCAST_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_NO_BROADCAST_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_NO_BROADCAST_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_NO_BROADCAST_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_NO_BROADCAST_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_NO_BROADCAST_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_NO_BROADCAST_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_NO_BROADCAST_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_NO_BROADCAST_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_NO_BROADCAST_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_broadcast_batch_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MAXIMUM_BROADCAST_BATCH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_broadcast_batch_int8_input_tensor_1; + const int8_t *input_2_data = maximum_broadcast_batch_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_broadcast_batch_int8_output; + + input_1_dims.n = MAXIMUM_BROADCAST_BATCH_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_BROADCAST_BATCH_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_BROADCAST_BATCH_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_BROADCAST_BATCH_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_BROADCAST_BATCH_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_BROADCAST_BATCH_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_BROADCAST_BATCH_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_BROADCAST_BATCH_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_BROADCAST_BATCH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_BROADCAST_BATCH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_broadcast_height_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MAXIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_broadcast_height_int8_input_tensor_1; + const int8_t *input_2_data = maximum_broadcast_height_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_broadcast_height_int8_output; + + input_1_dims.n = MAXIMUM_BROADCAST_HEIGHT_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_BROADCAST_HEIGHT_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_BROADCAST_HEIGHT_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_BROADCAST_HEIGHT_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_broadcast_width_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MAXIMUM_BROADCAST_WIDTH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_broadcast_width_int8_input_tensor_1; + const int8_t *input_2_data = maximum_broadcast_width_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_broadcast_width_int8_output; + + input_1_dims.n = MAXIMUM_BROADCAST_WIDTH_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_BROADCAST_WIDTH_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_BROADCAST_WIDTH_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_BROADCAST_WIDTH_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_BROADCAST_WIDTH_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_BROADCAST_WIDTH_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_BROADCAST_WIDTH_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_BROADCAST_WIDTH_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_BROADCAST_WIDTH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_BROADCAST_WIDTH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void maximum_broadcast_ch_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MAXIMUM_BROADCAST_CH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = maximum_broadcast_ch_int8_input_tensor_1; + const int8_t *input_2_data = maximum_broadcast_ch_int8_input_tensor_2; + const int8_t *output_ref_data = maximum_broadcast_ch_int8_output; + + input_1_dims.n = MAXIMUM_BROADCAST_CH_INT8_BATCH_1; + input_1_dims.h = MAXIMUM_BROADCAST_CH_INT8_HEIGHT_1; + input_1_dims.w = MAXIMUM_BROADCAST_CH_INT8_WIDTH_1; + input_1_dims.c = MAXIMUM_BROADCAST_CH_INT8_CHANNEL_1; + + input_2_dims.n = MAXIMUM_BROADCAST_CH_INT8_BATCH_2; + input_2_dims.h = MAXIMUM_BROADCAST_CH_INT8_HEIGHT_2; + input_2_dims.w = MAXIMUM_BROADCAST_CH_INT8_WIDTH_2; + input_2_dims.c = MAXIMUM_BROADCAST_CH_INT8_CHANNEL_2; + + output_dims.n = MAXIMUM_BROADCAST_CH_INT8_OUTPUT_BATCH; + output_dims.h = MAXIMUM_BROADCAST_CH_INT8_OUTPUT_HEIGHT; + output_dims.w = MAXIMUM_BROADCAST_CH_INT8_OUTPUT_WIDTH; + output_dims.c = MAXIMUM_BROADCAST_CH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_maximum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MAXIMUM_BROADCAST_CH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} \ No newline at end of file diff --git a/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_minimum_s8.c b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_minimum_s8.c new file mode 100644 index 00000000..192f9301 --- /dev/null +++ b/Tests/UnitTest/TestCases/test_arm_maximum_minimum_s8/test_arm_minimum_s8.c @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2024 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "stdio.h" +#include +#include +#include + +#include "../TestData/minimum_broadcast_batch_int8/test_data.h" +#include "../TestData/minimum_broadcast_ch_int8/test_data.h" +#include "../TestData/minimum_broadcast_height_int8/test_data.h" +#include "../TestData/minimum_broadcast_width_int8/test_data.h" +#include "../TestData/minimum_no_broadcast_int8/test_data.h" +#include "../TestData/minimum_scalar_1_int8/test_data.h" +#include "../TestData/minimum_scalar_2_int8/test_data.h" + +#include "../Utils/validate.h" + +void minimum_scalar_1_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MINIMUM_SCALAR_1_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_scalar_1_int8_input_tensor_1; + const int8_t *input_2_data = minimum_scalar_1_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_scalar_1_int8_output; + + input_1_dims.n = MINIMUM_SCALAR_1_INT8_BATCH_1; + input_1_dims.h = MINIMUM_SCALAR_1_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_SCALAR_1_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_SCALAR_1_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_SCALAR_1_INT8_BATCH_2; + input_2_dims.h = MINIMUM_SCALAR_1_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_SCALAR_1_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_SCALAR_1_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_SCALAR_1_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_SCALAR_1_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_SCALAR_1_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_SCALAR_1_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_SCALAR_1_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_scalar_2_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MINIMUM_SCALAR_2_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_scalar_2_int8_input_tensor_1; + const int8_t *input_2_data = minimum_scalar_2_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_scalar_2_int8_output; + + input_1_dims.n = MINIMUM_SCALAR_2_INT8_BATCH_1; + input_1_dims.h = MINIMUM_SCALAR_2_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_SCALAR_2_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_SCALAR_2_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_SCALAR_2_INT8_BATCH_2; + input_2_dims.h = MINIMUM_SCALAR_2_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_SCALAR_2_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_SCALAR_2_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_SCALAR_2_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_SCALAR_2_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_SCALAR_2_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_SCALAR_2_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_SCALAR_2_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_no_broadcast_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MINIMUM_NO_BROADCAST_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_no_broadcast_int8_input_tensor_1; + const int8_t *input_2_data = minimum_no_broadcast_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_no_broadcast_int8_output; + + input_1_dims.n = MINIMUM_NO_BROADCAST_INT8_BATCH_1; + input_1_dims.h = MINIMUM_NO_BROADCAST_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_NO_BROADCAST_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_NO_BROADCAST_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_NO_BROADCAST_INT8_BATCH_2; + input_2_dims.h = MINIMUM_NO_BROADCAST_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_NO_BROADCAST_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_NO_BROADCAST_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_NO_BROADCAST_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_NO_BROADCAST_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_NO_BROADCAST_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_NO_BROADCAST_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_NO_BROADCAST_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_broadcast_batch_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MINIMUM_BROADCAST_BATCH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_broadcast_batch_int8_input_tensor_1; + const int8_t *input_2_data = minimum_broadcast_batch_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_broadcast_batch_int8_output; + + input_1_dims.n = MINIMUM_BROADCAST_BATCH_INT8_BATCH_1; + input_1_dims.h = MINIMUM_BROADCAST_BATCH_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_BROADCAST_BATCH_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_BROADCAST_BATCH_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_BROADCAST_BATCH_INT8_BATCH_2; + input_2_dims.h = MINIMUM_BROADCAST_BATCH_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_BROADCAST_BATCH_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_BROADCAST_BATCH_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_BROADCAST_BATCH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_BROADCAST_BATCH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_broadcast_height_int8(void) +{ + cmsis_nn_context ctx; + int8_t output[MINIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_broadcast_height_int8_input_tensor_1; + const int8_t *input_2_data = minimum_broadcast_height_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_broadcast_height_int8_output; + + input_1_dims.n = MINIMUM_BROADCAST_HEIGHT_INT8_BATCH_1; + input_1_dims.h = MINIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_BROADCAST_HEIGHT_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_BROADCAST_HEIGHT_INT8_BATCH_2; + input_2_dims.h = MINIMUM_BROADCAST_HEIGHT_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_BROADCAST_HEIGHT_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_BROADCAST_HEIGHT_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_BROADCAST_HEIGHT_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_BROADCAST_HEIGHT_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_broadcast_width_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MINIMUM_BROADCAST_WIDTH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_broadcast_width_int8_input_tensor_1; + const int8_t *input_2_data = minimum_broadcast_width_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_broadcast_width_int8_output; + + input_1_dims.n = MINIMUM_BROADCAST_WIDTH_INT8_BATCH_1; + input_1_dims.h = MINIMUM_BROADCAST_WIDTH_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_BROADCAST_WIDTH_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_BROADCAST_WIDTH_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_BROADCAST_WIDTH_INT8_BATCH_2; + input_2_dims.h = MINIMUM_BROADCAST_WIDTH_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_BROADCAST_WIDTH_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_BROADCAST_WIDTH_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_BROADCAST_WIDTH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. 
+ memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_BROADCAST_WIDTH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} + +void minimum_broadcast_ch_int8(void) +{ + cmsis_nn_context ctx; + + int8_t output[MINIMUM_BROADCAST_CH_INT8_DST_SIZE] = {0}; + + int buf_size = 0; + ctx.buf = malloc(buf_size); + ctx.size = 0; + + cmsis_nn_dims input_1_dims; + cmsis_nn_dims input_2_dims; + cmsis_nn_dims output_dims; + + const int8_t *input_1_data = minimum_broadcast_ch_int8_input_tensor_1; + const int8_t *input_2_data = minimum_broadcast_ch_int8_input_tensor_2; + const int8_t *output_ref_data = minimum_broadcast_ch_int8_output; + + input_1_dims.n = MINIMUM_BROADCAST_CH_INT8_BATCH_1; + input_1_dims.h = MINIMUM_BROADCAST_CH_INT8_HEIGHT_1; + input_1_dims.w = MINIMUM_BROADCAST_CH_INT8_WIDTH_1; + input_1_dims.c = MINIMUM_BROADCAST_CH_INT8_CHANNEL_1; + + input_2_dims.n = MINIMUM_BROADCAST_CH_INT8_BATCH_2; + input_2_dims.h = MINIMUM_BROADCAST_CH_INT8_HEIGHT_2; + input_2_dims.w = MINIMUM_BROADCAST_CH_INT8_WIDTH_2; + input_2_dims.c = MINIMUM_BROADCAST_CH_INT8_CHANNEL_2; + + output_dims.n = MINIMUM_BROADCAST_CH_INT8_OUTPUT_BATCH; + output_dims.h = MINIMUM_BROADCAST_CH_INT8_OUTPUT_HEIGHT; + output_dims.w = MINIMUM_BROADCAST_CH_INT8_OUTPUT_WIDTH; + output_dims.c = MINIMUM_BROADCAST_CH_INT8_OUTPUT_CHANNEL; + + arm_cmsis_nn_status result = + arm_minimum_s8(&ctx, input_1_data, &input_1_dims, input_2_data, &input_2_dims, output, &output_dims); + + if (ctx.buf) + { + // The caller is responsible to clear the scratch buffers for security reasons if applicable. + memset(ctx.buf, 0, buf_size); + free(ctx.buf); + } + + int dst_size = MINIMUM_BROADCAST_CH_INT8_DST_SIZE; + TEST_ASSERT_EQUAL(ARM_CMSIS_NN_SUCCESS, result); + TEST_ASSERT_TRUE(validate(output, output_ref_data, dst_size)); +} \ No newline at end of file diff --git a/Tests/UnitTest/build_and_run_tests.sh b/Tests/UnitTest/build_and_run_tests.sh index 5d591d42..623970a9 100755 --- a/Tests/UnitTest/build_and_run_tests.sh +++ b/Tests/UnitTest/build_and_run_tests.sh @@ -59,7 +59,8 @@ args: while getopts hc:o:qbreapfu:gC: flag do case "${flag}" in - h) echo "${usage}";; + h) echo "${usage}" + exit 1;; c) CPU=${OPTARG};; o) OPTIMIZATION=${OPTARG};; q) QUIET=1;;
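
Note on usage (illustrative only, not part of the patch): the unit tests above all follow the same calling pattern, so a minimal sketch of invoking the new operator directly is given below. It assumes two int8 NHWC tensors of identical shape (no broadcasting) and that no scratch buffer is needed, which matches the tests where ctx.buf is a zero-sized allocation; the array and function names are made up for the example.

    #include <arm_nnfunctions.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Elementwise maximum of two 1x2x2x4 int8 tensors.
     * arm_maximum_s8(), arm_cmsis_nn_status, cmsis_nn_context and
     * cmsis_nn_dims come from CMSIS-NN; everything else is illustrative. */
    static int8_t example_input_a[16];
    static int8_t example_input_b[16];
    static int8_t example_output[16];

    arm_cmsis_nn_status example_run_maximum(void)
    {
        cmsis_nn_context ctx;
        ctx.buf = NULL; /* no scratch buffer used in this sketch */
        ctx.size = 0;

        cmsis_nn_dims dims;
        dims.n = 1;
        dims.h = 2;
        dims.w = 2;
        dims.c = 4;

        /* Both inputs share the same dims, so no broadcasting is involved.
         * When a dimension of one input is 1 it is broadcast to match the
         * other input, as exercised by the *_broadcast_* test cases above. */
        return arm_maximum_s8(&ctx, example_input_a, &dims, example_input_b, &dims,
                              example_output, &dims);
    }

arm_minimum_s8() takes the same argument list, as can be seen in test_arm_minimum_s8.c.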