diff --git a/core/gen_case.cpp b/core/gen_case.cpp
index dfc103e58..2ad7dbf4d 100644
--- a/core/gen_case.cpp
+++ b/core/gen_case.cpp
@@ -761,7 +761,7 @@ std::string descToString(mluOpTensorDescriptor_t desc, char delimiter) {
   tensor_info << " layout: " << mluop::getNameOfTensorLayout(layout) << delimiter;
   tensor_info << " dtype: " << mluop::getNameOfDataType(dtype) << delimiter;
-  if (desc->pointer_mode == MLUOP_POINTER_MODE_HOST) {
+  if (desc->getPointerMode() == MLUOP_POINTER_MODE_HOST) {
     tensor_info << " pointer_mode: POINTER_MODE_HOST" << delimiter;
     if ((total_element_num != 1) || (dim != 0)) {
       LOG(WARNING) << "[gen_case] Tensor has been set to POINTER_MODE_HOST, "
diff --git a/core/gen_case.h b/core/gen_case.h
index 7e60c9de7..0bfe6c405 100644
--- a/core/gen_case.h
+++ b/core/gen_case.h
@@ -492,7 +492,7 @@ class PbNode {
       uint64_t data_size = total_num * mluop::getSizeOfDataType(dtype);
       void *data = malloc(data_size);
       auto memcpy_dir =
-          (tensors[index].desc->pointer_mode == MLUOP_POINTER_MODE_HOST
+          (tensors[index].desc->getPointerMode() == MLUOP_POINTER_MODE_HOST
               ? cnrtMemcpyHostToHost
               : cnrtMemcpyDevToHost);
       if (cnrtSuccess == cnrtMemcpy(data,
diff --git a/core/tensor.cpp b/core/tensor.cpp
index 72af32672..f34569052 100644
--- a/core/tensor.cpp
+++ b/core/tensor.cpp
@@ -388,7 +388,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpCreateGroupTensorDescriptors(
   return MLUOP_STATUS_SUCCESS;
 }
-static inline mluOpStatus_t mluOpSetTensorDescriptorZeroDim(
+inline mluOpStatus_t mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(
     mluOpTensorDescriptor_t desc) {
   if (desc->pointer_mode == MLUOP_POINTER_MODE_HOST) {
     desc->dim = 0;
@@ -406,7 +406,7 @@ static inline mluOpStatus_t mluOpSetTensorDescriptorZeroDim(
   }
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptor(
     mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout,
     mluOpDataType_t dtype, int dimNb, const int *dimSize) {
   PARAM_CHECK("[mluOpSetTensorDescriptor]", desc != NULL);
@@ -417,7 +417,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor(
   desc->layout = layout;
   if (dimNb == 0) {
-    return mluOpSetTensorDescriptorZeroDim(desc);
+    return mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(desc);
   } else {
     PARAM_CHECK("[mluOpSetTensorDescriptor]", dimNb > 0);
     PARAM_CHECK("[mluOpSetTensorDescriptor]", dimSize != NULL);
@@ -425,7 +425,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor(
   }
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptor_v2(
     mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout,
     mluOpDataType_t dtype, int dimNb, const int64_t *dimSize) {
   PARAM_CHECK("[mluOpSetTensorDescriptor]", desc != NULL);
@@ -436,7 +436,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor_v2(
   desc->layout = layout;
   if (dimNb == 0) {
-    return mluOpSetTensorDescriptorZeroDim(desc);
+    return mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(desc);
   } else {
     PARAM_CHECK("[mluOpSetTensorDescriptor]", dimNb > 0);
     PARAM_CHECK("[mluOpSetTensorDescriptor]", dimSize != NULL);
@@ -446,8 +446,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor_v2(
   }
 }
 // Internal interface. Caller should guarantee parameter validity.
-static inline void mluOpSetTensorDescriptorDimBase(mluOpTensorDescriptor_t desc,
-                                                   int dimNb) {
+inline void mluOpTensorStruct::mluOpSetTensorDescriptorDimBase(mluOpTensorDescriptor_t desc,int dimNb) {
   if (dimNb != desc->dim) {
     if MLUOP_PREDICT_FALSE (desc->dims != desc->normal_dims) {
       delete[] desc->dims;
@@ -464,11 +463,11 @@ static inline void mluOpSetTensorDescriptorDimBase(mluOpTensorDescriptor_t desc,
   }
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorDim(
     mluOpTensorDescriptor_t desc, int dimNb, const int *dimSize) {
   if (dimNb == 0) {
     CHECK_RETURN("[mluOpSetTensorDescriptorDim]",
-                 mluOpSetTensorDescriptorZeroDim(desc));
+                 mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(desc));
   } else {
     mluOpSetTensorDescriptorDimBase(desc, dimNb);
     std::copy(dimSize, dimSize + dimNb, desc->dims);
@@ -504,10 +503,9 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorDim_v2(
     mluOpTensorDescriptor_t desc, int dimNb, const int64_t *dimSize) {
   mluOpSetTensorDescriptorDimBase(desc, dimNb);
-  memcpy(desc->dims, dimSize, dimNb * sizeof(int64_t));
   // infer strides of dimNb dimensions and compute total_num & total_size
@@ -540,7 +538,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim_v2(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetGroupTensorDescriptors(
     mluOpTensorDescriptor_t **group_desc,
     const mluOpTensorLayout_t *group_layout, const mluOpDataType_t *group_dtype,
     const int *group_dimNb, const int *group_dimSize, const int desc_num) {
@@ -586,7 +584,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetGroupTensorDescriptors_v2(
     mluOpTensorDescriptor_t **group_desc,
     const mluOpTensorLayout_t *group_layout, const mluOpDataType_t *group_dtype,
     const int *group_dimNb, const int64_t *group_dimSize, const int desc_num) {
@@ -632,7 +630,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors_v2(
 }
 mluOpStatus_t MLUOP_WIN_API
-mluOpResetTensorDescriptor(mluOpTensorDescriptor_t desc) {
+mluOpTensorStruct::mluOpResetTensorDescriptor(mluOpTensorDescriptor_t desc) {
   PARAM_CHECK("[mluOpResetTensorDescriptor]", desc != NULL);
   if MLUOP_PREDICT_FALSE (desc->dims != desc->normal_dims) {
@@ -660,7 +658,7 @@ mluOpResetTensorDescriptor(mluOpTensorDescriptor_t desc) {
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorEx(
     mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout,
     mluOpDataType_t dtype, int dimNb, const int *dimSize,
     const int *dimStride) {
@@ -672,13 +670,13 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx(
   desc->layout = layout;
   if (dimNb == 0) {
-    return mluOpSetTensorDescriptorZeroDim(desc);
+    return mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(desc);
   } else {
     PARAM_CHECK("[mluOpSetTensorDescriptorEx]", dimSize != NULL);
     PARAM_CHECK("[mluOpSetTensorDescriptorEx]", dimStride != NULL);
     PARAM_CHECK("[mluOpSetTensorDescriptorEx]", dimNb > 0);
-    mluOpSetTensorDescriptorDimBase(desc, dimNb);
+    mluOpTensorStruct::mluOpSetTensorDescriptorDimBase(desc, dimNb);
     std::copy(dimSize, dimSize + dimNb, desc->dims);
     std::copy(dimStride, dimStride + dimNb, desc->strides);
@@ -694,7 +692,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx(
   }
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorEx_v2(
     mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout,
     mluOpDataType_t dtype, int dimNb, const int64_t *dimSize,
     const int64_t *dimStride) {
@@ -706,12 +704,12 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx_v2(
   desc->layout = layout;
   if MLUOP_PREDICT_FALSE (dimNb == 0) {
-    return mluOpSetTensorDescriptorZeroDim(desc);
+    return mluOpTensorStruct::mluOpSetTensorDescriptorZeroDim(desc);
   } else {
     PARAM_CHECK("[mluOpSetTensorDescriptorEx]", dimSize != NULL);
     PARAM_CHECK("[mluOpSetTensorDescriptorEx]", dimStride != NULL);
-    mluOpSetTensorDescriptorDimBase(desc, dimNb);
+    mluOpTensorStruct::mluOpSetTensorDescriptorDimBase(desc, dimNb);
     memcpy(desc->dims, dimSize, dimNb * sizeof(int64_t));
     memcpy(desc->strides, dimStride, dimNb * sizeof(int64_t));
@@ -727,7 +725,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx_v2(
   }
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorOnchipDataType(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorOnchipDataType(
     mluOpTensorDescriptor_t desc, mluOpDataType_t onchip_dtype) {
   PARAM_CHECK("[mluOpSetTensorDescriptorOnchipDataType]", desc != NULL);
@@ -736,15 +734,15 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorOnchipDataType(
 }
 mluOpStatus_t MLUOP_WIN_API
-mluOpSetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int position) {
+mluOpTensorStruct::mluOpSetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int position) {
   PARAM_CHECK("[mluOpSetTensorDescriptorPosition]", desc != NULL);
   desc->position = position;
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionAndScale(
-    mluOpTensorDescriptor_t desc, int position, float scale) {
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorPositionAndScale(
+    mluOpTensorDescriptor_t desc, int position, float scale) {
   PARAM_CHECK("[mluOpSetTensorDescriptorPositionAndScale]", desc != NULL);
   desc->position = position;
@@ -752,7 +750,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionAndScale(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionScaleAndOffset(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorPositionScaleAndOffset(
     mluOpTensorDescriptor_t desc, int position, float scale, int offset) {
   PARAM_CHECK("[mluOpSetTensorDescriptorPositionScaleAndOffset]", desc != NULL);
@@ -762,7 +760,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionScaleAndOffset(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPointerMode(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpSetTensorDescriptorPointerMode(
     mluOpTensorDescriptor_t desc, mluOpPointerMode_t pointer_mode) {
   PARAM_CHECK("[mluOpSetTensorDescriptorPointerMode]", desc != NULL);
   PARAM_CHECK("[mluOpSetTensorDescriptorPointerMode]", pointer_mode >= 0);
@@ -771,8 +769,8 @@ mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPointerMode(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx(
-    const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout,
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorEx(
+    mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout,
     mluOpDataType_t *dtype, int *dimNb, int *dimSize, int *dimStride) {
   PARAM_CHECK("[mluOpGetTensorDescriptorEx]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorEx]", layout != NULL);
@@ -792,7 +790,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorEx_v2(
     const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout,
     mluOpDataType_t *dtype, int *dimNb, int64_t *dimSize, int64_t *dimStride) {
   PARAM_CHECK("[mluOpGetTensorDescriptorEx]", desc != NULL);
@@ -813,7 +811,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx_v2(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptor(
     const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout,
     mluOpDataType_t *dtype, int *dimNb, int *dimSize) {
   PARAM_CHECK("[mluOpGetTensorDescriptor]", desc != NULL);
@@ -826,7 +824,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor_v2(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptor_v2(
     const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout,
     mluOpDataType_t *dtype, int *dimNb, int64_t *dimSize) {
   PARAM_CHECK("[mluOpGetTensorDescriptor]", desc != NULL);
@@ -839,7 +837,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor_v2(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorOnchipDataType(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorOnchipDataType(
     const mluOpTensorDescriptor_t desc, mluOpDataType_t *onchip_dtype) {
   PARAM_CHECK("[mluOpGetTensorDescriptorOnchipDataType]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorOnchipDataType]", onchip_dtype != NULL);
@@ -849,7 +847,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorOnchipDataType(
 }
 mluOpStatus_t MLUOP_WIN_API
-mluOpGetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int *position) {
+mluOpTensorStruct::mluOpGetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int *position) {
   PARAM_CHECK("[mluOpGetTensorDescriptorPosition]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorPosition]", position != NULL);
@@ -857,7 +855,7 @@ mluOpGetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int *position) {
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionAndScale(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorPositionAndScale(
     mluOpTensorDescriptor_t desc, int *position, float *scale) {
   PARAM_CHECK("[mluOpGetTensorDescriptorPositionAndScale]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorPositionAndScale]", position != NULL);
@@ -867,8 +865,8 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionAndScale(
   *scale = desc->scale;
   return MLUOP_STATUS_SUCCESS;
 }
-
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionScaleAndOffset(
+
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorPositionScaleAndOffset(
     mluOpTensorDescriptor_t desc, int *position, float *scale, int *offset) {
   PARAM_CHECK("[mluOpGetTensorDescriptorPositionScaleAndOffset]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorPositionScaleAndOffset]",
@@ -884,7 +882,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionScaleAndOffset(
   return MLUOP_STATUS_SUCCESS;
 }
-mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPointerMode(
+mluOpStatus_t MLUOP_WIN_API mluOpTensorStruct::mluOpGetTensorDescriptorPointerMode(
     mluOpTensorDescriptor_t desc, mluOpPointerMode_t *pointer_mode) {
   PARAM_CHECK("[mluOpGetTensorDescriptorPointerMode]", desc != NULL);
   PARAM_CHECK("[mluOpGetTensorDescriptorPointerMode]", pointer_mode != NULL);
@@ -894,7 +892,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPointerMode(
 }
 mluOpStatus_t MLUOP_WIN_API
-mluOpDestroyTensorDescriptor(mluOpTensorDescriptor_t desc) {
+mluOpTensorStruct::mluOpDestroyTensorDescriptor(mluOpTensorDescriptor_t desc) {
   PARAM_CHECK("[mluOpDestroyTensorDescriptor]", desc != NULL);
 #if MLUOP_TENSOR_QUEUE_ENABLE
@@ -932,7 +930,7 @@ mluOpStatus_t MLUOP_WIN_API mluOpDestroyGroupTensorDescriptors(
 // usr interface.
 uint64_t MLUOP_WIN_API
-mluOpGetTensorElementNum(const mluOpTensorDescriptor_t desc) {
+mluOpTensorStruct::mluOpGetTensorElementNum(const mluOpTensorDescriptor_t desc) {
   CHECK(desc != NULL);
   return desc->total_element_num;
 }
diff --git a/core/tensor.h b/core/tensor.h
index 38fba2a67..7139a8f42 100644
--- a/core/tensor.h
+++ b/core/tensor.h
@@ -107,6 +107,169 @@ struct alignas(64) mluOpTensorStruct {
   inline bool isSameDims(const mluOpTensorStruct *other) const;
   inline bool isCpuScalar() const;
+ public:
+  inline float getOffset() const { return this->offset; }
+  inline void setOffset(float newOffset) { this->offset = newOffset; }
+
+  inline float getScale() const { return this->scale; }
+  inline void setScale(float newScale) { this->scale = newScale; }
+
+  inline mluOpTensorLayout_t getLayout() const { return this->layout; }
+  inline void setLayout(mluOpTensorLayout_t newLayout) {
+    this->layout = newLayout;
+  }
+
+  inline uint64_t getTotalTensorSize() const { return this->total_tensor_size; }
+  inline void setTotalTensorSize(uint64_t newSize) {
+    this->total_tensor_size = newSize;
+  }
+  inline uint64_t getTotalElementNum() const { return this->total_element_num; }
+  inline void setTotalElementNum(uint64_t newNum) {
+    this->total_element_num = newNum;
+  }
+
+  inline int getPosition() const { return this->position; }
+  inline void setPosition(int newPosition) { this->position = newPosition; }
+
+  inline mluOpDataType_t getDtype() const { return this->dtype; }
+  inline void setDtype(mluOpDataType_t newDtype) { this->dtype = newDtype; }
+  inline mluOpDataType_t getOnchipDtype() const { return this->onchip_dtype; }
+  inline void setOnchipDtype(mluOpDataType_t newDtype) {
+    this->onchip_dtype = newDtype;
+  }
+
+  inline int getDim() const { return this->dim; }
+  inline void setDim(int newDim) { this->dim = newDim; }
+
+  inline void releaseDims() { delete[] this->dims; }
+  inline int64_t *getDims() const { return this->dims; }
+  inline int64_t getDimIndex(size_t index) {
+    if (index >= this->dim) {
+      throw std::out_of_range("Index out of range");
+    }
+    return (this->dims)[index];
+  }
+  inline void setDims(int64_t *newDims) { this->dims = newDims; }
+
+  inline void releaseStrides() { delete[] this->strides; }
+  inline int64_t *getStrides() const { return this->strides; }
+  inline int64_t getStrideIndex(size_t index) const {
+    if (index >= this->dim) {
+      throw std::out_of_range("Index out of range");
+    }
+    return (this->strides)[index];
+  }
+  inline void setStrideIndex(size_t index, int64_t newStride) {
+    this->strides[index] = newStride;
+  }
+
+  inline void setStrides(int64_t *newStrides) { this->strides = newStrides; }
+
+  inline mluOpPointerMode_t getPointerMode() const {
return this->pointer_mode; + } + inline void setPointerMode(mluOpPointerMode_t new_pointer_mode) { + this->pointer_mode = new_pointer_mode; + } + + inline int64_t *getNormalDims() { return this->normal_dims; } + inline int64_t *getNormalStrides() { return this->normal_strides; } + // Definition of function in tensor.cpp + static inline mluOpStatus_t mluOpSetTensorDescriptorZeroDim(mluOpTensorDescriptor_t desc); + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor( + mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout, + mluOpDataType_t dtype, int dimNb, const int *dimSize); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptor_v2( + mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout, + mluOpDataType_t dtype, int dimNb, const int64_t *dimSize); + + static inline void mluOpSetTensorDescriptorDimBase( + mluOpTensorDescriptor_t desc, int dimNb); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim( + mluOpTensorDescriptor_t desc, int dimNb, const int *dimSize); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorDim_v2( + mluOpTensorDescriptor_t desc, int dimNb, const int64_t *dimSize); + + mluOpStatus_t MLUOP_WIN_API + mluOpResetTensorDescriptor( mluOpTensorDescriptor_t desc); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx( + mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout, + mluOpDataType_t dtype, int dimNb, const int *dimSize, + const int *dimStride); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorEx_v2( + mluOpTensorDescriptor_t desc, mluOpTensorLayout_t layout, + mluOpDataType_t dtype, int dimNb, const int64_t *dimSize, + const int64_t *dimStride); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorOnchipDataType( + mluOpTensorDescriptor_t desc, mluOpDataType_t onchip_dtype); + + mluOpStatus_t MLUOP_WIN_API + mluOpSetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int position); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionAndScale( + mluOpTensorDescriptor_t desc, int position, float scale); + + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPositionScaleAndOffset( + mluOpTensorDescriptor_t desc, int position, float scale, int offset); + + mluOpStatus_t MLUOP_WIN_API mluOpSetTensorDescriptorPointerMode( + mluOpTensorDescriptor_t desc, mluOpPointerMode_t pointer_mode); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx( + const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout, + mluOpDataType_t *dtype, int *dimNb, int *dimSize, int *dimStride); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorEx_v2( + const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout, + mluOpDataType_t *dtype, int *dimNb, int64_t *dimSize, int64_t *dimStride); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor( + const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout, + mluOpDataType_t *dtype, int *dimNb, int *dimSize); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptor_v2( + const mluOpTensorDescriptor_t desc, mluOpTensorLayout_t *layout, + mluOpDataType_t *dtype, int *dimNb, int64_t *dimSize); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorOnchipDataType( + const mluOpTensorDescriptor_t desc, mluOpDataType_t *onchip_dtype); + + mluOpStatus_t MLUOP_WIN_API + mluOpGetTensorDescriptorPosition(mluOpTensorDescriptor_t desc, int *position); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionAndScale( + mluOpTensorDescriptor_t desc, int *position, float *scale); + + mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPositionScaleAndOffset( + 
+      mluOpTensorDescriptor_t desc, int *position, float *scale, int *offset);
+
+  mluOpStatus_t MLUOP_WIN_API mluOpGetTensorDescriptorPointerMode(
+      mluOpTensorDescriptor_t desc, mluOpPointerMode_t *pointer_mode);
+
+  mluOpStatus_t MLUOP_WIN_API
+  mluOpDestroyTensorDescriptor(mluOpTensorDescriptor_t desc);
+
+  uint64_t MLUOP_WIN_API
+  mluOpGetTensorElementNum(mluOpTensorDescriptor_t desc);
+
+  mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors(
+      mluOpTensorDescriptor_t **group_desc,
+      const mluOpTensorLayout_t *group_layout, const mluOpDataType_t *group_dtype,
+      const int *group_dimNb, const int *group_dimSize, const int desc_num);
+
+  mluOpStatus_t MLUOP_WIN_API mluOpSetGroupTensorDescriptors_v2(
+      mluOpTensorDescriptor_t **group_desc,
+      const mluOpTensorLayout_t *group_layout, const mluOpDataType_t *group_dtype,
+      const int *group_dimNb, const int64_t *group_dimSize, const int desc_num);
+
+  // private:
   /* Try to pack and align the struct */
   /* ------------------- 64 Bytes - 1 -------------------*/
   int64_t normal_dims[MLUOP_DIM_MAX];
@@ -158,7 +321,7 @@ struct mluOpTensorSetStruct {
     CHECK(!this->tensor_set.empty());
     size_t tensor_set_size = 0;
     for (int i = 0; i < tensor_set.size(); i++) {
-      tensor_set_size += tensor_set[i]->total_tensor_size;
+      tensor_set_size += tensor_set[i]->getTotalTensorSize();
     }
     return tensor_set_size;
   }
@@ -175,7 +338,7 @@ struct mluOpTensorSetStruct {
     int64_t offset = 0;
     int index = this->getIndex(tensorIndex);
     for (int i = 0; i < index; i++) {
-      offset += tensor_set[i]->total_tensor_size;
+      offset += tensor_set[i]->getTotalTensorSize();
     }
     data_offset[index] = offset;
     return offset;
@@ -189,12 +352,12 @@ struct mluOpTensorSetStruct {
   inline mluOpDataType_t getDatatype() const {
     CHECK(!this->tensor_set.empty());
-    return this->tensor_set[0]->dtype;
+    return this->tensor_set[0]->getDtype();
   }
   inline mluOpTensorLayout_t getLayout() const {
     CHECK(!this->tensor_set.empty());
-    return this->tensor_set[0]->layout;
+    return this->tensor_set[0]->getLayout();
   }
   inline void checkDataOffset() const {
@@ -218,7 +381,7 @@ struct mluOpTensorSetStruct {
     int offset = 0;
     data_offset[0] = offset;
     for (int i = 0; i < tensor_num - 1; i++) {
-      offset += tensor_set[i]->total_tensor_size;
+      offset += tensor_set[i]->getTotalTensorSize();
       data_offset[i + 1] = offset;
     }
     return data_offset;
@@ -288,16 +451,16 @@ inline int mluOpDataTypeBytes(const mluOpDataType_t dt) {
 }
 inline int64_t mluOpGetTensordimN(const mluOpTensorDescriptor_t desc) {
-  switch (desc->layout) {
+  switch (desc->getLayout()) {
     case MLUOP_LAYOUT_NCHW:
     case MLUOP_LAYOUT_NHWC:
    case MLUOP_LAYOUT_NDHWC:
     case MLUOP_LAYOUT_NLC:
-      return desc->dims[0];
+      return desc->getDimIndex(0);
     case MLUOP_LAYOUT_NCDHW:
-      return desc->dims[0];
+      return desc->getDimIndex(0);
     case MLUOP_LAYOUT_HWCN:
-      return desc->dims[3];
+      return desc->getDimIndex(3);
     default:
       LOG(ERROR) << "Failed to call dimN, illegal layout in TensorDescriptor.\n";
@@ -306,11 +469,11 @@ inline int64_t mluOpGetTensordimN(const mluOpTensorDescriptor_t desc) {
 }
 inline int64_t mluOpGetTensordimD(const mluOpTensorDescriptor_t desc) {
-  switch (desc->layout) {
+  switch (desc->getLayout()) {
     case MLUOP_LAYOUT_NDHWC:
-      return desc->dims[1];
+      return desc->getDimIndex(1);
     case MLUOP_LAYOUT_NCDHW:
-      return desc->dims[2];
+      return desc->getDimIndex(2);
     default:
       LOG(ERROR) << "Failed to call dimD, illegal layout in TensorDescriptor.\n";
@@ -319,18 +482,18 @@ inline int64_t mluOpGetTensordimC(const mluOpTensorDescriptor_t desc) {
-  switch (desc->layout) {
+  switch (desc->getLayout()) {
     case MLUOP_LAYOUT_NCHW:
-      return desc->dims[1];
+      return desc->getDimIndex(1);
     case MLUOP_LAYOUT_NHWC:
-      return desc->dims[3];
+      return desc->getDimIndex(3);
     case MLUOP_LAYOUT_NDHWC:
-      return desc->dims[4];
+      return desc->getDimIndex(4);
     case MLUOP_LAYOUT_NCDHW:
-      return desc->dims[1];
+      return desc->getDimIndex(1);
     case MLUOP_LAYOUT_HWCN:
     case MLUOP_LAYOUT_NLC:
-      return desc->dims[2];
+      return desc->getDimIndex(2);
     default:
       LOG(ERROR) << "Failed to call dimC, illegal layout in TensorDescriptor.\n";
@@ -339,17 +502,17 @@ inline int64_t mluOpGetTensordimC(const mluOpTensorDescriptor_t desc) {
-  switch (desc->layout) {
+  switch (desc->getLayout()) {
     case MLUOP_LAYOUT_NCHW:
-      return desc->dims[2];
+      return desc->getDimIndex(2);
     case MLUOP_LAYOUT_NHWC:
-      return desc->dims[1];
+      return desc->getDimIndex(1);
     case MLUOP_LAYOUT_NDHWC:
-      return desc->dims[2];
+      return desc->getDimIndex(2);
     case MLUOP_LAYOUT_NCDHW:
-      return desc->dims[3];
+      return desc->getDimIndex(3);
     case MLUOP_LAYOUT_HWCN:
-      return desc->dims[0];
+      return desc->getDimIndex(0);
     default:
       LOG(ERROR) << "Failed to call dimH, illegal layout in TensorDescriptor.\n";
@@ -358,18 +521,18 @@ inline int64_t mluOpGetTensordimH(const mluOpTensorDescriptor_t desc) {
-  switch (desc->layout) {
+  switch (desc->getLayout()) {
     case MLUOP_LAYOUT_NCHW:
-      return desc->dims[3];
+      return desc->getDimIndex(3);
     case MLUOP_LAYOUT_NHWC:
-      return desc->dims[2];
+      return desc->getDimIndex(2);
     case MLUOP_LAYOUT_NDHWC:
-      return desc->dims[3];
+      return desc->getDimIndex(3);
     case MLUOP_LAYOUT_NCDHW:
-      return desc->dims[4];
+      return desc->getDimIndex(4);
     case MLUOP_LAYOUT_HWCN:
     case MLUOP_LAYOUT_NLC:
-      return desc->dims[1];
+      return desc->getDimIndex(1);
     default:
       LOG(ERROR) << "Failed to call dimW, illegal layout in TensorDescriptor.\n";
@@ -466,12 +629,12 @@ inline int64_t mluOpGetSeqDataDimC(const mluOpSeqDataDescriptor_t desc) {
 inline uint64_t shapeStrideCount(const mluOpTensorDescriptor_t desc) {
   uint64_t total = 1;
-  for (int i = 0; i < desc->dim; ++i) {
-    if (desc->dims[i] == 0) {
+  for (int i = 0; i < desc->getDim(); ++i) {
+    if (desc->getDimIndex(i) == 0) {
       total = 0;
       break;
     }
-    total += (desc->dims[i] - 1) * desc->strides[i];
+    total += (desc->getDimIndex(i) - 1) * desc->getStrideIndex(i);
   }
   return total;
 }
diff --git a/test/mlu_op_gtest/pb_gtest/src/zoo/active_rotated_filter_forward/active_rotated_filter_forward.cpp b/test/mlu_op_gtest/pb_gtest/src/zoo/active_rotated_filter_forward/active_rotated_filter_forward.cpp
index f4ac68a06..c5891bd4b 100644
--- a/test/mlu_op_gtest/pb_gtest/src/zoo/active_rotated_filter_forward/active_rotated_filter_forward.cpp
+++ b/test/mlu_op_gtest/pb_gtest/src/zoo/active_rotated_filter_forward/active_rotated_filter_forward.cpp
@@ -112,7 +112,7 @@ void ActiveRotatedFilterForwardExecutor::cpuCompute() {
 }
 int64_t ActiveRotatedFilterForwardExecutor::getTheoryIoSize() {
-  auto dtype = tensor_desc_[0].tensor->dtype;
+  auto dtype = tensor_desc_[0].tensor->getDtype();
   std::vector indices_shape = parser_->input(1)->shape;
   const int64_t rotations = indices_shape[3];
diff --git a/test/mlu_op_gtest/pb_gtest/src/zoo/fft/fft.cpp b/test/mlu_op_gtest/pb_gtest/src/zoo/fft/fft.cpp
index d623ce2c5..3036fba30 100644
--- a/test/mlu_op_gtest/pb_gtest/src/zoo/fft/fft.cpp
+++ b/test/mlu_op_gtest/pb_gtest/src/zoo/fft/fft.cpp
@@ -123,8 +123,8 @@ int64_t FftExecutor::getTheoryIoSize() {
   // dtype check
   auto input_tensor = tensor_desc_[0].tensor;
   auto output_tensor = tensor_desc_[1].tensor;
-  mluOpDataType_t input_dtype = input_tensor->dtype;
-  mluOpDataType_t output_dtype = output_tensor->dtype;
+  mluOpDataType_t input_dtype = input_tensor->getDtype();
+  mluOpDataType_t output_dtype = output_tensor->getDtype();
   auto fft_param = parser_->getProtoNode()->fft_param();
   int rank = fft_param.rank();
diff --git a/test/mlu_op_gtest/pb_gtest/src/zoo/masked_col2im_forward/masked_col2im_forward.cpp b/test/mlu_op_gtest/pb_gtest/src/zoo/masked_col2im_forward/masked_col2im_forward.cpp
index 582803a15..e1cdb18e1 100644
--- a/test/mlu_op_gtest/pb_gtest/src/zoo/masked_col2im_forward/masked_col2im_forward.cpp
+++ b/test/mlu_op_gtest/pb_gtest/src/zoo/masked_col2im_forward/masked_col2im_forward.cpp
@@ -114,7 +114,7 @@ void MaskedCol2imForwardExecutor::cpuCompute() {
 int64_t MaskedCol2imForwardExecutor::getTheoryIoSize() {
   int input_size = parser_->getInputDataCount(0);
-  auto dtype = tensor_desc_[0].tensor->dtype;
+  auto dtype = tensor_desc_[0].tensor->getDtype();
   int dsize = 0;
   if (dtype == MLUOP_DTYPE_FLOAT) {
     dsize = 4;
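
The sketch below is illustrative only and is not part of the patch. It shows how a call site is expected to read descriptor state after this refactor: through the accessors added to mluOpTensorStruct in core/tensor.h above rather than by touching members such as desc->dtype, desc->dims or desc->pointer_mode directly. Only getters declared in this diff (getDtype, getLayout, getDim, getDimIndex, getTotalElementNum, getPointerMode) are used; the helper name summarizeTensorDesc is hypothetical.

// Illustrative sketch, not part of the patch. Assumes core/tensor.h from this
// change is on the include path; summarizeTensorDesc is a made-up helper name.
#include <sstream>
#include <string>

#include "core/tensor.h"

std::string summarizeTensorDesc(const mluOpTensorDescriptor_t desc) {
  std::ostringstream oss;
  // Previously a caller would read desc->dtype, desc->dim, desc->dims[i] and
  // desc->pointer_mode directly; going through the getters keeps call sites
  // working if the raw members are later made private (see the still-commented
  // "// private:" marker in the tensor.h hunk above).
  oss << "dtype=" << desc->getDtype() << " layout=" << desc->getLayout()
      << " dims=[";
  for (int i = 0; i < desc->getDim(); ++i) {
    oss << desc->getDimIndex(i) << (i + 1 < desc->getDim() ? "," : "");
  }
  oss << "] elements=" << desc->getTotalElementNum();
  if (desc->getPointerMode() == MLUOP_POINTER_MODE_HOST) {
    oss << " pointer_mode=HOST";  // same check gen_case.cpp now performs
  }
  return oss.str();
}

Whether such a helper is worth adding is out of scope here; the point is only that every direct member read in the touched files now has a corresponding accessor in tensor.h.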