From 23543f2c89694df69199acf89a3dad6931058891 Mon Sep 17 00:00:00 2001
From: Cheng Lai
Date: Fri, 15 Jul 2022 15:13:02 +0800
Subject: [PATCH] TLX2ONNX 0.0.1 release

---
 OP_LIST.md                             | 34 ++++++++++-----
 requirements.txt                       |  2 +-
 setup.py                               |  2 +-
 tests/test_layernorm.py                | 39 +++++++++++++++++
 tlx2onnx/main.py                       | 28 +++++++++---
 tlx2onnx/op_mapper/nn/normalization.py | 59 ++++++++++++++++++++++++++
 6 files changed, 145 insertions(+), 19 deletions(-)
 create mode 100644 tests/test_layernorm.py

diff --git a/OP_LIST.md b/OP_LIST.md
index 2556a08..4bee5a3 100644
--- a/OP_LIST.md
+++ b/OP_LIST.md
@@ -53,17 +53,27 @@
 |Tile| 1~12|Supported|
 |UpSampling2d| 1~12|Supported|
 |DownSampling2d| 1~12|Supported|
-|Concat|
-Elementwise|
-|GaussianNoise|
-|PadLayer|
-|ZeroPad1d|
-|ZeroPad2d|
-|ZeroPad3d|
-|Stack|
-|UnStack|
-|Sign|
-|Scale|
+|Concat| 1~12 | Supported|
+|Elementwise| 1~12 | Supported|
+|GaussianNoise| 1~12 | Supported|
+|PadLayer| 1~12 | Supported|
+|ZeroPad1d| 1~12 | Supported|
+|ZeroPad2d| 1~12 | Supported|
+|ZeroPad3d| 1~12 | Supported|
+|Stack| 1~12 | Supported|
+|UnStack| 1~12 | Supported|
+|Scale| 1~12 | Supported|
+|RNN| 1~12 | Supported|
+|RNNCell| 1~12 | Supported|
+|LSTM| 1~12 | Supported|
+|LSTMCell| 1~12 | Supported|
+|GRU| 1~12 | Supported|
+|GRUCell| 1~12 | Supported|
+|LayerNorm| 17 | Supported|
+|GroupConv2d| | |
+|SeparableConv1d| | |
+|SeparableConv2d| | |
+|SubpixelConv2d| | |
 
 |Matmul | 1~12 | Supported|
@@ -75,3 +85,5 @@ Elementwise|
+
+
diff --git a/requirements.txt b/requirements.txt
index bdce9d3..d1f15a5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
 onnx<=1.11.0
-tensorlayerx>=0.5.1
+tensorlayerx>=0.5.6
diff --git a/setup.py b/setup.py
index 1189f9a..e573240 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
 MAJOR = 0
 MINOR = 0
 PATCH = 1
-PRE_RELEASE = 'alpha'
+PRE_RELEASE = ''
 # Use the following formatting: (major, minor, patch, prerelease)
 VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
diff --git a/tests/test_layernorm.py b/tests/test_layernorm.py
new file mode 100644
index 0000000..3dc8288
--- /dev/null
+++ b/tests/test_layernorm.py
@@ -0,0 +1,39 @@
+#! /usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+os.environ["TL_BACKEND"] = 'tensorflow'
+import tensorlayerx as tlx
+from tensorlayerx.nn import Module
+from tensorlayerx.nn import LayerNorm
+from tlx2onnx.main import export
+import onnxruntime as rt
+import numpy as np
+
+
+class NET(Module):
+    def __init__(self):
+        super(NET, self).__init__()
+        self.layernorm = LayerNorm([50, 50, 32])
+
+    def forward(self, x):
+        x = self.layernorm(x)
+        return x
+
+net = NET()
+print(type(net))
+net.set_eval()
+input = tlx.nn.Input(shape=(10, 50, 50, 32))
+onnx_model = export(net, input_spec=input, path='layernorm.onnx', enable_onnx_checker=False)
+print("tlx out", net(input))
+
+# Infer Model
+sess = rt.InferenceSession('layernorm.onnx')
+
+input_name = sess.get_inputs()[0].name
+output_name = sess.get_outputs()[0].name
+
+input_data = np.array(input, dtype=np.float32)
+
+result = sess.run([output_name], {input_name: input_data})
+print("onnx out", result)
diff --git a/tlx2onnx/main.py b/tlx2onnx/main.py
index a988a76..227f795 100644
--- a/tlx2onnx/main.py
+++ b/tlx2onnx/main.py
@@ -9,18 +9,33 @@ from .common import make_graph, logging
 from .op_mapper.datatype_mapping import NP_TYPE_TO_TENSOR_TYPE
 
 
-def export(model, input_spec, path=None, export_params=False, opset_version = 9, auto_update_opset=True):
+def export(model, input_spec, path=None, enable_onnx_checker=True, opset_version = 9, auto_update_opset=True):
     """
     Parameters
     ----------
-    model
-    input_spec
-    path
-    export_params
+    model : object
+        The TensorLayerX model (net) instance to be exported.
+    input_spec : tensor
+        A TensorLayerX Input describing the model input.
+    path : string
+        Path where the exported ONNX model file is saved.
+    enable_onnx_checker : bool
+        Whether to run the ONNX model checker on the exported model.
+    opset_version : int
+        The version of the default (ai.onnx) opset to target. Must be >= 7 and <= 17.
     Returns
     -------
+    The exported ONNX model (onnx.ModelProto); it is also saved to the file given by path.
+
+    Examples
+    --------
+    >>> class NET(Module): ...  # define the network as usual
+    >>> net = NET()
+    >>> net.set_eval()
+    >>> input = tlx.nn.Input([10, 50, 50, 32], name='input')
+    >>> onnx_model = export(net, input_spec=input, path='net.onnx')
 
     """
 
@@ -64,7 +79,8 @@ def export(model, input_spec, path=None, export_params=False, opset_version = 9,
         producer_name='onnx-mode'
     )
 
-    onnx.checker.check_model(model_def)
+    if enable_onnx_checker:
+        onnx.checker.check_model(model_def)
     onnx.save(model_def, path)
     logging.info("ONNX model saved in {}".format(path))
     return model_def
diff --git a/tlx2onnx/op_mapper/nn/normalization.py b/tlx2onnx/op_mapper/nn/normalization.py
index b8d09be..a0523c3 100644
--- a/tlx2onnx/op_mapper/nn/normalization.py
+++ b/tlx2onnx/op_mapper/nn/normalization.py
@@ -79,4 +79,63 @@ def version_1(cls, node, **kwargs):
                               outputs=node['out_nodes_name']
                               )
         onnx_node.append(bn_node)
+        return onnx_node, onnx_value, onnx_init
+
+
+@OpMapper(['LayerNorm'])
+class LayerNorm():
+    # supports v17
+
+    @classmethod
+    def version_17(cls, node, **kwargs):
+        onnx_node = []
+        onnx_value = []
+        onnx_init = []
+
+        # input, output and data_format
+        x = node['in_nodes_name'][0]
+        x_shape = node['in_tensors'][0]
+
+        out_shape = node['out_tensors'][0]
+        out_v = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']],
+                                              shape=node['out_tensors'][0])
+        onnx_value.append(out_v)
+
+        spatial = 2
+        # get parameters
+        beta_name = node['node'].layer.name + '/beta'
+        beta_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.beta), name=beta_name)
+        onnx_init.append(beta_weight)
+
+        gamma_name = node['node'].layer.name + '/gamma'
+        gamma_weight = numpy_helper.from_array(arr=to_numpy(node['node'].layer.gamma), name=gamma_name)
+        onnx_init.append(gamma_weight)
+
+        epsilon = node['node'].layer.epsilon
+
+        # data_format == 'channels_last':
+        # convert the channels-last input to channels-first
+        x_shape = make_shape_channels_first(x_shape)
+        out_temp_shape = make_shape_channels_first(out_shape)
+        # transpose the input to channels-first
+        t_x = helper.make_tensor_value_info(node['in_nodes_name'][0] + 't',
+                                            NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=x_shape)
+        onnx_value.append(t_x)
+        tx_node, x = make_node('Transpose', inputs=[x], outputs=[node['in_nodes_name'][0] + 't'],
+                               perm=get_channels_first_permutation(spatial))
+        onnx_node.append(tx_node)
+        # make layer normalization (LayerNormalization-17 inputs are X, Scale = gamma, B = beta)
+        out_temp = helper.make_tensor_value_info(node['out_nodes_name'][0] + 'ln',
+                                                 NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_temp_shape)
+        onnx_value.append(out_temp)
+        ln_node, out = make_node('LayerNormalization',
+                                 inputs=[node['in_nodes_name'][0] + 't', gamma_name, beta_name],
+                                 outputs=[node['out_nodes_name'][0] + 'ln'], epsilon=epsilon
+                                 )
+        onnx_node.append(ln_node)
+        # transpose the result back to channels-last
+        t_out = helper.make_tensor_value_info(node['out_nodes_name'][0], NP_TYPE_TO_TENSOR_TYPE[node['dtype']], shape=out_shape)
+        onnx_value.append(t_out)
+        tout_node, _ = make_node('Transpose', inputs=[out], outputs=node['out_nodes_name'], perm=get_channels_last_permutation(spatial))
+        onnx_node.append(tout_node)
+        return onnx_node, onnx_value, onnx_init
\ No newline at end of file
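
Note on the LayerNorm mapping above: ONNX LayerNormalization (opset 17) takes its inputs in the order X, Scale, B, where Scale corresponds to gamma and B to beta, and it normalizes over the trailing axes starting at the axis attribute (default -1, epsilon default 1e-5). The following standalone NumPy sketch of that computation can be used to sanity-check the exported node; the shapes and names here are illustrative assumptions, not taken from the patch.

import numpy as np

def layer_normalization(x, scale, bias, axis=-1, epsilon=1e-5):
    # Normalize over the trailing axes starting at `axis`, as ONNX LayerNormalization-17 does,
    # then apply the per-element scale (gamma) and bias (beta).
    axes = tuple(range(axis % x.ndim, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + epsilon) * scale + bias

# Illustrative example only: a channels-last (10, 50, 50, 32) input normalized over its last axis.
x = np.random.rand(10, 50, 50, 32).astype(np.float32)
gamma = np.ones((32,), dtype=np.float32)    # Scale input
beta = np.zeros((32,), dtype=np.float32)    # B input
print(layer_normalization(x, gamma, beta).shape)    # (10, 50, 50, 32)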