diff --git a/.gitignore b/.gitignore index 6d40de30..bb03a7b6 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,7 @@ tests/*/cpp */*/models tests/deep_mlp/data .vscode +*.pyc +.*.pyc +*.swp +.*.swp diff --git a/utensor_cgen/backend/operators.py b/utensor_cgen/backend/operators.py index a5d0209d..24812049 100644 --- a/utensor_cgen/backend/operators.py +++ b/utensor_cgen/backend/operators.py @@ -10,6 +10,25 @@ from .snippets import * # pylint: disable=W0401,W0614 +def add_tensor_string_reference(sref_name, **kwargs): + sref_snippet = TensorStringReferenceSnippet(sref_name) + weight_container = kwargs['weight_container'] + weight_container.add_snippet(sref_snippet) + +def add_tensor_string_references(inputs, outputs, **kwargs): + def add_things(mthings, **kwargs): + if isinstance(mthings, list): + for sref_name in mthings: + add_tensor_string_reference(sref_name, **kwargs) + else: + add_tensor_string_reference(mthings, **kwargs) + add_things(inputs, **kwargs) + add_things(outputs, **kwargs) + +def prepare_string_ref_name(tensor_name): + inline = tensor_name.replace(":", "_").replace("/", "_") + prepared = "sref_{}".format(inline) + return prepared class OperatorFactory(): # Can easily do something smarter @@ -52,14 +71,16 @@ class _AddOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) tf_dtype = op_info.input_tensors[0].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) self._snippet = AddOpSnippet(inputs, output, tf_dtype, ref_count, to_eval) + + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register @@ -69,9 +90,9 @@ class _ArgMaxOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] out_tensor_info = op_info.output_tensors[0] - output, out_dtype = out_tensor_info.name, out_tensor_info.dtype + output, out_dtype = prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype in_dtype = op_info.input_tensors[0].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) @@ -79,6 +100,7 @@ def __init__(self, op_info, **kwargs): to_eval = parser.get('to_eval', False) self._snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _DequantizeOperator(_Operator): @@ -87,15 +109,16 @@ class _DequantizeOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] out_tensor_info = op_info.output_tensors[0] - output, out_dtype = out_tensor_info.name, out_tensor_info.dtype + output, out_dtype = prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', 
False) self._snippet = DequantizeOpSnippet(inputs, output, out_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _MaxOperator(_Operator): @@ -104,9 +127,9 @@ class _MaxOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] out_tensor_info = op_info.output_tensors[0] - output, out_dtype, out_shape = (out_tensor_info.name, + output, out_dtype, out_shape = (prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype, out_tensor_info.shape) # FIXME: automatic alloc for uTensor fail @@ -117,6 +140,8 @@ def __init__(self, op_info, **kwargs): ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) self._snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval) + + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _MaxPool(_Operator): @@ -125,8 +150,8 @@ class _MaxPool(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) dtype = op_info.output_tensors[0].dtype ksize = op_info.op_attr['ksize'].value.ints_value strides = op_info.op_attr['strides'].value.ints_value @@ -138,6 +163,8 @@ def __init__(self, op_info, **kwargs): self._snippet = MaxPoolSnippet(inputs, output, dtype, ksize, strides, padding, ref_count, to_eval) + + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register @@ -147,8 +174,8 @@ class _QuantizedMaxPool(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] dtype = op_info.output_tensors[0].dtype ksize = op_info.op_attr['ksize'].value.ints_value strides = op_info.op_attr['strides'].value.ints_value @@ -161,6 +188,7 @@ def __init__(self, op_info, **kwargs): ksize, strides, padding, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _MinOperator(_Operator): @@ -169,9 +197,9 @@ class _MinOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] out_info = op_info.output_tensors[0] - output, out_dtype, out_shape = (out_info.name, + output, out_dtype, out_shape = (prepare_string_ref_name(out_info.name), out_info.dtype, out_info.shape) # FIXME: automatic alloc for uTensor fail @@ -183,6 +211,7 @@ def __init__(self, op_info, **kwargs): to_eval = parser.get('to_eval', False) self._snippet = MinOpSnippet(inputs, output, out_dtype, out_shape, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _QuantizeV2Operator(_Operator): @@ -191,8 +220,8 @@ class 
_QuantizeV2Operator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] out_dtype = op_info.output_tensors[0].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) @@ -200,6 +229,7 @@ def __init__(self, op_info, **kwargs): to_eval = parser.get('to_eval', False) self._snippet = QuantizeV2OpSnippet(inputs, outputs, out_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _MatMulOperator(_Operator): @@ -208,8 +238,8 @@ class _MatMulOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) in_tensor_info = op_info.input_tensors[0] x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype, @@ -221,6 +251,8 @@ def __init__(self, op_info, **kwargs): self._snippet = MatMulOpSnippet(inputs, output, x_dtype, w_dtype, out_dtype, ref_count, to_eval) + + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _QuantizedMatMulOperator(_Operator): @@ -229,8 +261,8 @@ class _QuantizedMatMulOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] in_tensor_info = op_info.input_tensors[0] x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype, @@ -243,6 +275,8 @@ def __init__(self, op_info, **kwargs): x_dtype, w_dtype, out_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) + @OperatorFactory.register class _ReluOperator(_Operator): @@ -250,8 +284,8 @@ class _ReluOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) in_dtype, out_dtype = (op_info.input_tensors[0].dtype, op_info.output_tensors[0].dtype) #NT: why separate this out? 
#DB: I don't know, it's in the uTensor C code @@ -263,6 +297,7 @@ def __init__(self, op_info, **kwargs): out_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _QuantizedReluOperator(_Operator): @@ -271,8 +306,8 @@ class _QuantizedReluOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] in_dtype, qout_dtype = (op_info.input_tensors[0].dtype, op_info.output_tensors[0].dtype) #NT: why separate this out? #DB: I don't know, it's in the uTensor C code @@ -285,6 +320,7 @@ def __init__(self, op_info, **kwargs): out_dtypes, qout_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _QuantizedAddOperator(_Operator): @@ -293,8 +329,8 @@ class _QuantizedAddOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype, op_info.output_tensors[0].dtype) @@ -306,6 +342,7 @@ def __init__(self, op_info, **kwargs): x_dtype, w_dtype, out_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _QuantizedMulOperator(_Operator): @@ -314,8 +351,8 @@ class _QuantizedMulOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] x_dtype, w_dtype, out_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype, op_info.output_tensors[0].dtype) @@ -327,6 +364,7 @@ def __init__(self, op_info, **kwargs): x_dtype, w_dtype, out_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _RequantizationRangeOperator(_Operator): @@ -335,8 +373,8 @@ class _RequantizationRangeOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] out_dtype = op_info.output_tensors[0].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) @@ -344,6 +382,7 @@ def __init__(self, op_info, **kwargs): to_eval = parser.get('to_eval', False) self._snippet = RequantizationRangeOpSnippet(inputs, outputs, out_dtype, ref_counts, to_eval) 
+ add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register @@ -352,8 +391,8 @@ class _RequantizeOperator(_Operator): op_type = "Requantize" def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] qout_dtype = op_info.output_tensors[0].dtype range_dtype = op_info.output_tensors[1].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, @@ -364,6 +403,7 @@ def __init__(self, op_info, **kwargs): qout_dtype, range_dtype, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _ReshapeOperator(_Operator): @@ -372,14 +412,16 @@ class _ReshapeOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) dtype = op_info.input_tensors[0].dtype self._snippet = ReshapeOpSnippet(inputs, output, dtype, ref_count, to_eval) + + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register @@ -389,8 +431,8 @@ class _QuantizedReshapeOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_counts = parser.get('ref_counts', []) @@ -400,6 +442,7 @@ def __init__(self, op_info, **kwargs): ref_counts=ref_counts, to_eval=to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _CMSIS_NN_FCOperator(_Operator): @@ -409,8 +452,8 @@ def __init__(self, op_info, **kwargs): _Operator.__init__(self) #import pdb; pdb.set_trace() # Note order of inputs/outputs is preserved - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) out_dtype = op_info.output_tensors[0].dtype in_dtypes = [tensor_info.dtype for tensor_info in op_info.input_tensors] assert (op_info.input_tensors[0].shape[1] == None or op_info.input_tensors[0].shape[1] == 1) @@ -425,6 +468,7 @@ def __init__(self, op_info, **kwargs): out_dtype=out_dtype, to_eval=to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _Conv2DOperator(_Operator): @@ -432,8 +476,8 @@ class _Conv2DOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in 
op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) in_dtype, filter_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype) out_dtype = op_info.output_tensors[0].dtype @@ -446,6 +490,7 @@ def __init__(self, op_info, **kwargs): self._snippet = Conv2DOpSnippent(inputs, output, strides, padding, in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype, ref_count=ref_count, to_eval=to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _FusedConv2DMaxpoolOperator(_Operator): @@ -453,8 +498,8 @@ class _FusedConv2DMaxpoolOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) in_dtype, filter_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype) out_dtype = op_info.output_tensors[0].dtype @@ -469,6 +514,8 @@ def __init__(self, op_info, **kwargs): in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype, ref_count=ref_count, to_eval=to_eval) + add_tensor_string_references(inputs, output, **kwargs) + @OperatorFactory.register class _QuantizedFusedConv2DMaxpoolOperator(_Operator): @@ -476,11 +523,11 @@ class _QuantizedFusedConv2DMaxpoolOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] in_dtype, filter_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype) - out_dtype = op_info.output_tensors[0].dtype + out_dtypes = [tensor_info.dtype for tensor_info in op_info.output_tensors] strides = op_info.op_attr['_utensor_conv']["strides"].value.ints_value ksize = op_info.op_attr['_utensor_pool']["ksize"].value.ints_value padding = op_info.op_attr['_utensor_conv']["padding"].value.decode('utf8') @@ -488,10 +535,12 @@ def __init__(self, op_info, **kwargs): op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) - self._snippet = QuantizedFusedConv2DMaxpoolOpSnippet(inputs, output, strides, ksize, padding, - in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtype=out_dtype, + self._snippet = QuantizedFusedConv2DMaxpoolOpSnippet(inputs, outputs, strides, ksize, padding, + in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes, ref_count=ref_count, to_eval=to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) + @OperatorFactory.register class _Conv2DQuantOperator(_Operator): @@ -499,8 +548,8 @@ class _Conv2DQuantOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in 
op_info.output_tensors] in_dtype, filter_dtype = (op_info.input_tensors[0].dtype, op_info.input_tensors[1].dtype) out_dtypes = [tensor_info.dtype for tensor_info in op_info.output_tensors] @@ -513,6 +562,7 @@ def __init__(self, op_info, **kwargs): self._snippet = Conv2DQuantOpSnippent(inputs, outputs, strides, padding, in_dtype=in_dtype, filter_dtype=filter_dtype, out_dtypes=out_dtypes, ref_counts=ref_counts, to_eval=to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _Uint8Q7OriginOperator(_Operator): @@ -520,13 +570,14 @@ class _Uint8Q7OriginOperator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) self._snippet = Uint8Q7OriginSnippet(inputs, output, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) #hard coding to uint8_t uint8_t int32_t for now @OperatorFactory.register @@ -536,8 +587,8 @@ class _QuantRangeForMultiplication_u8_u8_int32_Operator(_Operator): def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - outputs = [tensor_info.name for tensor_info in op_info.output_tensors] + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + outputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.output_tensors] if op_info.output_tensors[0].dtype != op_info.output_tensors[1].dtype: assert "output tensors must have the same data type" #output_type = op_info.output_tensors[0].dtype @@ -548,6 +599,7 @@ def __init__(self, op_info, **kwargs): ref_counts = parser.get('ref_counts', []) to_eval = parser.get('to_eval', False) self._snippet = QuantRangeForMultiplicationSnippet(inputs, outputs, output_type, ref_counts, to_eval) + add_tensor_string_references(inputs, outputs, **kwargs) @OperatorFactory.register class _InlineOperator(_Operator): @@ -556,7 +608,7 @@ class _InlineOperator(_Operator): def __init__(self, op_info, **kwargs): out_tensor_info = op_info.output_tensors[0] - out_tname, out_dtype, tensor_shape = (out_tensor_info.name, + out_tname, out_dtype, tensor_shape = (prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype, out_tensor_info.shape) parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, @@ -577,6 +629,7 @@ def __init__(self, op_info, **kwargs): value) weight_container = kwargs['weight_container'] weight_container.add_snippet(weight_snippet) + add_tensor_string_references([], out_tname, **kwargs) def _prepare_tensor_name(self, tensor_name): prepared = tensor_name.replace(":", "_").replace("/", "_") @@ -587,6 +640,8 @@ def _prepare_inline_array_name(self, tensor_name): preapred = "inline_{}".format(inline) return preapred + +# TODO check for correctness with cstring stuffs @OperatorFactory.register class _ConstOperator(_Operator): @@ -594,7 +649,7 @@ class _ConstOperator(_Operator): def __init__(self, op_info, **kwargs): out_tensor_info = op_info.output_tensors[0] - out_tname, out_dtype = (out_tensor_info.name, + out_tname, out_dtype = 
(prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype) parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) @@ -604,6 +659,7 @@ def __init__(self, op_info, **kwargs): idx_dir = kwargs['idx_dir'] embed_data_dir = kwargs.get('embed_data_dir', os.path.join("/fs", idx_dir)) + #out_tname = prepare_string_ref_name(out_tname) self._snippet = CreateTensorIdxSnippet(embed_data_dir, out_tname, idx_fname=idx_fname, np_dtype=out_dtype, @@ -611,6 +667,7 @@ def __init__(self, op_info, **kwargs): idx_path = os.path.join(idx_dir, idx_fname) value = op_info.op_attr['value'].value self._tf_save_data(idx_path, value) + add_tensor_string_references([], out_tname, **kwargs) def _tf_prepare_tensor_name(self, tensor_name): """Replace all ':' and '/' with '_' in a given tensor name @@ -633,108 +690,117 @@ class _RamOperator(_Operator): def __init__(self, op_info, **kwargs): out_tensor_info = op_info.output_tensors[0] - out_tname, out_dtype, tensor_shape = (out_tensor_info.name, + out_tname, out_dtype, tensor_shape = (prepare_string_ref_name(out_tensor_info.name), out_tensor_info.dtype, out_tensor_info.shape) parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] pre_tname = self._prepare_tensor_name(out_tname) + #out_tname = prepare_string_ref_name(out_tname) #inline_tname = self._prepare_inline_array_name(out_tname) #value = op_info.op_attr['value'].value.np_array.flatten() self._snippet = CreateTensorRamSnippet(out_tname, tensor_shape=tensor_shape, tf_dtype=out_dtype, sptr_name=pre_tname, ref_count=ref_count) + + add_tensor_string_references([], out_tname, **kwargs) + def _prepare_tensor_name(self, tensor_name): prepared = tensor_name.replace(":", "_").replace("/", "_") return prepared @OperatorFactory.register class _ShapeOperator(_Operator): - op_type = "Shape" + op_type = "Shape" - def __init__(self, op_info, **kwargs): - _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name - parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, - op_info.op_attr) - ref_count = parser.get('ref_counts', [0])[0] - to_eval = parser.get('to_eval', True) - out_dtype = op_info.output_tensors[0].dtype - self._snippet = ShapeOpSnippet(inputs, output, out_dtype, ref_count, to_eval) + def __init__(self, op_info, **kwargs): + _Operator.__init__(self) + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) + parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, + op_info.op_attr) + ref_count = parser.get('ref_counts', [0])[0] + to_eval = parser.get('to_eval', True) + out_dtype = op_info.output_tensors[0].dtype + self._snippet = ShapeOpSnippet(inputs, output, out_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _StridedSliceOperator(_Operator): - op_type = "StridedSlice" - - def __init__(self, op_info, **kwargs): - _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name - parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, - op_info.op_attr) - ref_count = parser.get('ref_counts', [0])[0] - to_eval = parser.get('to_eval', True) - dtype = op_info.input_tensors[0].dtype - out_dtype = op_info.output_tensors[0].dtype - begin_mask = 
op_info.op_attr['begin_mask'].value - ellipsis_mask = op_info.op_attr['ellipsis_mask'].value - end_mask = op_info.op_attr['end_mask'].value - new_axis_mask = op_info.op_attr['begin_mask'].value - shrink_axis_mask = op_info.op_attr['shrink_axis_mask'].value - self._snippet = StridedSliceOpSnippet(inputs, output, dtype, out_dtype, - begin_mask, ellipsis_mask, end_mask, - new_axis_mask, shrink_axis_mask, - ref_count, to_eval) + op_type = "StridedSlice" + + def __init__(self, op_info, **kwargs): + _Operator.__init__(self) + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) + parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, + op_info.op_attr) + ref_count = parser.get('ref_counts', [0])[0] + to_eval = parser.get('to_eval', True) + dtype = op_info.input_tensors[0].dtype + out_dtype = op_info.output_tensors[0].dtype + begin_mask = op_info.op_attr['begin_mask'].value + ellipsis_mask = op_info.op_attr['ellipsis_mask'].value + end_mask = op_info.op_attr['end_mask'].value + new_axis_mask = op_info.op_attr['new_axis_mask'].value + shrink_axis_mask = op_info.op_attr['shrink_axis_mask'].value + self._snippet = StridedSliceOpSnippet(inputs, output, dtype, out_dtype, + begin_mask, ellipsis_mask, end_mask, + new_axis_mask, shrink_axis_mask, + ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _PackOperator(_Operator): - op_type = "Pack" - - def __init__(self, op_info, **kwargs): - _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name - parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, - op_info.op_attr) - ref_count = parser.get('ref_counts', [0])[0] - to_eval = parser.get('to_eval', True) - dtype = op_info.input_tensors[0].dtype - out_dtype = op_info.output_tensors[0].dtype - N = op_info.op_attr['N'].value - axis = op_info.op_attr['axis'].value - self._snippet = PackOpSnippet(inputs, output, dtype, out_dtype, N, axis, ref_count, to_eval) + op_type = "Pack" + + def __init__(self, op_info, **kwargs): + _Operator.__init__(self) + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) + parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, + op_info.op_attr) + ref_count = parser.get('ref_counts', [0])[0] + to_eval = parser.get('to_eval', True) + dtype = op_info.input_tensors[0].dtype + out_dtype = op_info.output_tensors[0].dtype + N = op_info.op_attr['N'].value + axis = op_info.op_attr['axis'].value + self._snippet = PackOpSnippet(inputs, output, dtype, out_dtype, N, axis, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _SoftmaxOperator(_Operator): - op_type = "Softmax" + op_type = "Softmax" - def __init__(self, op_info, **kwargs): - _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name - parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, - op_info.op_attr) - ref_count = parser.get('ref_counts', [0])[0] - to_eval = parser.get('to_eval', True) - out_dtype = op_info.output_tensors[0].dtype - self._snippet = SoftmaxOpSnippet(inputs, output, out_dtype, ref_count, to_eval) + def __init__(self, op_info, **kwargs): + _Operator.__init__(self) + inputs =
[prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) + parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, + op_info.op_attr) + ref_count = parser.get('ref_counts', [0])[0] + to_eval = parser.get('to_eval', True) + out_dtype = op_info.output_tensors[0].dtype + self._snippet = SoftmaxOpSnippet(inputs, output, out_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) @OperatorFactory.register class _GatherOperator(_Operator): - op_type = "Gather" # tf op type + op_type = "GatherV2" # tf op type def __init__(self, op_info, **kwargs): _Operator.__init__(self) - inputs = [tensor_info.name for tensor_info in op_info.input_tensors] - output = op_info.output_tensors[0].name + inputs = [prepare_string_ref_name(tensor_info.name) for tensor_info in op_info.input_tensors] + output = prepare_string_ref_name(op_info.output_tensors[0].name) tf_dtype = op_info.input_tensors[0].dtype parser = NamescopedKWArgsParser(RefCntOptimizer.KWARGS_NAMESCOPE, op_info.op_attr) ref_count = parser.get('ref_counts', [0])[0] to_eval = parser.get('to_eval', False) self._snippet = GatherOpSnippet(inputs, output, tf_dtype, ref_count, to_eval) + add_tensor_string_references(inputs, output, **kwargs) diff --git a/utensor_cgen/backend/snippets/_snippets.py b/utensor_cgen/backend/snippets/_snippets.py index 2314a7a6..1521f1fc 100644 --- a/utensor_cgen/backend/snippets/_snippets.py +++ b/utensor_cgen/backend/snippets/_snippets.py @@ -18,12 +18,23 @@ "CommentSnippet", "ContextHeaderSnippet", "ContextSnippetsContainer", "QuantizedAddOpSnippet", "QuantizedMulOpSnippet", - "CreateTensorBinarySnippet", "WeightSnippet", + "CreateTensorBinarySnippet", "WeightSnippet", "TensorStringReferenceSnippet", "ContextGlobalArrayContainer", "QuantRangeForMultiplicationSnippet", "FusedConv2DOpMaxpoolSnippet", "QuantizedFusedConv2DMaxpoolOpSnippet", "GatherOpSnippet", "CreateTensorRamSnippet", "Uint8Q7OriginSnippet"] +# TODO: put this in the correct location +def mhash(mstr): + """ + Simple Java-style string hash, masked to 32 bits + """ + v = 7 + for c in mstr: + v = (v*31 + ord(c)) & 0xffffffff + return v + + # TODO: Better abstraction, i.e a better backend for code generation class CreateTensorIdxSnippet(Snippet): __template_name__ = "snippets/create_tensor_idx.cpp" @@ -666,21 +677,23 @@ def __init__(self, inputs, output, strides, ksize, padding, self.template_vars["to_eval"] = to_eval class QuantizedFusedConv2DMaxpoolOpSnippet(Snippet): - __template_name__ = "snippets/fused_conv2d_maxpool_op.cpp" + __template_name__ = "snippets/quantized_fused_conv2d_maxpool_op.cpp" __headers__ = set(['"uTensor/ops/MatrixOps.hpp"']) - def __init__(self, inputs, output, strides, ksize, padding, - in_dtype, filter_dtype, out_dtype, + def __init__(self, inputs, outputs, strides, ksize, padding, + in_dtype, filter_dtype, out_dtypes, ref_count=0, to_eval=False): Snippet.__init__(self) if ref_count: self.template_vars["ref_count"] = ref_count self.template_vars["inputs"] = inputs - self.template_vars["output"] = output + self.template_vars["outputs"] = outputs self.template_vars["in_dtype"] = NP_TYPES_MAP[in_dtype].tensor_type_str self.template_vars["filter_dtype"] = NP_TYPES_MAP[filter_dtype].tensor_type_str - self.template_vars["out_dtype"] = NP_TYPES_MAP[out_dtype].tensor_type_str + self.template_vars["out_dtypes"] = [NP_TYPES_MAP[out_dtype].tensor_type_str for out_dtype in out_dtypes]
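+    # one C type string per output tensor: the quantized kernel emits a (values, min, max) triplet, matching the three ctx.add() calls in the quantized templates below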
self.template_vars["strides"] = strides self.template_vars["ksize"] = ksize self.template_vars["padding"] = padding @@ -766,10 +779,38 @@ def __init__(self, guard_name, graph_name, placeholders=None): self.template_vars["graph_name"] = graph_name self.template_vars["placeholders"] = placeholders +class TensorStringReferenceSnippet(Snippet): + __template_name__ = "snippets/tensor_string_reference.hpp" + __headers__ = set([]) + __references__ = set([]) + + @classmethod + def add_reference(cls, sref_name): + cls.__references__.add(sref_name) + + @classmethod + def have_reference(cls, sref_name): + return sref_name in cls.__references__ + + def __init__(self, sref_name): + Snippet.__init__(self) + self.template_vars['sref_name'] = sref_name + self.template_vars['string_id'] = mhash(sref_name) + # Don't render duplicate references + self.renderable = not self.have_reference(sref_name) + self.add_reference(sref_name) + + def render(self): + if self.renderable: + return Snippet.render(self) + else: + return '' + class WeightSnippet(Snippet): __template_name__ = "snippets/weight_snippet.hpp" __headers__ = set([]) + def __init__(self, inline_name, type, shape, value): Snippet.__init__(self) length = np.prod(shape) diff --git a/utensor_cgen/backend/snippets/templates/containers/weight_header.hpp b/utensor_cgen/backend/snippets/templates/containers/weight_header.hpp index e1d89bcb..56e86eb3 100644 --- a/utensor_cgen/backend/snippets/templates/containers/weight_header.hpp +++ b/utensor_cgen/backend/snippets/templates/containers/weight_header.hpp @@ -1,3 +1,4 @@ +#include <stdint.h> {% for snippet in snippets%} {{snippet.render()}} {% endfor %} diff --git a/utensor_cgen/backend/snippets/templates/snippets/add_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/add_op.cpp index 7cf9db99..8009ac6e 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/add_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/add_op.cpp @@ -3,14 +3,14 @@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new AddOp<{{in_dtype}}, {{out_dtype}}>(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/argmax_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/argmax_op.cpp index 0e4ebe0f..5f5196a1 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/argmax_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/argmax_op.cpp @@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new ArgMaxOp<{{in_dtype}}, {{out_dtype}}>(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if create_sptr %} - {{sptr_name}} =
ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endif %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/cmsis_nn_fc_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/cmsis_nn_fc_op.cpp index dd871804..be36921a 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/cmsis_nn_fc_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/cmsis_nn_fc_op.cpp @@ -1,25 +1,25 @@ { {# // {%if ref_counts%} - // ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); - // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); - // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[1]}}, {{ref_counts[1]}}); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[2]}}, {{ref_counts[2]}}); // {%else%} - // ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}"); - // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}"); - // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[2]}}"); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[1]}}); + // ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[2]}}); // {%endif%} #} {%if ref_counts%} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_counts[0]}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_counts[0]}}); {%else%} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {%endif%} ctx.push(new FullyConnectedLayerCmsisOp<{{out_dtype}}>(), - { {%for tname in inputs[:-1] %}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {%for tname in inputs[:-1] %}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {%if to_eval%} ctx.eval(); {%endif%} diff --git a/utensor_cgen/backend/snippets/templates/snippets/cmsis_uint8q7origin_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/cmsis_uint8q7origin_op.cpp index a4cd9102..332428cf 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/cmsis_uint8q7origin_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/cmsis_uint8q7origin_op.cpp @@ -3,14 +3,14 @@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor(), "{{output}}"); + ctx.add(new RamTensor(), {{output}}); {% endif %} ctx.push(new Uint8Q7OriginOp(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/conv2d_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/conv2d_op.cpp index e9245688..bbde3d1d 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/conv2d_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/conv2d_op.cpp @@ -1,12 +1,12 @@ { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new 
RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new ConvOp<{{in_dtype}}, {{filter_dtype}}, {{out_dtype}}>({ {% for s in strides[:-1]%}{{s}}, {%endfor%}{{strides[-1]}} }, {{padding}}), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}"}); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}}}); {% if to_eval %} ctx.eval(); {% endif %} diff --git a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_binary.cpp b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_binary.cpp index 42b2f5bc..95f95cf0 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_binary.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_binary.cpp @@ -4,14 +4,14 @@ S_TENSOR {{sptr_name}}; { {%if ref_count%} ctx.add(new {{tensor_type}}<{{dtype}}>({{tensor_shape}}, {{inline_name}}), - "{{tensor_name}}", + {{ tensor_name }}, {{ref_count}}); {% else %} ctx.add(new {{tensor_type}}<{{dtype}}>({{tensor_shape}}, {{inline_name}}), - "{{tensor_name}}"); + {{ tensor_name }}); {%endif%} {% if create_sptr %} - {{sptr_name}} = ctx.get("{{tensor_name}}"); + {{sptr_name}} = ctx.get({{ tensor_name }}); {% endif %} {%if to_eval%} ctx.eval(); diff --git a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_idx.cpp b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_idx.cpp index f5f80865..53298e3b 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_idx.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_idx.cpp @@ -4,17 +4,17 @@ S_TENSOR {{sptr_name}}; { TensorIdxImporter t_import; {% if ref_count %} - ctx.add(t_import.{{importer_dtype}}_import("{{idx_path}}"), - "{{tensor_name}}", + ctx.add(t_import.{{importer_dtype}}_import("{{idx_path}}"), + {{tensor_name}}, {{ref_count}}); {% else %} - ctx.add(t_import.{{importer_dtype}}_import("{{idx_path}}"), - "{{tensor_name}}"); + ctx.add(t_import.{{importer_dtype}}_import("{{idx_path}}"), + {{tensor_name}}); {% endif %} {% if create_sptr %} - {{sptr_name}} = ctx.get("{{tensor_name}}"); + {{sptr_name}} = ctx.get({{tensor_name}}); {% endif %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_new.cpp b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_new.cpp index c00b2c0f..e97a9958 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/create_tensor_new.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/create_tensor_new.cpp @@ -2,11 +2,11 @@ S_TENSOR {{sptr_name}}; {% endif %} { - ctx.add(new {{tensor_type}}<{{dtype}}>({% if tensor_shape %}{{tensor_shape}}{%endif%}), "{{tensor_name}}"{%if ref_count%}, {{ref_count}}{%endif%}); + ctx.add(new {{tensor_type}}<{{dtype}}>({% if tensor_shape %}{{tensor_shape}}{%endif%}), {{tensor_name}}{%if ref_count%}, {{ref_count}}{%endif%}); {% if create_sptr %} - {{sptr_name}} = ctx.get("{{tensor_name}}"); + {{sptr_name}} = ctx.get({{tensor_name}}); {% endif %} {%if to_eval%} ctx.eval(); {%endif%} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/dequantize_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/dequantize_op.cpp index 58882518..944406cf 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/dequantize_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/dequantize_op.cpp @@ -3,17 +3,17
@@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new DequantizeOp(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if create_sptr %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endif %} {%if to_eval%} ctx.eval(); {%endif%} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/fused_conv2d_maxpool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/fused_conv2d_maxpool_op.cpp index f4418541..13618c34 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/fused_conv2d_maxpool_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/fused_conv2d_maxpool_op.cpp @@ -1,12 +1,12 @@ { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new FusedConvMaxpoolOp<{{in_dtype}}, {{filter_dtype}}, {{out_dtype}}>({ {% for s in strides[:-1]%}{{s}}, {%endfor%}{{strides[-1]}} }, { {% for s in ksize[:-1]%}{{s}}, {%endfor%}{{ksize[-1]}} },{{padding}}), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}"}); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}}}); {% if to_eval %} ctx.eval(); {% endif %} diff --git a/utensor_cgen/backend/snippets/templates/snippets/gather_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/gather_op.cpp index 57bcf4ac..e3c7f3e5 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/gather_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/gather_op.cpp @@ -3,13 +3,13 @@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new GatherOp<{{in_dtype}}>(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if to_eval %} ctx.eval(); {% endif %} diff --git a/utensor_cgen/backend/snippets/templates/snippets/matmul_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/matmul_op.cpp index d8243054..27ded788 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/matmul_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/matmul_op.cpp @@ -3,17 +3,17 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new MatMulOp<{{x_dtype}}, 
{{w_dtype}}, {{out_dtype}}>(), - { {%for tname in inputs[:-1] %}"{{tname}}", {% endfor %} "{{inputs[-1]}}" }, - { "{{output}}" }); + { {%for tname in inputs[:-1] %}{{tname}}, {% endfor %} {{inputs[-1]}} }, + { {{output}} }); {% for sptr_name, output in zip(sptr_names, outputs) %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endfor %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/max_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/max_op.cpp index 356a25cd..e37fe815 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/max_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/max_op.cpp @@ -9,17 +9,17 @@ S_TENSOR {{sptr_name}}; out_tensor = new RamTensor<{{out_dtype}}>(); {%endif%} {%if ref_count %} - ctx.add(out_tensor, "{{output}}", {{ref_count}}); + ctx.add(out_tensor, {{output}}, {{ref_count}}); {%else%} - ctx.add(out_tensor, "{{output}}"); + ctx.add(out_tensor, {{output}}); {%endif%} ctx.push(new MaxOp(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if create_sptr %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endif %} {%if to_eval%} ctx.eval(); {%endif%} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/max_pool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/max_pool_op.cpp index 0bd17e92..19607451 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/max_pool_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/max_pool_op.cpp @@ -3,22 +3,22 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{dtype}}>(), {{output}}); {% endif %} ctx.push(new MaxPoolingOp<{{dtype}}>({{wind_rows}}, {{wind_cols}}, {{row_stride}}, {{col_stride}}, {{padding}}), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {# {% if create_sptr %} #} {% for sptr_name, output in zip(sptr_names, outputs) %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endfor %} {# {% endif %} #} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/min_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/min_op.cpp index 4bc2aab4..fe7678ad 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/min_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/min_op.cpp @@ -9,17 +9,17 @@ S_TENSOR {{sptr_name}}; out_tensor = new RamTensor<{{out_dtype}}>(); {% endif %} {% if ref_count%} - ctx.add(out_tensor, "{{output}}", {{ref_count}}); + ctx.add(out_tensor, {{output}}, {{ref_count}}); {% else %} - ctx.add(out_tensor, "{{output}}"); + ctx.add(out_tensor, {{output}}); {% endif %} ctx.push(new MinOp(), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); 
{% if create_sptr %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endif %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/pack_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/pack_op.cpp index a15de170..ba62bec1 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/pack_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/pack_op.cpp @@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}}; {% endif %} { {% if ref_count %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}); {% endif %} ctx.push(new PackOp<{{dtype}}>({{N}}, {{axis}}), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { "{{output}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {{output}} }); {% if create_sptr %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endif %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/qadd_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qadd_op.cpp index 9a33c897..96d21524 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/qadd_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/qadd_op.cpp @@ -3,19 +3,19 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na {% endif %} { {% if ref_counts %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor({1}), "{{outputs[1]}}", {{ref_counts[1]}}); - ctx.add(new RamTensor({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}, {{ref_counts[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}, {{ref_counts[2]}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}"); - ctx.add(new RamTensor({1}), "{{outputs[1]}}"); - ctx.add(new RamTensor({1}), "{{outputs[2]}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}); {% endif %} ctx.push(new QuantizedAddOp<{{x_dtype}}, {{w_dtype}}, {{out_dtype}}>(), - { {%for tname in inputs[:-1] %}"{{tname}}", {% endfor %} "{{inputs[-1]}}" }, - { {%for tname in outputs[:-1] %}"{{tname}}", {% endfor %} "{{outputs[-1]}}" }); + { {%for tname in inputs[:-1] %}{{tname}}, {% endfor %} {{inputs[-1]}} }, + { {%for tname in outputs[:-1] %}{{tname}}, {% endfor %} {{outputs[-1]}} }); {% for sptr_name, output in zip(sptr_names, outputs) %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endfor %} {% if to_eval %} ctx.eval(); diff --git a/utensor_cgen/backend/snippets/templates/snippets/qconv2d_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qconv2d_op.cpp index 1c136eca..8a2e40b8 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/qconv2d_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/qconv2d_op.cpp @@ -1,17 +1,17 @@ { {% if ref_counts %} - ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); 
- ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[1]}}, {{ref_counts[1]}}); + ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), {{outputs[2]}}, {{ref_counts[2]}}); {% else %} - ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}"); - ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[1]}}"); - ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), "{{outputs[2]}}"); + ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}); + ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[1]}}); + ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), {{outputs[2]}}); {% endif %} ctx.push(new QntConvOp<{{in_dtype}}, {{filter_dtype}}, {{out_dtypes[0]}}>({ {% for s in strides[:-1]%}{{s}}, {%endfor%}{{strides[-1]}} }, {{padding}}), - { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { {% for tname in outputs[:-1]%}"{{tname}}", {%endfor%}"{{outputs[-1]}}" }); + { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {% for tname in outputs[:-1]%}{{tname}}, {%endfor%}{{outputs[-1]}} }); {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/qmatmul_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qmatmul_op.cpp index bf8320d1..9027c8b0 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/qmatmul_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/qmatmul_op.cpp @@ -3,21 +3,21 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na {% endif %} { {% if ref_counts %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor({1}), "{{outputs[1]}}", {{ref_counts[1]}}); - ctx.add(new RamTensor({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}, {{ref_counts[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}, {{ref_counts[2]}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}"); - ctx.add(new RamTensor({1}), "{{outputs[1]}}"); - ctx.add(new RamTensor({1}), "{{outputs[2]}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}); {% endif %} ctx.push(new QntMatMulOp<{{x_dtype}}, {{w_dtype}}, {{out_dtype}}>(), - { {%for tname in inputs[:-1] %}"{{tname}}", {% endfor %} "{{inputs[-1]}}" }, - { {%for tname in outputs[:-1] %}"{{tname}}", {% endfor %} "{{outputs[-1]}}" }); + { {%for tname in inputs[:-1] %}{{tname}}, {% endfor %} {{inputs[-1]}} }, + { {%for tname in outputs[:-1] %}{{tname}}, {% endfor %} {{outputs[-1]}} }); {% for sptr_name, output in zip(sptr_names, outputs) %} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endfor %} {% if to_eval %} ctx.eval(); {% endif %} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp index 209b8fb5..dbd088f4 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp @@ -3,26 +3,26 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na {% endif %} { {% if 
diff --git a/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp
index 209b8fb5..dbd088f4 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/qmax_pool_op.cpp
@@ -3,26 +3,26 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {% if ref_counts %}
-    ctx.add(new RamTensor<{{dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor<{{dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {% else %}
-    ctx.add(new RamTensor<{{dtype}}>(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor<{{dtype}}>(), {{outputs[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}});
 {% endif %}
     ctx.push(new QuantizedMaxPoolingOp<{{dtype}}>({{wind_rows}}, {{wind_cols}}, {{row_stride}}, {{col_stride}}, {{padding}}),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { {%for tname in outputs[:-1] %}"{{tname}}", {% endfor %} "{{outputs[-1]}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {%for tname in outputs[:-1] %}{{tname}}, {% endfor %} {{outputs[-1]}} });
 {# {% if create_sptr %} #}
 {% for sptr_name, output in zip(sptr_names, outputs) %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endfor %}
 {# {% endif %} #}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/qmul_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qmul_op.cpp
index e2c83757..29ee3d95 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/qmul_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/qmul_op.cpp
@@ -3,19 +3,19 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {% if ref_counts %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {% else %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}});
 {% endif %}
     ctx.push(new QuantizedMulOp<{{x_dtype}}, {{w_dtype}}, {{out_dtype}}>(),
-             { {%for tname in inputs[:-1] %}"{{tname}}", {% endfor %} "{{inputs[-1]}}" },
-             { {%for tname in outputs[:-1] %}"{{tname}}", {% endfor %} "{{outputs[-1]}}" });
+             { {%for tname in inputs[:-1] %}{{tname}}, {% endfor %} {{inputs[-1]}} },
+             { {%for tname in outputs[:-1] %}{{tname}}, {% endfor %} {{outputs[-1]}} });
 {% for sptr_name, output in zip(sptr_names, outputs) %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endfor %}
 {% if to_eval %}
 ctx.eval();
diff --git a/utensor_cgen/backend/snippets/templates/snippets/qrelu_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qrelu_op.cpp
index cac38f62..0240e994 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/qrelu_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/qrelu_op.cpp
@@ -3,19 +3,19 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {%if ref_counts%}
-    ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor<{{qout_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {%else%}
-    ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor<{{qout_dtype}}>(), {{outputs[0]}});
+    ctx.add(new RamTensor<{{out_dtypes[0]}}>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[2]}});
 {%endif%}
     ctx.push(new QuantizedReluOp<{{in_dtype}}, {{out_dtypes[0]}}, {{qout_dtype}}>(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" },
-             { {% for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {% endfor %}{{inputs[-1]}} },
+             { {% for tname in outputs[:-1]%}{{tname}}, {% endfor %}{{outputs[-1]}} });
 {% for sptr_name, output in zip(sptr_names, outputs) %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endfor %}
 {% if to_eval%}
 ctx.eval();
diff --git a/utensor_cgen/backend/snippets/templates/snippets/qreshape_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/qreshape_op.cpp
index dae714dc..b2e74724 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/qreshape_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/qreshape_op.cpp
@@ -1,17 +1,17 @@
 {
 {% if ref_counts%}
-    ctx.add(new RamTensor(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {% else %}
-    ctx.add(new RamTensor(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<float>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor(), {{outputs[0]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<float>({1}), {{outputs[2]}});
 {% endif %}
     ctx.push(new QuantizedReshapeOp(),
-             { {%for tname in inputs[:-1] %}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { {%for tname in outputs[:-1] %}"{{tname}}", {%endfor%}"{{outputs[-1]}}" });
+             { {%for tname in inputs[:-1] %}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {%for tname in outputs[:-1] %}{{tname}}, {%endfor%}{{outputs[-1]}} });
 {%if to_eval%}
 ctx.eval();
 {%endif%}
-}
\ No newline at end of file
+}
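Note on the list idiom these templates share: { {%for tname in inputs[:-1] %}{{tname}}, {%endfor%}{{inputs[-1]}} } renders a C++ brace-enclosed initializer list without a trailing comma, by looping over all but the last element and emitting the last one separately. For example, with inputs = ['sref_a_0', 'sref_b_0', 'sref_c_0'] the line renders as:

    { sref_a_0, sref_b_0, sref_c_0 }

(The inputs[-1] indexing would fail on an empty list, but every op snippet here has at least one input.)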
"{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor({1}), "{{outputs[1]}}", {{ref_counts[1]}}); - ctx.add(new RamTensor({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}, {{ref_counts[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}, {{ref_counts[2]}}); {% else %} - ctx.add(new RamTensor<{{out_dtype}}>(), "{{outputs[0]}}"); - ctx.add(new RamTensor({1}), "{{outputs[1]}}"); - ctx.add(new RamTensor({1}), "{{outputs[2]}}"); + ctx.add(new RamTensor<{{out_dtype}}>(), {{outputs[0]}}); + ctx.add(new RamTensor({1}), {{outputs[1]}}); + ctx.add(new RamTensor({1}), {{outputs[2]}}); {% endif %} ctx.push(new QuantizeV2Op(), - { {% for tname in inputs[:-1]%} "{{tname}}", {% endfor %}"{{inputs[-1]}}" }, - { {% for tname in outputs[:-1]%} "{{tname}}", {% endfor %}"{{outputs[-1]}}" }); + { {% for tname in inputs[:-1]%} {{tname}}, {% endfor %}{{inputs[-1]}} }, + { {% for tname in outputs[:-1]%} {{tname}}, {% endfor %}{{outputs[-1]}} }); {%for sptr_name, output in zip(sptr_names, outputs)%} - {{sptr_name}} = ctx.get("{{output}}"); + {{sptr_name}} = ctx.get({{output}}); {% endfor %} {% if to_eval %} ctx.eval(); {%endif%} -} \ No newline at end of file +} diff --git a/utensor_cgen/backend/snippets/templates/snippets/quant_range_for_multiplication_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/quant_range_for_multiplication_op.cpp index db922bbb..d3a8fe08 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/quant_range_for_multiplication_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/quant_range_for_multiplication_op.cpp @@ -1,15 +1,15 @@ { {%if ref_counts%} - ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); + ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[1]}}, {{ref_counts[1]}}); {%else%} - ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}"); - ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}"); + ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[0]}}); + ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[1]}}); {%endif%} ctx.push(new QuantRangeForMultiplicationOp(), - { {%for tname in inputs[:-1] %}"{{tname}}", {%endfor%}"{{inputs[-1]}}" }, - { {%for tname in outputs[:-1] %}"{{tname}}", {%endfor%}"{{outputs[-1]}}" }); + { {%for tname in inputs[:-1] %}{{tname}}, {%endfor%}{{inputs[-1]}} }, + { {%for tname in outputs[:-1] %}{{tname}}, {%endfor%}{{outputs[-1]}} }); {%if to_eval%} ctx.eval(); {%endif%} diff --git a/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp index 7aa4f07a..27859ae5 100644 --- a/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp +++ b/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp @@ -1,16 +1,16 @@ { {% if ref_counts %} - ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}", {{ref_counts[0]}}); - ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}}); - ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}}); + ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}, {{ref_counts[0]}}); + ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), 
diff --git a/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp
index 7aa4f07a..27859ae5 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/quantized_fused_conv2d_maxpool_op.cpp
@@ -1,16 +1,16 @@
 {
 {% if ref_counts %}
-    ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {% else %}
-    ctx.add(new RamTensor<{{out_dtypes[0]}}>(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor<{{out_dtypes[0]}}>(), {{outputs[0]}});
+    ctx.add(new RamTensor<{{out_dtypes[1]}}>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<{{out_dtypes[2]}}>({1}), {{outputs[2]}});
 {% endif %}
-    ctx.push(new QuantizedFusedConvMaxpoolOp<{{in_dtype}}, {{filter_dtype}}, {{out_dtype}}>({ {% for s in strides[:-1]%}{{s}}, {%endfor%}{{strides[-1]}} }, { {% for s in ksize[:-1]%}{{s}}, {%endfor%}{{ksize[-1]}} },{{padding}}),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { {% for tname in outputs[:-1]%}"{{tname}}", {%endfor%}"{{outputs[-1]}}" });
+    ctx.push(new QuantizedFusedConvMaxpoolOp<{{in_dtype}}, {{filter_dtype}}, {{out_dtypes[0]}}>({ {% for s in strides[:-1]%}{{s}}, {%endfor%}{{strides[-1]}} }, { {% for s in ksize[:-1]%}{{s}}, {%endfor%}{{ksize[-1]}} },{{padding}}),
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {% for tname in outputs[:-1]%}{{tname}}, {%endfor%}{{outputs[-1]}} });
 {% if to_eval %}
 ctx.eval();
 {% endif %}
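Note: besides swapping string names for references, the quantized_fused_conv2d_maxpool hunk above also fixes the op instantiation. The old line's {{out_dtype}} does not match the out_dtypes list this snippet is fed (the ctx.add lines index out_dtypes), so the third template argument now reads {{out_dtypes[0]}}, the dtype of outputs[0]. Rendered with illustrative values (the dtypes, strides, ksize, and sref_* names below are all assumptions), the corrected push could look like:

    ctx.push(new QuantizedFusedConvMaxpoolOp<uint8_t, uint8_t, uint8_t>({ 1, 2, 2, 1 }, { 1, 3, 3, 1 }, VALID),
             { sref_in_0, sref_filter_0, sref_in_min_0, sref_in_max_0, sref_filter_min_0, sref_filter_max_0 },
             { sref_out_0, sref_out_min_0, sref_out_max_0 });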
diff --git a/utensor_cgen/backend/snippets/templates/snippets/relu_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/relu_op.cpp
index 29b7c2fa..a9195255 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/relu_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/relu_op.cpp
@@ -3,17 +3,17 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {%if ref_count%}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}});
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}});
 {%else%}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}");
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}});
 {%endif%}
     ctx.push(new ReluOp<{{in_dtype}}, {{out_dtype}}>(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" },
-             { "{{output}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {% endfor %}{{inputs[-1]}} },
+             { {{output}} });
 {% for sptr_name, output in zip(sptr_names, outputs) %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endfor %}
 {% if to_eval%}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/requant_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/requant_op.cpp
index 4bac1905..c9cb20a5 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/requant_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/requant_op.cpp
@@ -3,21 +3,21 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {%if ref_counts%}
-    ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
-    ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[2]}}", {{ref_counts[2]}});
+    ctx.add(new RamTensor<{{qout_dtype}}>(), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<{{range_dtype}}>({1}), {{outputs[1]}}, {{ref_counts[1]}});
+    ctx.add(new RamTensor<{{range_dtype}}>({1}), {{outputs[2]}}, {{ref_counts[2]}});
 {%else%}
-    ctx.add(new RamTensor<{{qout_dtype}}>(), "{{outputs[0]}}");
-    ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[1]}}");
-    ctx.add(new RamTensor<{{range_dtype}}>({1}), "{{outputs[2]}}");
+    ctx.add(new RamTensor<{{qout_dtype}}>(), {{outputs[0]}});
+    ctx.add(new RamTensor<{{range_dtype}}>({1}), {{outputs[1]}});
+    ctx.add(new RamTensor<{{range_dtype}}>({1}), {{outputs[2]}});
 {%endif%}
     ctx.push(new RequantizeOp(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" },
-             { {% for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {% endfor %}{{inputs[-1]}} },
+             { {% for tname in outputs[:-1]%}{{tname}}, {% endfor %}{{outputs[-1]}} });
 {%for sptr_name, output in zip(sptr_names, outputs)%}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {%endfor%}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/requant_range_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/requant_range_op.cpp
index 97e126cf..33d5b74a 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/requant_range_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/requant_range_op.cpp
@@ -3,19 +3,19 @@ S_TENSOR {%for sptr_name in sptr_names[:-1]%}{{sptr_name}}, {%endfor%} {{sptr_na
 {% endif %}
 {
 {%if ref_counts%}
-    ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}", {{ref_counts[0]}});
-    ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}", {{ref_counts[1]}});
+    ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[0]}}, {{ref_counts[0]}});
+    ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[1]}}, {{ref_counts[1]}});
 {%else%}
-    ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[0]}}");
-    ctx.add(new RamTensor<{{out_dtype}}>({1}), "{{outputs[1]}}");
+    ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[0]}});
+    ctx.add(new RamTensor<{{out_dtype}}>({1}), {{outputs[1]}});
 {%endif%}
     ctx.push(new Requantization_RangeOp(),
-             { {%for tname in inputs[:-1]%}"{{tname}}", {% endfor %}"{{inputs[-1]}}" },
-             { {%for tname in outputs[:-1]%}"{{tname}}", {% endfor %}"{{outputs[-1]}}" });
+             { {%for tname in inputs[:-1]%}{{tname}}, {% endfor %}{{inputs[-1]}} },
+             { {%for tname in outputs[:-1]%}{{tname}}, {% endfor %}{{outputs[-1]}} });
 {% for sptr_name, output in zip(sptr_names, outputs) %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endfor %}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/reshape_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/reshape_op.cpp
index 092a5f3e..d05c112b 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/reshape_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/reshape_op.cpp
@@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}};
 {% endif %}
 {
 {% if ref_count %}
-    ctx.add(new RamTensor<{{dtype}}>(), "{{output}}", {{ref_count}});
+    ctx.add(new RamTensor<{{dtype}}>(), {{output}}, {{ref_count}});
 {% else %}
-    ctx.add(new RamTensor<{{dtype}}>(), "{{output}}");
+    ctx.add(new RamTensor<{{dtype}}>(), {{output}});
 {% endif %}
     ctx.push(new ReshapeOp(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { "{{output}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {{output}} });
 {% if create_sptr %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endif %}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/shape_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/shape_op.cpp
index c25d2b73..2e96e8be 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/shape_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/shape_op.cpp
@@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}};
 {% endif %}
 {
 {% if ref_count %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}});
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}});
 {% else %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}");
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}});
 {% endif %}
     ctx.push(new ShapeOp(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { "{{output}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {{output}} });
 {% if create_sptr %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endif %}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/softmax_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/softmax_op.cpp
index aab8cf95..cfe8a858 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/softmax_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/softmax_op.cpp
@@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}};
 {% endif %}
 {
 {% if ref_count %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}});
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}});
 {% else %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}");
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}});
 {% endif %}
     ctx.push(new SoftmaxOp(),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { "{{output}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {{output}} });
 {% if create_sptr %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endif %}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
diff --git a/utensor_cgen/backend/snippets/templates/snippets/strided_slice_op.cpp b/utensor_cgen/backend/snippets/templates/snippets/strided_slice_op.cpp
index d67e8629..1f901a54 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/strided_slice_op.cpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/strided_slice_op.cpp
@@ -3,17 +3,17 @@ S_TENSOR {{sptr_name}};
 {% endif %}
 {
 {% if ref_count %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}", {{ref_count}});
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}}, {{ref_count}});
 {% else %}
-    ctx.add(new RamTensor<{{out_dtype}}>(), "{{output}}");
+    ctx.add(new RamTensor<{{out_dtype}}>(), {{output}});
 {% endif %}
     ctx.push(new StridedSliceOp<{{dtype}}>({{begin_mask}}, {{ellipsis_mask}}, {{end_mask}}, {{new_axis_mask}}, {{shrink_axis_mask}}),
-             { {% for tname in inputs[:-1]%}"{{tname}}", {%endfor%}"{{inputs[-1]}}" },
-             { "{{output}}" });
+             { {% for tname in inputs[:-1]%}{{tname}}, {%endfor%}{{inputs[-1]}} },
+             { {{output}} });
 {% if create_sptr %}
-    {{sptr_name}} = ctx.get("{{output}}");
+    {{sptr_name}} = ctx.get({{output}});
 {% endif %}
 {% if to_eval %}
 ctx.eval();
 {% endif %}
-}
\ No newline at end of file
+}
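Note: for the single-output ops (reshape, shape, softmax, strided_slice) only the lookup key changes; when create_sptr is set, ctx.get now takes the uint32_t reference instead of a string. A sketch with hypothetical names, assuming a Context::get overload keyed by uint32_t that returns an S_TENSOR:

    S_TENSOR sptr_Softmax_0;
    {
        ctx.add(new RamTensor<float>(), sref_Softmax_0);
        ctx.push(new SoftmaxOp(), { sref_logits_0 }, { sref_Softmax_0 });
        sptr_Softmax_0 = ctx.get(sref_Softmax_0);
    }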
diff --git a/utensor_cgen/backend/snippets/templates/snippets/tensor_string_reference.hpp b/utensor_cgen/backend/snippets/templates/snippets/tensor_string_reference.hpp
new file mode 100644
index 00000000..2db9d1d9
--- /dev/null
+++ b/utensor_cgen/backend/snippets/templates/snippets/tensor_string_reference.hpp
@@ -0,0 +1 @@
+static const uint32_t {{ sref_name }} = {{ string_id }};
diff --git a/utensor_cgen/backend/snippets/templates/snippets/weight_snippet.hpp b/utensor_cgen/backend/snippets/templates/snippets/weight_snippet.hpp
index 2ccf15fc..10adc447 100644
--- a/utensor_cgen/backend/snippets/templates/snippets/weight_snippet.hpp
+++ b/utensor_cgen/backend/snippets/templates/snippets/weight_snippet.hpp
@@ -1,3 +1,3 @@
-#include
+//#include
 
 const {{ type }} {{ inline_name }} [ {{ length }} ] = { {% for item in value %} {{ item }}, {% endfor %} };
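Note: tensor_string_reference.hpp is the template behind the sref_* identifiers used throughout this patch. The generator emits one static const uint32_t per tensor name into the weight container, with {{ sref_name }} produced by prepare_string_ref_name in operators.py. For a tensor named "x:0", the emitted line could look like the following (the id value is hypothetical; the diff does not show how string_id values are assigned, e.g. hashes versus sequential indices):

    static const uint32_t sref_x_0 = 0;

The weight_snippet change simply comments out the #include at the top of the generated weights header, which this scheme no longer needs.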