
[Do not merge] Re arch support extra ops #109

Open · wants to merge 11 commits into base: develop
4 changes: 4 additions & 0 deletions utensor_cgen/backend/utensor/_graph_lower/_op_lower.py
@@ -40,6 +40,8 @@ class uTensorRearchGraphLower(uTensorGraphLowerBase):
    class OptypeRenameManager(object):
        NAME_MAP = {
            'Add': 'AddOperator',
            'Mul': 'MulOperator',
            'Sub': 'SubOperator',
            'Conv2D': 'ConvOperator',
            'MatMul': 'MatrixMultOperator'
        }
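The map above renames frontend op types (TensorFlow names on the left) to the rearch backend's operator class names. A minimal sketch of how such a map could be consulted during lowering; the `rename_op_types` helper is hypothetical and not part of this diff:

def rename_op_types(ugraph, name_map):
    # Hypothetical helper: rewrite each op's type in place, leaving
    # unmapped types untouched.
    for op_info in ugraph.ops_info.values():
        op_info.op_type = name_map.get(op_info.op_type, op_info.op_type)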
@@ -89,6 +91,8 @@ def apply(cls, ugraph):
        for op_info in ugraph.get_ops_by_type('FullyConnectedOperator'):
            if cls._check_quantized(op_info):
                op_info.code_gen_attributes['namespaces'] = ('TflmSymQuantOps',)
            else:
                op_info.code_gen_attributes['namespaces'] = ('ReferenceOperators',)

    @classmethod
    def _check_quantized(cls, op_info):
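        # The method body is collapsed in this diff view. A plausible
        # sketch, assuming quantization is detected from parameters on
        # the op's input tensors ('quant_params' is an illustrative
        # attribute name, not the real API):
        return any(
            getattr(tensor, 'quant_params', None) is not None
            for tensor in op_info.input_tensors
        )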
@@ -478,6 +478,69 @@ def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _ConvOperator(_CommonParams):
    op_type = "ConvOperator"

    @classmethod
    @must_return_type(Hashable)
    def get_constructor_parameters(cls, op_info):
        strides = [
            1,
            op_info.op_attr['StrideW'],
            op_info.op_attr['StrideH'],
            1,
        ]
        padding = cls._PADDING_MAP[op_info.op_attr['Padding']]
        strides_str = ','.join(map(str, strides))
        return ("{{ {} }}".format(strides_str), padding)

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.out_dtypes[0]],
            op_var_name=op_var_name,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return ConvOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.out_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
        )

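For concreteness, a hedged sketch of the tuple built by `get_constructor_parameters` above: NHWC strides rendered as a C++ initializer string, plus a symbolic padding name. The padding-map values below are assumptions for illustration; the real `_PADDING_MAP` is defined on `_CommonParams` outside this diff:

_PADDING_MAP = {1: 'SAME', 2: 'VALID'}  # assumed values, illustration only

def conv_constructor_params(op_attr):
    # Mirrors the logic in _ConvOperator.get_constructor_parameters.
    strides = [1, op_attr['StrideW'], op_attr['StrideH'], 1]
    return ('{{ {} }}'.format(','.join(map(str, strides))),
            _PADDING_MAP[op_attr['Padding']])

# conv_constructor_params({'StrideW': 2, 'StrideH': 2, 'Padding': 1})
# returns ('{ 1,2,2,1 }', 'SAME')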
@OperatorFactory.register
class _FullyConnectedOperator(_CommonParams):
    namespaces = ('ReferenceOperators',)
    op_type = "FullyConnectedOperator"

    @classmethod
    @must_return_type(Hashable)
    def get_constructor_parameters(cls, op_info):
        activation_idx = cls._ACTIVATION_STR_PATTERN.match(
            op_info.op_attr['FusedActivationFunction']
        ).group(1)
        activation = cls._ACTIVATION_MAP[activation_idx]
        return (activation,)

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.out_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return FullyConnectedSnippet(
            op_info=op_info,
            templ_dtypes=[self.out_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _QuantizedFullyConnectedOperator(_CommonParams):
@@ -521,3 +584,158 @@ def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return MissingOpEvalSnippet(op_info, tensor_var_map)

OperatorFactory._operators[_MissingOperator.op_type] = _MissingOperator

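`_MissingOperator` is registered by assigning into `OperatorFactory._operators` directly, while every other operator in this file goes through the `@OperatorFactory.register` decorator. Judging from that assignment, the decorator plausibly reduces to the sketch below (an assumption; the real implementation lives outside this diff):

class OperatorFactory(object):
    _operators = {}

    @classmethod
    def register(cls, op_cls):
        # Key each operator class by its declared op_type so the code
        # generator can look it up while walking the graph.
        cls._operators[op_cls.op_type] = op_cls
        return op_cls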
@OperatorFactory.register
class _BatchNormOperator(_CommonParams):
    namespaces = ('ReferenceOperators',)
    op_type = "BatchNormOperator"

    @classmethod
    @must_return_type(Hashable)
    def get_constructor_parameters(cls, op_info):
        strides = [
            1,
            op_info.op_attr['StrideW'],
            op_info.op_attr['StrideH'],
            1,
        ]
        padding = cls._PADDING_MAP[op_info.op_attr['Padding']]
        strides_str = ','.join(map(str, strides))
        return ("{{ {} }}".format(strides_str), padding)

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.out_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return BatchNormSnippet(
            op_info=op_info,
            templ_dtypes=[self.out_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _MeanOperator(_Operator):
    namespaces = ('ReferenceOperators',)
    op_type = "MeanOperator"

    @classmethod
    @must_return_type(Hashable)
    def get_constructor_parameters(cls, op_info):
        keep_dims = str(op_info.op_attr["keep_dims"])
        return (" {} ".format(keep_dims),)

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.out_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return MeanOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.out_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _SoftmaxOperator(_CommonParams):
    namespaces = ('ReferenceOperators',)
    op_type = "SoftmaxOperator"

    @classmethod
    @must_return_type(Hashable)
    def get_constructor_parameters(cls, op_info):
        beta = op_info.op_attr["Beta"]
        return (" %f " % beta,)

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.out_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return SoftmaxOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.out_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _MulOperator(_Operator):
    namespaces = ('ReferenceOperators',)
    op_type = 'MulOperator'

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.in_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return MulOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.in_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _SubOperator(_Operator):
    namespaces = ('ReferenceOperators',)
    op_type = 'SubOperator'

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.in_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return SubOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.in_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )

@OperatorFactory.register
class _SigmoidOperator(_Operator):
    namespaces = ('ReferenceOperators',)
    op_type = 'SigmoidOperator'

    def get_declare_snippet(self, op_var_name, tensor_var_map):
        return DeclareOpSnippet(
            op=self,
            templ_dtypes=[self.in_dtypes[0]],
            op_var_name=op_var_name,
            nested_namespaces=type(self).namespaces,
        )

    def get_eval_snippet(self, op_var_name, op_info, tensor_var_map):
        return SigmoidOpEvalSnippet(
            op_info=op_info,
            templ_dtypes=[self.in_dtypes[0]],
            op_name=op_var_name,
            tensor_var_map=tensor_var_map,
            nested_namespaces=type(self).namespaces,
        )
34 changes: 34 additions & 0 deletions utensor_cgen/backend/utensor/snippets/rearch/_snippets.py
@@ -28,8 +28,16 @@
"MinPoolEvalSnippet",
"MaxPoolEvalSnippet",
"QuantizedFullyConnectedSnippet",
"FullyConnectedSnippet",
"MissingOpEvalSnippet",
"BatchNormSnippet",
"TimeSlotContainer",
"MulOpEvalSnippet",
"SubOpEvalSnippet",
"ConvOpEvalSnippet",
"MeanOpEvalSnippet",
"SoftmaxOpEvalSnippet",
"SigmoidOpEvalSnippet",
"SimpleContainer",
]

@@ -156,6 +164,9 @@ class DepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
__inputs__ = ["in", "depthwise_filter", "pointwise_filter"]
__outputs__ = ["out"]

class ConvOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ["in", "filter"]
    __outputs__ = ["out"]

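Each eval-snippet class declares only `__inputs__` and `__outputs__`; the `OpEvalSnippet` base class evidently binds those names to the op's tensors when rendering the template. A rough sketch of that binding, under stated assumptions (the actual base-class logic is not shown in this diff):

def bind_template_tensors(snippet_cls, op_info):
    # Assumed behavior: pair declared names with the op's input and
    # output tensors, in order, so templates can reference them by name.
    template_vars = dict(zip(snippet_cls.__inputs__, op_info.input_tensors))
    template_vars.update(zip(snippet_cls.__outputs__, op_info.output_tensors))
    return template_vars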
class QuantDepthwiseSeperateConvOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ["in", "filter", "bias"]
@@ -226,11 +237,31 @@ class MaxPoolEvalSnippet(OpEvalSnippet):
__inputs__ = ["in"]
__outputs__ = ["out"]

class FullyConnectedSnippet(OpEvalSnippet):
    __inputs__ = ["input", "filter", "bias"]
    __outputs__ = ["output"]

class QuantizedFullyConnectedSnippet(OpEvalSnippet):
    __inputs__ = ["input", "filter", "bias"]
    __outputs__ = ["output"]

class BatchNormSnippet(OpEvalSnippet):
    __inputs__ = ["x", "mean", "variance", "offset", "scale"]
    __outputs__ = ["output"]

class MulOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ['a', 'b']
    __outputs__ = ['c']

class SubOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ['a', 'b']
    __outputs__ = ['c']

class MeanOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ['input', 'axis']
    __outputs__ = ['output']

class SoftmaxOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ['input']
    __outputs__ = ['output']


class MissingOpEvalSnippet(OpEvalSnippet):
    __template_name__ = "snippets/rearch/op_missing.cpp"
@@ -252,6 +283,9 @@ def __init__(self, op_info, tensor_var_map):
        ]
        self.template_vars['output_tensors'] = op_info.output_tensors[:]
        self.template_vars['quant_params_map'] = quant_params_map

class SigmoidOpEvalSnippet(OpEvalSnippet):
    __inputs__ = ['in']
    __outputs__ = ['out']


class TimeSlotContainer(SnippetBase):