diff --git a/optimum/exporters/onnx/model_configs.py b/optimum/exporters/onnx/model_configs.py
index f1d451f97b5..2fb2a5c2952 100644
--- a/optimum/exporters/onnx/model_configs.py
+++ b/optimum/exporters/onnx/model_configs.py
@@ -1182,7 +1182,11 @@ def outputs(self) -> Dict[str, Dict[int, str]]:
         }


-class T5EncoderOnnxConfig(CLIPTextOnnxConfig):
+class T5EncoderOnnxConfig(TextEncoderOnnxConfig):
+    NORMALIZED_CONFIG_CLASS = NormalizedTextConfig
+    ATOL_FOR_VALIDATION = 1e-4
+    DEFAULT_ONNX_OPSET = 12  # int64 was supported since opset 12
+
     @property
     def inputs(self):
         return {
@@ -2120,9 +2124,9 @@ def outputs(self) -> Dict[str, Dict[int, str]]:
         # for Speech2text, we need to name the second axis as
         # encoder_sequence_length / 2 * self._config.num_conv_layers as the axis name is
         # used for dummy input generation
-        common_outputs["last_hidden_state"][
-            1
-        ] = f"{common_outputs['last_hidden_state'][1]} / {(2 * self._config.num_conv_layers)}"
+        common_outputs["last_hidden_state"][1] = (
+            f"{common_outputs['last_hidden_state'][1]} / {(2 * self._config.num_conv_layers)}"
+        )
         return common_outputs
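
Usage note (not part of the patch): a minimal sketch of exporting a standalone T5 text encoder with the updated config, assuming the public `optimum.exporters.onnx.export` helper and a Transformers `T5EncoderModel`; the checkpoint name and output path below are illustrative.

# Sketch only: exports a T5 encoder using the config class touched by this diff.
from pathlib import Path

from transformers import AutoConfig, T5EncoderModel

from optimum.exporters.onnx import export
from optimum.exporters.onnx.model_configs import T5EncoderOnnxConfig

model_id = "google/flan-t5-small"  # illustrative checkpoint
model = T5EncoderModel.from_pretrained(model_id)
config = AutoConfig.from_pretrained(model_id)

# With this change the config behaves as a plain text-encoder export config,
# validated at atol=1e-4 and exported with opset >= 12 (per the new class attributes).
onnx_config = T5EncoderOnnxConfig(config, task="feature-extraction")
export(model, onnx_config, Path("t5_encoder.onnx"), opset=onnx_config.DEFAULT_ONNX_OPSET)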