diff --git a/test/dygraph_to_static/simnet_dygraph_model.py b/test/dygraph_to_static/simnet_dygraph_model.py
index bbd8b8b7577e3..abcc49a84ed29 100644
--- a/test/dygraph_to_static/simnet_dygraph_model.py
+++ b/test/dygraph_to_static/simnet_dygraph_model.py
@@ -340,8 +340,9 @@ def _build_once(self, input):
             param_shape = [
                 reduce(
                     lambda a, b: a * b, input_shape[self._num_flatten_dims :], 1
-                )
-            ] + [self._size]
+                ),
+                self._size,
+            ]
             self.__w.append(
                 self.add_parameter(
                     '_w%d' % i,
diff --git a/test/dygraph_to_static/test_cycle_gan.py b/test/dygraph_to_static/test_cycle_gan.py
index 10ee815667391..c2b095292c70a 100644
--- a/test/dygraph_to_static/test_cycle_gan.py
+++ b/test/dygraph_to_static/test_cycle_gan.py
@@ -544,7 +544,7 @@ def train(args):
 
     with base.dygraph.guard(place):
         max_images_num = args.max_images_num
-        data_shape = [-1] + args.image_shape
+        data_shape = [-1, *args.image_shape]
 
         random.seed(SEED)
         np.random.seed(SEED)
diff --git a/test/dygraph_to_static/test_set_static_op_arg_pre_cast_hook.py b/test/dygraph_to_static/test_set_static_op_arg_pre_cast_hook.py
index 192f2134f63f6..b546ec9925874 100644
--- a/test/dygraph_to_static/test_set_static_op_arg_pre_cast_hook.py
+++ b/test/dygraph_to_static/test_set_static_op_arg_pre_cast_hook.py
@@ -53,7 +53,7 @@ def __init__(self):
     # at transform time.
     @paddle.jit.not_to_static
     def forward_impl(self, x):
-        return paddle.concat([x] + self.extra_inputs, axis=0)
+        return paddle.concat([x, *self.extra_inputs], axis=0)
 
     def forward(self, x):
         return self.forward_impl(x)
diff --git a/test/dygraph_to_static/test_tsm.py b/test/dygraph_to_static/test_tsm.py
index 21d3da6e24cf6..8cb6a005a98a6 100644
--- a/test/dygraph_to_static/test_tsm.py
+++ b/test/dygraph_to_static/test_tsm.py
@@ -204,7 +204,7 @@ def __init__(self, name_scope, config, mode):
         )
 
     def forward(self, inputs):
-        y = paddle.reshape(inputs, [-1] + self.reshape_list)
+        y = paddle.reshape(inputs, [-1, *self.reshape_list])
         y = self.conv(y)
         y = self.pool2d_max(y)
         for bottleneck_block in self.bottleneck_block_list:
diff --git a/test/dygraph_to_static/transformer_dygraph_model.py b/test/dygraph_to_static/transformer_dygraph_model.py
index a37de5f79ad9f..b998cc6a8d2fb 100644
--- a/test/dygraph_to_static/transformer_dygraph_model.py
+++ b/test/dygraph_to_static/transformer_dygraph_model.py
@@ -708,7 +708,7 @@ def beam_search(
     ):
         def expand_to_beam_size(tensor, beam_size):
            tensor = paddle.reshape(
-                tensor, [tensor.shape[0], 1] + list(tensor.shape[1:])
+                tensor, [tensor.shape[0], 1, *list(tensor.shape[1:])]
             )
             tile_dims = [-1] * len(tensor.shape)
             tile_dims[1] = beam_size
diff --git a/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py b/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
index ec6f71cd62436..d6b8cf62ce4e6 100644
--- a/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
+++ b/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
@@ -27,7 +27,7 @@ def setUp(self):
         self.init_data()
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             out = paddle.transpose(x, perm=[0, 1, 2, 3])
 
@@ -36,7 +36,7 @@ def setUp(self):
             out = paddle.static.nn.fc(out, size=1)
 
         self.feeds = {
-            "x": np.random.random([self.bs] + self.shape_x).astype(
+            "x": np.random.random([self.bs, *self.shape_x]).astype(
                 self.d_type
             )
         }
diff --git a/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py b/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
index 8ebc5d0564333..dd09001f338e6 100644
--- a/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
+++ b/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
@@ -33,10 +33,10 @@ def init_data(self):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 2, 1, 3])
@@ -52,8 +52,8 @@ def setUp(self):
 
     def set_feeds(self, out):
         self.feeds = {
-            "x": np.random.random([self.bs] + self.shape_x).astype(self.d_type),
-            "y": np.random.random([self.bs] + self.shape_y).astype(self.d_type),
+            "x": np.random.random([self.bs, *self.shape_x]).astype(self.d_type),
+            "y": np.random.random([self.bs, *self.shape_y]).astype(self.d_type),
         }
         self.fetch_list = [out]
 
@@ -75,10 +75,10 @@ class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 1, 2, 3])
@@ -98,10 +98,10 @@ def init_data(self):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 2, 1, 3])
diff --git a/test/ir/inference/test_reshape2_matmul_fuse_pass.py b/test/ir/inference/test_reshape2_matmul_fuse_pass.py
index 09dee9420df7f..178c7a604533f 100644
--- a/test/ir/inference/test_reshape2_matmul_fuse_pass.py
+++ b/test/ir/inference/test_reshape2_matmul_fuse_pass.py
@@ -48,7 +48,7 @@ def sample_program_config(self, draw):
                 st.integers(min_value=1, max_value=10), min_size=2, max_size=2
             )
         )
-        x_shape = reshape + [1, 1]
+        x_shape = [*reshape, 1, 1]
 
         # 2. Generate attr:transpose_X/transpose_Y/alpha of matmul
         alpha = 1.0
diff --git a/test/ir/inference/test_trt_convert_deformable_conv.py b/test/ir/inference/test_trt_convert_deformable_conv.py
index 5437682124bce..c26bc3f6eee02 100644
--- a/test/ir/inference/test_trt_convert_deformable_conv.py
+++ b/test/ir/inference/test_trt_convert_deformable_conv.py
@@ -62,7 +62,7 @@ def generate_input1(
             kernel_sizes: List[int],
             attrs: List[Dict[str, Any]],
         ):
-            return np.random.random([batch, 3] + input_size).astype(np.float32)
+            return np.random.random([batch, 3, *input_size]).astype(np.float32)
 
         def generate_offset1(
             batch: int,
@@ -72,7 +72,7 @@ def generate_offset1(
         ):
             output_size = compute_output_size(input_size, kernel_sizes, attrs)
             return np.random.random(
-                [batch, 2 * np.prod(kernel_sizes)] + output_size
+                [batch, 2 * np.prod(kernel_sizes), *output_size]
             ).astype(np.float32)
 
         def generate_mask1(
@@ -83,7 +83,7 @@ def generate_mask1(
         ):
             output_size = compute_output_size(input_size, kernel_sizes, attrs)
             return np.random.random(
-                [batch, np.prod(kernel_sizes)] + output_size
+                [batch, np.prod(kernel_sizes), *output_size]
             ).astype(np.float32)
 
         def generate_filter1(
@@ -92,7 +92,7 @@ def generate_filter1(
             kernel_sizes: List[int],
            attrs: List[Dict[str, Any]],
         ):
-            filter = np.random.random([6, 3] + kernel_sizes)
+            filter = np.random.random([6, 3, *kernel_sizes])
             filter[0][0][0][0] = 8.8978638e-08
             return filter.astype(np.float32)
diff --git a/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py b/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py
index 7fe31ae5b4034..b6cf8ea22b01c 100644
--- a/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py
+++ b/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py
@@ -28,14 +28,14 @@ class SkipLayernormFusePassTest(InferencePassTest):
     def setUp(self):
         self.set_args()
 
-        input_shape_with_batch = [self.batch_size] + self.input_shape
-        min_input_shape_with_batch = [1] + self.min_input_shape
+        input_shape_with_batch = [self.batch_size, *self.input_shape]
+        min_input_shape_with_batch = [1, *self.min_input_shape]
         with base.program_guard(self.main_program, self.startup_program):
             data1 = paddle.static.data(
-                name='data1', shape=[-1] + self.input_shape, dtype='float32'
+                name='data1', shape=[-1, *self.input_shape], dtype='float32'
             )
             data2 = paddle.static.data(
-                name='data2', shape=[-1] + self.input_shape, dtype='float32'
+                name='data2', shape=[-1, *self.input_shape], dtype='float32'
             )
             eltwise_out = paddle.add(data1, data2)
             out = paddle.nn.LayerNorm(eltwise_out.shape[-1:])(eltwise_out)