[CodeStyle][Ruff][BUAA][G-[171-180]] Fix ruff RUF005 diagnostic for 10 files (#67492)


---------

Co-authored-by: Nyakku Shigure <[email protected]>
Wizard-ZP and SigureMo authored Aug 17, 2024
1 parent 6a70fdc commit 2ea8c3b
Showing 10 changed files with 26 additions and 25 deletions.
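
All ten files get the same treatment: ruff's RUF005 (collection-literal-concatenation) flags concatenating a literal collection with another iterable and suggests unpacking into a single literal instead. A minimal sketch of the before/after pattern, with illustrative names rather than ones taken from the changed files:

    # Before: concatenation builds an intermediate list (flagged by RUF005)
    image_shape = [224, 224, 3]
    data_shape = [-1] + image_shape

    # After: unpack the iterable directly into the new list literal
    data_shape = [-1, *image_shape]
    assert data_shape == [-1, 224, 224, 3]

For lists the two forms produce the same result; unpacking skips the intermediate list and also accepts any iterable on the right-hand side.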
5 changes: 3 additions & 2 deletions test/dygraph_to_static/simnet_dygraph_model.py
@@ -340,8 +340,9 @@ def _build_once(self, input):
             param_shape = [
                 reduce(
                     lambda a, b: a * b, input_shape[self._num_flatten_dims :], 1
-                )
-            ] + [self._size]
+                ),
+                self._size,
+            ]
             self.__w.append(
                 self.add_parameter(
                     '_w%d' % i,
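
As an aside, the reduce in this hunk simply takes the product of the dimensions being flattened; a standalone sketch of the same computation, with illustrative shape values:

    from functools import reduce

    input_shape = [8, 3, 4, 5]
    num_flatten_dims = 1
    size = 10

    # Product of all dimensions past the flatten point, seeded with 1
    flattened = reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
    param_shape = [flattened, size]
    assert param_shape == [60, 10]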
2 changes: 1 addition & 1 deletion test/dygraph_to_static/test_cycle_gan.py
@@ -544,7 +544,7 @@ def train(args):
 
     with base.dygraph.guard(place):
        max_images_num = args.max_images_num
-        data_shape = [-1] + args.image_shape
+        data_shape = [-1, *args.image_shape]
 
        random.seed(SEED)
        np.random.seed(SEED)
@@ -53,7 +53,7 @@ def __init__(self):
     # at transform time.
     @paddle.jit.not_to_static
     def forward_impl(self, x):
-        return paddle.concat([x] + self.extra_inputs, axis=0)
+        return paddle.concat([x, *self.extra_inputs], axis=0)
 
     def forward(self, x):
         return self.forward_impl(x)
2 changes: 1 addition & 1 deletion test/dygraph_to_static/test_tsm.py
@@ -204,7 +204,7 @@ def __init__(self, name_scope, config, mode):
         )
 
     def forward(self, inputs):
-        y = paddle.reshape(inputs, [-1] + self.reshape_list)
+        y = paddle.reshape(inputs, [-1, *self.reshape_list])
         y = self.conv(y)
         y = self.pool2d_max(y)
         for bottleneck_block in self.bottleneck_block_list:
2 changes: 1 addition & 1 deletion test/dygraph_to_static/transformer_dygraph_model.py
@@ -708,7 +708,7 @@ def beam_search(
     ):
         def expand_to_beam_size(tensor, beam_size):
             tensor = paddle.reshape(
-                tensor, [tensor.shape[0], 1] + list(tensor.shape[1:])
+                tensor, [tensor.shape[0], 1, *list(tensor.shape[1:])]
             )
             tile_dims = [-1] * len(tensor.shape)
             tile_dims[1] = beam_size
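
One nit the autofix leaves behind: once unpacking is used, the list() wrapper is redundant, because * accepts any iterable. A quick sketch of the equivalence, with a plain tuple standing in for tensor.shape:

    shape = (4, 16, 32)

    # Both build the same list; list() adds nothing once * does the unpacking
    a = [shape[0], 1, *list(shape[1:])]
    b = [shape[0], 1, *shape[1:]]
    assert a == b == [4, 1, 16, 32]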
4 changes: 2 additions & 2 deletions test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py
@@ -27,7 +27,7 @@ def setUp(self):
         self.init_data()
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
 
             out = paddle.transpose(x, perm=[0, 1, 2, 3])
@@ -36,7 +36,7 @@ def setUp(self):
             out = paddle.static.nn.fc(out, size=1)
 
         self.feeds = {
-            "x": np.random.random([self.bs] + self.shape_x).astype(
+            "x": np.random.random([self.bs, *self.shape_x]).astype(
                 self.d_type
             )
         }
16 changes: 8 additions & 8 deletions test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py
@@ -33,10 +33,10 @@ def init_data(self):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 2, 1, 3])
@@ -52,8 +52,8 @@ def setUp(self):
 
     def set_feeds(self, out):
         self.feeds = {
-            "x": np.random.random([self.bs] + self.shape_x).astype(self.d_type),
-            "y": np.random.random([self.bs] + self.shape_y).astype(self.d_type),
+            "x": np.random.random([self.bs, *self.shape_x]).astype(self.d_type),
+            "y": np.random.random([self.bs, *self.shape_y]).astype(self.d_type),
         }
         self.fetch_list = [out]
 
@@ -75,10 +75,10 @@ class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 1, 2, 3])
@@ -98,10 +98,10 @@ def init_data(self):
     def make_network(self):
         with base.program_guard(self.main_program, self.startup_program):
             x = paddle.static.data(
-                name='x', shape=[-1] + self.shape_x, dtype=self.d_type
+                name='x', shape=[-1, *self.shape_x], dtype=self.d_type
             )
             y = paddle.static.data(
-                name='y', shape=[-1] + self.shape_y, dtype=self.d_type
+                name='y', shape=[-1, *self.shape_y], dtype=self.d_type
             )
             out = paddle.matmul(x, y)
             out = paddle.transpose(out, perm=[0, 2, 1, 3])
2 changes: 1 addition & 1 deletion test/ir/inference/test_reshape2_matmul_fuse_pass.py
@@ -48,7 +48,7 @@ def sample_program_config(self, draw):
                 st.integers(min_value=1, max_value=10), min_size=2, max_size=2
             )
         )
-        x_shape = reshape + [1, 1]
+        x_shape = [*reshape, 1, 1]
 
         # 2. Generate attr:transpose_X/transpose_Y/alpha of matmul
         alpha = 1.0
8 changes: 4 additions & 4 deletions test/ir/inference/test_trt_convert_deformable_conv.py
@@ -64,7 +64,7 @@ def generate_input1(
             kernel_sizes: list[int],
             attrs: list[dict[str, Any]],
         ):
-            return np.random.random([batch, 3] + input_size).astype(np.float32)
+            return np.random.random([batch, 3, *input_size]).astype(np.float32)
 
         def generate_offset1(
             batch: int,
@@ -74,7 +74,7 @@ def generate_offset1(
         ):
             output_size = compute_output_size(input_size, kernel_sizes, attrs)
             return np.random.random(
-                [batch, 2 * np.prod(kernel_sizes)] + output_size
+                [batch, 2 * np.prod(kernel_sizes), *output_size]
             ).astype(np.float32)
 
         def generate_mask1(
@@ -85,7 +85,7 @@ def generate_mask1(
         ):
             output_size = compute_output_size(input_size, kernel_sizes, attrs)
             return np.random.random(
-                [batch, np.prod(kernel_sizes)] + output_size
+                [batch, np.prod(kernel_sizes), *output_size]
             ).astype(np.float32)
 
         def generate_filter1(
@@ -94,7 +94,7 @@ def generate_filter1(
             kernel_sizes: list[int],
             attrs: list[dict[str, Any]],
         ):
-            filter = np.random.random([6, 3] + kernel_sizes)
+            filter = np.random.random([6, 3, *kernel_sizes])
             filter[0][0][0][0] = 8.8978638e-08
             return filter.astype(np.float32)
 
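
For context on the shape arithmetic here: a deformable conv offset tensor carries two coordinates (x and y) per kernel sampling position, hence the 2 * np.prod(kernel_sizes) channel count. A quick check with illustrative values:

    import numpy as np

    kernel_sizes = [3, 3]
    batch = 2
    output_size = [10, 10]

    # 2 offsets per kernel position: 2 * 9 = 18 channels
    offset = np.random.random(
        [batch, 2 * np.prod(kernel_sizes), *output_size]
    ).astype(np.float32)
    assert offset.shape == (2, 18, 10, 10)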
8 changes: 4 additions & 4 deletions test/ir/inference/test_trt_skip_layernorm_fuse_pass.py
@@ -28,14 +28,14 @@
 class SkipLayernormFusePassTest(InferencePassTest):
     def setUp(self):
         self.set_args()
-        input_shape_with_batch = [self.batch_size] + self.input_shape
-        min_input_shape_with_batch = [1] + self.min_input_shape
+        input_shape_with_batch = [self.batch_size, *self.input_shape]
+        min_input_shape_with_batch = [1, *self.min_input_shape]
         with base.program_guard(self.main_program, self.startup_program):
             data1 = paddle.static.data(
-                name='data1', shape=[-1] + self.input_shape, dtype='float32'
+                name='data1', shape=[-1, *self.input_shape], dtype='float32'
             )
             data2 = paddle.static.data(
-                name='data2', shape=[-1] + self.input_shape, dtype='float32'
+                name='data2', shape=[-1, *self.input_shape], dtype='float32'
             )
             eltwise_out = paddle.add(data1, data2)
             out = paddle.nn.LayerNorm(eltwise_out.shape[-1:])(eltwise_out)
