[CodeStyle][Ruff][BUAA][G-[68-76]] Fix ruff RUF005 diagnostic for 9 files in `python/paddle/static`, `python/paddle/text` and `python/paddle/tensor` (PaddlePaddle#67333)

Fripping authored and Jeff114514 committed Aug 14, 2024
1 parent b4e6619 commit 6ef019c
Showing 9 changed files with 38 additions and 37 deletions.
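
All nine files receive the same class of fix: Ruff's RUF005 rule flags concatenation of collection literals (`[a] + b + [c]`) and prefers a single literal with iterable unpacking (`[a, *b, c]`), which avoids the intermediate lists each `+` allocates. A minimal sketch of the before/after pattern (names are illustrative, not taken from the diff):

    tokens = ["a", "b"]

    # Before: every "+" builds and discards an intermediate list.
    seq = ["<s>"] + tokens + ["<e>"]

    # After: one list literal with unpacking (RUF005-clean).
    seq = ["<s>", *tokens, "<e>"]
    assert seq == ["<s>", "a", "b", "<e>"]
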
15 changes: 8 additions & 7 deletions python/paddle/static/nn/common.py
@@ -212,8 +212,9 @@ def fc_base(
         if num_flatten_dims == -1:
             num_flatten_dims = len(input_shape) - 1
         param_shape = [
-            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
-        ] + [size]
+            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1),
+            size,
+        ]
         w = helper.create_parameter(
             attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False
         )
@@ -1027,7 +1028,7 @@ def _update_padding(padding, data_format):
 
     padding = _update_padding(padding, data_format)
 
-    filter_shape = [num_filters, int(num_filter_channels)] + filter_size
+    filter_shape = [num_filters, int(num_filter_channels), *filter_size]
 
     def _get_default_param_initializer():
         filter_elem_num = filter_size[0] * filter_size[1] * num_channels
@@ -1322,7 +1323,7 @@ def _update_padding(padding, data_format):
     padding = _update_padding(padding, data_format)
 
     input_shape = input.shape
-    filter_shape = [num_filters, num_filter_channels] + filter_size
+    filter_shape = [num_filters, num_filter_channels, *filter_size]
 
     def _get_default_param_initializer():
         filter_elem_num = (
@@ -1718,7 +1719,7 @@ def _update_padding(padding, data_format):
             f"but received the groups of input is {groups}"
         )
 
-    filter_shape = [input_channel, num_filters // groups] + filter_size
+    filter_shape = [input_channel, num_filters // groups, *filter_size]
 
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr
@@ -2076,7 +2077,7 @@ def _update_padding(padding, data_format):
             f"Received: Attr(num_filters) is {num_filters}, the groups is {groups}"
         )
 
-    filter_shape = [input_channel, num_filters // groups] + filter_size
+    filter_shape = [input_channel, num_filters // groups, *filter_size]
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr
     )
@@ -2292,7 +2293,7 @@ def deformable_conv(
     dilation = paddle.utils.convert_to_list(dilation, 2, 'dilation')
 
     input_shape = input.shape
-    filter_shape = [num_filters, int(num_filter_channels)] + filter_size
+    filter_shape = [num_filters, int(num_filter_channels), *filter_size]
 
     def _get_default_param_initializer():
         filter_elem_num = filter_size[0] * filter_size[1] * num_channels
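
A side benefit of the rewritten `filter_shape` lines, sketched below with made-up values: `list + other` requires `other` to also be a list, while unpacking accepts any iterable, so the new form keeps working if `filter_size` were ever a tuple (hypothetical here; `convert_to_list` returns a list in the real code):

    num_filters, num_filter_channels = 64, 3
    filter_size = (3, 3)  # hypothetical tuple; the real code passes a list

    # [num_filters, num_filter_channels] + filter_size would raise TypeError
    filter_shape = [num_filters, num_filter_channels, *filter_size]
    assert filter_shape == [64, 3, 3, 3]
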
8 changes: 4 additions & 4 deletions python/paddle/static/nn/control_flow.py
@@ -903,7 +903,7 @@ def _deal_with_undefined_var(output_vars, loop_vars):
 
     def create_var_like(o_var):
         if (
-            isinstance(o_var, (Variable,) + support_ret_buildin_type)
+            isinstance(o_var, (Variable, *support_ret_buildin_type))
             or o_var is None
         ):
             return create_undefined_variable()
@@ -1456,7 +1456,7 @@ def get_expected_precision(out_with_blocks):
     ]
 
     if any(isinstance(out, paddle.pir.Value) for out in outs) and all(
-        isinstance(out, (paddle.pir.Value,) + promotion_builtin_types)
+        isinstance(out, (paddle.pir.Value, *promotion_builtin_types))
         for out in outs
     ):
         warnings.warn(
@@ -1950,10 +1950,10 @@ def start_select_input():
         )
     elif (
         isinstance(false_var, UndefinedVar)
-        and isinstance(true_var, (Variable,) + support_ret_buildin_type)
+        and isinstance(true_var, (Variable, *support_ret_buildin_type))
     ) or (
         isinstance(true_var, UndefinedVar)
-        and isinstance(false_var, (Variable,) + support_ret_buildin_type)
+        and isinstance(false_var, (Variable, *support_ret_buildin_type))
     ):
         true_var, false_var = to_static_variable(true_var), to_static_variable(
             false_var
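
The same rewrite applies to the tuples passed to `isinstance`: `(Variable,) + support_ret_buildin_type` concatenates tuples, while `(Variable, *support_ret_buildin_type)` builds the tuple in one literal. A small sketch with stand-in members (the actual contents of `support_ret_buildin_type` are not shown in this diff):

    class Variable:  # stand-in for paddle's Variable
        pass

    support_ret_buildin_type = (int, float, bool)  # assumed members

    checked_types = (Variable, *support_ret_buildin_type)
    assert isinstance(3.14, checked_types)
    assert isinstance(Variable(), checked_types)
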
4 changes: 2 additions & 2 deletions python/paddle/static/quantization/adaround.py
@@ -270,12 +270,12 @@ def run_adaround(
     )
     orig_out_tensor = static.data(
         name='orig_out_tensor',
-        shape=(-1,) + fp32_fetch_list.shape,
+        shape=(-1, *fp32_fetch_list.shape),
         dtype='float32',
     )
     adaround_out_tensor = static.data(
         name='adaround_out_tensor',
-        shape=(-1,) + fp32_fetch_list.shape,
+        shape=(-1, *fp32_fetch_list.shape),
         dtype='float32',
     )
     beta_tensor = static.data(
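
Unpacking works inside tuple literals too: `(-1, *fp32_fetch_list.shape)` prepends a dynamic batch dimension to the fetched tensor's shape. With an assumed shape:

    fetched_shape = (10, 20)  # assumed for illustration
    data_shape = (-1, *fetched_shape)
    assert data_shape == (-1, 10, 20)
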
26 changes: 13 additions & 13 deletions python/paddle/tensor/einsum.py
@@ -221,7 +221,7 @@ def build_global_view(
     # Put all labels in alphabetical order
    concat = sorted(''.join(nop_labels).replace('.', ''))
    labels, count = [], []
-    for a, b in zip(['.'] + concat, concat):
+    for a, b in zip(['.', *concat], concat):
        if a != b:
            labels.append(b)
            count.append(1)
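
`zip(['.', *concat], concat)` pairs every sorted label with its predecessor, seeded with '.' so the first real label always starts a new group. A sketch of the run-length idea; the `else` branch that increments the count lies outside this hunk and is inferred here:

    concat = sorted("aabbc")
    labels, count = [], []
    for a, b in zip([".", *concat], concat):
        if a != b:          # first occurrence of a new label
            labels.append(b)
            count.append(1)
        else:               # inferred: repeat of the previous label
            count[-1] += 1
    assert labels == ["a", "b", "c"] and count == [2, 2, 1]
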
@@ -400,16 +400,16 @@ def plan_matmul(
         and k > 0
         and -1 not in np.concatenate((op1_vshape, op2_vshape))
     ):
-        op1_shape = (
-            list(op1_vshape[I])
-            + [np.prod(op1_vshape[J1])]
-            + [np.prod(op1_vshape[K])]
-        )
-        op2_shape = (
-            list(op2_vshape[I])
-            + [np.prod(op2_vshape[J2])]
-            + [np.prod(op2_vshape[K])]
-        )
+        op1_shape = [
+            *list(op1_vshape[I]),
+            np.prod(op1_vshape[J1]),
+            np.prod(op1_vshape[K]),
+        ]
+        op2_shape = [
+            *list(op2_vshape[I]),
+            np.prod(op2_vshape[J2]),
+            np.prod(op2_vshape[K]),
+        ]
 
         # Merge J dims and K dims by reshaping
         step = reshape, [var1], var1, op1_shape
@@ -465,14 +465,14 @@ def plan_matmul(
                 reshape,
                 [var1],
                 var1,
-                list(op1_vshape[I]) + [1] + [np.prod(op1_vshape[K])],
+                [*list(op1_vshape[I]), 1, np.prod(op1_vshape[K])],
             )
             plan.add_step(step)
             step = (
                 reshape,
                 [var2],
                 var2,
-                list(op2_vshape[I]) + [1] + [np.prod(op2_vshape[K])],
+                [*list(op2_vshape[I]), 1, np.prod(op2_vshape[K])],
             )
             plan.add_step(step)
             step = matmul, [var1, var2], var2, False, True
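
What these reshapes are for, per the in-diff comment about merging J and K dims: keep the batch axes I, flatten the J axes into one axis and the K axes into another, so a single batched matmul can contract over K. A hedged NumPy sketch with made-up shapes (the real code also handles transpose flags and Paddle variables):

    import numpy as np

    op1 = np.ones((2, 3, 4, 5))  # batch I=(2,), J1=(3, 4), K=(5,)
    op2 = np.ones((2, 5, 6))     # batch I=(2,), K=(5,), J2=(6,)

    # Merge J dims and K dims by reshaping, as the plan does.
    op1_flat = op1.reshape([2, np.prod((3, 4)), np.prod((5,))])
    assert op1_flat.shape == (2, 12, 5)

    out = op1_flat @ op2         # (2, 12, 5) @ (2, 5, 6) -> (2, 12, 6)
    assert out.shape == (2, 12, 6)
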
2 changes: 1 addition & 1 deletion python/paddle/tensor/manipulation.py
@@ -2881,7 +2881,7 @@ def _tensor_split_indices(x, total_n, indices, axis):
 
     starts = 0
     ends = 0
-    for idx in list(indices) + [total_n]:
+    for idx in [*list(indices), total_n]:
         ends = idx
         # convert index < 0 to positive
         starts_index = starts if starts >= 0 else total_n + starts
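
Appending `total_n` as a sentinel closes the final segment, so each loop iteration emits one [starts, ends) slice. A pure-Python walk with illustrative values:

    total_n, indices = 10, (3, 7)
    starts, segments = 0, []
    for ends in [*indices, total_n]:  # sentinel closes the last segment
        segments.append(list(range(starts, ends)))
        starts = ends
    assert segments == [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9]]
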
4 changes: 2 additions & 2 deletions python/paddle/tensor/math.py
@@ -2310,9 +2310,9 @@ def __check_input(x, y):
         x_shape = list(x.shape)
         y_shape = list(y.shape)
         if len(x_shape) == 1:
-            x_shape = [1] + x_shape
+            x_shape = [1, *x_shape]
         if len(y_shape) == 1:
-            y_shape = y_shape + [1]
+            y_shape = [*y_shape, 1]
 
         # check the inner 2 dimensions
         if x_shape[-1] != y_shape[-2]:
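
The promotion mirrors NumPy matmul semantics: a 1-D left operand becomes a row vector via `[1, *x_shape]` and a 1-D right operand a column vector via `[*y_shape, 1]` before the inner dimensions are compared. A sketch of the check:

    x_shape, y_shape = [3], [3]        # two 1-D operands
    if len(x_shape) == 1:
        x_shape = [1, *x_shape]        # row vector: (3,) -> (1, 3)
    if len(y_shape) == 1:
        y_shape = [*y_shape, 1]        # column vector: (3,) -> (3, 1)
    assert x_shape[-1] == y_shape[-2]  # inner dimensions agree
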
6 changes: 3 additions & 3 deletions python/paddle/text/datasets/imikolov.py
@@ -178,16 +178,16 @@ def _load_anno(self) -> None:
             for l in f:
                 if self.data_type == 'NGRAM':
                     assert self.window_size > -1, 'Invalid gram length'
-                    l = ['<s>'] + l.strip().split() + ['<e>']
+                    l = ["<s>", *l.strip().split(), "<e>"]
                     if len(l) >= self.window_size:
                         l = [self.word_idx.get(w, UNK) for w in l]
                         for i in range(self.window_size, len(l) + 1):
                             self.data.append(tuple(l[i - self.window_size : i]))
                 elif self.data_type == 'SEQ':
                     l = l.strip().split()
                     l = [self.word_idx.get(w, UNK) for w in l]
-                    src_seq = [self.word_idx['<s>']] + l
-                    trg_seq = l + [self.word_idx['<e>']]
+                    src_seq = [self.word_idx["<s>"], *l]
+                    trg_seq = [*l, self.word_idx["<e>"]]
                     if self.window_size > 0 and len(src_seq) > self.window_size:
                         continue
                     self.data.append((src_seq, trg_seq))
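
In the NGRAM branch, the rewritten line wraps each sentence with boundary tokens before sliding a fixed window over it. A sketch with `window_size = 3` and made-up text (word-to-index lookup omitted):

    window_size = 3
    line = "the cat sat"
    tokens = ["<s>", *line.strip().split(), "<e>"]
    grams = [
        tuple(tokens[i - window_size : i])
        for i in range(window_size, len(tokens) + 1)
    ]
    assert grams == [
        ("<s>", "the", "cat"),
        ("the", "cat", "sat"),
        ("cat", "sat", "<e>"),
    ]
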
6 changes: 3 additions & 3 deletions python/paddle/text/datasets/wmt14.py
@@ -183,7 +183,7 @@ def __to_dict(fd, size: int) -> dict[str, int]:
                 src_words = src_seq.split()
                 src_ids = [
                     self.src_dict.get(w, UNK_IDX)
-                    for w in [START] + src_words + [END]
+                    for w in [START, *src_words, END]
                 ]
 
                 trg_seq = line_split[1]  # one target sequence
@@ -193,8 +193,8 @@ def __to_dict(fd, size: int) -> dict[str, int]:
                 # remove sequence whose length > 80 in training mode
                 if len(src_ids) > 80 or len(trg_ids) > 80:
                     continue
-                trg_ids_next = trg_ids + [self.trg_dict[END]]
-                trg_ids = [self.trg_dict[START]] + trg_ids
+                trg_ids_next = [*trg_ids, self.trg_dict[END]]
+                trg_ids = [self.trg_dict[START], *trg_ids]
 
                 self.src_ids.append(src_ids)
                 self.trg_ids.append(trg_ids)
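
These two lines build the usual teacher-forcing pair: the decoder input is the target sequence with START prepended, and the per-step label sequence is the same ids shifted left with END appended, so the two stay equal in length. A sketch with assumed ids:

    START_ID, END_ID = 0, 1            # assumed vocabulary ids
    trg_ids = [5, 6, 7]                # a target sentence as word ids
    trg_ids_next = [*trg_ids, END_ID]  # labels:        5 6 7 END
    trg_ids = [START_ID, *trg_ids]     # decoder input: START 5 6 7
    assert len(trg_ids) == len(trg_ids_next) == 4
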
4 changes: 2 additions & 2 deletions python/paddle/text/datasets/wmt16.py
@@ -264,8 +264,8 @@ def _load_data(self) -> None:
                 trg_words = line_split[trg_col].split()
                 trg_ids = [self.trg_dict.get(w, unk_id) for w in trg_words]
 
-                trg_ids_next = trg_ids + [end_id]
-                trg_ids = [start_id] + trg_ids
+                trg_ids_next = [*trg_ids, end_id]
+                trg_ids = [start_id, *trg_ids]
 
                 self.src_ids.append(src_ids)
                 self.trg_ids.append(trg_ids)
