[CodeStyle][task 8] enable Ruff C408 rule in python/paddle/base (PaddlePaddle#57864)

* [CodeStyle][task 8] enable Ruff C408 rule in python/paddle/base

* [CodeStyle][task 8] enable Ruff C408 rule in python/paddle/base

* [CodeStyle][task 8] enable Ruff C408 rule in python/paddle/base

* [CodeStyle][task 8] enable Ruff C408 rule in python/paddle/base
Kaedeharai authored and jiahy0825 committed Oct 16, 2023
1 parent 981423b commit 1637880
Showing 6 changed files with 28 additions and 29 deletions.
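Ruff's C408 rule (inherited from flake8-comprehensions, "unnecessary collection call") flags dict(), list(), and tuple() calls that can be written as literals. Every hunk in this diff applies that one mechanical rewrite. A minimal sketch of the before/after, using hypothetical variable names rather than code from the Paddle tree:

# Flagged by C408: building empty collections through a name lookup and a call.
caches = dict()
ops = list()

# The literal form preferred by the rule, as applied throughout this commit.
caches = {}
ops = []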
1 change: 0 additions & 1 deletion pyproject.toml
@@ -103,7 +103,6 @@ ignore = [

# Temporarily ignored
"python/paddle/base/**" = [
"C408",
"UP030",
"C405",
"B019", # Confirmation required
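Removing "C408" from the temporarily-ignored rules above turns the check on for everything under python/paddle/base, which is why the remaining files in this diff replace dict()/list() calls with literals. Beyond consistency, the literal skips a global name lookup and a function call, so it is also marginally faster; a quick, self-contained comparison (illustrative only, timings depend on the interpreter and machine):

import timeit

# Time one million constructions of an empty dict via the builtin call
# versus the literal syntax.
call_time = timeit.timeit("dict()", number=1_000_000)
literal_time = timeit.timeit("{}", number=1_000_000)
print(f"dict() call : {call_time:.3f}s")
print(f"{{}} literal : {literal_time:.3f}s")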
18 changes: 9 additions & 9 deletions python/paddle/base/backward.py
@@ -812,7 +812,7 @@ def insert_output(self, var):
assert isinstance(var, Var)
self.outputs.append(var)

- var_versions = dict()
+ var_versions = {}

def _create_node(name):
if name not in var_versions.keys():
@@ -1808,7 +1808,7 @@ def _rename_grad_(


def _get_stop_gradients_(program):
- no_grad_dict = dict()
+ no_grad_dict = {}
assert isinstance(program, framework.Program)
for block in program.blocks:
assert isinstance(block, framework.Block)
@@ -2032,7 +2032,7 @@ def append_backward(
for idx in son_parent_block_idx_dict:
block_fwd_op_num_dict[idx] = program.block(idx).desc.op_size()

- grad_to_var = dict()
+ grad_to_var = {}

# pass the cuda_graph_attr to the fill_constant which generates the loss_grad
op_desc = _create_loss_op_desc_(loss)
@@ -2046,7 +2046,7 @@ def append_backward(
map(_strip_grad_suffix_, no_grad_dict[block_idx])
)

- op_path_dict = dict()
+ op_path_dict = {}
op_path = _find_op_path_(
block, [loss], [], block_no_grad_set, op_path_dict
)
@@ -2109,7 +2109,7 @@ def append_backward(
grad_op_id_to_fwd_op=grad_op_id_to_fwd_op,
)

- grad_info_map = dict()
+ grad_info_map = {}

# if in control flow, target_grad_block is a created new block which only contains grad ops,
# so fwd_op_num is set to 0.
@@ -2310,7 +2310,7 @@ def _find_op_path_(
input_names = {inp.name for inp in inputs}
output_names = _get_output_names(block, targets)
if op_path_dict is None:
- op_path_dict = dict()
+ op_path_dict = {}

relevant_op_flags = [True] * len(block.ops)

@@ -2456,7 +2456,7 @@ def calc_gradient_helper(
raise ValueError("input must be in the same program as targets")
block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))

- op_path_dict = dict()
+ op_path_dict = {}
op_path = _find_op_path_(
block, targets, inputs, block_no_grad_set, op_path_dict
)
@@ -2507,8 +2507,8 @@ def calc_gradient_helper(
block_no_grad_set.update(no_grad_vars)

no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
- grad_to_var = dict()
- grad_info_map = dict()
+ grad_to_var = {}
+ grad_info_map = {}
_append_backward_ops_(
block,
op_path,
2 changes: 1 addition & 1 deletion python/paddle/base/default_scope_funcs.py
@@ -42,7 +42,7 @@ def get_cur_scope():
"""
cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None)
if cur_scope_stack is None:
- __tl_scope__.cur_scope = list()
+ __tl_scope__.cur_scope = []
if len(__tl_scope__.cur_scope) == 0:
__tl_scope__.cur_scope.append(paddle.base.core.Scope())
return __tl_scope__.cur_scope[-1]
16 changes: 8 additions & 8 deletions python/paddle/base/executor.py
@@ -1091,18 +1091,18 @@ def __init__(self, place=None):
self.place = expected_place
else:
self.place = framework._get_paddle_place(place)
- self.program_caches = dict()
- self.ctx_caches = dict()
- self.trainer_caches = dict()
- self.scope_caches = dict()
- self.micro_scope_cache = dict()
- self.var_caches = dict()
- self.pruned_program_caches = dict()
+ self.program_caches = {}
+ self.ctx_caches = {}
+ self.trainer_caches = {}
+ self.scope_caches = {}
+ self.micro_scope_cache = {}
+ self.var_caches = {}
+ self.pruned_program_caches = {}
p = core.Place()
p.set_place(self.place)
self._default_executor = core.Executor(p)
self._closed = False
- self.pruned_program_scope_caches = dict()
+ self.pruned_program_scope_caches = {}
self._prepare_to_run_called = False

self._auto_checkpoint_name = unique_name.generate(
16 changes: 8 additions & 8 deletions python/paddle/base/framework.py
@@ -1022,7 +1022,7 @@ def cuda_pinned_places(device_count=None):

class NameScope:
def __init__(self, name="", parent=None):
- self._children = dict()
+ self._children = {}
self._name = name
self._parent = parent

@@ -1218,7 +1218,7 @@ def _debug_string_(proto, throw_on_error=True):
Returns(str): The debug string of the protobuf message
"""
- error_fields = list()
+ error_fields = []
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError(
f"{error_fields} are not initialized.\nThe message is {proto}:\n"
@@ -2931,7 +2931,7 @@ def __init__(
# https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
op_attrs = attrs
if op_attrs is None:
- op_attrs = dict()
+ op_attrs = {}
del attrs

# attr for static graph mode cuda graph
@@ -3955,7 +3955,7 @@ class Block:
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
- self.ops = list() # operator list
+ self.ops = [] # operator list
self.program = program

def __str__(self):
@@ -4113,7 +4113,7 @@ def _find_var_recursive(self, name):
Returns:
Variable: the Variable with the giving name. Or None if not found.
"""
- frontier = list()
+ frontier = []
visited = set()

frontier.append(self)
@@ -5426,7 +5426,7 @@ def safe_remove_nodes(self, remove_nodes):

def resolve_hazard(self):
ordered_nodes = core.topology_sort(self.graph)
- var_nodes = dict()
+ var_nodes = {}
for node in ordered_nodes:
if node.is_op() and node.op() is not None:
for each_var_name in node.op().input_arg_names():
@@ -5483,7 +5483,7 @@ def build_adjacency_list(self):
dict{IrNode: set(IrNode)}: the adjacency list.
"""
adj_list = core.build_adjacency_list(self.graph)
- wrapped_adj_list = dict()
+ wrapped_adj_list = {}
for k, v in adj_list.items():
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
@@ -7121,7 +7121,7 @@ def condition(var):

var_list = filter(condition, self.list_vars())

- state_dict = dict()
+ state_dict = {}
for var in var_list:
var_temp = scope.find_var(var.name)
if var_temp is None:
4 changes: 2 additions & 2 deletions python/paddle/base/layers/layer_function_generator.py
@@ -214,7 +214,7 @@ def func(*args, **kwargs):

dtype = infer_and_check_dtype(op_proto, *args, **kwargs)

- inputs = dict()
+ inputs = {}
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
@@ -225,7 +225,7 @@ def func(*args, **kwargs):
args = args[1:]
inputs[ipt.name] = val

- outputs = dict()
+ outputs = {}
out = kwargs.pop(_convert_(o_name), [])
if out:
out_var = out[0] if (isinstance(out, (list, tuple))) else out
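For reference, C408 is not limited to the empty-constructor cases shown above; with Ruff's default settings it also rewrites keyword-argument dict() calls into literals. A small illustration (the attribute names are made up, not taken from Paddle):

# Also flagged by C408 under default settings.
attrs = dict(use_cudnn=True, axis=1)

# Suggested literal equivalent.
attrs = {"use_cudnn": True, "axis": 1}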
