diff --git a/python/paddle/base/backward.py b/python/paddle/base/backward.py index 563e423e0c7ea..d6c0a61a5c92b 100755 --- a/python/paddle/base/backward.py +++ b/python/paddle/base/backward.py @@ -2378,7 +2378,7 @@ def _find_op_path_( # If block is while block, dealing with op specifically again. # TODO(liym27): Consider special types of ops. for i, op in reversed(list(enumerate(block.ops))): - if relevant_op_flags[i] == False and _some_in_set_( + if relevant_op_flags[i] is False and _some_in_set_( op.desc.output_arg_names(), output_names ): relevant_op_flags[i] = True diff --git a/python/paddle/base/device_worker.py b/python/paddle/base/device_worker.py index 706febd44ba0e..2397ce39b97e4 100644 --- a/python/paddle/base/device_worker.py +++ b/python/paddle/base/device_worker.py @@ -457,7 +457,7 @@ def _gen_worker_desc(self, trainer_desc): if ( opt_info["use_cvm"] or "no_cvm" in opt_info - and opt_info["no_cvm"] == True + and opt_info["no_cvm"] is True ): sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ i @@ -567,7 +567,7 @@ def _gen_worker_desc(self, trainer_desc): if ( opt_info["use_cvm"] or "no_cvm" in opt_info - and opt_info["no_cvm"] == True + and opt_info["no_cvm"] is True ): sparse_table.emb_dim = self._fleet_desc.server_param.downpour_server_param.downpour_table_param[ i diff --git a/python/paddle/base/dygraph/base.py b/python/paddle/base/dygraph/base.py index 7edb748026d84..e3b299ee4eb4b 100644 --- a/python/paddle/base/dygraph/base.py +++ b/python/paddle/base/dygraph/base.py @@ -908,7 +908,7 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): # (2): when used in flask framework, it may result in hang. # Details: https://github.com/PaddlePaddle/Paddle/issues/26635 # So, we temporally diable the zero_copy strategy. - if zero_copy == True: + if zero_copy is True: warnings.warn( "Currently, zero_copy is not supported, and it will be discarded." 
) diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py index 9ea3d566c824a..3058fb55172b7 100755 --- a/python/paddle/base/executor.py +++ b/python/paddle/base/executor.py @@ -1692,7 +1692,7 @@ def _run_impl( if isinstance(program, Program) and program._heter_pipeline_opt: # print("program._heter_pipeline_opt: {}".format( # program._heter_pipeline_opt)) - ## change default executor + # change default executor heter_place = program._heter_pipeline_opt["heter_place"] heter_place = framework._get_paddle_place(heter_place) p = core.Place() @@ -1849,12 +1849,12 @@ def _run_impl( varobj = global_block.vars[varname] if ( - vardesc.persistable() == False + vardesc.persistable() is False and vardesc.type() == core.VarDesc.VarType.LOD_TENSOR - and vardesc.need_check_feed() == True - and varobj.stop_gradient == True - and varobj.is_data == True - and varobj.belong_to_optimizer == False + and vardesc.need_check_feed() is True + and varobj.stop_gradient is True + and varobj.is_data is True + and varobj.belong_to_optimizer is False and varname not in feed ): raise ValueError('Need feed data for variable %s' % varname) @@ -2146,7 +2146,7 @@ def _prepare_trainer( ): is_heter = 0 use_ps_gpu = 0 - if not program._fleet_opt is None: + if program._fleet_opt is not None: if program._fleet_opt.get("worker_class", "") == "HeterCpuWorker": is_heter = 1 if program._fleet_opt.get("trainer", "") == "HeterXpuTrainer": @@ -2272,7 +2272,7 @@ def _run_from_dataset( raise RuntimeError( "dataset is need and should be initialized" ) - ## change default executor + # change default executor heter_place = framework._get_paddle_place(heter_place) p = core.Place() p.set_place(heter_place) diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py index 0440af415a7d0..f847b535b0a5d 100644 --- a/python/paddle/base/framework.py +++ b/python/paddle/base/framework.py @@ -3011,7 +3011,7 @@ def __init__( if ( type == 'less_than' and op_attrs['force_cpu'] is not None - 
) or op_attrs['force_cpu'] != False: + ) or op_attrs['force_cpu'] is not False: warnings.warn( "The Attr(force_cpu) of Op(%s) will be deprecated in the future, " "please use 'device_guard' instead. 'device_guard' has higher priority when they are " @@ -4303,7 +4303,7 @@ def _rename_var(self, name, new_name): return var def _remove_var(self, name, sync=True): - if sync == True: + if sync is True: self._sync_with_cpp() self.desc._remove_var(name.encode()) del self.vars[name] @@ -4492,7 +4492,7 @@ def _remove_op(self, index, sync=True): Returns: None """ - if sync == True: + if sync is True: self._sync_with_cpp() self.desc._remove_op(index, index + 1) del self.ops[index] diff --git a/python/paddle/base/reader.py b/python/paddle/base/reader.py index 63b97ee2bd495..cf05e0c624ae8 100644 --- a/python/paddle/base/reader.py +++ b/python/paddle/base/reader.py @@ -46,7 +46,7 @@ import logging import warnings -### Dygraph DataLoader configs ### +# Dygraph DataLoader configs ### import multiprocessing import queue diff --git a/python/paddle/base/trainer_desc.py b/python/paddle/base/trainer_desc.py index 48cc427ac8e7e..c5845d00719db 100644 --- a/python/paddle/base/trainer_desc.py +++ b/python/paddle/base/trainer_desc.py @@ -119,7 +119,7 @@ def _set_infer(self, infer): def _set_fleet_desc(self, fleet_desc): self._fleet_desc = fleet_desc - ## serialize fleet_desc + # serialize fleet_desc from google.protobuf import text_format fleet_desc_str = text_format.MessageToString(fleet_desc) diff --git a/python/paddle/base/trainer_factory.py b/python/paddle/base/trainer_factory.py index cf197fab524e0..a7087460d645c 100644 --- a/python/paddle/base/trainer_factory.py +++ b/python/paddle/base/trainer_factory.py @@ -186,7 +186,7 @@ def handler_launch_func(self, scope, handler): elapsed_secs = 0 while True: self.running_lock.acquire() - if self.running == False: + if self.running is False: break if elapsed_secs < period_secs: # TODO(guru4elephant): needs customized condition diff --git 
a/python/paddle/base/variable_index.py b/python/paddle/base/variable_index.py index 1b3039c5a8cbe..e5fd3b055f9df 100644 --- a/python/paddle/base/variable_index.py +++ b/python/paddle/base/variable_index.py @@ -342,7 +342,7 @@ def get_value_for_bool_tensor(var, item): def idx_not_empty(var, item): from ..tensor import gather_nd - bool_2_idx = paddle.nonzero(item == True) + bool_2_idx = paddle.nonzero(item) return gather_nd(var, bool_2_idx) from paddle.static.nn import cond