
Commit 666239a

Use 'not in'
Signed-off-by: cyy <[email protected]>
1 parent 66b6fcc commit 666239a
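The change is purely stylistic: "not x in y" and "x not in y" perform the same membership test, but PEP 8 recommends the "not in" form because it reads as a single operator rather than a negation of the whole expression. A minimal sketch of the idiom (the dictionary and keys below are illustrative, not taken from the diff):

    # Both forms are equivalent; the second is the PEP 8-preferred spelling.
    state = {"iteration": 100}

    if not "lr" in state:      # old style: negates the entire 'in' test
        state["lr"] = 0.001

    if "lr" not in state:      # new style: dedicated 'not in' operator
        state["lr"] = 0.001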

File tree

13 files changed (+24, -24 lines)


deepspeed/checkpoint/deepspeed_checkpoint.py

Lines changed: 4 additions & 4 deletions
@@ -137,7 +137,7 @@ def get_final_norm_layer_id(self):
         return self.layer_keys[self.final_layer_norm_idx]
 
     def get_iteration(self):
-        if not ITERATION_KEY in self.global_state:
+        if ITERATION_KEY not in self.global_state:
             sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'), weights_only=False)
             self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0)
 
@@ -157,7 +157,7 @@ def get_embedding_files(self, tp_index: int) -> list:
         return self.tp_to_embedding_map[tp_index]
 
     def _get_checkpoint_value(self, key):
-        if not key in self.global_state:
+        if key not in self.global_state:
             sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu'), weights_only=False)
             self.global_state[key] = sd.get(key, None)
 
@@ -254,7 +254,7 @@ def _build_transformer_file_map(self):
             layer_file_partitions = partition_data(layer_files, self.tp_degree)
             for tp_index in range(self.tp_degree):
                 map_key = (tp_index, pp_index)
-                if not map_key in file_map.keys():
+                if map_key not in file_map.keys():
                     file_map[map_key] = []
                 file_map[map_key].append(layer_file_partitions[tp_index])
 
@@ -286,7 +286,7 @@ def _get_layer_keys(self):
     def _merge_state_dicts(self, sd_list):
         merged_sd = {}
         for key in sd_list[0].keys():
-            if not key in SEQUENTIAL_LAYERS:
+            if key not in SEQUENTIAL_LAYERS:
                 cat_dim = LAYER_CONCAT_DIM.get(key, 0)
                 merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim)
             else:

deepspeed/checkpoint/reshape_meg_2d.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def add_data(self, pp_index, tp_index, data):
         assert type(data) is list
 
         key = self._make_key(pp_index, tp_index)
-        if not key in self.map.keys():
+        if key not in self.map.keys():
             self.map[key] = []
         self.map[key] += data
 

deepspeed/compile/fx.py

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ def add_free_activations(graph_id: int, graph: Graph, activation_node_names: Lis
     def _should_free(node: Node) -> bool:
         if not hasattr(node, "meta"):
             return False
-        if not "tensor_meta" in node.meta:
+        if "tensor_meta" not in node.meta:
             return False
         return True
 

deepspeed/compile/passes/offload_activation.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def _gen():
 def _should_offload(node: Node) -> bool:
     if not hasattr(node, "meta"):
         return False
-    if not "tensor_meta" in node.meta:
+    if "tensor_meta" not in node.meta:
         return False
 
     return True

deepspeed/nvme/parse_nvme_stats.py

Lines changed: 1 addition & 1 deletion
@@ -101,7 +101,7 @@ def get_metric(file, metric):
 
 
 def validate_args(args):
-    if not args.metric in PERF_METRICS:
+    if args.metric not in PERF_METRICS:
         print(f'{args.metric} is not a valid performance metrics')
         return False
 

deepspeed/runtime/engine.py

Lines changed: 1 addition & 1 deletion
@@ -3198,7 +3198,7 @@ def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters
         if load_optimizer_states:
             deepspeed_states.append('optimizer')
 
-        client_state = {key: value for key, value in checkpoint.items() if not key in deepspeed_states}
+        client_state = {key: value for key, value in checkpoint.items() if key not in deepspeed_states}
 
         if optim_checkpoint is not None:
             client_state['optimizer'] = optim_checkpoint['optimizer']

deepspeed/runtime/lr_schedules.py

Lines changed: 4 additions & 4 deletions
@@ -209,7 +209,7 @@ def get_config_from_args(args):
     if not hasattr(args, LR_SCHEDULE) or args.lr_schedule is None:
         return None, '--{} not specified on command line'.format(LR_SCHEDULE)
 
-    if not args.lr_schedule in VALID_LR_SCHEDULES:
+    if args.lr_schedule not in VALID_LR_SCHEDULES:
         return None, '{} is not supported LR schedule'.format(args.lr_schedule)
 
     config = {}
@@ -227,16 +227,16 @@ def get_config_from_args(args):
 
 
 def get_lr_from_config(config):
-    if not 'type' in config:
+    if 'type' not in config:
         return None, 'LR schedule type not defined in config'
 
-    if not 'params' in config:
+    if 'params' not in config:
         return None, 'LR schedule params not defined in config'
 
     lr_schedule = config['type']
     lr_params = config['params']
 
-    if not lr_schedule in VALID_LR_SCHEDULES:
+    if lr_schedule not in VALID_LR_SCHEDULES:
         return None, '{} is not a valid LR schedule'.format(lr_schedule)
 
     if lr_schedule == LR_RANGE_TEST:

deepspeed/runtime/swap_tensor/optimizer_utils.py

Lines changed: 4 additions & 4 deletions
@@ -102,7 +102,7 @@ def get_swap_buffers_and_paths(self, pinned):
     def get_or_create_gradient_paths(self, offsets, lengths):
         gradient_paths = []
         for offset, length in zip(offsets, lengths):
-            if not offset in self.swapped_gradients.keys():
+            if offset not in self.swapped_gradients.keys():
                 path = os.path.join(self.swap_folder, f'{self.param_id}_gradient_{offset}_{length}.tensor.swp')
                 self.swapped_gradients[offset] = FlattenedTensorSwapInfo(path, length, offset)
 
@@ -233,7 +233,7 @@ def _flush_gradient_swapper(self, gradient_swapper):
         self.timer_names.update(gradient_swapper.get_timer_names())
 
     def _swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors, gradient_swapper):
-        if not OptimizerSwapper.parameter_id(parameter) in self.swap_params_info.keys():
+        if OptimizerSwapper.parameter_id(parameter) not in self.swap_params_info.keys():
             return
 
         swap_info = self.swap_params_info[OptimizerSwapper.parameter_id(parameter)]
@@ -471,7 +471,7 @@ def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer):
         )
 
     def _get_state_tensors(self, parameter):
-        if not parameter in self.optimizer.state:
+        if parameter not in self.optimizer.state:
             return []
 
         tensor_list = []
@@ -490,7 +490,7 @@ def _update_param_state_info(self, swap_info, parameter):
 
     def _create_param_swap_info(self, parameter, numel):
         param_id = OptimizerSwapper.parameter_id(parameter)
-        assert not param_id in self.swap_params_info
+        assert param_id not in self.swap_params_info
 
         self.swap_params_info[param_id] = OptimizerStateSwapInfo(parameter=parameter,
                                                                  numel=numel,

deepspeed/runtime/swap_tensor/utils.py

Lines changed: 2 additions & 2 deletions
@@ -30,7 +30,7 @@ def swap_out_tensors(swap_handle, tensor_buffers, swap_paths):
 def print_object(obj, name, exclude_list=[]):
     logger.info('{}:'.format(name))
     for arg in sorted(vars(obj)):
-        if not arg in exclude_list:
+        if arg not in exclude_list:
             dots = '.' * (29 - len(arg))
             logger.info(' {} {} {}'.format(arg, dots, getattr(obj, arg)))
 
@@ -55,7 +55,7 @@ def insert_tensor(self, tensor, swap_path, aligned_numel):
 
     def allocate_tensor(self, swap_path, numel, aligned_numel):
         assert self.has_space(aligned_numel)
-        assert not self.offset in self.swap_tensors
+        assert self.offset not in self.swap_tensors
 
         allocate_offset = self.offset
         swap_tensor = self.buffer.narrow(0, allocate_offset, aligned_numel)

deepspeed/runtime/zero/contiguous_memory_allocator.py

Lines changed: 1 addition & 1 deletion
@@ -85,7 +85,7 @@ def assign_to_param(self, tensor, param, numel, shape):
 
         assert tensor_id in self.tensor_map.keys(), "No such tensor allocated by the allocator."
         assert tensor.numel() >= numel, "Assert tensor buffer does is not large enough"
-        assert not tensor_id in self.id_to_params.keys(), "This tensor has already been assigned to a param"
+        assert tensor_id not in self.id_to_params.keys(), "This tensor has already been assigned to a param"
 
         self.id_to_params[tensor_id] = [param]
 
