@@ -324,28 +324,6 @@ def configure_optimizers(self):
         return OSS(params=base_optimizer.param_groups, optim=type(base_optimizer), **base_optimizer.defaults)


-@RunIf(min_cuda_gpus=2, fairscale=True)
-@pytest.mark.parametrize("strategy", (pytest.param("ddp_sharded", marks=RunIf(standalone=True)), "ddp_sharded_spawn"))
-def test_ddp_sharded_strategy_checkpoint_multi_gpu_fairscale_optimizer(tmpdir, strategy):
-    """Test to ensure that checkpoint is saved correctly when using fairscale optimizers."""
-    model = BoringFairScaleOptimizerModel()
-    with pytest.deprecated_call(match="FairScale has been deprecated in v1.9.0"):
-        trainer = Trainer(accelerator="gpu", devices=2, strategy=strategy, max_steps=1)
-
-    trainer.fit(model)
-
-    checkpoint_path = os.path.join(tmpdir, "model.pt")
-    # need to broadcast because tmpdir is different on each process
-    checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
-    trainer.save_checkpoint(checkpoint_path)
-    trainer.strategy.barrier()  # ensure the checkpoint is saved before load
-    saved_model = BoringModel.load_from_checkpoint(checkpoint_path)
-
-    # Assert model parameters are identical after loading
-    for trained_param, loaded_param in zip(model.parameters(), saved_model.parameters()):
-        assert torch.equal(trained_param.to("cpu"), loaded_param)
-
-
 @RunIf(min_cuda_gpus=2, fairscale=True)
 def test_ddp_sharded_strategy_fit_ckpt_path_downsize_gpus(tmpdir):
     model = ModelWithAdamOptimizer()