@@ -214,7 +214,8 @@ def test_dist_backend_accelerator_mapping(*_):
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=2)
-def test_ipython_incompatible_backend_error(_, monkeypatch):
+@mock.patch("lightning_lite.utilities.device_parser._get_all_available_mps_gpus", return_value=[0, 1])
+def test_ipython_incompatible_backend_error(_, __, monkeypatch):
     monkeypatch.setattr(pytorch_lightning.utilities, "_IS_INTERACTIVE", True)
     with pytest.raises(MisconfigurationException, match=r"strategy='ddp'\)`.*is not compatible"):
         Trainer(strategy="ddp", accelerator="gpu", devices=2)
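A note on the new `__` parameter above: stacked `@mock.patch` decorators each inject one mock argument, in bottom-up order, and pytest fixtures such as `monkeypatch` come after all injected mocks, so adding the second patch forces a second placeholder argument. A minimal, self-contained illustration of that ordering (the stdlib targets here are chosen for the example, not taken from this PR):

```python
from unittest import mock


# The decorator closest to the function is applied first, so its mock
# becomes the first positional argument; underscore names absorb mocks
# the test body never touches.
@mock.patch("os.cpu_count", return_value=8)  # injected second -> `__`
@mock.patch("os.getpid", return_value=1234)  # injected first  -> `_`
def show_injection_order(_, __):
    import os

    assert os.getpid() == 1234
    assert os.cpu_count() == 8


show_injection_order()
```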
@@ -252,6 +253,7 @@ def test_ipython_compatible_strategy_ddp_fork(monkeypatch):
     assert trainer.strategy.launcher.is_interactive_compatible


+@RunIf(mps=False)
 @pytest.mark.parametrize(
     ["strategy", "strategy_class"],
     [
@@ -462,7 +464,7 @@ def test_strategy_choice_ddp_fork_cpu():
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=2)
 @mock.patch("lightning_lite.utilities.device_parser.is_cuda_available", return_value=True)
 def test_strategy_choice_ddp(*_):
-    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="cuda", devices=1)
     assert isinstance(trainer.accelerator, CUDAAccelerator)
     assert isinstance(trainer.strategy, DDPStrategy)
     assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
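Why the `"gpu"` → `"cuda"` swaps in these hunks: on recent Lightning versions, `accelerator="gpu"` resolves to whichever GPU backend the host actually has (MPS on Apple Silicon, CUDA otherwise), while `accelerator="cuda"` pins the CUDA backend these assertions expect. A rough sketch of that resolution logic, assuming standard PyTorch availability checks rather than Lightning's actual `AcceleratorConnector` code:

```python
import torch


def resolve_gpu_flag() -> str:
    """Illustrative stand-in for how a generic "gpu" flag picks a backend."""
    mps = getattr(torch.backends, "mps", None)  # guard for older PyTorch
    if mps is not None and torch.backends.mps.is_available():
        return "mps"  # Apple-silicon hosts resolve "gpu" to MPS
    if torch.cuda.is_available():
        return "cuda"
    raise RuntimeError('accelerator="gpu" requested but no GPU was found')
```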
@@ -471,8 +473,8 @@ def test_strategy_choice_ddp(*_):
 @mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"})
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=2)
 @mock.patch("lightning_lite.utilities.device_parser.is_cuda_available", return_value=True)
-def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
-    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="gpu", devices=1)
+def test_strategy_choice_ddp_spawn(*_):
+    trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", accelerator="cuda", devices=1)
     assert isinstance(trainer.accelerator, CUDAAccelerator)
     assert isinstance(trainer.strategy, DDPSpawnStrategy)
     assert isinstance(trainer.strategy.cluster_environment, LightningEnvironment)
@@ -515,13 +517,10 @@ def test_strategy_choice_ddp_slurm(_, __, strategy, job_name, expected_env):
         "TORCHELASTIC_RUN_ID": "1",
     },
 )
-@mock.patch("torch.cuda.set_device")
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=2)
 @mock.patch("lightning_lite.utilities.device_parser.is_cuda_available", return_value=True)
-@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
-@mock.patch("lightning_lite.utilities.device_parser.is_cuda_available", return_value=True)
 def test_strategy_choice_ddp_te(*_):
-    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=2)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="cuda", devices=2)
     assert isinstance(trainer.accelerator, CUDAAccelerator)
     assert isinstance(trainer.strategy, DDPStrategy)
     assert isinstance(trainer.strategy.cluster_environment, TorchElasticEnvironment)
@@ -562,12 +561,10 @@ def test_strategy_choice_ddp_cpu_te(*_):
         "RANK": "1",
     },
 )
-@mock.patch("torch.cuda.set_device")
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=1)
 @mock.patch("lightning_lite.utilities.device_parser.is_cuda_available", return_value=True)
-@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp_kubeflow(*_):
-    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="gpu", devices=1)
+    trainer = Trainer(fast_dev_run=True, strategy="ddp", accelerator="cuda", devices=1)
     assert isinstance(trainer.accelerator, CUDAAccelerator)
     assert isinstance(trainer.strategy, DDPStrategy)
     assert isinstance(trainer.strategy.cluster_environment, KubeflowEnvironment)
@@ -780,10 +777,10 @@ def test_gpu_accelerator_backend_choice(expected_accelerator_flag, expected_accelerator_class):
     assert isinstance(trainer.accelerator, expected_accelerator_class)


+@RunIf(mps=False)
 @mock.patch("lightning_lite.utilities.device_parser.num_cuda_devices", return_value=1)
 def test_gpu_accelerator_backend_choice_cuda(_):
     trainer = Trainer(accelerator="gpu")
-
     assert trainer._accelerator_connector._accelerator_flag == "cuda"
     assert isinstance(trainer.accelerator, CUDAAccelerator)