|
| 1 | +import os |
| 2 | +from copy import deepcopy |
| 3 | +from functools import partial |
| 4 | +from unittest import mock |
| 5 | + |
| 6 | +import pytest |
| 7 | +from lightning_utilities.core.imports import module_available |
| 8 | +from tests_app.helpers.utils import no_warning_call |
| 9 | + |
| 10 | +import lightning_lite as ll |
| 11 | +from lightning_app.components.multi_node.lite import _LiteRunExecutor |
| 12 | + |
| 13 | + |
class DummyLite(ll.LightningLite):
    """Minimal ``LightningLite`` subclass used as a test fixture; ``run`` is a no-op."""

    def run(self):
        # Intentionally empty: tests only inspect the kwargs forwarded to __init__.
        pass
| 17 | + |
| 18 | + |
def dummy_callable(**kwargs):
    """Build a ``DummyLite`` and return the kwargs its (patched) ``__init__`` recorded."""
    return DummyLite(**kwargs)._all_passed_kwargs
| 22 | + |
| 23 | + |
def dummy_init(self, **kwargs):
    """Stand-in for ``LightningLite.__init__`` that simply records the kwargs it receives."""
    self._all_passed_kwargs = kwargs
| 26 | + |
| 27 | + |
def _get_args_after_tracer_injection(**kwargs):
    """Run ``_LiteRunExecutor.run`` with ``LightningLite.__init__`` stubbed out.

    The stub (``dummy_init``) records whatever keyword arguments the executor
    forwards to the Lite constructor, so tests can check how user arguments
    were rewritten.

    Returns:
        Tuple of (kwargs seen by ``LightningLite.__init__``,
        a deep-copied snapshot of ``os.environ`` taken after the run).
    """
    with mock.patch.object(ll.LightningLite, "__init__", dummy_init):
        injected_kwargs = _LiteRunExecutor.run(
            local_rank=0,
            work_run=partial(dummy_callable, **kwargs),
            main_address="1.2.3.4",
            main_port=5,
            node_rank=6,
            num_nodes=7,
            nprocs=8,
        )
        environ_snapshot = deepcopy(os.environ)
    return injected_kwargs, environ_snapshot
| 41 | + |
| 42 | + |
def check_lightning_lite_mps():
    """Return True only when ``lightning_lite`` is importable *and* an MPS accelerator is usable."""
    # Short-circuits to False when the package is missing, exactly like the
    # original early-return form.
    return module_available("lightning_lite") and ll.accelerators.MPSAccelerator.is_available()
| 47 | + |
| 48 | + |
@pytest.mark.skipif(not check_lightning_lite_mps(), reason="Lightning lite not available or mps not available")
@pytest.mark.parametrize("accelerator_given,accelerator_expected", [("cpu", "cpu"), ("auto", "cpu"), ("gpu", "cpu")])
def test_lite_run_executor_mps_forced_cpu(accelerator_given, accelerator_expected):
    """On MPS-capable machines the executor must force ``accelerator='cpu'``, warning when it rewrites."""
    warning_str = (
        r"Forcing accelerator=cpu as other accelerators \(specifically MPS\) are not supported "
        + "by PyTorch for distributed training on mps capable devices"
    )
    # The warning is expected only when the executor actually changes the accelerator.
    if accelerator_given == accelerator_expected:
        warning_context = no_warning_call(match=warning_str + "*")
    else:
        warning_context = pytest.warns(UserWarning, match=warning_str)

    with warning_context:
        injected, _ = _get_args_after_tracer_injection(accelerator=accelerator_given)
        assert injected["accelerator"] == accelerator_expected
| 64 | + |
| 65 | + |
@pytest.mark.parametrize(
    "args_given,args_expected",
    [
        ({"devices": 1, "num_nodes": 1, "accelerator": "gpu"}, {"devices": 8, "num_nodes": 7, "accelerator": "auto"}),
        ({"strategy": "ddp_spawn"}, {"strategy": "ddp"}),
        ({"strategy": "ddp_sharded_spawn"}, {"strategy": "ddp_sharded"}),
    ],
)
@pytest.mark.skipif(not module_available("lightning"), reason="Lightning is required for this test")
def test_trainer_run_executor_arguments_choices(args_given: dict, args_expected: dict):
    """The executor must rewrite user-supplied Lite kwargs and export the torchelastic env vars."""
    # ddp with mps devices not available (tested separately, just patching here for cross-os testing of other args)
    if ll.accelerators.MPSAccelerator.is_available():
        args_expected["accelerator"] = "cpu"

    injected, env_vars = _get_args_after_tracer_injection(**args_given)

    for name, expected in args_expected.items():
        assert injected[name] == expected

    # Env derived from the helper's fixed values: node_rank=6, num_nodes=7, nprocs=8, local_rank=0.
    expected_env = {
        "MASTER_ADDR": "1.2.3.4",
        "MASTER_PORT": "5",
        "GROUP_RANK": "6",
        "RANK": str(0 + 6 * 8),
        "LOCAL_RANK": "0",
        "WORLD_SIZE": str(7 * 8),
        "LOCAL_WORLD_SIZE": "8",
        "TORCHELASTIC_RUN_ID": "1",
        "LT_CLI_USED": "1",
    }
    for var, value in expected_env.items():
        assert env_vars[var] == value
| 95 | + |
| 96 | + |
@pytest.mark.skipif(not module_available("lightning"), reason="Lightning not available")
def test_lite_run_executor_invalid_strategy_instances():
    """Passing spawn-family strategy *instances* must raise a ValueError."""
    # Instantiate lazily inside the loop, mirroring the original per-case construction.
    for strategy_cls in (ll.strategies.DDPSpawnStrategy, ll.strategies.DDPSpawnShardedStrategy):
        with pytest.raises(ValueError, match="DDP Spawned strategies aren't supported yet."):
            _get_args_after_tracer_injection(strategy=strategy_cls())
0 commit comments