
Commit cccc8c8

awaelchli and pre-commit-ci[bot] authored and committed
Remove deprecated auto_select_gpus Trainer argument (#16184)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 69d69bc commit cccc8c8
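
For code that still passes `auto_select_gpus`, the deprecation message removed by this commit points to `pytorch_lightning.accelerators.find_usable_cuda_devices` as the replacement. A minimal migration sketch (the device count of 2 is illustrative):

from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import find_usable_cuda_devices

# Before (no longer accepted after this commit):
# trainer = Trainer(accelerator="cuda", devices=2, auto_select_gpus=True)

# After: pick usable CUDA devices explicitly and hand the indices to the Trainer.
trainer = Trainer(accelerator="cuda", devices=find_usable_cuda_devices(2))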

7 files changed: 1 addition & 212 deletions

src/pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 23 deletions
@@ -75,10 +75,9 @@
     TPUSpawnStrategy,
 )
 from pytorch_lightning.strategies.ddp_spawn import _DDP_FORK_ALIASES
-from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _IPU_AVAILABLE
-from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
+from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn

 log = logging.getLogger(__name__)

@@ -98,7 +97,6 @@ def __init__(
         benchmark: Optional[bool] = None,
         replace_sampler_ddp: bool = True,
         deterministic: Optional[Union[bool, _LITERAL_WARN]] = False,
-        auto_select_gpus: Optional[bool] = None,  # TODO: Remove in v1.10.0
     ) -> None:
         """The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
         components such as the Accelerator and Precision plugins.
@@ -161,7 +159,6 @@ def __init__(
         self._parallel_devices: List[Union[int, torch.device, str]] = []
         self._layer_sync: Optional[LayerSync] = NativeSyncBatchNorm() if sync_batchnorm else None
         self.checkpoint_io: Optional[CheckpointIO] = None
-        self._auto_select_gpus: Optional[bool] = auto_select_gpus

         self._check_config_and_set_final_flags(
             strategy=strategy,
@@ -426,7 +423,6 @@ def _set_parallel_devices_and_init_accelerator(self) -> None:
         )

         self._set_devices_flag_if_auto_passed()
-        self._set_devices_flag_if_auto_select_gpus_passed()
         self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
         if not self._parallel_devices:
             self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)
@@ -435,24 +431,6 @@ def _set_devices_flag_if_auto_passed(self) -> None:
         if self._devices_flag == "auto" or self._devices_flag is None:
             self._devices_flag = self.accelerator.auto_device_count()

-    def _set_devices_flag_if_auto_select_gpus_passed(self) -> None:
-        if self._auto_select_gpus is not None:
-            rank_zero_deprecation(
-                "The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0 and will be removed in v1.10.0."
-                " Please use the function `pytorch_lightning.accelerators.find_usable_cuda_devices` instead."
-            )
-        if (
-            self._auto_select_gpus
-            and isinstance(self._devices_flag, int)
-            and isinstance(self.accelerator, CUDAAccelerator)
-        ):
-            self._devices_flag = pick_multiple_gpus(
-                self._devices_flag,
-                # we already show a deprecation message when user sets Trainer(auto_select_gpus=...)
-                _show_deprecation=False,
-            )
-            log.info(f"Auto select gpus: {self._devices_flag}")
-
     def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
         if isinstance(self._cluster_environment_flag, ClusterEnvironment):
             return self._cluster_environment_flag

src/pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 2 deletions
@@ -120,7 +120,6 @@ def __init__(
         gradient_clip_algorithm: Optional[str] = None,
         num_nodes: int = 1,
         devices: Optional[Union[List[int], str, int]] = None,
-        auto_select_gpus: Optional[bool] = None,  # TODO: Remove in 2.0
         enable_progress_bar: bool = True,
         overfit_batches: Union[int, float] = 0.0,
         track_grad_norm: Union[int, float, str] = -1,
@@ -363,7 +362,6 @@ def __init__(
             benchmark=benchmark,
             replace_sampler_ddp=replace_sampler_ddp,
             deterministic=deterministic,
-            auto_select_gpus=auto_select_gpus,
             precision=precision,
             plugins=plugins,
         )

src/pytorch_lightning/tuner/auto_gpu_select.py

Lines changed: 0 additions & 96 deletions
This file was deleted.
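
The deleted module housed the `pick_single_gpu` and `pick_multiple_gpus` helpers that the removed tests below exercise. As a rough, hedged approximation of what those helpers did (not the deleted implementation verbatim; the exact checks and messages differed):

import torch

from pytorch_lightning.utilities.exceptions import MisconfigurationException


def pick_single_gpu(exclude_gpus):
    # Return the first visible CUDA device, not in `exclude_gpus`, on which a
    # tiny allocation succeeds; raise RuntimeError when no device is usable.
    for i in range(torch.cuda.device_count()):
        if i in exclude_gpus:
            continue
        try:
            torch.ones(1, device=f"cuda:{i}")
        except RuntimeError:
            continue
        return i
    raise RuntimeError("No GPUs available.")


def pick_multiple_gpus(nb):
    # Asking for 0 GPUs, or for more than the machine exposes, is a misconfiguration.
    num_gpus = torch.cuda.device_count()
    if nb == 0:
        raise MisconfigurationException("Please select a valid number of GPUs > 0.")
    if nb > num_gpus:
        raise MisconfigurationException(f"You requested {nb} GPUs but your machine only has {num_gpus} GPUs.")
    picked = []
    for _ in range(nb):
        picked.append(pick_single_gpu(exclude_gpus=picked))
    return picked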

tests/tests_pytorch/conftest.py

Lines changed: 0 additions & 1 deletion
@@ -115,7 +115,6 @@ def reset_deterministic_algorithm():
 def mock_cuda_count(monkeypatch, n: int) -> None:
     monkeypatch.setattr(lightning_fabric.accelerators.cuda, "num_cuda_devices", lambda: n)
     monkeypatch.setattr(pytorch_lightning.accelerators.cuda, "num_cuda_devices", lambda: n)
-    monkeypatch.setattr(pytorch_lightning.tuner.auto_gpu_select, "num_cuda_devices", lambda: n)


 @pytest.fixture(scope="function")
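
The `mock_cuda_count` helper shown in this hunk fakes the number of visible CUDA devices so GPU-related Trainer behaviour can be tested on CPU-only machines. A minimal usage sketch (the test name, import path, and assertion are illustrative, assuming the helper is imported from this conftest module):

from pytorch_lightning import Trainer

from tests_pytorch.conftest import mock_cuda_count


def test_devices_parsed_with_mocked_gpus(monkeypatch):
    # Pretend the machine exposes 2 CUDA devices without requiring real hardware.
    mock_cuda_count(monkeypatch, 2)
    trainer = Trainer(accelerator="cuda", devices=2)
    assert trainer.num_devices == 2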

tests/tests_pytorch/deprecated_api/test_remove_1-10.py

Lines changed: 0 additions & 22 deletions
@@ -21,7 +21,6 @@
 from torch.utils.data import DataLoader

 import pytorch_lightning.profiler as profiler
-from lightning_fabric.utilities.exceptions import MisconfigurationException
 from pytorch_lightning import Trainer
 from pytorch_lightning.accelerators.cpu import CPUAccelerator
 from pytorch_lightning.cli import LightningCLI
@@ -34,7 +33,6 @@
 from pytorch_lightning.strategies.bagua import LightningBaguaModule
 from pytorch_lightning.strategies.utils import on_colab_kaggle
 from pytorch_lightning.trainer.states import RunningStage, TrainerFn
-from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus, pick_single_gpu
 from pytorch_lightning.utilities.apply_func import (
     apply_to_collection,
     apply_to_collections,
@@ -329,23 +327,3 @@ def test_profiler_classes_deprecated_warning(cls):
         f" Use .*profilers.{cls.__name__}` class instead."
     ):
         cls()
-
-
-def test_auto_select_gpus():
-    with pytest.deprecated_call(match="The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0"):
-        Trainer(auto_select_gpus=False)
-
-
-def test_pick_multiple_gpus():
-    with pytest.deprecated_call(match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"), pytest.raises(
-        MisconfigurationException
-    ):
-        pick_multiple_gpus(0)
-
-
-@mock.patch("pytorch_lightning.tuner.auto_gpu_select.num_cuda_devices", return_value=0)
-def test_pick_single_gpu(_):
-    with pytest.deprecated_call(match="The function `pick_single_gpu` has been deprecated in v1.9.0"), pytest.raises(
-        RuntimeError
-    ):
-        pick_single_gpu([])

tests/tests_pytorch/trainer/properties/test_auto_gpu_select.py

Lines changed: 0 additions & 59 deletions
This file was deleted.

tests/tests_pytorch/trainer/test_trainer.py

Lines changed: 0 additions & 9 deletions
@@ -1168,15 +1168,6 @@ def test_invalid_gradient_clip_algo(tmpdir):
         Trainer(default_root_dir=tmpdir, gradient_clip_algorithm="norm2")


-@RunIf(min_cuda_gpus=1)
-def test_invalid_gpu_choice_with_auto_select_gpus():
-    num_gpus = torch.cuda.device_count()
-    with pytest.raises(MisconfigurationException, match=r".*but your machine only has.*"), pytest.deprecated_call(
-        match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"
-    ):
-        Trainer(accelerator="gpu", devices=num_gpus + 1, auto_select_gpus=True)
-
-
 @pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.5, 5])
 def test_num_sanity_val_steps(tmpdir, limit_val_batches):
     """Test that the number of sanity check batches is clipped to `limit_val_batches`."""
