
Commit 9f0116f

awaelchli and carmocca authored and committed
Remove the deprecated auto_select_gpus Trainer argument (#16184)
Co-authored-by: Carlos Mocholí <[email protected]>
1 parent d3b0481 commit 9f0116f

8 files changed: +5 −212 lines changed

src/pytorch_lightning/CHANGELOG.md

Lines changed: 4 additions & 0 deletions
@@ -50,6 +50,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Removed the deprecated `resume_from_checkpoint` Trainer argument ([#16167](https://github.com/Lightning-AI/lightning/pull/16167))

+- Removed the deprecated automatic GPU selection ([#16184](https://github.com/Lightning-AI/lightning/pull/16184))
+  * Removed the `Trainer(auto_select_gpus=...)` argument
+  * Removed the `pytorch_lightning.tuner.auto_gpu_select.{pick_single_gpu,pick_multiple_gpus}` functions
+

 ### Fixed
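For context, the deprecation message removed further down in this commit pointed users to `pytorch_lightning.accelerators.find_usable_cuda_devices` as the replacement. A minimal migration sketch (assuming PyTorch Lightning >= 1.9, where `find_usable_cuda_devices(n)` returns a list of usable CUDA device indices):

    from pytorch_lightning import Trainer
    from pytorch_lightning.accelerators import find_usable_cuda_devices

    # Before (removed by this commit): the Trainer probed for free GPUs itself.
    # trainer = Trainer(accelerator="cuda", devices=2, auto_select_gpus=True)

    # After: pick usable CUDA devices explicitly and pass the list to `devices=`.
    trainer = Trainer(accelerator="cuda", devices=find_usable_cuda_devices(2))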

src/pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 23 deletions
@@ -75,10 +75,9 @@
     TPUSpawnStrategy,
 )
 from pytorch_lightning.strategies.ddp_spawn import _DDP_FORK_ALIASES
-from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _IPU_AVAILABLE
-from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
+from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn

 log = logging.getLogger(__name__)

@@ -101,7 +100,6 @@ def __init__(
         benchmark: Optional[bool] = None,
         replace_sampler_ddp: bool = True,
         deterministic: Optional[Union[bool, _LITERAL_WARN]] = False,
-        auto_select_gpus: Optional[bool] = None,  # TODO: Remove in v2.0.0
     ) -> None:
         """The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
         components such as the Accelerator and Precision plugins.
@@ -163,7 +161,6 @@ def __init__(
         self._parallel_devices: List[Union[int, torch.device, str]] = []
         self._layer_sync: Optional[LayerSync] = NativeSyncBatchNorm() if sync_batchnorm else None
         self.checkpoint_io: Optional[CheckpointIO] = None
-        self._auto_select_gpus: Optional[bool] = auto_select_gpus  # TODO: Remove in v2.0.0

         self._check_config_and_set_final_flags(
             strategy=strategy,
@@ -442,7 +439,6 @@ def _set_parallel_devices_and_init_accelerator(self) -> None:
         )

         self._set_devices_flag_if_auto_passed()
-        self._set_devices_flag_if_auto_select_gpus_passed()
         self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
         if not self._parallel_devices:
             self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)
@@ -451,24 +447,6 @@ def _set_devices_flag_if_auto_passed(self) -> None:
         if self._devices_flag == "auto" or self._devices_flag is None:
             self._devices_flag = self.accelerator.auto_device_count()

-    def _set_devices_flag_if_auto_select_gpus_passed(self) -> None:
-        if self._auto_select_gpus is not None:
-            rank_zero_deprecation(
-                "The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0 and will be removed in v2.0.0."
-                " Please use the function `pytorch_lightning.accelerators.find_usable_cuda_devices` instead."
-            )
-        if (
-            self._auto_select_gpus
-            and isinstance(self._devices_flag, int)
-            and isinstance(self.accelerator, CUDAAccelerator)
-        ):
-            self._devices_flag = pick_multiple_gpus(
-                self._devices_flag,
-                # we already show a deprecation message when user sets Trainer(auto_select_gpus=...)
-                _show_deprecation=False,
-            )
-            log.info(f"Auto select gpus: {self._devices_flag}")
-
     def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
         if isinstance(self._cluster_environment_flag, ClusterEnvironment):
             return self._cluster_environment_flag

src/pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 2 deletions
@@ -125,7 +125,6 @@ def __init__(
         gradient_clip_algorithm: Optional[str] = None,
         num_nodes: int = 1,
         devices: Optional[Union[List[int], str, int]] = None,
-        auto_select_gpus: Optional[bool] = None,  # TODO: Remove in 2.0
         enable_progress_bar: bool = True,
         overfit_batches: Union[int, float] = 0.0,
         track_grad_norm: Union[int, float, str] = -1,
@@ -368,7 +367,6 @@ def __init__(
             benchmark=benchmark,
             replace_sampler_ddp=replace_sampler_ddp,
             deterministic=deterministic,
-            auto_select_gpus=auto_select_gpus,
             precision=precision,
             plugins=plugins,
         )

src/pytorch_lightning/tuner/auto_gpu_select.py

Lines changed: 0 additions & 96 deletions
This file was deleted.
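The deleted module provided the `pick_single_gpu` and `pick_multiple_gpus` helpers. A rough illustrative sketch of the probing style they implemented (hedged: the function name, iteration order, and error message below are assumptions, not the deleted code verbatim):

    import torch

    def pick_free_cuda_devices(num_devices: int) -> list:
        """Probe visible CUDA devices; keep the first `num_devices` where a tiny allocation succeeds."""
        picked = []
        for index in range(torch.cuda.device_count()):
            if len(picked) == num_devices:
                break
            try:
                # Allocating a one-element tensor fails if the device is unusable.
                torch.ones(1, device=f"cuda:{index}")
            except RuntimeError:
                continue
            picked.append(index)
        if len(picked) < num_devices:
            raise RuntimeError(f"Requested {num_devices} devices, but only {len(picked)} are usable.")
        return picked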

tests/tests_pytorch/conftest.py

Lines changed: 0 additions & 1 deletion
@@ -115,7 +115,6 @@ def reset_deterministic_algorithm():
 def mock_cuda_count(monkeypatch, n: int) -> None:
     monkeypatch.setattr(lightning_fabric.accelerators.cuda, "num_cuda_devices", lambda: n)
     monkeypatch.setattr(pytorch_lightning.accelerators.cuda, "num_cuda_devices", lambda: n)
-    monkeypatch.setattr(pytorch_lightning.tuner.auto_gpu_select, "num_cuda_devices", lambda: n)


 @pytest.fixture(scope="function")

tests/tests_pytorch/deprecated_api/test_remove_2-0.py

Lines changed: 0 additions & 22 deletions
@@ -21,7 +21,6 @@
 from torch.utils.data import DataLoader

 import pytorch_lightning.profiler as profiler
-from lightning_fabric.utilities.exceptions import MisconfigurationException
 from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.accelerators.cpu import CPUAccelerator
 from pytorch_lightning.cli import LightningCLI
@@ -34,7 +33,6 @@
 from pytorch_lightning.strategies.bagua import LightningBaguaModule
 from pytorch_lightning.strategies.utils import on_colab_kaggle
 from pytorch_lightning.trainer.states import RunningStage, TrainerFn
-from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus, pick_single_gpu
 from pytorch_lightning.utilities.apply_func import (
     apply_to_collection,
     apply_to_collections,
@@ -586,23 +584,3 @@ def test_profiler_classes_deprecated_warning(cls):
         f" Use .*profilers.{cls.__name__}` class instead."
     ):
         cls()
-
-
-def test_auto_select_gpus():
-    with pytest.deprecated_call(match="The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0"):
-        Trainer(auto_select_gpus=False)
-
-
-def test_pick_multiple_gpus():
-    with pytest.deprecated_call(match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"), pytest.raises(
-        MisconfigurationException
-    ):
-        pick_multiple_gpus(0)
-
-
-@mock.patch("pytorch_lightning.tuner.auto_gpu_select.num_cuda_devices", return_value=0)
-def test_pick_single_gpu(_):
-    with pytest.deprecated_call(match="The function `pick_single_gpu` has been deprecated in v1.9.0"), pytest.raises(
-        RuntimeError
-    ):
-        pick_single_gpu([])

tests/tests_pytorch/trainer/properties/test_auto_gpu_select.py

Lines changed: 0 additions & 59 deletions
This file was deleted.

tests/tests_pytorch/trainer/test_trainer.py

Lines changed: 0 additions & 9 deletions
@@ -1168,15 +1168,6 @@ def test_invalid_gradient_clip_algo(tmpdir):
         Trainer(default_root_dir=tmpdir, gradient_clip_algorithm="norm2")


-@RunIf(min_cuda_gpus=1)
-def test_invalid_gpu_choice_with_auto_select_gpus():
-    num_gpus = torch.cuda.device_count()
-    with pytest.raises(MisconfigurationException, match=r".*but your machine only has.*"), pytest.deprecated_call(
-        match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"
-    ):
-        Trainer(accelerator="gpu", devices=num_gpus + 1, auto_select_gpus=True)
-
-
 @pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.5, 5])
 def test_num_sanity_val_steps(tmpdir, limit_val_batches):
     """Test that the number of sanity check batches is clipped to `limit_val_batches`."""
