Skip to content

Commit ffee774

Browse files
awaelchli and pre-commit-ci[bot]
authored and committed
Remove deprecated auto_select_gpus Trainer argument (#16184)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent ef97aee commit ffee774

File tree

8 files changed

+5
-190
lines changed

8 files changed

+5
-190
lines changed

src/pytorch_lightning/CHANGELOG.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
2424
* Removed the `Trainer(ipus=...)` argument
2525
* Removed the `Trainer(num_processes=...)` argument
2626

27+
- Removed the deprecated automatic GPU selection ([#16184](https://github.com/Lightning-AI/lightning/pull/16184))
28+
* Removed the `Trainer(auto_select_gpus=...)` argument
29+
* Removed the `pytorch_lightning.tuner.auto_gpu_select.{pick_single_gpu,pick_multiple_gpus}` functions
30+
2731

2832
- Removed the deprecated `resume_from_checkpoint` Trainer argument ([#16167](https://github.com/Lightning-AI/lightning/pull/16167))
2933

src/pytorch_lightning/trainer/connectors/accelerator_connector.py

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -75,10 +75,9 @@
7575
TPUSpawnStrategy,
7676
)
7777
from pytorch_lightning.strategies.ddp_spawn import _DDP_FORK_ALIASES
78-
from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus
7978
from pytorch_lightning.utilities.exceptions import MisconfigurationException
8079
from pytorch_lightning.utilities.imports import _IPU_AVAILABLE
81-
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
80+
from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn
8281

8382
log = logging.getLogger(__name__)
8483

@@ -98,7 +97,6 @@ def __init__(
9897
benchmark: Optional[bool] = None,
9998
replace_sampler_ddp: bool = True,
10099
deterministic: Optional[Union[bool, _LITERAL_WARN]] = False,
101-
auto_select_gpus: Optional[bool] = None, # TODO: Remove in v1.10.0
102100
) -> None:
103101
"""The AcceleratorConnector parses several Trainer arguments and instantiates the Strategy including other
104102
components such as the Accelerator and Precision plugins.
@@ -428,7 +426,6 @@ def _set_parallel_devices_and_init_accelerator(self) -> None:
428426
)
429427

430428
self._set_devices_flag_if_auto_passed()
431-
self._set_devices_flag_if_auto_select_gpus_passed()
432429
self._devices_flag = accelerator_cls.parse_devices(self._devices_flag)
433430
if not self._parallel_devices:
434431
self._parallel_devices = accelerator_cls.get_parallel_devices(self._devices_flag)
@@ -437,24 +434,6 @@ def _set_devices_flag_if_auto_passed(self) -> None:
437434
if self._devices_flag == "auto" or self._devices_flag is None:
438435
self._devices_flag = self.accelerator.auto_device_count()
439436

440-
def _set_devices_flag_if_auto_select_gpus_passed(self) -> None:
441-
if self._auto_select_gpus is not None:
442-
rank_zero_deprecation(
443-
"The Trainer argument `auto_select_gpus` has been deprecated in v1.9.0 and will be removed in v1.10.0."
444-
" Please use the function `pytorch_lightning.accelerators.find_usable_cuda_devices` instead."
445-
)
446-
if (
447-
self._auto_select_gpus
448-
and isinstance(self._devices_flag, int)
449-
and isinstance(self.accelerator, CUDAAccelerator)
450-
):
451-
self._devices_flag = pick_multiple_gpus(
452-
self._devices_flag,
453-
# we already show a deprecation message when user sets Trainer(auto_select_gpus=...)
454-
_show_deprecation=False,
455-
)
456-
log.info(f"Auto select gpus: {self._devices_flag}")
457-
458437
def _choose_and_init_cluster_environment(self) -> ClusterEnvironment:
459438
if isinstance(self._cluster_environment_flag, ClusterEnvironment):
460439
return self._cluster_environment_flag

src/pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,6 @@ def __init__(
120120
gradient_clip_algorithm: Optional[str] = None,
121121
num_nodes: int = 1,
122122
devices: Optional[Union[List[int], str, int]] = None,
123-
auto_select_gpus: Optional[bool] = None, # TODO: Remove in 2.0
124123
enable_progress_bar: bool = True,
125124
overfit_batches: Union[int, float] = 0.0,
126125
track_grad_norm: Union[int, float, str] = -1,
@@ -363,7 +362,6 @@ def __init__(
363362
benchmark=benchmark,
364363
replace_sampler_ddp=replace_sampler_ddp,
365364
deterministic=deterministic,
366-
auto_select_gpus=auto_select_gpus,
367365
precision=precision,
368366
plugins=plugins,
369367
)

src/pytorch_lightning/tuner/auto_gpu_select.py

Lines changed: 0 additions & 96 deletions
This file was deleted.

tests/tests_pytorch/conftest.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,6 @@ def reset_deterministic_algorithm():
115115
def mock_cuda_count(monkeypatch, n: int) -> None:
116116
monkeypatch.setattr(lightning_fabric.accelerators.cuda, "num_cuda_devices", lambda: n)
117117
monkeypatch.setattr(pytorch_lightning.accelerators.cuda, "num_cuda_devices", lambda: n)
118-
monkeypatch.setattr(pytorch_lightning.tuner.auto_gpu_select, "num_cuda_devices", lambda: n)
119118

120119

121120
@pytest.fixture(scope="function")

tests/tests_pytorch/deprecated_api/test_remove_1-10.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@
3636
from pytorch_lightning.strategies.bagua import LightningBaguaModule
3737
from pytorch_lightning.strategies.utils import on_colab_kaggle
3838
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
39-
from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus, pick_single_gpu
4039
from pytorch_lightning.utilities.apply_func import (
4140
apply_to_collection,
4241
apply_to_collections,

tests/tests_pytorch/trainer/properties/test_auto_gpu_select.py

Lines changed: 0 additions & 59 deletions
This file was deleted.

tests/tests_pytorch/trainer/test_trainer.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1168,15 +1168,6 @@ def test_invalid_gradient_clip_algo(tmpdir):
11681168
Trainer(default_root_dir=tmpdir, gradient_clip_algorithm="norm2")
11691169

11701170

1171-
@RunIf(min_cuda_gpus=1)
1172-
def test_invalid_gpu_choice_with_auto_select_gpus():
1173-
num_gpus = torch.cuda.device_count()
1174-
with pytest.raises(MisconfigurationException, match=r".*but your machine only has.*"), pytest.deprecated_call(
1175-
match="The function `pick_multiple_gpus` has been deprecated in v1.9.0"
1176-
):
1177-
Trainer(accelerator="gpu", devices=num_gpus + 1, auto_select_gpus=True)
1178-
1179-
11801171
@pytest.mark.parametrize("limit_val_batches", [0.0, 1, 1.0, 0.5, 5])
11811172
def test_num_sanity_val_steps(tmpdir, limit_val_batches):
11821173
"""Test that the number of sanity check batches is clipped to `limit_val_batches`."""

0 commit comments

Comments (0)