Skip to content

Commit 4be79ea

Browse files
carmocca authored and Borda committed
Remove the deprecated resume_from_checkpoint Trainer argument (#16167)
1 parent bf55ea6 commit 4be79ea

File tree

5 files changed

+7
-107
lines changed

5 files changed

+7
-107
lines changed

docs/source-pytorch/common/trainer.rst

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1142,31 +1142,6 @@ By setting to False, you have to add your own distributed sampler:
11421142
11431143
.. note:: For iterable datasets, we don't do this automatically.
11441144

1145-
resume_from_checkpoint
1146-
^^^^^^^^^^^^^^^^^^^^^^
1147-
1148-
.. warning:: ``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
1149-
Please pass ``trainer.fit(ckpt_path="some/path/to/my_checkpoint.ckpt")`` instead.
1150-
1151-
1152-
.. raw:: html
1153-
1154-
<video width="50%" max-width="400px" controls
1155-
poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/resume_from_checkpoint.jpg"
1156-
src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/resume_from_checkpoint.mp4"></video>
1157-
1158-
|
1159-
1160-
To resume training from a specific checkpoint pass in the path here. If resuming from a mid-epoch
1161-
checkpoint, training will start from the beginning of the next epoch.
1162-
1163-
.. testcode::
1164-
1165-
# default used by the Trainer
1166-
trainer = Trainer(resume_from_checkpoint=None)
1167-
1168-
# resume from a specific checkpoint
1169-
trainer = Trainer(resume_from_checkpoint="some/path/to/my_checkpoint.ckpt")
11701145

11711146
strategy
11721147
^^^^^^^^

src/pytorch_lightning/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
4848
* Removed the `Trainer(ipus=...)` argument
4949
* Removed the `Trainer(num_processes=...)` argument
5050

51+
- Removed the deprecated `resume_from_checkpoint` Trainer argument ([#16167](https://github.com/Lightning-AI/lightning/pull/16167))
52+
5153

5254
### Fixed
5355

src/pytorch_lightning/trainer/connectors/checkpoint_connector.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
from pytorch_lightning.utilities.imports import _fault_tolerant_training
3737
from pytorch_lightning.utilities.migration import pl_legacy_patch
3838
from pytorch_lightning.utilities.migration.utils import _pl_migrate_checkpoint
39-
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
39+
from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn
4040

4141
if _OMEGACONF_AVAILABLE:
4242
from omegaconf import Container
@@ -46,16 +46,9 @@
4646

4747

4848
class CheckpointConnector:
49-
def __init__(self, trainer: "pl.Trainer", resume_from_checkpoint: Optional[_PATH] = None) -> None:
49+
def __init__(self, trainer: "pl.Trainer") -> None:
5050
self.trainer = trainer
5151
self.resume_checkpoint_path: Optional[_PATH] = None
52-
# TODO: remove resume_from_checkpoint_fit_path in v2.0
53-
self.resume_from_checkpoint_fit_path: Optional[_PATH] = resume_from_checkpoint
54-
if resume_from_checkpoint is not None:
55-
rank_zero_deprecation(
56-
"Setting `Trainer(resume_from_checkpoint=)` is deprecated in v1.5 and"
57-
" will be removed in v2.0. Please pass `Trainer.fit(ckpt_path=)` directly instead."
58-
)
5952
self._loaded_checkpoint: Dict[str, Any] = {}
6053

6154
@property
@@ -193,12 +186,6 @@ def resume_end(self) -> None:
193186
rank_zero_info(f"Restored all states from the checkpoint file at {self.resume_checkpoint_path}")
194187
elif self.trainer.state.fn in (TrainerFn.VALIDATING, TrainerFn.TESTING, TrainerFn.PREDICTING):
195188
rank_zero_info(f"Loaded model weights from checkpoint at {self.resume_checkpoint_path}")
196-
# TODO: remove resume_from_checkpoint_fit_path in v2.0
197-
if (
198-
self.trainer.state.fn == TrainerFn.FITTING
199-
and self.resume_checkpoint_path == self.resume_from_checkpoint_fit_path
200-
):
201-
self.resume_from_checkpoint_fit_path = None
202189
self.resume_checkpoint_path = None
203190
self._loaded_checkpoint = {}
204191

src/pytorch_lightning/trainer/trainer.py

Lines changed: 3 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
from contextlib import contextmanager
3030
from copy import deepcopy
3131
from datetime import timedelta
32-
from pathlib import Path
3332
from typing import Any, Dict, Generator, Iterable, List, Optional, Type, Union
3433
from weakref import proxy
3534

@@ -150,7 +149,6 @@ def __init__(
150149
precision: _PRECISION_INPUT = 32,
151150
enable_model_summary: bool = True,
152151
num_sanity_val_steps: int = 2,
153-
resume_from_checkpoint: Optional[Union[Path, str]] = None,
154152
profiler: Optional[Union[Profiler, str]] = None,
155153
benchmark: Optional[bool] = None,
156154
deterministic: Optional[Union[bool, _LITERAL_WARN]] = None,
@@ -316,14 +314,6 @@ def __init__(
316314
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
317315
you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
318316
319-
resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
320-
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
321-
training will start from the beginning of the next epoch.
322-
323-
.. deprecated:: v1.5
324-
``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
325-
Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.
326-
327317
strategy: Supports different training strategies with aliases
328318
as well custom strategies.
329319
Default: ``None``.
@@ -384,7 +374,7 @@ def __init__(
384374
)
385375
self._logger_connector = LoggerConnector(self)
386376
self._callback_connector = CallbackConnector(self)
387-
self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)
377+
self._checkpoint_connector = CheckpointConnector(self)
388378
self._signal_connector = SignalConnector(self)
389379
self.tuner = Tuner(self)
390380

@@ -586,11 +576,10 @@ def _fit_impl(
586576
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
587577
)
588578

589-
# TODO: ckpt_path only in v2.0
590-
ckpt_path = ckpt_path or self.resume_from_checkpoint
579+
ckpt_path = ckpt_path
591580
self._ckpt_path = self._checkpoint_connector._set_ckpt_path(
592581
self.state.fn,
593-
ckpt_path, # type: ignore[arg-type]
582+
ckpt_path,
594583
model_provided=True,
595584
model_connected=self.lightning_module is not None,
596585
)
@@ -1823,18 +1812,6 @@ def progress_bar_callback(self) -> Optional[ProgressBarBase]:
18231812
return c
18241813
return None
18251814

1826-
@property
1827-
def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:
1828-
resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path
1829-
if resume_from_checkpoint is not None:
1830-
rank_zero_deprecation(
1831-
"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0."
1832-
" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.",
1833-
stacklevel=5,
1834-
)
1835-
1836-
return resume_from_checkpoint
1837-
18381815
@property
18391816
def ckpt_path(self) -> Optional[str]:
18401817
"""Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,

tests/tests_pytorch/deprecated_api/test_remove_2-0.py

Lines changed: 0 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -68,50 +68,9 @@
6868
from pytorch_lightning.utilities.optimizer import optimizer_to_device, optimizers_to_device
6969
from pytorch_lightning.utilities.seed import pl_worker_init_function, reset_seed, seed_everything
7070
from pytorch_lightning.utilities.xla_device import inner_f, pl_multi_process, XLADeviceUtils
71-
from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback
7271
from tests_pytorch.helpers.runif import RunIf
7372

7473

75-
def test_v2_0_0_resume_from_checkpoint_trainer_constructor(tmpdir):
76-
# test resume_from_checkpoint still works until v2.0 deprecation
77-
model = BoringModel()
78-
callback = OldStatefulCallback(state=111)
79-
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, callbacks=[callback])
80-
trainer.fit(model)
81-
ckpt_path = trainer.checkpoint_callback.best_model_path
82-
83-
callback = OldStatefulCallback(state=222)
84-
with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
85-
trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback], resume_from_checkpoint=ckpt_path)
86-
with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
87-
_ = trainer.resume_from_checkpoint
88-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
89-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
90-
trainer.validate(model=model, ckpt_path=ckpt_path)
91-
assert callback.state == 222
92-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
93-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
94-
with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
95-
trainer.fit(model)
96-
ckpt_path = trainer.checkpoint_callback.best_model_path # last `fit` replaced the `best_model_path`
97-
assert callback.state == 111
98-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
99-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
100-
trainer.predict(model=model, ckpt_path=ckpt_path)
101-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
102-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
103-
trainer.fit(model)
104-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
105-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
106-
107-
# test fit(ckpt_path=) precedence over Trainer(resume_from_checkpoint=) path
108-
model = BoringModel()
109-
with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
110-
trainer = Trainer(resume_from_checkpoint="trainer_arg_path")
111-
with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. Aborting training."):
112-
trainer.fit(model, ckpt_path="fit_arg_ckpt_path")
113-
114-
11574
def test_v2_0_0_callback_on_load_checkpoint_hook(tmpdir):
11675
class TestCallbackLoadHook(Callback):
11776
def on_load_checkpoint(self, trainer, pl_module, callback_state):

0 commit comments

Comments (0)