Skip to content

Commit b2f33b4

Browse files
awaelchli and carmocca
authored and committed
Remove the deprecated resume_from_checkpoint Trainer argument (#16167)
1 parent 79dbefc commit b2f33b4

File tree

5 files changed

+7
-107
lines changed

5 files changed

+7
-107
lines changed

docs/source-pytorch/common/trainer.rst

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1141,31 +1141,6 @@ By setting to False, you have to add your own distributed sampler:
11411141
11421142
.. note:: For iterable datasets, we don't do this automatically.
11431143

1144-
resume_from_checkpoint
1145-
^^^^^^^^^^^^^^^^^^^^^^
1146-
1147-
.. warning:: ``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
1148-
Please pass ``trainer.fit(ckpt_path="some/path/to/my_checkpoint.ckpt")`` instead.
1149-
1150-
1151-
.. raw:: html
1152-
1153-
<video width="50%" max-width="400px" controls
1154-
poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/resume_from_checkpoint.jpg"
1155-
src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/resume_from_checkpoint.mp4"></video>
1156-
1157-
|
1158-
1159-
To resume training from a specific checkpoint pass in the path here. If resuming from a mid-epoch
1160-
checkpoint, training will start from the beginning of the next epoch.
1161-
1162-
.. testcode::
1163-
1164-
# default used by the Trainer
1165-
trainer = Trainer(resume_from_checkpoint=None)
1166-
1167-
# resume from a specific checkpoint
1168-
trainer = Trainer(resume_from_checkpoint="some/path/to/my_checkpoint.ckpt")
11691144

11701145
strategy
11711146
^^^^^^^^

src/pytorch_lightning/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
3333
* Removed the `Trainer(ipus=...)` argument
3434
* Removed the `Trainer(num_processes=...)` argument
3535

36+
- Removed the deprecated `resume_from_checkpoint` Trainer argument ([#16167](https://github.com/Lightning-AI/lightning/pull/16167))
37+
3638

3739
## [unreleased] - 202Y-MM-DD
3840

src/pytorch_lightning/trainer/connectors/checkpoint_connector.py

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
from pytorch_lightning.utilities.imports import _fault_tolerant_training
3737
from pytorch_lightning.utilities.migration import pl_legacy_patch
3838
from pytorch_lightning.utilities.migration.utils import _pl_migrate_checkpoint
39-
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
39+
from pytorch_lightning.utilities.rank_zero import rank_zero_info, rank_zero_warn
4040

4141
if _OMEGACONF_AVAILABLE:
4242
from omegaconf import Container
@@ -46,16 +46,9 @@
4646

4747

4848
class CheckpointConnector:
49-
def __init__(self, trainer: "pl.Trainer", resume_from_checkpoint: Optional[_PATH] = None) -> None:
49+
def __init__(self, trainer: "pl.Trainer") -> None:
5050
self.trainer = trainer
5151
self.resume_checkpoint_path: Optional[_PATH] = None
52-
# TODO: remove resume_from_checkpoint_fit_path in v2.0
53-
self.resume_from_checkpoint_fit_path: Optional[_PATH] = resume_from_checkpoint
54-
if resume_from_checkpoint is not None:
55-
rank_zero_deprecation(
56-
"Setting `Trainer(resume_from_checkpoint=)` is deprecated in v1.5 and"
57-
" will be removed in v2.0. Please pass `Trainer.fit(ckpt_path=)` directly instead."
58-
)
5952
self._loaded_checkpoint: Dict[str, Any] = {}
6053

6154
@property
@@ -193,12 +186,6 @@ def resume_end(self) -> None:
193186
rank_zero_info(f"Restored all states from the checkpoint file at {self.resume_checkpoint_path}")
194187
elif self.trainer.state.fn in (TrainerFn.VALIDATING, TrainerFn.TESTING, TrainerFn.PREDICTING):
195188
rank_zero_info(f"Loaded model weights from checkpoint at {self.resume_checkpoint_path}")
196-
# TODO: remove resume_from_checkpoint_fit_path in v2.0
197-
if (
198-
self.trainer.state.fn == TrainerFn.FITTING
199-
and self.resume_checkpoint_path == self.resume_from_checkpoint_fit_path
200-
):
201-
self.resume_from_checkpoint_fit_path = None
202189
self.resume_checkpoint_path = None
203190
self._loaded_checkpoint = {}
204191

src/pytorch_lightning/trainer/trainer.py

Lines changed: 3 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
from contextlib import contextmanager
3030
from copy import deepcopy
3131
from datetime import timedelta
32-
from pathlib import Path
3332
from typing import Any, Dict, Generator, Iterable, List, Optional, Type, Union
3433
from weakref import proxy
3534

@@ -145,7 +144,6 @@ def __init__(
145144
precision: Union[int, str] = 32,
146145
enable_model_summary: bool = True,
147146
num_sanity_val_steps: int = 2,
148-
resume_from_checkpoint: Optional[Union[Path, str]] = None,
149147
profiler: Optional[Union[Profiler, str]] = None,
150148
benchmark: Optional[bool] = None,
151149
deterministic: Optional[Union[bool, _LITERAL_WARN]] = None,
@@ -311,14 +309,6 @@ def __init__(
311309
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
312310
you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
313311
314-
resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
315-
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
316-
training will start from the beginning of the next epoch.
317-
318-
.. deprecated:: v1.5
319-
``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
320-
Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.
321-
322312
strategy: Supports different training strategies with aliases
323313
as well custom strategies.
324314
Default: ``None``.
@@ -379,7 +369,7 @@ def __init__(
379369
)
380370
self._logger_connector = LoggerConnector(self)
381371
self._callback_connector = CallbackConnector(self)
382-
self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)
372+
self._checkpoint_connector = CheckpointConnector(self)
383373
self._signal_connector = SignalConnector(self)
384374
self.tuner = Tuner(self)
385375

@@ -581,11 +571,10 @@ def _fit_impl(
581571
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
582572
)
583573

584-
# TODO: ckpt_path only in v2.0
585-
ckpt_path = ckpt_path or self.resume_from_checkpoint
574+
ckpt_path = ckpt_path
586575
self._ckpt_path = self._checkpoint_connector._set_ckpt_path(
587576
self.state.fn,
588-
ckpt_path, # type: ignore[arg-type]
577+
ckpt_path,
589578
model_provided=True,
590579
model_connected=self.lightning_module is not None,
591580
)
@@ -1818,18 +1807,6 @@ def progress_bar_callback(self) -> Optional[ProgressBarBase]:
18181807
return c
18191808
return None
18201809

1821-
@property
1822-
def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:
1823-
resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path
1824-
if resume_from_checkpoint is not None:
1825-
rank_zero_deprecation(
1826-
"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0."
1827-
" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.",
1828-
stacklevel=5,
1829-
)
1830-
1831-
return resume_from_checkpoint
1832-
18331810
@property
18341811
def ckpt_path(self) -> Optional[str]:
18351812
"""Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,

tests/tests_pytorch/deprecated_api/test_remove_2-0.py

Lines changed: 0 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -19,47 +19,6 @@
1919
from pytorch_lightning import Callback, Trainer
2020
from pytorch_lightning.cli import LightningCLI
2121
from pytorch_lightning.demos.boring_classes import BoringModel
22-
from tests_pytorch.callbacks.test_callbacks import OldStatefulCallback
23-
24-
25-
def test_v2_0_0_resume_from_checkpoint_trainer_constructor(tmpdir):
26-
# test resume_from_checkpoint still works until v2.0 deprecation
27-
model = BoringModel()
28-
callback = OldStatefulCallback(state=111)
29-
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, callbacks=[callback])
30-
trainer.fit(model)
31-
ckpt_path = trainer.checkpoint_callback.best_model_path
32-
33-
callback = OldStatefulCallback(state=222)
34-
with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
35-
trainer = Trainer(default_root_dir=tmpdir, max_steps=2, callbacks=[callback], resume_from_checkpoint=ckpt_path)
36-
with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
37-
_ = trainer.resume_from_checkpoint
38-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
39-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
40-
trainer.validate(model=model, ckpt_path=ckpt_path)
41-
assert callback.state == 222
42-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
43-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path == ckpt_path
44-
with pytest.deprecated_call(match=r"trainer.resume_from_checkpoint` is deprecated in v1.5"):
45-
trainer.fit(model)
46-
ckpt_path = trainer.checkpoint_callback.best_model_path # last `fit` replaced the `best_model_path`
47-
assert callback.state == 111
48-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
49-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
50-
trainer.predict(model=model, ckpt_path=ckpt_path)
51-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
52-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
53-
trainer.fit(model)
54-
assert trainer._checkpoint_connector.resume_checkpoint_path is None
55-
assert trainer._checkpoint_connector.resume_from_checkpoint_fit_path is None
56-
57-
# test fit(ckpt_path=) precedence over Trainer(resume_from_checkpoint=) path
58-
model = BoringModel()
59-
with pytest.deprecated_call(match=r"Setting `Trainer\(resume_from_checkpoint=\)` is deprecated in v1.5"):
60-
trainer = Trainer(resume_from_checkpoint="trainer_arg_path")
61-
with pytest.raises(FileNotFoundError, match="Checkpoint at fit_arg_ckpt_path not found. Aborting training."):
62-
trainer.fit(model, ckpt_path="fit_arg_ckpt_path")
6322

6423

6524
def test_v2_0_0_callback_on_load_checkpoint_hook(tmpdir):

0 commit comments

Comments (0)