From 05407669d3b4a5310f2fbccd660b39b4d727a353 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Sun, 28 Aug 2022 12:18:02 +0200 Subject: [PATCH 1/3] remove the deprecated weights_save_path --- docs/source-pytorch/common/trainer.rst | 38 ------------ .../callbacks/model_checkpoint.py | 14 ++--- .../trainer/connectors/callback_connector.py | 8 --- .../connectors/checkpoint_connector.py | 3 +- .../trainer/connectors/signal_connector.py | 4 +- src/pytorch_lightning/trainer/trainer.py | 37 ------------ .../checkpointing/test_model_checkpoint.py | 31 ---------- .../deprecated_api/test_remove_1-8.py | 7 --- tests/tests_pytorch/loggers/test_all.py | 59 +------------------ 9 files changed, 8 insertions(+), 193 deletions(-) diff --git a/docs/source-pytorch/common/trainer.rst b/docs/source-pytorch/common/trainer.rst index 53148c9fab583..e4e62b1530a6f 100644 --- a/docs/source-pytorch/common/trainer.rst +++ b/docs/source-pytorch/common/trainer.rst @@ -1501,44 +1501,6 @@ Can specify as float or int. total_fit_batches = total_train_batches + total_val_batches -weights_save_path -^^^^^^^^^^^^^^^^^ - - -.. warning:: `weights_save_path` has been deprecated in v1.6 and will be removed in v1.8. Please pass - ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` - callback. - - -.. raw:: html - - - -| - -Directory of where to save weights if specified. - -.. testcode:: - - # default used by the Trainer - trainer = Trainer(weights_save_path=os.getcwd()) - - # save to your custom path - trainer = Trainer(weights_save_path="my/path") - -Example:: - - # if checkpoint callback used, then overrides the weights path - # **NOTE: this saves weights to some/path NOT my/path - checkpoint = ModelCheckpoint(dirpath='some/path') - trainer = Trainer( - callbacks=[checkpoint], - weights_save_path='my/path' - ) - - enable_model_summary ^^^^^^^^^^^^^^^^^^^^ diff --git a/src/pytorch_lightning/callbacks/model_checkpoint.py b/src/pytorch_lightning/callbacks/model_checkpoint.py index 1ad86a0917dac..54dfaf3d42627 100644 --- a/src/pytorch_lightning/callbacks/model_checkpoint.py +++ b/src/pytorch_lightning/callbacks/model_checkpoint.py @@ -66,8 +66,7 @@ class ModelCheckpoint(Checkpoint): By default, dirpath is ``None`` and will be set at runtime to the location specified by :class:`~pytorch_lightning.trainer.trainer.Trainer`'s - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` or - :paramref:`~pytorch_lightning.trainer.trainer.Trainer.weights_save_path` arguments, + :paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` argument, and if the Trainer uses a logger, the path will also contain logger name and version. filename: checkpoint filename. Can contain named formatting options to be auto-filled. @@ -576,9 +575,8 @@ def __resolve_ckpt_dir(self, trainer: "pl.Trainer") -> None: determine where to save checkpoints. The path for saving weights is set in this priority: 1. The ``ModelCheckpoint``'s ``dirpath`` if passed in - 2. The ``Trainer``'s ``weights_saved_path`` if passed in (deprecated) - 3. The ``Logger``'s ``log_dir`` if the trainer has loggers - 4. The ``Trainer``'s ``default_root_dir`` if the trainer has no loggers + 2. The ``Logger``'s ``log_dir`` if the trainer has loggers + 3. The ``Trainer``'s ``default_root_dir`` if the trainer has no loggers The path gets extended with subdirectory "checkpoints". 
""" @@ -586,11 +584,7 @@ def __resolve_ckpt_dir(self, trainer: "pl.Trainer") -> None: # short circuit if dirpath was passed to ModelCheckpoint return - # TODO: Remove weights_save_path logic here in v1.8 - if trainer._weights_save_path_internal != trainer.default_root_dir: - # the user has changed weights_save_path - ckpt_path = os.path.join(trainer._weights_save_path_internal, "checkpoints") - elif len(trainer.loggers) > 0: + if len(trainer.loggers) > 0: if trainer.loggers[0].save_dir is not None: save_dir = trainer.loggers[0].save_dir else: diff --git a/src/pytorch_lightning/trainer/connectors/callback_connector.py b/src/pytorch_lightning/trainer/connectors/callback_connector.py index 32d67d44ad44c..139d4b655f229 100644 --- a/src/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/src/pytorch_lightning/trainer/connectors/callback_connector.py @@ -47,20 +47,12 @@ def on_trainer_init( enable_checkpointing: bool, enable_progress_bar: bool, default_root_dir: Optional[str], - weights_save_path: Optional[str], enable_model_summary: bool, max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None, accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None, ) -> None: # init folder paths for checkpoint + weights save callbacks self.trainer._default_root_dir = default_root_dir or os.getcwd() - if weights_save_path: - rank_zero_deprecation( - "Setting `Trainer(weights_save_path=)` has been deprecated in v1.6 and will be" - " removed in v1.8. Please pass ``dirpath`` directly to the `ModelCheckpoint` callback" - ) - - self.trainer._weights_save_path = weights_save_path or self.trainer._default_root_dir # init callbacks if isinstance(callbacks, Callback): diff --git a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py index f0b35e054d6d5..245e1305375e0 100644 --- a/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py +++ b/src/pytorch_lightning/trainer/connectors/checkpoint_connector.py @@ -56,8 +56,7 @@ def __init__(self, trainer: "pl.Trainer", resume_from_checkpoint: Optional[_PATH @property def _hpc_resume_path(self) -> Optional[str]: - # TODO: in v1.8 set this equal to self.trainer.default_root_dir - dir_path_hpc = self.trainer._weights_save_path_internal + dir_path_hpc = self.trainer.default_root_dir fs = get_filesystem(dir_path_hpc) if not fs.isdir(dir_path_hpc): return None diff --git a/src/pytorch_lightning/trainer/connectors/signal_connector.py b/src/pytorch_lightning/trainer/connectors/signal_connector.py index 8d7c0bb51a0ce..008623d36b70f 100644 --- a/src/pytorch_lightning/trainer/connectors/signal_connector.py +++ b/src/pytorch_lightning/trainer/connectors/signal_connector.py @@ -68,8 +68,8 @@ def slurm_sigusr1_handler_fn(self, signum: _SIGNUM, frame: FrameType) -> None: # save logger to make sure we get all the metrics for logger in self.trainer.loggers: logger.finalize("finished") - # TODO: in v1.8 change this to use self.trainer.default_root_dir - hpc_save_path = self.trainer._checkpoint_connector.hpc_save_path(self.trainer._weights_save_path_internal) + + hpc_save_path = self.trainer._checkpoint_connector.hpc_save_path(self.trainer.default_root_dir) self.trainer.save_checkpoint(hpc_save_path) if self.trainer.is_global_zero: diff --git a/src/pytorch_lightning/trainer/trainer.py b/src/pytorch_lightning/trainer/trainer.py index 4c9456e8ead37..c508599095a51 100644 --- a/src/pytorch_lightning/trainer/trainer.py +++ b/src/pytorch_lightning/trainer/trainer.py @@ -169,7 
+169,6 @@ def __init__( sync_batchnorm: bool = False, precision: Union[int, str] = 32, enable_model_summary: bool = True, - weights_save_path: Optional[str] = None, # TODO: Remove in 1.8 num_sanity_val_steps: int = 2, resume_from_checkpoint: Optional[Union[Path, str]] = None, profiler: Optional[Union[Profiler, str]] = None, @@ -402,17 +401,6 @@ def __init__( enable_model_summary: Whether to enable model summarization by default. Default: ``True``. - weights_save_path: Where to save weights if specified. Will override default_root_dir - for checkpoints only. Use this if for whatever reason you need the checkpoints - stored in a different place than the logs written in `default_root_dir`. - Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/' - Defaults to `default_root_dir`. - - .. deprecated:: v1.6 - ``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass - ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` - callback. - move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu. This can save some gpu memory, but can make training slower. Use with attention. Default: ``False``. @@ -488,7 +476,6 @@ def __init__( enable_checkpointing, enable_progress_bar, default_root_dir, - weights_save_path, enable_model_summary, max_time, accumulate_grad_batches, @@ -2260,30 +2247,6 @@ def default_root_dir(self) -> str: return os.path.normpath(self._default_root_dir) return self._default_root_dir - @property - def weights_save_path(self) -> str: - """ - The default root location to save weights (checkpoints), e.g., when the - :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path. - - .. deprecated:: v1.6 - `Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8. - """ - rank_zero_deprecation("`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.") - return self._weights_save_path_internal - - # TODO: Remove _weights_save_path_internal in v1.8 - @property - def _weights_save_path_internal(self) -> str: - """This is an internal implementation of weights_save_path which allows weights_save_path to be used - internally by the framework without emitting a deprecation warning. - - To be removed in v1.8. 
- """ - if get_filesystem(self._weights_save_path).protocol == "file": - return os.path.normpath(self._weights_save_path) - return self._weights_save_path - @property def early_stopping_callback(self) -> Optional[EarlyStopping]: """The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the diff --git a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py index 60ec4ec0f23de..cc4bb93fe1a11 100644 --- a/tests/tests_pytorch/checkpointing/test_model_checkpoint.py +++ b/tests/tests_pytorch/checkpointing/test_model_checkpoint.py @@ -1339,37 +1339,6 @@ def test_last_global_step_saved(): assert model_checkpoint._last_global_step_saved == 0 -# TODO: remove test_dirpath_weights_save_path in v1.8 -@pytest.mark.parametrize( - "logger_setting", - [ - False, - TensorBoardLogger(save_dir="logger1"), - [TensorBoardLogger(save_dir="logger1"), TensorBoardLogger(save_dir="logger2")], - ], -) -def test_dirpath_weights_save_path(tmpdir, logger_setting): - """Tests that the ModelCheckpoint.dirpath is set correctly when user specifies weights_save_path with no - loggers, one logger, and multiple loggers.""" - model = BoringModel() - mc = ModelCheckpoint(monitor="epoch", save_top_k=-1) - with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"): - trainer = Trainer( - default_root_dir=tmpdir, - weights_save_path=tmpdir / "weights_save_path", - limit_train_batches=1, - limit_val_batches=1, - num_sanity_val_steps=0, - max_epochs=5, - check_val_every_n_epoch=2, - callbacks=mc, - enable_model_summary=False, - logger=logger_setting, - ) - trainer.fit(model) - assert mc.dirpath == tmpdir / "weights_save_path" / "checkpoints" - - @pytest.mark.parametrize("every_n_epochs", (0, 5)) def test_save_last_every_n_epochs_interaction(tmpdir, every_n_epochs): """Test that `save_last` ignores `every_n_epochs`.""" diff --git a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py index 07223b88b7710..3bf0ab77ec7ff 100644 --- a/tests/tests_pytorch/deprecated_api/test_remove_1-8.py +++ b/tests/tests_pytorch/deprecated_api/test_remove_1-8.py @@ -541,13 +541,6 @@ def on_pretrain_routine_end(self, trainer, pl_module): trainer.fit(model) -def test_v1_8_0_weights_save_path(tmpdir): - with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"): - trainer = Trainer(weights_save_path=tmpdir) - with pytest.deprecated_call(match=r"`Trainer.weights_save_path` has been deprecated in v1.6"): - _ = trainer.weights_save_path - - @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list): diff --git a/tests/tests_pytorch/loggers/test_all.py b/tests/tests_pytorch/loggers/test_all.py index 612d7bf035c2f..ef9a9a0af1f68 100644 --- a/tests/tests_pytorch/loggers/test_all.py +++ b/tests/tests_pytorch/loggers/test_all.py @@ -23,6 +23,7 @@ import tests_pytorch.helpers.utils as tutils from pytorch_lightning import Callback, Trainer +from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel from pytorch_lightning.loggers import ( CometLogger, @@ -157,64 +158,6 @@ def log_metrics(self, metrics, step): assert log_metric_names == expected -@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE) -def 
test_loggers_save_dir_and_weights_save_path_all(tmpdir, monkeypatch, logger_class): - """Test the combinations of save_dir, weights_save_path and default_root_dir.""" - - with contextlib.ExitStack() as stack: - for mgr in LOGGER_CTX_MANAGERS: - stack.enter_context(mgr) - _patch_comet_atexit(monkeypatch) - _test_loggers_save_dir_and_weights_save_path(tmpdir, CometLogger) - - -def _test_loggers_save_dir_and_weights_save_path(tmpdir, logger_class): - class TestLogger(logger_class): - # for this test it does not matter what these attributes are - # so we standardize them to make testing easier - @property - def version(self): - return "version" - - @property - def name(self): - return "name" - - model = BoringModel() - trainer_args = dict(default_root_dir=tmpdir, max_steps=3) - - # no weights_save_path given - save_dir = tmpdir / "logs" - weights_save_path = None - logger = TestLogger(**_get_logger_args(TestLogger, save_dir)) - trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path) - trainer.fit(model) - assert trainer._weights_save_path_internal == trainer.default_root_dir - assert trainer.checkpoint_callback.dirpath == os.path.join(str(logger.save_dir), "name", "version", "checkpoints") - assert trainer.default_root_dir == tmpdir - - # with weights_save_path given, the logger path and checkpoint path should be different - save_dir = tmpdir / "logs" - weights_save_path = tmpdir / "weights" - logger = TestLogger(**_get_logger_args(TestLogger, save_dir)) - with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"): - trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path) - trainer.fit(model) - assert trainer._weights_save_path_internal == weights_save_path - assert trainer.logger.save_dir == save_dir - assert trainer.checkpoint_callback.dirpath == weights_save_path / "checkpoints" - assert trainer.default_root_dir == tmpdir - - # no logger given - weights_save_path = tmpdir / "weights" - with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"): - trainer = Trainer(**trainer_args, logger=False, weights_save_path=weights_save_path) - trainer.fit(model) - assert trainer._weights_save_path_internal == weights_save_path - assert trainer.checkpoint_callback.dirpath == weights_save_path / "checkpoints" - assert trainer.default_root_dir == tmpdir - - @pytest.mark.parametrize( "logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE ) # WandbLogger and NeptuneLogger get tested separately From 165fa277acc10f9456489a96f24383d14e60ad50 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Sun, 28 Aug 2022 12:18:51 +0200 Subject: [PATCH 2/3] remove unused imports --- src/pytorch_lightning/trainer/connectors/callback_connector.py | 2 +- tests/tests_pytorch/loggers/test_all.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/pytorch_lightning/trainer/connectors/callback_connector.py b/src/pytorch_lightning/trainer/connectors/callback_connector.py index 139d4b655f229..a1144c2912c95 100644 --- a/src/pytorch_lightning/trainer/connectors/callback_connector.py +++ b/src/pytorch_lightning/trainer/connectors/callback_connector.py @@ -32,7 +32,7 @@ from pytorch_lightning.callbacks.timer import Timer from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0, _PYTHON_GREATER_EQUAL_3_10_0 -from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation,
rank_zero_info +from pytorch_lightning.utilities.rank_zero import rank_zero_info _log = logging.getLogger(__name__) diff --git a/tests/tests_pytorch/loggers/test_all.py b/tests/tests_pytorch/loggers/test_all.py index ef9a9a0af1f68..279a1aeab7e69 100644 --- a/tests/tests_pytorch/loggers/test_all.py +++ b/tests/tests_pytorch/loggers/test_all.py @@ -13,7 +13,6 @@ # limitations under the License. import contextlib import inspect -import os import pickle from unittest import mock from unittest.mock import ANY @@ -23,7 +22,6 @@ import tests_pytorch.helpers.utils as tutils from pytorch_lightning import Callback, Trainer -from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.demos.boring_classes import BoringModel from pytorch_lightning.loggers import ( CometLogger, From 1fd8b9da7984653c4fcadb98dbfd1772c9fcb6c5 Mon Sep 17 00:00:00 2001 From: awaelchli Date: Sun, 28 Aug 2022 12:21:32 +0200 Subject: [PATCH 3/3] changelog --- src/pytorch_lightning/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md index d6c233e4a17ca..0a9a595eae44b 100644 --- a/src/pytorch_lightning/CHANGELOG.md +++ b/src/pytorch_lightning/CHANGELOG.md @@ -101,6 +101,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - Removed deprecated support for old torchtext versions ([#14375](https://github.com/Lightning-AI/lightning/pull/14375)) +- Removed the deprecated `weights_save_path` Trainer argument and `Trainer.weights_save_path` property ([#14424](https://github.com/Lightning-AI/lightning/pull/14424)) + + + ### Fixed - Fixed `LightningDataModule` hparams parsing ([#12806](https://github.com/PyTorchLightning/pytorch-lightning/pull/12806))
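
For users migrating off the removed argument, a minimal sketch of the replacement path, following the deprecation message quoted in patch 1 (the directories "my/path" and "logs" are illustrative placeholders, not paths from this series): instead of redirecting checkpoints with `Trainer(weights_save_path=...)`, pass `dirpath` directly to the `ModelCheckpoint` callback.

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelCheckpoint

    # Before this series (deprecated in v1.6, now removed):
    # trainer = Trainer(weights_save_path="my/path")

    # After: route checkpoints through the callback's dirpath.
    # Checkpoints land in my/path/ while logs still follow default_root_dir,
    # keeping the two locations independent, as weights_save_path allowed.
    checkpoint = ModelCheckpoint(dirpath="my/path")
    trainer = Trainer(default_root_dir="logs", callbacks=[checkpoint])

When `dirpath` is not given, the rewritten `__resolve_ckpt_dir` falls back to the first logger's `save_dir` (or to `default_root_dir` when no logger is configured) and appends the "checkpoints" subdirectory.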