38 changes: 0 additions & 38 deletions docs/source-pytorch/common/trainer.rst
@@ -1507,44 +1507,6 @@ Can specify as float or int.
    total_fit_batches = total_train_batches + total_val_batches


weights_save_path
^^^^^^^^^^^^^^^^^


.. warning:: `weights_save_path` has been deprecated in v1.6 and will be removed in v1.8. Please pass
    ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
    callback.


.. raw:: html

    <video width="50%" max-width="400px" controls
    poster="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/thumb/weights_save_path.jpg"
    src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/pl_docs/trainer_flags/weights_save_path.mp4"></video>

|

Directory of where to save weights if specified.

.. testcode::

    # default used by the Trainer
    trainer = Trainer(weights_save_path=os.getcwd())

    # save to your custom path
    trainer = Trainer(weights_save_path="my/path")

Example::

    # if checkpoint callback used, then overrides the weights path
    # **NOTE: this saves weights to some/path NOT my/path
    checkpoint = ModelCheckpoint(dirpath='some/path')
    trainer = Trainer(
        callbacks=[checkpoint],
        weights_save_path='my/path'
    )


enable_model_summary
^^^^^^^^^^^^^^^^^^^^

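For readers migrating off the removed flag, a minimal before/after sketch (the path is illustrative):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

# before (deprecated in v1.6, removed in v1.8):
#   trainer = Trainer(weights_save_path="my/path")

# after: route checkpoints explicitly through the callback
checkpoint = ModelCheckpoint(dirpath="my/path")
trainer = Trainer(callbacks=[checkpoint])
```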
3 changes: 3 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -132,6 +132,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Removed deprecated support for old torchtext versions ([#14375](https://github.com/Lightning-AI/lightning/pull/14375))


- Removed the deprecated `weights_save_path` Trainer argument and `Trainer.weights_save_path` property ([#14424](https://github.com/Lightning-AI/lightning/pull/14424))


- Removed the deprecated ([#14471](https://github.com/Lightning-AI/lightning/pull/14471))
  * `pytorch_lightning.utilities.distributed.rank_zero_only` in favor of `pytorch_lightning.utilities.rank_zero.rank_zero_only`
  * `pytorch_lightning.utilities.distributed.rank_zero_debug` in favor of `pytorch_lightning.utilities.rank_zero.rank_zero_debug`
14 changes: 4 additions & 10 deletions src/pytorch_lightning/callbacks/model_checkpoint.py
@@ -67,8 +67,7 @@ class ModelCheckpoint(Checkpoint):

            By default, dirpath is ``None`` and will be set at runtime to the location
            specified by :class:`~pytorch_lightning.trainer.trainer.Trainer`'s
            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` or
            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.weights_save_path` arguments,
            :paramref:`~pytorch_lightning.trainer.trainer.Trainer.default_root_dir` argument,
            and if the Trainer uses a logger, the path will also contain logger name and version.

        filename: checkpoint filename. Can contain named formatting options to be auto-filled.
@@ -577,21 +576,16 @@ def __resolve_ckpt_dir(self, trainer: "pl.Trainer") -> None:
        determine where to save checkpoints. The path for saving weights is set in this priority:

        1. The ``ModelCheckpoint``'s ``dirpath`` if passed in
        2. The ``Trainer``'s ``weights_saved_path`` if passed in (deprecated)
        3. The ``Logger``'s ``log_dir`` if the trainer has loggers
        4. The ``Trainer``'s ``default_root_dir`` if the trainer has no loggers
        2. The ``Logger``'s ``log_dir`` if the trainer has loggers
        3. The ``Trainer``'s ``default_root_dir`` if the trainer has no loggers

        The path gets extended with subdirectory "checkpoints".
        """
        if self.dirpath is not None:
            # short circuit if dirpath was passed to ModelCheckpoint
            return

        # TODO: Remove weights_save_path logic here in v1.8
        if trainer._weights_save_path_internal != trainer.default_root_dir:
            # the user has changed weights_save_path
            ckpt_path = os.path.join(trainer._weights_save_path_internal, "checkpoints")
        elif len(trainer.loggers) > 0:
        if len(trainer.loggers) > 0:
            if trainer.loggers[0].save_dir is not None:
                save_dir = trainer.loggers[0].save_dir
            else:
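Read on its own, the new three-step priority above amounts to the following sketch; `resolve_ckpt_dir` is an illustrative free function (not the private method), and the name/version layout is taken from the surrounding logger-handling code:

```python
import os
from typing import Optional

def resolve_ckpt_dir(dirpath: Optional[str], has_logger: bool,
                     logger_save_dir: Optional[str], logger_name: str,
                     logger_version: str, default_root_dir: str) -> str:
    if dirpath is not None:
        return dirpath  # 1. an explicit ModelCheckpoint(dirpath=...) short-circuits
    if has_logger:
        # 2. the first logger's save_dir (falling back to default_root_dir),
        #    extended with the logger's name and version
        save_dir = logger_save_dir or default_root_dir
        return os.path.join(save_dir, logger_name, logger_version, "checkpoints")
    # 3. no loggers: the Trainer's default_root_dir
    return os.path.join(default_root_dir, "checkpoints")
```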
src/pytorch_lightning/trainer/connectors/callback_connector.py
@@ -32,7 +32,7 @@
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _PYTHON_GREATER_EQUAL_3_8_0, _PYTHON_GREATER_EQUAL_3_10_0
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info
from pytorch_lightning.utilities.rank_zero import rank_zero_info

_log = logging.getLogger(__name__)

@@ -47,20 +47,12 @@ def on_trainer_init(
        enable_checkpointing: bool,
        enable_progress_bar: bool,
        default_root_dir: Optional[str],
        weights_save_path: Optional[str],
        enable_model_summary: bool,
        max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
        accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
    ) -> None:
        # init folder paths for checkpoint + weights save callbacks
        self.trainer._default_root_dir = default_root_dir or os.getcwd()
        if weights_save_path:
            rank_zero_deprecation(
                "Setting `Trainer(weights_save_path=)` has been deprecated in v1.6 and will be"
                " removed in v1.8. Please pass ``dirpath`` directly to the `ModelCheckpoint` callback"
            )

        self.trainer._weights_save_path = weights_save_path or self.trainer._default_root_dir

        # init callbacks
        if isinstance(callbacks, Callback):
src/pytorch_lightning/trainer/connectors/checkpoint_connector.py
@@ -56,8 +56,7 @@ def __init__(self, trainer: "pl.Trainer", resume_from_checkpoint: Optional[_PATH

    @property
    def _hpc_resume_path(self) -> Optional[str]:
        # TODO: in v1.8 set this equal to self.trainer.default_root_dir
        dir_path_hpc = self.trainer._weights_save_path_internal
        dir_path_hpc = self.trainer.default_root_dir
        fs = get_filesystem(dir_path_hpc)
        if not fs.isdir(dir_path_hpc):
            return None
src/pytorch_lightning/trainer/connectors/signal_connector.py
@@ -68,8 +68,8 @@ def slurm_sigusr1_handler_fn(self, signum: _SIGNUM, frame: FrameType) -> None:
        # save logger to make sure we get all the metrics
        for logger in self.trainer.loggers:
            logger.finalize("finished")
        # TODO: in v1.8 change this to use self.trainer.default_root_dir
        hpc_save_path = self.trainer._checkpoint_connector.hpc_save_path(self.trainer._weights_save_path_internal)

        hpc_save_path = self.trainer._checkpoint_connector.hpc_save_path(self.trainer.default_root_dir)
        self.trainer.save_checkpoint(hpc_save_path)

        if self.trainer.is_global_zero:
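Taken together, the two connector changes mean the SLURM save path and the auto-resume lookup now share one root. A minimal sketch of that invariant, assuming the `hpc_ckpt_N.ckpt` naming used by `hpc_save_path`:

```python
import os

default_root_dir = "runs"  # illustrative; whatever Trainer(default_root_dir=...) received

# where the SIGUSR1 handler now writes:
save_path = os.path.join(default_root_dir, "hpc_ckpt_1.ckpt")  # suffix naming assumed
# where _hpc_resume_path looks on restart: the same directory
resume_dir = default_root_dir

assert os.path.dirname(save_path) == resume_dir  # the two can no longer diverge
```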
37 changes: 0 additions & 37 deletions src/pytorch_lightning/trainer/trainer.py
@@ -169,7 +169,6 @@ def __init__(
        sync_batchnorm: bool = False,
        precision: Union[int, str] = 32,
        enable_model_summary: bool = True,
        weights_save_path: Optional[str] = None,  # TODO: Remove in 1.8
        num_sanity_val_steps: int = 2,
        resume_from_checkpoint: Optional[Union[Path, str]] = None,
        profiler: Optional[Union[Profiler, str]] = None,
@@ -402,17 +401,6 @@ def __init__(
            enable_model_summary: Whether to enable model summarization by default.
                Default: ``True``.

            weights_save_path: Where to save weights if specified. Will override default_root_dir
                for checkpoints only. Use this if for whatever reason you need the checkpoints
                stored in a different place than the logs written in `default_root_dir`.
                Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
                Defaults to `default_root_dir`.

                .. deprecated:: v1.6
                    ``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass
                    ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
                    callback.

            move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                This can save some gpu memory, but can make training slower. Use with attention.
                Default: ``False``.
@@ -488,7 +476,6 @@ def __init__(
            enable_checkpointing,
            enable_progress_bar,
            default_root_dir,
            weights_save_path,
            enable_model_summary,
            max_time,
            accumulate_grad_batches,
@@ -2233,30 +2220,6 @@ def default_root_dir(self) -> str:
            return os.path.normpath(self._default_root_dir)
        return self._default_root_dir

    @property
    def weights_save_path(self) -> str:
        """
        The default root location to save weights (checkpoints), e.g., when the
        :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.

        .. deprecated:: v1.6
            `Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.
        """
        rank_zero_deprecation("`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.")
        return self._weights_save_path_internal

    # TODO: Remove _weights_save_path_internal in v1.8
    @property
    def _weights_save_path_internal(self) -> str:
        """This is an internal implementation of weights_save_path which allows weights_save_path to be used
        internally by the framework without emitting a deprecation warning.

        To be removed in v1.8.
        """
        if get_filesystem(self._weights_save_path).protocol == "file":
            return os.path.normpath(self._weights_save_path)
        return self._weights_save_path

    @property
    def early_stopping_callback(self) -> Optional[EarlyStopping]:
        """The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the
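The deleted property also carried a small idiom that survives in `default_root_dir`: only local paths are normalized, remote URLs pass through. A minimal restatement, assuming `get_filesystem` is importable from `pytorch_lightning.utilities.cloud_io` as elsewhere in this release line:

```python
import os
from pytorch_lightning.utilities.cloud_io import get_filesystem

def normalized(path: str) -> str:
    # local filesystem paths get os.path.normpath; s3://, hdfs://, etc. are left untouched
    if get_filesystem(path).protocol == "file":
        return os.path.normpath(path)
    return path
```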
31 changes: 0 additions & 31 deletions tests/tests_pytorch/checkpointing/test_model_checkpoint.py
@@ -1339,37 +1339,6 @@ def test_last_global_step_saved():
    assert model_checkpoint._last_global_step_saved == 0


# TODO: remove test_dirpath_weights_save_path in v1.8
@pytest.mark.parametrize(
    "logger_setting",
    [
        False,
        TensorBoardLogger(save_dir="logger1"),
        [TensorBoardLogger(save_dir="logger1"), TensorBoardLogger(save_dir="logger2")],
    ],
)
def test_dirpath_weights_save_path(tmpdir, logger_setting):
    """Tests that the ModelCheckpoint.dirpath is set correctly when user specifies weights_save_path with no
    loggers, one logger, and multiple loggers."""
    model = BoringModel()
    mc = ModelCheckpoint(monitor="epoch", save_top_k=-1)
    with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"):
        trainer = Trainer(
            default_root_dir=tmpdir,
            weights_save_path=tmpdir / "weights_save_path",
            limit_train_batches=1,
            limit_val_batches=1,
            num_sanity_val_steps=0,
            max_epochs=5,
            check_val_every_n_epoch=2,
            callbacks=mc,
            enable_model_summary=False,
            logger=logger_setting,
        )
    trainer.fit(model)
    assert mc.dirpath == tmpdir / "weights_save_path" / "checkpoints"


@pytest.mark.parametrize("every_n_epochs", (0, 5))
def test_save_last_every_n_epochs_interaction(tmpdir, every_n_epochs):
    """Test that `save_last` ignores `every_n_epochs`."""
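A post-removal equivalent of the deleted test would hand the directory straight to the callback. A sketch, assuming `BoringModel` is importable from `pytorch_lightning.demos.boring_classes`:

```python
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.demos.boring_classes import BoringModel  # assumed import path

def test_dirpath_passed_directly(tmpdir):
    mc = ModelCheckpoint(dirpath=str(tmpdir / "weights"), monitor="epoch", save_top_k=-1)
    trainer = Trainer(default_root_dir=str(tmpdir), callbacks=mc, logger=False,
                      limit_train_batches=1, limit_val_batches=1, max_epochs=1,
                      enable_model_summary=False)
    trainer.fit(BoringModel())
    # an explicit dirpath short-circuits resolution: no "checkpoints" suffix is appended
    assert str(mc.dirpath).endswith("weights")
```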
7 changes: 0 additions & 7 deletions tests/tests_pytorch/deprecated_api/test_remove_1-8.py
@@ -399,13 +399,6 @@ def on_pretrain_routine_end(self, trainer, pl_module):
    trainer.fit(model)


def test_v1_8_0_weights_save_path(tmpdir):
    with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"):
        trainer = Trainer(weights_save_path=tmpdir)
    with pytest.deprecated_call(match=r"`Trainer.weights_save_path` has been deprecated in v1.6"):
        _ = trainer.weights_save_path


@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):
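With both the flag and the property gone, the deprecation assertions have nothing left to catch; the closest remaining sanity check is that the old kwarg is simply rejected (a sketch, not a test from this PR):

```python
import pytest
from pytorch_lightning import Trainer

def test_weights_save_path_is_gone(tmpdir):
    # the keyword argument no longer exists, so Python rejects it outright
    with pytest.raises(TypeError):
        Trainer(weights_save_path=tmpdir)
```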
59 changes: 0 additions & 59 deletions tests/tests_pytorch/loggers/test_all.py
@@ -13,7 +13,6 @@
# limitations under the License.
import contextlib
import inspect
import os
import pickle
from unittest import mock
from unittest.mock import ANY
@@ -157,64 +156,6 @@ def log_metrics(self, metrics, step):
    assert log_metric_names == expected


@pytest.mark.parametrize("logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE)
def test_loggers_save_dir_and_weights_save_path_all(tmpdir, monkeypatch, logger_class):
    """Test the combinations of save_dir, weights_save_path and default_root_dir."""

    with contextlib.ExitStack() as stack:
        for mgr in LOGGER_CTX_MANAGERS:
            stack.enter_context(mgr)
        _patch_comet_atexit(monkeypatch)
        _test_loggers_save_dir_and_weights_save_path(tmpdir, CometLogger)


def _test_loggers_save_dir_and_weights_save_path(tmpdir, logger_class):
    class TestLogger(logger_class):
        # for this test it does not matter what these attributes are
        # so we standardize them to make testing easier
        @property
        def version(self):
            return "version"

        @property
        def name(self):
            return "name"

    model = BoringModel()
    trainer_args = dict(default_root_dir=tmpdir, max_steps=3)

    # no weights_save_path given
    save_dir = tmpdir / "logs"
    weights_save_path = None
    logger = TestLogger(**_get_logger_args(TestLogger, save_dir))
    trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path)
    trainer.fit(model)
    assert trainer._weights_save_path_internal == trainer.default_root_dir
    assert trainer.checkpoint_callback.dirpath == os.path.join(str(logger.save_dir), "name", "version", "checkpoints")
    assert trainer.default_root_dir == tmpdir

    # with weights_save_path given, the logger path and checkpoint path should be different
    save_dir = tmpdir / "logs"
    weights_save_path = tmpdir / "weights"
    logger = TestLogger(**_get_logger_args(TestLogger, save_dir))
    with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"):
        trainer = Trainer(**trainer_args, logger=logger, weights_save_path=weights_save_path)
    trainer.fit(model)
    assert trainer._weights_save_path_internal == weights_save_path
    assert trainer.logger.save_dir == save_dir
    assert trainer.checkpoint_callback.dirpath == weights_save_path / "checkpoints"
    assert trainer.default_root_dir == tmpdir

    # no logger given
    weights_save_path = tmpdir / "weights"
    with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"):
        trainer = Trainer(**trainer_args, logger=False, weights_save_path=weights_save_path)
    trainer.fit(model)
    assert trainer._weights_save_path_internal == weights_save_path
    assert trainer.checkpoint_callback.dirpath == weights_save_path / "checkpoints"
    assert trainer.default_root_dir == tmpdir


@pytest.mark.parametrize(
    "logger_class", ALL_LOGGER_CLASSES_WO_NEPTUNE
)  # WandbLogger and NeptuneLogger get tested separately
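What remains after deleting this combination test is the logger-driven default. A sketch of the surviving expectation (paths and the commented assertion are illustrative):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger

logger = TensorBoardLogger(save_dir="logs", name="name", version="version")
trainer = Trainer(default_root_dir=".", logger=logger, max_steps=3)
# after trainer.fit(model), with no dirpath given, checkpoints are expected at:
#   trainer.checkpoint_callback.dirpath == "logs/name/version/checkpoints"
```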