Merged — changes from all commits (removal of the `on_tpu` argument from the `LightningModule.optimizer_step` hook, [#16537](https://github.com/Lightning-AI/lightning/pull/16537)).
4 changes: 0 additions & 4 deletions docs/source-pytorch/common/optimization.rst
@@ -150,7 +150,6 @@ For example, here step optimizer A every batch and optimizer B every 2 batches.
         optimizer,
         optimizer_idx,
         optimizer_closure,
-        on_tpu=False,
         using_lbfgs=False,
     ):
         # update generator every step
@@ -181,7 +180,6 @@ Here we add a manual learning rate warm-up without an lr scheduler.
         optimizer,
         optimizer_idx,
         optimizer_closure,
-        on_tpu=False,
         using_lbfgs=False,
     ):
         # update params
@@ -212,7 +210,6 @@ to perform a step, Lightning won't be able to support accelerators, precision an
         optimizer,
         optimizer_idx,
         optimizer_closure,
-        on_tpu=False,
         using_lbfgs=False,
     ):
         optimizer.step(closure=optimizer_closure)
@@ -228,7 +225,6 @@ to perform a step, Lightning won't be able to support accelerators, precision an
         optimizer,
         optimizer_idx,
         optimizer_closure,
-        on_tpu=False,
         using_lbfgs=False,
     ):
         optimizer = optimizer.optimizer
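For reference, the updated docs example with `on_tpu` dropped reads roughly as follows. This is a minimal sketch assembled from the hunks above, assuming a GAN-style module with two optimizers (generator at index 0, discriminator at index 1); the class name and surrounding module are illustrative, not part of this diff.

    import pytorch_lightning as pl


    class GAN(pl.LightningModule):
        # Alternating schedule: step optimizer A every batch, optimizer B every 2 batches.
        def optimizer_step(
            self,
            epoch,
            batch_idx,
            optimizer,
            optimizer_idx,
            optimizer_closure,
            using_lbfgs=False,
        ):
            # update generator every step
            if optimizer_idx == 0:
                optimizer.step(closure=optimizer_closure)

            # update discriminator every 2 steps
            if optimizer_idx == 1:
                if (batch_idx + 1) % 2 == 0:
                    optimizer.step(closure=optimizer_closure)
                else:
                    # run the closure by itself so training_step() and backward()
                    # still execute on the batches where the step is skipped
                    optimizer_closure()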
3 changes: 3 additions & 0 deletions src/pytorch_lightning/CHANGELOG.md
@@ -155,6 +155,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 * Removed `Trainer(auto_scale_batch_size=...)` in favor of `Tuner(trainer).scale_batch_size()` ([#16462](https://github.com/Lightning-AI/lightning/pull/16462))
 * Removed `Trainer(auto_lr_find=...)` in favor of `Tuner(trainer).lr_find()` ([#16462](https://github.com/Lightning-AI/lightning/pull/16462))

+- Removed the `on_tpu` argument from `LightningModule.optimizer_step` hook ([#16537](https://github.com/Lightning-AI/lightning/pull/16537))
+
+
 ### Fixed

 - Fixed an unintended limitation for calling `save_hyperparameters` on mixin classes that don't subclass `LightningModule`/`LightningDataModule` ([#16369](https://github.com/Lightning-AI/lightning/pull/16369))
7 changes: 2 additions & 5 deletions src/pytorch_lightning/core/module.py
@@ -1630,7 +1630,6 @@ def optimizer_step(
         optimizer: Union[Optimizer, LightningOptimizer],
         optimizer_idx: int = 0,
         optimizer_closure: Optional[Callable[[], Any]] = None,
-        on_tpu: bool = False,
         using_lbfgs: bool = False,
     ) -> None:
         r"""
@@ -1648,19 +1647,18 @@ def optimizer_step(
             optimizer_idx: If you used multiple optimizers, this indexes into that list.
             optimizer_closure: The optimizer closure. This closure must be executed as it includes the
                 calls to ``training_step()``, ``optimizer.zero_grad()``, and ``backward()``.
-            on_tpu: ``True`` if TPU backward is required
             using_lbfgs: True if the matching optimizer is :class:`torch.optim.LBFGS`

         Examples::

             # DEFAULT
             def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
-                               optimizer_closure, on_tpu, using_lbfgs):
+                               optimizer_closure, using_lbfgs):
                 optimizer.step(closure=optimizer_closure)

             # Alternating schedule for optimizer steps (i.e.: GANs)
             def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
-                               optimizer_closure, on_tpu, using_lbfgs):
+                               optimizer_closure, using_lbfgs):
                 # update generator opt every step
                 if optimizer_idx == 0:
                     optimizer.step(closure=optimizer_closure)
@@ -1689,7 +1687,6 @@ def optimizer_step(
                 optimizer,
                 optimizer_idx,
                 optimizer_closure,
-                on_tpu,
                 using_lbfgs,
             ):
                 # update params
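The docstring hunks above trim the same argument from the learning-rate warm-up example. A runnable sketch of that pattern with the new signature (the 500-step warm-up length and the 1e-3 base learning rate are illustrative assumptions, not values taken from this diff):

    import pytorch_lightning as pl


    class WarmupModel(pl.LightningModule):
        def optimizer_step(
            self,
            epoch,
            batch_idx,
            optimizer,
            optimizer_idx,
            optimizer_closure,
            using_lbfgs=False,
        ):
            # update params
            optimizer.step(closure=optimizer_closure)

            # manually warm up the learning rate over the first 500 steps, without an lr scheduler
            if self.trainer.global_step < 500:
                lr_scale = min(1.0, float(self.trainer.global_step + 1) / 500.0)
                for pg in optimizer.param_groups:
                    pg["lr"] = lr_scale * 1e-3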
2 changes: 0 additions & 2 deletions src/pytorch_lightning/loops/optimization/optimizer_loop.py
@@ -20,7 +20,6 @@
 from torch.optim import Optimizer
 from typing_extensions import OrderedDict

-from pytorch_lightning.accelerators import TPUAccelerator
 from pytorch_lightning.core.optimizer import LightningOptimizer
 from pytorch_lightning.loops import _Loop
 from pytorch_lightning.loops.optimization.closure import AbstractClosure, OutputResult
@@ -356,7 +355,6 @@ def _optimizer_step(
             optimizer,
             opt_idx,
             train_step_and_backward_closure,
-            on_tpu=isinstance(self.trainer.accelerator, TPUAccelerator),
             using_lbfgs=is_lbfgs,
         )

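Since the loop no longer passes `on_tpu`, an override that still needs that information can recompute it from the trainer, mirroring the deleted call-site expression above. A sketch only; whether the check is still needed depends on the user code that consumed the flag, and the class name here is hypothetical.

    import pytorch_lightning as pl
    from pytorch_lightning.accelerators import TPUAccelerator


    class MyModel(pl.LightningModule):
        def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                           optimizer_closure, using_lbfgs=False):
            # recover the old flag from the trainer instead of the removed argument
            on_tpu = isinstance(self.trainer.accelerator, TPUAccelerator)
            if on_tpu:
                # TPU-specific handling would go here
                pass
            optimizer.step(closure=optimizer_closure)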
2 changes: 0 additions & 2 deletions tests/tests_pytorch/core/test_lightning_module.py
@@ -153,7 +153,6 @@ def optimizer_step(
             optimizer,
             optimizer_idx,
             closure,
-            on_tpu=False,
             using_lbfgs=False,
         ):
             if optimizer_idx == 0:
@@ -216,7 +215,6 @@ def optimizer_step(
             optimizer,
             optimizer_idx,
             closure,
-            on_tpu=False,
             using_lbfgs=False,
         ):
             if optimizer_idx == 0:
2 changes: 1 addition & 1 deletion tests/tests_pytorch/models/test_hooks.py
@@ -344,7 +344,7 @@ def _auto_train_batch(
             dict(
                 name="optimizer_step",
                 args=(current_epoch, i, ANY, 0, ANY),
-                kwargs=dict(on_tpu=False, using_lbfgs=False),
+                kwargs=dict(using_lbfgs=False),
             ),
             *(
                 [dict(name="lr_scheduler_step", args=(ANY, 0, None))]