Commit 9ed43c6

Set the logger explicitly in tests (#15815)
1 parent 37fe3f6 · commit 9ed43c6

18 files changed: +111 −33 lines
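
The whole change follows one pattern: each test now either passes a logger explicitly or disables logging with `logger=False`, instead of relying on the Trainer's implicit default logger (a `TensorBoardLogger` created under `default_root_dir`). A minimal sketch of the two variants, reusing the `BoringModel` and `CSVLogger` helpers the diffs below import; the test names and batch limits are illustrative, not part of the commit:

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger


def test_with_explicit_logger(tmpdir):
    # Explicit logger: the test controls exactly which logger is attached and no
    # longer depends on the default TensorBoardLogger (and thus on tensorboard).
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=CSVLogger(tmpdir),
    )
    trainer.fit(BoringModel())


def test_without_logger(tmpdir):
    # Logging disabled: no `lightning_logs/version_0` directory is created, so the
    # default checkpoint location becomes `tmpdir / "checkpoints"` (see the path
    # assertions further down in this diff).
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_train_batches=2, limit_val_batches=2, logger=False)
    trainer.fit(BoringModel())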

requirements/pytorch/test.txt

Lines changed: 3 additions & 3 deletions
@@ -12,8 +12,8 @@ scikit-learn>0.22.1, <1.1.3
 onnxruntime<1.14.0
 psutil<5.9.4 # for `DeviceStatsMonitor`
 pandas>1.0, <1.5.2 # needed in benchmarks
-fastapi<0.87.0
-uvicorn<0.19.1
+fastapi<0.87.0 # for `ServableModuleValidator`
+uvicorn<0.19.1 # for `ServableModuleValidator`
 
-tensorboard>=2.9.1, <2.12.0
+tensorboard>=2.9.1, <2.12.0 # for `TensorBoardLogger`
 protobuf<=3.20.1 # strict # an extra is updating protobuf, this pin prevents TensorBoard failure

tests/tests_pytorch/callbacks/progress/test_rich_progress_bar.py

Lines changed: 3 additions & 1 deletion
@@ -23,6 +23,7 @@
 from pytorch_lightning.callbacks import ProgressBarBase, RichProgressBar
 from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme
 from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset, RandomIterableDataset
+from pytorch_lightning.loggers import CSVLogger
 from tests_pytorch.helpers.runif import RunIf
 
 
@@ -330,7 +331,7 @@ def training_step(self, *args, **kwargs):
 
     progress_bar = RichProgressBar()
     model = CustomModel()
-    trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True)
+    trainer = Trainer(default_root_dir=tmpdir, callbacks=progress_bar, fast_dev_run=True, logger=CSVLogger(tmpdir))
 
     trainer.fit(model)
     main_progress_bar_id = progress_bar.main_progress_bar_id
@@ -384,6 +385,7 @@ def test_step(self, batch, batch_idx):
         enable_checkpointing=False,
         log_every_n_steps=1,
         callbacks=pbar,
+        logger=CSVLogger(tmpdir),
     )
 
     trainer.fit(model)

tests/tests_pytorch/callbacks/progress/test_tqdm_progress_bar.py

Lines changed: 2 additions & 0 deletions
@@ -29,6 +29,7 @@
 from pytorch_lightning.callbacks.progress.tqdm_progress import Tqdm
 from pytorch_lightning.core.module import LightningModule
 from pytorch_lightning.demos.boring_classes import BoringModel, RandomDataset
+from pytorch_lightning.loggers import CSVLogger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests_pytorch.helpers.runif import RunIf
 
@@ -706,6 +707,7 @@ def test_step(self, batch, batch_idx):
         enable_checkpointing=False,
         log_every_n_steps=1,
         callbacks=pbar,
+        logger=CSVLogger(tmpdir),
     )
 
     trainer.fit(model)

tests/tests_pytorch/callbacks/test_device_stats_monitor.py

Lines changed: 2 additions & 2 deletions
@@ -155,13 +155,13 @@ def test_prefix_metric_keys():
     assert converted_metrics == {"foo.1": 1.0, "foo.2": 2.0, "foo.3": 3.0}
 
 
-def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch):
+def test_device_stats_monitor_warning_when_psutil_not_available(monkeypatch, tmp_path):
     """Test that warning is raised when psutil is not available."""
     import pytorch_lightning.callbacks.device_stats_monitor as imports
 
     monkeypatch.setattr(imports, "_PSUTIL_AVAILABLE", False)
     monitor = DeviceStatsMonitor()
-    trainer = Trainer()
+    trainer = Trainer(logger=CSVLogger(tmp_path))
     assert trainer.strategy.root_device == torch.device("cpu")
     # TODO: raise an exception from v1.9
     with pytest.warns(UserWarning, match="psutil` is not installed"):

tests/tests_pytorch/callbacks/test_lr_monitor.py

Lines changed: 30 additions & 3 deletions
@@ -20,6 +20,7 @@
 from pytorch_lightning.callbacks.callback import Callback
 from pytorch_lightning.callbacks.finetuning import BackboneFinetuning
 from pytorch_lightning.demos.boring_classes import BoringModel
+from pytorch_lightning.loggers import CSVLogger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests_pytorch.helpers.datamodules import ClassifDataModule
 from tests_pytorch.helpers.runif import RunIf
@@ -32,7 +33,12 @@ def test_lr_monitor_single_lr(tmpdir):
 
     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -70,6 +76,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -96,6 +103,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
         trainer.fit(model)
@@ -117,7 +125,12 @@ def configure_optimizers(self):
 
     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
 
     trainer.fit(model)
@@ -154,6 +167,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -179,6 +193,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
         trainer.fit(model)
@@ -226,6 +241,7 @@ def configure_optimizers(self):
         limit_train_batches=7,
         limit_val_batches=0.1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -269,6 +285,7 @@ def configure_optimizers(self):
         limit_train_batches=7,
         limit_val_batches=0.1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -305,7 +322,12 @@ def configure_optimizers(self):
 
     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model, datamodule=dm)
 
@@ -330,6 +352,7 @@ def configure_optimizers(self):
         callbacks=[lr_monitor],
         enable_progress_bar=False,
         enable_model_summary=False,
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(TestModel())
     assert list(lr_monitor.lrs) == ["my_logging_name"]
@@ -349,6 +372,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )
@@ -384,6 +408,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )
@@ -475,6 +500,7 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
         limit_val_batches=0,
         limit_train_batches=2,
         callbacks=[TestFinetuning(), lr_monitor, Check()],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
         enable_checkpointing=False,
@@ -533,6 +559,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )

tests/tests_pytorch/callbacks/test_stochastic_weight_avg.py

Lines changed: 2 additions & 1 deletion
@@ -303,13 +303,14 @@ def _swa_resume_training_from_checkpoint(tmpdir, model, resume_model, ddp=False)
         "limit_val_batches": 0,
         "accumulate_grad_batches": 2,
         "enable_progress_bar": False,
+        "logger": False,
     }
     trainer = Trainer(callbacks=SwaTestCallback(swa_epoch_start=swa_start, swa_lrs=0.1), **trainer_kwargs)
 
     with _backward_patch(trainer), pytest.raises(Exception, match="SWA crash test"):
         trainer.fit(model)
 
-    checkpoint_dir = Path(tmpdir) / "lightning_logs" / "version_0" / "checkpoints"
+    checkpoint_dir = Path(tmpdir) / "checkpoints"
     checkpoint_files = os.listdir(checkpoint_dir)
     assert len(checkpoint_files) == 1
     ckpt_path = str(checkpoint_dir / checkpoint_files[0])

tests/tests_pytorch/checkpointing/test_model_checkpoint.py

Lines changed: 15 additions & 5 deletions
@@ -35,7 +35,7 @@
 from pytorch_lightning import seed_everything, Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
 from pytorch_lightning.demos.boring_classes import BoringModel
-from pytorch_lightning.loggers import TensorBoardLogger
+from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _OMEGACONF_AVAILABLE
 from tests_pytorch.helpers.runif import RunIf
@@ -301,9 +301,11 @@ def test_model_checkpoint_with_non_string_input(tmpdir, save_top_k: int):
 
     checkpoint = ModelCheckpoint(monitor="early_stop_on", dirpath=None, filename="{epoch}", save_top_k=save_top_k)
     max_epochs = 2
-    trainer = Trainer(default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs)
+    trainer = Trainer(
+        default_root_dir=tmpdir, callbacks=[checkpoint], overfit_batches=0.20, max_epochs=max_epochs, logger=False
+    )
     trainer.fit(model)
-    assert checkpoint.dirpath == tmpdir / trainer.logger.name / "version_0" / "checkpoints"
+    assert checkpoint.dirpath == tmpdir / "checkpoints"
 
     if save_top_k == -1:
         ckpt_files = os.listdir(checkpoint.dirpath)
@@ -753,15 +755,20 @@ def test_default_checkpoint_behavior(tmpdir):
 
     model = LogInTwoMethods()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=3, enable_progress_bar=False, limit_train_batches=5, limit_val_batches=5
+        default_root_dir=tmpdir,
+        max_epochs=3,
+        enable_progress_bar=False,
+        limit_train_batches=5,
+        limit_val_batches=5,
+        logger=False,
     )
 
     with patch.object(trainer, "save_checkpoint", wraps=trainer.save_checkpoint) as save_mock:
         trainer.fit(model)
         results = trainer.test()
 
     assert len(results) == 1
-    save_dir = tmpdir / "lightning_logs" / "version_0" / "checkpoints"
+    save_dir = tmpdir / "checkpoints"
     save_weights_only = trainer.checkpoint_callback.save_weights_only
     save_mock.assert_has_calls(
         [
@@ -867,6 +874,7 @@ def validation_step(self, batch, batch_idx):
         "enable_model_summary": False,
         "log_every_n_steps": 1,
         "default_root_dir": tmpdir,
+        "logger": CSVLogger(tmpdir),
     }
     trainer = Trainer(**trainer_kwargs, callbacks=[checkpoint_callback])
     trainer.fit(model)
@@ -931,6 +939,7 @@ def assert_checkpoint_log_dir(idx):
         limit_val_batches=3,
         limit_test_batches=4,
         callbacks=[checkpoint_cb],
+        logger=TensorBoardLogger(tmpdir),
     )
     trainer = Trainer(**trainer_config)
     assert_trainer_init(trainer)
@@ -953,6 +962,7 @@ def assert_checkpoint_log_dir(idx):
     assert_checkpoint_content(ckpt_dir)
 
     # load from checkpoint
+    trainer_config["logger"] = TensorBoardLogger(tmpdir)
     trainer = pl.Trainer(**trainer_config)
     assert_trainer_init(trainer)
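
The path assertions above change because `ModelCheckpoint` derives its default `dirpath` from the attached logger: with a logger it nests under the logger's log directory (e.g. `lightning_logs/version_0/checkpoints`), and with `logger=False` it falls back to `<default_root_dir>/checkpoints`. A rough sketch of that fallback under the same assumptions; the function name is illustrative:

from pathlib import Path

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel


def check_default_ckpt_dir(tmpdir):
    # With no logger there is no version directory, so the default ModelCheckpoint
    # saves into <default_root_dir>/checkpoints -- the path asserted in the diffs above.
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=1,
        limit_val_batches=0,
        enable_progress_bar=False,
        logger=False,
    )
    trainer.fit(BoringModel())
    assert Path(trainer.checkpoint_callback.dirpath) == Path(tmpdir) / "checkpoints"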

tests/tests_pytorch/loggers/test_logger.py

Lines changed: 9 additions & 1 deletion
@@ -239,7 +239,12 @@ def __init__(self, param_one, param_two):
 
     model = TestModel("pytorch", "lightning")
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=1, limit_train_batches=0.1, limit_val_batches=0.1, num_sanity_val_steps=0
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_train_batches=0.1,
+        limit_val_batches=0.1,
+        num_sanity_val_steps=0,
+        logger=TensorBoardLogger(tmpdir),
     )
     trainer.fit(model)
 
@@ -270,6 +275,7 @@ class _Test:
 
     trainer = Trainer(
         default_root_dir=tmpdir,
+        logger=TensorBoardLogger(tmpdir),
         max_epochs=1,
         limit_train_batches=0.1,
         limit_val_batches=0.1,
@@ -294,6 +300,7 @@ class _Test:
     dm = TestDataModule(diff_params)
     trainer = Trainer(
         default_root_dir=tmpdir,
+        logger=TensorBoardLogger(tmpdir),
         max_epochs=1,
         limit_train_batches=0.1,
         limit_val_batches=0.1,
@@ -311,6 +318,7 @@ class _Test:
     dm = TestDataModule(tensor_params)
     trainer = Trainer(
         default_root_dir=tmpdir,
+        logger=TensorBoardLogger(tmpdir),
         max_epochs=1,
         limit_train_batches=0.1,
         limit_val_batches=0.1,

tests/tests_pytorch/loggers/test_tensorboard.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@ def __init__(self, b1=0.5, b2=0.999):
             super().__init__()
             self.save_hyperparameters()
 
-    trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
+    trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=TensorBoardLogger(tmpdir))
     model = CustomModel()
     assert trainer.log_dir == trainer.logger.log_dir
     trainer.fit(model)

tests/tests_pytorch/models/test_grad_norm.py

Lines changed: 8 additions & 1 deletion
@@ -18,6 +18,7 @@
 
 from pytorch_lightning import Trainer
 from pytorch_lightning.demos.boring_classes import BoringModel
+from pytorch_lightning.loggers import CSVLogger
 
 
 class ModelWithManualGradTracker(BoringModel):
@@ -86,7 +87,13 @@ def on_train_batch_end(self, *_) -> None:
 @pytest.mark.parametrize("log_every_n_steps", [1, 2, 3])
 def test_grad_tracking_interval(tmpdir, log_every_n_steps):
     """Test that gradient norms get tracked in the right interval and that everytime the same keys get logged."""
-    trainer = Trainer(default_root_dir=tmpdir, track_grad_norm=2, log_every_n_steps=log_every_n_steps, max_steps=10)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        track_grad_norm=2,
+        log_every_n_steps=log_every_n_steps,
+        max_steps=10,
+        logger=CSVLogger(tmpdir),
+    )
 
     with patch.object(trainer.logger, "log_metrics") as mocked:
         model = BoringModel()
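
The grad-norm test patches `trainer.logger.log_metrics`, which only works when a logger is guaranteed to be attached, hence the explicit `CSVLogger`. A rough sketch of that mocking pattern; the test name and the loose assertion are illustrative:

from unittest.mock import patch

from pytorch_lightning import Trainer
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger


def test_grad_norm_is_logged(tmpdir):
    trainer = Trainer(
        default_root_dir=tmpdir,
        track_grad_norm=2,
        log_every_n_steps=1,
        max_steps=2,
        logger=CSVLogger(tmpdir),
    )
    # Replace the logger's log_metrics with a mock; this is only reliable because
    # a logger is attached explicitly rather than created implicitly.
    with patch.object(trainer.logger, "log_metrics") as mocked:
        trainer.fit(BoringModel())
    assert mocked.called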
