@@ -20,6 +20,7 @@
 from pytorch_lightning.callbacks.callback import Callback
 from pytorch_lightning.callbacks.finetuning import BackboneFinetuning
 from pytorch_lightning.demos.boring_classes import BoringModel
+from pytorch_lightning.loggers import CSVLogger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests_pytorch.helpers.datamodules import ClassifDataModule
 from tests_pytorch.helpers.runif import RunIf
@@ -32,7 +33,12 @@ def test_lr_monitor_single_lr(tmpdir):

     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)

@@ -70,6 +76,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)

@@ -96,6 +103,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
         trainer.fit(model)
@@ -117,7 +125,12 @@ def configure_optimizers(self):

     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )

     trainer.fit(model)
@@ -154,6 +167,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)

@@ -179,6 +193,7 @@ def configure_optimizers(self):
         limit_train_batches=5,
         log_every_n_steps=1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
         trainer.fit(model)
@@ -226,6 +241,7 @@ def configure_optimizers(self):
         limit_train_batches=7,
         limit_val_batches=0.1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)

@@ -269,6 +285,7 @@ def configure_optimizers(self):
         limit_train_batches=7,
         limit_val_batches=0.1,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model)

@@ -305,7 +322,12 @@ def configure_optimizers(self):

     lr_monitor = LearningRateMonitor()
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=2, limit_val_batches=0.1, limit_train_batches=0.5, callbacks=[lr_monitor]
+        default_root_dir=tmpdir,
+        max_epochs=2,
+        limit_val_batches=0.1,
+        limit_train_batches=0.5,
+        callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(model, datamodule=dm)

@@ -330,6 +352,7 @@ def configure_optimizers(self):
         callbacks=[lr_monitor],
         enable_progress_bar=False,
         enable_model_summary=False,
+        logger=CSVLogger(tmpdir),
     )
     trainer.fit(TestModel())
     assert list(lr_monitor.lrs) == ["my_logging_name"]
@@ -349,6 +372,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )
@@ -384,6 +408,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )
@@ -475,6 +500,7 @@ def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
         limit_val_batches=0,
         limit_train_batches=2,
         callbacks=[TestFinetuning(), lr_monitor, Check()],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
         enable_checkpointing=False,
@@ -533,6 +559,7 @@ def configure_optimizers(self):
         limit_val_batches=2,
         limit_train_batches=2,
         callbacks=[lr_monitor],
+        logger=CSVLogger(tmpdir),
         enable_progress_bar=False,
         enable_model_summary=False,
     )
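
Every hunk above applies the same change: each test's Trainer now receives an explicit CSVLogger, since LearningRateMonitor needs a logger attached to the Trainer to record learning rates. Below is a minimal, self-contained sketch of that pattern outside the test suite; the output directory name and the use of BoringModel are illustrative choices, not part of the diff.

# Hedged sketch of the pattern in the hunks above: an explicit CSVLogger
# is passed alongside LearningRateMonitor. "outdir" is an illustrative path.
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.demos.boring_classes import BoringModel
from pytorch_lightning.loggers import CSVLogger

lr_monitor = LearningRateMonitor()
trainer = Trainer(
    default_root_dir="outdir",
    max_epochs=1,
    limit_train_batches=2,
    limit_val_batches=2,
    callbacks=[lr_monitor],
    logger=CSVLogger("outdir"),  # explicit logger, as in the tests above
)
trainer.fit(BoringModel())
# Learning rates are collected per optimizer, keyed by name
# (e.g. "lr-SGD" for BoringModel's SGD optimizer).
print(lr_monitor.lrs)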