We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent c22ff0a commit 60683d6 (Copy full SHA for 60683d6)
tests/tests_pytorch/models/test_ddp_fork_amp.py
@@ -15,7 +15,7 @@
15
16
import torch
17
18
-from pytorch_lightning.plugins import NativeMixedPrecisionPlugin
+from pytorch_lightning.plugins import MixedPrecisionPlugin
19
from tests_pytorch.helpers.runif import RunIf
20
21
@@ -24,7 +24,7 @@
24
def test_amp_gpus_ddp_fork():
25
"""Ensure the use of native AMP with `ddp_fork` (or associated alias strategies) does not generate CUDA
26
initialization errors."""
27
- _ = NativeMixedPrecisionPlugin(precision=16, device="cuda")
+ _ = MixedPrecisionPlugin(precision=16, device="cuda")
28
with multiprocessing.get_context("fork").Pool(1) as pool:
29
in_bad_fork = pool.apply(torch.cuda._is_in_bad_fork)
30
assert not in_bad_fork
0 commit comments