
Commit 6937e72

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent: 116bd74

2 files changed: 8 additions and 7 deletions


src/lightning/fabric/utilities/distributed.py

Lines changed: 1 addition & 2 deletions
@@ -322,8 +322,7 @@ def _get_default_process_group_backend_for_device(device: torch.device) -> str:
     device_backend_map = torch.distributed.Backend.default_device_backend_map
     if device.type in device_backend_map:
         return device_backend_map[device.type]
-    else:
-        return "gloo"
+    return "gloo"
 
 
 class _DatasetSamplerWrapper(Dataset):
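
For reference, the control flow after this change can be exercised in isolation with the short sketch below. This is a standalone illustration, not the library helper itself: pick_backend is a hypothetical name, it assumes a PyTorch build with distributed support, and the exact contents of default_device_backend_map depend on the installed PyTorch version.

import torch
import torch.distributed

def pick_backend(device: torch.device) -> str:
    # Mirrors the diff above: look the device type up in PyTorch's
    # default device-to-backend map, otherwise fall back to "gloo".
    # Recent PyTorch builds ship entries such as {"cpu": "gloo", "cuda": "nccl"};
    # exact contents vary by version and build options.
    device_backend_map = torch.distributed.Backend.default_device_backend_map
    if device.type in device_backend_map:
        return device_backend_map[device.type]
    return "gloo"

print(pick_backend(torch.device("cpu")))     # "gloo"
print(pick_backend(torch.device("cuda:0")))  # typically "nccl"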

tests/tests_fabric/utilities/test_distributed.py

Lines changed: 7 additions & 5 deletions
@@ -17,9 +17,9 @@
 from lightning.fabric.utilities.distributed import (
     _destroy_dist_connection,
     _gather_all_tensors,
+    _get_default_process_group_backend_for_device,
     _InfiniteBarrier,
     _init_dist_connection,
-    _get_default_process_group_backend_for_device,
     _is_dtensor,
     _set_num_threads_if_needed,
     _suggested_max_num_threads,
@@ -246,13 +246,15 @@ def test_init_dist_connection_registers_destruction_handler(_, atexit_mock):
 def test_get_default_process_group_backend_for_device():
     # register a custom backend for test
     torch.utils.rename_privateuse1_backend("pcu")
+
     def mock_backend(store, group_rank, group_size, timeout):
         pass
+
     torch.distributed.Backend.register_backend(
-        "pccl",
-        lambda store, group_rank, group_size, timeout: mock_backend(store, group_rank, group_size, timeout
-        ),
-        devices=["pcu"])
+        "pccl",
+        lambda store, group_rank, group_size, timeout: mock_backend(store, group_rank, group_size, timeout),
+        devices=["pcu"],
+    )
 
     # test that the default backend is correctly set for each device
     devices = [torch.device("cpu"), torch.device("cuda:0"), torch.device("xpu:0"), torch.device("pcu:0")]
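
The registration above is what makes the test's custom device resolvable: in recent PyTorch versions, register_backend(..., devices=[...]) also records the device-to-backend mapping consulted by _get_default_process_group_backend_for_device. Below is a self-contained sketch of that mechanism, reusing the test's "pcu"/"pccl" names; it assumes a PyTorch build with distributed support.

import torch
import torch.distributed

# Rename PyTorch's "privateuse1" device key so torch.device("pcu") parses.
torch.utils.rename_privateuse1_backend("pcu")

def mock_backend(store, group_rank, group_size, timeout):
    # Stand-in process-group constructor; a real backend would build and
    # return a ProcessGroup here.
    pass

# Registering with devices=["pcu"] records the "pcu" -> "pccl" mapping in
# Backend.default_device_backend_map (behavior of recent PyTorch releases).
torch.distributed.Backend.register_backend(
    "pccl",
    lambda store, group_rank, group_size, timeout: mock_backend(store, group_rank, group_size, timeout),
    devices=["pcu"],
)

print(torch.distributed.Backend.default_device_backend_map.get("pcu"))  # expected: "pccl"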
