Skip to content

Commit c68cfd6

Browse files
authored
Rename LiteMultiNode to FabricMultiNode (#16505)
1 parent f812cb8 commit c68cfd6

File tree

11 files changed

+28
-26
lines changed

11 files changed

+28
-26
lines changed

.github/checkgroup.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,7 @@ subprojects:
205205
- "examples/fabric/**"
206206
- "examples/run_fabric_examples.sh"
207207
- "tests/tests_fabric/run_standalone_*.sh"
208-
- "tests/tests_pytorch/run_standalone_tests.sh" # used by Lite through a symlink
208+
- "tests/tests_pytorch/run_standalone_tests.sh" # used by Fabric through a symlink
209209
- "requirements/fabric/**"
210210
- "src/lightning_fabric/**"
211211
- "tests/tests_fabric/**"

docs/source-app/api_reference/components.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ ___________________
2828
~serve.python_server.PythonServer
2929
~serve.streamlit.ServeStreamlit
3030
~multi_node.base.MultiNode
31-
~multi_node.lite.LiteMultiNode
31+
~multi_node.fabric.FabricMultiNode
3232
~multi_node.pytorch_spawn.PyTorchSpawnMultiNode
3333
~multi_node.trainer.LightningTrainerMultiNode
3434
~serve.auto_scaler.AutoScaler

docs/source-pytorch/accelerators/gpu_basic.rst

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,10 +102,10 @@ use the following utility function to pick GPU indices that are "accessible", wi
102102
# Find two GPUs on the system that are not already occupied
103103
trainer = Trainer(accelerator="cuda", devices=find_usable_cuda_devices(2))
104104
105-
from lightning.lite.accelerators import find_usable_cuda_devices
105+
from lightning.fabric.accelerators import find_usable_cuda_devices
106106
107-
# Works with LightningLite too
108-
lite = LightningLite(accelerator="cuda", devices=find_usable_cuda_devices(2))
107+
# Works with Fabric too
108+
fabric = Fabric(accelerator="cuda", devices=find_usable_cuda_devices(2))
109109
110110
111111
This is especially useful when GPUs are configured to be in "exclusive compute mode", such that only one process at a time is allowed access to the device.

docs/source-pytorch/fabric/guide/multi_node/cloud.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ Launch multi-node training in the cloud
4242
:caption: app.py
4343
4444
import lightning as L
45-
from lightning.app.components import LiteMultiNode
45+
from lightning.app.components import FabricMultiNode
4646
4747
# 1. Put your code inside a LightningWork
4848
class MyTrainingComponent(L.LightningWork):
@@ -58,16 +58,16 @@ Launch multi-node training in the cloud
5858
model, optimizer = fabric.setup(model, optimizer)
5959
...
6060
61-
**Step 2:** Init a :class:`~lightning_app.core.app.LightningApp` with the ``LiteMultiNode`` component.
61+
**Step 2:** Init a :class:`~lightning_app.core.app.LightningApp` with the ``FabricMultiNode`` component.
6262
Configure the number of nodes, the number of GPUs per node, and the type of GPU:
6363

6464
.. code-block:: python
6565
:emphasize-lines: 5,7
6666
:caption: app.py
6767
68-
# 2. Create the app with the LiteMultiNode component inside
68+
# 2. Create the app with the FabricMultiNode component inside
6969
app = L.LightningApp(
70-
LiteMultiNode(
70+
FabricMultiNode(
7171
MyTrainingComponent,
7272
# Run with 2 nodes
7373
num_nodes=2,

examples/app_multi_node/train_fabric.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
import torch
22

33
import lightning as L
4-
from lightning.app.components import LiteMultiNode
4+
from lightning.app.components import FabricMultiNode
55
from lightning.fabric import Fabric
66

77

8-
class LitePyTorchDistributed(L.LightningWork):
8+
class FabricPyTorchDistributed(L.LightningWork):
99
def run(self):
1010
# 1. Prepare the model
1111
model = torch.nn.Sequential(
@@ -33,8 +33,8 @@ def run(self):
3333

3434
# 8 GPUs: (2 nodes of 4 x v100)
3535
app = L.LightningApp(
36-
LiteMultiNode(
37-
LitePyTorchDistributed,
36+
FabricMultiNode(
37+
FabricPyTorchDistributed,
3838
cloud_compute=L.CloudCompute("gpu-fast-multi"), # 4 x V100
3939
num_nodes=2,
4040
)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ warn_no_return = "False"
9999
# the list can be generated with:
100100
# mypy --no-error-summary 2>&1 | tr ':' ' ' | awk '{print $1}' | sort | uniq | sed 's/\.py//g; s|src/||g; s|\/|\.|g' | xargs -I {} echo '"{}",'
101101
module = [
102-
"lightning_app.components.multi_node.lite",
102+
"lightning_app.components.multi_node.fabric",
103103
"lightning_app.components.multi_node.base",
104104
"lightning_app.components.multi_node.pytorch_spawn",
105105
"lightning_app.components.multi_node.trainer",

src/lightning_app/CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
1818

1919
- Add support for async predict method in PythonServer and remove torch context ([#16453](https://github.com/Lightning-AI/lightning/pull/16453))
2020

21+
- Renamed `lightning.app.components.LiteMultiNode` to `lightning.app.components.FabricMultiNode` ([#16505](https://github.com/Lightning-AI/lightning/pull/16505))
22+
2123

2224
### Deprecated
2325

src/lightning_app/components/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
from lightning_app.components.database.client import DatabaseClient
22
from lightning_app.components.database.server import Database
33
from lightning_app.components.multi_node import (
4+
FabricMultiNode,
45
LightningTrainerMultiNode,
5-
LiteMultiNode,
66
MultiNode,
77
PyTorchSpawnMultiNode,
88
)
@@ -33,7 +33,7 @@
3333
"Category",
3434
"Text",
3535
"MultiNode",
36-
"LiteMultiNode",
36+
"FabricMultiNode",
3737
"LightningTrainerScript",
3838
"PyTorchLightningScriptRunner",
3939
"PyTorchSpawnMultiNode",
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
from lightning_app.components.multi_node.base import MultiNode
2-
from lightning_app.components.multi_node.lite import LiteMultiNode
2+
from lightning_app.components.multi_node.fabric import FabricMultiNode
33
from lightning_app.components.multi_node.pytorch_spawn import PyTorchSpawnMultiNode
44
from lightning_app.components.multi_node.trainer import LightningTrainerMultiNode
55

6-
__all__ = ["LiteMultiNode", "MultiNode", "PyTorchSpawnMultiNode", "LightningTrainerMultiNode"]
6+
__all__ = ["FabricMultiNode", "MultiNode", "PyTorchSpawnMultiNode", "LightningTrainerMultiNode"]

src/lightning_app/components/multi_node/lite.py renamed to src/lightning_app/components/multi_node/fabric.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828

2929

3030
@runtime_checkable
31-
class _LiteWorkProtocol(Protocol):
31+
class _FabricWorkProtocol(Protocol):
3232
@staticmethod
3333
def run() -> None:
3434
...
@@ -71,11 +71,11 @@ def run(
7171
os.environ["LOCAL_WORLD_SIZE"] = str(nprocs)
7272
os.environ["TORCHELASTIC_RUN_ID"] = "1"
7373

74-
# Used to force Lite to set up the distributed environment.
74+
# Used to force Fabric to set up the distributed environment.
7575
os.environ["LT_CLI_USED"] = "1"
7676

77-
# Used to pass information to Lite directly.
78-
def pre_fn(lite, *args, **kwargs):
77+
# Used to pass information to Fabric directly.
78+
def pre_fn(fabric, *args, **kwargs):
7979
kwargs["devices"] = nprocs
8080
kwargs["num_nodes"] = num_nodes
8181

@@ -110,7 +110,7 @@ def pre_fn(lite, *args, **kwargs):
110110
return ret_val
111111

112112

113-
class LiteMultiNode(MultiNode):
113+
class FabricMultiNode(MultiNode):
114114
def __init__(
115115
self,
116116
work_cls: Type["LightningWork"],
@@ -119,7 +119,7 @@ def __init__(
119119
*work_args: Any,
120120
**work_kwargs: Any,
121121
) -> None:
122-
assert issubclass(work_cls, _LiteWorkProtocol)
122+
assert issubclass(work_cls, _FabricWorkProtocol)
123123

124124
# Note: Private way to modify the work run executor
125125
# Probably exposed to the users in the future if needed.

0 commit comments

Comments (0)