```diff
@@ -17,16 +17,8 @@
 import torch
 from torch.nn.parallel.distributed import DistributedDataParallel
 
-from lightning_fabric.utilities.distributed import _all_gather_ddp_if_available as new_all_gather_ddp_if_available
 from lightning_fabric.utilities.distributed import _distributed_available as new_distributed_available
-from lightning_fabric.utilities.distributed import _gather_all_tensors as new_gather_all_tensors
-from lightning_fabric.utilities.distributed import (
-    _get_default_process_group_backend_for_device as new_get_default_process_group_backend_for_device,
-)
-from lightning_fabric.utilities.distributed import _init_dist_connection as new_init_dist_connection
-from lightning_fabric.utilities.distributed import _sync_ddp as new_sync_ddp
-from lightning_fabric.utilities.distributed import _sync_ddp_if_available as new_sync_ddp_if_available
-from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_deprecation, rank_zero_info
+from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_info
 
 
 def register_ddp_comm_hook(
```
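The hunk above drops every `lightning_fabric` alias that existed only to feed the deprecated wrappers removed below, plus `rank_zero_deprecation`, which loses its last caller in this module; `_distributed_available` survives because `_collect_states_on_rank_zero` still uses it. For callers of the removed wrappers, the deprecation messages suggest copying the implementation. A minimal sketch of that for `gather_all_tensors`, covering only the common case where the tensor has the same shape on every rank (the internal fabric helper also handles mismatched shapes); an initialized default process group is assumed:

```python
from typing import List, Optional

import torch
import torch.distributed


def gather_all_tensors(
    tensor: torch.Tensor, group: Optional["torch.distributed.ProcessGroup"] = None
) -> List[torch.Tensor]:
    # Simplified stand-in for the removed wrapper, valid when `tensor` has
    # the same shape on every rank: gather one tensor from each process.
    if group is None:
        group = torch.distributed.group.WORLD
    tensor = tensor.contiguous()  # all_gather requires contiguous input
    world_size = torch.distributed.get_world_size(group)
    gathered = [torch.zeros_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, group=group)
    return gathered
```

Every rank receives the full list, so the result can be concatenated or reduced locally.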
```diff
@@ -150,80 +142,3 @@ def _collect_states_on_rank_zero(state: Dict[str, Any]) -> Dict[int, Any]:
     if not new_distributed_available():
         return {0: state}
     return {rank: _broadcast_object_list(state, rank) for rank in range(torch.distributed.get_world_size())}
-
-
-def all_gather_ddp_if_available(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.all_gather_ddp_if_available` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_all_gather_ddp_if_available(*args, **kwargs)
-
-
-def distributed_available() -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.distributed_available` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_distributed_available()
-
-
-def gather_all_tensors(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.gather_all_tensors` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_gather_all_tensors(*args, **kwargs)
-
-
-def get_default_process_group_backend_for_device(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.get_default_process_group_backend_for_device` has been deprecated"
-        " in v1.8.0 and will be removed in v2.0.0. This function is internal but you can copy over its implementation."
-        " `lightning_fabric.utilities.distributed.get_default_process_group_backend_for_device` instead."
-    )
-    return new_get_default_process_group_backend_for_device(*args, **kwargs)
-
-
-def init_dist_connection(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.init_dist_connection` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_init_dist_connection(*args, **kwargs)
-
-
-def sync_ddp(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.sync_ddp` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_sync_ddp(*args, **kwargs)
-
-
-def sync_ddp_if_available(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.sync_ddp_if_available` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    return new_sync_ddp_if_available(*args, **kwargs)
-
-
-def tpu_distributed() -> bool:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.tpu_distributed` has been deprecated in v1.8.0 and will"
-        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
-    )
-    from lightning_fabric.accelerators.tpu import _tpu_distributed
-
-    return _tpu_distributed()
-
-
-def rank_zero_only(*args: Any, **kwargs: Any) -> Any:
-    rank_zero_deprecation(
-        "`pytorch_lightning.utilities.distributed.rank_zero_only` has been deprecated in v1.8.1 and will"
-        " be removed in v2.0.0. You can import it from `pytorch_lightning.utilities` instead."
-    )
-    from pytorch_lightning.utilities.rank_zero import rank_zero_only as new_rank_zero_only
-
-    return new_rank_zero_only(*args, **kwargs)
```
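Most of the other removed names map to one-liners. A hedged sketch of post-removal equivalents, assuming standard `torch.distributed` semantics; `rank_zero_only` is the one name with a public home, as its own deprecation message notes:

```python
import torch
import torch.distributed

# The only removed name with a public replacement, per its message:
from pytorch_lightning.utilities import rank_zero_only  # noqa: F401


def distributed_available() -> bool:
    # True only when torch.distributed is built and a process group
    # has been initialized -- the check the removed shim delegated to.
    return torch.distributed.is_available() and torch.distributed.is_initialized()


def get_default_process_group_backend_for_device(device: torch.device) -> str:
    # Conventional defaults: NCCL for CUDA devices, Gloo for everything else.
    return "nccl" if device.type == "cuda" else "gloo"
```

For `init_dist_connection`, `sync_ddp`, and the other wrappers with no public replacement, the removed messages point at the same recourse: copy the internal implementation from `lightning_fabric.utilities.distributed` into your own code rather than importing the underscore-prefixed helpers.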