Commit 46cdd05

[API Compatibility] Add more coverage (#74949)
* fix place cov
* update timeout for test_creation_and_Tensor_creation
* split into different UT
* support XPUPINNED
* support *size for Tensor.new_zeros/ones/empty
* update UT
* fix
* optimize runtime
* fix xpu pin memory
* fix stop_gradient of out
* fix range&arange setting requires_grad to out
* skip pin_memory on xpu
* skip xpu
1 parent 151b3e5 commit 46cdd05

16 files changed: +1967 -1372 lines


paddle/phi/core/compat/convert_utils.cc

Lines changed: 7 additions & 0 deletions
@@ -44,6 +44,13 @@ Backend TransToPhiBackend(const phi::Place& place) {
     }
     case AllocationType::XPU:
       return Backend::XPU;
+    case AllocationType::XPUPINNED: {
+      if (FLAGS_pinned_memory_as_cpu_backend) {
+        return Backend::CPU;
+      } else {
+        return Backend::XPU;
+      }
+    }
     case AllocationType::IPU:
       return Backend::IPU;
     case AllocationType::UNDEFINED:
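
For readers following the backend dispatch, a minimal Python sketch of what the added branch does: an XPUPINNED allocation now resolves to the CPU backend when FLAGS_pinned_memory_as_cpu_backend is set, and to the XPU backend otherwise. The helper name and string arguments below are illustrative only; the real logic is the C++ switch above.

    # Illustrative sketch (not Paddle code): mirrors the C++ switch above.
    def trans_to_phi_backend(alloc_type: str, pinned_memory_as_cpu_backend: bool) -> str:
        if alloc_type == "XPU":
            return "XPU"
        if alloc_type == "XPUPINNED":
            # New in this commit: pinned XPU memory can be reported as the CPU backend.
            return "CPU" if pinned_memory_as_cpu_backend else "XPU"
        if alloc_type == "IPU":
            return "IPU"
        raise ValueError(f"unhandled allocation type: {alloc_type}")

    assert trans_to_phi_backend("XPUPINNED", True) == "CPU"
    assert trans_to_phi_backend("XPUPINNED", False) == "XPU"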

python/paddle/base/dygraph/math_op_patch.py

Lines changed: 6 additions & 0 deletions
@@ -20,6 +20,9 @@

 import paddle
 from paddle import _C_ops
+from paddle.utils.decorator_utils import (
+    size_args_decorator_patch,
+)

 from .. import core
 from ..framework import convert_np_dtype_to_dtype_
@@ -312,6 +315,7 @@ def _new_full_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_empty_(
         var: Tensor,
         size: ShapeLike,
@@ -334,6 +338,7 @@ def _new_empty_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_ones_(
         var: Tensor,
         size: ShapeLike,
@@ -357,6 +362,7 @@ def _new_ones_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_zeros_(
         var: Tensor,
         size: ShapeLike,
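
With size_args_decorator_patch applied, the patched Tensor.new_empty / new_ones / new_zeros accept the shape either as a single list or as unpacked integers. A small dynamic-graph sketch, mirroring the usage examples in the decorator's docstring further down in this commit:

    import paddle

    base = paddle.randn([])                              # any existing tensor
    a = base.new_ones(1, 2, 3, dtype=paddle.float32)     # *size form enabled by the decorator
    b = base.new_ones([1, 2, 3], dtype=paddle.float32)   # list form still works
    c = base.new_zeros(2, 2)                             # same treatment for new_zeros / new_empty
    print(a.shape, b.shape, c.shape)                     # [1, 2, 3] [1, 2, 3] [2, 2]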

python/paddle/base/dygraph/tensor_patch_methods.py

Lines changed: 8 additions & 2 deletions
@@ -1157,10 +1157,16 @@ def cuda(

     @framework.dygraph_only
     def pin_memory(self: Tensor, blocking: bool = True) -> Tensor:
-        if self.place.is_cuda_pinned_place():
+        if (
+            self.place.is_cuda_pinned_place()
+            or self.place.is_xpu_pinned_place()
+        ):
             return self
         else:
-            res = self._copy_to(core.CUDAPinnedPlace(), blocking)
+            if paddle.device.is_compiled_with_xpu():
+                res = self._copy_to(core.XPUPinnedPlace(), blocking)
+            else:
+                res = self._copy_to(core.CUDAPinnedPlace(), blocking)
         res.stop_gradient = self.stop_gradient
         res.persistable = self.persistable
         return res
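
Behaviorally, Tensor.pin_memory now returns the tensor unchanged when it already lives in CUDA or XPU pinned memory; otherwise it copies to XPUPinnedPlace on XPU builds and to CUDAPinnedPlace elsewhere, preserving stop_gradient and persistable. A short sketch of the call site (it assumes a Paddle build with CUDA or XPU support; the resulting place depends on the build):

    import paddle

    x = paddle.randn([2, 3])
    y = x.pin_memory()
    print(y.place)                               # XPU pinned place on XPU builds, CUDA pinned otherwise
    print(y.stop_gradient == x.stop_gradient)    # True: gradient setting is carried over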

python/paddle/pir/math_op_patch.py

Lines changed: 6 additions & 0 deletions
@@ -25,6 +25,9 @@

 from paddle import _C_ops
 from paddle.base.libpaddle import DataType
 from paddle.base.wrapped_decorator import wrap_decorator
+from paddle.utils.decorator_utils import (
+    size_args_decorator_patch,
+)

 from . import Value

@@ -686,6 +689,7 @@ def _new_full_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_empty_(
         self,
         size: ShapeLike,
@@ -731,6 +735,7 @@ def _new_empty_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_ones_(
         self,
         size: ShapeLike,
@@ -777,6 +782,7 @@ def _new_ones_(
             pin_memory=pin_memory,
         )

+    @size_args_decorator_patch
     def _new_zeros_(
         self,
         size: ShapeLike,

python/paddle/tensor/creation.py

Lines changed: 14 additions & 0 deletions
@@ -1792,6 +1792,8 @@ def _check_attr(attr, message):
         )
     if requires_grad is True:
         tensor.stop_gradient = False
+        if out is not None:
+            out.stop_gradient = False
     if pin_memory and in_dynamic_mode():
         tensor = tensor.pin_memory()
     return tensor
@@ -1960,6 +1962,8 @@ def full(
         )
     if requires_grad is True:
         tensor.stop_gradient = False
+        if out is not None:
+            out.stop_gradient = False
     if pin_memory and in_dynamic_mode():
         tensor = tensor.pin_memory()
     return tensor
@@ -2109,6 +2113,8 @@ def arange(
             out=out,
         )
         tensor.stop_gradient = not requires_grad
+        if out is not None:
+            out.stop_gradient = not requires_grad
         if pin_memory and in_dynamic_mode():
             tensor = tensor.pin_memory()
         return tensor
@@ -2161,6 +2167,8 @@ def arange(
             out=out,
         )
         tensor.stop_gradient = not requires_grad
+        if out is not None:
+            out.stop_gradient = not requires_grad
         if pin_memory and in_dynamic_mode():
             tensor = tensor.pin_memory()
         return tensor
@@ -2299,6 +2307,8 @@ def range(
             out=out,
         )
         tensor.stop_gradient = not requires_grad
+        if out is not None:
+            out.stop_gradient = not requires_grad
         return tensor

     if not isinstance(start, (Variable, paddle.pir.Value)):
@@ -2332,6 +2342,8 @@ def range(
             out=out,
         )
         tensor.stop_gradient = not requires_grad
+        if out is not None:
+            out.stop_gradient = not requires_grad
         return tensor


@@ -3013,6 +3025,8 @@ def empty(
             tensor = tensor.pin_memory()
         if requires_grad is True:
             tensor.stop_gradient = False
+            if out is not None:
+                out.stop_gradient = False
         return tensor
     else:
         helper = LayerHelper("empty", **locals())
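
These hunks also propagate the requires_grad setting to the user-supplied out tensor, so it no longer stays stop_gradient=True while the returned tensor is trainable. A small dynamic-mode sketch, assuming the out= / requires_grad= keywords shown in the diff:

    import paddle

    buf = paddle.empty([5], dtype="float32")
    t = paddle.arange(0, 5, 1, dtype="float32", out=buf, requires_grad=True)
    # After this change both the returned tensor and the out buffer are trainable.
    print(t.stop_gradient, buf.stop_gradient)    # False False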

python/paddle/utils/decorator_utils.py

Lines changed: 29 additions & 0 deletions
@@ -295,6 +295,35 @@ def wrapped_func(*args: Any, **kwargs: Any) -> Any:
     return wrapped_func


+def size_args_decorator_patch(method: Callable) -> Callable:
+    """
+    A decorator that allow *size for patching method to Tensor.
+    e.g. Tensor.method(*size, *, ...).
+
+    Usage Example:
+
+        paddle.randn([]).new_ones(1, dtype=paddle.float32)
+        paddle.randn([]).new_ones(1, 2, 3, dtype=paddle.float32)
+        paddle.randn([]).new_ones([1, 2, 3], dtype=paddle.float32)
+        paddle.randn([]).new_ones(size=[1, 2, 3], dtype=paddle.float32)
+        paddle.randn([]).new_ones([1, 2, 3], paddle.float32)
+    """
+
+    @functools.wraps(method)
+    def wrapped_func(*args: Any, **kwargs: Any) -> Any:
+        if len(args) >= 2 and isinstance(args[1], int):
+            # args[0]: Tensor
+            # args[1:]: *size
+            kwargs['size'] = list(args[1:])
+            args = (args[0],)
+
+        return method(*args, **kwargs)
+
+    wrapped_func.__signature__ = inspect.signature(method)
+
+    return wrapped_func
+
+
 class VariableArgsDecorator(DecoratorBase):
     def __init__(self, var: str) -> None:
         super().__init__()
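
The decorator only rewrites a call when the second positional argument is an int: all trailing positionals are folded into a size keyword, only the receiver stays positional, and the wrapper re-exports the original signature via __signature__. A self-contained sketch of the same rewriting applied to a toy method (the names size_args_patch and new_ones below are illustrative, not Paddle APIs):

    import functools
    import inspect
    from typing import Any, Callable

    def size_args_patch(method: Callable) -> Callable:
        # Same idea as size_args_decorator_patch: fold trailing ints into `size`.
        @functools.wraps(method)
        def wrapped(*args: Any, **kwargs: Any) -> Any:
            if len(args) >= 2 and isinstance(args[1], int):
                kwargs["size"] = list(args[1:])   # gather *size
                args = (args[0],)                 # keep only the receiver
            return method(*args, **kwargs)

        wrapped.__signature__ = inspect.signature(method)
        return wrapped

    @size_args_patch
    def new_ones(receiver, size, dtype="float32"):
        return f"ones(shape={size}, dtype={dtype})"

    print(new_ones("tensor", 1, 2, 3))       # ones(shape=[1, 2, 3], dtype=float32)
    print(new_ones("tensor", [1, 2, 3]))     # ones(shape=[1, 2, 3], dtype=float32)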
