Example #1
    def __init__(self, num_feats, in_feats, num_hops, sample_size):
        super(PartialWeightedAggregator, self).__init__()
        self.weight_store = []
        self.agg_feats = nn.ParameterList()
        self.discounts = nn.ParameterList()
        self.num_hops = num_hops
        for _ in range(num_hops):
            # Uninitialized per-hop weight buffer, mirroring torch.Tensor(num_feats, in_feats).
            self.weight_store.append(paddle.empty([num_feats, in_feats]))
            # self.agg_feats.append(nn.Parameter(torch.Tensor(sample_size, in_feats)))
            # self.discounts.append(nn.Parameter(torch.Tensor(in_feats)))
            # nn.init.xavier_uniform_(self.weight_store[-1])
            self.agg_feats.append(paddle.create_parameter(
                shape=[sample_size, in_feats], dtype='float32',
                default_initializer=paddle.nn.initializer.XavierUniform()))
            self.discounts.append(paddle.create_parameter(
                shape=[in_feats], dtype='float32',
                default_initializer=paddle.nn.initializer.XavierUniform()))
        self.reset_parameters()
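
A standalone sketch of the torch-to-paddle parameter mapping assumed above (the shape [8, 16] is hypothetical; default_initializer is one of several ways to attach the initializer):

import paddle

# torch: p = nn.Parameter(torch.Tensor(8, 16)); nn.init.xavier_uniform_(p)
p = paddle.create_parameter(
    shape=[8, 16], dtype='float32',
    default_initializer=paddle.nn.initializer.XavierUniform())
print(p.shape)  # [8, 16]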
Example #2
def test_io_struct():
    n = 16
    x1 = ti.Struct.field({"a": ti.i32, "b": ti.f32}, shape=(n, ))
    p1 = {
        "a": paddle.Tensor(2 * np.ones(n, dtype=np.int32)),
        "b": paddle.Tensor(3 * np.ones(n, dtype=np.float32)),
    }

    x1.from_paddle(p1)
    for i in range(n):
        assert x1[i].a == 2
        assert x1[i].b == 3

    p2 = x1.to_paddle()
    for k in p1:
        assert (p1[k] == p2[k]).all()
Example #3
def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize

    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token]
                  for text in texts]
    result = paddle.zeros((len(all_tokens), context_length), dtype='int64')

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            raise RuntimeError(
                f"Input {texts[i]} is too long for context length {context_length}"
            )
        result[i, :len(tokens)] = paddle.Tensor(np.array(tokens))

    return result
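
A minimal usage sketch (the prompts are illustrative; it assumes the module-level _tokenizer from this CLIP port is already constructed):

tokens = tokenize(["a diagram", "a dog", "a cat"])
print(tokens.shape)  # [3, 77]
print(tokens.dtype)  # paddle.int64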
Example #4
    def pbtPredict_Callback(self):
        # Convert the image: QImage -> PIL Image -> np.array of shape [1, 1, 28, 28]
        __img, img_array = [], []

        # Grab the source image as a QImage
        if self.mode == MODE_MNIST:
            __img = self.lbDataArea.pixmap()  # returns None if the label holds no image
            if __img is None:  # fall back to a solid black image
                # __img = QImage(224, 224, QImage.Format_Grayscale8)
                __img = ImageQt.ImageQt(
                    Image.fromarray(np.uint8(np.zeros([224, 224]))))
            else:
                __img = __img.toImage()
        elif self.mode == MODE_WRITE:
            __img = self.paintBoard.getContentAsQImage()

        # Convert to a PIL Image for processing
        pil_img = ImageQt.fromqimage(__img)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)

        img_array = np.array(pil_img.convert('L')).reshape(1, 1, 28, 28)
        img = normalize(img_array)
        img = paddle.Tensor(img)
        __result = network(img)
        argmax__result = paddle.argmax(__result).numpy()

        self.result[0] = argmax__result[0]  # predicted class index

        m = F.sigmoid(__result).numpy()

        self.result[1] = m[0][self.result[0]]

        self.lbResult.setText("%d" % (self.result[0]))
        self.lbCofidence.setText("%.8f" % (self.result[1]))
Example #5
    def test_type_Tensor(self):
        paddle.disable_static()
        inx = np.array([1, 2])
        tensorx = paddle.Tensor(inx)
        typex_str = str(type(tensorx))
        expectx = "<class 'paddle.Tensor'>"
        self.assertEqual((typex_str == expectx), True)

        tensorx = paddle.tensor.logic.Tensor(inx)
        typex_str = str(type(tensorx))
        expectx = "<class 'paddle.Tensor'>"
        self.assertEqual((typex_str == expectx), True)
Example #6
def to_tensor(string_values, name="text"):
    """
    Create a tensor whose value holds a list of strings.
    NOTICE: The value will be held on the CPU place.
    Parameters:
        string_values(list[string]): The value to be set on the tensor.
        name(string): The name of the tensor.
    """
    tensor = paddle.Tensor(core.VarDesc.VarType.STRING, [], name,
                           core.VarDesc.VarType.STRINGS, False)
    tensor.value().set_string_list(string_values)
    return tensor
def to_map_tensor(string_dict, name):
    """
    Create a tensor whose value holds a map from string keys to int values.
    NOTICE: The value will be held on the CPU place.

    Args:
        string_dict(dict): The value to be set on the tensor.
        name(string): The name of the tensor.
    """
    tensor = paddle.Tensor(core.VarDesc.VarType.RAW, [], name,
                           core.VarDesc.VarType.VOCAB, True)
    tensor.value().set_vocab(string_dict)
    return tensor
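
A brief usage sketch for the two helpers above (the values are hypothetical; both rely on the internal core.VarDesc-based Tensor constructor shown in the function bodies):

string_tensor = to_tensor(["hello", "world"], name="tokens")
vocab_tensor = to_map_tensor({"hello": 0, "world": 1}, name="vocab")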
Example #8
def test_io_simple():
    n = 32

    x1 = ti.field(ti.f32, shape=(n, n))
    p1 = paddle.Tensor(3 * np.ones((n, n), dtype=np.float32))

    x2 = ti.Matrix.field(2, 3, ti.f32, shape=(n, n))
    p2 = paddle.Tensor(3 * np.ones((n, n, 2, 3), dtype=np.float32))

    x1.from_paddle(p1)
    for i in range(n):
        for j in range(n):
            assert x1[i, j] == 3

    x2.from_paddle(p2)
    for i in range(n):
        for j in range(n):
            for k in range(2):
                for l in range(3):
                    assert x2[i, j][k, l] == 3

    p3 = x2.to_paddle()
    assert (p2 == p3).all()
Example #9
def random_crop2d(s,
                  crop_len,
                  tempo_axis=0):  # random crop according to temporal direction
    assert tempo_axis < s.ndim, 'axis out of range'
    n = s.shape[tempo_axis]
    idx = randint(high=n - crop_len)
    if isinstance(s, np.ndarray):
        sli = [slice(None) for i in range(s.ndim)]
        sli[tempo_axis] = slice(idx, idx + crop_len)
        out = s[tuple(sli)]
    else:
        out = paddle.index_select(
            s,
            paddle.Tensor(np.array([i for i in range(idx, idx + crop_len)])),
            axis=tempo_axis)
    return out
Example #10
def from_dlpack(dlpack):
    tensor_from_dlpack = fluid.core.from_dlpack(dlpack)
    place = tensor_from_dlpack._place()
    if True:  # "win" in platform: # CPU env
        if "int32" in str(tensor_from_dlpack):
            return paddorch.convertTensor(
                paddle.to_tensor(np.array(tensor_from_dlpack), dtype="int32"))
        else:
            return paddorch.Tensor(
                paddle.to_tensor(np.array(tensor_from_dlpack)))
    else:
        with paddle.fluid.dygraph.guard(place=place):
            tensor_from_dlpack.__class__ = paddle.fluid.LoDTensor
            ret = paddle.Tensor(tensor_from_dlpack)
            if "int32" in str(tensor_from_dlpack):
                ret = paddle.to_tensor(ret, dtype="int32")
            tensor_from_dlpack.__class__ = paddle.fluid.core_avx.Tensor
        return ret
Example #11
def load_and_extract_feature(file):
    s, r = pa.load(file, sr=c['sample_rate'])
    x = pa.features.mel_spect(s,
                              sample_rate=c['sample_rate'],
                              window_size=c['window_size'],
                              hop_length=c['hop_size'],
                              mel_bins=c['mel_bins'],
                              fmin=c['fmin'],
                              fmax=c['fmax'],
                              window='hann',
                              center=True,
                              pad_mode='reflect',
                              ref=1.0,
                              amin=1e-10,
                              top_db=None)

    x = x.T  # transpose so the time axis comes first
    x = paddle.Tensor(x).unsqueeze((0, 1))
    return x
Example #12
RN50_TEXT_MEAN = 0.20489279925823212
VIT_TEXT_MEAN = 0.2554764747619629
RN101_TEXT_MEAN = 0.1773824542760849

# ViT-B/32 0.2554764747619629
# RN50     0.20489279925823212
# RN101    0.1773824542760849

print('Start testing to make sure all models are aligned with official impl')

print('======testing RN101======')

model = build_rn101_model()
sd = paddle.load('./assets/RN101.pdparams')
model.load_dict(sd)
image_input = np.load('./assets/image.npy')
image_input = paddle.Tensor(image_input)
model.eval()

text = tokenize(["a diagram", "a dog", "a cat"])
text_feature = model.encode_text(text)
if abs(float(paddle.mean(text_feature**2)) - RN101_TEXT_MEAN) < EPS:
    print('Testing passed for text transformer')
else:
    print('Testing failed for text transformer')

out = model.encode_image(image_input)
if abs(float(paddle.mean(out**2)) - RN101_OUT_MEAN) < EPS:
    print('Testing passed for RN101 image encoder')
else:
    print('Testing failed for RN101 image encoder')
Example #13
paddle.Tensor.view = paddorch.Tensor.view
paddle.Tensor.repeat = paddorch.Tensor.repeat
paddle.Tensor.add = paddorch.Tensor.add
paddle.Tensor.item = paddorch.Tensor.item
paddle.Tensor.t = paddorch.Tensor.t
paddle.Tensor.reshape = paddorch.Tensor.reshape
paddle.Tensor.__setitem__ = paddorch.Tensor.__setitem__
paddle.Tensor.__getitem__ = paddorch.Tensor.__getitem__
paddle.Tensor.index_copy_ = paddorch.Tensor.index_copy_
paddle.Tensor.index_copy = paddorch.Tensor.index_copy
paddle.Tensor.new_empty = paddorch.Tensor.new_empty
paddle.Tensor.view_as = paddorch.Tensor.view_as
paddle.Tensor.clamp = paddorch.Tensor.clamp
paddle.Tensor.requires_grad_ = paddorch.Tensor.requires_grad_
paddle.Tensor.set_gradient = paddorch.Tensor.set_gradient
paddle.Tensor.backward = paddorch.Tensor.backward
paddle.Tensor.new_zeros = paddorch.Tensor.new_zeros
paddle.Tensor.new_ones = paddorch.Tensor.new_ones
paddle.Tensor.sort = paddorch.Tensor.sort
paddle.Tensor.index_select = paddorch.Tensor.index_select
paddle.Tensor.masked_fill_ = paddorch.Tensor.masked_fill_
paddle.Tensor.argmax = paddorch.Tensor.argmax
paddle.Tensor.tolist = paddorch.Tensor.tolist
paddle.Tensor.uniform_ = paddorch.Tensor.uniform_
paddle.Tensor.__getstate__ = paddorch.Tensor.__getstate__
paddle.Tensor.__setstate__ = paddorch.Tensor.__setstate__

import numpy as np
a = paddle.Tensor(np.random.rand(3, 4))

print(a.view(-1))
Example #14
out=np.array([[[0.9427, 0.0364, 0.2218],
         [0.5494, 0.0364, 0.2218],
         [0.4433, 0.3639, 0.2218],
         [0.4433, 0.0364, 0.2587],
         [0.5494, 0.0364, 0.2218]],
        [[0.2322, 0.3581, 0.3620],
         [0.6472, 0.0879, 0.7137],
         [0.2322, 0.3581, 0.8765],
         [0.6472, 0.9749, 0.7137],
         [0.6472, 0.9749, 0.7137]]])
print(out.shape)
torch_out = torch.gather(torch.FloatTensor(x), dim=-1, index=torch.LongTensor(i))

print("torch out", torch_out)

# Emulate torch.gather along the last axis with paddle.gather_nd by building
# full (row, col, index) coordinate triples.
ind = paddle.Tensor(i.astype("int64"))
ind = ind.flatten()

row = paddle.expand(paddle.reshape(paddle.arange(x.shape[0]), (i.shape[0], 1, 1)), i.shape).flatten()
print(row)
col = paddle.expand(paddle.reshape(paddle.arange(x.shape[1]), (1, i.shape[1], 1)), i.shape).flatten()
print(col)

ind2 = paddle.stack([row, col, ind]).transpose([1, 0])
print(ind2.shape)

paddle_out = paddle.gather_nd(paddle.Tensor(x), ind2).reshape(i.shape)
print("paddle out", paddle_out)


Example #15
                           batch_size=64,
                           shuffle=False)

# net definition
net = Net(reid=True)
assert os.path.isfile(
    "./checkpoint/ckpt.t7"), "Error: no checkpoint file found!"
print('Loading from checkpoint/ckpt.t7')
checkpoint = torch.load("./checkpoint/ckpt.t7")
net_dict = checkpoint['net_dict']
net.load_state_dict(net_dict, strict=False)
net.eval()
net.to(device)

# compute features
query_features = torch.Tensor([]).float()
query_labels = torch.Tensor([]).long()
gallery_features = torch.Tensor([]).float()
gallery_labels = torch.Tensor([]).long()

with torch.no_grad():
    for idx, (inputs, labels) in enumerate(queryloader):
        inputs = inputs.to(device)
        features = net(inputs).cpu()
        query_features = cat((query_features, features), axis=0)
        query_labels = cat((query_labels, labels))

    for idx, (inputs, labels) in enumerate(galleryloader):
        inputs = inputs.to(device)
        features = net(inputs).cpu()
        gallery_features = cat((gallery_features, features), axis=0)
Example #16
def fmod(x, y):
    if isinstance(y, int):
        y = paddle.Tensor(np.array([y], dtype="float32"))
    return convertTensor(paddle.floor_mod(x, y))
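
A small usage sketch (the values are hypothetical; convertTensor comes from the surrounding paddorch module, and the integer divisor is promoted to a float32 tensor as in the function body):

import paddle

x = paddle.to_tensor([5.0, 7.0, 9.0], dtype='float32')
print(fmod(x, 4))  # elementwise remainder: [1., 3., 1.]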
Example #17
def test_sqeuclidean(get_a_b):
    a, b = get_a_b
    r_torch = sqe_paddle(paddle.Tensor(a), paddle.Tensor(b))
    r_numpy = sqe_numpy(a, b)
    np.testing.assert_almost_equal(r_torch, r_numpy)
Example #18
def to_tensor(data, dtype=None, place=None, stop_gradient=True):
    r"""
    Constructs a ``paddle.Tensor`` from ``data`` , 
    which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor.

    If ``data`` is already a tensor and ``dtype`` or ``place`` does not change, no copy
    will be performed and the original tensor will be returned; otherwise a new tensor
    will be constructed and returned.

    Args:
        data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' , 
            'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8',
            'complex64' , 'complex128'. Default: None, infers dtype from ``data``
            except for python float numbers, which get their dtype from ``get_default_dtype`` .
        place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be  
            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place.
        stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True.

    Returns:
        Tensor: A Tensor constructed from ``data`` .

    Raises:
        TypeError: If the data type of ``data`` is not scalar, list, tuple, numpy.ndarray, paddle.Tensor
        ValueError: If ``data`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]
        TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128
        ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace

    Examples:

    .. code-block:: python

        import paddle
                
        type(paddle.to_tensor(1))
        # <class 'paddle.Tensor'>

        paddle.to_tensor(1)
        # Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
        #        [1])

        x = paddle.to_tensor(1)
        paddle.to_tensor(x, dtype='int32', place=paddle.CPUPlace()) # A new tensor will be constructed due to different dtype or place
        # Tensor(shape=[1], dtype=int32, place=CPUPlace, stop_gradient=True,
        #        [1])

        paddle.to_tensor((1.1, 2.2), place=paddle.CUDAPinnedPlace())
        # Tensor(shape=[1], dtype=float32, place=CUDAPinnedPlace, stop_gradient=True,
        #        [1])

        paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CUDAPlace(0), stop_gradient=False)
        # Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
        #        [[0.10000000, 0.20000000],
        #         [0.30000001, 0.40000001]])

        type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64'))
        # <class 'paddle.VarBase'>

        paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')
        # Tensor(shape=[2, 2], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
        #        [[(1+1j), (2+0j)],
        #         [(3+2j), (4+0j)]])
    """

    if place is None:
        place = _current_expected_place()
    elif not isinstance(
            place,
        (core.Place, core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)):
        raise ValueError(
            "'place' must be any of paddle.Place, paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace"
        )

    #Todo(zhouwei): Support allocate tensor on any other specified card
    if isinstance(place, core.CUDAPlace) and isinstance(
            _current_expected_place(), core.CUDAPlace
    ) and place._get_device_id() != _current_expected_place()._get_device_id():
        place = _current_expected_place()

    if not isinstance(data, np.ndarray):
        if np.isscalar(data) and not isinstance(data, str):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data)
            if data.dtype == np.object:
                raise ValueError(
                    "\n\tFaild to convert input data to a regular ndarray :\n\t - Usually "
                    "this means the input data contains nested lists with different lengths. "
                )
        elif isinstance(data, paddle.Tensor):
            data.stop_gradient = stop_gradient
            if not data.place._equals(place):
                data = data._copy_to(place, False)
            if dtype:
                if convert_dtype(dtype) != convert_dtype(data.dtype):
                    return data.astype(convert_dtype(dtype))
            return data
        else:
            raise TypeError(
                "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor"
                .format(type(data)))
        if not dtype and data.dtype in [
                'float16', 'float32', 'float64', 'complex64', 'complex128'
        ]:
            default_type = paddle.get_default_dtype()
            if np.iscomplexobj(data):
                default_type = 'complex64' if default_type in [
                    'float16', 'float32'
                ] else 'complex128'
            data = data.astype(default_type)

    if dtype and convert_dtype(dtype) != data.dtype:
        data = data.astype(dtype)

    return paddle.Tensor(value=data,
                         place=place,
                         persistable=False,
                         zero_copy=False,
                         stop_gradient=stop_gradient)
Example #19
import paddle
tensor = paddle.randn((3, 4))
dlpack = tensor.value().get_tensor()._to_dlpack()
tensor_from_dlpack = paddle.fluid.core.from_dlpack(dlpack)
tensor_from_dlpack.__class__ = paddle.fluid.LoDTensor

bb = paddle.Tensor(tensor_from_dlpack)
bb = bb.cpu()
print(bb)
paddle.set_device("cpu")  # set_device requires an explicit device string
tensor_from_dlpack.__class__ = paddle.fluid.core_avx.Tensor
# paddle.fluid.dygraph.to_variable( tensor_from_dlpack)
Example #20
def to_tensor(data, dtype=None, place=None, stop_gradient=True):
    """
    Constructs a ``paddle.Tensor`` or ``paddle.ComplexTensor`` from ``data`` , 
    which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor, paddle\.ComplexTensor.

    If the ``data`` is already a tensor and ``dtype`` or ``place`` does not change, no copy
    will be performed and the original tensor will be returned; otherwise a new tensor
    will be constructed and returned. Similarly, if the data is a numpy\.ndarray with the
    same ``dtype`` and the current place is cpu, no copy will be performed.

    The ``ComplexTensor`` is a unique type of paddle. If x is ``ComplexTensor``, then 
    ``x.real`` is the real part, and ``x.imag`` is the imaginary part.

    Args:
        data(scalar|tuple|list|ndarray|Tensor|ComplexTensor): Initial data for the tensor.
            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor, paddle\.ComplexTensor.
        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' , 
            'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8'. And
            'complex64' , 'complex128' only for ComplexTensor. Default: None, infers dtype from ``data``
            except for python float numbers, which get their dtype from ``get_default_dtype`` .
        place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be  
            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place.
        stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True.

    Returns:
        Tensor: A Tensor or ComplexTensor constructed from ``data`` .

    Raises:
        TypeError: If the data type of ``data`` is not scalar, list, tuple, numpy.ndarray, paddle.Tensor, paddle.ComplexTensor
        ValueError: If ``data`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]
        TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128
        ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np
        paddle.disable_static()
                
        type(paddle.to_tensor(1))
        # <class 'paddle.Tensor'>

        paddle.to_tensor(1)
        # Tensor: generated_tensor_0
        # - place: CUDAPlace(0)   # allocate on global default place CPU:0
        # - shape: [1]
        # - layout: NCHW
        # - dtype: int64_t
        # - data: [1]

        x = paddle.to_tensor(1)
        paddle.to_tensor(x, dtype='int32', place=paddle.CPUPlace()) # A new tensor will be constructed due to different dtype or place
        # Tensor: generated_tensor_01
        # - place: CPUPlace
        # - shape: [1]
        # - layout: NCHW
        # - dtype: int
        # - data: [1]

        paddle.to_tensor((1.1, 2.2), place=paddle.CUDAPinnedPlace())
        # Tensor: generated_tensor_1
        #   - place: CUDAPinnedPlace
        #   - shape: [2]
        #   - layout: NCHW
        #   - dtype: double
        #   - data: [1.1 2.2]

        paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CUDAPlace(0), stop_gradient=False)
        # Tensor: generated_tensor_2
        #   - place: CUDAPlace(0)
        #   - shape: [2, 2]
        #   - layout: NCHW
        #   - dtype: double
        #   - data: [0.1 0.2 0.3 0.4]

        type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64'))
        # <class 'paddle.ComplexTensor'>

        paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')
        # ComplexTensor[real]: generated_tensor_0.real
        #   - place: CUDAPlace(0)
        #   - shape: [2, 2]
        #   - layout: NCHW
        #   - dtype: float
        #   - data: [1 2 3 4]
        # ComplexTensor[imag]: generated_tensor_0.imag
        #   - place: CUDAPlace(0)
        #   - shape: [2, 2]
        #   - layout: NCHW
        #   - dtype: float
        #   - data: [1 0 2 0]
    """

    if place is None:
        place = _current_expected_place()
    elif not isinstance(place,
                        (core.CPUPlace, core.CUDAPinnedPlace, core.CUDAPlace)):
        raise ValueError(
            "'place' must be any of paddle.Place, paddle.CUDAPinnedPlace, paddle.CUDAPlace"
        )

    #Todo(zhouwei): Support allocate tensor on any other specified card
    if isinstance(place, core.CUDAPlace) and isinstance(
            _current_expected_place(), core.CUDAPlace) and place._get_device_id(
            ) != _current_expected_place()._get_device_id():
        place = _current_expected_place()

    if not isinstance(data, np.ndarray):
        if np.isscalar(data) and not isinstance(data, str):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data)
            if data.dtype == np.object:
                raise ValueError(
                    "\n\tFaild to convert input data to a regular ndarray :\n\t - Usually "
                    "this means the input data contains nested lists with different lengths. "
                )
        elif isinstance(data, paddle.Tensor):
            data.stop_gradient = stop_gradient
            if not data.place._equals(place):
                data = data._copy_to(place, False)
            if dtype:
                if convert_dtype(dtype) != convert_dtype(data.dtype):
                    return data.astype(convert_dtype(dtype))
            return data
        elif isinstance(data, paddle.ComplexTensor):
            return data
        else:
            raise TypeError(
                "Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor|paddle.ComplexTensor".
                format(type(data)))
        if not dtype and data.dtype in [
                'float16', 'float32', 'float64', 'complex64', 'complex128'
        ]:
            default_type = paddle.get_default_dtype()
            if np.iscomplexobj(data):
                default_type = 'complex64' if default_type in [
                    'float16', 'float32'
                ] else 'complex128'
            data = data.astype(default_type)

    if dtype and convert_dtype(dtype) != data.dtype:
        data = data.astype(dtype)

    if not np.iscomplexobj(data):
        if dtype and convert_dtype(dtype) != data.dtype:
            data = data.astype(dtype)
        return paddle.Tensor(
            value=data,
            place=place,
            persistable=False,
            zero_copy=True,
            stop_gradient=stop_gradient)
    else:
        name = unique_name.generate('generated_tensor')
        real_tensor = paddle.Tensor(
            value=data.real,
            place=place,
            zero_copy=True,
            name=name + ".real",
            stop_gradient=stop_gradient)
        imag_tensor = paddle.Tensor(
            value=data.imag,
            place=place,
            zero_copy=True,
            name=name + ".imag",
            stop_gradient=stop_gradient)
        return paddle.ComplexTensor(real_tensor, imag_tensor)
Example #21
from paddle import fluid
import paddorch as torch
import paddle
from scipy.sparse import csr_matrix
from paddorch.nn.init import xavier_uniform_
from paddorch.sparse import  FloatTensor
import numpy as np
place = fluid.CPUPlace()
with fluid.dygraph.guard(place=place):
    i = torch.from_numpy(np.array([[0, 2], [1, 0], [1, 2]])).astype("int32")
    v = paddle.Tensor(np.array([3, 4, 5])).astype("float32")
    x=FloatTensor(i,v,(2,3))
    print(x)

    A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]]).todense().astype("float32")
    x= paddle.Tensor(A)
    # x = torch.randn((4, 23, 16))
    print(xavier_uniform_(x))



Example #22
def test_cosine(get_a_b):
    a, b = get_a_b
    r_torch = cosine_paddle(paddle.Tensor(a), paddle.Tensor(b))
    r_numpy = cosine_numpy(a, b)
    np.testing.assert_almost_equal(r_torch, r_numpy)