Code example #1
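This sample-input builder constructs a conv2d input, weight, and optional bias with make_tensor, scaling the channel counts by groups.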
def sample_inputs_conv2d(has_bias,
                         self,
                         device,
                         dtype,
                         requires_grad,
                         extra_args=(),
                         groups=1):
    in_ch, out_ch = 6, 4
    inp = make_tensor((2, in_ch * groups, 7, 5),
                      device=device,
                      dtype=dtype,
                      requires_grad=requires_grad,
                      low=-1,
                      high=1)
    weight = make_tensor((out_ch * groups, in_ch, 3, 2),
                         device=device,
                         dtype=dtype,
                         requires_grad=requires_grad,
                         low=-1,
                         high=1)
    bias = None
    if has_bias:
        bias = make_tensor((out_ch * groups, ),
                           device=device,
                           dtype=dtype,
                           requires_grad=requires_grad,
                           low=-1,
                           high=1)
    return [SampleInput(inp, args=((weight, bias) + extra_args))]
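For context, a minimal sketch of how a sample-input function with this argument order is typically used, binding has_bias up front with functools.partial; the wiring below is an assumption for illustration, not taken from the snippet above:

import functools

# Hypothetical usage: bind has_bias=True so the function can then be called
# with the usual (self, device, dtype, requires_grad) test arguments.
sample_inputs_conv2d_with_bias = functools.partial(sample_inputs_conv2d, True)
samples = sample_inputs_conv2d_with_bias(None, 'cpu', torch.float32, False)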
Code example #2
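This generator yields boolean, floating-point, complex, and integer test tensors, embedding dtype-specific special values (optionally including large and extremal ones) into a large tensor and deriving medium, scalar, and small tensors from it; empty tensors are added separately.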
def generate_numeric_tensors(device, dtype, *,
                             domain=(None, None),
                             include_large_values=True,
                             include_extremal_values=True):
    medium_length = 812
    large_size = (1029, 917)
    offset = 63

    assert large_size[1] > (medium_length + offset)
    assert medium_length % 4 == 0

    # Special-cases bool
    if dtype is torch.bool:
        tensors = (torch.empty(0, device=device, dtype=torch.bool),
                   torch.tensor(True, device=device),
                   torch.tensor(False, device=device),
                   torch.tensor((True, False), device=device),
                   make_tensor((medium_length,), device=device, dtype=dtype, low=None, high=None),
                   make_tensor(large_size, device=device, dtype=dtype, low=None, high=None))
        return tensors

    # Acquires dtype-specific vals
    if dtype.is_floating_point or dtype.is_complex:
        large_vals = _large_float_vals if include_large_values else tuple()
        extremals = _float_extremals if include_extremal_values else tuple()
        vals = _float_vals + large_vals + extremals

        # Converts float -> complex vals if dtype is complex
        if dtype.is_complex:
            vals = tuple(complex(x, y) for x, y in product(vals, vals))
    elif dtype is torch.uint8:
        vals = _unsigned_int_vals
    else:  # dtype is a signed integer type
        assert dtype in (torch.int8, torch.int16, torch.int32, torch.int64)
        large_vals = _large_int_vals if include_large_values else tuple()
        vals = _int_vals + large_vals

    assert len(vals) < medium_length

    # Constructs the large tensor containing vals
    large_tensor = make_tensor(large_size, device=device, dtype=dtype, low=domain[0], high=domain[1])

    # Inserts the vals at an odd place
    large_tensor[57][offset:offset + len(vals)] = torch.tensor(vals, device=device, dtype=dtype)

    # Takes a medium sized copy of the large tensor containing vals
    medium_tensor = large_tensor[57][offset:offset + medium_length]

    # Constructs small tensors (4 elements)
    small_tensors = (t for t in torch.split(medium_tensor, 4))

    # Constructs scalar tensors
    scalar_tensors = (t.squeeze() for t in torch.split(medium_tensor, 1))

    # Tensors with no elements
    empty_sizes = ((0,), (0, 3, 3), (1, 0, 5), (6, 0, 0, 0), (3, 0, 1, 0))
    empty_tensors = (torch.empty(size, device=device, dtype=dtype) for size in empty_sizes)

    return chain(empty_tensors, scalar_tensors, small_tensors, (medium_tensor,), (large_tensor,))
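A minimal sketch of how the returned iterable might be consumed; the operator torch.abs is a stand-in assumption for whatever op is under test:

# Hypothetical consumer: apply the op under test to every generated tensor,
# covering empty, scalar, small, medium, and large inputs in one pass.
for t in generate_numeric_tensors('cpu', torch.float32):
    result = torch.abs(t)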
Code example #3
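This test checks that passing an ExpandedWeight to an unsupported function (torch.add) raises the expected RuntimeError.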
def test_expanded_weight_error(self, device):
    batch_size = 3
    sample_input = make_tensor((batch_size, 4),
                               device,
                               torch.float32,
                               requires_grad=True)
    sample_weight = make_tensor((4,),
                                device,
                                torch.float32,
                                requires_grad=True)
    with self.assertRaisesRegex(
            RuntimeError,
            r"Expanded Weights encountered but cannot handle function"):
        torch.add(sample_input, ExpandedWeight(sample_weight, batch_size))
Code example #4
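This sample-input function pairs a random (5, 5, 5) tensor with a battery of basic and advanced __getitem__ index expressions.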
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
    S = 5
    test_args = [
        ([1, 2], ),
        (slice(0, 3), ),
        ([slice(0, 3), 1], ),
        ([[0, 2, 3], [1, 3, 3], [0, 0, 2]], ),
        ([[0, 0, 3], [1, 1, 3], [0, 0, 2]], ),
        ([slice(None), slice(None), [0, 3]], ),
        ([slice(None), [0, 3], slice(None)], ),
        ([[0, 3], slice(None), slice(None)], ),
        ([[0, 3], [1, 2], slice(None)], ),
        ([
            [0, 3],
        ], ),
        ([[0, 3], slice(None)], ),
        ([[0, 3], Ellipsis], ),
        ([[0, 2, 3], [1, 3, 3],
          torch.LongTensor([0, 0, 2])], ),
    ]

    return tuple(
        SampleInput(make_tensor((S, S, S),
                                device=device,
                                dtype=dtype,
                                low=None,
                                high=None,
                                requires_grad=requires_grad),
                    args=args) for args in test_args)
Code example #5
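This test checks that the method, inplace, and TorchScript variants of a unary op agree with its functional form, expecting a RuntimeError from inplace variants that would promote integer inputs to a floating dtype.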
    def test_variant_consistency(self, device, dtype, op):
        def _fn(t):
            return op(t)

        t = make_tensor((5, 5),
                        device,
                        dtype,
                        low=op.domain[0],
                        high=op.domain[1])
        expected = op(t)

        for alt, inplace in ((op.get_method(), False),
                             (op.get_inplace(), True),
                             (torch.jit.script(_fn), False)):
            if alt is None:
                # This variant is not implemented for the op; skip it.
                continue

            if inplace and op.promotes_integers_to_float and \
                    dtype in integral_types() + (torch.bool,):
                # Assert that RuntimeError is raised
                # for inplace variant of Operators that
                # promote integer input to floating dtype.
                with self.assertRaises(RuntimeError):
                    alt(t.clone())
                continue

            actual = alt(t.clone())
            self.assertEqual(actual, expected, rtol=0, atol=0)
Code example #6
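This test validates nan_to_num against NumPy on contiguous and transposed inputs, injecting NaN and infinity rows for floating dtypes and also exercising the out= variant.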
    def test_nan_to_num(self, device, dtype):
        for contiguous in [False, True]:
            x = make_tensor((64, 64), low=0., high=100., dtype=dtype, device=device)

            if dtype.is_floating_point:
                # Add extremal values.
                extremals = [float('nan'), float('inf'), -float('inf')]
                for idx, extremal in zip(torch.randint(0, 63, (3,)), extremals):
                    x[idx, :] = extremal

            if not contiguous:
                x = x.T

            # With args
            nan = random.random()
            posinf = random.random() * 5
            neginf = random.random() * 10

            self.compare_with_numpy(lambda x: x.nan_to_num(nan=nan, posinf=posinf),
                                    lambda x: np.nan_to_num(x, nan=nan, posinf=posinf),
                                    x)
            self.compare_with_numpy(lambda x: x.nan_to_num(posinf=posinf, neginf=neginf),
                                    lambda x: np.nan_to_num(x, posinf=posinf, neginf=neginf),
                                    x)

            # Out Variant
            out = torch.empty_like(x)
            result = torch.nan_to_num(x)
            torch.nan_to_num(x, out=out)
            self.assertEqual(result, out)

            result = torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
            torch.nan_to_num(x, out=out, nan=nan, posinf=posinf, neginf=neginf)
            self.assertEqual(result, out)
Code example #7
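This helper asserts the core contract of make_tensor: requested shape, device, dtype, value bounds, requires_grad (honored only for float and cfloat), and the noncontiguous flag.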
        def check(size, low, high, requires_grad, noncontiguous):
            t = make_tensor(size,
                            device,
                            dtype,
                            low=low,
                            high=high,
                            requires_grad=requires_grad,
                            noncontiguous=noncontiguous)

            self.assertEqual(t.shape, size)
            self.assertEqual(t.device, torch.device(device))
            self.assertEqual(t.dtype, dtype)

            low = -9 if low is None else low
            high = 9 if high is None else high

            if t.numel() > 0 and dtype in [torch.long, torch.float]:
                self.assertTrue(t.le(high).logical_and(t.ge(low)).all().item())

            if dtype in [torch.float, torch.cfloat]:
                self.assertEqual(t.requires_grad, requires_grad)
            else:
                self.assertFalse(t.requires_grad)

            if t.numel() > 1:
                self.assertEqual(t.is_contiguous(), not noncontiguous)
            else:
                self.assertTrue(t.is_contiguous())
Code example #8
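This helper draws a random boolean mask of the requested shape with gradients disabled.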
def _create_random_mask(shape, device):
    return make_tensor(shape,
                       device=device,
                       dtype=torch.bool,
                       low=0,
                       high=1,
                       requires_grad=False)
Code example #9
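This wrapper fixes the device (cpu) and dtype (int64) of make_tensor, exposing only the shape and value bounds.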
def make_arg(shape, low=None, high=None):
    return make_tensor(shape,
                       device='cpu',
                       dtype=torch.int64,
                       low=low,
                       high=high,
                       requires_grad=False)
Code example #10
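This generator builds SampleInputs from random data/mask pairs across several shapes, optionally converting both to sparse COO or sparse CSR layout.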
def _generate_sample_data(device="cpu",
                          dtype=torch.float,
                          requires_grad=True,
                          layout=torch.strided):
    assert layout in {
        torch.strided,
        torch.sparse_coo,
        torch.sparse_csr,
    }, "Layout must be strided/sparse_coo/sparse_csr"
    shapes = [
        [],
        [2],
        [3, 5],
        [3, 2, 1, 2],
    ]
    inputs = []
    for s in shapes:
        data = make_tensor(
            s, device=device, dtype=dtype,
            requires_grad=requires_grad)  # type: ignore[arg-type]
        mask = _create_random_mask(s, device)
        if layout == torch.sparse_coo:
            mask = mask.to_sparse_coo().coalesce()
            data = data.sparse_mask(mask).requires_grad_(requires_grad)
        elif layout == torch.sparse_csr:
            if data.ndim != 2 and mask.ndim != 2:
                continue
            mask = mask.to_sparse_csr()
            data = data.sparse_mask(mask)
        inputs.append(SampleInput(data, kwargs={"mask": mask}))
    return inputs
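Code example #11
This builder scripts and traces a Conv1d module for each floating-point dtype/device combination, using make_tensor to create the trace input.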
def jit_serialization():
    test_cases = {}
    for dtype, device in itertools.product(all_dtypes, all_devices):
        base_name = f'jit_serialization_{dtype_name(dtype)}_{device}'

        if dtype.is_floating_point:
            #m = torch.nn.Linear(50, 10, dtype=dtype, device=device)
            m = torch.nn.Conv1d(16,
                                33,
                                3,
                                stride=2,
                                dtype=dtype,
                                device=device)
            test_cases[f'{base_name}_script'] = torch.jit.script(m)

            #m = torch.nn.Linear(50, 10, dtype=dtype, device=device)
            m = torch.nn.Conv1d(16,
                                33,
                                3,
                                stride=2,
                                dtype=dtype,
                                device=device)
            a = make_tensor((20, 16, 50),
                            device=device,
                            dtype=dtype,
                            low=-9,
                            high=9)
            test_cases[f'{base_name}_trace'] = torch.jit.trace(m, a)

    return test_cases
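A minimal sketch of how the returned modules might be round-tripped through serialization, assuming the helper's surrounding context (all_dtypes, all_devices, dtype_name) is available; the in-memory buffer is an illustrative choice:

import io

for name, module in jit_serialization().items():
    buffer = io.BytesIO()
    torch.jit.save(module, buffer)     # serialize the ScriptModule
    buffer.seek(0)
    restored = torch.jit.load(buffer)  # load it back for reuse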
Code example #12
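This test compares torch.sparse.addmm on a sparse CSR operand against dense torch.addmm, using random alpha/beta and optionally broadcasting the added matrix from a scalar.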
def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
    if alpha_beta is None:
        alpha = random.random()
        beta = random.random()
    else:
        alpha, beta = alpha_beta
    if broadcast:
        D1 = make_tensor((), dtype=dtype, device=device)
    else:
        D1 = make_tensor([n, p], dtype=dtype, device=device)
    D2 = make_tensor([m, p], dtype=dtype, device=device)
    S = self.genSparseCSRTensor([n, m], nnz, dtype=dtype, device=device, index_dtype=torch.int32)
    S_dense = S.to_dense()
    Y = torch.sparse.addmm(D1, S, D2, beta=beta, alpha=alpha)
    Y_dense = torch.addmm(D1, S_dense, D2, beta=beta, alpha=alpha)
    self.assertEqual(Y, Y_dense)
Code example #13
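This test writes through a tensor that shares its buffer with an array and checks both views stay in sync for every valid offset/count combination.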
    def test_shared_buffer(self, device, dtype):
        x = common.make_tensor((1,), device, dtype)
        # Modify the whole tensor
        arr, tensor = self._run_test(SHAPE, dtype)
        tensor[:] = x
        self.assertEqual(arr, tensor)
        self.assertTrue((tensor == x).all().item())

        # Modify the whole tensor from all valid offsets, given
        # a count value
        for count in range(-1, SIZE + 1):
            if count == 0:
                continue

            actual_count = count if count > 0 else SIZE
            for first in range(SIZE - actual_count):
                last = first + actual_count
                arr, tensor = self._run_test(SHAPE, dtype, first=first, count=count)
                tensor[:] = x
                self.assertEqual(arr[first:last], tensor)
                self.assertTrue((tensor == x).all().item())

                # Modify the first value in the array
                arr[first] = x.item() - 1
                self.assertEqual(arr[first:last], tensor)
Code example #14
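This test checks that applying a unary op to a whole batch matches stacking the op applied to each slice.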
    def test_batch_vs_slicing(self, device, dtype, op):
        input = make_tensor((1024, 512), dtype=dtype, device=device,
                            low=op.domain[0], high=op.domain[1])

        actual = op(input)
        expected = torch.stack([op(slice) for slice in input])

        self.assertEqual(actual, expected)
Code example #15
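This helper validates Tensor.trace against np.trace, matching NumPy's accumulation dtype to the dtype that sum() would produce.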
        def test(shape):
            tensor = make_tensor(shape, device, dtype, low=-9, high=9)
            expected_dtype = tensor.sum().dtype
            expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]

            result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
            expected = torch.tensor(result, device=device)
            self.assertEqual(tensor.trace(), expected)
Code example #16
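This test converts dense tensors to sparse COO and then to sparse CSR, asserting the CSR tensor densifies back to the original, including empty sizes.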
    def test_coo_csr_conversion(self, device, dtype):
        for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
            size = (m, n)
            dense = make_tensor(size, dtype=dtype, device=device)
            coo_sparse = dense.to_sparse()
            csr_sparse = coo_sparse.to_sparse_csr()

            self.assertEqual(csr_sparse.to_dense(), dense)
Code example #17
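This transform returns a tensor whose last dimension is deliberately off by one (or shape [2] for scalar inputs), to provoke shape-mismatch errors.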
        def _case_two_transform(t):
            wrong_shape = list(t.shape)

            if len(wrong_shape) == 0:
                # Handles scalar tensor case (empty list)
                wrong_shape = [2]
            else:
                wrong_shape[-1] = wrong_shape[-1] + 1
            return make_tensor(wrong_shape, dtype=t.dtype, device=t.device)
Code example #18
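This test checks that a unary op commutes with transposition: op(contig).T equals op(contig.T).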
    def test_contig_vs_transposed(self, device, dtype, op):
        contig = make_tensor((789, 357), device=device, dtype=dtype,
                             low=op.domain[0], high=op.domain[1])
        non_contig = contig.T

        self.assertTrue(contig.is_contiguous())
        self.assertFalse(non_contig.is_contiguous())

        self.assertEqual(op(contig).T, op(non_contig))
Code example #19
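This test compares a unary op on a contiguous tensor with the same op on its strided every-other-element view.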
    def test_contig_vs_every_other(self, device, dtype, op):
        contig = make_tensor((1026,), device=device, dtype=dtype,
                             low=op.domain[0], high=op.domain[1])
        non_contig = contig[::2]

        self.assertTrue(contig.is_contiguous())
        self.assertFalse(non_contig.is_contiguous())

        self.assertEqual(op(contig)[::2], op(non_contig))
Code example #20
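An extended variant of the __getitem__ sample-input function from example #4, adding advanced-index tensors (adv_idx) and inputs of dimension 3 through 6.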
def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):
    # Short for "advanced index"
    adv_idx = torch.LongTensor([[0, 1], [2, 3]])
    S = 5
    # self_dim, indices
    test_args = [
        (3, ([1, 2], )),
        (3, (slice(0, 3), )),
        (3, ([slice(0, 3), 1], )),
        (3, ([[0, 2, 3], [1, 3, 3], [0, 0, 2]], )),
        (3, ([[0, 0, 3], [1, 1, 3], [0, 0, 2]], )),
        (3, ([slice(None), slice(None), [0, 3]], )),
        (3, ([slice(None), [0, 3], slice(None)], )),
        (3, ([[0, 3], slice(None), slice(None)], )),
        (3, ([[0, 3], [1, 2], slice(None)], )),
        (3, ([
            [0, 3],
        ], )),
        (3, ([[0, 3], slice(None)], )),
        (3, ([[0, 3], Ellipsis], )),
        (3, ([[0, 2, 3], [1, 3, 3],
              torch.LongTensor([0, 0, 2])], )),
        (4, ([slice(None), adv_idx, adv_idx,
              slice(None)], )),
        (4, ([slice(None), adv_idx, slice(None), adv_idx], )),
        (4, ([adv_idx, slice(None), slice(None), adv_idx], )),
        (4, ([slice(None), slice(None), adv_idx, adv_idx], )),
        (4, ([Ellipsis, adv_idx, adv_idx], )),
        (5, ([slice(None),
              slice(None), adv_idx,
              slice(None), adv_idx], )),
        (5, ([slice(None),
              slice(None), adv_idx, adv_idx,
              slice(None)], )),
        (5, ([slice(None),
              slice(None), adv_idx, None, adv_idx,
              slice(None)], )),
        (6, ([slice(None),
              slice(None),
              slice(None), adv_idx, adv_idx], )),
        (6, ([slice(None), slice(None), adv_idx, adv_idx, adv_idx], )),
        (6, ([slice(None),
              slice(None), None, adv_idx, adv_idx, adv_idx], )),
    ]

    def get_shape(dim):
        return tuple(S + i for i in range(dim))

    return tuple(
        SampleInput(make_tensor(get_shape(self_dim),
                                device=device,
                                dtype=dtype,
                                low=None,
                                high=None,
                                requires_grad=requires_grad),
                    args=args) for self_dim, args in test_args)
Code example #21
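This test exercises the out= argument of a unary op for every output dtype, dispatching to a promotion-aware helper when the op promotes integers to float.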
    def test_out_arg_all_dtypes(self, device, dtype, op):
        input = make_tensor((64, 64), dtype=dtype, device=device,
                            low=op.domain[0], high=op.domain[1])

        for out_dtype in all_types_and_complex_and(torch.bool, torch.half):
            out = torch.empty_like(input, dtype=out_dtype)
            if op.promotes_integers_to_float:
                self._test_out_promote_int_to_float_op(op, input, out)
            else:
                self._test_out_arg(op, input, out)
Code example #22
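This test compares a unary op on a non-contiguous indexed view with the op on a contiguous copy of that view.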
    def test_non_contig_index(self, device, dtype, op):
        contig = make_tensor((2, 2, 1, 2), device, dtype,
                             low=op.domain[0], high=op.domain[1])
        non_contig = contig[:, 1, ...]
        contig = non_contig.contiguous()

        self.assertTrue(contig.is_contiguous())
        self.assertFalse(non_contig.is_contiguous())

        self.assertEqual(op(contig), op(non_contig))
Code example #23
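This test verifies assertEqual interoperates with NumPy arrays in both argument orders across empty and non-empty sizes.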
def test_assertEqual_numpy(self, device, dtype):
    S = 10
    test_sizes = [(), (0, ), (S, ), (S, S), (0, S), (S, 0)]
    for test_size in test_sizes:
        a = make_tensor(test_size, device, dtype, low=-5, high=5)
        a_n = a.cpu().numpy()
        msg = f'size: {test_size}'
        self.assertEqual(a_n, a, rtol=0, atol=0, msg=msg)
        self.assertEqual(a, a_n, rtol=0, atol=0, msg=msg)
        self.assertEqual(a_n, a_n, rtol=0, atol=0, msg=msg)
Code example #24
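This test checks a unary op on a contiguous size-1-leading-dimension slice against a fresh contiguous copy.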
    def test_contig_size1(self, device, dtype, op):
        contig = make_tensor((5, 100), device, dtype,
                             low=op.domain[0], high=op.domain[1])
        contig = contig[:1, :50]
        contig2 = torch.empty(contig.size(), device=device, dtype=dtype)
        contig2.copy_(contig)

        self.assertTrue(contig.is_contiguous())
        self.assertTrue(contig2.is_contiguous())

        self.assertEqual(op(contig), op(contig2))
Code example #25
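A variant of the previous test using a 12-dimensional tensor whose leading dimension is sliced to size 1.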
    def test_contig_size1_large_dim(self, device, dtype, op):
        contig = make_tensor((5, 2, 3, 1, 4, 5, 3, 2, 1, 2, 3, 4), device, dtype,
                             low=op.domain[0], high=op.domain[1])
        contig = contig[:1, :, :, :, :, :, :, :, :, :, :, :]
        contig2 = torch.empty(contig.size(), device=device, dtype=dtype)
        contig2.copy_(contig)

        self.assertTrue(contig.is_contiguous())
        self.assertTrue(contig2.is_contiguous())

        self.assertEqual(op(contig), op(contig2))
Code example #26
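This test copies a contiguous tensor into a strided (non-contiguous) buffer view and checks the unary op agrees on both.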
    def test_non_contig(self, device, dtype, op):
        shapes = [(5, 7), (1024,)]
        for shape in shapes:
            contig = make_tensor(shape, device, dtype,
                                 low=op.domain[0], high=op.domain[1])
            non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
            non_contig.copy_(contig)

            self.assertTrue(contig.is_contiguous())
            self.assertFalse(non_contig.is_contiguous())

            self.assertEqual(op(contig), op(non_contig))
Code example #27
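This helper validates torch.msort and its out= variant against np.msort, special-casing 0-d inputs that NumPy cannot sort.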
        def test(shape):
            tensor = make_tensor(shape, device, dtype, low=-9, high=9)
            if tensor.size() != torch.Size([]):
                expected = torch.from_numpy(np.msort(tensor.cpu().numpy()))
            else:
                expected = tensor  # numpy.msort() does not support 0-d (scalar) inputs

            result = torch.msort(tensor)
            self.assertEqual(result, expected)

            out = torch.empty_like(result)
            torch.msort(tensor, out=out)
            self.assertEqual(out, expected)
Code example #28
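An earlier, simpler variant of the consistency test from example #5, without the inplace integer-promotion special case.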
    def test_variant_consistency(self, device, dtype, op):
        def _fn(t):
            return op(t)

        t = make_tensor((5, 5), device, dtype, low=op.domain[0], high=op.domain[1])
        expected = op(t)

        for alt in (op.get_method(), op.get_inplace(), torch.jit.script(_fn)):
            if alt is None:
                # This variant is not implemented for the op; skip it.
                continue

            actual = alt(t.clone())
            self.assertEqual(actual, expected, rtol=0, atol=0)
Code example #29
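This test round-trips dense tensors through sparse CSR and back, then checks to_dense on a hand-built sparse_csr_tensor.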
    def test_sparse_csr_to_dense(self, device, dtype):
        mn = [5, 2, 0]
        for (m, n) in itertools.product(mn, mn):
            size = (m, n)
            dense = make_tensor(size, dtype=dtype, device=device)
            sparse = dense.to_sparse_csr()
            self.assertEqual(sparse.to_dense(), dense)

        crow_indices = torch.tensor([0, 3, 5])
        col_indices = torch.tensor([0, 1, 2, 0, 1])
        values = torch.tensor([1, 2, 1, 3, 4], dtype=dtype)
        csr = torch.sparse_csr_tensor(crow_indices, col_indices,
                                      values, dtype=dtype, device=device)
        dense = torch.tensor([[1, 2, 1], [3, 4, 0]], dtype=dtype, device=device)
        self.assertEqual(csr.to_dense(), dense)
Code example #30
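This test expands a tensor to a broadcasted non-contiguous view and checks the unary op output matches the contiguous result along each expanded slice.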
    def test_non_contig_expand(self, device, dtype, op):
        shapes = [(1, 3), (1, 7), (5, 7)]
        for shape in shapes:
            contig = make_tensor(shape, device, dtype,
                                 low=op.domain[0], high=op.domain[1])
            non_contig = contig.clone().expand(3, -1, -1)

            self.assertTrue(contig.is_contiguous())
            self.assertFalse(non_contig.is_contiguous())

            contig = op(contig)
            non_contig = op(non_contig)
            for i in range(3):
                self.assertEqual(contig, non_contig[i],
                                 msg='non-contiguous expand[' + str(i) + ']')