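# NOTE: this excerpt does not include the original file header. The names used
# below suggest roughly the following imports (an assumed reconstruction, not
# the verbatim header; the module-level constants SIZE and nan, and the helper
# _generate_input used by TestShapeOps, are likewise defined elsewhere in the
# original file):
#
#   import random
#   import warnings
#   from functools import partial
#   from itertools import chain, combinations, permutations, product
#
#   import numpy as np
#   import torch
#   from torch.testing import make_tensor
#   from torch.testing._internal.common_utils import (
#       TestCase, run_tests, slowTest, TEST_WITH_ROCM, torch_to_numpy_dtype_dict)
#   from torch.testing._internal.common_device_type import (
#       instantiate_device_type_tests, dtypes, dtypesIfCPU, dtypesIfCUDA,
#       onlyCPU, onlyCUDA, onlyNativeDeviceTypes, largeTensorTest, skipCUDAIfRocm)
#   from torch.testing._internal.common_dtype import (
#       all_types, all_types_and, all_types_and_complex_and, floating_types_and,
#       get_all_dtypes, get_all_int_dtypes, get_all_fp_dtypes)
#
#   SIZE = 100
#   nan = float('nan')
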
class TestSortAndSelect(TestCase):

    def assertIsOrdered(self, order, x, mxx, ixx, task):
        SIZE = x.size(1)
        if order == 'descending':
            def check_order(a, b):
                # `a != a` because we put NaNs
                # at the end of ascending sorted lists,
                # and the beginning of descending ones.
                return ((a != a) | (a >= b)).all().item()
        elif order == 'ascending':
            def check_order(a, b):
                # see above
                return ((b != b) | (a <= b)).all().item()
        else:
            raise ValueError('unknown order "{}", must be "ascending" or "descending"'.format(order))

        for k in range(1, SIZE):
            self.assertTrue(check_order(mxx[:, k - 1], mxx[:, k]),
                            'torch.sort ({}) values unordered for {}'.format(order, task))

        seen = set()
        size0 = x.size(0)
        size = x.size(x.dim() - 1)
        x = x.tolist()
        mxx = mxx.tolist()
        ixx = ixx.tolist()
        for k in range(size0):
            seen.clear()
            for j in range(size):
                self.assertEqual(x[k][ixx[k][j]], mxx[k][j],
                                 msg='torch.sort ({}) indices wrong for {}'.format(order, task))
                seen.add(ixx[k][j])
            self.assertEqual(len(seen), size)

    def test_sort(self, device):
        # on CUDA, dims of size 2048 and > 2048 take different code paths when sorted
        for SIZE in (4, 2049):
            x = torch.rand(4, SIZE, device=device)
            res1val, res1ind = torch.sort(x)

            # Test inplace
            y = x.clone()
            y_inds = torch.tensor((), dtype=torch.int64, device=device)
            torch.sort(y, out=(y, y_inds))
            x_vals, x_inds = torch.sort(x)
            self.assertEqual(x_vals, y)
            self.assertEqual(x_inds, y_inds)

            # Test use of result tensor
            res2val = torch.tensor((), device=device)
            res2ind = torch.tensor((), device=device, dtype=torch.long)
            torch.sort(x, out=(res2val, res2ind))
            self.assertEqual(res1val, res2val, atol=0, rtol=0)
            self.assertEqual(res1ind, res2ind, atol=0, rtol=0)
            self.assertEqual(torch.argsort(x), res1ind)
            self.assertEqual(x.argsort(), res1ind)

            # Test sorting of random numbers
            self.assertIsOrdered('ascending', x, res2val, res2ind, 'random')

            # Test simple sort
            self.assertEqual(
                torch.sort(torch.tensor((50, 40, 30, 20, 10), device=device))[0],
                torch.tensor((10, 20, 30, 40, 50), device=device),
                atol=0, rtol=0
            )

            # Test that we still have proper sorting with duplicate keys
            x = torch.floor(torch.rand(4, SIZE, device=device) * 10)
            torch.sort(x, out=(res2val, res2ind))
            self.assertIsOrdered('ascending', x, res2val, res2ind, 'random with duplicate keys')

            # DESCENDING SORT
            x = torch.rand(4, SIZE, device=device)
            res1val, res1ind = torch.sort(x, x.dim() - 1, True)

            # Test use of result tensor
            res2val = torch.tensor((), device=device)
            res2ind = torch.tensor((), device=device, dtype=torch.long)
            torch.sort(x, x.dim() - 1, True, out=(res2val, res2ind))
            self.assertEqual(res1val, res2val, atol=0, rtol=0)
            self.assertEqual(res1ind, res2ind, atol=0, rtol=0)
            self.assertEqual(torch.argsort(x, x.dim() - 1, True), res1ind)
            self.assertEqual(x.argsort(x.dim() - 1, True), res1ind)

            # Test sorting of random numbers
            self.assertIsOrdered('descending', x, res2val, res2ind, 'random')

            # Test simple sort
            self.assertEqual(
                torch.sort(torch.tensor((10, 20, 30, 40, 50), device=device), 0, True)[0],
                torch.tensor((50, 40, 30, 20, 10), device=device),
                atol=0, rtol=0
            )

            # Test that we still have proper sorting with duplicate keys
            self.assertIsOrdered('descending', x, res2val, res2ind, 'random with duplicate keys')

            # Test sorting with NaNs
            x = torch.rand(4, SIZE, device=device)
            x[1][2] = float('NaN')
            x[3][0] = float('NaN')
            torch.sort(x, out=(res2val, res2ind))
            self.assertIsOrdered('ascending', x, res2val, res2ind, 'random with NaNs')
            torch.sort(x, out=(res2val, res2ind), descending=True)
            self.assertIsOrdered('descending', x, res2val, res2ind, 'random with NaNs')

    # FIXME: remove torch.bool from unsupported types once support is added for cub sort
    @dtypes(*set(get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128})
    def test_stable_sort(self, device, dtype):
        if TEST_WITH_ROCM and dtype == torch.bfloat16:
            return
        sizes = (100, 1000, 10000)
        for ncopies in sizes:
            x = torch.tensor([0, 1] * ncopies, dtype=dtype, device=device)
            _, idx = x.sort(stable=True)
            self.assertEqual(
                idx[:ncopies],
                torch.arange(start=0, end=2 * ncopies, step=2, device=device)
            )
            self.assertEqual(
                idx[ncopies:],
                torch.arange(start=1, end=2 * ncopies, step=2, device=device)
            )

    @onlyCUDA
    @dtypes(torch.uint8)
    @largeTensorTest('200GB')  # Unfortunately 80GB A100 is not large enough
    def test_sort_large(self, device, dtype):
        t0 = torch.randperm(8192, device=device).to(dtype)
        t = t0.view(1, 8192).expand(2 ** 18 + 1, -1).contiguous()
        v, i = t.sort()
        del t
        iv, im = i.var_mean(dim=0)
        del i
        vv, vm = v.var_mean(dim=0)
        del v
        self.assertEqual(vv, torch.zeros_like(vv))
        self.assertEqual(iv, torch.zeros_like(iv))
        # every row of t is identical, so the column-wise mean of the sorted values
        # is just one sorted row: each uint8 value 0..255 repeated 8192 / 256 = 32 times
        self.assertEqual(vm, torch.arange(256, dtype=dtype, device=device).repeat_interleave(32))
        self.assertEqual(im, t0.sort().indices)

    def _test_sort_discontiguous(self, device, dtype):
        # on CUDA, dims of size 2048 and > 2048 take different code paths when sorted
        sizes = (5, 7, 2049)
        for shape in permutations(sizes):
            for perm in permutations((0, 1, 2)):
                for dim in range(3):
                    t = torch.randn(shape, device=device, dtype=dtype).permute(perm)
                    r1 = t.sort(dim=dim)
                    r2 = t.contiguous().sort(dim=dim)
                    self.assertEqual(r1, r2)
                    n = t.size(dim)

                    # assert ordered
                    self.assertTrue((r1.values.narrow(dim, 1, n - 1) >= r1.values.narrow(dim, 0, n - 1)).all())

                    # assert that different segments do not mix, which can easily happen
                    # if the stride is not handled correctly
                    self.assertTrue((t.unsqueeze(-1).transpose(dim, -1) == r1.values.unsqueeze(-1)).any(dim=dim).any(dim=-1).all())

                    # assert stride is preserved
                    if self.device_type == 'cuda':
                        # FIXME: this behavior should be true for all cases, not
                        # just the one specified in if condition
                        self.assertEqual(r1.values.stride(), t.stride())
                        self.assertEqual(r1.indices.stride(), t.stride())

    @onlyCUDA
    @dtypes(torch.float32)
    def test_sort_discontiguous(self, device, dtype):
        self._test_sort_discontiguous(device, dtype)

    @slowTest  # this test is slow on CPU, but not on CUDA
    @onlyCPU
    @dtypes(torch.float32)
    def test_sort_discontiguous_slow(self, device, dtype):
        self._test_sort_discontiguous(device, dtype)

    @dtypes(torch.float32)
    def test_sort_1d_output_discontiguous(self, device, dtype):
        tensor = torch.randn(12, device=device, dtype=dtype)[:6]
        values = torch.empty_like(tensor)[::2]
        indices = torch.empty(18, device=device, dtype=torch.long)[::3]
        torch.sort(tensor, out=(values, indices))
        values_cont, indices_cont = tensor.sort()
        self.assertEqual(indices, indices_cont)
        self.assertEqual(values, values_cont)

    @dtypes(torch.float32)
    def test_topk_1d_output_discontiguous(self, device, dtype):
        tensor = torch.randn(12, device=device, dtype=dtype)
        values = torch.empty_like(tensor)[::2]
        indices = torch.empty(18, device=device, dtype=torch.long)[::3]
        for sorted in (True, False):
            # outputs of the `sorted=False` case are not guaranteed to be the same,
            # but with the current implementation they are
            torch.topk(tensor, 6, sorted=sorted, out=(values, indices))
            values_cont, indices_cont = tensor.topk(6, sorted=sorted)
            self.assertEqual(indices, indices_cont)
            self.assertEqual(values, values_cont)
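
    # For reference: a stable sort keeps equal elements in input order, so for
    # x = [0, 1, 0, 1, ...] the zeros must come out with indices 0, 2, 4, ...
    # (exactly what test_stable_sort above asserts). The next test cross-checks
    # torch's stable sort against np.argsort(..., kind='stable').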
    # FIXME: remove torch.bool from unsupported types once support is added for cub sort
    @dtypes(*set(get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128})
    def test_stable_sort_against_numpy(self, device, dtype):
        if TEST_WITH_ROCM and dtype == torch.bfloat16:
            return
        if dtype in floating_types_and(torch.float16, torch.bfloat16):
            inf = float('inf')
            neg_inf = -float('inf')
            nan = float('nan')
        else:
            if dtype != torch.bool:
                # no torch.iinfo support for torch.bool
                inf = torch.iinfo(dtype).max
                neg_inf = torch.iinfo(dtype).min
            else:
                inf = True
                neg_inf = ~inf
            # no nan for integral types, we use inf instead for simplicity
            nan = inf

        def generate_samples():
            from itertools import chain, combinations

            for sizes in [(1025,), (10000,)]:
                size = sizes[0]
                # binary strings
                yield (torch.tensor([0, 1] * size, dtype=dtype, device=device), 0)

            if self.device_type == 'cuda':
                return

            yield (torch.tensor([0, 1] * 100, dtype=dtype, device=device), 0)

            def repeated_index_fill(t, dim, idxs, vals):
                res = t
                for idx, val in zip(idxs, vals):
                    res = res.index_fill(dim, idx, val)
                return res

            for sizes in [(1, 10), (10, 1), (10, 10), (10, 10, 10)]:
                size = min(*sizes)
                x = (torch.randn(*sizes, device=device) * size).to(dtype)
                yield (x, 0)

                # Generate tensors which are being filled at random locations
                # with values from the non-empty subsets of the set (inf, neg_inf, nan)
                # for each dimension.
                n_fill_vals = 3  # cardinality of (inf, neg_inf, nan)
                for dim in range(len(sizes)):
                    idxs = (torch.randint(high=size, size=(size // 10,)) for i in range(n_fill_vals))
                    vals = (inf, neg_inf, nan)
                    subsets = chain.from_iterable(combinations(list(zip(idxs, vals)), r)
                                                  for r in range(1, n_fill_vals + 1))
                    for subset in subsets:
                        idxs_subset, vals_subset = zip(*subset)
                        yield (repeated_index_fill(x, dim, idxs_subset, vals_subset), dim)

        for sample, dim in generate_samples():
            _, idx_torch = sample.sort(dim=dim, stable=True)
            if dtype is torch.bfloat16:
                sample_numpy = sample.float().cpu().numpy()
            else:
                sample_numpy = sample.cpu().numpy()
            idx_numpy = np.argsort(sample_numpy, axis=dim, kind='stable')
            self.assertEqual(idx_torch, idx_numpy)

    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
    def test_msort(self, device, dtype):
        if TEST_WITH_ROCM and dtype == torch.bfloat16:
            return

        def test(shape):
            tensor = make_tensor(shape, device, dtype, low=-9, high=9)
            if tensor.size() != torch.Size([]):
                if dtype is torch.bfloat16:
                    expected = torch.from_numpy(np.msort(tensor.float().cpu().numpy())).bfloat16()
                else:
                    expected = torch.from_numpy(np.msort(tensor.cpu().numpy()))
            else:
                expected = tensor  # numpy.msort() does not support zero-dimensional tensors

            result = torch.msort(tensor)
            self.assertEqual(result, expected)

            out = torch.empty_like(result)
            torch.msort(tensor, out=out)
            self.assertEqual(out, expected)

        shapes = (
            [],
            [0, ],
            [20, ],
            [1, 20],
            [30, 30],
            [10, 20, 30]
        )
        for shape in shapes:
            test(shape)

    def test_topk(self, device):
        def topKViaSort(t, k, dim, dir):
            sorted, indices = t.sort(dim, dir)
            return sorted.narrow(dim, 0, k), indices.narrow(dim, 0, k)

        def compareTensors(t, res1, ind1, res2, ind2, dim):
            # Values should be exactly equivalent
            self.assertEqual(res1, res2, atol=0, rtol=0)

            # Indices might differ based on the implementation, since there is
            # no guarantee of the relative order of selection
            if not ind1.eq(ind2).all():
                # To verify that the indices represent equivalent elements,
                # gather from the input using the topk indices and compare against
                # the sort indices
                vals = t.gather(dim, ind2)
                self.assertEqual(res1, vals, atol=0, rtol=0)

        def compare(t, k, dim, dir):
            topKVal, topKInd = t.topk(k, dim, dir, True)
            sortKVal, sortKInd = topKViaSort(t, k, dim, dir)
            compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim)

        t = torch.rand(random.randint(1, SIZE),
                       random.randint(1, SIZE),
                       random.randint(1, SIZE),
                       device=device)

        for _kTries in range(3):
            for _dimTries in range(3):
                for transpose in (True, False):
                    for dir in (True, False):
                        testTensor = t
                        if transpose:
                            dim1 = random.randrange(t.ndimension())
                            dim2 = dim1
                            while dim1 == dim2:
                                dim2 = random.randrange(t.ndimension())
                            testTensor = t.transpose(dim1, dim2)

                        dim = random.randrange(testTensor.ndimension())
                        k = random.randint(1, testTensor.size(dim))
                        compare(testTensor, k, dim, dir)

        # This tests the code path where on CUDA, topk is implemented with sort.
        t = torch.randn((2, 100000), device=device)
        compare(t, 2000, 1, True)
        compare(t, 2000, 1, False)

    def test_topk_arguments(self, device):
        q = torch.randn(10, 2, 10, device=device)
        # Make sure True isn't mistakenly taken as the 2nd dimension (interpreted as 1)
        self.assertRaises(TypeError, lambda: q.topk(4, True))

    @skipCUDAIfRocm
    def test_unique_dim(self, device):
        self.assertFalse(hasattr(torch, 'unique_dim'))

        def run_test(device, dtype):
            x = torch.tensor([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
                              [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                             dtype=dtype, device=device)
            x_empty = torch.empty(5, 0, dtype=dtype, device=device)
            x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
            x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                x_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1],
                                     dtype=dtype, device=device)
            expected_unique_dim0 = torch.tensor([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                                                dtype=dtype, device=device)
            expected_inverse_dim0 = torch.tensor([0, 0])
            expected_counts_dim0 = torch.tensor([2])
            expected_unique_dim1 = torch.tensor([[[0., 1.], [1., 1.], [2., 1.]],
                                                 [[0., 1.], [1., 1.], [2., 1.]]],
                                                dtype=dtype, device=device)
            expected_unique_dim1_bool = torch.tensor([[[False, True], [True, True]],
                                                      [[False, True], [True, True]]],
                                                     dtype=torch.bool, device=device)
            expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])
            expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])
            expected_counts_dim1 = torch.tensor([2, 1, 1])
            expected_counts_dim1_bool = torch.tensor([2, 2])
            expected_unique_dim2 = torch.tensor([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
                                                 [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                                                dtype=dtype, device=device)
            expected_inverse_dim2 = torch.tensor([0, 1])
            expected_counts_dim2 = torch.tensor([1, 1])
            expected_unique_empty = torch.tensor([], dtype=dtype, device=device)
            expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
            expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                expected_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1],
                                                   dtype=dtype, device=device)
                expected_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
                expected_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)

            # dim0
            x_unique = torch.unique(x, dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)

            x_unique, x_inverse = torch.unique(
                x,
                return_inverse=True,
                dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_inverse_dim0, x_inverse)

            x_unique, x_counts = torch.unique(
                x,
                return_inverse=False,
                return_counts=True,
                dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_counts_dim0, x_counts)
            x_unique, x_inverse, x_counts = torch.unique(
                x,
                return_inverse=True,
                return_counts=True,
                dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_inverse_dim0, x_inverse)
            self.assertEqual(expected_counts_dim0, x_counts)

            # dim1
            x_unique = torch.unique(x, dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)

            x_unique, x_inverse = torch.unique(
                x,
                return_inverse=True,
                dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_inverse_dim1_bool, x_inverse)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_inverse_dim1, x_inverse)

            x_unique, x_counts = torch.unique(
                x,
                return_inverse=False,
                return_counts=True,
                dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_counts_dim1_bool, x_counts)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_counts_dim1, x_counts)

            x_unique, x_inverse, x_counts = torch.unique(
                x,
                return_inverse=True,
                return_counts=True,
                dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_inverse_dim1_bool, x_inverse)
                self.assertEqual(expected_counts_dim1_bool, x_counts)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_inverse_dim1, x_inverse)
                self.assertEqual(expected_counts_dim1, x_counts)

            # dim2
            x_unique = torch.unique(x, dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)

            x_unique, x_inverse = torch.unique(
                x,
                return_inverse=True,
                dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_inverse_dim2, x_inverse)

            x_unique, x_counts = torch.unique(
                x,
                return_inverse=False,
                return_counts=True,
                dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_counts_dim2, x_counts)

            x_unique, x_inverse, x_counts = torch.unique(
                x,
                return_inverse=True,
                return_counts=True,
                dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_inverse_dim2, x_inverse)
            self.assertEqual(expected_counts_dim2, x_counts)

            # test empty tensor
            x_unique, x_inverse, x_counts = torch.unique(
                x_empty,
                return_inverse=True,
                return_counts=True,
                dim=1)
            self.assertEqual(expected_unique_empty, x_unique)
            self.assertEqual(expected_inverse_empty, x_inverse)
            self.assertEqual(expected_counts_empty, x_counts)

            # test tensor with nan
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                x_unique, x_inverse, x_counts = torch.unique(
                    x_nan,
                    return_inverse=True,
                    return_counts=True,
                    dim=0)
                self.assertEqual(expected_unique_nan, x_unique)
                self.assertEqual(expected_inverse_nan, x_inverse)
                self.assertEqual(expected_counts_nan, x_counts)

            # test not a well formed tensor
            # Checking for runtime error, as this is the expected behaviour
            with self.assertRaises(RuntimeError):
                torch.unique(
                    x_ill_formed_empty,
                    return_inverse=True,
                    return_counts=True,
                    dim=1)

            # test along dim2
            with self.assertRaises(RuntimeError):
                torch.unique(
                    x_ill_formed_empty_another,
                    return_inverse=True,
                    return_counts=True,
                    dim=2)

            # test consecutive version
            y = torch.tensor(
                [[0, 1], [0, 1], [0, 1], [1, 2], [1, 2], [3, 4], [0, 1], [0, 1], [3, 4], [1, 2]],
                dtype=dtype,
                device=device
            )
            # test tensor with nan
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                y_nan = torch.tensor([float("nan"), 0, 0, float("nan"), float("nan"), 1],
                                     dtype=dtype, device=device)
            expected_y_unique = torch.tensor(
                [[0, 1], [1, 2], [3, 4], [0, 1], [3, 4], [1, 2]],
                dtype=dtype,
                device=device
            )
            expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5], dtype=torch.int64, device=device)
            expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1], dtype=torch.int64, device=device)
            expected_y_inverse_bool = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 3, 3], dtype=torch.int64, device=device)
            expected_y_counts_bool = torch.tensor([3, 3, 2, 2], dtype=torch.int64, device=device)
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                expected_y_unique_nan = torch.tensor([float("nan"), 0, float("nan"), float("nan"), 1],
                                                     dtype=dtype, device=device)
                expected_y_inverse_nan = torch.tensor([0, 1, 1, 2, 3, 4], dtype=torch.long, device=device)
                expected_y_counts_nan = torch.tensor([1, 2, 1, 1, 1], dtype=torch.long, device=device)

            y_unique, y_inverse, y_counts = torch.unique_consecutive(y, return_inverse=True, return_counts=True, dim=0)
            if x.dtype == torch.bool:
                self.assertEqual(expected_y_inverse_bool, y_inverse)
                self.assertEqual(expected_y_counts_bool, y_counts)
            else:
                self.assertEqual(expected_y_inverse, y_inverse)
                self.assertEqual(expected_y_counts, y_counts)

            # test tensor with nan
            if dtype in floating_types_and(torch.float16, torch.bfloat16):
                y_unique, y_inverse, y_counts = torch.unique_consecutive(
                    y_nan,
                    return_inverse=True,
                    return_counts=True,
                    dim=0)
                self.assertEqual(expected_y_unique_nan, y_unique)
                self.assertEqual(expected_y_inverse_nan, y_inverse)
                self.assertEqual(expected_y_counts_nan, y_counts)

        run_test(device, torch.float)
        run_test(device, torch.double)
        run_test(device, torch.long)
        run_test(device, torch.uint8)
        run_test(device, torch.bool)

    @onlyCUDA
    def test_topk_noncontiguous_gpu(self, device):
        t = torch.randn(20, device=device)[::2]
        top1, idx1 = t.topk(5)
        top2, idx2 = t.contiguous().topk(5)
        self.assertEqual(top1, top2)
        self.assertEqual(idx1, idx2)

    def _test_topk_dtype(self, device, dtype, integral, size):
        if integral:
            a = torch.randint(torch.iinfo(dtype).min, torch.iinfo(dtype).max,
                              size=(size,), dtype=dtype, device=device)
        else:
            a = torch.randn(size=(size,), dtype=dtype, device=device)

        sort_topk = a.sort()[0][-(size // 2):].flip(0)
        topk = a.topk(size // 2)
        self.assertEqual(sort_topk, topk[0])      # check values
        self.assertEqual(sort_topk, a[topk[1]])   # check indices

    @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)
    def test_topk_integral(self, device, dtype):
        small = 10
        large = 4096
        for curr_size in (small, large):
            self._test_topk_dtype(device, dtype, True, curr_size)

    @onlyCUDA
    @dtypes(torch.bfloat16)
    @skipCUDAIfRocm
    def test_topk_bfloat16(self, device, dtype):
        small = 10
        large = 8192
        for curr_size in (small, large):
            self._test_topk_dtype(device, dtype, False, curr_size)

    @dtypesIfCUDA(*get_all_fp_dtypes())
    @dtypes(torch.float, torch.double, torch.bfloat16)
    def test_topk_nonfinite(self, device, dtype):
        if TEST_WITH_ROCM and dtype == torch.bfloat16:
            return

        x = torch.tensor([float('nan'), float('inf'), 1e4, 0, -1e4, -float('inf')], device=device, dtype=dtype)
        val, idx = x.topk(4)
        expect = torch.tensor([float('nan'), float('inf'), 1e4, 0], device=device, dtype=dtype)
        self.assertEqual(val, expect)
        self.assertEqual(idx, [0, 1, 2, 3])

        val, idx = x.topk(4, largest=False)
        expect = torch.tensor([-float('inf'), -1e4, 0, 1e4], device=device, dtype=dtype)
        self.assertEqual(val, expect)
        self.assertEqual(idx, [5, 4, 3, 2])
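
    # Note on the assertions above: torch treats NaN as greater than any other
    # value when sorting/selecting, so with largest=True the NaN comes first,
    # e.g. topk of [nan, inf, 1e4, 0, -1e4, -inf] with k=4 is [nan, inf, 1e4, 0].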
    def test_topk_4d(self, device):
        x = torch.ones(2, 3072, 2, 2, device=device)
        x[:, 1, :, :] *= 2.
        x[:, 10, :, :] *= 1.5
        val, ind = torch.topk(x, k=2, dim=1)
        expected_ind = torch.ones(2, 2, 2, 2, dtype=torch.long, device=device)
        expected_ind[:, 1, :, :] = 10
        expected_val = torch.ones(2, 2, 2, 2, device=device)
        expected_val[:, 0, :, :] *= 2.
        expected_val[:, 1, :, :] *= 1.5
        self.assertEqual(val, expected_val, atol=0, rtol=0)
        self.assertEqual(ind, expected_ind, atol=0, rtol=0)

    @onlyNativeDeviceTypes
    @dtypesIfCUDA(*(get_all_dtypes(include_complex=False,
                                   include_bool=False,
                                   include_half=False,
                                   include_bfloat16=True)))
    @dtypes(*(get_all_dtypes(include_complex=False,
                             include_bool=False,
                             include_half=False,
                             include_bfloat16=False)))
    def test_topk_zero(self, device, dtype):
        if TEST_WITH_ROCM and dtype == torch.bfloat16:
            return

        # https://github.com/pytorch/pytorch/issues/49205
        t = torch.rand(2, 2, device=device).to(dtype=dtype)
        val, idx = torch.topk(t, k=0, largest=False)
        self.assertEqual(val.size(), torch.Size([2, 0]))
        self.assertEqual(idx.size(), torch.Size([2, 0]))

    def _test_unique_scalar_empty(self, dtype, device, f):
        # test scalar
        x = torch.tensor(0, dtype=dtype, device=device)
        unique, inverse, counts = f(x, return_inverse=True, return_counts=True)
        expected_unique = torch.tensor([0], dtype=dtype, device=device)
        expected_inverse = torch.tensor(0, device=device)
        expected_counts = torch.tensor([1], device=device)
        self.assertEqual(unique, expected_unique)
        self.assertEqual(inverse, expected_inverse)
        self.assertEqual(counts, expected_counts)

        # test zero sized tensor
        x = torch.zeros((0, 0, 3), dtype=dtype, device=device)
        unique, inverse, counts = f(x, return_inverse=True, return_counts=True)
        expected_unique = torch.tensor([], dtype=dtype, device=device)
        expected_inverse = torch.empty((0, 0, 3), dtype=torch.long, device=device)
        expected_counts = torch.tensor([], dtype=torch.long, device=device)
        self.assertEqual(unique, expected_unique)
        self.assertEqual(inverse, expected_inverse)
        self.assertEqual(counts, expected_counts)
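
    # torch.unique returns a variable-length result depending on its flags:
    # the unique values, optionally followed by the inverse mapping and/or the
    # counts. The helper below indexes into that tuple accordingly (count_index).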
    def _test_unique_with_expects(self, device, dtype, f, x, expected_unique, expected_inverse, expected_counts, additional_shape):
        def ensure_tuple(x):
            if isinstance(x, torch.Tensor):
                return (x,)
            return x

        for return_inverse in [True, False]:
            for return_counts in [True, False]:
                # test with expected
                ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))
                self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))
                self.assertEqual(expected_unique, ret[0])
                if return_inverse:
                    self.assertEqual(expected_inverse, ret[1])
                if return_counts:
                    count_index = 1 + int(return_inverse)
                    self.assertEqual(expected_counts, ret[count_index])

                # tests per-element unique on a higher rank tensor.
                y = x.view(additional_shape)
                y_unique, y_inverse, y_counts = f(y, return_inverse=True, return_counts=True)
                self.assertEqual(expected_unique, y_unique)
                self.assertEqual(expected_inverse.view(additional_shape), y_inverse)
                self.assertEqual(expected_counts, y_counts)

    @dtypesIfCPU(*set(get_all_dtypes()) - {torch.complex64, torch.complex128})
    @dtypes(*set(get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})
    def test_unique(self, device, dtype):
        if dtype is torch.half and self.device_type == 'cpu':
            return  # CPU does not have half support

        def ensure_tuple(x):
            if isinstance(x, torch.Tensor):
                return (x,)
            return x

        if dtype is torch.bool:
            x = torch.tensor([True, False, False, False, True, False, True, False],
                             dtype=torch.bool, device=device)
            expected_unique = torch.tensor([False, True], dtype=torch.bool, device=device)
            expected_inverse = torch.tensor([1, 0, 0, 0, 1, 0, 1, 0], dtype=torch.long, device=device)
            expected_counts = torch.tensor([5, 3], dtype=torch.long, device=device)
        else:
            x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3], dtype=dtype, device=device)
            expected_unique = torch.tensor([1, 2, 3, 5, 8], dtype=dtype, device=device)
            expected_inverse = torch.tensor([0, 1, 2, 1, 4, 3, 1, 2], device=device)
            expected_counts = torch.tensor([1, 3, 2, 1, 1], device=device)

        # test sorted unique
        fs = (
            lambda x, **kwargs: torch.unique(x, sorted=True, **kwargs),
            lambda x, **kwargs: x.unique(sorted=True, **kwargs),
        )
        x_sliced = torch.empty(x.size(0) * 2, dtype=dtype, device=device)[::2].copy_(x)
        xs = (x, x_sliced)
        for f, x in product(fs, xs):
            self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (2, 2, 2))
            self._test_unique_scalar_empty(dtype, device, f)

        # test unsorted unique
        fs = (
            lambda x, **kwargs: torch.unique(x, sorted=False, **kwargs),
            lambda x, **kwargs: x.unique(sorted=False, **kwargs)
        )
        for f, x in product(fs, xs):
            self._test_unique_scalar_empty(dtype, device, f)
            for return_inverse, return_counts in product((True, False), repeat=2):
                ret = ensure_tuple(f(x, return_inverse=return_inverse, return_counts=return_counts))
                self.assertEqual(len(ret), 1 + int(return_inverse) + int(return_counts))
                x_list = x.tolist()
                x_unique_list = ret[0].tolist()
                self.assertEqual(expected_unique.tolist(), sorted(x_unique_list))
                if return_inverse:
                    x_inverse_list = ret[1].tolist()
                    for i, j in enumerate(x_inverse_list):
                        self.assertEqual(x_list[i], x_unique_list[j])
                if return_counts:
                    count_index = 1 + int(return_inverse)
                    x_counts_list = ret[count_index].tolist()
                    for i, j in zip(x_unique_list, x_counts_list):
                        count = 0
                        for k in x_list:
                            if k == i:
                                count += 1
                        self.assertEqual(j, count)

    @dtypesIfCPU(*set(get_all_dtypes()) - {torch.complex64, torch.complex128})
    @dtypes(*set(get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128})
    def test_unique_consecutive(self, device, dtype):
        if dtype is torch.half and self.device_type == 'cpu':
            return  # CPU does not have half support

        if dtype is torch.bool:
            x = torch.tensor([True, False, False, False, True, True, False, False, False],
                             dtype=torch.bool, device=device)
            expected_unique = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)
            expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 3], dtype=torch.long, device=device)
            expected_counts = torch.tensor([1, 3, 2, 3], dtype=torch.long, device=device)
        else:
            x = torch.tensor([1, 2, 2, 2, 5, 5, 2, 2, 3], dtype=dtype, device=device)
            expected_unique = torch.tensor([1, 2, 5, 2, 3], dtype=dtype, device=device)
            expected_inverse = torch.tensor([0, 1, 1, 1, 2, 2, 3, 3, 4], device=device)
            expected_counts = torch.tensor([1, 3, 2, 2, 1], device=device)

        for f in [torch.unique_consecutive, lambda x, **kwargs: x.unique_consecutive(**kwargs)]:
            self._test_unique_with_expects(device, dtype, f, x, expected_unique, expected_inverse, expected_counts, (3, 3))
            self._test_unique_scalar_empty(dtype, device, f)

    @dtypes(torch.double)
    def test_kthvalue(self, device, dtype):
        SIZE = 50
        x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)
        x0 = x.clone()

        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(x, k, keepdim=False)
        res2val, res2ind = torch.sort(x)

        self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)

        # test use of result tensors
        k = random.randint(1, SIZE)
        res1val = torch.tensor([], dtype=dtype, device=device)
        res1ind = torch.tensor([], dtype=torch.long, device=device)
        torch.kthvalue(x, k, keepdim=False, out=(res1val, res1ind))
        res2val, res2ind = torch.sort(x)
        self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)

        # test non-default dim
        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(x, k, 0, keepdim=False)
        res2val, res2ind = torch.sort(x, 0)
        self.assertEqual(res1val, res2val[k - 1], atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind[k - 1], atol=0, rtol=0)

        # non-contiguous
        y = x.narrow(1, 0, 1)
        y0 = y.contiguous()
        k = random.randint(1, SIZE)
        res1val, res1ind = torch.kthvalue(y, k)
        res2val, res2ind = torch.kthvalue(y0, k)
        self.assertEqual(res1val, res2val, atol=0, rtol=0)
        self.assertEqual(res1ind, res2ind, atol=0, rtol=0)

        # non-contiguous [Reference: https://github.com/pytorch/pytorch/issues/45721]
        non_contig_t = torch.tensor([0, -1, 1, -2, 2], dtype=dtype, device=device)[::2]
        expected_val, expected_ind = non_contig_t.contiguous().kthvalue(2)
        non_contig_cpu_t = non_contig_t.cpu()
        expected_val_cpu, expected_ind_cpu = non_contig_cpu_t.kthvalue(2)

        out_val, out_ind = non_contig_t.kthvalue(2)
        self.assertEqual(expected_val, out_val, atol=0, rtol=0)
        self.assertEqual(expected_ind, out_ind, atol=0, rtol=0)
        self.assertEqual(expected_val_cpu, out_val, atol=0, rtol=0)
        self.assertEqual(expected_ind_cpu, out_ind, atol=0, rtol=0)

        # check that the input wasn't modified
        self.assertEqual(x, x0, atol=0, rtol=0)

        # simple test case (with repetitions)
        y = torch.tensor((3., 5, 4, 1, 1, 5), dtype=dtype, device=device)
        self.assertEqual(torch.kthvalue(y, 3)[0], 3, atol=0, rtol=0)
        self.assertEqual(torch.kthvalue(y, 2)[0], 1, atol=0, rtol=0)

        # simple test case (with NaN)
        SIZE = 50
        x = torch.rand(SIZE, SIZE, SIZE, dtype=dtype, device=device)
        x[torch.arange(SIZE), :, torch.randint(50, (50,))] = nan
        ks = [random.randint(1, SIZE), 1, SIZE, SIZE - 1]
        res2val, res2ind = torch.sort(x)
        for k in ks:
            res1val, res1ind = torch.kthvalue(x, k, keepdim=False)
            self.assertEqual(res1val[:, :], res2val[:, :, k - 1], atol=0, rtol=0)
            self.assertEqual(res1ind[:, :], res2ind[:, :, k - 1], atol=0, rtol=0)

    @dtypes(torch.float)
    @onlyNativeDeviceTypes   # Fails on XLA
    def test_kthvalue_scalar(self, device, dtype):
        # Test scalar input (test case from https://github.com/pytorch/pytorch/issues/30818)
        # Tests that passing a scalar tensor or 1D tensor with 1 element work either way
        res = torch.tensor(2, device=device, dtype=dtype).kthvalue(1)
        ref = torch.tensor([2], device=device, dtype=dtype).kthvalue(1)
        self.assertEqual(res[0], ref[0].squeeze())
        self.assertEqual(res[1], ref[1].squeeze())

    @dtypes(*all_types())
    @dtypesIfCUDA(*all_types_and(torch.half))
    def test_isin(self, device, dtype):
        def assert_isin_equal(a, b):
            # Compare to the numpy reference implementation.
            x = torch.isin(a, b)
            a = a.cpu().numpy() if torch.is_tensor(a) else np.array(a)
            b = b.cpu().numpy() if torch.is_tensor(b) else np.array(b)
            y = np.isin(a, b)
            self.assertEqual(x, y)

        # multi-dim tensor, multi-dim tensor
        a = torch.arange(24, device=device, dtype=dtype).reshape([2, 3, 4])
        b = torch.tensor([[10, 20, 30], [0, 1, 3], [11, 22, 33]], device=device, dtype=dtype)
        assert_isin_equal(a, b)

        # zero-dim tensor
        zero_d = torch.tensor(3, device=device, dtype=dtype)
        assert_isin_equal(zero_d, b)
        assert_isin_equal(a, zero_d)
        assert_isin_equal(zero_d, zero_d)

        # empty tensor
        empty = torch.tensor([], device=device, dtype=dtype)
        assert_isin_equal(empty, b)
        assert_isin_equal(a, empty)
        assert_isin_equal(empty, empty)

        # scalar
        assert_isin_equal(a, 6)
        assert_isin_equal(5, b)

        def define_expected(lst, invert=False):
            expected = torch.tensor(lst, device=device)
            if invert:
                expected = expected.logical_not()
            return expected

        # Adapted from numpy's in1d tests
        for mult in [1, 10]:
            for invert in [False, True]:
                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a[0] = 8
                ec = define_expected([False, False, True, True], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a[0], a[3] = 4, 8
                ec = define_expected([True, False, True, False], invert=invert)
                c = torch.isin(a, b, assume_unique=True, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 3, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([False, True, False, True, True, True, True, True, True,
                                      False, True, False, False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                b = torch.tensor([2, 3, 4] * mult + [5, 5, 4] * mult, device=device, dtype=dtype)
                ec = define_expected([True, True, True, True, True, True, True, True, True, True,
                                      True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 7, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 7, 1, 1, 2], device=device, dtype=dtype)
                b = torch.tensor([2, 4, 3, 3, 1, 5] * mult, device=device, dtype=dtype)
                ec = define_expected([True, False, True, True, True], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                a = torch.tensor([5, 5], device=device, dtype=dtype)
                b = torch.tensor([2, 2] * mult, device=device, dtype=dtype)
                ec = define_expected([False, False], invert=invert)
                c = torch.isin(a, b, invert=invert)
                self.assertEqual(c, ec)

                # multi-dimensional input case using sort-based algo
                for assume_unique in [False, True]:
                    a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
                    b = torch.arange(3, 30, device=device, dtype=dtype)
                    ec = define_expected([[False, False, False], [True, True, True]], invert=invert)
                    c = torch.isin(a, b, invert=invert, assume_unique=assume_unique)
                    self.assertEqual(c, ec)

    def test_isin_different_dtypes(self, device):
        supported_types = all_types() if device == 'cpu' else all_types_and(torch.half)
        for mult in [1, 10]:
            for assume_unique in [False, True]:
                for dtype1, dtype2 in product(supported_types, supported_types):
                    a = torch.tensor([1, 2, 3], device=device, dtype=dtype1)
                    b = torch.tensor([3, 4, 5] * mult, device=device, dtype=dtype2)
                    ec = torch.tensor([False, False, True], device=device)
                    c = torch.isin(a, b, assume_unique=assume_unique)
                    self.assertEqual(c, ec)

    @onlyCUDA
    @dtypes(*all_types())
    def test_isin_different_devices(self, device, dtype):
        a = torch.arange(6, device=device, dtype=dtype).reshape([2, 3])
        b = torch.arange(3, 30, device='cpu', dtype=dtype)
        with self.assertRaises(RuntimeError):
            torch.isin(a, b)

        c = torch.arange(6, device='cpu', dtype=dtype).reshape([2, 3])
        d = torch.arange(3, 30, device=device, dtype=dtype)
        with self.assertRaises(RuntimeError):
            torch.isin(c, d)

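# For reference, the semantics checked by the isin tests above, in one line:
#   torch.isin(torch.tensor([1, 2, 3]), torch.tensor([3, 4, 5]))
#   -> tensor([False, False, True])
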
class TestShapeOps(TestCase):

    # TODO: update to work on CUDA, too
    @onlyCPU
    def test_unbind(self, device):
        x = torch.rand(2, 3, 4, 5)
        for dim in range(4):
            res = torch.unbind(x, dim)
            res2 = x.unbind(dim)
            self.assertEqual(x.size(dim), len(res))
            self.assertEqual(x.size(dim), len(res2))
            for i in range(dim):
                self.assertEqual(x.select(dim, i), res[i])
                self.assertEqual(x.select(dim, i), res2[i])

    # TODO: update to work on CUDA, too?
    @onlyCPU
    def test_tolist(self, device):
        list0D = []
        tensor0D = torch.tensor(list0D)
        self.assertEqual(tensor0D.tolist(), list0D)

        table1D = [1., 2., 3.]
        tensor1D = torch.tensor(table1D)
        storage = torch.Storage(table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)
        self.assertEqual(tensor1D.tolist(), table1D)
        self.assertEqual(storage.tolist(), table1D)

        table2D = [[1, 2], [3, 4]]
        tensor2D = torch.tensor(table2D)
        self.assertEqual(tensor2D.tolist(), table2D)

        tensor3D = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        tensorNonContig = tensor3D.select(1, 1)
        self.assertFalse(tensorNonContig.is_contiguous())
        self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])

    @dtypes(torch.int64, torch.float, torch.complex128)
    def test_movedim_invalid(self, device, dtype):
        shape = self._rand_shape(4, min_size=5, max_size=10)
        x = _generate_input(shape, dtype, device, False)

        for fn in [torch.movedim, torch.moveaxis]:
            # Invalid `source` and `destination` dimension
            with self.assertRaisesRegex(IndexError, "Dimension out of range"):
                fn(x, 5, 0)
            with self.assertRaisesRegex(IndexError, "Dimension out of range"):
                fn(x, 0, 5)

            # Mismatch in size of `source` and `destination`
            with self.assertRaisesRegex(RuntimeError, "movedim: Invalid source or destination dims:"):
                fn(x, (1, 0), (0, ))

            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
                fn(x, (0, 0), (0, 1))

            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `source`"):
                fn(x, (0, 1, 0), (0, 1, 2))

            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
                fn(x, (0, 1), (1, 1))

            with self.assertRaisesRegex(RuntimeError, "movedim: repeated dim in `destination`"):
                fn(x, (0, 1, 2), (1, 0, 1))
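
    # For reference, movedim/moveaxis mirrors np.moveaxis: it moves the dims at
    # `source` to the positions in `destination`, keeping the remaining dims in
    # their original order, e.g. movedim(x, 1, -1) on a (2, 3, 4) tensor yields
    # shape (2, 4, 3). The test below checks torch against NumPy directly.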
    @dtypes(torch.int64, torch.float, torch.complex128)
    def test_movedim(self, device, dtype):
        for fn in [torch.moveaxis, torch.movedim]:
            for nd in range(5):
                shape = self._rand_shape(nd, min_size=5, max_size=10)
                x = _generate_input(shape, dtype, device, with_extremal=False)
                for random_negative in [True, False]:
                    for src_dim, dst_dim in permutations(range(nd), r=2):
                        random_prob = random.random()

                        if random_negative and random_prob > 0.66:
                            src_dim = src_dim - nd
                        elif random_negative and random_prob > 0.33:
                            dst_dim = dst_dim - nd
                        elif random_negative:
                            src_dim = src_dim - nd
                            dst_dim = dst_dim - nd

                        # Integer `source` and `destination`
                        torch_fn = partial(fn, source=src_dim, destination=dst_dim)
                        np_fn = partial(np.moveaxis, source=src_dim, destination=dst_dim)
                        self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

                    if nd == 0:
                        continue

                    def make_index_negative(sequence, idx):
                        sequence = list(sequence)
                        sequence[idx] = sequence[idx] - nd
                        return tuple(sequence)

                    for src_sequence in permutations(range(nd), r=random.randint(1, nd)):
                        # Sequence `source` and `destination`
                        dst_sequence = tuple(random.sample(range(nd), len(src_sequence)))

                        # Randomly change a dim to a negative dim representation of itself.
                        random_prob = random.random()
                        if random_negative and random_prob > 0.66:
                            random_idx = random.randint(0, len(src_sequence) - 1)
                            src_sequence = make_index_negative(src_sequence, random_idx)
                        elif random_negative and random_prob > 0.33:
                            random_idx = random.randint(0, len(src_sequence) - 1)
                            dst_sequence = make_index_negative(dst_sequence, random_idx)
                        elif random_negative:
                            random_idx = random.randint(0, len(src_sequence) - 1)
                            dst_sequence = make_index_negative(dst_sequence, random_idx)
                            random_idx = random.randint(0, len(src_sequence) - 1)
                            src_sequence = make_index_negative(src_sequence, random_idx)

                        torch_fn = partial(fn, source=src_sequence, destination=dst_sequence)
                        np_fn = partial(np.moveaxis, source=src_sequence, destination=dst_sequence)
                        self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

            # Move dim to same position
            x = torch.randn(2, 3, 5, 7, 11)
            torch_fn = partial(fn, source=(0, 1), destination=(0, 1))
            np_fn = partial(np.moveaxis, source=(0, 1), destination=(0, 1))
            self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

            torch_fn = partial(fn, source=1, destination=1)
            np_fn = partial(np.moveaxis, source=1, destination=1)
            self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

            # Empty Sequence
            torch_fn = partial(fn, source=(), destination=())
            np_fn = partial(np.moveaxis, source=(), destination=())
            self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

    @dtypes(torch.float, torch.bool)
    def test_diag(self, device, dtype):
        if dtype is torch.bool:
            x = torch.rand(100, 100, device=device) >= 0.5
        else:
            x = torch.rand(100, 100, dtype=dtype, device=device)

        res1 = torch.diag(x)
        res2 = torch.tensor((), dtype=dtype, device=device)
        torch.diag(x, out=res2)
        self.assertEqual(res1, res2)

    def test_diagonal(self, device):
        x = torch.randn((100, 100), device=device)
        result = torch.diagonal(x)
        expected = torch.diag(x)
        self.assertEqual(result, expected)

        x = torch.randn((100, 100), device=device)
        result = torch.diagonal(x, 17)
        expected = torch.diag(x, 17)
        self.assertEqual(result, expected)

    @onlyCPU
    @dtypes(torch.float)
    def test_diagonal_multidim(self, device, dtype):
        x = torch.randn(10, 11, 12, 13, dtype=dtype, device=device)
        xn = x.numpy()
        for args in [(2, 2, 3),
                     (2,),
                     (-2, 1, 2),
                     (0, -2, -1)]:
            result = torch.diagonal(x, *args)
            expected = xn.diagonal(*args)
            self.assertEqual(expected.shape, result.shape)
            self.assertEqual(expected, result)
        # test non-contiguous
        xp = x.permute(1, 2, 3, 0)
        result = torch.diagonal(xp, 0, -2, -1)
        expected = xp.numpy().diagonal(0, -2, -1)
        self.assertEqual(expected.shape, result.shape)
        self.assertEqual(expected, result)

    @onlyNativeDeviceTypes
    @dtypes(*all_types())
    @dtypesIfCUDA(*all_types_and(torch.half))
    def test_trace(self, device, dtype):
        def test(shape):
            tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
            expected_dtype = tensor.sum().dtype
            expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]

            result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
            expected = torch.tensor(result, device=device)
            self.assertEqual(tensor.trace(), expected)

        shapes = (
            [10, 1],
            [1, 10],
            [100, 100],
            [20, 100],
            [100, 20],
        )
        for shape in shapes:
            test(shape)

    def generate_clamp_baseline(self, device, dtype, *, min_vals, max_vals, with_nans):
        """
        Creates a random tensor for a given device and dtype, and computes the expected
        clamped values given the min_vals and/or max_vals.
        If with_nans is provided, then some values are randomly set to nan.
        """
        X = torch.rand(100, device=device).mul(50).add(-25)  # uniform in [-25, 25]
        X = X.to(dtype)
        if with_nans:
            mask = torch.randint(0, 2, X.shape, dtype=torch.bool, device=device)
            X[mask] = nan

        if isinstance(min_vals, torch.Tensor):
            min_vals = min_vals.cpu().numpy()

        if isinstance(max_vals, torch.Tensor):
            max_vals = max_vals.cpu().numpy()

        # Use NumPy implementation as reference
        X_clamped = torch.tensor(np.clip(X.cpu().numpy(), a_min=min_vals, a_max=max_vals), device=device)
        return X, X_clamped

    # Tests clamp and its alias, clip
    @dtypes(torch.int64, torch.float32)
    def test_clamp(self, device, dtype):
        op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
                   torch.clip, torch.Tensor.clip, torch.Tensor.clip_)

        # min/max argument product
        args = product((-10, None), (10, None))

        for op in op_list:
            for min_val, max_val in args:
                if min_val is None and max_val is None:
                    continue

                X, Y_expected = self.generate_clamp_baseline(device, dtype,
                                                             min_vals=min_val,
                                                             max_vals=max_val,
                                                             with_nans=False)

                # Test op
                X1 = X.clone()  # So that the in-place ops do not change X
                Y_actual = op(X1, min_val, max_val)
                self.assertEqual(Y_expected, Y_actual)

                # Test op-out behavior (out does not exist for method versions)
                if op in (torch.clamp, torch.clip):
                    Y_out = torch.empty_like(X)
                    op(X, min=min_val, max=max_val, out=Y_out)
                    self.assertEqual(Y_expected, Y_out)

    def test_clamp_propagates_nans(self, device):
        op_list = (torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_,
                   torch.clip, torch.Tensor.clip, torch.Tensor.clip_)

        # min/max argument product
        args = product((-10, None), (10, None))

        for op in op_list:
            for min_val, max_val in args:
                if min_val is None and max_val is None:
                    continue

                X, Y_expected = self.generate_clamp_baseline(device, torch.float,
                                                             min_vals=min_val,
                                                             max_vals=max_val,
                                                             with_nans=True)
                Y_expected = torch.isnan(Y_expected)

                # Test op
                X1 = X.clone()  # So that the in-place ops do not change X
                Y_actual = op(X1, min_val, max_val)
                self.assertEqual(Y_expected, torch.isnan(Y_actual))

                # Test op-out behavior (out does not exist for method versions)
                if op in (torch.clamp, torch.clip):
                    Y_out = torch.empty_like(X)
                    op(X, min_val, max_val, out=Y_out)
                    self.assertEqual(Y_expected, torch.isnan(Y_out))

    def test_clamp_raises_arg_errors(self, device):
        X = torch.randn(100, dtype=torch.float, device=device)
        error_msg = 'At least one of \'min\' or \'max\' must not be None'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            X.clamp()
        with self.assertRaisesRegex(RuntimeError, error_msg):
            X.clamp_()
        with self.assertRaisesRegex(RuntimeError, error_msg):
            torch.clamp(X)

    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_flip(self, device, dtype):
        make_from_data = partial(torch.tensor, device=device, dtype=dtype)
        make_from_size = partial(make_tensor, device=device, dtype=dtype)

        def test_flip_impl(input_t, dims, output_t):
            def all_t():
                yield input_t, output_t
                if dtype is torch.float:
                    # We generate quantized versions as well
                    for qdtype in (torch.quint8, torch.qint8, torch.qint32):
                        qinput_t = torch.quantize_per_tensor(input_t, 0.1, 5, qdtype)
                        qoutput_t = torch.quantize_per_tensor(output_t, 0.1, 5, qdtype)
                        yield qinput_t, qoutput_t

            for in_t, out_t in all_t():
                self.assertEqual(in_t.flip(dims), out_t)
                n = in_t.ndim
                if not isinstance(dims, tuple):
                    # Wrap dim
                    self.assertEqual(in_t.flip(-n + dims), out_t)
                else:
                    # Permute dimensions
                    for p_dims in permutations(dims):
                        self.assertEqual(in_t.flip(p_dims), out_t)
                        if len(p_dims) > 0:
                            # Wrap 1st dim
                            self.assertEqual(in_t.flip((-n + p_dims[0],) + p_dims[1:]), out_t)
        def gen_data():
            # Basic tests
            data = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2)
            nonctg = make_from_size((2, 2, 2), noncontiguous=True).copy_(data)

            dims_result = ((0, make_from_data([5, 6, 7, 8, 1, 2, 3, 4]).view(2, 2, 2)),
                           (1, make_from_data([3, 4, 1, 2, 7, 8, 5, 6]).view(2, 2, 2)),
                           (2, make_from_data([2, 1, 4, 3, 6, 5, 8, 7]).view(2, 2, 2)),
                           ((0, 1), make_from_data([7, 8, 5, 6, 3, 4, 1, 2]).view(2, 2, 2)),
                           ((0, 1, 2), make_from_data([8, 7, 6, 5, 4, 3, 2, 1]).view(2, 2, 2)))
            for in_tensor, (dims, out_tensor) in product((data, nonctg), dims_result):
                yield in_tensor, dims, out_tensor

            # Expanded
            in_t = make_from_data([1, 2, 3]).view(3, 1).expand(3, 2)
            dims = 0
            out_t = make_from_data([3, 3, 2, 2, 1, 1]).view(3, 2)
            yield in_t, dims, out_t
            # Noop on expanded dimension
            yield in_t, 1, in_t

            # Transposed
            in_t = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2).transpose(0, 1)
            dims = (0, 1, 2)
            out_t = make_from_data([8, 7, 4, 3, 6, 5, 2, 1]).view(2, 2, 2)
            yield in_t, dims, out_t

            # Rectangular case
            in_t = make_from_data([1, 2, 3, 4, 5, 6]).view(2, 3)
            dims = 0
            out_t = make_from_data([[4, 5, 6], [1, 2, 3]])
            yield in_t, dims, out_t
            dims = 1
            out_t = make_from_data([[3, 2, 1], [6, 5, 4]])
            yield in_t, dims, out_t

            # Noops (edge cases)

            # Size 0
            in_t = make_from_data(())
            yield in_t, 0, in_t
            yield in_t, (), in_t

            # dims = ()
            in_t = make_from_size((3, 2, 1))
            yield in_t, (), in_t

            # Zero elements, non-zero size
            in_t = make_from_size((3, 0, 2))
            for i in range(in_t.ndim):
                yield in_t, i, in_t

            # Size 1
            in_t = make_from_size(())
            yield in_t, 0, in_t
            in_t = make_from_size((1,))
            yield in_t, 0, in_t

        for in_tensor, dims, out_tensor in gen_data():
            test_flip_impl(in_tensor, dims, out_tensor)

        # test for shape
        size = [2, 3, 4]
        data = make_from_size(size)
        possible_dims = range(len(size))
        test_dims = chain(combinations(possible_dims, 1), combinations(possible_dims, 2))

        for dims in test_dims:
            self.assertEqual(size, list(data.flip(dims).size()))

    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_flip_errors(self, device, dtype):
        make_arg = partial(make_tensor, dtype=dtype, device=device)
        data = make_arg((2, 2, 2))

        # not allow flip on the same dim more than once
        self.assertRaises(RuntimeError, lambda: data.flip(0, 1, 1))
        # not allow empty list as input
        self.assertRaises(TypeError, lambda: data.flip())

        # not allow dim > max dim
        self.assertRaises(IndexError, lambda: data.flip(0, 1, 2, 3))
        self.assertRaises(IndexError, lambda: data.flip(3))

    def _rand_shape(self, dim, min_size, max_size):
        return tuple(torch.randint(min_size, max_size + 1, (dim,)))
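
    # Note: unlike np.flip, which returns a view, torch.flip copies its input,
    # so the NumPy comparison below only checks values, not aliasing.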
    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
    def test_flip_numpy(self, device, dtype):
        make_arg = partial(make_tensor, dtype=dtype, device=device)

        for ndim in [3, 4]:
            shape = self._rand_shape(ndim, 5, 10)
            data = make_arg(shape)

            # Axis to sample for given shape.
            for i in range(1, ndim + 1):
                # Check all combinations of `i` axis.
                for flip_dim in combinations(range(ndim), i):
                    torch_fn = partial(torch.flip, dims=flip_dim)
                    np_fn = partial(np.flip, axis=flip_dim)
                    self.compare_with_numpy(torch_fn, np_fn, data)

    @onlyCUDA  # CPU is too slow
    @largeTensorTest('17GB')  # 4 tensors of 4GB (in, out) x (torch, numpy) + 1GB
    @largeTensorTest("81GB", "cpu")  # even for CUDA test, sufficient system memory is required
    def test_flip_large_tensor(self, device):
        t_in = torch.empty(2**32 + 1, dtype=torch.uint8).random_()
        torch_fn = partial(torch.flip, dims=(0,))
        np_fn = partial(np.flip, axis=0)
        self.compare_with_numpy(torch_fn, np_fn, t_in)
        del t_in

    def _test_fliplr_flipud(self, torch_fn, np_fn, min_dim, max_dim, device, dtype):
        for dim in range(min_dim, max_dim + 1):
            shape = self._rand_shape(dim, 5, 10)
            # Randomly scale the input
            if dtype.is_floating_point or dtype.is_complex:
                data = torch.randn(*shape, device=device, dtype=dtype)
            else:
                data = torch.randint(0, 10, shape, device=device, dtype=dtype)
            self.compare_with_numpy(torch_fn, np_fn, data)

    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_fliplr(self, device, dtype):
        self._test_fliplr_flipud(torch.fliplr, np.fliplr, 2, 4, device, dtype)

    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_fliplr_invalid(self, device, dtype):
        x = torch.randn(42).to(dtype)
        with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
            torch.fliplr(x)
        with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
            torch.fliplr(torch.tensor(42, device=device, dtype=dtype))

    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_flipud(self, device, dtype):
        self._test_fliplr_flipud(torch.flipud, np.flipud, 1, 4, device, dtype)

    @dtypes(torch.int64, torch.double, torch.cdouble)
    def test_flipud_invalid(self, device, dtype):
        with self.assertRaisesRegex(RuntimeError, "Input must be >= 1-d."):
            torch.flipud(torch.tensor(42, device=device, dtype=dtype))

    def test_rot90(self, device):
        data = torch.arange(1, 5, device=device).view(2, 2)
        self.assertEqual(torch.tensor([1, 2, 3, 4]).view(2, 2), data.rot90(0, [0, 1]))
        self.assertEqual(torch.tensor([2, 4, 1, 3]).view(2, 2), data.rot90(1, [0, 1]))
        self.assertEqual(torch.tensor([4, 3, 2, 1]).view(2, 2), data.rot90(2, [0, 1]))
        self.assertEqual(torch.tensor([3, 1, 4, 2]).view(2, 2), data.rot90(3, [0, 1]))

        # test for default args k=1, dims=[0, 1]
        self.assertEqual(data.rot90(), data.rot90(1, [0, 1]))

        # test for reversed order of dims
        self.assertEqual(data.rot90(3, [0, 1]), data.rot90(1, [1, 0]))

        # test for modulo of k
        self.assertEqual(data.rot90(5, [0, 1]), data.rot90(1, [0, 1]))
        self.assertEqual(data.rot90(3, [0, 1]), data.rot90(-1, [0, 1]))
        self.assertEqual(data.rot90(-5, [0, 1]), data.rot90(-1, [0, 1]))

        # test for dims out-of-range error
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, -3]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 2]))

        # test tensor with more than 2D
        data = torch.arange(1, 9, device=device).view(2, 2, 2)
        self.assertEqual(torch.tensor([2, 4, 1, 3, 6, 8, 5, 7]).view(2, 2, 2), data.rot90(1, [1, 2]))
        self.assertEqual(data.rot90(1, [1, -1]), data.rot90(1, [1, 2]))

        # test for errors
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 3]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [1, 1]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 1, 2]))
        self.assertRaises(RuntimeError, lambda: data.rot90(1, [0]))

    @dtypes(torch.cfloat, torch.cdouble)
    def test_complex_rot90(self, device, dtype):
        shape = self._rand_shape(random.randint(2, 4), 5, 10)
        for rot_times in range(4):
            data = torch.randn(*shape, device=device, dtype=dtype)
            torch_fn = partial(torch.rot90, k=rot_times, dims=[0, 1])
            np_fn = partial(np.rot90, k=rot_times, axes=[0, 1])
            self.compare_with_numpy(torch_fn, np_fn, data)

    # TODO: update once warning flag is available to always trigger ONCE warnings
    # Ensures nonzero does not throw a warning, even when the as_tuple argument
    # is not provided
    def test_nonzero_no_warning(self, device):
        t = torch.randn((2, 2), device=device)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            torch.nonzero(t)
            t.nonzero()
            self.assertEqual(len(w), 0)

    @dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16))
    def test_nonzero(self, device, dtype):
        shapes = [
            torch.Size((12,)),
            torch.Size((12, 1)),
            torch.Size((1, 12)),
            torch.Size((6, 2)),
            torch.Size((3, 2, 2)),
            torch.Size((5, 5, 5)),
        ]

        def gen_nontrivial_input(shape, dtype, device):
            if dtype != torch.bfloat16:
                return torch.randint(2, shape, device=device, dtype=dtype)
            else:
                # randint does not support bfloat16 on Windows
                return torch.randint(2, shape, device=device, dtype=torch.float).to(dtype)

        for shape in shapes:
            tensor = gen_nontrivial_input(shape, dtype, device)
            dst1 = torch.nonzero(tensor, as_tuple=False)
            dst2 = tensor.nonzero(as_tuple=False)
            dst3 = torch.empty([], dtype=torch.long, device=device)
            torch.nonzero(tensor, out=dst3)
            if self.device_type != 'xla':
                # xla does not raise runtime error
                self.assertRaisesRegex(
                    RuntimeError,
                    "scalar type Long",
                    lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
                )
            if self.device_type == 'cuda':
                self.assertRaisesRegex(
                    RuntimeError,
                    "on the same device",
                    lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.long))
                )
            np_array = tensor.cpu().numpy() if dtype != torch.bfloat16 else tensor.float().cpu().numpy()
            np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
            self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
            self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
            self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
            tup1 = torch.nonzero(tensor, as_tuple=True)
            tup2 = tensor.nonzero(as_tuple=True)
            tup1 = torch.stack(tup1).t().cpu()
            tup2 = torch.stack(tup2).t().cpu()
            self.assertEqual(tup1, np_result, atol=0, rtol=0)
            self.assertEqual(tup2, np_result, atol=0, rtol=0)

    def test_nonzero_astuple_out(self, device):
        t = torch.randn((3, 3, 3), device=device)
        out = torch.empty_like(t, dtype=torch.long)

        with self.assertRaises(RuntimeError):
            torch.nonzero(t, as_tuple=True, out=out)

        self.assertEqual(torch.nonzero(t, as_tuple=False, out=out), torch.nonzero(t, out=out))

        # Verifies that JIT script cannot handle the as_tuple kwarg
        # See Issue https://github.com/pytorch/pytorch/issues/45499.
        def _foo(t):
            tuple_result = torch.nonzero(t, as_tuple=True)
            nontuple_result = torch.nonzero(t, as_tuple=False)
            out = torch.empty_like(nontuple_result)
            torch.nonzero(t, as_tuple=False, out=out)
            return tuple_result, nontuple_result, out

        with self.assertRaises(RuntimeError):
            scripted_foo = torch.jit.script(_foo)

        # Verifies that JIT tracing works fine
        traced_foo = torch.jit.trace(_foo, t)
        traced_tuple, traced_nontuple, traced_out = traced_foo(t)
        expected_tuple = torch.nonzero(t, as_tuple=True)
        expected_nontuple = torch.nonzero(t)
        self.assertEqual(traced_tuple, expected_tuple)
        self.assertEqual(traced_nontuple, expected_nontuple)
        self.assertEqual(traced_out, expected_nontuple)

    @onlyNativeDeviceTypes
    def test_nonzero_discontiguous(self, device):
        shape = (4, 4)
        tensor = torch.randint(2, shape, device=device)
        tensor_nc = torch.empty(shape[0], shape[1] * 2, device=device)[:, ::2].copy_(tensor)
        dst1 = tensor.nonzero(as_tuple=False)
        dst2 = tensor_nc.nonzero(as_tuple=False)
        self.assertEqual(dst1, dst2, atol=0, rtol=0)
        dst3 = torch.empty_like(dst1)
        data_ptr = dst3.data_ptr()
        # expect dst3 storage to be reused
        torch.nonzero(tensor, out=dst3)
        self.assertEqual(data_ptr, dst3.data_ptr())
        self.assertEqual(dst1, dst3, atol=0, rtol=0)
        # discontiguous out
        dst4 = torch.empty(dst1.size(0), dst1.size(1) * 2, dtype=torch.long, device=device)[:, ::2]
        data_ptr = dst4.data_ptr()
        strides = dst4.stride()
        torch.nonzero(tensor, out=dst4)
        self.assertEqual(data_ptr, dst4.data_ptr())
        self.assertEqual(dst1, dst4, atol=0, rtol=0)
        self.assertEqual(strides, dst4.stride())

    def test_nonzero_non_diff(self, device):
        x = torch.randn(10, requires_grad=True)
        nz = x.nonzero()
        self.assertFalse(nz.requires_grad)
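
# The original file presumably registers these classes with the device-type
# test framework and exposes a main entry point; the usual pattern (assumed,
# not shown in this excerpt) is:
#
#   instantiate_device_type_tests(TestSortAndSelect, globals())
#   instantiate_device_type_tests(TestShapeOps, globals())
#
#   if __name__ == '__main__':
#       run_tests()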