def test_get_shape_per_tensor_fail(self):
    # Tensors with a mismatched number of dimensions should be rejected.
    tensor_list = [
        random_tensor(0, 32, shape=(2, 2, 2)),
        random_tensor(0, 32, shape=(3, 3, 3, 3))
    ]
    with pytest.raises(ValueError,
                       match='Expected all tensors to have 3 dimensions but '
                             'got 4 at index 1'):
        output_shape_per_tensor = batch.get_shape_per_tensor(tensor_list)
def test_list_to_packed_fail1(self):
    tensor_list = [
        random_tensor(0, 32, shape=(2, 2, 2)),
        random_tensor(0, 32, shape=(3, 3, 3))
    ]
    with pytest.raises(ValueError,
                       match='Expected all tensor to have last dimension 2 but '
                             'got 3 at index 1'):
        _ = batch.list_to_packed(tensor_list)
def test_list_to_packed_fail2(self):
    tensor_list = [
        random_tensor(0, 32, shape=(2, 2, 2), dtype=torch.long, device='cpu'),
        random_tensor(0, 32, shape=(2, 2, 2), dtype=torch.float, device='cuda')
    ]
    with pytest.raises(ValueError,
                       match='Expected all tensor to have type torch.LongTensor but '
                             'got torch.cuda.FloatTensor at index 1'):
        _ = batch.list_to_packed(tensor_list)
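# Hedged success-path sketch (assumption): complements the failure tests above.
# It assumes batch.list_to_packed returns (packed_tensor, shape_per_tensor) and
# that the packed tensor concatenates each sub-tensor flattened to (-1, last_dim);
# the test name below is hypothetical and not part of the original suite.
def test_list_to_packed_success_sketch(self):
    tensor_list = [
        random_tensor(0, 32, shape=(2, 2, 2)),
        random_tensor(0, 32, shape=(3, 3, 2))
    ]
    packed_tensor, shape_per_tensor = batch.list_to_packed(tensor_list)
    # 2 * 2 rows from the first tensor plus 3 * 3 rows from the second,
    # all sharing the common last dimension of 2.
    assert packed_tensor.shape == (2 * 2 + 3 * 3, 2)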
def test_random_tensor_seed(low, high, shape):
    # At least 90% of the entries should differ between two independent draws.
    threshold = shape[0] * shape[1] * 0.9
    manual_seed(0)
    tensor1 = random_tensor(low, high, shape)
    tensor2 = random_tensor(low, high, shape)
    assert torch.sum(tensor1 != tensor2) > threshold
    # Reseeding with the same value must reproduce the first draw exactly.
    manual_seed(0)
    tensor3 = random_tensor(low, high, shape)
    assert torch.equal(tensor1, tensor3)
    # A different seed should again produce a mostly different tensor.
    manual_seed(1)
    tensor4 = random_tensor(low, high, shape)
    assert torch.sum(tensor1 != tensor4) > threshold
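# Hedged sketch (assumption): `manual_seed` above is a helper defined elsewhere in
# the test suite. A minimal stand-in that seeds both the CPU and CUDA RNGs could
# look like the hypothetical `_manual_seed_sketch` below; it is not the actual helper.
def _manual_seed_sketch(seed):
    torch.manual_seed(seed)               # seed the CPU generator
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)  # seed every CUDA device generator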
def inputs(self, high_val, total_numel, dtype, device):
    return random_tensor(0, high_val, shape=(total_numel, 1),
                         dtype=dtype, device=device)
def test_random_tensor(low, high, shape, dtype, device):
    tensor = random_tensor(low, high, shape, dtype, device)
    # The generated tensor must match the requested shape, dtype and device,
    # and every value must lie within the inclusive [low, high] range.
    check_tensor(tensor, shape, dtype, device)
    assert (low <= tensor).all()
    assert (tensor <= high).all()
def tensor_list(self, dtype, device, high_val, shape_per_tensor, last_dim):
    return [random_tensor(0, high_val, shape=tuple(shape) + (last_dim,),
                          dtype=dtype, device=device)
            for shape in shape_per_tensor]
def inputs(self, high_val, numel_per_tensor, dtype, device):
    return random_tensor(0, high_val, shape=(numel_per_tensor.shape[0],),
                         dtype=dtype, device=device)
def tensor(self, shape, dtype, device):
    return random_tensor(0, 256, shape=shape, dtype=dtype, device=device)
def packed_tensor(self, total_numel, last_dim, dtype, device):
    return random_tensor(0, 256, shape=(total_numel, last_dim),
                         dtype=dtype, device=device)
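# Hedged usage sketch (assumption): shows how the fixtures above might be consumed.
# `test_packed_tensor_shape_sketch` is hypothetical and only restates what the
# `packed_tensor` fixture already guarantees about shape, dtype and device.
def test_packed_tensor_shape_sketch(self, packed_tensor, total_numel, last_dim,
                                    dtype, device):
    check_tensor(packed_tensor, shape=(total_numel, last_dim),
                 dtype=dtype, device=device)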