    def test_alternative_dtypes(self):
        shape = [3, 4, 5, 6]
        array = numpy.zeros(shape)

        # Setting dtype to numpy.int64 should produce a torch.LongTensor when field is converted to
        # a tensor
        array_field1 = TensorField(array, dtype=numpy.int64)
        returned_tensor1 = array_field1.as_tensor(array_field1.get_padding_lengths())
        assert returned_tensor1.dtype == torch.int64

        # Setting dtype to numpy.uint8 should produce a torch.ByteTensor when field is converted to
        # a tensor
        array_field2 = TensorField(array, dtype=numpy.uint8)
        returned_tensor2 = array_field2.as_tensor(array_field2.get_padding_lengths())
        assert returned_tensor2.dtype == torch.uint8

        # Padding should not affect dtype
        padding_lengths = {"dimension_" + str(i): 10 for i, _ in enumerate(shape)}
        padded_tensor = array_field2.as_tensor(padding_lengths)
        assert padded_tensor.dtype == torch.uint8

        # Empty fields should have the same dtype
        empty_field = array_field2.empty_field()
        assert empty_field.tensor.dtype == array_field2.tensor.dtype
    def test_get_padding_lengths_correctly_returns_ordered_shape(self):
        shape = [3, 4, 5, 6]
        array = numpy.zeros(shape)
        array_field = TensorField(array)
        lengths = array_field.get_padding_lengths()
        # Padding lengths are keyed "dimension_0", "dimension_1", ... and should mirror the array's shape.
        for i in range(len(lengths)):
            assert lengths["dimension_{}".format(i)] == shape[i]
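    # Hedged sketch, not part of the original suite: illustrates the padding behavior assumed
    # by the tests above, namely that as_tensor() pads each "dimension_i" up to the requested
    # length. Zero as the default padding value is an assumption here.
    def test_as_tensor_pads_with_zeros_sketch(self):
        array = numpy.ones([2, 3])
        array_field = TensorField(array)
        padded_tensor = array_field.as_tensor({"dimension_0": 4, "dimension_1": 5})
        assert padded_tensor.shape == (4, 5)
        # Original values survive in the top-left corner; the rest is assumed zero-filled.
        assert padded_tensor[:2, :3].sum().item() == 6
        assert padded_tensor.sum().item() == 6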
    def test_as_tensor_with_scalar_keeps_dtype(self):
        array = TensorField(numpy.asarray(42, dtype=numpy.float32))
        returned_tensor = array.as_tensor(array.get_padding_lengths())
        assert returned_tensor.dtype == torch.float32
    def test_as_tensor_works_with_scalar(self):
        array = TensorField(numpy.asarray(42))
        returned_tensor = array.as_tensor(array.get_padding_lengths())
        current_tensor = numpy.asarray(42)
        numpy.testing.assert_array_equal(returned_tensor, current_tensor)