Example #1
def _graph_constant(g, value, dims, type, *args, **kwargs):
    assert isinstance(value, numbers.Number)
    assert type is not None
    isscalar = False
    if dims is None or dims == 0 or set(dims) == set([0]):
        dims = [1]
        isscalar = True
    type = type.lower()
    if type == "char":
        tensor = torch.CharTensor(*dims)
    elif type == "short":
        tensor = torch.ShortTensor(*dims)
    elif type == "int":
        tensor = torch.IntTensor(*dims)
    elif type == "long":
        tensor = torch.LongTensor(*dims)
    elif type == "half":
        tensor = torch.HalfTensor(*dims)
    elif type == "float":
        tensor = torch.FloatTensor(*dims)
    elif type == "double":
        tensor = torch.DoubleTensor(*dims)
    else:
        raise ValueError(
            "Unknown type, type should be one of the following strings: "
            "char, short, int, long, half, float, double")
    tensor.fill_(value)
    if isscalar:
        return g.op("Constant", *args, value_z=tensor, **kwargs)
    return g.op("Constant", *args, value_t=tensor, **kwargs)
Example #2
def store(self, state, action, reward, next_state, done):
    # Down-cast before buffering to keep replay memory small: states as
    # uint8, actions as uint8, rewards as int8, done flags as bool.
    state = from_numpy(state).byte().to("cpu")
    reward = torch.CharTensor([reward])
    action = torch.ByteTensor([action]).to("cpu")
    next_state = from_numpy(next_state).byte().to("cpu")
    done = torch.BoolTensor([done])
    self.memory.add(state, reward, done, action, next_state)
Example #3
def convert_to_1hot(target, noutputs, make_11=True):
    # Build the one-hot matrix on the same device as the target labels.
    if target.is_cuda:
        target_1hot = torch.cuda.CharTensor(target.size()[0], noutputs)
    else:
        target_1hot = torch.CharTensor(target.size()[0], noutputs)
    target_1hot.zero_()
    # Write a 1 at each row's target class index.
    target_1hot.scatter_(1, target.unsqueeze(1), 1)
    # Match the dtype of the incoming labels.
    target_1hot = target_1hot.type(target.type())
    if make_11:
        # Map {0, 1} to {-1, +1}.
        target_1hot = target_1hot * 2 - 1
    return target_1hot
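A hypothetical call, assuming target is a LongTensor of class indices:

target = torch.LongTensor([0, 2, 1])
print(convert_to_1hot(target, noutputs=3, make_11=False))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])
print(convert_to_1hot(target, noutputs=3))  # same rows with 0 mapped to -1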
Example #4
def __init__(self, schema=None, mask_value=0):
    if schema is None:
        schema = uniread.schema.load_default_schema()
    self.schema = schema
    self.one_hot_converter = UnimorphTagBitVectorConverter(schema)
    self.mask_value = mask_value
    # One row per schema group; each row is 1 exactly over the bit-vector
    # positions belonging to that group.
    self.mask = torch.CharTensor(len(self.schema),
                                 len(self.one_hot_converter)).zero_()
    mask_offset = 0
    for row_id, schema_group in enumerate(self.schema.values()):
        self.mask[row_id, mask_offset:mask_offset + len(schema_group)] = 1
        mask_offset += len(schema_group)
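The mask layout is easier to see with concrete numbers. A standalone sketch of the same construction, with the uniread schema groups replaced by an assumed list of group sizes:

import torch

group_sizes = [2, 3]  # e.g. two schema groups holding 2 and 3 tags
mask = torch.CharTensor(len(group_sizes), sum(group_sizes)).zero_()
offset = 0
for row, size in enumerate(group_sizes):
    mask[row, offset:offset + size] = 1  # 1s over this group's bit positions
    offset += size
print(mask)
# tensor([[1, 1, 0, 0, 0],
#         [0, 0, 1, 1, 1]], dtype=torch.int8)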
Example #5
def test_all_dtypes():
    return (
        torch.BoolTensor([2]),
        torch.LongTensor([3]),
        torch.ByteTensor([4]),
        torch.CharTensor([5]),
        torch.DoubleTensor([6]),
        torch.FloatTensor([7]),
        torch.IntTensor([8]),
        torch.ShortTensor([1]),
        torch.HalfTensor([1]),
    )
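A quick way to see what the fixture covers, assuming torch is imported:

for t in test_all_dtypes():
    print(t.dtype, t.tolist())
# torch.bool [True]
# torch.int64 [3]
# ... down to torch.float16 [1.0]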
Example #6
    def test_cast_variable(self):
        inputs = [
            torch.ByteTensor(1),
            torch.CharTensor(1),
            torch.DoubleTensor(1),
            torch.FloatTensor(1),
            torch.IntTensor(1),
            torch.LongTensor(1),
            torch.ShortTensor(1),
        ]

        for inp in inputs:
            assert type(inp) == type(torch.cast(Variable(inp), type(inp)).data)
Example #7
def _graph_constant(
    g,
    value,
    dims,
    type_: str,
    *args,
    **kwargs,
):
    """This helper function can create either constant tensor or constant scalar.

    If dims is None or 0 or [0], generate a 0-d tensor (scalar).
    """
    assert isinstance(value, numbers.Number)
    assert type_ is not None
    isscalar = False
    if dims is None or dims == 0 or set(dims) == {0}:
        dims = [1]
        isscalar = True
    type_ = type_.lower()
    tensor: Union[
        torch.CharTensor,
        torch.ShortTensor,
        torch.IntTensor,
        torch.LongTensor,
        torch.HalfTensor,
        torch.FloatTensor,
        torch.DoubleTensor,
    ]
    if type_ == "char":
        tensor = torch.CharTensor(*dims)
    elif type_ == "short":
        tensor = torch.ShortTensor(*dims)
    elif type_ == "int":
        tensor = torch.IntTensor(*dims)
    elif type_ == "long":
        tensor = torch.LongTensor(*dims)
    elif type_ == "half":
        tensor = torch.HalfTensor(*dims)
    elif type_ == "float":
        tensor = torch.FloatTensor(*dims)
    elif type_ == "double":
        tensor = torch.DoubleTensor(*dims)
    else:
        raise ValueError(
            "Unknown type, type should be one of the following strings: "
            "char, short, int, long, half, float, double"
        )
    tensor.fill_(value)  # type: ignore[call-overload]
    if isscalar:
        return g.op("Constant", *args, value_z=tensor, **kwargs)
    return g.op("Constant", *args, value_t=tensor, **kwargs)
Example #8
def changeIndexesExtr_test1():
    # The original allocated a 25x32 map and immediately overwrote it; only
    # this 129x254 change map takes effect.
    cm = torch.CharTensor(129, 254).zero_()

    ci_ref = []

    def insertChange(y, x):
        cm[y][x] = 1
        ci_ref.append(y * cm.size(-1) + x)

    insertChange(3, 3)
    insertChange(7, 5)
    insertChange(5, 7)
    insertChange(7, 1)
    insertChange(1, 5)
    insertChange(24, 31)

    ci_ref = torch.IntTensor(ci_ref)
    cm = cm.cuda()

    #    ci_gpu, cnt_gpu = changeIndexesExtr(cm)
    ci = changeIndexesExtr_python(cm)
    passed = (ci_ref.sort()[0] == ci.cpu().sort()[0]).all()
    return passed
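changeIndexesExtr_python itself is not shown here. A plausible pure-PyTorch sketch of what the test expects (an assumption, not the original implementation): flatten the change map and return the flat indices of its nonzero cells.

def changeIndexesExtr_python(cm):
    # Flat index of every set cell is y * width + x, matching insertChange above.
    return cm.view(-1).nonzero().view(-1).int()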
Example #9
def occupy(self):
    if hasattr(self, 'memory'):
        print('Already busy')
    else:
        t = time()
        free = get_gpu_memory_map(False)
        used = get_gpu_memory_map(True)
        total = free[self.gpu] + used[self.gpu]
        print("Occupying memory: currently at {}/{}".format(
            used[self.gpu], total))
        # One CharTensor element is one byte, so this allocates ~90% of the
        # free memory (reported in MB, hence the 1e6 factor).
        self.memory = torch.CharTensor(int(free[self.gpu] * 1e6 *
                                           .9)).cuda()
        used_after = get_gpu_memory_map(True)
        print("Done. Memory: {0:d}/{1:d} [{2:.1f} s.]".format(
            used_after[self.gpu], total,
            time() - t))
Example #10
    def test_abs(self):
        def _test_abs(tensors_dict):
            for _category, tensors in tensors_dict.items():
                for data in tensors:
                    _test_abs_single(data)

        def _test_abs_single(data):
            switch = torch.rand(
                data.size(),
                device=device).mul(2).floor().mul(2).add(-1).type(data.dtype)
            res = torch.mul(data, switch)
            self.assertTensorsSlowEqual(res.abs(), data, 1e-16)

        shapes = [(3, 4), (3, 5, 7), (2, 2, 5, 8, 2, 3), (1000, ),
                  (10, 10, 10)]

        for shape in shapes:
            # Test all except char/byte
            _test_abs(self._make_tensors(shape, val_range=(0, 1000)))

            # Test char
            _test_abs_single(torch.CharTensor(*shape).random_(0, 100))

            # Test byte
            byte_tensor = torch.ByteTensor(*shape).random_(0, 100)
            self.assertTensorsSlowEqual(byte_tensor, byte_tensor.abs(), 1e-16)

        # Checking that the right abs function is called for LongTensor
        bignumber = 2**31 + 1
        res = torch.LongTensor((-bignumber, ))
        self.assertGreater(res.abs()[0], 0)

        # abs should be a no-op for values already clamped to [0, 1]
        rec = torch.randn(2, 2, 3, 7, 6, 2).type(torch.float64).clamp(0, 1)
        val1 = rec.select(-1, -1).data[0][0][0].sum()
        val2 = rec.select(-1, -1).data.abs()[0][0][0].sum()
        self.assertEqual(val1, val2, 1e-8, 'absolute value')

        # Both abs(0.0) and abs(-0.0) should result in 0.0
        for dtype in (torch.float, torch.double):
            for abs_zeros in (
                    torch.tensor([0.0, -0.0], dtype=dtype).abs().tolist(),
                    # test a large tensor so that the vectorized version is tested
                    torch.abs(-torch.zeros(10000, dtype=dtype)).tolist()):
                for num in abs_zeros:
                    self.assertGreater(math.copysign(1.0, num), 0.0)
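The bignumber check above is worth spelling out: 2**31 + 1 does not fit in 32 bits, so if a 32-bit abs were dispatched for LongTensor the result would wrap negative. A quick standalone check:

import torch

big = torch.LongTensor([-(2**31 + 1)])
print(big.abs())  # tensor([2147483649]) when the 64-bit abs is used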
Example #11
def onehot_to_chars(embedded_text: Tensor, idx_to_char: dict) -> str:
    """
    Reverses the embedding of a text sequence, producing back the original
    sequence as a string.
    :param embedded_text: Text sequence represented as a tensor of shape
    (N, D) where each row is the one-hot encoding of a character.
    :param idx_to_char: Mapping from indices to characters.
    :return: A string containing the text sequence represented by the
    embedding.
    """
    # TODO: Implement the reverse-embedding.
    # ====== YOUR CODE: ======
    # Indices 0..V-1 as a vector. Note that int8 (CharTensor) only reaches
    # 127, which caps the usable vocabulary size.
    index_vec = torch.CharTensor(list(range(len(idx_to_char))))
    result = ""
    # Cast both operands to float so the matmul dtypes match (integer
    # matmul is not supported on CPU).
    string_indices = embedded_text.to(torch.float32) @ index_vec.to(torch.float32)
    for index in string_indices:
        result += idx_to_char[int(index)]
    # ========================
    return result
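A hypothetical round trip, with an assumed three-character mapping:

idx_to_char = {0: 'a', 1: 'b', 2: 'c'}
onehot = torch.eye(3)[[0, 2, 1]]  # one-hot rows encoding "acb"
print(onehot_to_chars(onehot, idx_to_char))  # acb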
Example #12
import torch
from torch.autograd import Variable

# a 3-row, 2-column tensor (uninitialized)
a = torch.Tensor(3, 2)
print(a)
print(a.size())
b = torch.Tensor(3, 2, 2)
print(b)
c = torch.IntTensor(2, 2)
print(c)
print(torch.ByteTensor(3, 2))
print(torch.CharTensor(3, 2))
print(torch.DoubleTensor(3, 2))

a = torch.arange(6).view(3, 2)
print(a)
b = torch.arange(6).view(2, 3)
print(b)

print(a @ b)

# print(torch.dot(a, b))  # dot only works on 1-D vectors
print(torch.matmul(a, b))
print(torch.mm(a, b))  # matrix multiplication in the mathematical sense

print(a.t())
print(a.numel())  # number of elements in a

a = torch.arange(1, 4)
b = torch.arange(1, 4)
Example #13
import torch

lt = torch.LongTensor([1, 2, 3, 4])
print(lt)
print(lt.float())
print()
bt = torch.ByteTensor([True, False, False, True])
ct = torch.CharTensor([True, False, False, True])
print(bt)
print(bt.long())
print(bt.float())
print()
print(ct)
print(ct.long())
print(ct.float())
print()
ft = torch.FloatTensor([True, False, False, True])
print(ft)
print(ft.long())
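The difference between ByteTensor and CharTensor above is just signedness:

print(torch.ByteTensor([1]).dtype)  # torch.uint8 (unsigned 8-bit)
print(torch.CharTensor([1]).dtype)  # torch.int8 (signed 8-bit)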
Example #14
second_point[0] = 4
print("\nclone has changed the original tensor?",
      id(points[1]) == id(second_point))
print("transpose of points:\n", points.t())  #transpose
print("stride of points:", points.stride())
print("stride of transposed points:", points.t().stride())
print("points is contiguous?", points.is_contiguous())
points_con = points.t().contiguous()
print("transpose of points is contiguous?",
      points.t().is_contiguous(), " after contiguous method to points.t():",
      points_con.is_contiguous())

##2.4 Numeric types
double_points = torch.zeros(10, 2).double()  #method
short_points = torch.tensor([[1, 2], [3, 4]], dtype=torch.short)  #argument
char_points = torch.CharTensor([[1, 2]])  #typed constructor
print("\ndtype of double float:", double_points.dtype)
print("dtype of short int:", short_points.dtype)
print("dtype of char:", char_points.dtype)

rand_points = torch.randn(10, 2).type(torch.short)  #method
print(
    "\nyou can cast a tensor of one type as a tensor of another type by using 'type' method:\n#rand_points = torch.randn(10, 2).type(torch.short)\ndtype of rand_points:",
    rand_points.dtype)

##2.6 NumPy interoperability
points = torch.tensor([[1, 1, 1], [2, 2, 2]])
points_np = points.numpy()
print(
    "from NumPy out of tensor:\n",
    points_np)  #the array shares an underlying buffer with the tensor storage
Example #15
    model = DanQ()
elif args.model_type == 5:
    model = BassetNormCat()

num_params = sum([p.numel() for p in model.parameters()])

model.cuda()
print("Model successfully imported\nTotal number of parameters {}".format(
    num_params),
      file=Logger)

start = time.time()
print("Reading data from file {}".format(args.data), file=Logger)
data = h5py.File(args.data, "r")  # open read-only

train = torch.utils.data.TensorDataset(torch.CharTensor(data['train_in']),
                                       torch.CharTensor(data['train_out']))
val = torch.utils.data.TensorDataset(torch.CharTensor(data['valid_in']),
                                     torch.CharTensor(data['valid_out']))
test = torch.utils.data.TensorDataset(torch.CharTensor(data['test_in']),
                                      torch.CharTensor(data['test_out']))
train_loader = torch.utils.data.DataLoader(train,
                                           batch_size=args.batch_size,
                                           shuffle=True)
# train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True, num_workers=int(args.workers))
val_loader = torch.utils.data.DataLoader(val,
                                         batch_size=args.batch_size,
                                         shuffle=False)
# val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
test_loader = torch.utils.data.DataLoader(test,
                                          batch_size=args.batch_size,
Example #16
    assert 0 == 1, "--cell_set flag must be 'train', 'valid', or 'test'"

c_idx = [
    i for i, x in enumerate(list(data['target_labels'][:]))
    if str(x, 'utf-8') in get_type
]
if args.cell_set == 'all':
    v_idx = [
        i for i, x in enumerate(list(data['target_labels'][:]))
        if str(x, 'utf-8') in valid_type
    ]
else:
    v_idx = c_idx

train = torch.utils.data.TensorDataset(
    torch.CharTensor(data['train_in'][:]),
    torch.CharTensor(data['train_out'][:, c_idx]))
val = torch.utils.data.TensorDataset(
    torch.CharTensor(data['valid_in'][:]),
    torch.CharTensor(data['valid_out'][:, c_idx]))
test = torch.utils.data.TensorDataset(
    torch.CharTensor(data['test_in'][:]),
    torch.CharTensor(data['test_out'][:, c_idx]))
train_loader = torch.utils.data.DataLoader(train,
                                           batch_size=args.batch_size,
                                           shuffle=True)
# train_loader = torch.utils.data.DataLoader(train, batch_size=args.batch_size, shuffle=True, num_workers=int(args.workers))
val_loader = torch.utils.data.DataLoader(val,
                                         batch_size=args.batch_size,
                                         shuffle=False)
# val_loader = torch.utils.data.DataLoader(val, batch_size=args.batch_size, shuffle=False, num_workers=int(args.workers))
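A minimal sketch of consuming these loaders downstream (model is assumed to be defined as in Example #15; the float cast is needed because the batches are int8):

for x, y in train_loader:
    # CharTensor (int8) batches need a float cast before entering the model.
    x = x.float().cuda()
    y = y.float().cuda()
    out = model(x)
    break  # sketch only: process one batch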