# Shared imports for the tests below. Tensor, Model, Embedding, Linear,
# LSTMCell, LSTMLayer, CrossEntropyLoss, SGD and the settings module come
# from the custom autograd library under test; import them from wherever
# the project defines them.
import time

import numpy as np
import torch
import torch.nn.functional as f
from torch import nn
from tqdm import tqdm


def test_index_select():
    x_cpu = np.random.rand(10, 5)
    x = Tensor(x_cpu, device='cuda', autograd=True)
    indices = Tensor([1, 2, 3], device='cpu', d_type=np.int32)
    embs = x.index_select(indices)
    print(x)
    print(embs)
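
# A minimal parity sketch, assuming index_select gathers rows (an
# embedding-style lookup) and that .cpu().data exposes the underlying
# NumPy array, as the tests below already rely on.
def test_index_select_matches_numpy():
    x_cpu = np.random.rand(10, 5)
    x = Tensor(x_cpu, device='cuda')
    embs = x.index_select(Tensor([1, 2, 3], device='cpu', d_type=np.int32))
    assert np.allclose(embs.cpu().data, x_cpu[[1, 2, 3]])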

def test_build_model():
    model = Model(
        embedding=None,
        hidden_dim=300,
        output_dim=10,
        device='cuda',
        embedding_dim=300,
        vocab_size=10000,
    )

    x = Tensor([[*range(i, i + 20)] for i in range(20)], device='cpu')
    target = Tensor(np.random.randint(0, 10, 20),
                    device='cpu',
                    d_type=np.int32,
                    autograd=True)

    criterion = CrossEntropyLoss()
    optimizer = SGD(parameters=model.get_parameters())
    for _ in tqdm(range(0, 10)):
        output = model(x)
        loss = criterion(output, target)
        t1 = time.time()
        loss.backward()
        t2 = time.time()
        print(f'time to backward loss: {t2 - t1}')

        t1 = time.time()
        optimizer.step()
        t2 = time.time()
        print(f'time to step: {t2 - t1}')

def test_backward():
    x_np = np.random.rand(2, 3)
    w_np = np.random.rand(3, 5)
    print(x_np)
    print(w_np)
    x = Tensor(x_np, device='cuda', autograd=True)
    w = Tensor(w_np, device='cuda', autograd=True)
    res = (x -
           Tensor(1, d_type=np.float32, device='cuda', autograd=True)).mm(w)
    print('res_cuda = ', res)
    print('res_softmax = ', res.softmax())
    loss = res.cross_entropy(Tensor([1, 2], device='cpu', d_type=np.int32))
    print('loss_cuda = ', loss)
    loss.backward()
    print('x.grad = ', x.grad)
    print('w.grad = ', w.grad)

    # torch
    print('-------------------------')
    x_torch = torch.Tensor(x_np)
    w_torch = torch.Tensor(w_np)
    y_torch = torch.Tensor([1, 2])
    res_torch = (x_torch - 1).mm(w_torch)
    print('res_torch = ', res_torch)
    print('res_torch_softmax = ', f.softmax(res_torch, dim=1))
    loss_func = nn.NLLLoss()
    # NLLLoss expects log-probabilities, so use log_softmax rather than
    # softmax when building the reference loss.
    loss_torch = loss_func(f.log_softmax(res_torch, dim=1), y_torch.long())
    print('loss_torch', loss_torch)
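
# A sketch extending the torch comparison above to gradients; pure torch,
# so it can be checked against the x.grad and w.grad printed by
# test_backward (up to random inputs).
def test_backward_grads_torch():
    x_np = np.random.rand(2, 3).astype(np.float32)
    w_np = np.random.rand(3, 5).astype(np.float32)
    x_torch = torch.tensor(x_np, requires_grad=True)
    w_torch = torch.tensor(w_np, requires_grad=True)
    res_torch = (x_torch - 1).mm(w_torch)
    loss_torch = nn.NLLLoss()(f.log_softmax(res_torch, dim=1),
                              torch.tensor([1, 2]))
    loss_torch.backward()
    print('x_torch.grad = ', x_torch.grad)
    print('w_torch.grad = ', w_torch.grad)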

def test_relu():
    data = np.random.rand(10, 10) - 0.5
    x_gpu = Tensor(data, device='cuda')
    x_cpu = Tensor(data, device='cpu')
    x_relu = x_gpu.relu()
    print(x_relu)
    print(x_relu.relu_grad())
    print(x_cpu.relu())
    # count of entries clipped to zero (np.sum already reduces to a scalar)
    print(np.sum(x_relu.cpu().data == 0))
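
# A reference check: relu should match np.maximum(data, 0) elementwise,
# again going through .cpu().data to get back to NumPy.
def test_relu_matches_numpy():
    data = np.random.rand(10, 10) - 0.5
    x_relu = Tensor(data, device='cuda').relu()
    assert np.allclose(x_relu.cpu().data, np.maximum(data, 0))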

def test_dropout():
    data = np.random.rand(10, 10)
    x_gpu = Tensor(data=data, device='cuda')
    x_dropout = x_gpu.dropout(0.1).cpu()
    print(x_gpu)
    print(x_dropout)
    print(x_dropout.shape)
    print(np.sum(x_dropout.data == 0))
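
# A statistical sketch, assuming the argument to dropout() is the drop
# probability: with p=0.1 roughly 10% of entries should come back as zero
# on a sample large enough for the ratio to be stable.
def test_dropout_zero_fraction():
    data = np.random.rand(100, 100)
    x_dropout = Tensor(data=data, device='cuda').dropout(0.1).cpu()
    zero_fraction = np.mean(x_dropout.data == 0)
    assert 0.05 < zero_fraction < 0.15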

def test_mm_graph():
    x = Tensor(np.random.rand(10, 4).astype(np.float32),
               device='cuda',
               autograd=True)
    y = Tensor(np.random.rand(4, 5).astype(np.float32),
               device='cuda',
               autograd=True)
    res = x.mm(y)
    print(f'x: {x.children}')
    print(f'y: {y.children}')
    print(f'res: {res.children}')

# `__call__` method lifted from a batch-collate class; it relies on
# self.padding_collate and self.word_padding_index being defined on that
# class, and on settings.DEVICE naming the target device.
def __call__(self, data):
    xs, ys = zip(*data)
    source_subs, target_subs, users, contents = zip(*xs)
    return (
        (Tensor(users, device=settings.DEVICE, d_type=np.int32),
         Tensor(source_subs, device=settings.DEVICE, d_type=np.int32),
         Tensor(target_subs, device=settings.DEVICE, d_type=np.int32),
         Tensor(self.padding_collate(contents,
                                     padding_index=self.word_padding_index),
                device=settings.DEVICE, d_type=np.int32)),
        Tensor(ys, device='cpu', d_type=np.int32, autograd=True),
    )

def test_gpu_vs_cpu():
    vectors = np.random.normal(0, 1, size=(20, 8)) * (2 / 28)**0.5
    vectors[0, :] = 0

    embedding_gpu = Embedding.from_pretrained(
        vectors=vectors,
        padding_index=0,
        device='cuda',
        autograd=True,
    )

    embedding_cpu = Embedding.from_pretrained(
        vectors=vectors,
        padding_index=0,
        device='cpu',
        autograd=True,
    )

    indices = Tensor([[0, 1, 0, 4, 5], [1, 4, 0, 1, 2]],
                     device='cpu',
                     d_type=np.int32)
    embeds_gpu = embedding_gpu(indices)
    embeds_cpu = embedding_cpu(indices)

    linear_cpu = Linear(8, 2, device='cpu', bias=True)
    linear_gpu = Linear(8, 2, device='cuda', bias=True)
    linear_gpu.weight = linear_cpu.weight.to('cuda')
    linear_gpu.bias = linear_cpu.bias.to('cuda')
    # print(embeds_gpu[0].shape)

    out_cpu = linear_cpu(embeds_cpu[0])
    out_gpu = linear_gpu(embeds_gpu[0])
    # print(out_cpu)
    # print(out_gpu)

    target = Tensor([1, 0, 1, 0, 0], device='cpu', d_type=np.int32)
    loss_gpu = out_gpu.cross_entropy(target)
    loss_cpu = out_cpu.cross_entropy(target)
    print(loss_cpu, loss_gpu)
    loss_gpu.backward()
    loss_cpu.backward()

    print(linear_gpu.weight.grad)
    print(linear_cpu.weight.grad)

    print(embedding_gpu.weight.grad)
    print(embedding_cpu.weight.grad)
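
# A hypothetical helper sketch: rather than eyeballing the printouts above,
# CPU/GPU parity can be asserted numerically. Assumes losses and gradients
# are Tensors exposing .cpu().data as a NumPy array, as elsewhere in these
# tests.
def assert_close(gpu_tensor, cpu_tensor, atol=1e-5):
    assert np.allclose(gpu_tensor.cpu().data, cpu_tensor.data, atol=atol)

# e.g., at the end of test_gpu_vs_cpu:
#     assert_close(loss_gpu, loss_cpu)
#     assert_close(linear_gpu.weight.grad, linear_cpu.weight.grad)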

def test_expand():
    data = np.array([1, 2, 3])
    x_gpu = Tensor(data, device='cuda')
    x_cpu = Tensor(data, device='cpu')
    print(x_gpu.expand(dim=0, copies=5))
    print(x_cpu.expand(dim=0, copies=5))
    print('norm', x_gpu.norm())
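
# A parity sketch, assuming expand(dim=0, copies=5) stacks copies along a
# new leading axis; np.tile reproduces that layout.
def test_expand_matches_numpy():
    data = np.array([1, 2, 3])
    expanded = Tensor(data, device='cuda').expand(dim=0, copies=5)
    assert np.allclose(expanded.cpu().data, np.tile(data, (5, 1)))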

def test_softmax():
    data = np.random.rand(5, 2)
    x_gpu = Tensor(data=data, device='cuda')
    x_cpu = Tensor(data=data, device='cpu')

    print(x_gpu.softmax())
    print(x_cpu.softmax())
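
# A NumPy reference for softmax, assuming the library normalises over the
# last axis (matching the (batch, classes) layouts used in these tests).
def test_softmax_matches_numpy():
    data = np.random.rand(5, 2)
    exp = np.exp(data - data.max(axis=1, keepdims=True))
    expected = exp / exp.sum(axis=1, keepdims=True)
    out = Tensor(data=data, device='cuda').softmax()
    assert np.allclose(out.cpu().data, expected, atol=1e-5)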

def test_lstm_cell():
    embeddings = Embedding.init(
        vocab_size=10,
        embedding_dim=5,
        device='cuda',
        autograd=True,
    )
    lstm_cell = LSTMCell(
        input_dim=5,
        hidden_dim=100,
        device='cuda',
    )
    print('weight before backward')
    print(embeddings.weight)

    x = embeddings(Tensor([[1, 2, 3], [2, 3, 4]], device='cpu'))
    print('x')
    print(x)
    hidden = None
    for time_step in x:
        _, hidden = lstm_cell(time_step, hidden=hidden)
    target = Tensor([3, 5, 2], device='cpu', d_type=np.int32)
    criterion = CrossEntropyLoss()
    optimizer = SGD(
        parameters=[
            *embeddings.get_parameters(),
            *lstm_cell.get_parameters(),
        ],
        lr=0.01,
    )
    loss = criterion(hidden[0], target)
    print('loss = ', loss)
    loss.backward()
    optimizer.step(zero=True)
    print('weight after backward')
    print(embeddings.weight)
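
# A sketch turning the before/after printout above into an actual check,
# assuming embeddings.weight.cpu().data exposes a NumPy array (copied so
# the in-place SGD update cannot mutate the snapshot).
def test_lstm_cell_updates_weights():
    embeddings = Embedding.init(vocab_size=10, embedding_dim=5,
                                device='cuda', autograd=True)
    lstm_cell = LSTMCell(input_dim=5, hidden_dim=100, device='cuda')
    before = embeddings.weight.cpu().data.copy()
    x = embeddings(Tensor([[1, 2, 3], [2, 3, 4]], device='cpu'))
    hidden = None
    for time_step in x:
        _, hidden = lstm_cell(time_step, hidden=hidden)
    target = Tensor([3, 5, 2], device='cpu', d_type=np.int32)
    loss = CrossEntropyLoss()(hidden[0], target)
    optimizer = SGD(parameters=[*embeddings.get_parameters(),
                                *lstm_cell.get_parameters()], lr=0.01)
    loss.backward()
    optimizer.step(zero=True)
    assert not np.allclose(before, embeddings.weight.cpu().data)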

def test_lstm_layer():
    embeddings = Embedding.init(
        vocab_size=10,
        embedding_dim=5,
        device='cuda',
        autograd=True,
    )
    lstm = LSTMLayer(
        input_dim=5,
        hidden_dim=100,
        device='cuda',
    )
    h2o = Linear(
        n_inputs=100,
        n_outputs=10,
        bias=True,
        device='cuda',
    )
    criterion = CrossEntropyLoss()
    optimizer = SGD(parameters=[
        *embeddings.get_parameters(),
        *lstm.get_parameters(),
        *h2o.get_parameters(),
    ])
    print(len(optimizer.parameters))
    x = embeddings(Tensor([[1, 2, 3], [2, 3, 4]], device='cpu'))
    target = Tensor([3, 5, 2], device='cpu', d_type=np.int32)
    output = h2o(lstm(x)[0][-1])
    loss = criterion(input=output, target=target)
    loss.backward()
    print('embedding before backward')
    print(embeddings.weight)
    optimizer.step()
    print('--------------')
    print('embedding after backward')
    print(embeddings.weight)

def test_basic_ops(x: Tensor, y: Tensor):
    print('x = ', x)
    print('y = ', y)
    print('x + y = ', x + y)
    print('x.sigmoid = ', x.sigmoid())
    print('x.tanh = ', x.tanh())
    # print('x.softmax = ', x.softmax())
    print('2 * x = ', Tensor(2) * x)
    print('x * y = ', x * y)
    print('x - y = ', x - y)
    print('x - 2 = ', x - Tensor(2, device='cuda'))
    print('x + 2 = ', x + Tensor(2, device='cuda'))
    print('-x = ', -x)
    print('x.sum = ', x.sum(0))
    print('x.expand(0, 3) = ', x.expand(0, 3))
    print('x.expand(1, 3) = ', x.expand(1, 3))

def test_get_item():
    x_cpu = np.random.rand(10, 5, 5)
    x = Tensor(x_cpu, device='cuda')
    print(x[:, :, 1])
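
# A slicing parity sketch: indexing the CUDA tensor should agree with
# slicing the source NumPy array.
def test_get_item_matches_numpy():
    x_cpu = np.random.rand(10, 5, 5)
    x = Tensor(x_cpu, device='cuda')
    assert np.allclose(x[:, :, 1].cpu().data, x_cpu[:, :, 1])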

def test_mm():
    # the original snippet used `x` before defining it; build the small
    # vector first so the expand/mm calls have an operand
    x = Tensor(np.array([1, 2, 3], dtype=np.float32), device='cuda')
    x1 = x.expand(0, 3)
    x2 = x.expand(1, 3)
    print(x1.mm(x2))
    x = Tensor(np.random.rand(1, 3).astype(np.float32), device='cuda')
    y = Tensor(np.random.rand(3, 1).astype(np.float32), device='cuda')
    print(x, y)
    x_t = x.transpose()
    y_t = y.transpose()
    print(x_t.mm(y_t))
    x = Tensor(np.random.rand(6), device='cuda')
    y = Tensor(np.random.rand(6), device='cuda')
    print(x.reshape((2, 3)).mm(y.reshape((3, 2))))