Example 1
def test_custom():
    fn = lambda t1, t2: t1 * t2
    t1 = minitorch.tensor_fromlist([[1, 2], [4, 5]])
    t2 = minitorch.tensor_fromlist([[1, 4]])
    ts = [
        t1,
        t2,
    ]
    minitorch.grad_check(fn, *ts)
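grad_check validates the analytic gradients from backpropagation against a numerical estimate. A minimal sketch of the underlying idea, using a central difference (not minitorch's actual implementation):

def central_difference(f, x, eps=1e-6):
    # f'(x) ~ (f(x + eps) - f(x - eps)) / (2 * eps)
    return (f(x + eps) - f(x - eps)) / (2 * eps)

# For example, the derivative of x**2 at x = 3.0 is close to 6.0:
assert abs(central_difference(lambda v: v * v, 3.0) - 6.0) < 1e-4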
Example 2
def test_conv1d_simple():
    t = minitorch.tensor_fromlist([0, 1, 2, 3]).view(1, 1, 4)
    t.requires_grad_(True)
    t2 = minitorch.tensor_fromlist([[1, 2, 3]]).view(1, 1, 3)
    out = minitorch.Conv1dFun.apply(t, t2)

    assert out[0, 0, 0] == 0 * 1 + 1 * 2 + 2 * 3
    assert out[0, 0, 1] == 1 * 1 + 2 * 2 + 3 * 3
    assert out[0, 0, 2] == 2 * 1 + 3 * 2
    assert out[0, 0, 3] == 3 * 1
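The asserts pin down the convention used by Conv1dFun here: the output has the same length as the input, the kernel is anchored at each output position and slides to the right, and positions past the right edge contribute zero. A naive pure-Python reference with the same convention (an illustrative sketch, not minitorch code):

def conv1d_ref(xs, ws):
    # xs: input values, ws: kernel values; zero-padded on the right.
    out = []
    for i in range(len(xs)):
        acc = 0.0
        for k, w in enumerate(ws):
            if i + k < len(xs):
                acc += xs[i + k] * w
        out.append(acc)
    return out

assert conv1d_ref([0, 1, 2, 3], [1, 2, 3]) == [8, 14, 8, 3]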
Example 3
def test_conv1d_in_channel():
    t = minitorch.tensor_fromlist([[0, 1, 2, 3], [0, 1, 2, 3]]).view(1, 2, 4)
    t.requires_grad_(True)
    t2 = minitorch.tensor_fromlist([[1, 2, 3], [1, 2, 3]]).view(1, 2, 3)
    out = minitorch.Conv1dFun.apply(t, t2)

    assert out[0, 0, 0] == (0 * 1 + 1 * 2 + 2 * 3) * 2
    assert out[0, 0, 1] == (1 * 1 + 2 * 2 + 3 * 3) * 2
    assert out[0, 0, 2] == (2 * 1 + 3 * 2) * 2
    assert out[0, 0, 3] == (3 * 1) * 2
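With two identical input channels and identical kernel rows, every output entry is exactly double the single-channel result from Example 2, which is what the * 2 factors in the asserts reflect.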
Example 4
def test_conv1d_simple2():
    t = minitorch.tensor_fromlist([[1, 2, 1, 2, 3, 4],
                                   [2, 1, 3, 2, 3, 2.]]).view(1, 2, 6)
    t.requires_grad_(True)
    t2 = minitorch.tensor_fromlist([[[3, 2, 1], [1, 2, 3.]],
                                    [[3, 2, 1], [1, 2, 3.]]]).view(2, 2, 3)
    out = minitorch.Conv1dFun.apply(t, t2)
    print(out)
    assert out[0, 1, 2] == 26
    minitorch.grad_check(minitorch.Conv1dFun.apply, t, t2)
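The asserted value can be checked by hand: out[0, 1, 2] sums both input channels at position 2. Channel 0 contributes 1 * 3 + 2 * 2 + 3 * 1 = 10 and channel 1 contributes 3 * 1 + 2 * 2 + 3 * 3 = 16, for a total of 26.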
Example 5
def test_conv2():
    t = minitorch.tensor_fromlist([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
                                   [0, 1, 2, 3]]).view(1, 1, 4, 4)
    t.requires_grad_(True)

    t2 = minitorch.tensor_fromlist([[1, 1], [1, 1]]).view(1, 1, 2, 2)
    t2.requires_grad_(True)
    out = minitorch.Conv2dFun.apply(t, t2)
    out.sum().backward()

    minitorch.grad_check(minitorch.Conv2dFun.apply, t, t2)
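Because the 2x2 kernel is all ones, each output entry is simply the sum of a 2x2 window of the input, and out.sum().backward() gives each input pixel a gradient equal to the number of windows covering it; grad_check then confirms those gradients numerically.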
Example 6
def test_reduce_forward_all_dims():
    # shape (3, 2)
    t = minitorch.tensor_fromlist([[2, 3], [4, 6], [5, 7]])

    # reduce all dims, (3 -> 1, 2 -> 1)
    t_summed_all = t.sum()

    # shape (1,)
    t_summed_all_expected = minitorch.tensor_fromlist([27])

    assert_close(t_summed_all[0], t_summed_all_expected[0])
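The expected value is the sum of all six entries: 2 + 3 + 4 + 6 + 5 + 7 = 27.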
Example 7
def test_reduce_forward_one_dim():
    # shape (3, 2)
    t = minitorch.tensor_fromlist([[2, 3], [4, 6], [5, 7]])

    # here 0 means to reduce the 0th dim, 3 -> 1
    t_summed = t.sum(0)

    # shape (1, 2)
    t_sum_expected = minitorch.tensor_fromlist([[11, 16]])

    for ind in t_summed._tensor.indices():
        assert_close(t_summed[ind], t_sum_expected[ind])
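Reducing dim 0 sums down each column: [2 + 4 + 5, 3 + 6 + 7] = [11, 16]. The reduced dimension is kept with size 1, hence the expected shape (1, 2).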
Example 8
def test_reduce_forward_one_dim_2():
    # shape (3, 2)
    t = minitorch.tensor_fromlist([[2, 3], [4, 6], [5, 7]])

    # here 1 means reduce the 1st dim, 2 -> 1
    t_summed_2 = t.sum(1)

    # shape (3, 1)
    t_sum_2_expected = minitorch.tensor_fromlist([[5], [10], [12]])

    for ind in t_summed_2._tensor.indices():
        assert_close(t_summed_2[ind], t_sum_2_expected[ind])
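Likewise, reducing dim 1 sums each row: [2 + 3, 4 + 6, 5 + 7] = [5, 10, 12], with shape (3, 1).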
Example 9
def test_mul():
    t1 = minitorch.tensor_fromlist([[1.0, 2.0]])
    t2 = minitorch.tensor_fromlist([[-1.0], [3.0]])
    expected = minitorch.tensor_fromlist([[-1.0, -2.0], [3.0, 6.0]])
    print(t1.shape)
    print(t2.shape)
    observed = t1 * t2
    print(observed)
    print(expected)

    for ind in observed._tensor.indices():
        assert_close(expected[ind], observed[ind])
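The shapes (1, 2) and (2, 1) broadcast to (2, 2): row i of the result is t2[i, 0] * [1.0, 2.0], giving [[-1, -2], [3, 6]] as in expected.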
Example 10
def _matmul(m1, m2, expect, op=minitorch.matmul):
    a = minitorch.tensor_fromlist(m1)
    b = minitorch.tensor_fromlist(m2)
    c = op(a, b)
    c_expect = minitorch.tensor_fromlist(expect)

    print(f'{a.shape} * {b.shape} = {c_expect.shape}')
    print('result', c)
    print('expect', c_expect)

    for ind in c._tensor.indices():
        assert_close(c[ind], c_expect[ind])

    return c, c_expect
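A typical call, with a 2x2 case whose product is easy to verify by hand (the values here are illustrative, not from the original test suite):

_matmul([[1, 2], [3, 4]],
        [[5, 6], [7, 8]],
        [[19, 22], [43, 50]])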
Example 11
def test_conv1d_simple_backward():
    input_tensor = minitorch.tensor_fromlist([0, 1, 2, 3]).view(1, 1, 4)
    weight = minitorch.tensor_fromlist([[1, 2, 3]]).view(1, 1, 3)
    grad_output = minitorch.tensor_fromlist([0, 1, 2, 3]).view(1, 1, 4)
    ctx = minitorch.Context()
    ctx.save_for_backward(input_tensor, weight)
    grad_input, grad_weight = minitorch.Conv1dFun.backward(ctx, grad_output)

    assert grad_input[0, 0, 0] == weight[0, 0, 0] * grad_output[0, 0, 0]
    assert (grad_input[0, 0, 1] == weight[0, 0, 0] * grad_output[0, 0, 1] +
            weight[0, 0, 1] * grad_output[0, 0, 0])
    assert (grad_input[0, 0, 2] == weight[0, 0, 0] * grad_output[0, 0, 2] +
            weight[0, 0, 1] * grad_output[0, 0, 1] +
            weight[0, 0, 2] * grad_output[0, 0, 0])
    assert (grad_input[0, 0, 3] == weight[0, 0, 0] * grad_output[0, 0, 3] +
            weight[0, 0, 1] * grad_output[0, 0, 2] +
            weight[0, 0, 2] * grad_output[0, 0, 1])
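These asserts spell out the standard result that the input gradient of a 1D convolution is a correlation of grad_output with the flipped kernel: grad_input[j] accumulates weight[k] * grad_output[j - k] for every k with j - k in range.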
Example 12
def test_tile():
    t = minitorch.tensor_fromlist([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                                   [13, 14, 15, 16]]).view(1, 1, 4, 4)
    tiled, _, _ = minitorch.tile(t, (2, 2))
    assert tiled[0, 0, 0, 0, 0] == 1
    assert tiled[0, 0, 0, 0, 1] == 2
    assert tiled[0, 0, 0, 0, 2] == 5
    assert tiled[0, 0, 0, 0, 3] == 6
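tile reshapes the (1, 1, 4, 4) input so that each 2x2 pooling block ends up in the last dimension: the first block [[1, 2], [5, 6]] is flattened to [1, 2, 5, 6], which is exactly what the four asserts read back.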
Example 13
def test_softmax():
    t = minitorch.tensor_fromlist([
        [
            [
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00]]]])
    q = minitorch.softmax(t, 2)
    print('=====t')
    print(t)
    print('=====q')
    print(q)
    x = q.sum(dim=2)
    print('=====x')
    print(x)
    assert_close(x[0, 0, 0, 0], 1.0)

    q = minitorch.softmax(t, 1)
    x = q.sum(dim=1)
    assert_close(x[0, 0, 0, 0], 1.0)

    minitorch.grad_check(lambda a: minitorch.softmax(a, dim=2), t)
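With an all-zero input, softmax along the size-4 dim 2 assigns 0.25 to every entry, so the four entries along that dim sum to 1.0; softmax along the size-1 dim 1 is trivially 1.0 everywhere.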
Example 14
class Linear(minitorch.Module):
    def __init__(self, in_size, out_size):
        super().__init__()
        self.weights = RParam(in_size, out_size)
        self.bias = RParam(out_size)
        self.out_size = out_size

    def forward(self, x):
        # TODO: Implement for Task 2.5.
        batch, in_size = x.shape
        return (
            (self.weights.value.view(1, in_size, self.out_size)
             * x.view(batch, in_size, 1))
            .sum(1)
            .view(batch, self.out_size)
            + self.bias.value.view(self.out_size)
        )


model = Network()
data = DATASET

X = minitorch.tensor_fromlist(data.X)
y = minitorch.tensor(data.y)

losses = []
for epoch in range(250):
    total_loss = 0.0
    correct = 0
    start = time.time()

    # Forward
    out = model.forward(X).view(data.N)

    prob = (out * y) + (out - 1.0) * (y - 1.0)
    for i, lab in enumerate(data.y):
        if lab == 1 and out[i] > 0.5:
            correct += 1
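This forward pass implements an affine layer without a matmul primitive: viewing the weights as (1, in_size, out_size) and the input as (batch, in_size, 1), their broadcast product summed over dim 1 is exactly x @ weights. A plain-Python sketch of the same contraction (illustrative only):

def affine_ref(x, w, b):
    # x: batch x in_size, w: in_size x out_size, b: out_size (plain lists)
    return [
        [sum(x[n][i] * w[i][j] for i in range(len(w))) + b[j]
         for j in range(len(b))]
        for n in range(len(x))
    ]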
Example 15
def test_fromlist():
    t = minitorch.tensor_fromlist([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    t = minitorch.tensor_fromlist([[[2, 3, 4], [4, 5, 7]]])
    assert t.shape == (1, 2, 3)
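tensor_fromlist infers the shape from the nesting of the input lists, so wrapping the same 2x3 data in one extra list produces shape (1, 2, 3).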
Example 16
class Linear(minitorch.Module):
    def __init__(self, in_size, out_size):
        super().__init__()
        self.weights = RParam(in_size, out_size)
        self.bias = RParam(out_size)
        self.out_size = out_size

    def forward(self, x):
        # TODO: Implement for Task 3.5.
        return (x @ self.weights.value
                + self.bias.value.view(1, *self.bias.value.shape))


model = Network()
data = DATASET

X = minitorch.tensor_fromlist(data.X, backend=BACKEND)
y = minitorch.tensor(data.y, backend=BACKEND)


losses = []
for epoch in range(250):
    total_loss = 0.0

    start = time.time()

    # Forward
    out = model.forward(X).view(data.N)
    prob = (out * y) + (out - 1.0) * (y - 1.0)
    loss = -prob.log()
    (loss.sum().view(1)).backward()
    total_loss += loss[0]
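Compared to Example 14, this Linear uses the @ operator directly, relying on the accelerated matmul backend. The probability trick is worth unpacking: since each label y is 0 or 1, prob = (out * y) + (out - 1.0) * (y - 1.0) reduces to out when y == 1 and to 1 - out when y == 0, so -prob.log() is the usual binary cross-entropy for a sigmoid output.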
Example 17

model = Network()

losses = []
for epoch in range(250):
    total_loss = 0.0
    cur = 0
    cur_y = 0

    model.train()
    for batch_num, example_num in enumerate(range(0, N, BATCH)):
        if N - example_num <= BATCH:
            continue
        y = minitorch.tensor_fromlist(
            ys[example_num: example_num + BATCH], backend=BACKEND
        )
        x = minitorch.tensor_fromlist(
            X[example_num: example_num + BATCH], backend=BACKEND
        )
        x.requires_grad_(True)
        y.requires_grad_(True)

        # Forward
        out = model.forward(x.view(BATCH, 1, H, W)).view(BATCH, C)
        prob = (out * y).sum(1)
        loss = -prob.sum()
        loss.view(1).backward()
        total_loss += loss
        losses.append(total_loss)
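Two details worth noting: the continue skips the final partial batch so every batch has exactly BATCH examples, and with one-hot labels (out * y).sum(1) selects the model's output for the true class, making -prob.sum() a negative log-likelihood when the network's outputs are log-probabilities (as with a log-softmax final layer).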
Example 18
model = Network()
for p in model.parameters():
    p.value.type_(BACKEND)

losses = []
for epoch in range(250):
    total_loss = 0.0
    cur = 0
    cur_y = 0

    model.train()
    for i, j in enumerate(range(0, N, BATCH)):
        if N - j <= BATCH:
            continue
        y = minitorch.tensor_fromlist(ys[j:j + BATCH])
        x = minitorch.tensor_fromlist(X[j:j + BATCH])
        x.requires_grad_(True)
        y.requires_grad_(True)
        y.type_(BACKEND)
        x.type_(BACKEND)

        # Forward
        out = model.forward(x.view(BATCH, 1, H, W)).view(BATCH, C)
        prob = (out * y).sum(1)
        loss = -prob.sum()
        loss.view(1).backward()
        total_loss += loss
        losses.append(total_loss)

        # Update
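Example 18 differs from Example 17 only in how tensors reach the backend: the parameters are converted in place with p.value.type_(BACKEND) and each batch with x.type_(BACKEND) / y.type_(BACKEND), rather than passing backend= at construction time.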