Example #1
def test_sum_practice3():
    x = [random.random() for i in range(48)]
    b = minitorch.tensor(x)
    s = b.sum()[0]
    b2 = minitorch.tensor(x, backend=shared["cuda"])
    out = minitorch.sum_practice(b2)
    assert_close(s, out._storage[0] + out._storage[1])
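
Adding the two storage slots suggests the CUDA sum_practice kernel leaves one partial sum per block. A pure-Python sketch of that block reduction, assuming a block size of 32 (the constant is an assumption, not taken from the example):

def block_sum(xs, block=32):
    # one partial sum per block, mirroring what each CUDA block would produce
    return [sum(xs[i:i + block]) for i in range(0, len(xs), block)]

partials = block_sum([0.5] * 48)   # 48 elements -> 2 partial sums
assert abs(sum(partials) - 0.5 * 48) < 1e-9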
Example #2
def test_sum_practice5():
    x = [random.random() for i in range(500)]
    b = minitorch.tensor(x)
    s = b.sum()[0]
    b2 = minitorch.tensor(x, backend=shared["cuda"])
    out = b2.sum(0)
    assert_close(s, out[0])
Example #3
def test_sum_practice_other_dims():
    x = [[random.random() for i in range(32)] for j in range(16)]
    b = minitorch.tensor(x)
    s = b.sum(1)
    b2 = minitorch.tensor(x, backend=shared["cuda"])
    out = b2.sum(1)
    for i in range(16):
        assert_close(s[i, 0], out[i, 0])
Example #4
def test_fromnumpy():
    t = tensor([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    n = t.to_numpy()
    t2 = tensor(n.tolist())
    for ind in t._tensor.indices():
        assert t[ind] == t2[ind]
Example #5
def test_reduce_forward_one_dim():
    # shape (3, 2)
    t = tensor([[2, 3], [4, 6], [5, 7]])

    # sum(0) reduces dim 0: shape (3, 2) -> (1, 2)
    t_summed = t.sum(0)

    # shape (1, 2)
    t_sum_expected = tensor([[11, 16]])
    assert t_summed.is_close(t_sum_expected).all().item()
Example #6
def test_reduce_forward_one_dim_2():
    # shape (3, 2)
    t = tensor([[2, 3], [4, 6], [5, 7]])

    # sum(1) reduces dim 1: shape (3, 2) -> (3, 1)
    t_summed_2 = t.sum(1)

    # shape (3, 1)
    t_sum_2_expected = tensor([[5], [10], [12]])
    assert t_summed_2.is_close(t_sum_2_expected).all().item()
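
Both of the reductions above keep the reduced dimension with size 1, as the expected tensors show. A NumPy cross-check of the same shapes (an illustration, not part of the suite):

import numpy as np

a = np.array([[2, 3], [4, 6], [5, 7]])
assert (a.sum(axis=0, keepdims=True) == [[11, 16]]).all()
assert (a.sum(axis=1, keepdims=True) == [[5], [10], [12]]).all()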
Example #7
def test_conv1d_simple():
    t = minitorch.tensor([0, 1, 2, 3]).view(1, 1, 4)
    t.requires_grad_(True)
    t2 = minitorch.tensor([[1, 2, 3]]).view(1, 1, 3)
    out = minitorch.Conv1dFun.apply(t, t2)

    assert out[0, 0, 0] == 0 * 1 + 1 * 2 + 2 * 3
    assert out[0, 0, 1] == 1 * 1 + 2 * 2 + 3 * 3
    assert out[0, 0, 2] == 2 * 1 + 3 * 2
    assert out[0, 0, 3] == 3 * 1
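
The four asserts spell out a 1D convolution that anchors the kernel at each input position and drops terms that run off the right edge. A hypothetical pure-Python reference with the same convention:

def conv1d_ref(xs, ws):
    # out[t] = sum over k of xs[t + k] * ws[k], truncated at the boundary
    return [sum(xs[t + k] * w for k, w in enumerate(ws) if t + k < len(xs))
            for t in range(len(xs))]

assert conv1d_ref([0, 1, 2, 3], [1, 2, 3]) == [8, 14, 8, 3]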
Example #8
def test_grad_size():
    "Check that extra grad dim is removed (from @WannaFy)"
    a = tensor([1], requires_grad=True)
    b = tensor([[1, 1]], requires_grad=True)

    c = (a * b).sum()

    c.backward()
    assert c.shape == (1, )
    assert a.shape == a.grad.shape
    assert b.shape == b.grad.shape
Example #9
def build_tensor_expression(code):
    out = eval(
        code,
        {
            "x": minitorch.tensor([[1.0, 2.0, 3.0]], requires_grad=True),
            "y": minitorch.tensor([[1.0, 2.0, 3.0]], requires_grad=True),
            "z": minitorch.tensor([[1.0, 2.0, 3.0]], requires_grad=True),
        },
    )
    out.name = "out"
    return out
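
A possible call (the expression string is arbitrary Python over the bound names x, y, z; "x * y + z" is only an illustration):

out = build_tensor_expression("x * y + z")
out.sum().view(1).backward()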
Example #10
def test_reduce_forward_all_dims():
    # shape (3, 2)
    t = tensor([[2, 3], [4, 6], [5, 7]])

    # reduce over all dims: shape (3, 2) -> (1,)
    t_summed_all = t.sum()

    # shape (1,)
    t_summed_all_expected = tensor([27])

    assert_close(t_summed_all[0], t_summed_all_expected[0])
Example #11
    def test_mul_practice2():
        x = [[random.random() for i in range(32)] for j in range(32)]
        y = [[random.random() for i in range(32)] for j in range(32)]
        z = minitorch.tensor(x, backend=shared["fast"]) @ minitorch.tensor(
            y, backend=shared["fast"])

        x = minitorch.tensor(x, backend=shared["cuda"])
        y = minitorch.tensor(y, backend=shared["cuda"])
        z2 = minitorch.mm_practice(x, y)
        for i in range(32):
            for j in range(32):
                assert_close(z[i, j], z2._storage[32 * i + j])
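
The offset 32 * i + j reflects row-major (C-order) storage: element (i, j) of a 32x32 matrix sits at i * 32 + j in the flat buffer. A quick illustration:

def flat_index(i, j, cols=32):
    # row-major offset of element (i, j)
    return i * cols + j

assert flat_index(0, 0) == 0 and flat_index(1, 2) == 34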
Example #12
def test_conv2():
    t = minitorch.tensor([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]).view(
        1, 1, 4, 4
    )
    t.requires_grad_(True)

    t2 = minitorch.tensor([[1, 1], [1, 1]]).view(1, 1, 2, 2)
    t2.requires_grad_(True)
    out = minitorch.Conv2dFun.apply(t, t2)
    out.sum().backward()

    minitorch.grad_check(minitorch.Conv2dFun.apply, t, t2)
Example #13
    def test_mul_practice3():
        "Small real example"
        x = [[random.random() for i in range(2)] for j in range(2)]
        y = [[random.random() for i in range(2)] for j in range(2)]
        z = minitorch.tensor(x, backend=shared["fast"]) @ minitorch.tensor(
            y, backend=shared["fast"])

        x = minitorch.tensor(x, backend=shared["cuda"])
        y = minitorch.tensor(y, backend=shared["cuda"])
        z2 = x @ y

        for i in range(2):
            for j in range(2):
                assert_close(z[i, j], z2[i, j])
Example #14
    def test_mul_practice4():
        "Extend to require 2 blocks"
        size = 33
        x = [[random.random() for i in range(size)] for j in range(size)]
        y = [[random.random() for i in range(size)] for j in range(size)]
        z = minitorch.tensor(x, backend=shared["fast"]) @ minitorch.tensor(
            y, backend=shared["fast"])

        x = minitorch.tensor(x, backend=shared["cuda"])
        y = minitorch.tensor(y, backend=shared["cuda"])
        z2 = x @ y

        for i in range(size):
            for j in range(size):
                assert_close(z[i, j], z2[i, j])
Example #15
@composite
def vals(draw, size, number):
    # hypothesis strategy: draw `size` numbers and wrap them in a minitorch tensor
    pts = draw(lists(
        number,
        min_size=size,
        max_size=size,
    ))
    return minitorch.tensor(pts)
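
With the @composite decorator, vals acts as a strategy factory. A hypothetical property-based test using it (the test name and bounds are illustrative):

from hypothesis import given
from hypothesis.strategies import floats

@given(vals(size=5, number=floats(min_value=-10.0, max_value=10.0)))
def test_tensor_size(t):
    assert t.shape == (5,)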
Example #16
    def train(self,
              data,
              learning_rate,
              max_epochs=500,
              log_fn=default_log_fn):

        self.model = Network(self.hidden_layers, self.backend)
        optim = minitorch.SGD(self.model.parameters(), learning_rate)
        BATCH = 10
        losses = []

        for epoch in range(max_epochs):
            total_loss = 0.0
            c = list(zip(data.X, data.y))
            random.shuffle(c)
            X_shuf, y_shuf = zip(*c)

            for i in range(0, len(X_shuf), BATCH):
                optim.zero_grad()
                X = minitorch.tensor(X_shuf[i:i + BATCH], backend=self.backend)
                y = minitorch.tensor(y_shuf[i:i + BATCH], backend=self.backend)
                # Forward

                out = self.model.forward(X).view(y.shape[0])
                prob = (out * y) + (out - 1.0) * (y - 1.0)
                loss = -prob.log()
                (loss / y.shape[0]).sum().view(1).backward()

                total_loss = loss.sum().view(1)[0]

                # Update
                optim.step()

            losses.append(total_loss)
            # Logging
            if epoch % 10 == 0 or epoch == max_epochs - 1:
                X = minitorch.tensor(data.X, backend=self.backend)
                y = minitorch.tensor(data.y, backend=self.backend)
                out = self.model.forward(X).view(y.shape[0])
                y2 = minitorch.tensor(data.y)
                correct = int(((out.get_data() > 0.5) == y2).sum()[0])
                log_fn(epoch, total_loss, correct, losses)
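
The prob line is the usual Bernoulli-likelihood trick: it evaluates to out where the label is 1 and to 1 - out where the label is 0, so -prob.log() is binary cross-entropy. A scalar sanity check (illustration only):

for y, out in [(1.0, 0.9), (0.0, 0.9)]:
    prob = (out * y) + (out - 1.0) * (y - 1.0)
    assert prob == (out if y == 1.0 else 1.0 - out)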
Example #17
def test_view():
    t = tensor([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    t2 = t.view(6)
    assert t2.shape == (6, )
    t2 = t2.view(1, 6)
    assert t2.shape == (1, 6)
    t2 = t2.view(6, 1)
    assert t2.shape == (6, 1)
    t2 = t2.view(2, 3)
    assert t.is_close(t2).all().item() == 1.0
Example #18
    def test_mul_practice6():
        "Extend to require a batch"
        size_a = 45
        size_b = 40
        size_in = 33
        x = [[[random.random() for i in range(size_in)] for j in range(size_a)]
             for _ in range(2)]
        y = [[[random.random() for i in range(size_b)] for j in range(size_in)]
             for _ in range(2)]
        z = minitorch.tensor(x, backend=shared["fast"]) @ minitorch.tensor(
            y, backend=shared["fast"])

        x = minitorch.tensor(x, backend=shared["cuda"])
        y = minitorch.tensor(y, backend=shared["cuda"])
        z2 = x @ y

        for b in range(2):
            for i in range(size_a):
                for j in range(size_b):
                    assert_close(z[b, i, j], z2[b, i, j])
Example #19
    def train(self,
              data,
              learning_rate,
              max_epochs=500,
              log_fn=default_log_fn):

        self.learning_rate = learning_rate
        self.max_epochs = max_epochs
        self.model = Network(self.hidden_layers)
        optim = minitorch.SGD(self.model.parameters(), learning_rate)

        X = minitorch.tensor(data.X)
        y = minitorch.tensor(data.y)

        losses = []
        for epoch in range(1, self.max_epochs + 1):
            total_loss = 0.0
            correct = 0
            optim.zero_grad()

            # Forward
            out = self.model.forward(X).view(data.N)
            prob = (out * y) + (out - 1.0) * (y - 1.0)

            loss = -prob.log()
            (loss / data.N).sum().view(1).backward()
            total_loss = loss.sum().view(1)[0]
            losses.append(total_loss)

            # Update
            optim.step()

            # Logging
            if epoch % 10 == 0 or epoch == max_epochs:
                y2 = minitorch.tensor(data.y)
                correct = int(((out.get_data() > 0.5) == y2).sum()[0])
                log_fn(epoch, total_loss, correct, losses)
Example #20
def test_permute_view():
    t = tensor([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    t2 = t.permute(1, 0)
    t2.view(6)
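
view requires contiguous storage, so the last line raises (the suite marks this test as expected to fail). Materializing the permutation first would make it legal; a sketch assuming minitorch's contiguous():

t3 = t.permute(1, 0).contiguous().view(6)   # copy into row-major order first
assert t3.shape == (6,)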
Example #21
    def train(
        self,
        data_train,
        learning_rate,
        batch_size=10,
        max_epochs=500,
        data_val=None,
        log_fn=default_log_fn,
    ):
        model = self.model
        (X_train, y_train) = data_train
        n_training_samples = len(X_train)
        optim = minitorch.SGD(self.model.parameters(), learning_rate)
        losses = []
        train_accuracy = []
        validation_accuracy = []
        for epoch in range(1, max_epochs + 1):
            total_loss = 0.0

            model.train()
            train_predictions = []
            batch_size = min(batch_size, n_training_samples)
            for batch_num, example_num in enumerate(
                    range(0, n_training_samples, batch_size)):
                y = minitorch.tensor(y_train[example_num:example_num +
                                             batch_size],
                                     backend=BACKEND)
                x = minitorch.tensor(X_train[example_num:example_num +
                                             batch_size],
                                     backend=BACKEND)
                x.requires_grad_(True)
                y.requires_grad_(True)
                # Forward
                out = model.forward(x)
                prob = (out * y) + (out - 1.0) * (y - 1.0)
                loss = -(prob.log() / y.shape[0]).sum()
                loss.view(1).backward()

                # Save train predictions
                train_predictions += get_predictions_array(y, out)
                total_loss += loss[0]

                # Update
                optim.step()

            # Evaluate on validation set at the end of the epoch
            validation_predictions = []
            if data_val is not None:
                (X_val, y_val) = data_val
                model.eval()
                y = minitorch.tensor(
                    y_val,
                    backend=BACKEND,
                )
                x = minitorch.tensor(
                    X_val,
                    backend=BACKEND,
                )
                out = model.forward(x)
                validation_predictions += get_predictions_array(y, out)
                validation_accuracy.append(
                    get_accuracy(validation_predictions))
                model.train()

            train_accuracy.append(get_accuracy(train_predictions))
            losses.append(total_loss)
            log_fn(
                epoch,
                total_loss,
                losses,
                train_predictions,
                train_accuracy,
                validation_predictions,
                validation_accuracy,
            )
            total_loss = 0.0
Example #22
def plot(x):
    return model.forward(minitorch.tensor(x, (1, 2), backend=BACKEND))[0, 0]
Example #23
def test_create(t1):
    t2 = minitorch.tensor(t1)
    for i in range(len(t1)):
        assert t1[i] == t2[i]
Example #24
def test_fromlist():
    t = tensor([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    t = tensor([[[2, 3, 4], [4, 5, 7]]])
    assert t.shape == (1, 2, 3)
Example #25
def test_create(backend, t1):
    "Create different tensors."
    t2 = minitorch.tensor(t1, backend=shared[backend])
    for i in range(len(t1)):
        assert t1[i] == t2[i]
Example #26
        )
        self.bias = minitorch.Parameter(2 * (minitorch.rand((out_size,)) - 0.5))
        self.out_size = out_size

    def forward(self, x):
        batch, in_size = x.shape
        return (
            self.weights.value.view(1, in_size, self.out_size)
            * x.view(batch, in_size, 1)
        ).sum(1).view(batch, self.out_size) + self.bias.value.view(1, self.out_size)


model = Network()
data = DATASET

X = minitorch.tensor([v for x in data.X for v in x], (data.N, 2))
y = minitorch.tensor(data.y)

for epoch in range(250):
    total_loss = 0.0
    correct = 0
    start = time.time()
    out = model.forward(X).view(data.N)
    out.name_("out")

    loss = (out * y) + (out - 1.0) * (y - 1.0)
    for i, lab in enumerate(data.y):
        if lab == 1 and out[i] > 0.5:
            correct += 1
        if lab == 0 and out[i] < 0.5:
            correct += 1
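
The view/sum pattern in forward is a matrix multiply written with broadcasting: (1, in, out) * (batch, in, 1) summed over the middle axis equals x @ w. A NumPy cross-check (an illustration, not minitorch code):

import numpy as np

x = np.random.rand(4, 2)                 # (batch, in_size)
w = np.random.rand(2, 3)                 # (in_size, out_size)
manual = (w.reshape(1, 2, 3) * x.reshape(4, 2, 1)).sum(axis=1)
assert np.allclose(manual, x @ w)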
Example #27
    def train(self,
              data_train,
              data_val,
              learning_rate,
              max_epochs=500,
              log_fn=default_log_fn):
        (X_train, y_train) = data_train
        (X_val, y_val) = data_val
        self.model = Network()
        model = self.model
        n_training_samples = len(X_train)
        optim = minitorch.SGD(self.model.parameters(), learning_rate)
        losses = []
        for epoch in range(1, max_epochs + 1):
            total_loss = 0.0

            model.train()
            for batch_num, example_num in enumerate(
                    range(0, n_training_samples, BATCH)):

                if n_training_samples - example_num <= BATCH:
                    continue
                y = minitorch.tensor(y_train[example_num:example_num + BATCH],
                                     backend=BACKEND)
                x = minitorch.tensor(X_train[example_num:example_num + BATCH],
                                     backend=BACKEND)
                x.requires_grad_(True)
                y.requires_grad_(True)
                # Forward
                out = model.forward(x.view(BATCH, 1, H, W)).view(BATCH, C)
                prob = (out * y).sum(1)
                loss = -(prob / y.shape[0]).sum()

                assert loss.backend == BACKEND
                loss.view(1).backward()

                total_loss += loss[0]
                losses.append(total_loss)

                # Update
                optim.step()

                if batch_num % 5 == 0:
                    model.eval()
                    # Evaluate on a single held-out batch

                    correct = 0
                    for val_example_num in range(0, 1 * BATCH, BATCH):
                        y = minitorch.tensor(
                            y_val[val_example_num:val_example_num + BATCH],
                            backend=BACKEND,
                        )
                        x = minitorch.tensor(
                            X_val[val_example_num:val_example_num + BATCH],
                            backend=BACKEND,
                        )
                        out = model.forward(x.view(BATCH, 1, H,
                                                   W)).view(BATCH, C)
                        # manual argmax over the C output classes
                        for i in range(BATCH):
                            m = -1000
                            ind = -1
                            for j in range(C):
                                if out[i, j] > m:
                                    ind = j
                                    m = out[i, j]
                            if y[i, ind] == 1.0:
                                correct += 1
                    log_fn(epoch, total_loss, correct, losses, model)

                    total_loss = 0.0
                    model.train()
Example #28
def test_index():
    t = tensor([[2, 3, 4], [4, 5, 7]])
    assert t.shape == (2, 3)
    # out-of-range index: should raise an error (the suite marks this
    # test as expected to fail)
    t[50, 2]
Example #29
        self.bias = RParam(out_size)
        self.out_size = out_size

    def forward(self, x):
        # TODO: Implement for Task 2.5.
        batch, in_size = x.shape
        return (self.weights.value.view(1, in_size, self.out_size) *
                x.view(batch, in_size, 1)).sum(1).view(
                    batch, self.out_size) + self.bias.value.view(self.out_size)


model = Network()
data = DATASET

X = minitorch.tensor_fromlist(data.X)
y = minitorch.tensor(data.y)

losses = []
for epoch in range(250):
    total_loss = 0.0
    correct = 0
    start = time.time()

    # Forward
    out = model.forward(X).view(data.N)

    prob = (out * y) + (out - 1.0) * (y - 1.0)
    for i, lab in enumerate(data.y):
        if lab == 1 and out[i] > 0.5:
            correct += 1
        if lab == 0 and out[i] < 0.5:
Example #30
    def __init__(self, in_size, out_size):
        super().__init__()
        self.weights = RParam(in_size, out_size)
        self.bias = RParam(out_size)
        self.out_size = out_size

    def forward(self, x):
        # TODO: Implement for Task 3.5.
        return x @ self.weights.value + self.bias.value.view(1, *self.bias.value.shape)


model = Network()
data = DATASET

X = minitorch.tensor_fromlist(data.X, backend=BACKEND)
y = minitorch.tensor(data.y, backend=BACKEND)


losses = []
for epoch in range(250):
    total_loss = 0.0

    start = time.time()

    # Forward
    out = model.forward(X).view(data.N)
    prob = (out * y) + (out - 1.0) * (y - 1.0)
    loss = -prob.log()
    (loss.sum().view(1)).backward()
    total_loss += loss[0]
    losses.append(total_loss)