Code example #1
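# NOTE: the snippets on this page are excerpts; they assume NumPy and the
# project's Tensor/ops modules are already in scope. The exact import paths
# depend on the project layout, e.g. (illustrative only):
#   import numpy as np
#   from tensor import Tensor
#   import ops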
def test_sum_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = Tensor([5, -2, -9], requires_grad=True)
    t3 = (t1 + t2).sum()
    assert t3.values == 3
    t3.backward(2)
    assert t1.grad.tolist() == [2, 2, 2]
    assert t2.grad.tolist() == [2, 2, 2]
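sum reduces all elements to a single scalar, so its backward pass broadcasts the upstream gradient over the input's shape; the addition then passes it through to both operands unchanged. A quick NumPy check of the numbers above, independent of the Tensor class:

import numpy as np

a = np.array([1, 3, 5])
upstream = 2  # the scalar passed to t3.backward(2)
# d(sum(a + b))/da is 1 everywhere, so every element gets the upstream value.
assert (upstream * np.ones_like(a)).tolist() == [2, 2, 2]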
Code example #2
def test_div_ops():
    t1 = Tensor([1, 2, 5], requires_grad=True)
    t2 = Tensor([8, -2, -10], requires_grad=True)
    t3 = t1 / t2
    assert t3.values.tolist() == [0.125, -1, -0.5]
    t3.backward([1, 1, 1])

    assert t1.grad.tolist() == [0.125, -0.5, -0.1]
    assert t2.grad.tolist() == [-0.015625, -0.5, -0.05]
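Both expected gradients follow from the quotient rule: for t3 = t1 / t2, dt3/dt1 = 1/t2 and dt3/dt2 = -t1/t2**2, each scaled elementwise by the upstream gradient. Reproducing the numbers in plain NumPy:

import numpy as np

a = np.array([1.0, 2.0, 5.0])
b = np.array([8.0, -2.0, -10.0])
g = np.ones(3)           # upstream gradient passed to backward
grad_a = g / b           # d(a/b)/da = 1/b
grad_b = -g * a / b**2   # d(a/b)/db = -a/b^2
assert grad_a.tolist() == [0.125, -0.5, -0.1]
assert grad_b.tolist() == [-0.015625, -0.5, -0.05]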
Code example #3
def test_mul_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = Tensor([5, -2, -9], requires_grad=True)
    t3 = t1 * t2
    assert t3.values.tolist() == [5, -6, -45]
    t3.backward([2, 2, 2])

    assert t1.grad.tolist() == [2 * 5, 2 * (-2), 2 * (-9)]
    assert t2.grad.tolist() == [2 * 1, 2 * 3, 2 * 5]
Code example #4
def test_minimum_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = Tensor([5, -2, 9], requires_grad=True)
    t3 = ops.minimum_(t1, t2)

    assert t3.values.tolist() == [1, -2, 5]
    t3.backward([1, 2, 1])
    assert t1.grad.tolist() == [1, 0, 1]
    assert t2.grad.tolist() == [0, 2, 0]
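The element-wise minimum routes the upstream gradient to whichever input supplied the smaller value; the other input gets zero (tie handling is implementation-specific and not exercised here). The same routing in plain NumPy:

import numpy as np

a = np.array([1, 3, 5])
b = np.array([5, -2, 9])
g = np.array([1, 2, 1])  # upstream gradient
mask = a < b             # True where a provides the minimum
assert (g * mask).tolist() == [1, 0, 1]
assert (g * ~mask).tolist() == [0, 2, 0]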
Code example #5
def test_dot_ops():
    t1 = Tensor([[1, 3, 5], [5, -2, 9]], requires_grad=True)
    t2 = Tensor([[9, 8, 9, 7], [4, 0, 3, 0], [0, 8, 2, 7]], requires_grad=True)
    t3 = t1 @ t2
    assert t3.values.tolist() == [[21, 48, 28, 42], [37, 112, 57, 98]]
    t3.backward([[1, 2, 3, 4], [4, 3, 2, 1]])
    assert t1.grad.tolist() == [[80, 13, 50], [85, 22, 35]]
    assert t2.grad.tolist() == [[21, 17, 13, 9], [-5, 0, 5, 10],
                                [41, 37, 33, 29]]
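Both gradients fall out of the matrix-product rule: with t3 = t1 @ t2 and upstream gradient G, grad(t1) = G @ t2.T and grad(t2) = t1.T @ G. The expected values above check out directly in NumPy:

import numpy as np

a = np.array([[1, 3, 5], [5, -2, 9]])
b = np.array([[9, 8, 9, 7], [4, 0, 3, 0], [0, 8, 2, 7]])
G = np.array([[1, 2, 3, 4], [4, 3, 2, 1]])  # upstream gradient
assert (G @ b.T).tolist() == [[80, 13, 50], [85, 22, 35]]
assert (a.T @ G).tolist() == [[21, 17, 13, 9], [-5, 0, 5, 10], [41, 37, 33, 29]]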
Code example #6
def test_max_ops():
    t1 = Tensor([[1, 3, 5], [3, 7, -2]], requires_grad=True)
    t2 = ops.max(t1, axis=None)
    t3 = ops.max(t1, axis=0)
    assert t2.values == 7
    assert t3.values.tolist() == [3, 7, 5]

    t2.backward()
    assert t1.grad.tolist() == [[0, 0, 0], [0, 1, 0]]
    t1.zero_grad()
    t3.backward([1, 1, 1])
    assert t1.grad.tolist() == [[0, 0, 1], [1, 1, 0]]
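max acts as an indicator in the backward pass: only the positions holding the maximum receive the upstream gradient. The axis=0 case can be reproduced with an equality mask against the column-wise maxima (there are no ties here, so the mask is one-hot per column):

import numpy as np

a = np.array([[1, 3, 5], [3, 7, -2]])
g = np.array([1, 1, 1])  # upstream gradient for the axis=0 reduction
mask = a == a.max(axis=0, keepdims=True)
assert (mask * g).tolist() == [[0, 0, 1], [1, 1, 0]]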
Code example #7
def main(args):
    if args.seed >= 0:
        random_seed(args.seed)

    train_set, valid_set, test_set = prepare_dataset(args.data_dir)
    train_x, train_y = train_set
    test_x, test_y = test_set
    train_y = get_one_hot(train_y, 10)

    train_x = Tensor(train_x)
    train_y = Tensor(train_y)
    test_x = Tensor(test_x)
    test_y = Tensor(test_y)

    net = Net([
        Dense(200),
        ReLU(),
        Dense(100),
        ReLU(),
        Dense(70),
        ReLU(),
        Dense(30),
        ReLU(),
        Dense(10)
    ])

    model = Model(net=net,
                  loss=SoftmaxCrossEntropyLoss(),
                  optimizer=Adam(lr=args.lr))
    loss_layer = SoftmaxCrossEntropyLoss()
    iterator = BatchIterator(batch_size=args.batch_size)
    evaluator = AccEvaluator()
    loss_list = list()
    for epoch in range(args.num_ep):
        t_start = time.time()
        for batch in iterator(train_x, train_y):
            model.zero_grad()
            pred = model.forward(batch.inputs)
            loss = loss_layer.loss(pred, batch.targets)
            loss.backward()
            model.step()
            loss_list.append(loss.values)
        print("Epoch %d tim cost: %.4f" % (epoch, time.time() - t_start))
        # evaluate
        model.set_phase("TEST")
        test_pred = model.forward(test_x)
        test_pred_idx = np.argmax(test_pred, axis=1)
        test_y_idx = test_y.values
        res = evaluator.evaluate(test_pred_idx, test_y_idx)
        print(res)
        model.set_phase("TRAIN")
Code example #8
def test_transpose_ops():
    shape = [2, 4, 6]
    data = np.random.randn(*shape)
    t1 = Tensor(data, requires_grad=True)
    t2 = t1.T
    assert list(t2.shape) == shape[::-1]

    t2.backward(np.ones_like(t2.values))
    assert list(t1.grad.shape) == shape

    t2 = t1.transpose((2, 0, 1))
    assert list(t2.shape) == [6, 2, 4]

    t2.backward(np.ones_like(t2.values))
    assert list(t1.grad.shape) == shape
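The rule this test relies on: a transpose by a permutation axes is inverted by transposing with np.argsort(axes), which is how the gradient gets back to the input's shape. In NumPy terms:

import numpy as np

axes = (2, 0, 1)
x = np.random.randn(2, 4, 6)
g = np.ones((6, 2, 4))  # upstream gradient, shaped like x.transpose(axes)
assert g.transpose(np.argsort(axes)).shape == x.shape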
Code example #9
def test_reshape_ops():
    t1 = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True)
    t2 = ops.reshape(t1, (6, ))
    assert t2.values.tolist() == [1, 2, 3, 4, 5, 6]

    t2.backward(np.ones(6))
    assert t1.grad.tolist() == [[1, 1, 1], [1, 1, 1]]
Code example #10
def test_neg_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = -t1
    assert t2.values.tolist() == [-1, -3, -5]

    t2.backward([1, 2, 3])
    assert t1.grad.tolist() == [-1, -2, -3]
Code example #11
def test_clip_ops():
    t1 = Tensor([1, -3, 5], requires_grad=True)
    t2 = ops.clip(t1, 0)
    assert t2.values.tolist() == [1, 0, 5]

    grad = np.array([1, 2, 3])
    t2.backward(grad)
    assert t1.grad.tolist() == [1, 0, 3]
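clip blocks the gradient wherever the input was clamped and passes it through unchanged elsewhere. Assuming ops.clip(t1, 0) clamps from below at 0 (as the forward values indicate), the equivalent mask is:

import numpy as np

a = np.array([1, -3, 5])
g = np.array([1, 2, 3])  # upstream gradient
mask = a >= 0            # positions untouched by the lower clamp
assert (g * mask).tolist() == [1, 0, 3]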
Code example #12
def test_flatten_ops():
    t1 = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True)
    t2 = ops.flatten(t1)
    assert t2.values.tolist() == [1, 2, 3, 4, 5, 6]

    t2.backward(np.ones_like(t2.values))
    assert t1.grad.shape == t1.shape
    assert t1.grad.tolist() == [[1, 1, 1], [1, 1, 1]]
Code example #13
def test_log_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = ops.log(t1)
    assert t2.values.tolist() == np.log(t1.values).tolist()

    grad = np.array([1, 2, 3])
    t2.backward(grad)
    assert t1.grad.tolist() == (grad / np.array([1, 3, 5])).tolist()
Code example #14
def test_exp_ops():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = ops.exp(t1)
    assert t2.values.tolist() == np.exp(t1.values).tolist()

    t2.backward([1, 2, 3])
    assert t1.grad.tolist() == (np.exp(t1.values) *
                                np.array([1, 2, 3])).tolist()
Code example #15
def test_pad_ops():
    t1 = Tensor([[1, 2, 3], [4, 5, 6]], requires_grad=True)
    pad_width = [(1, 0), (1, 0)]
    t2 = ops.pad(t1, pad_width)
    assert t2.values.tolist() == [[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6]]

    t2.backward(np.ones_like(t2.values))
    assert t1.grad.shape == t1.shape
    assert t1.grad.tolist() == [[1, 1, 1], [1, 1, 1]]
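pad's backward pass is just a crop: the upstream gradient is sliced back down to the original region. With the (1, 0) padding used above:

import numpy as np

g = np.ones((3, 4))  # upstream gradient, shaped like the padded output
grad = g[1:, 1:]     # drop the one leading row and column added by pad_width
assert grad.tolist() == [[1, 1, 1], [1, 1, 1]]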
Code example #16
def test_add_op():
    t1 = Tensor([1, 3, 5], requires_grad=True)
    t2 = Tensor([5, -2, -9], requires_grad=True)
    t3 = t1 + t2
    assert t3.values.tolist() == [6, 1, -4]
    t3.backward([2, 2, 2])

    assert t1.grad.tolist() == [2, 2, 2]
    assert t2.grad.tolist() == [2, 2, 2]

    # broadcast (2, 3) + (3,) -> (2, 3)
    t1 = Tensor([[1, 3, 5], [2, 3, 0]], requires_grad=True)
    t2 = Tensor([5, -2, -9], requires_grad=True)
    t3 = t1 + t2
    assert t3.values.tolist() == [[6, 1, -4], [7, 1, -9]]
    t3.backward([[1, 1, 1], [2, 2, 2]])
    assert t1.grad.tolist() == [[1, 1, 1], [2, 2, 2]]
    assert t2.grad.tolist() == [3, 3, 3]

    # broadcast (2, 3) + (1, 3) -> (2, 3)
    t1 = Tensor([[1, 3, 5], [2, 3, 0]], requires_grad=True)
    t2 = Tensor([[5, -2, -9]], requires_grad=True)
    t3 = t1 + t2
    assert t3.values.tolist() == [[6, 1, -4], [7, 1, -9]]
    t3.backward([[1, 1, 1], [2, 2, 2]])
    assert t1.grad.tolist() == [[1, 1, 1], [2, 2, 2]]
    assert t2.grad.tolist() == [[3, 3, 3]]
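When an operand is broadcast, its values are reused along the expanded axes, so its gradient is the upstream gradient summed over those axes; that is where [3, 3, 3] comes from. In plain NumPy:

import numpy as np

g = np.array([[1, 1, 1], [2, 2, 2]])  # upstream gradient, shape (2, 3)
# shape (3,): the leading axis was created by broadcasting, so sum it away
assert g.sum(axis=0).tolist() == [3, 3, 3]
# shape (1, 3): the axis exists but has size 1, so keep it while summing
assert g.sum(axis=0, keepdims=True).tolist() == [[3, 3, 3]]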
Code example #17
def test_minimal_nn():
    x = Tensor(np.random.normal(0, 1.0, (100, 3)))
    y = x * 3.14 + 30

    w1 = Tensor(np.random.normal(0, 1.0, (3, 3)), requires_grad=True)
    b1 = Tensor(np.random.normal(0, 1.0, 3), requires_grad=True)

    previous_loss = 1e10
    for _ in range(100):
        w1.zero_grad()
        b1.zero_grad()
        predicted = x @ w1 + b1
        err = predicted - y
        loss = (err**2).sum()
        loss.backward()
        w1 -= 0.001 * w1.grad
        b1 -= 0.001 * b1.grad
        assert loss.values < previous_loss
        previous_loss = loss.values
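The loop is plain gradient descent on a sum-of-squares loss, and for this linear model the gradients have a closed form: dL/dw = 2 * x.T @ err and dL/db = 2 * err.sum(axis=0), with err = x @ w + b - y. One update step in NumPy only (a sketch of the math the Tensor version computes, not of its internals):

import numpy as np

x = np.random.normal(0, 1.0, (100, 3))
y = x * 3.14 + 30
w = np.random.normal(0, 1.0, (3, 3))
b = np.random.normal(0, 1.0, 3)

err = x @ w + b - y                 # residuals, shape (100, 3)
loss_before = (err ** 2).sum()
w -= 0.001 * (2 * x.T @ err)        # dL/dw
b -= 0.001 * (2 * err.sum(axis=0))  # dL/db, broadcast axis summed away
loss_after = ((x @ w + b - y) ** 2).sum()
assert loss_after < loss_before     # the step is small enough to descend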
Code example #18
# Excerpt of a parameter-initializer's __call__ (class context not shown):
# self.init presumably generates raw values for the given shape, which are
# then wrapped in a trainable float32 Tensor.
def __call__(self, shape):
    values = self.init(shape)
    return Tensor(values, requires_grad=True, dtype=np.float32)
Code example #19
def test_pow_ops():
    t1 = Tensor([1, -3, 5], requires_grad=True)
    t2 = t1**3
    assert t2.values.tolist() == [1, -27, 125]
    t2.backward([2, 2, 2])
    assert t1.grad.tolist() == [2 * 3 * 1**2, 2 * 3 * (-3)**2, 2 * 3 * 5**2]