Example #1
def auto_diff_lr():

    # Note: in some cases the implementation below suffers large numerical error,
    # so real systems usually provide higher-order (fused) operators to reduce it.

    N = 100
    X_val, Y_val = gen_2d_data(N)
    w_val = np.ones(3)
    w = Tensor(w_val)
    # Baseline decision boundary and accuracy with the initial weights
    plot(N, X_val, Y_val, w_val)
    test_accuracy(w_val, X_val, Y_val)
    alpha = 0.01
    max_iters = 300
    for iteration in range(max_iters):
        acc_L_val = 0
        for i in range(N):
            w.zero_gard()  # clear the gradient accumulated by the previous sample
            x_val = X_val[i]
            y_val = np.array(Y_val[i])
            x = Tensor(x_val)
            y = Tensor(y_val)
            # Sigmoid hypothesis h = sigma(w . x), composed from primitive ops
            h = 1 / (1 + (-(w * x).sum()).exp())
            # Bernoulli log-likelihood of the label under h
            l = y * h.log() + (1 - y) * (1 - h).log()
            l.backward()
            # Gradient ascent step: move w uphill on the log-likelihood
            w.data += alpha * w.grad
            acc_L_val += l.data
        print("iter = %d, likelihood = %s, w = %s" %
              (iteration, acc_L_val, w_val))
    test_accuracy(w_val, X_val, Y_val)
    plot(N, X_val, Y_val, w_val, True)
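
The comment at the top of auto_diff_lr is worth unpacking: composing the sigmoid and its logarithm from primitive exp, log, and divide ops overflows for large |w·x| and loses precision near saturation. A minimal sketch of the usual remedy in plain numpy (log_sigmoid is an illustrative helper, not part of the example's Tensor API):

import numpy as np

def log_sigmoid(z):
    # Stable log(sigmoid(z)): log(1 / (1 + exp(-z))) = -logaddexp(0, -z)
    return -np.logaddexp(0.0, -z)

z = -1000.0
naive = np.log(1.0 / (1.0 + np.exp(-z)))  # exp(1000) overflows, so this is -inf
print(naive, log_sigmoid(z))              # -inf vs. the correct -1000.0

This is why production autodiff systems expose fused primitives such as logaddexp or log-sigmoid with their own derivative rules, rather than leaving users to compose them from exp and log.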
Example #2
def test_reduce_sum_mix():
    x1_val = 2 * np.ones(3)
    x1 = Tensor(x1_val)
    # y = exp(sum(x1)); the gradient is exp(sum(x1)) broadcast to x1's shape
    y = x1.sum().exp()
    expected_y_val = np.exp(np.sum(x1_val))
    assert np.array_equal(y.data, expected_y_val)
    y.backward()
    assert np.array_equal(x1.grad, expected_y_val * np.ones_like(x1_val))
    # Clear the accumulated gradient before reusing x1 in a second graph
    x1.zero_gard()
    # y2 = log(sum(x1)); the gradient is (1 / sum(x1)) broadcast to x1's shape
    y2 = x1.sum().log()
    expected_y2_val = np.log(np.sum(x1_val))
    assert np.array_equal(y2.data, expected_y2_val)
    y2.backward()
    assert np.array_equal(x1.grad, (1 / np.sum(x1_val)) * np.ones_like(x1_val))
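
The closed-form gradients asserted above, d/dx exp(sum(x)) = exp(sum(x)) * 1 and d/dx log(sum(x)) = (1/sum(x)) * 1, can be sanity-checked numerically. A quick central-difference sketch in plain numpy, independent of the Tensor class (numeric_grad is an illustrative helper, not part of the test suite):

import numpy as np

def numeric_grad(f, x, eps=1e-6):
    # Central-difference approximation of the gradient of a scalar function f
    g = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d[i] = eps
        g[i] = (f(x + d) - f(x - d)) / (2 * eps)
    return g

x = 2 * np.ones(3)
print(numeric_grad(lambda v: np.exp(np.sum(v)), x))  # ~ exp(6) * [1, 1, 1]
print(numeric_grad(lambda v: np.log(np.sum(v)), x))  # ~ (1/6) * [1, 1, 1]

Both numeric results should match the test's expected gradients to within the finite-difference error, which is what the asserts verify against the autodiff engine's backward pass.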