Code example #1
from neon.backends.autodiff import Autodiff

def gradient_descent(x_init, f, be, learning_rate=0.1, epsilon=1e-9):
    # f(x) builds a lazy neon op-tree, so f_init and f_new are
    # re-evaluated automatically whenever x_init and x_new are
    # updated in place.
    x_new = be.zeros_like(x_init)
    f_init = f(x_init)
    f_new = f(x_new)
    # Differentiate the op-tree once; gradients are recomputed from
    # the current tensor values on each get_grad_tensor() call.
    grad_f = Autodiff(f_init, be=be, next_error=None)
    while True:
        # Take one step along the negative gradient of f at x_init.
        x_new[:] = x_init - learning_rate * grad_f.get_grad_tensor([x_init])[0]
        # Stop once successive iterates are within epsilon of each other.
        if conv_vec_test(x_init, x_new, be) < epsilon:
            # Alternative: test the change in the objective value instead:
            # if conv_test(f_init, f_new, be) < epsilon:
            f_val = be.empty((1, 1))
            f_val[:] = f_new
            return x_new, f_val
        x_init[:] = x_new
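The listing relies on two convergence helpers, conv_vec_test and conv_test, that are defined elsewhere. The sketch below shows one plausible shape for them, plus a small end-to-end call; it assumes the standard neon backend API (gen_backend, be.array, be.empty, be.sum, Tensor.get). The helper bodies and the quadratic objective here are illustrative assumptions, not the original definitions.

# Hypothetical sketch: convergence helpers and a usage example.
# The conv_vec_test / conv_test bodies are assumptions, not the
# definitions used in the text.
import numpy as np
from neon.backends import gen_backend

def conv_vec_test(x_old, x_new, be):
    # Squared L2 distance between successive iterates.
    dist = be.empty((1, 1))
    dist[:] = be.sum((x_new - x_old) * (x_new - x_old), axis=0)
    return float(dist.get())

def conv_test(f_old, f_new, be):
    # Squared change in the objective value between iterates.
    diff = be.empty((1, 1))
    diff[:] = (f_new - f_old) * (f_new - f_old)
    return float(diff.get())

be = gen_backend(backend='cpu')
x0 = be.array(np.array([[3.0], [4.0]]))   # column-vector starting point
f = lambda x: be.sum(x * x, axis=0)       # f(x) = ||x||^2, minimum at 0
x_min, f_min = gradient_descent(x0, f, be)
print(x_min.get(), f_min.get())           # both should be close to zero

With learning_rate=0.1 each step scales the iterate by 0.8 (since the gradient of ||x||^2 is 2x), so the loop converges geometrically toward the origin; the vector-based test stops it once consecutive iterates differ by less than epsilon.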