def fletcher_reeves(x_init, f, be, epsilon=1e-9):
    # `be` is a neon backend; `f` builds a lazy op tree from a backend tensor.
    # Assumes `Autodiff` is imported (e.g. from neon.backends.autodiff).
    x_new = be.zeros_like(x_init)
    f_init = f(x_init)
    f_new = f(x_new)
    grad_f = Autodiff(f_init, be, next_error=None)
    grad_f = grad_f.get_grad_op_tree([x_init])[0]  # gradient as a lazy op tree
    while True:
        # The line search is left unimplemented here, and the remainder of the
        # Fletcher-Reeves update is not shown on this page.
        alpha, _ = None, None  # implement the line search
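The line search left as a placeholder above could, for illustration, be a simple backtracking search. The helper below is a hypothetical sketch, not part of the original example: it assumes the neon op-tree conventions already used in these snippets (an op tree is evaluated by assigning it into a backend tensor, and values are read back with .get()), and it returns an (alpha, f_trial) pair to match the unpacking in the placeholder.

def backtracking_line_search(x, direction, f, be, alpha=1.0, shrink=0.5, max_steps=30):
    # Hypothetical helper (not in the original page): shrink the step size
    # until the objective decreases along `direction`.
    x_trial = be.zeros_like(x)
    f_cur = be.empty((1, 1))
    f_trial = be.empty((1, 1))
    f_cur[:] = f(x)                          # objective at the current point
    for _ in range(max_steps):
        x_trial[:] = x + alpha * direction   # candidate point along the search direction
        f_trial[:] = f(x_trial)              # objective at the candidate
        if f_trial.get()[0, 0] < f_cur.get()[0, 0]:
            return alpha, f_trial            # accept the first decreasing step
        alpha *= shrink                      # otherwise shrink the step and retry
    return alpha, f_trial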
def gradient_descent(x_init, f, be, learning_rate=0.1, epsilon=1e-9):
    # `be` is a neon backend; `f` builds a lazy op tree from a backend tensor.
    # `conv_vec_test` is a convergence helper from the same source file.
    x_new = be.zeros_like(x_init)
    f_init = f(x_init)
    f_new = f(x_new)
    grad_f = Autodiff(f_init, be=be, next_error=None)
    while True:
        # Evaluate the gradient at the current x_init and take a step.
        x_new[:] = x_init - learning_rate * grad_f.get_grad_tensor([x_init])[0]
        if conv_vec_test(x_init, x_new, be) < epsilon:
            # if conv_test(f_init, f_new, be) < epsilon:
            f_val = be.empty((1, 1))
            f_val[:] = f_new
            return x_new, f_val
        x_init[:] = x_new
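The convergence tests conv_vec_test and conv_test used by these optimizer examples are defined elsewhere in the source file and are not shown on this page. The definitions below are a hypothetical sketch of what such helpers might look like, relying only on the tensor patterns that already appear above (be.empty, assigning an op tree into a tensor, .get()).

import numpy as np

def conv_vec_test(x_old, x_new, be):
    # Hypothetical helper: Euclidean distance between successive iterates.
    diff = be.empty(x_new.shape)
    diff[:] = x_new - x_old
    return float(np.linalg.norm(diff.get()))

def conv_test(f_old, f_new, be):
    # Hypothetical helper: absolute change in the objective value between
    # the op trees evaluated at the old and new points.
    v_old = be.empty((1, 1))
    v_new = be.empty((1, 1))
    v_old[:] = f_old
    v_new[:] = f_new
    return abs(float(v_new.get()[0, 0]) - float(v_old.get()[0, 0]))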
def newton_method(x_init, f, be, epsilon=1e-9):
    # `be` is a neon backend; `f` builds a lazy op tree from a backend tensor.
    x_new = be.zeros_like(x_init)
    f_init = f(x_init)
    f_new = f(x_new)
    grad_f = Autodiff(f_init, be=be, next_error=None)
    grad_f = grad_f.get_grad_op_tree([x_init])[0]        # first derivative (op tree)
    hessian_f = Autodiff(grad_f, be=be, next_error=None)
    hessian_f = hessian_f.get_grad_op_tree([x_init])[0]  # derivative of the gradient (op tree)
    while True:
        # Newton update: both op trees re-evaluate at the current x_init.
        x_new[:] = x_init - grad_f / hessian_f
        # if conv_vec_test(x_init, x_new, be) < epsilon:
        if conv_test(f_init, f_new, be) < epsilon:
            f_val = be.empty((1, 1))
            f_val[:] = f_new
            return x_new, f_val
        x_init[:] = x_new
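A minimal usage sketch, not taken from the original page: it assumes a neon backend created with gen_backend (float32 by default), backend ops zeros/square/sum as in neon, and convergence helpers such as the hypothetical ones sketched above. It drives the gradient_descent example on a small quadratic.

from neon.backends import gen_backend

be = gen_backend(backend='cpu')

x0 = be.zeros((4, 1))
x0[:] = 5.0                      # starting point

def f(x):
    # quadratic objective with its minimum at x == 2
    return be.sum(be.square(x - 2.0), axis=0)

# A loose tolerance keeps the float32 backend from stalling near convergence.
x_min, f_min = gradient_descent(x0, f, be, learning_rate=0.1, epsilon=1e-5)
print(x_min.get())               # entries close to 2.0
print(f_min.get())               # value close to 0.0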
Example #6
File: layer.py Project: hunterlang/neon
    def bprop(self, error):
        """
        Use Autodiff.back_prop_grad to back propagate gradients for the
        corresponding tensors.
        """
        if not self.deltas:
            self.deltas = error.reshape((self.nfm, -1))

        # Autodiff automatically caches and reuses this object across calls.
        # If the `error` buffer were known at init time, the Autodiff object
        # could also be created once when the layer is initialized.
        ad = Autodiff(self.fprop_op_tree, self.be, next_error=self.deltas)

        # back propagate
        ad.back_prop_grad([self.x, self.gamma, self.beta],
                          [self.deltas, self.grad_gamma, self.grad_beta])

        return error
Example #7
    def bprop(self, error):
        """
        Use Autodiff.back_prop_grad to back propagate gradients for the
        corresponding tensors.
        """
        if not self.deltas:
            self.deltas = error.reshape(self.bn_shape)

        # Autodiff automatically caches and reuses this object across calls.
        # If the `error` buffer were known at init time, the Autodiff object
        # could also be created once when the layer is initialized.
        ad = Autodiff(self.fprop_op_tree, self.be, next_error=self.deltas)

        # back propagate
        ad.back_prop_grad([self.x, self.gamma, self.beta],
                          [self.deltas, self.grad_gamma, self.grad_beta])

        return error
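Outside of a layer, the back_prop_grad pattern from the two bprop methods above can be exercised on a small op tree. This is a minimal sketch under assumptions: Autodiff imported from neon.backends.autodiff, and next_error defaulting to an all-ones error when omitted, as the optimizer examples earlier on this page rely on.

from neon.backends import gen_backend
from neon.backends.autodiff import Autodiff

be = gen_backend(backend='cpu')

x = be.ones((3, 2))
w = be.ones((3, 2))
fwd = be.sum(x * w, axis=0)      # forward op tree, shape (1, 2)

# Preallocated gradient buffers, one per input tensor.
grad_x = be.empty(x.shape)
grad_w = be.empty(w.shape)

ad = Autodiff(fwd, be, next_error=None)
ad.back_prop_grad([x, w], [grad_x, grad_w])

print(grad_x.get())              # d(fwd)/dx == w, all ones here
print(grad_w.get())              # d(fwd)/dw == x, all ones here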