Example #1
    def minimize(self, step):
        grad_x, grad_y = grad_func(self.x, self.y)

        self.grad_first_x = self.beta1 * self.grad_first_x + (1.0 - self.beta1) * grad_x
        self.grad_first_y = self.beta1 * self.grad_first_y + (1.0 - self.beta1) * grad_y

        self.grad_second_x = (
            self.beta2 * self.grad_second_x + (1.0 - self.beta2) * grad_x ** 2
        )
        self.grad_second_y = (
            self.beta2 * self.grad_second_y + (1.0 - self.beta2) * grad_y ** 2
        )

        # Bias correction
        self.grad_first_x_unbiased = self.grad_first_x / (1.0 - self.beta1 ** step)
        self.grad_first_y_unbiased = self.grad_first_y / (1.0 - self.beta1 ** step)

        self.grad_second_x_unbiased = self.grad_second_x / (1.0 - self.beta2 ** step)
        self.grad_second_y_unbiased = self.grad_second_y / (1.0 - self.beta2 ** step)
        
        # Weight update: step by the bias-corrected first moment, scaled by
        # the learning rate and normalized by the second moment
        self.x = self.x - self.lr * self.grad_first_x_unbiased / (
            np.sqrt(self.grad_second_x_unbiased) + self.eps
        )
        self.y = self.y - self.lr * self.grad_first_y_unbiased / (
            np.sqrt(self.grad_second_y_unbiased) + self.eps
        )
       
        # for visualization purposes
        z = function(self.x, self.y)
        self.x_hist.append(self.x)
        self.y_hist.append(self.y)
        self.z_hist.append(z)
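Example #1 is the Adam update: exponential moving averages of the gradient and the squared gradient, bias-corrected, drive each step. A minimal scaffold this method could plug into might look like the sketch below; the test surface, the default hyperparameters, and the constructor are assumptions for illustration, not taken from the original class.

import numpy as np

def function(x, y):
    # Assumed convex test surface for illustration
    return x ** 2 + y ** 2

def grad_func(x, y):
    # Analytic gradient of the test surface
    return 2.0 * x, 2.0 * y

class Adam:
    def __init__(self, x0=2.0, y0=-1.5, lr=0.1,
                 beta1=0.9, beta2=0.999, eps=1e-8):
        self.x, self.y = x0, y0
        self.lr, self.beta1, self.beta2, self.eps = lr, beta1, beta2, eps
        # First- and second-moment accumulators start at zero
        self.grad_first_x = self.grad_first_y = 0.0
        self.grad_second_x = self.grad_second_y = 0.0
        self.x_hist, self.y_hist, self.z_hist = [], [], []

    # minimize(self, step) from the example above goes here

# The bias correction divides by 1 - beta ** step, so `step` must start at 1:
#   opt = Adam()
#   for step in range(1, 201):
#       opt.minimize(step)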
Example #2
    def minimize(self):

        grad_x, grad_y = grad_func(self.x, self.y)

        self.x = self.x - self.lr * grad_x
        self.y = self.y - self.lr * grad_y
        # for visualization purposes
        z = function(self.x, self.y)
        self.x_hist.append(self.x)
        self.y_hist.append(self.y)
        self.z_hist.append(z)
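Example #2 is plain gradient descent, which keeps no state beyond the current point and a learning rate. A compact self-contained version of the same update, with an assumed test surface:

def function(x, y):
    return x ** 2 + y ** 2          # assumed test surface

def grad_func(x, y):
    return 2.0 * x, 2.0 * y         # its analytic gradient

x, y, lr = 2.0, -1.5, 0.1
for _ in range(100):
    grad_x, grad_y = grad_func(x, y)
    x = x - lr * grad_x             # same update rule as minimize() above
    y = y - lr * grad_y
print(x, y, function(x, y))         # approaches the minimum at (0, 0)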
Example #3
    def minimize(self):

        grad_x, grad_y = grad_func(self.x, self.y)

        # Accumulate squared gradients (the AdaGrad statistic)
        self.grad_sqr_x += np.square(grad_x)
        self.grad_sqr_y += np.square(grad_y)

        # Per-coordinate step: lr * grad / sqrt(eps + accumulated squared grad)
        new_grad_x = self.lr * (1 / np.sqrt(self.eps + self.grad_sqr_x)) * grad_x
        new_grad_y = self.lr * (1 / np.sqrt(self.eps + self.grad_sqr_y)) * grad_y

        self.x = self.x - new_grad_x
        self.y = self.y - new_grad_y

        # for visualization purposes
        z = function(self.x, self.y)

        self.x_hist.append(self.x)
        self.y_hist.append(self.y)
        self.z_hist.append(z)
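Example #3 is AdaGrad: the squared gradients accumulate monotonically, so the effective per-coordinate step lr / sqrt(eps + sum of squared gradients) can only shrink over time. A standalone sketch with an assumed test gradient:

import numpy as np

def grad_func(x, y):
    return 2.0 * x, 2.0 * y          # assumed gradient of x**2 + y**2

x, y = 2.0, -1.5
lr, eps = 0.5, 1e-8
grad_sqr_x = grad_sqr_y = 0.0
for _ in range(200):
    grad_x, grad_y = grad_func(x, y)
    grad_sqr_x += np.square(grad_x)  # monotonically growing accumulators
    grad_sqr_y += np.square(grad_y)
    x -= lr * grad_x / np.sqrt(eps + grad_sqr_x)
    y -= lr * grad_y / np.sqrt(eps + grad_sqr_y)
print(x, y)                          # approaches the minimum at (0, 0)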
Example #4
import numpy as np
from scipy import optimize

import utils as ut  # spline helper module; the exact import path is an assumption
# X, knots, coef0 and dx are assumed to be defined earlier in the script

l1, l2, l3, l4 = 0, 0, 0, 0

P = ut.get_projecting_matrix(knots)
splrep = ut.get_splrep(X, knots)

# =============================================================================
# Test objective_func and grad_func functions
# =============================================================================

y0 = np.arange(1, 8, dx)
u = 3.0
coefs = np.concatenate((-0.02*coef0, coef0))
objfunc = ut.loss(coefs, y0, knots, splrep, P, u, 0, 0, 0, 0)
#%%
print("objection function =",objfunc)
gradfunc = np.round(ut.grad_func(coefs, y0, knots, splrep, P, u),2)
print()
print("---- Test grad_func function by comparing it with approx_fprime function -----")
print("grad-func = ", gradfunc)
def func(coefs):
    return ut.loss(coefs, y0, knots, splrep, P, u, 0, 0, 0, 0)  
approx_fprime = np.round(optimize.approx_fprime(coefs, func, epsilon=1e-6), 2)
print("approx-fprime = ", approx_fprime)
print("Is grad-func equal to approx-fprime? :", np.array_equal(gradfunc, approx_fprime))
#%%

print()
print("-----Test return_group_to_index--------")
groups = ut.return_group_to_index(coefs, knots)
print(groups)
#%%