Code example #1
def conj_grad_iter(x, epsilon, bracket_high):

    # get the start time of the iteration
    start_time = time.time()

    # initial search direction: steepest descent
    d = -1 * gradient(x)
    y = x
    j = 1
    # inner loop: iterate until the gradient norm drops below epsilon,
    # taking at most n = dim(x) steps per call
    while np.linalg.norm(gradient(y), ord=2) > epsilon:
        # line 2: exact line search along the current direction d
        step_size = exact_line_search(y, -1 * d, bracket_high=bracket_high)
        y_next = y + (step_size * d)

        if j == x.shape[0]:
            break

        # line 3: update the direction using the Polak-Ribiere coefficient
        d = (-1 * gradient(y_next)) + (
            polak_rebiere(gradient(y_next), gradient(y)) * d)

        y = y_next
        j += 1

    # update x_next
    x_next = y

    return [x_next, epsilon, bracket_high], (time.time() - start_time)
Code example #2
def bb_iter(x, x_prev, grad_prev):

    # if this is the first (k = 0) iteration, no x_prev or f'(x_prev)
    # is available yet, so simply perform a gradient descent step
    if (x_prev is None and grad_prev is None):

        # get the start time of the iteration
        start_time = time.time()

        grad = gradient(x)

        # simple line search with alpha = beta = 0.5
        step_size = inexact_line_search(grad, eval_objective(x), x, 0.5, 0.5)

        # simple gradient descent
        x_next = x - np.dot(step_size, grad)

        return [x_next, x, grad], (time.time() - start_time)

    # get the start time of the iteration
    start_time = time.time()

    grad = gradient(x)
    r = x - x_prev
    q = grad - grad_prev

    step_size = np.dot(r, q) / np.dot(q, q)

    x_next = x - (step_size * grad)

    return [x_next, x, grad], (time.time() - start_time)
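The step size computed above is the Barzilai-Borwein choice: with r = x - x_prev and q = grad(x) - grad(x_prev), it sets step_size = dot(r, q) / dot(q, q). A minimal self-contained sketch of that computation on a toy quadratic follows; the matrix A and the two iterates are purely illustrative and are not part of the original code.

import numpy as np

# illustrative quadratic f(x) = 0.5 * x^T A x, so grad f(x) = A x
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])
grad = lambda x: A @ x

x_prev = np.array([1.0, 1.0])
x = np.array([0.5, -0.3])

r = x - x_prev                    # difference of iterates
q = grad(x) - grad(x_prev)        # difference of gradients
step_size = np.dot(r, q) / np.dot(q, q)   # Barzilai-Borwein step size

x_next = x - (step_size * grad(x))
print(step_size, x_next)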
Code example #3
def fista_iter(x, y, t, alpha, beta):

    # get the start time of the iteration
    start_time = time.time()

    # update the momentum parameter t_{k+1}
    t_next = 0.5 * (1 + math.sqrt(1 + (4 * (t**2))))

    # backtracking line search at the extrapolated point y, where the
    # gradient step is actually taken
    grad_y = gradient(y)
    step_size = inexact_line_search(grad_y, eval_objective(y), y, alpha, beta)

    # gradient step from y
    x_next = y - (step_size * grad_y)

    # extrapolation (momentum) step
    y_next = x_next + (((t - 1) / t_next) * (x_next - x))

    return [x_next, y_next, t_next, alpha, beta], (time.time() - start_time)
Code example #4
def HOG(img_name):
    # load the image, resize it, and convert it to grayscale
    im = Image.open(img_name).resize((IMG_SIZE, IMG_SIZE)).convert('L')
    img = np.array(im).astype(np.float32)

    # per-pixel gradient magnitude (M) and direction (D) maps
    M, D = hp.gradient(img)
    # bin the gradients into cells and flatten the per-cell histograms
    # into a single HOG descriptor
    cells = make_cells(M, D)
    histogram = hog_feature_vector(cells).flatten()

    return histogram
Code example #5
def linear_regression(X, phi, max_itr, delta):
    RAW_POINTS, LABELS = helpers.get_X_Y(X)
    POINTS = phi(RAW_POINTS)  # apply the feature map (kernel function) to the input data
    n = POINTS.shape[0]  # number of training points
    learning_rate = 1e-3  # learning rate (hyperparameter)
    final_parameters = np.zeros((POINTS.shape[1], 1))
    # plain gradient descent on the regression objective
    for _ in range(max_itr):
        final_parameters = final_parameters - learning_rate * helpers.gradient(
            POINTS, final_parameters, LABELS, delta)
    return final_parameters
Code example #6
def agd_iter(x, y, alpha, beta, a):

    # get the start time of the iteration
    start_time = time.time()

    grad = gradient(x)
    step_size = inexact_line_search(grad, eval_objective(x), x, alpha, beta)

    # gradient step from the current (extrapolated) point x
    y_next = x - (step_size * grad)

    # find a_{k+1} from a_k
    a_next = (1 + math.sqrt(1 + (4 * (a**2)))) / 2
    #a_next = find_a_next(a, recip_kappa)

    dynamic_momentum = (a - 1) / a_next
    #dynamic_momentum = find_dynamic_momentum(a, a_next)

    # sliding (momentum) step gives the next extrapolated point
    x_next = y_next + (dynamic_momentum * (y_next - y))

    return [x_next, y_next, alpha, beta, a_next], (time.time() - start_time)
Code example #7
def hb_iter(x, x_prev, alpha, beta, momentum):

    # get the start time of the iteration
    start_time = time.time()

    # evaluate at x
    grad = gradient(x)
    obj_val = eval_objective(x)

    # perform backtracking line search
    step_size = inexact_line_search(grad, obj_val, x, alpha, beta)

    # subtract the gradient and add momentum
    x_next = (x - np.dot(step_size, grad)) + np.dot(momentum, (x - x_prev))

    return [x_next, x, alpha, beta, momentum], (time.time() - start_time)
Code example #8
def gd_iter(x, alpha, beta):

    # get the start time of the iteration
    start_time = time.time()

    # evaluate at x
    grad = gradient(x)
    obj_val = eval_objective(x)

    # perform backtracking line search
    step_size = inexact_line_search(grad, obj_val, x, alpha, beta)

    # perform actual gradient descent
    x_next = x - np.dot(step_size, grad)

    return [x_next, alpha, beta], (time.time() - start_time)
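All of the *_iter functions above share the same calling convention: they take the current iterate plus any auxiliary state, perform one update, and return the next state as a list together with the wall-clock time of that iteration, so the returned list can be unpacked straight back into the next call. A minimal driver sketch under that assumption follows; the run helper, its tolerance and iteration cap, and the starting point x0 are hypothetical and not part of the original code.

import numpy as np

def run(iter_fn, state, tol=1e-6, max_iters=10000):
    # repeatedly apply a single-iteration update until the iterate stops moving
    total_time = 0.0
    for _ in range(max_iters):
        prev = state[0]
        state, elapsed = iter_fn(*state)
        total_time += elapsed
        if np.linalg.norm(state[0] - prev, ord=2) <= tol:
            break
    return state[0], total_time

# e.g. plain gradient descent with backtracking parameters alpha = 0.3, beta = 0.7:
# x_star, seconds = run(gd_iter, [x0, 0.3, 0.7])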