Example No. 1
def least_squares_SGD(y, tx, initial_w, max_iters, gamma):
    """
    Linear regression using stochastic gradient descent
    Args:
        y: labels
        tx: features
        initial_w: initial weight vector
        max_iters: number of steps to run
        gamma: step-size
    Returns:
        w: optimized weight vector for the model
        loss: final loss value based on mean squared error
    """
    threshold = 1e-8
    ws = [initial_w]
    losses = []
    w = initial_w
    for _ in range(max_iters):
        random_index = np.random.randint(len(y))
        # sample a random data point from y vector
        y_random = y[random_index] 
        # sample a random row vector from tx matrix
        tx_random = tx[random_index] 
        error_vector = compute_error_vector(y_random, tx_random, w)
        loss = compute_mse(error_vector)
        gradient_vector = compute_gradient(tx_random, error_vector)
        w = w - gamma * gradient_vector
        ws.append(w)
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break # convergence criterion met
    return ws[-1], losses[-1]
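This snippet (and the gradient-descent variants below) relies on helper functions that are not shown in these excerpts: compute_error_vector / compute_error, compute_mse, and compute_gradient, plus numpy imported as np. A minimal sketch of what they might look like, assuming the usual MSE formulation with residual e = y - tx·w; the names, signatures, and bodies below are assumptions, not part of the original code (the last example on this page passes (y, tx, w) to compute_gradient instead, so its helper would differ):

import numpy as np

def compute_error_vector(y, tx, w):
    # Assumed helper: residual(s) e = y - Xw; works for a full batch
    # or for a single (y, x) pair as sampled in the SGD examples.
    return y - tx.dot(w)

# compute_error appears to be the same helper under a different name in some examples.
compute_error = compute_error_vector

def compute_mse(error_vector):
    # Assumed helper: mean squared error loss, L = ||e||^2 / (2N).
    return np.mean(np.square(error_vector)) / 2

def compute_gradient(tx, error_vector):
    # Assumed helper: gradient of the MSE loss w.r.t. w, i.e. -X^T e / N.
    n = np.size(error_vector)
    return -np.dot(np.atleast_2d(tx).T, np.atleast_1d(error_vector)) / n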
Example No. 2
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
    """
    Linear regression using gradient descent
    Args:
        y: labels
        tx: features
        initial_w: initial weight vector
        max_iters: number of steps to run
        gamma: step-size
    Returns:
        w: optimized weight vector for the model
        loss: final loss value based on mean squared error
    """
    threshold = 1e-8
    ws = [initial_w]
    losses = []
    w = initial_w
    for _ in range(max_iters):
        # full-batch error, loss and gradient at the current weights
        error_vector = compute_error_vector(y, tx, w)
        loss = compute_mse(error_vector)
        gradient_vector = compute_gradient(tx, error_vector)
        # gradient descent step
        w = w - gamma * gradient_vector
        ws.append(w)
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break # convergence criterion met
    return ws[-1], losses[-1]
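A hypothetical usage of least_squares_GD on a tiny synthetic problem; the data, starting point, and step size below are illustrative assumptions, not from the source:

# Toy problem: y = 1 + 2x, with a bias column in tx.
tx = np.c_[np.ones(5), np.arange(5.0)]
y = 1.0 + 2.0 * np.arange(5.0)
initial_w = np.zeros(tx.shape[1])

w, loss = least_squares_GD(y, tx, initial_w, max_iters=1000, gamma=0.05)
# On this noise-free data, w should approach [1.0, 2.0] and loss should approach 0.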
Example No. 3
def least_squares_SGD(y, tx, initial_w, max_iters, gamma):
    """
    Linear regression using stochastic gradient descent
    :param y: labels
    :param tx: training data
    :param initial_w: initial value of weights
    :param max_iters: maximum number of iterations of stochastic gradient descent
    :param gamma: learning rate
    :return: optimized weight vector for the model, final loss value based on MSE
    """
    threshold = 1e-9
    ws = [initial_w]
    losses = []
    w = initial_w
    for _ in range(max_iters):
        random_index = np.random.randint(len(y))
        y_random = y[random_index]
        tx_random = tx[random_index]
        error_vector = compute_error(y_random, tx_random, w)
        loss = compute_mse(error_vector)
        gradient = compute_gradient(tx_random, error_vector)
        w = w - gamma * gradient
        ws.append(w)
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break
    return ws[-1], losses[-1]
Example No. 4
def least_squares(y, tx):
    """
    Least squares regression using the normal equation
    :param y: labels
    :param tx: training data
    :return: optimized weight vector for the model, final loss value based on MSE
    """
    coefficient_matrix = tx.T.dot(tx)
    constant = tx.T.dot(y)
    w = np.linalg.solve(coefficient_matrix, constant)
    loss = compute_mse(compute_error(y, tx, w))
    return w, loss
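Because this solves the normal equation (tx.T @ tx) w = tx.T @ y with np.linalg.solve rather than forming an explicit inverse, it is faster and more numerically stable than computing (X^T X)^{-1} X^T y directly. A hypothetical usage with toy data (the values below are illustrative assumptions, not from the source):

tx = np.c_[np.ones(4), np.array([0.0, 1.0, 2.0, 3.0])]
y = np.array([1.0, 3.0, 5.0, 7.0])   # exactly y = 1 + 2x
w, loss = least_squares(y, tx)
# w should come out close to [1.0, 2.0] and loss close to 0.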
Example No. 5
def ridge_regression(y, tx, lambda_):
    """
    Ridge regression using the normal equation
    :param y: labels
    :param tx: training data
    :param lambda_: regularization parameter
    :return: optimized weight vector for the model, final loss value based on MSE
    """
    coefficient_matrix = tx.T.dot(tx) + 2 * len(y) * lambda_ * np.identity(
        tx.shape[1])
    constant_vector = tx.T.dot(y)
    w = np.linalg.solve(coefficient_matrix, constant_vector)
    error_vector = compute_error(y, tx, w)
    loss = compute_mse(error_vector)
    return w, loss
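The 2 * len(y) * lambda_ scaling of the identity matrix matches the convention in which the objective is the MSE (with its 1/(2N) factor) plus lambda_ * ||w||^2, so setting lambda_ = 0 recovers ordinary least squares. A hypothetical sanity check of that limit, with toy data assumed for illustration:

tx = np.c_[np.ones(4), np.array([0.0, 1.0, 2.0, 3.0])]
y = np.array([1.0, 3.0, 5.0, 7.0])

w_ridge, _ = ridge_regression(y, tx, lambda_=0.0)
w_ls = np.linalg.solve(tx.T.dot(tx), tx.T.dot(y))
assert np.allclose(w_ridge, w_ls)  # with lambda_ = 0, ridge coincides with least squares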
Example No. 6
def least_squares(y, tx):
    """
    Least squares regression using normal equations
    Args:
        y: labels
        tx: features
    Returns:
        w: optimized weight vector for the model
        loss: final loss value based on mean squared error
    """
    coefficient_matrix = tx.T.dot(tx)
    constant_vector = tx.T.dot(y)
    w = np.linalg.solve(coefficient_matrix, constant_vector)
    error_vector = compute_error_vector(y, tx, w)
    loss = compute_mse(error_vector)
    return w, loss
Example No. 7
def ridge_regression(y, tx, lambda_):
    """
    Ridge regression using normal equations
    Args:
        y: labels
        tx: features
        lambda_: regularization parameter
    Returns:
        w: optimized weight vector for the model
        loss: final loss value based on mean squared error
    """
    coefficient_matrix = tx.T.dot(tx) + 2 * len(y) * lambda_ * np.identity(tx.shape[1])
    constant_vector = tx.T.dot(y)
    w = np.linalg.solve(coefficient_matrix, constant_vector)
    error_vector = compute_error_vector(y, tx, w)
    loss = compute_mse(error_vector)
    return w, loss
Example No. 8
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
    """
    Linear regression using gradient descent
    :param y: labels
    :param tx: training data
    :param initial_w: initial value of weights
    :param max_iters: maximum iterations used in gradient descent process
    :param gamma: learning rate
    :return: optimized weight vector for the model, final loss value based on MSE
    """
    # Define parameters to store w and loss
    threshold = 1e-9
    ws = [initial_w]
    losses = []
    w = initial_w
    for _ in range(max_iters):
        loss = compute_mse(compute_error(y, tx, w))
        gradient = compute_gradient(y, tx, w)
        w = w - gamma * gradient
        ws.append(w)
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break
    return ws[-1], losses[-1]
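Note that this variant computes the gradient directly from (y, tx, w) rather than from (tx, error_vector) as in the earlier examples, so it assumes a compute_gradient helper with that signature. A minimal sketch of such a helper, under the same MSE assumptions as before (the body is an assumption; it is not shown in the original excerpt):

def compute_gradient(y, tx, w):
    # Assumed helper: MSE gradient computed directly from (y, tx, w), i.e. -X^T (y - Xw) / N.
    error = y - tx.dot(w)
    return -tx.T.dot(error) / len(error)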