Example #1
def cross_validation(y, x, k_indices, k, lambda_, degree):
    """Return the average training and test MSE of ridge regression over the k folds."""

    loss_tr = []
    loss_te = []
    for j in range(k):

        # the j-th group of indices forms the test fold; the remaining groups form the training set
        index_te = k_indices[j]

        ind = np.ones(k_indices.shape[0], bool)
        ind[j] = False
        index_tr = k_indices[ind].flatten()

        x_tr = x[index_tr]
        x_te = x[index_te]
        y_tr = y[index_tr]
        y_te = y[index_te]

        # expand the features to a polynomial basis of the given degree
        xpoly_tr = build_poly(x_tr, degree)
        xpoly_te = build_poly(x_te, degree)

        # fit ridge regression on the training fold and evaluate on both folds
        w_s = ridge_regression(y_tr, xpoly_tr, lambda_)

        loss_tr.append(compute_mse(y_tr, xpoly_tr, w_s))
        loss_te.append(compute_mse(y_te, xpoly_te, w_s))

    return np.mean(loss_tr), np.mean(loss_te)
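
Example #1 assumes several helpers that are not shown: build_poly, compute_mse, ridge_regression, and a k_indices array produced by a fold-splitting helper. The sketch below shows one plausible implementation of the fold splitter, the polynomial expansion, and the MSE, matching the shapes used above (k_indices as a (k, N/k) integer array); the names, signatures, and the 1/(2N) scaling are assumptions, not part of the original example.

import numpy as np

def build_k_indices(y, k_fold, seed=1):
    """Split the sample indices into k_fold shuffled groups (one row per fold)."""
    num_row = y.shape[0]
    interval = int(num_row / k_fold)
    np.random.seed(seed)
    indices = np.random.permutation(num_row)
    k_indices = [indices[k * interval: (k + 1) * interval] for k in range(k_fold)]
    return np.array(k_indices)

def build_poly(x, degree):
    """Polynomial basis expansion: columns [1, x, x**2, ..., x**degree]."""
    return np.vstack([x ** d for d in range(degree + 1)]).T

def compute_mse(y, tx, w):
    """Mean squared error; the 1/(2N) scaling is one common convention (an assumption here)."""
    e = y - tx.dot(w)
    return e.dot(e) / (2 * len(y))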
Example #2
def least_squares(y, tx):
    """calculate the least squares."""

    A = tx.T.dot(tx)
    b = tx.T.dot(y)
    w = np.linalg.solve(A, b)
    return compute_mse(y, tx, w), w
def stochastic_gradient_descent(
        y, tx, initial_w, batch_size, max_iters, gamma):
    """Stochastic gradient descent algorithm."""
    # Define parameters to store w and loss
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_iters):
        # accumulate the per-batch stochastic gradients, then scale by the batch size
        gradient = 0
        for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size):
            gradient += compute_stoch_gradient(minibatch_y, minibatch_tx, w)
        gradient = gradient / batch_size
        # loss of the current w on the full data set
        loss = compute_mse(y, tx, w)
        # gradient step
        w = w - gamma * gradient
        # store w and loss
        ws.append(w)
        losses.append(loss)
    return losses, ws
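
The stochastic gradient descent example above relies on batch_iter and compute_stoch_gradient, which are not shown. The following is a minimal sketch of what they might look like, assuming compute_stoch_gradient returns the MSE gradient averaged over the mini-batch; both names and signatures are taken only from the calls above, not from a verified source.

import numpy as np

def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
    """Yield num_batches mini-batches of size batch_size from (y, tx)."""
    data_size = len(y)
    indices = np.random.permutation(data_size) if shuffle else np.arange(data_size)
    for i in range(num_batches):
        start = (i * batch_size) % data_size
        batch = indices[start:start + batch_size]
        yield y[batch], tx[batch]

def compute_stoch_gradient(y, tx, w):
    """Gradient of the MSE loss computed on a mini-batch."""
    e = y - tx.dot(w)
    return -tx.T.dot(e) / len(e)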
Example #4
def least_squares(y, tx):
    """Least squares using normal equations."""
    a = tx.T.dot(tx)
    b = tx.T.dot(y)

    w = np.linalg.solve(a, b)
    loss = costs.compute_mse(y, tx, w)
    return w, loss
Example #5
def grid_search(y, tx, w0, w1):
    """Algorithm for grid search."""
    losses = np.zeros((len(w0), len(w1)))
    for id_row, weight_0 in enumerate(w0):
        for id_col, weight_1 in enumerate(w1):
            losses[id_row][id_col] = costs.compute_mse(
                y, tx, np.array([weight_0, weight_1]))
    return losses
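
A hypothetical call to grid_search for a two-parameter model, assuming tx has two columns (for instance an intercept column and one feature) and that the costs module used above is available; the grid ranges are illustrative only.

w0 = np.linspace(-100, 100, 50)          # candidate values for the first weight
w1 = np.linspace(-100, 100, 50)          # candidate values for the second weight
losses = grid_search(y, tx, w0, w1)

# index of the smallest loss and the corresponding weight pair
i, j = np.unravel_index(np.argmin(losses), losses.shape)
best_w = np.array([w0[i], w1[j]])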
Example #6
def least_squares(y, tx):
    """Calculate the least squares solution; returns mse and the optimal weights."""
    ws = np.linalg.inv(tx.T @ tx) @ tx.T @ y
    mse = compute_mse(y, tx, ws)
    return mse, ws
Example #7
def least_squares_SGD(y, tx, initial_w, max_iters, gamma):
    """Stochastic gradient descent algorithm."""
    w = initial_w
    
    for (batch_y, batch_tx) in batch_iter(y, tx, 1, max_iters):
        gradient = compute_gradient(batch_y, batch_tx, w)
        w = w - gamma*gradient

    loss = compute_mse(y, tx, w)
    return w, loss
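
Examples #7 and #8 both call compute_gradient without showing it. Under the usual MSE formulation, it would look roughly like the sketch below; treat this as an assumption about the helper, not its actual definition.

def compute_gradient(y, tx, w):
    """Gradient of the MSE loss (1/(2N) * sum of squared errors) with respect to w."""
    e = y - tx.dot(w)
    return -tx.T.dot(e) / len(e)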
Example #8
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
    """Gradient descent algorithm."""
    w = initial_w
    
    for n_iter in range(max_iters):
        gradient = compute_gradient(y, tx, w)
        w = w - gamma*gradient
        
    loss = compute_mse(y, tx, w)
    return w, loss
Example #9
def least_squares(y, tx):
    """calculate the least squares."""
    # solve the normal equations X^T X w = X^T y
    a = np.matmul(np.transpose(tx), tx)
    b = np.matmul(np.transpose(tx), y)
    w = np.linalg.solve(a, b)
    MSE = compute_mse(y, tx, w)
    return MSE, w
Example #10
def ridge_regression(y, tx, lambda_):
    """implement ridge regression."""
    # rescale lambda to the normal-equation form (X^T X + 2 N lambda I) w = X^T y
    lambda_prime = lambda_ * 2 * len(y)
    tx_t = tx.T

    try:
        w_star = np.linalg.solve(tx_t @ tx + lambda_prime * np.identity(tx.shape[1]), tx_t @ y)
    except np.linalg.LinAlgError:
        print("********** SINGULAR MATRIX, SKIPPING... **********")
        w_star = np.ones(tx.shape[1])
        
    mse = compute_mse(y, tx, w_star)
    
    return w_star, mse
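
A quick sanity check of Example #10 on synthetic data: with lambda_ = 0 the regularisation term vanishes, so the ridge solution should be close to ordinary least squares, while a larger lambda_ shrinks the weights towards zero. The data below is purely illustrative.

rng = np.random.default_rng(0)
tx = np.column_stack([np.ones(200), rng.normal(size=200)])
y = tx @ np.array([1.0, 3.0]) + 0.1 * rng.normal(size=200)

w_ls, _ = ridge_regression(y, tx, 0.0)      # should be close to [1, 3]
w_rr, _ = ridge_regression(y, tx, 0.5)      # shrunk towards zero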
Example #11
def least_squares(y, tx):
    """calculate the least squares; returns mse and the optimal weights."""
    a = tx.T.dot(tx)
    b = tx.T.dot(y)
    w = np.linalg.solve(a, b)

    loss = compute_mse(y, tx, w)

    return loss, w
Example #12
def gradient_descent(y, tx, initial_w, max_iters, gamma):
    """Gradient descent algorithm."""
    w = initial_w
    for n_iter in range(max_iters):
        # compute the gradient; this compute_gradient variant also returns the error vector
        grad, err = compute_gradient(y, tx, w)
        # this compute_mse variant takes the error vector directly
        loss = compute_mse(err)
        # gradient step
        w = w - gamma * grad
        print("Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(
            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))

    return loss, w
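
Example #12 assumes a different pair of helpers from the other examples: a compute_gradient that also returns the error vector, and a compute_mse that works directly on that error vector. A plausible sketch of that variant, based only on how the functions are called above:

def compute_gradient(y, tx, w):
    """Return the MSE gradient together with the error vector e = y - Xw."""
    err = y - tx.dot(w)
    grad = -tx.T.dot(err) / len(err)
    return grad, err

def compute_mse(e):
    """MSE computed from an error vector, using the 1/(2N) convention."""
    return e.dot(e) / (2 * len(e))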
Example #13
def gradient_descent(y, tx, initial_w, max_iters, gamma):
    """Gradient descent algorithm."""
    # Define parameters to store w and loss
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_iters):
        # compute the gradient and the current loss
        gradient = compute_gradient(y, tx, w)
        loss = compute_mse(y, tx, w)
        # gradient step
        w = w - gamma * gradient
        # store w and loss
        ws.append(w)
        losses.append(loss)

    return losses, ws
Example #14
def least_squares(y, tx):
    """calculate the least squares solution."""
    w = np.linalg.lstsq(tx, y, rcond=None)[0]
    return compute_mse(y, tx, w), w
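
Example #14 delegates the whole problem to np.linalg.lstsq, which uses an SVD-based solver and therefore also handles rank-deficient tx, unlike the normal-equation versions that solve with or invert tx.T @ tx. A small illustrative check that the two approaches agree on well-conditioned synthetic data (the data and the comparison are assumptions for illustration only):

rng = np.random.default_rng(1)
tx = np.column_stack([np.ones(100), rng.normal(size=100)])
y = tx @ np.array([0.5, -2.0]) + 0.05 * rng.normal(size=100)

_, w_lstsq = least_squares(y, tx)               # Example #14 version: returns (mse, w)
w_solve = np.linalg.solve(tx.T @ tx, tx.T @ y)  # normal equations
assert np.allclose(w_lstsq, w_solve)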
Example #15
def ridge_regression(y, tx, lambda_):
    """implement ridge regression."""
    A = tx.T.dot(tx)
    I = np.identity(A.shape[0])
    w = np.linalg.solve(A + lambda_ * 2 * len(y) * I, tx.T.dot(y))
    return compute_mse(y, tx, w), w
Example #16
def ridge_regression(y, tx, lambda_):
    """Ridge regression via the regularised normal equations."""
    N, D = tx.shape
    w = np.linalg.inv(tx.T @ tx + 2 * N * lambda_ * np.identity(D)) @ tx.T @ y
    mse = compute_mse(y, tx, w)
    return w, mse
Example #17
def least_squares(y, tx):
    """calculate the least squares solution."""
    tx_t = tx.T
    w_star = np.linalg.solve(tx_t@tx, tx_t@y)
    mse = compute_mse(y, tx, w_star)
    return w_star, mse
Example #18
def least_squares(y, tx):
    """calculate the least squares."""
    w = np.linalg.inv(tx.T @ tx) @ tx.T @ y
    mse = compute_mse(y, tx, w)
    return w, mse
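
Putting the pieces together, a short end-to-end sketch of how these examples are typically combined: build the fold indices, run the ridge cross-validation from Example #1 over a grid of lambda values, and keep the value with the lowest test error. The helpers build_k_indices and compute_mse are the assumed sketches given earlier, x and y are assumed to be 1-D feature and target arrays, and note that Example #1 expects a ridge_regression that returns only the weight vector.

k_fold = 4
degree = 7
k_indices = build_k_indices(y, k_fold, seed=1)

lambdas = np.logspace(-4, 0, 30)
te_errors = []
for lambda_ in lambdas:
    _, loss_te = cross_validation(y, x, k_indices, k_fold, lambda_, degree)
    te_errors.append(loss_te)

best_lambda = lambdas[np.argmin(te_errors)]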