Example #1
import numpy as np

def least_squares(y, tx):
    """Least squares regression using the normal equations."""
    lt = np.dot(tx.T, tx)
    rt = np.dot(tx.T, y)
    # solve normal equation
    w = np.linalg.solve(lt, rt)
    # compute loss
    loss = compute_ls_loss(y, tx, w)

    return (w, loss)
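
The compute_ls_loss helper called here is not part of the example. A minimal sketch, assuming it is the usual mean-squared-error loss on numpy arrays (the 1/(2N) scaling is an assumption, but it is consistent with the 2 * len(tx) * lambda_ regularization factor in the ridge example below):

def compute_ls_loss(y, tx, w):
    """Mean squared error of the linear model tx.dot(w) (assumed implementation)."""
    e = y - tx.dot(w)          # residuals
    return e.dot(e) / (2 * len(y))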
Example #2
def ridge_regression(y, tx, lambda_):
    """Ridge regression using normal equations."""
    # add regularization term
    reg = 2 * len(tx) * lambda_ * np.identity(tx.shape[1])
    lt = np.dot(tx.T, tx) + reg
    rt = np.dot(tx.T, y)
    # solve normal equation
    w = np.linalg.solve(lt, rt)
    # compute loss
    loss = compute_ls_loss(y, tx, w)

    return (w, loss)
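
A short usage sketch for the two closed-form solvers above, on hypothetical synthetic data (the names x, true_w, and the chosen lambda_ are made up for illustration; assumes numpy imported as np and compute_ls_loss defined as sketched under Example #1):

import numpy as np

np.random.seed(0)
x = np.random.randn(100, 3)                      # 100 samples, 3 features
tx = np.c_[np.ones(100), x]                      # design matrix with a bias column
true_w = np.array([0.5, 1.0, -2.0, 3.0])
y = tx.dot(true_w) + 0.1 * np.random.randn(100)  # noisy targets

w_ls, loss_ls = least_squares(y, tx)
# the 2 * len(tx) * lambda_ factor means lambda_ penalizes ||w||^2 on top of the 1/(2N) MSE
w_ridge, loss_ridge = ridge_regression(y, tx, lambda_=1e-3)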
Example #3
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
    """Linear regression using gradient descent."""
    w = initial_w
    losses = []
    threshold = 1e-8
    for n_iter in range(max_iters):
        # compute loss and gradient
        loss = compute_ls_loss(y, tx, w)
        grad = compute_ls_gradient(y, tx, w)
        # update w by gradient
        w = w - gamma * grad
        # log info
        # print("Gradient Descent({bi}/{ti}): loss={l}".format(
        #     bi=n_iter, ti=max_iters - 1, l=loss))
        # convergence criterion
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break

    return (w, loss)
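
The compute_ls_gradient helper is likewise assumed to be defined elsewhere. A minimal sketch consistent with the MSE loss sketched under Example #1:

def compute_ls_gradient(y, tx, w):
    """Gradient of the MSE loss with respect to w (assumed implementation)."""
    e = y - tx.dot(w)          # residuals
    return -tx.T.dot(e) / len(y)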
Example #4
def least_squares_SGD(y, tx, initial_w, max_iters, gamma):
    """Linear regression using stochastic gradient descent."""
    # Define parameters to store w and loss
    w = initial_w
    losses = []
    threshold = 1e-8
    for n_iter in range(max_iters):
        # get a random minibatch of data
        for minibatch_y, minibatch_x in batch_iter(y, tx, 1):
            # compute loss and gradient
            loss = compute_ls_loss(minibatch_y, minibatch_x, w)
            grad = compute_ls_gradient(minibatch_y, minibatch_x, w)
            # update w by gradient
            w = w - gamma * grad
        # log info
        # print("Stochastic Gradient Descent({bi}/{ti}): loss={l}".format(
        #     bi=n_iter, ti=max_iters - 1, l=loss))
        # convergence criterion
        losses.append(loss)
        if len(losses) > 1 and np.abs(losses[-1] - losses[-2]) < threshold:
            break

    return (w, loss)
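
The stochastic variant relies on a batch_iter helper that is also not shown. A minimal sketch that yields random minibatches, matching the batch_iter(y, tx, 1) call above (the num_batches and shuffle parameters are assumptions; with the defaults it yields one minibatch per outer iteration):

import numpy as np

def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
    """Yield (minibatch_y, minibatch_tx) pairs (assumed implementation)."""
    n = len(y)
    indices = np.random.permutation(n) if shuffle else np.arange(n)
    for b in range(num_batches):
        start = b * batch_size
        end = min(start + batch_size, n)
        if start < end:
            yield y[indices[start:end]], tx[indices[start:end]]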