Example #1
def reg_logistic_regression(y, tx, lambda_, gamma, max_iters):
    """ L2 reg logistic basic version """
    tx, _, _ = standardize(tx)
    model = LogisticRegression((tx, y),
                               regularizer="Ridge",
                               regularizer_p=lambda_)
    return model.train(lr=gamma, decay=1, max_iters=max_iters)
Example #2
def least_squares(y, tx):
    """calculate the least squares solution."""
    tx, _, _ = standardize(tx)
    A = np.dot(tx.T, tx)
    b = np.dot(tx.T, y)
    # Compute solution
    w_opt = np.linalg.solve(A, b)
    # Compute loss
    e = y - tx.dot(w_opt)
    N = len(e)
    MSE_opt = 1 / (2 * N) * np.dot(e.T, e)
    return w_opt, MSE_opt
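A quick numerical check of what least_squares computes, using a tiny synthetic dataset. This is only a sketch: it bypasses the standardize helper used above, and names such as rng and w_true are illustrative, not part of the original code.

import numpy as np

rng = np.random.default_rng(0)
N, D = 100, 3
tx = rng.normal(size=(N, D))
w_true = np.array([1.0, -2.0, 0.5])           # assumed "ground truth" weights
y = tx.dot(w_true) + 0.01 * rng.normal(size=N)

A = tx.T.dot(tx)                              # same normal equations least_squares() builds
b = tx.T.dot(y)
w_opt = np.linalg.solve(A, b)
print(w_opt)                                  # should be close to w_true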
Example #3
def lasso_logistic_regression(y, tx, lambda_, gamma, max_iters):
    """
    L1-regularized (lasso) logistic regression, basic version
    :param y:           given y
    :param tx:          data matrix
    :param lambda_:     given parameter for lasso logistic regression
    :param gamma:       initial learning rate
    :param max_iters:   maximum iterations
    :return:
    """
    tx, _, _ = standardize(tx)
    model = LogisticRegression((tx, y),
                               regularizer="Lasso",
                               regularizer_p=lambda_)
    return model.train(lr=gamma, decay=1, max_iters=max_iters)
Example #4
def ridge_regression(y, tx, lamb):
    """implement ridge regression."""
    tx, _, _ = standardize(tx)

    D = tx.shape[1]  # number of features
    N = len(y)  # Number of measurements
    A = np.dot(tx.T, tx) + 2 * N * lamb * np.eye(D)
    b = np.dot(tx.T, y)

    # Obtain optimal solution
    w_opt = np.linalg.solve(A, b)

    # Compute the loss
    e = y - np.dot(tx, w_opt)
    MSE = 1 / (2 * N) * np.dot(e.T, e)
    RMSE = np.sqrt(2 * MSE)
    return w_opt, RMSE
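To illustrate the regularized normal equations solved above, w = (X^T X + 2*N*lambda*I)^{-1} X^T y, the sketch below runs the same closed form on synthetic data (names are illustrative) and shows the solution norm shrinking as lambda grows.

import numpy as np

rng = np.random.default_rng(1)
N, D = 50, 4
tx = rng.normal(size=(N, D))
y = tx.dot(np.array([2.0, 0.0, -1.0, 0.5])) + 0.1 * rng.normal(size=N)

for lamb in (0.0, 0.1, 10.0):
    A = tx.T.dot(tx) + 2 * N * lamb * np.eye(D)   # regularized normal equations
    w = np.linalg.solve(A, tx.T.dot(y))
    print(lamb, np.linalg.norm(w))                # the norm shrinks as lamb grows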
Example #5
def least_squares_SGD(y, tx, gamma, max_iters):
    """Gradient descent algorithm."""
    # Initialisation
    tx, _, _ = standardize(tx)
    D = tx.shape[1]  # number of features
    initial_w = np.zeros((D, ))
    batch_size = 1
    # Start SGD.
    start_time = datetime.datetime.now()
    gradient_losses, ws = stochastic_gradient_descent(y, tx, initial_w,
                                                      batch_size, gamma,
                                                      max_iters)
    end_time = datetime.datetime.now()
    # Print result
    execution_time = (end_time - start_time).total_seconds()
    print("SGD: execution time={t:.3f} seconds".format(t=execution_time))
    # The last element is the optimum one
    return ws[-1]
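The stochastic_gradient_descent helper is not included in this excerpt. A minimal sketch of what such a helper might look like for the MSE objective, matching the (losses, ws) return order used above, is shown here; it is an assumption, not the project's actual implementation.

import numpy as np

def stochastic_gradient_descent(y, tx, initial_w, batch_size, gamma, max_iters):
    """Hypothetical mini-batch SGD for MSE (sketch only)."""
    w = initial_w
    losses, ws = [], [w]
    N = len(y)
    for _ in range(max_iters):
        idx = np.random.randint(0, N, size=batch_size)    # sample a mini-batch
        e = y[idx] - tx[idx].dot(w)                       # residuals on the batch
        w = w + gamma * tx[idx].T.dot(e) / batch_size     # step along -MSE gradient
        losses.append(np.dot(y - tx.dot(w), y - tx.dot(w)) / (2 * N))
        ws.append(w)
    return losses, ws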
Example #6
def logistic_regression(y, tx, gamma, max_iters):
    """ Logistic regression basic version """
    tx, _, _ = standardize(tx)
    model = LogisticRegression((tx, y))
    return model.train(lr=gamma, decay=1, max_iters=max_iters)
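The LogisticRegression class used here is likewise not part of the excerpt. As a rough sketch only (not the project's implementation), the gradient step such a wrapper presumably performs for the unregularized case looks like this:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def logistic_gd(y, tx, gamma, max_iters):
    """Plain gradient descent on the cross-entropy loss (sketch only)."""
    w = np.zeros(tx.shape[1])
    for _ in range(max_iters):
        grad = tx.T.dot(sigmoid(tx.dot(w)) - y) / len(y)  # cross-entropy gradient
        w = w - gamma * grad
    return w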
Example #7
            consecutive_small_count += 1
        elif diff < 0:
            increasing_count += 1

        if consecutive_small_count > stop_threshold:
            break
        elif increasing_count > stop_threshold:
            raise OverflowError
    return log_func


pts = (((0.1, 0.8, 0.2), 0), ((0.4, 0.92, 0.35), 0), ((0.3, 0.89, 0.22), 0),
       ((0.2, 0.81, 0.27), 0), ((0.75, 0.4, 0.4), 1), ((0.7, 0.79, 0.4), 1),
       ((0.9, 0.52, 0.73), 1), ((0.6, 0.01, 0.99), 1))

stdpts_with_stats = standardize(pts)
stdpts = stdpts_with_stats["points"]
stats = stdpts_with_stats["stats"]
print("Standardized points: {} // stats {}".format(stdpts, stats))
params = (0.1, 0.1, 0.1, 0.1)

log_func = logistic_function(params)
print("Cost 1: {}".format(non_regularized_cost(log_func, stdpts)))

final_func = regularized_logistic_regression_bounded(log_func, 0.1, 0.000001,
                                                     10, stdpts, 0.01)

print(list(final_func["params"]))

print("Output values: {}".format([((x, y), final_func["func"](x))
                                  for x, y in stdpts]))
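The helpers logistic_function, non_regularized_cost, and regularized_logistic_regression_bounded are not shown in this excerpt (only the tail of a training loop survives above). Assuming params holds a bias followed by one weight per feature, logistic_function presumably builds something like the following; this is a reconstruction, not the original code.

import math

def logistic_function(params):
    """Hypothetical reconstruction: sigmoid of an affine combination."""
    bias, weights = params[0], params[1:]
    def func(x):
        z = bias + sum(w * xi for w, xi in zip(weights, x))
        return 1.0 / (1.0 + math.exp(-z))   # sigmoid of bias + w . x
    return func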
Example #8
    func = nn_func(tensor, activations, std_vals)
    grad = nn_deriv(func, tensor, activations, std_vals, deriv_gap)
    return {
        "func": func,
        "coeffs": tensor,
        "fns": activations,
        "derivs": grad,
        "std_vals": std_vals
    }


pts = (((0.1, 0.8, 0.2), 0), ((0.4, 0.92, 0.35), 0), ((0.3, 0.89, 0.22), 0),
       ((0.2, 0.81, 0.27), 0), ((0.75, 0.4, 0.4), 1), ((0.7, 0.79, 0.4), 1),
       ((0.9, 0.52, 0.73), 1), ((0.6, 0.01, 0.99), 1))

std_dict = data_utils.standardize(pts)

x = (0.5, 0.2, 0.1)

# std = std_dict["stats"]  # computed statistics, not used here

std = {"avgs": (0, 0, 0), "stdevs": (1, 1, 1)}  # identity stats: inputs pass through unchanged

layers = []
layers.append(data_utils.init_matrix(5, 4,
                                     lambda n, m: random.randrange(0, 7)))
layers.append(data_utils.init_matrix(2, 6, lambda n, m: random.uniform(-1, 1)))

activations = (nn_function_defs.relu(), nn_function_defs.sigmoid())

network = nn(layers, activations, std)
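x is defined above but never used in this excerpt. Assuming the "func" entry of the dict returned by nn() is the network's forward pass, evaluating it would presumably look like this:

# Presumed forward pass through the assembled network (an assumption based on
# the dict built at the top of this example).
print("Network output for {}: {}".format(x, network["func"](x)))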