def backpropagate(network, input_vector, target):

    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [
        output * (1 - output) * (output - target[i])
        for i, output in enumerate(outputs)
    ]

    # adjust weights for output layer (network[-1])
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # back-propagate errors to hidden layer
    hidden_deltas = [
        hidden_output * (1 - hidden_output) *
        dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # adjust weights for hidden layer (network[0])
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
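
backpropagate relies on a feed_forward helper that is not shown in this fragment; a minimal sketch consistent with how it is called above (one output vector per layer, so a two-layer network unpacks into hidden_outputs and outputs) could look like this:

def feed_forward(network, input_vector):
    # hedged sketch: assumes the network is a list of layers, each layer a
    # list of neurons, each neuron a list of weights with the bias weight last
    outputs = []
    for layer in network:
        input_with_bias = input_vector + [1]              # add the bias input
        output = [neuron_output(neuron, input_with_bias)  # each neuron's output
                  for neuron in layer]
        outputs.append(output)
        input_vector = output                             # feed it to the next layer
    return outputs
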
def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))
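
neuron_output in turn assumes sigmoid and dot helpers; hedged sketches of the usual definitions:

import math

def sigmoid(t):
    # logistic function: squashes any real number into (0, 1)
    return 1 / (1 + math.exp(-t))

def dot(v, w):
    # dot product of two equal-length vectors
    return sum(v_i * w_i for v_i, w_i in zip(v, w))
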
Example #3
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """here i is the index of the data point,
    j the index of the derivative"""

    return (y_i - logistic(dot(x_i, beta))) * x_i[j]
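
logistic_log_partial_ij gives one partial derivative for one data point; the full gradient used by logistic_log_gradient further down is assembled by collecting the partials over j and summing over the points. The definitions below are a hedged sketch matching those names:

def logistic_log_gradient_i(x_i, y_i, beta):
    # gradient of the log likelihood contributed by the i-th point
    return [logistic_log_partial_ij(x_i, y_i, beta, j)
            for j, _ in enumerate(beta)]

def logistic_log_gradient(x, y, beta):
    # sum the per-point gradients componentwise over the whole data set
    gradients = [logistic_log_gradient_i(x_i, y_i, beta)
                 for x_i, y_i in zip(x, y)]
    return [sum(g[j] for g in gradients) for j, _ in enumerate(beta)]
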
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires', 0 if not"""
    return step_function(dot(weights, x) + bias)
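
perceptron_output assumes a step_function helper; with the usual 0/1 step (and dot as above), hand-picked weights make the perceptron act as a simple logic gate. The AND-gate weights below are only an illustration:

def step_function(x):
    # fires (returns 1) once the weighted input reaches the threshold
    return 1 if x >= 0 else 0

# example: an AND gate, which fires only when both inputs are 1
and_weights = [2, 2]
and_bias = -3
assert perceptron_output(and_weights, and_bias, [1, 1]) == 1
assert perceptron_output(and_weights, and_bias, [0, 1]) == 0
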
Example #5
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(dot(x_i, beta)))
    else:
        return math.log(1 - logistic(dot(x_i, beta)))
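
logistic_log_likelihood_i handles a single observation and assumes a logistic helper; the logistic_log_likelihood used in the training code below is just the sum over all points. Hedged sketches:

import math

def logistic(x):
    # the logistic (sigmoid) function
    return 1.0 / (1 + math.exp(-x))

def logistic_log_likelihood(x, y, beta):
    # log likelihoods of independent observations add up
    return sum(logistic_log_likelihood_i(x_i, y_i, beta)
               for x_i, y_i in zip(x, y))
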
Example #6
    y = [row[2] for row in data]  # each element is paid_account
    rescaled_x = rescale(x)
    random.seed(0)
    x_train, x_test, y_train, y_test = train_test_split(rescaled_x, y, 0.33)

    print "linear regression:"

    #beta = estimate_beta(x_train, y_train)
    beta_initial = [1, 1, 1]
    beta = minimize_stochastic(squared_error, squared_error_gradient, x_train,
                               y_train, beta_initial, 0.001)
    print beta

    y_pred = []
    for x_i in x_test:  # predict on the held-out test set
        y_pred.append(logistic(dot(beta, x_i)))
    classification_report(y_test, y_pred)

    print "logistic regression:"

    # want to maximize log likelihood on the training data
    fn = partial(logistic_log_likelihood, x_train, y_train)  # still needs beta
    gradient_fn = partial(logistic_log_gradient, x_train,
                          y_train)  # still needs beta

    # maximize via batch gradient ascent
    beta_0 = [1, 1, 1]
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    print "beta_batch", beta_hat
def ridge_penalty(beta, alpha):
    # penalize the magnitude of every coefficient except the constant term beta[0]
    return alpha * dot(beta[1:], beta[1:])
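
estimate_beta_ridge, called in the regularization loop at the end of this page, is not shown; a sketch under the assumption that it adds ridge_penalty to the ordinary squared error and reuses minimize_stochastic (random and partial as already used elsewhere on this page):

def squared_error_ridge(x_i, y_i, beta, alpha):
    # per-point squared error plus the ridge penalty on beta
    return (y_i - dot(x_i, beta)) ** 2 + ridge_penalty(beta, alpha)

def squared_error_ridge_gradient(x_i, y_i, beta, alpha):
    # gradient of the per-point ridge objective; beta[0] is not penalized
    err = dot(x_i, beta) - y_i
    return [2 * err * x_ij + (0 if j == 0 else 2 * alpha * beta[j])
            for j, x_ij in enumerate(x_i)]

def estimate_beta_ridge(x, y, alpha):
    # fit a ridge regression with penalty alpha by stochastic gradient descent
    beta_initial = [random.random() for _ in x[0]]
    return minimize_stochastic(partial(squared_error_ridge, alpha=alpha),
                               partial(squared_error_ridge_gradient, alpha=alpha),
                               x, y, beta_initial, 0.001)
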
def predict(x_i, beta):
    return dot(x_i, beta)

    random.seed(0)  # so that you get the same results as me

    bootstrap_betas = bootstrap_statistic(zip(x, daily_minutes_good),
                                          estimate_sample_beta,
                                          100)

    bootstrap_standard_errors = [
        standard_deviation([beta[i] for beta in bootstrap_betas])
        for i in range(4)]

    print "bootstrap standard errors", bootstrap_standard_errors
    print

    print "p_value(30.63, 1.174)", p_value(30.63, 1.174)
    print "p_value(0.972, 0.079)", p_value(0.972, 0.079)
    print "p_value(-1.868, 0.131)", p_value(-1.868, 0.131)
    print "p_value(0.911, 0.990)", p_value(0.911, 0.990)
    print

    print "regularization"

    random.seed(0)
    for alpha in [0.0, 0.01, 0.1, 1, 10]:
        beta = estimate_beta_ridge(x, daily_minutes_good, alpha=alpha)
        print "alpha", alpha
        print "beta", beta
        print "dot(beta[1:],beta[1:])", dot(beta[1:], beta[1:])
        print "r-squared", multiple_r_squared(x, daily_minutes_good, beta)
        print
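
bootstrap_statistic and estimate_sample_beta, used above to compute the bootstrap standard errors, are not included in this fragment; hedged sketches of the usual definitions (estimate_beta is the un-penalized fit referenced earlier):

def bootstrap_sample(data):
    # resample the data set with replacement, keeping its original size
    return [random.choice(data) for _ in data]

def bootstrap_statistic(data, stats_fn, num_samples):
    # evaluate stats_fn on num_samples bootstrap resamples
    return [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)]

def estimate_sample_beta(sample):
    # sample is a list of (x_i, y_i) pairs; refit the regression on it
    x_sample = [x_i for x_i, _ in sample]
    y_sample = [y_i for _, y_i in sample]
    return estimate_beta(x_sample, y_sample)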