def backpropagate(network, input_vector, target):
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [output * (1 - output) * (output - target[i])
                     for i, output in enumerate(outputs)]

    # adjust weights for output layer (network[-1])
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # back-propagate errors to hidden layer
    hidden_deltas = [hidden_output * (1 - hidden_output) *
                     dot(output_deltas, [n[i] for n in network[-1]])
                     for i, hidden_output in enumerate(hidden_outputs)]

    # adjust weights for hidden layer (network[0])
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
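# A minimal usage sketch (not from the original listing): train a tiny network on
# XOR by calling backpropagate repeatedly. The network layout (a list of layers,
# each a list of neurons, each a list of weights ending with a bias weight), the
# random initialization, and the iteration count are illustrative; feed_forward is
# assumed to be defined as elsewhere in this listing.
import random
random.seed(0)
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_targets = [[0], [1], [1], [0]]
xor_network = [# hidden layer: 2 neurons, each with 2 input weights + 1 bias weight
               [[random.random() for _ in range(2 + 1)] for _ in range(2)],
               # output layer: 1 neuron with 2 input weights + 1 bias weight
               [[random.random() for _ in range(2 + 1)]]]

for _ in range(10000):
    for input_vector, target in zip(xor_inputs, xor_targets):
        backpropagate(xor_network, input_vector, target)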
def covariance(x, y):
    n = len(x)
    return dot(de_mean(x), de_mean(y)) / (n - 1)
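# A hedged sketch (not part of the original listing) of how covariance is usually
# rescaled into correlation, which is unitless and always lies between -1 and 1;
# it assumes the standard_deviation helper used elsewhere in this listing.
def correlation(x, y):
    stdev_x = standard_deviation(x)
    stdev_y = standard_deviation(y)
    if stdev_x > 0 and stdev_y > 0:
        return covariance(x, y) / stdev_x / stdev_y
    else:
        return 0    # if there is no variation, correlation is zero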
def predict(x_i, beta):
    return dot(x_i, beta)
def ridge_penalty(beta, alpha):
    # alpha controls how harsh the penalty is;
    # beta[0] is the constant term, so it is not penalized
    return alpha * dot(beta[1:], beta[1:])
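# A hedged sketch (not in the original listing) of how ridge_penalty is typically
# combined with the squared error of a single prediction; error(x_i, y_i, beta) is
# assumed to return y_i - predict(x_i, beta) as elsewhere in the regression code.
def squared_error_ridge(x_i, y_i, beta, alpha):
    """estimate error plus ridge penalty on beta"""
    return error(x_i, y_i, beta) ** 2 + ridge_penalty(beta, alpha)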
def transform_vector(v, components):
    return [dot(v, w) for w in components]
def project(v, w):
    """return the projection of v onto the direction w (assumed to be a unit vector)"""
    coefficient = dot(v, w)
    return scalar_multiply(coefficient, w)
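# A hedged sketch (not part of the original listing) of how project is used for
# deflation in PCA: once a component is found, subtract its projection from every
# row before searching for the next component. vector_subtract is assumed from the
# chapter's linear-algebra helpers.
def remove_projection_from_vector(v, w):
    """projects v onto w and subtracts the result from v"""
    return vector_subtract(v, project(v, w))

def remove_projection(X, w):
    return [remove_projection_from_vector(x_i, w) for x_i in X]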
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of the direction-w variance"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction w"""
    return dot(x_i, direction(w)) ** 2
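# A hedged sketch (not in the original listing): the whole-dataset versions simply
# sum the per-row contributions above, and are what you would hand to a
# gradient-based maximizer to find the first principal component. vector_sum
# (element-wise sum of a list of vectors) is assumed from the chapter's
# linear-algebra helpers.
def directional_variance(X, w):
    """the variance of the data in the direction determined by w"""
    return sum(directional_variance_i(x_i, w) for x_i in X)

def directional_variance_gradient(X, w):
    """the gradient of directional variance with respect to w"""
    return vector_sum([directional_variance_gradient_i(x_i, w) for x_i in X])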
print()

random.seed(0)  # so that you get the same results as me
bootstrap_betas = bootstrap_statistic(list(zip(x, daily_minutes_good)),
                                      estimate_sample_beta,
                                      100)

bootstrap_standard_errors = [
    standard_deviation([beta[i] for beta in bootstrap_betas])
    for i in range(4)
]

print("bootstrap standard errors", bootstrap_standard_errors)
print()

print("p_value(30.63, 1.174)", p_value(30.63, 1.174))
print("p_value(0.972, 0.079)", p_value(0.972, 0.079))
print("p_value(-1.868, 0.131)", p_value(-1.868, 0.131))
print("p_value(0.911, 0.990)", p_value(0.911, 0.990))
print()

print("regularization")

random.seed(0)
for alpha in [0.0, 0.01, 0.1, 1, 10]:
    beta = estimate_beta_ridge(x, daily_minutes_good, alpha=alpha)
    print("alpha", alpha)
    print("beta", beta)
    print("dot(beta[1:], beta[1:])", dot(beta[1:], beta[1:]))
    print("r-squared", multiple_r_squared(x, daily_minutes_good, beta))
    print()
def cosine_similarity(v, w):
    return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
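# A quick illustrative check (not from the original listing): cosine similarity is 1
# for vectors pointing the same way, 0 for orthogonal vectors, and -1 for opposite
# vectors, regardless of their lengths.
assert cosine_similarity([1, 1], [2, 2]) == 1.0     # same direction
assert cosine_similarity([1, 0], [0, 1]) == 0.0     # orthogonal
assert cosine_similarity([1, 1], [-1, -1]) == -1.0  # opposite directions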
def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))
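# A hedged sketch (not part of the original listing) of the feed_forward routine
# that backpropagate relies on: it takes a network (a list of layers, each a list
# of neurons, each a list of weights) and returns the outputs of every layer,
# appending a constant 1 to each layer's input to act as the bias term.
def feed_forward(neural_network, input_vector):
    """returns the output of every layer"""
    outputs = []
    for layer in neural_network:
        input_with_bias = input_vector + [1]              # add a bias input
        output = [neuron_output(neuron, input_with_bias)  # compute each neuron's output
                  for neuron in layer]
        outputs.append(output)
        input_vector = output                             # this layer's output feeds the next
    return outputs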
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires', 0 if not"""
    return step_function(dot(weights, x) + bias)
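# An illustrative check (not from the original listing): with weights [2, 2] and
# bias -3, the perceptron computes a logical AND, since the weighted sum is
# nonnegative only when both inputs are 1. (step_function(x) is assumed to return
# 1 for x >= 0 and 0 otherwise.)
and_weights = [2, 2]
and_bias = -3
assert perceptron_output(and_weights, and_bias, [1, 1]) == 1
assert perceptron_output(and_weights, and_bias, [1, 0]) == 0
assert perceptron_output(and_weights, and_bias, [0, 0]) == 0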
# and maximize using gradient descent
beta_hat = maximize_batch(fn, gradient_fn, beta_0)

print("beta_batch", beta_hat)

beta_0 = [1, 1, 1]
beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                               logistic_log_gradient_i,
                               x_train, y_train, beta_0)

print("beta stochastic", beta_hat)

true_positives = false_positives = true_negatives = false_negatives = 0

for x_i, y_i in zip(x_test, y_test):
    predict = logistic(dot(beta_hat, x_i))

    if y_i == 1 and predict >= 0.5:  # TP: paid and we predict paid
        true_positives += 1
    elif y_i == 1:                   # FN: paid and we predict unpaid
        false_negatives += 1
    elif predict >= 0.5:             # FP: unpaid and we predict paid
        false_positives += 1
    else:                            # TN: unpaid and we predict unpaid
        true_negatives += 1

precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)

print("precision", precision)
print("recall", recall)
def matrix_product_entry(A, B, i, j):
    return dot(get_row(A, i), get_column(B, j))
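# A hedged sketch (not from the original listing) of the full matrix product built
# on matrix_product_entry; shape (returning (num_rows, num_cols)) and make_matrix
# (building a matrix from an entry function) are assumed from the chapter's
# linear-algebra helpers.
from functools import partial

def matrix_multiply(A, B):
    n1, k1 = shape(A)
    n2, k2 = shape(B)
    if k1 != n2:
        raise ArithmeticError("incompatible shapes!")
    return make_matrix(n1, k2, partial(matrix_product_entry, A, B))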
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """here i is the index of the data point, j the index of the derivative"""
    return (y_i - logistic(dot(x_i, beta))) * x_i[j]
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(dot(x_i, beta)))
    else:
        return math.log(1 - logistic(dot(x_i, beta)))
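# A hedged sketch (not part of the original listing): the log likelihood of the
# whole dataset is the sum of the per-point terms above, and the per-point gradient
# collects logistic_log_partial_ij over every coefficient j.
def logistic_log_likelihood(x, y, beta):
    return sum(logistic_log_likelihood_i(x_i, y_i, beta)
               for x_i, y_i in zip(x, y))

def logistic_log_gradient_i(x_i, y_i, beta):
    """the gradient of the log likelihood corresponding to the ith data point"""
    return [logistic_log_partial_ij(x_i, y_i, beta, j)
            for j, _ in enumerate(beta)]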