def backpropagate(network, input_vector, target):

    hidden_outputs, outputs = feed_forward(network, input_vector)

    # output * (1 - output) comes from the derivative of the sigmoid function
    output_deltas = [
        output * (1 - output) * (output - target[i])
        for i, output in enumerate(outputs)
    ]

    # adjust the weights for the output layer (network[-1]), one neuron at a time
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # back-propagate the errors to the hidden layer
    hidden_deltas = [
        hidden_output * (1 - hidden_output) *
        dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # adjust the weights for the hidden layer (network[0]), one neuron at a time
    for i, hidden_neuron in enumerate(network[0]):
        for j, input in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input
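
# The block below is a minimal usage sketch, not part of the original example: it
# supplies dot / sigmoid / neuron_output / feed_forward helpers assumed to match the
# ones these snippets rely on, and trains a tiny network on XOR purely for illustration.
import math
import random

def dot(v, w):
    """sum of elementwise products"""
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))

def feed_forward(network, input_vector):
    """run the input through every layer, returning the outputs of all layers"""
    outputs = []
    for layer in network:
        input_with_bias = input_vector + [1]               # add the bias input
        output = [neuron_output(neuron, input_with_bias)   # one output per neuron
                  for neuron in layer]
        outputs.append(output)
        input_vector = output                              # feed forward to the next layer
    return outputs

random.seed(0)
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_targets = [[0], [1], [1], [0]]
# 2 inputs -> 2 hidden neurons -> 1 output neuron; each neuron carries a bias weight
network = [[[random.random() for _ in range(3)] for _ in range(2)],
           [[random.random() for _ in range(3)]]]
# plain gradient steps with an implicit learning rate of 1; some seeds may need
# more iterations (or fail to converge) on XOR
for _ in range(10000):
    for input_vector, target in zip(xor_inputs, xor_targets):
        backpropagate(network, input_vector, target)
for input_vector in xor_inputs:
    print(input_vector, feed_forward(network, input_vector)[-1])
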
Example #2
    random.seed(0)  # so that you get the same results as me

    bootstrap_betas = bootstrap_statistic(list(zip(x, daily_minutes_good)),
                                          estimate_sample_beta, 100)

    bootstrap_standard_errors = [
        standard_deviation([beta[i] for beta in bootstrap_betas])
        for i in range(4)
    ]

    print("bootstrap standard errors", bootstrap_standard_errors)
    print()

    print("p_value(30.63, 1.174)", p_value(30.63, 1.174))
    print("p_value(0.972, 0.079)", p_value(0.972, 0.079))
    print("p_value(-1.868, 0.131)", p_value(-1.868, 0.131))
    print("p_value(0.911, 0.990)", p_value(0.911, 0.990))
    print()

    print("regularization")

    random.seed(0)
    for alpha in [0.0, 0.01, 0.1, 1, 10]:
        beta = estimate_beta_ridge(x, daily_minutes_good, alpha=alpha)
        print("alpha", alpha)
        print("beta", beta)
        print("dot(beta[1:],beta[1:])", dot(beta[1:], beta[1:]))
        print("r-squared", multiple_r_squared(x, daily_minutes_good, beta))
        print()


def cosine_similarity(v, w):
    return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
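
# A quick illustrative check of cosine_similarity (made-up vectors); it relies on the
# dot helper defined in the sketch near the top of this page.
print(cosine_similarity([1, 1, 1], [2, 2, 2]))   # 1.0  -> same direction
print(cosine_similarity([1, 0], [0, 1]))         # 0.0  -> orthogonal
print(cosine_similarity([1, 2], [-1, -2]))       # -1.0 -> opposite directions
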
Example #5
def covariance(x, y):
    n = len(x)
    return dot(de_mean(x), de_mean(y)) / (n - 1)
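
# Illustrative check of covariance; de_mean is assumed to subtract the mean from every
# element, matching the helper the source of these snippets uses, and dot comes from
# the sketch near the top of the page.
def de_mean(xs):
    x_bar = sum(xs) / len(xs)
    return [x - x_bar for x in xs]

print(covariance([1, 2, 3], [2, 4, 6]))   # 2.0: y moves twice as fast as x
print(covariance([1, 2, 3], [6, 4, 2]))   # -2.0: y moves opposite to x
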
def matrix_product_entry(A, B, i, j):
    return dot(get_row(A, i), get_column(B, j))
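
# Small demonstration of matrix_product_entry; get_row and get_column are assumed to
# pull a row or column out of a list-of-lists matrix, as in the original source.
def get_row(A, i):
    return A[i]

def get_column(A, j):
    return [A_i[j] for A_i in A]

A = [[1, 2],
     [3, 4]]
B = [[5, 6],
     [7, 8]]
print(matrix_product_entry(A, B, 0, 0))   # 1 * 5 + 2 * 7 = 19
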
Example #7
def project(v, w):
    """return the projection of v onto w"""
    coefficient = dot(v, w)
    return scalar_multiply(coefficient, w)
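
# Tiny demonstration of project; scalar_multiply is assumed to scale a vector
# elementwise. Note that project gives the usual orthogonal projection only when w is
# a unit vector (elsewhere on this page it is called with direction(w)).
def scalar_multiply(c, v):
    return [c * v_i for v_i in v]

print(project([3, 4], [1, 0]))   # [3, 0]: the component of [3, 4] along the x-axis
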
Example #8
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction w"""
    return dot(x_i, direction(w)) ** 2
def project(v, w):
    """вернуть проекцию v на направление w"""
    coefficient = dot(v, w)
    return scalar_multiply(coefficient, w)
def directional_variance_gradient_i(x_i, w):
    """вклад строки x_i в градиент w-направленной дисперсии"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
def directional_variance_i(x_i, w):
    """дисперсия строки x_i в направлении, определяемом w"""
    return dot(x_i, direction(w)) ** 2
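
# Sketch of how the directional-variance helpers above fit together; magnitude and
# direction are assumed to normalize w to unit length, as in the original source, dot
# comes from the sketch near the top of the page, and the data is made up.
import math

def magnitude(v):
    return math.sqrt(dot(v, v))

def direction(w):
    mag = magnitude(w)
    return [w_i / mag for w_i in w]

X = [[1.0, 2.0], [3.0, 6.0], [-2.0, -4.0]]
w = [1.0, 2.0]   # any non-zero vector; direction() rescales it to unit length
print(sum(directional_variance_i(x_i, w) for x_i in X))   # total variance along w
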
Example #12
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """here i is the index of the data point,
    j the index of the derivative"""

    return (y_i - logistic(dot(x_i, beta))) * x_i[j]
Example #13
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(dot(x_i, beta)))
    else:
        return math.log(1 - logistic(dot(x_i, beta)))
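
# Quick numeric check of logistic_log_likelihood_i; logistic is the standard sigmoid,
# assumed to match the definition these snippets rely on, and dot comes from the
# sketch near the top of the page.
import math

def logistic(x):
    return 1.0 / (1 + math.exp(-x))

x_i, beta = [1.0, 2.0], [0.5, 0.25]              # dot(x_i, beta) == 1.0
print(logistic_log_likelihood_i(x_i, 1, beta))   # log(logistic(1.0))     ~ -0.313
print(logistic_log_likelihood_i(x_i, 0, beta))   # log(1 - logistic(1.0)) ~ -1.313
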
Example #14
    # and maximize using gradient descent
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    print("beta_batch", beta_hat)

    beta_0 = [1, 1, 1]
    beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                                   logistic_log_gradient_i,
                                   x_train, y_train, beta_0)

    print("beta stochastic", beta_hat)

    true_positives = false_positives = true_negatives = false_negatives = 0

    for x_i, y_i in zip(x_test, y_test):
        predict = logistic(dot(beta_hat, x_i))

        if y_i == 1 and predict >= 0.5:  # TP: paid and we predict paid
            true_positives += 1
        elif y_i == 1:                   # FN: paid and we predict unpaid
            false_negatives += 1
        elif predict >= 0.5:             # FP: unpaid and we predict paid
            false_positives += 1
        else:                            # TN: unpaid and we predict unpaid
            true_negatives += 1

    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)

    print("precision", precision)
    print("recall", recall)
Example #15
def predict(x_i, beta):
    """the predicted value is just the dot product of x_i and beta"""
    return dot(x_i, beta)
Example #16
def ridge_penalty(beta, alpha):
    return alpha * dot(beta[1:], beta[1:])
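
# ridge_penalty leaves the constant term beta[0] unpenalized; a quick check with a
# made-up beta (dot comes from the sketch near the top of the page).
beta = [10.0, 1.0, -2.0]
print(ridge_penalty(beta, 0.5))   # 0.5 * (1 + 4) = 2.5; beta[0] does not contribute
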
def perceptron_output(weights, bias, x):
    """возвращает 1, если перцептрон 'активизируется', и 0, если нет"""
    return step_function(dot(weights, x) + bias)
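
# Classic illustration of perceptron_output: with these weights and bias it computes a
# logical AND; step_function is assumed to return 1 for non-negative inputs and 0
# otherwise, as in the original source, and dot comes from the sketch near the top.
def step_function(x):
    return 1 if x >= 0 else 0

and_weights, and_bias = [2, 2], -3
for x in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(x, perceptron_output(and_weights, and_bias, x))   # 1 only for [1, 1]
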
Example #18
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of
    the direction-w variance"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
def neuron_output(weights, inputs):
    """a neuron's output is the sigmoid of the weighted sum of its inputs"""
    return sigmoid(dot(weights, inputs))
Example #20
def transform_vector(v, components):
    return [dot(v, w) for w in components]
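
# transform_vector re-expresses v in the basis given by the principal components; here
# the "components" are just the standard basis rotated 45 degrees, purely to show the
# call shape (dot comes from the sketch near the top of the page).
import math

components = [[math.sqrt(0.5), math.sqrt(0.5)],
              [-math.sqrt(0.5), math.sqrt(0.5)]]
print(transform_vector([1.0, 0.0], components))   # approximately [0.707, -0.707]
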
Example #21
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """здесь i - индекс точки данных, j - индекс производной"""

    return (y_i - logistic(dot(x_i, beta))) * x_i[j]