def backpropagate(network, input_vector, target):
    hidden_outputs, outputs = feed_forward(network, input_vector)

    # the output * (1 - output) is from the derivative of sigmoid
    output_deltas = [
        output * (1 - output) * (output - target[i]) for i, output in enumerate(outputs)
    ]

    # back-propagate errors to the hidden layer *before* updating the output
    # weights, so the hidden deltas use the weights that produced the error
    hidden_deltas = [
        hidden_output
        * (1 - hidden_output)
        * dot(output_deltas, [n[i] for n in network[-1]])
        for i, hidden_output in enumerate(hidden_outputs)
    ]

    # adjust weights for output layer (network[-1]); the step size is implicitly 1
    for i, output_neuron in enumerate(network[-1]):
        for j, hidden_output in enumerate(hidden_outputs + [1]):
            output_neuron[j] -= output_deltas[i] * hidden_output

    # adjust weights for hidden layer (network[0])
    for i, hidden_neuron in enumerate(network[0]):
        for j, input_j in enumerate(input_vector + [1]):
            hidden_neuron[j] -= hidden_deltas[i] * input_j
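
The helpers feed_forward, dot, and sigmoid are assumed from elsewhere in the codebase. Below is a minimal sketch of compatible definitions plus a training loop; the 2-2-1 network shape and the XOR task are illustrative assumptions, not part of the original.

import math
import random

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def sigmoid(t):
    return 1 / (1 + math.exp(-t))

def feed_forward(neural_network, input_vector):
    """return the outputs of every layer, so a one-hidden-layer network
    unpacks as: hidden_outputs, outputs = feed_forward(...)"""
    outputs = []
    for layer in neural_network:
        input_with_bias = input_vector + [1]  # append the bias input
        output = [sigmoid(dot(neuron, input_with_bias)) for neuron in layer]
        outputs.append(output)
        input_vector = output  # this layer's output feeds the next layer
    return outputs

# a 2-2-1 network learning XOR (shape and task chosen for illustration)
random.seed(0)
network = [[[random.random() for _ in range(3)] for _ in range(2)],  # hidden layer
           [[random.random() for _ in range(3)] for _ in range(1)]]  # output layer

xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_targets = [[0], [1], [1], [0]]

for _ in range(10000):  # repeated passes should drive the outputs toward XOR
    for input_vector, target in zip(xor_inputs, xor_targets):
        backpropagate(network, input_vector, target)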
Example 2
def make_graph_dot_product_as_vector_projection(plt):
    v = [2, 1]
    w = [math.sqrt(0.25), math.sqrt(0.75)]  # a unit vector: 0.25 + 0.75 == 1
    c = dot(v, w)                 # the length of the projection of v onto w
    vonw = scalar_multiply(c, w)  # the projection of v onto w
    o = [0, 0]                    # the origin

    plt.arrow(0,
              0,
              v[0],
              v[1],
              width=0.002,
              head_width=0.1,
              length_includes_head=True)
    plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
    plt.arrow(0,
              0,
              w[0],
              w[1],
              width=0.002,
              head_width=0.1,
              length_includes_head=True)
    plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
    plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
    plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
    plt.arrow(
        v[0],
        v[1],
        vonw[0] - v[0],
        vonw[1] - v[1],
        linestyle="dotted",
        length_includes_head=True,
    )
    plt.scatter(*zip(v, w, o), marker=".")
    plt.axis("equal")
    plt.show()
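
This plotting helper assumes math, a matplotlib pyplot module passed in as plt, and the dot and scalar_multiply vector helpers. A minimal driver, with the two vector helpers restated as assumptions:

import math
import matplotlib.pyplot as plt

def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

def scalar_multiply(c, v):
    return [c * v_i for v_i in v]

make_graph_dot_product_as_vector_projection(plt)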
def neuron_output(weights, inputs):
    return sigmoid(dot(weights, inputs))
def perceptron_output(weights, bias, x):
    """returns 1 if the perceptron 'fires', 0 if not"""
    return step_function(dot(weights, x) + bias)
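
A quick sanity check with the classic AND-gate parameters; step_function is restated here, and dot is assumed from the sketch after the first example:

def step_function(x):
    return 1 if x >= 0 else 0

# weights [2, 2] with bias -3 implement AND: the perceptron fires
# only when both inputs are 1 (2 + 2 - 3 >= 0)
assert perceptron_output([2, 2], -3, [1, 1]) == 1
assert perceptron_output([2, 2], -3, [0, 1]) == 0
assert perceptron_output([2, 2], -3, [1, 0]) == 0
assert perceptron_output([2, 2], -3, [0, 0]) == 0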
def ridge_penalty(beta, alpha):
    # alpha is a *hyperparameter* controlling how harsh the penalty is;
    # sometimes it's called "lambda", but that already means something in Python.
    # beta[1:] skips beta[0], so the constant term is never penalized.
    return alpha * dot(beta[1:], beta[1:])
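
To see where the penalty fits, here is the usual squared-error-plus-penalty objective, restated as a sketch (the error helper is an assumption; dot(x_i, beta) is the prediction, as in the predict example below):

def error(x_i, y_i, beta):
    return y_i - dot(x_i, beta)  # the residual for one data point

def squared_error_ridge(x_i, y_i, beta, alpha):
    """estimated squared error plus the ridge penalty on beta"""
    return error(x_i, y_i, beta) ** 2 + ridge_penalty(beta, alpha)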
def main():
    _x = [
        [1, 49, 4, 0],
        [1, 41, 9, 0],
        [1, 40, 8, 0],
        [1, 25, 6, 0],
        [1, 21, 1, 0],
        [1, 21, 0, 0],
        [1, 19, 3, 0],
        [1, 19, 0, 0],
        [1, 18, 9, 0],
        [1, 18, 8, 0],
        [1, 16, 4, 0],
        [1, 15, 3, 0],
        [1, 15, 0, 0],
        [1, 15, 2, 0],
        [1, 15, 7, 0],
        [1, 14, 0, 0],
        [1, 14, 1, 0],
        [1, 13, 1, 0],
        [1, 13, 7, 0],
        [1, 13, 4, 0],
        [1, 13, 2, 0],
        [1, 12, 5, 0],
        [1, 12, 0, 0],
        [1, 11, 9, 0],
        [1, 10, 9, 0],
        [1, 10, 1, 0],
        [1, 10, 1, 0],
        [1, 10, 7, 0],
        [1, 10, 9, 0],
        [1, 10, 1, 0],
        [1, 10, 6, 0],
        [1, 10, 6, 0],
        [1, 10, 8, 0],
        [1, 10, 10, 0],
        [1, 10, 6, 0],
        [1, 10, 0, 0],
        [1, 10, 5, 0],
        [1, 10, 3, 0],
        [1, 10, 4, 0],
        [1, 9, 9, 0],
        [1, 9, 9, 0],
        [1, 9, 0, 0],
        [1, 9, 0, 0],
        [1, 9, 6, 0],
        [1, 9, 10, 0],
        [1, 9, 8, 0],
        [1, 9, 5, 0],
        [1, 9, 2, 0],
        [1, 9, 9, 0],
        [1, 9, 10, 0],
        [1, 9, 7, 0],
        [1, 9, 2, 0],
        [1, 9, 0, 0],
        [1, 9, 4, 0],
        [1, 9, 6, 0],
        [1, 9, 4, 0],
        [1, 9, 7, 0],
        [1, 8, 3, 0],
        [1, 8, 2, 0],
        [1, 8, 4, 0],
        [1, 8, 9, 0],
        [1, 8, 2, 0],
        [1, 8, 3, 0],
        [1, 8, 5, 0],
        [1, 8, 8, 0],
        [1, 8, 0, 0],
        [1, 8, 9, 0],
        [1, 8, 10, 0],
        [1, 8, 5, 0],
        [1, 8, 5, 0],
        [1, 7, 5, 0],
        [1, 7, 5, 0],
        [1, 7, 0, 0],
        [1, 7, 2, 0],
        [1, 7, 8, 0],
        [1, 7, 10, 0],
        [1, 7, 5, 0],
        [1, 7, 3, 0],
        [1, 7, 3, 0],
        [1, 7, 6, 0],
        [1, 7, 7, 0],
        [1, 7, 7, 0],
        [1, 7, 9, 0],
        [1, 7, 3, 0],
        [1, 7, 8, 0],
        [1, 6, 4, 0],
        [1, 6, 6, 0],
        [1, 6, 4, 0],
        [1, 6, 9, 0],
        [1, 6, 0, 0],
        [1, 6, 1, 0],
        [1, 6, 4, 0],
        [1, 6, 1, 0],
        [1, 6, 0, 0],
        [1, 6, 7, 0],
        [1, 6, 0, 0],
        [1, 6, 8, 0],
        [1, 6, 4, 0],
        [1, 6, 2, 1],
        [1, 6, 1, 1],
        [1, 6, 3, 1],
        [1, 6, 6, 1],
        [1, 6, 4, 1],
        [1, 6, 4, 1],
        [1, 6, 1, 1],
        [1, 6, 3, 1],
        [1, 6, 4, 1],
        [1, 5, 1, 1],
        [1, 5, 9, 1],
        [1, 5, 4, 1],
        [1, 5, 6, 1],
        [1, 5, 4, 1],
        [1, 5, 4, 1],
        [1, 5, 10, 1],
        [1, 5, 5, 1],
        [1, 5, 2, 1],
        [1, 5, 4, 1],
        [1, 5, 4, 1],
        [1, 5, 9, 1],
        [1, 5, 3, 1],
        [1, 5, 10, 1],
        [1, 5, 2, 1],
        [1, 5, 2, 1],
        [1, 5, 9, 1],
        [1, 4, 8, 1],
        [1, 4, 6, 1],
        [1, 4, 0, 1],
        [1, 4, 10, 1],
        [1, 4, 5, 1],
        [1, 4, 10, 1],
        [1, 4, 9, 1],
        [1, 4, 1, 1],
        [1, 4, 4, 1],
        [1, 4, 4, 1],
        [1, 4, 0, 1],
        [1, 4, 3, 1],
        [1, 4, 1, 1],
        [1, 4, 3, 1],
        [1, 4, 2, 1],
        [1, 4, 4, 1],
        [1, 4, 4, 1],
        [1, 4, 8, 1],
        [1, 4, 2, 1],
        [1, 4, 4, 1],
        [1, 3, 2, 1],
        [1, 3, 6, 1],
        [1, 3, 4, 1],
        [1, 3, 7, 1],
        [1, 3, 4, 1],
        [1, 3, 1, 1],
        [1, 3, 10, 1],
        [1, 3, 3, 1],
        [1, 3, 4, 1],
        [1, 3, 7, 1],
        [1, 3, 5, 1],
        [1, 3, 6, 1],
        [1, 3, 1, 1],
        [1, 3, 6, 1],
        [1, 3, 10, 1],
        [1, 3, 2, 1],
        [1, 3, 4, 1],
        [1, 3, 2, 1],
        [1, 3, 1, 1],
        [1, 3, 5, 1],
        [1, 2, 4, 1],
        [1, 2, 2, 1],
        [1, 2, 8, 1],
        [1, 2, 3, 1],
        [1, 2, 1, 1],
        [1, 2, 9, 1],
        [1, 2, 10, 1],
        [1, 2, 9, 1],
        [1, 2, 4, 1],
        [1, 2, 5, 1],
        [1, 2, 0, 1],
        [1, 2, 9, 1],
        [1, 2, 9, 1],
        [1, 2, 0, 1],
        [1, 2, 1, 1],
        [1, 2, 1, 1],
        [1, 2, 4, 1],
        [1, 1, 0, 1],
        [1, 1, 2, 1],
        [1, 1, 2, 1],
        [1, 1, 5, 1],
        [1, 1, 3, 1],
        [1, 1, 10, 1],
        [1, 1, 6, 1],
        [1, 1, 0, 1],
        [1, 1, 8, 1],
        [1, 1, 6, 1],
        [1, 1, 4, 1],
        [1, 1, 9, 1],
        [1, 1, 9, 1],
        [1, 1, 4, 1],
        [1, 1, 2, 1],
        [1, 1, 9, 1],
        [1, 1, 0, 1],
        [1, 1, 8, 1],
        [1, 1, 6, 1],
        [1, 1, 1, 1],
        [1, 1, 1, 1],
        [1, 1, 5, 1],
    ]
    daily_minutes_good = [
        68.77,
        51.25,
        52.08,
        38.36,
        44.54,
        57.13,
        51.4,
        41.42,
        31.22,
        34.76,
        54.01,
        38.79,
        47.59,
        49.1,
        27.66,
        41.03,
        36.73,
        48.65,
        28.12,
        46.62,
        35.57,
        32.98,
        35,
        26.07,
        23.77,
        39.73,
        40.57,
        31.65,
        31.21,
        36.32,
        20.45,
        21.93,
        26.02,
        27.34,
        23.49,
        46.94,
        30.5,
        33.8,
        24.23,
        21.4,
        27.94,
        32.24,
        40.57,
        25.07,
        19.42,
        22.39,
        18.42,
        46.96,
        23.72,
        26.41,
        26.97,
        36.76,
        40.32,
        35.02,
        29.47,
        30.2,
        31,
        38.11,
        38.18,
        36.31,
        21.03,
        30.86,
        36.07,
        28.66,
        29.08,
        37.28,
        15.28,
        24.17,
        22.31,
        30.17,
        25.53,
        19.85,
        35.37,
        44.6,
        17.23,
        13.47,
        26.33,
        35.02,
        32.09,
        24.81,
        19.33,
        28.77,
        24.26,
        31.98,
        25.73,
        24.86,
        16.28,
        34.51,
        15.23,
        39.72,
        40.8,
        26.06,
        35.76,
        34.76,
        16.13,
        44.04,
        18.03,
        19.65,
        32.62,
        35.59,
        39.43,
        14.18,
        35.24,
        40.13,
        41.82,
        35.45,
        36.07,
        43.67,
        24.61,
        20.9,
        21.9,
        18.79,
        27.61,
        27.21,
        26.61,
        29.77,
        20.59,
        27.53,
        13.82,
        33.2,
        25,
        33.1,
        36.65,
        18.63,
        14.87,
        22.2,
        36.81,
        25.53,
        24.62,
        26.25,
        18.21,
        28.08,
        19.42,
        29.79,
        32.8,
        35.99,
        28.32,
        27.79,
        35.88,
        29.06,
        36.28,
        14.1,
        36.63,
        37.49,
        26.9,
        18.58,
        38.48,
        24.48,
        18.95,
        33.55,
        14.24,
        29.04,
        32.51,
        25.63,
        22.22,
        19,
        32.73,
        15.16,
        13.9,
        27.2,
        32.01,
        29.27,
        33,
        13.74,
        20.42,
        27.32,
        18.23,
        35.35,
        28.48,
        9.08,
        24.62,
        20.12,
        35.26,
        19.92,
        31.02,
        16.49,
        12.16,
        30.7,
        31.22,
        34.65,
        13.13,
        27.51,
        33.2,
        31.57,
        14.1,
        33.42,
        17.44,
        10.12,
        24.42,
        9.82,
        23.39,
        30.93,
        15.03,
        21.67,
        31.09,
        33.29,
        22.61,
        26.89,
        23.48,
        8.38,
        27.81,
        32.35,
        23.84,
    ]

    random.seed(0)
    _beta = estimate_beta(_x,
                          daily_minutes_good)  # [30.63, 0.972, -1.868, 0.911]
    logging.info("%r", "beta {}".format(_beta))
    logging.info(
        "%r",
        "r-squared {}".format(multiple_r_squared(_x, daily_minutes_good,
                                                 _beta)))

    logging.info("digression: the bootstrap")
    # 101 points all very close to 100
    close_to_100 = [99.5 + random.random() for _ in range(101)]

    # 101 points: one near 100, 50 of them near 0, 50 of them near 200
    far_from_100 = ([99.5 + random.random()] +
                    [random.random() for _ in range(50)] +
                    [200 + random.random() for _ in range(50)])

    logging.info(
        "%r", "bootstrap_statistic(close_to_100, median, 100): {}".format(
            bootstrap_statistic(close_to_100, median, 100)))
    logging.info(
        "%r", "bootstrap_statistic(far_from_100, median, 100): {}".format(
            bootstrap_statistic(far_from_100, median, 100)))

    random.seed(0)  # so that you get the same results as me

    bootstrap_betas = bootstrap_statistic(list(zip(_x, daily_minutes_good)),
                                          estimate_sample_beta, 100)

    bootstrap_standard_errors = [
        standard_deviation([beta[i] for beta in bootstrap_betas])
        for i in range(4)
    ]

    logging.info(
        "%r", "bootstrap standard errors {}".format(bootstrap_standard_errors))

    logging.info("%r", "p_value(30.63, 1.174) {}".format(p_value(30.63,
                                                                 1.174)))
    logging.info("%r", "p_value(0.972, 0.079) {}".format(p_value(0.972,
                                                                 0.079)))
    logging.info("%r",
                 "p_value(-1.868, 0.131) {}".format(p_value(-1.868, 0.131)))
    logging.info("%r", "p_value(0.911, 0.990) {}".format(p_value(0.911,
                                                                 0.990)))

    logging.info("regularization")

    random.seed(0)
    for _alpha in [0.0, 0.01, 0.1, 1, 10]:
        _beta = estimate_beta_ridge(_x, daily_minutes_good, alpha=_alpha)
        logging.info("%r", "alpha {}".format(_alpha))
        logging.info("%r", "beta {}".format(_beta))
        logging.info(
            "%r", "dot(beta[1:],beta[1:]) {}".format(dot(_beta[1:],
                                                         _beta[1:])))
        logging.info(
            "%r",
            "r-squared {}".format(
                multiple_r_squared(_x, daily_minutes_good, _beta)),
        )
def predict(x_i, beta):
    return dot(x_i, beta)
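
For example, using the fitted coefficients quoted in the main() above, with the columns read as [constant, friends, work hours, PhD]:

beta = [30.63, 0.972, -1.868, 0.911]  # from estimate_beta in main() above
x_i = [1, 49, 4, 0]                   # 49 friends, 4 work hours, no PhD
predict(x_i, beta)                    # 30.63 + 47.628 - 7.472 + 0, about 70.8 minutes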
Example 8
def matrix_product_entry(a_matrix, b_matrix, i, j):
    return dot(get_row(a_matrix, i), get_column(b_matrix, j))
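
get_row and get_column are the usual helpers; the matrix_product wrapper is an illustrative sketch of how the entry function composes into a full product (dot is assumed from the earlier sketches):

def get_row(a_matrix, i):
    return a_matrix[i]  # the i-th row

def get_column(a_matrix, j):
    return [a_i[j] for a_i in a_matrix]  # the j-th element of each row

def matrix_product(a_matrix, b_matrix):
    n_rows, n_cols = len(a_matrix), len(b_matrix[0])
    return [[matrix_product_entry(a_matrix, b_matrix, i, j)
             for j in range(n_cols)]
            for i in range(n_rows)]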
Example 9
def covariance(x, y):
    n = len(x)
    return dot(de_mean(x), de_mean(y)) / (n - 1)
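
covariance leans on de_mean (and mean), restated here so the example stands alone:

def mean(x):
    return sum(x) / len(x)

def de_mean(x):
    """translate x by subtracting its mean, so the result averages to 0"""
    x_bar = mean(x)
    return [x_i - x_bar for x_i in x]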
Example 10
def project(v, w):
    """return the projection of v onto w
    (assumes w is a direction vector with magnitude 1)"""
    coefficient = dot(v, w)  # the length of the projection
    return scalar_multiply(coefficient, w)
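
Since dot(v, w) is the projection length only when w has magnitude 1, the cleanest check is projecting onto a coordinate axis:

assert project([2, 1], [1, 0]) == [2, 0]  # projection onto the x-axis
assert project([2, 1], [0, 1]) == [0, 1]  # projection onto the y-axis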
Example 11
def transform_vector(v, components):
    return [dot(v, w) for w in components]
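
This is typically paired with a whole-dataset version, a one-line sketch of which is:

def transform(data, components):
    return [transform_vector(x_i, components) for x_i in data]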
Example 12
def directional_variance_gradient_i(x_i, w):
    """the contribution of row x_i to the gradient of
    the direction-w variance"""
    projection_length = dot(x_i, direction(w))
    return [2 * projection_length * x_ij for x_ij in x_i]
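
direction rescales w to unit length; its definition (together with magnitude) is restated here as an assumption, with dot as in the earlier sketches:

import math

def magnitude(v):
    return math.sqrt(dot(v, v))  # the length of v

def direction(w):
    mag = magnitude(w)
    return [w_i / mag for w_i in w]  # w rescaled to magnitude 1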
Example 13
def directional_variance_i(x_i, w):
    """the variance of the row x_i in the direction w"""
    return dot(x_i, direction(w))**2
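
Summing the per-row contributions gives the variance of the whole dataset in direction w, as in the companion function:

def directional_variance(x, w):
    """the variance of the data in the direction determined by w"""
    return sum(directional_variance_i(x_i, w) for x_i in x)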
Example 14
def main():
    data = [
        (0.7, 48000, 1),
        (1.9, 48000, 0),
        (2.5, 60000, 1),
        (4.2, 63000, 0),
        (6, 76000, 0),
        (6.5, 69000, 0),
        (7.5, 76000, 0),
        (8.1, 88000, 0),
        (8.7, 83000, 1),
        (10, 83000, 1),
        (0.8, 43000, 0),
        (1.8, 60000, 0),
        (10, 79000, 1),
        (6.1, 76000, 0),
        (1.4, 50000, 0),
        (9.1, 92000, 0),
        (5.8, 75000, 0),
        (5.2, 69000, 0),
        (1, 56000, 0),
        (6, 67000, 0),
        (4.9, 74000, 0),
        (6.4, 63000, 1),
        (6.2, 82000, 0),
        (3.3, 58000, 0),
        (9.3, 90000, 1),
        (5.5, 57000, 1),
        (9.1, 102000, 0),
        (2.4, 54000, 0),
        (8.2, 65000, 1),
        (5.3, 82000, 0),
        (9.8, 107000, 0),
        (1.8, 64000, 0),
        (0.6, 46000, 1),
        (0.8, 48000, 0),
        (8.6, 84000, 1),
        (0.6, 45000, 0),
        (0.5, 30000, 1),
        (7.3, 89000, 0),
        (2.5, 48000, 1),
        (5.6, 76000, 0),
        (7.4, 77000, 0),
        (2.7, 56000, 0),
        (0.7, 48000, 0),
        (1.2, 42000, 0),
        (0.2, 32000, 1),
        (4.7, 56000, 1),
        (2.8, 44000, 1),
        (7.6, 78000, 0),
        (1.1, 63000, 0),
        (8, 79000, 1),
        (2.7, 56000, 0),
        (6, 52000, 1),
        (4.6, 56000, 0),
        (2.5, 51000, 0),
        (5.7, 71000, 0),
        (2.9, 65000, 0),
        (1.1, 33000, 1),
        (3, 62000, 0),
        (4, 71000, 0),
        (2.4, 61000, 0),
        (7.5, 75000, 0),
        (9.7, 81000, 1),
        (3.2, 62000, 0),
        (7.9, 88000, 0),
        (4.7, 44000, 1),
        (2.5, 55000, 0),
        (1.6, 41000, 0),
        (6.7, 64000, 1),
        (6.9, 66000, 1),
        (7.9, 78000, 1),
        (8.1, 102000, 0),
        (5.3, 48000, 1),
        (8.5, 66000, 1),
        (0.2, 56000, 0),
        (6, 69000, 0),
        (7.5, 77000, 0),
        (8, 86000, 0),
        (4.4, 68000, 0),
        (4.9, 75000, 0),
        (1.5, 60000, 0),
        (2.2, 50000, 0),
        (3.4, 49000, 1),
        (4.2, 70000, 0),
        (7.7, 98000, 0),
        (8.2, 85000, 0),
        (5.4, 88000, 0),
        (0.1, 46000, 0),
        (1.5, 37000, 0),
        (6.3, 86000, 0),
        (3.7, 57000, 0),
        (8.4, 85000, 0),
        (2, 42000, 0),
        (5.8, 69000, 1),
        (2.7, 64000, 0),
        (3.1, 63000, 0),
        (1.9, 48000, 0),
        (10, 72000, 1),
        (0.2, 45000, 0),
        (8.6, 95000, 0),
        (1.5, 64000, 0),
        (9.8, 95000, 0),
        (5.3, 65000, 0),
        (7.5, 80000, 0),
        (9.9, 91000, 0),
        (9.7, 50000, 1),
        (2.8, 68000, 0),
        (3.6, 58000, 0),
        (3.9, 74000, 0),
        (4.4, 76000, 0),
        (2.5, 49000, 0),
        (7.2, 81000, 0),
        (5.2, 60000, 1),
        (2.4, 62000, 0),
        (8.9, 94000, 0),
        (2.4, 63000, 0),
        (6.8, 69000, 1),
        (6.5, 77000, 0),
        (7, 86000, 0),
        (9.4, 94000, 0),
        (7.8, 72000, 1),
        (0.2, 53000, 0),
        (10, 97000, 0),
        (5.5, 65000, 0),
        (7.7, 71000, 1),
        (8.1, 66000, 1),
        (9.8, 91000, 0),
        (8, 84000, 0),
        (2.7, 55000, 0),
        (2.8, 62000, 0),
        (9.4, 79000, 0),
        (2.5, 57000, 0),
        (7.4, 70000, 1),
        (2.1, 47000, 0),
        (5.3, 62000, 1),
        (6.3, 79000, 0),
        (6.8, 58000, 1),
        (5.7, 80000, 0),
        (2.2, 61000, 0),
        (4.8, 62000, 0),
        (3.7, 64000, 0),
        (4.1, 85000, 0),
        (2.3, 51000, 0),
        (3.5, 58000, 0),
        (0.9, 43000, 0),
        (0.9, 54000, 0),
        (4.5, 74000, 0),
        (6.5, 55000, 1),
        (4.1, 41000, 1),
        (7.1, 73000, 0),
        (1.1, 66000, 0),
        (9.1, 81000, 1),
        (8, 69000, 1),
        (7.3, 72000, 1),
        (3.3, 50000, 0),
        (3.9, 58000, 0),
        (2.6, 49000, 0),
        (1.6, 78000, 0),
        (0.7, 56000, 0),
        (2.1, 36000, 1),
        (7.5, 90000, 0),
        (4.8, 59000, 1),
        (8.9, 95000, 0),
        (6.2, 72000, 0),
        (6.3, 63000, 0),
        (9.1, 100000, 0),
        (7.3, 61000, 1),
        (5.6, 74000, 0),
        (0.5, 66000, 0),
        (1.1, 59000, 0),
        (5.1, 61000, 0),
        (6.2, 70000, 0),
        (6.6, 56000, 1),
        (6.3, 76000, 0),
        (6.5, 78000, 0),
        (5.1, 59000, 0),
        (9.5, 74000, 1),
        (4.5, 64000, 0),
        (2, 54000, 0),
        (1, 52000, 0),
        (4, 69000, 0),
        (6.5, 76000, 0),
        (3, 60000, 0),
        (4.5, 63000, 0),
        (7.8, 70000, 0),
        (3.9, 60000, 1),
        (0.8, 51000, 0),
        (4.2, 78000, 0),
        (1.1, 54000, 0),
        (6.2, 60000, 0),
        (2.9, 59000, 0),
        (2.1, 52000, 0),
        (8.2, 87000, 0),
        (4.8, 73000, 0),
        (2.2, 42000, 1),
        (9.1, 98000, 0),
        (6.5, 84000, 0),
        (6.9, 73000, 0),
        (5.1, 72000, 0),
        (9.1, 69000, 1),
        (9.8, 79000, 1),
    ]
    data = list(map(list, data))  # change tuples to lists

    _x = [[1] + row[:2]
          for row in data]  # each element is [1, experience, salary]
    _y = [row[2] for row in data]  # each element is paid_account

    logging.info("linear regression:")

    rescaled_x = rescale(_x)
    _beta = estimate_beta(rescaled_x, _y)
    logging.info("%r", "beta = {}".format(_beta))

    logging.info("logistic regression")

    random.seed(0)
    x_train, x_test, y_train, y_test = train_test_split(rescaled_x, _y, 0.33)

    # want to maximize log likelihood on the training data
    fn = partial(logistic_log_likelihood, x_train, y_train)
    gradient_fn = partial(logistic_log_gradient, x_train, y_train)

    # pick a starting point (fixed at all ones)
    beta_0 = [1, 1, 1]

    # and maximize using gradient descent
    beta_hat = maximize_batch(fn, gradient_fn, beta_0)

    logging.info("%r", "beta_batch {}".format(beta_hat))

    beta_0 = [1, 1, 1]
    beta_hat = maximize_stochastic(logistic_log_likelihood_i,
                                   logistic_log_gradient_i, x_train, y_train,
                                   beta_0)

    logging.info("%r", "beta stochastic {}".format(beta_hat))

    true_positives = false_positives = true_negatives = false_negatives = 0

    for _x_i, _y_i in zip(x_test, y_test):
        prediction = logistic(dot(beta_hat, _x_i))

        if _y_i == 1 and prediction >= 0.5:  # TP: paid and we predict paid
            true_positives += 1
        elif _y_i == 1:  # FN: paid and we predict unpaid
            false_negatives += 1
        elif prediction >= 0.5:  # FP: unpaid and we predict paid
            false_positives += 1
        else:  # TN: unpaid and we predict unpaid
            true_negatives += 1

    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)

    logging.info("%r", "precision {}".format(precision))
    logging.info("%r", "recall {}".format(recall))
Example 15
def logistic_log_partial_ij(x_i, y_i, beta, j):
    """here i is the index of the data point,
    j the index of the derivative"""

    return (y_i - logistic(dot(x_i, beta))) * x_i[j]
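
logistic itself, assumed by these examples, is the standard sigmoid:

import math

def logistic(x):
    return 1.0 / (1 + math.exp(-x))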
Example 16
def logistic_log_likelihood_i(x_i, y_i, beta):
    if y_i == 1:
        return math.log(logistic(dot(x_i, beta)))
    else:
        return math.log(1 - logistic(dot(x_i, beta)))
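
Assuming independent data points, the log likelihood of the whole dataset is the sum of the per-point terms, as in the companion function:

def logistic_log_likelihood(x, y, beta):
    return sum(logistic_log_likelihood_i(x_i, y_i, beta)
               for x_i, y_i in zip(x, y))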