Example #1
def test_gradient():
    x = to_numpy([5, -9, 12])

    # Numerically estimated gradient of f at x.
    numerical_grad = gradient(x, f)

    # Closed-form gradient of f for comparison.
    analytical_grad = grad_f(x)
    assert_allclose(numerical_grad, analytical_grad)
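The gradient helper these tests rely on is not shown on this page. Below is a minimal sketch of what it is assumed to do, inferred from the call signature gradient(x, f, args=()) used throughout: a central-difference numerical gradient. This is an illustrative stand-in, not the project's actual implementation.

import numpy as np

def gradient(x, f, args=(), eps=1e-6):
    """Central-difference numerical gradient of f at x (hypothetical sketch)."""
    x = np.asarray(x, dtype=float)
    grad = np.empty_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step.flat[i] = eps
        # (f(x + h) - f(x - h)) / (2h) approximates the i-th partial derivative.
        grad.flat[i] = (f(x + step, *args) - f(x - step, *args)) / (2 * eps)
    return grad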
Example #2
def test_cost_gradient(X, y, weights, activation, output_activation):
    unrolled_weights = unroll(weights)
    neural_network = BaseNeuralNetwork(hidden_layers=(2, ),
                                       regularization_param=1,
                                       activation=activation)
    neural_network._output_activation = output_activation

    analytical_gradient = neural_network._cost_gradient(unrolled_weights, X, y)
    numerical_gradient = gradient(unrolled_weights, neural_network._cost,
                                  (X, y))

    assert_almost_equal(analytical_gradient, numerical_gradient)
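Several of these examples flatten a tuple of weight matrices into a single parameter vector with unroll before handing it to the optimizer or the gradient checker. A minimal sketch of that helper, assuming it simply concatenates the flattened arrays (the real one may also record shapes so the operation can be inverted):

import numpy as np

def unroll(matrices):
    """Concatenate a sequence of arrays into one flat parameter vector."""
    return np.concatenate([np.asarray(m).ravel() for m in matrices])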
Example #3
def test_cost_gradient(X, y, params, regularization_param, labels, use_softmax):
    # Because of the limited precision of floating-point numbers, the output of
    # the logistic function for very large or very small arguments is rounded,
    # so slightly perturbing such an argument doesn't change the function's
    # value, which makes numerical gradient calculation impossible. This can be
    # avoided by scaling X, thereby decreasing the absolute values of its
    # elements.
    X_scaled = DataScaler().fit(X).scale(X)

    y_np = to_numpy(y)
    logistic_regression = LogisticRegression(regularization_param=regularization_param, use_softmax=use_softmax)
    logistic_regression._labels = labels

    analytical_gradient = logistic_regression._cost_gradient(params, X_scaled, y_np)
    numerical_gradient = gradient(params, logistic_regression._cost, (X_scaled, y_np))

    assert_allclose(analytical_gradient, numerical_gradient)
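The comment in Example #3 can be checked directly: in float64 the logistic function returns exactly 1.0 for sufficiently large arguments, so a finite-difference probe sees no change at all. A small demonstration, using a local stand-in for the logistic function rather than the library's own:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

eps = 1e-6
z = 40.0
# exp(-40) is ~4e-18, far below float64 machine epsilon (~2.2e-16),
# so 1 + exp(-40) rounds to exactly 1.0 and the finite difference is zero.
print(sigmoid(z) == 1.0)                    # True
print(sigmoid(z + eps) - sigmoid(z - eps))  # 0.0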
Example #4
def test_cost_gradient__random_input(samples_count, features_count,
                                     classes_count, hidden_layers, activation,
                                     output_activation):
    random_state = np.random.RandomState(seed=7)
    X = np.asarray(random_state.rand(samples_count, features_count))
    # Assign class labels 1..classes_count cyclically, then one-hot encode them.
    y = one_hot(1 + np.mod(np.arange(samples_count) + 1, classes_count))[1]

    neural_network = BaseNeuralNetwork(hidden_layers=hidden_layers,
                                       activation=activation)
    neural_network._output_activation = output_activation
    initial_weights = neural_network._init_weights(X, y)
    weights = neural_network._optimize_params(X, y, unroll(initial_weights))

    analytical_gradient = neural_network._cost_gradient(weights, X, y)
    numerical_gradient = gradient(weights, neural_network._cost, (X, y))

    assert_almost_equal(analytical_gradient, numerical_gradient)
Example #5
def test_cost_gradient(features_count, regularization_param, Y):
    Y = to_numpy(Y)
    users_count = Y.shape[1]
    items_count = Y.shape[0]
    params = unroll(
        glorot_init(
            ((users_count, features_count), (items_count, features_count))))

    collaborative_filtering = CollaborativeFiltering(features_count,
                                                     regularization_param)
    collaborative_filtering._users_count = users_count
    collaborative_filtering._items_count = items_count

    analytical_gradient = collaborative_filtering._cost_gradient(params, Y)
    numerical_gradient = gradient(params, collaborative_filtering._cost, (Y, ))

    assert_allclose(analytical_gradient,
                    numerical_gradient,
                    rtol=1E-4,
                    atol=1E-4)
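The glorot_init helper above receives a tuple of shapes and returns randomly initialized parameter matrices. A minimal sketch, assuming standard Glorot/Xavier uniform initialization; the project's helper may differ in signature or in how it seeds randomness:

import numpy as np

def glorot_init(shapes, random_state=None):
    """Glorot/Xavier uniform initialization for each (rows, cols) shape (hypothetical sketch)."""
    rng = random_state if random_state is not None else np.random.RandomState()
    matrices = []
    for rows, cols in shapes:
        # The uniform limit sqrt(6 / (fan_in + fan_out)) keeps activation variance stable.
        limit = np.sqrt(6.0 / (rows + cols))
        matrices.append(rng.uniform(-limit, limit, size=(rows, cols)))
    return tuple(matrices)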