# Assumed imports (these tests follow the mlrose-hiive API; the exact import
# paths may differ between mlrose / mlrose_hiive versions):
import numpy as np
from mlrose_hiive import NetworkWeights, ContinuousOpt, gradient_descent, identity


def test_calculate_updates():
    """Test calculate_updates method"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    nodes = [4, 2, 1]

    fitness = NetworkWeights(X,
                             y,
                             nodes,
                             activation=identity,
                             bias=False,
                             is_classifier=False,
                             learning_rate=1)

    a = list(np.arange(8) + 1)
    b = list(0.01 * (np.arange(2) + 1))

    weights = a + b
    fitness.evaluate(weights)

    updates = fitness.calculate_updates()

    update1 = np.array([[-0.0017, -0.0034], [-0.0046, -0.0092],
                        [-0.0052, -0.0104], [0.0014, 0.0028]])

    update2 = np.array([[-3.17], [-4.18]])

    assert (np.allclose(updates[0], update1, atol=0.001)
            and np.allclose(updates[1], update2, atol=0.001))
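

# Illustrative sketch (not part of the original tests): how the flat
# 10-element weight vector above maps onto the nodes = [4, 2, 1] architecture.
# This assumes a row-major reshape into one matrix per layer pair, which
# matches the (4, 2) and (2, 1) shapes of update1 and update2 above.
def illustrate_weight_layout():
    nodes = [4, 2, 1]
    weights = np.array(list(np.arange(8) + 1) + list(0.01 * (np.arange(2) + 1)))

    layers = []
    start = 0
    for n_in, n_out in zip(nodes[:-1], nodes[1:]):
        end = start + n_in * n_out
        layers.append(np.reshape(weights[start:end], (n_in, n_out)))
        start = end

    assert [layer.shape for layer in layers] == [(4, 2), (2, 1)]
    assert start == len(weights) == 10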


def test_gradient_descent():
    """Test gradient_descent function"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    nodes = [4, 2, 1]

    fitness = NetworkWeights(X,
                             y,
                             nodes,
                             activation=identity,
                             bias=False,
                             is_classifier=False,
                             learning_rate=0.1)

    problem = ContinuousOpt(10,
                            fitness,
                            maximize=False,
                            min_val=-1,
                            max_val=1,
                            step=0.1)

    test_weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # eval_fitness is sign-flipped for a minimization problem, so multiply by
    # -1 to recover a loss that is directly comparable with best_fitness.
    test_fitness = -1 * problem.eval_fitness(test_weights)

    best_state, best_fitness, _ = gradient_descent(problem)

    assert (len(best_state) == 10 and min(best_state) >= -1
            and max(best_state) <= 1 and best_fitness < test_fitness)


def test_evaluate_no_bias_classifier():
    """Test evaluate method for binary classifier with no bias term"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    nodes = [4, 2, 1]

    fitness = NetworkWeights(X, y, nodes, activation=identity, bias=False)

    a = list(np.arange(8) + 1)
    b = list(0.01 * (np.arange(2) + 1))

    weights = a + b

    assert round(fitness.evaluate(weights), 4) == 0.7393
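

# Illustrative sketch (not part of the original tests): a by-hand reproduction
# of the 0.7393 loss asserted above.  It assumes the flat weights are reshaped
# row-major into a (4, 2) and a (2, 1) matrix, the hidden layer uses the
# identity activation, the single output node is passed through a sigmoid,
# and the score is the mean log loss, which reproduces the asserted value.
def illustrate_no_bias_classifier_loss():
    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])
    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    w1 = np.reshape(np.arange(8) + 1, (4, 2))            # hidden-layer weights
    w2 = np.reshape(0.01 * (np.arange(2) + 1), (2, 1))   # output-layer weights

    hidden = np.dot(X, w1)                                # identity activation
    y_pred = 1 / (1 + np.exp(-np.dot(hidden, w2)))        # sigmoid output

    loss = -np.mean(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred))
    assert round(loss, 4) == 0.7393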


def test_evaluate_no_bias_multi():
    """Test evaluate method for multivariate classifier with no bias
    term"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.array([[1, 1], [1, 0], [0, 0], [0, 0], [1, 0], [1, 1]])

    nodes = [4, 2, 2]

    fitness = NetworkWeights(X, y, nodes, activation=identity, bias=False)

    a = list(np.arange(8) + 1)
    b = list(0.01 * (np.arange(4) + 1))

    weights = a + b

    assert round(fitness.evaluate(weights), 4) == 0.7183
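

# Illustrative sketch (not part of the original tests): a by-hand reproduction
# of the 0.7183 loss asserted above, assuming a softmax over the two output
# nodes and a cross-entropy of the form -mean_i(sum_k y_ik * log(p_ik)).
def illustrate_no_bias_multi_loss():
    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])
    y = np.array([[1, 1], [1, 0], [0, 0], [0, 0], [1, 0], [1, 1]])

    w1 = np.reshape(np.arange(8) + 1, (4, 2))
    w2 = np.reshape(0.01 * (np.arange(4) + 1), (2, 2))

    hidden = np.dot(X, w1)                                      # identity activation
    logits = np.dot(hidden, w2)
    probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax

    loss = -np.mean(np.sum(y * np.log(probs), axis=1))
    assert round(loss, 4) == 0.7183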


def test_evaluate_bias_regressor():
    """Test evaluate method for regressor with bias term"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    nodes = [5, 2, 1]

    fitness = NetworkWeights(X,
                             y,
                             nodes,
                             activation=identity,
                             bias=True,
                             is_classifier=False)

    a = list(np.arange(10) + 1)
    b = list(0.01 * (np.arange(2) + 1))

    weights = a + b

    assert round(fitness.evaluate(weights), 4) == 0.4363
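

# Illustrative sketch (not part of the original tests): a by-hand reproduction
# of the 0.4363 loss asserted above.  It assumes bias=True appends a column of
# ones to X (hence the 5-node input layer), the regressor keeps the identity
# activation on the output node, and the score is the mean squared error.
def illustrate_bias_regressor_loss():
    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])
    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    X_bias = np.hstack([X, np.ones((6, 1))])          # appended bias column
    w1 = np.reshape(np.arange(10) + 1, (5, 2))
    w2 = np.reshape(0.01 * (np.arange(2) + 1), (2, 1))

    y_pred = np.dot(np.dot(X_bias, w1), w2)           # identity activations
    loss = np.mean((y - y_pred) ** 2)
    assert round(loss, 4) == 0.4363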


def test_gradient_descent_iter1():
    """Test gradient_descent function gets the correct answer after a
    single iteration"""

    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])

    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])

    nodes = [4, 2, 1]

    fitness = NetworkWeights(X,
                             y,
                             nodes,
                             activation=identity,
                             bias=False,
                             is_classifier=False,
                             learning_rate=0.1)

    problem = ContinuousOpt(10,
                            fitness,
                            maximize=False,
                            min_val=-1,
                            max_val=1,
                            step=0.1)

    init_weights = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])

    best_state, best_fitness, _ = gradient_descent(problem,
                                                   max_iters=1,
                                                   init_state=init_weights)

    x = np.array([-0.7, -0.7, -0.9, -0.9, -0.9, -0.9, -1, -1, -1, -1])

    assert (np.allclose(best_state, x, atol=0.001)
            and round(best_fitness, 2) == 19.14)
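

# Illustrative sketch (not part of the original tests): a by-hand reproduction
# of the single-iteration state above.  Assuming standard backpropagation with
# delta_out = y_pred - y, delta_hidden = delta_out @ W2.T (identity derivative
# is 1), updates = -learning_rate * inputs.T @ delta, and the new state clipped
# to [min_val, max_val], one step from the all-ones start lands on x above.
def illustrate_one_gd_step():
    X = np.array([[0, 1, 0, 1], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1],
                  [0, 0, 1, 1], [1, 0, 0, 0]])
    y = np.reshape(np.array([1, 1, 0, 0, 1, 1]), [6, 1])
    lr = 0.1

    w1 = np.ones((4, 2))
    w2 = np.ones((2, 1))

    hidden = np.dot(X, w1)                    # identity activation
    y_pred = np.dot(hidden, w2)               # identity output (regressor)

    delta_out = y_pred - y
    delta_hidden = np.dot(delta_out, w2.T)    # identity derivative == 1

    update_w1 = -lr * np.dot(X.T, delta_hidden)
    update_w2 = -lr * np.dot(hidden.T, delta_out)

    updates = np.concatenate([update_w1.ravel(), update_w2.ravel()])
    new_state = np.clip(np.ones(10) + updates, -1, 1)

    expected = np.array([-0.7, -0.7, -0.9, -0.9, -0.9, -0.9, -1, -1, -1, -1])
    assert np.allclose(new_state, expected, atol=0.001)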

Example #7

# Assumes NetworkWeights, ContinuousOpt and _NNBase are importable from the
# surrounding neural-network package (mlrose-hiive-style names).
def _build_problem_and_fitness_function(X,
                                        y,
                                        node_list,
                                        activation,
                                        learning_rate,
                                        bias,
                                        clip_max,
                                        is_classifier=True):
    # Initialize optimization problem
    fitness = NetworkWeights(X,
                             y,
                             node_list,
                             activation,
                             bias,
                             is_classifier,
                             learning_rate=learning_rate)
    num_nodes = _NNBase._calculate_state_size(node_list)
    problem = ContinuousOpt(length=num_nodes,
                            fitness_fn=fitness,
                            maximize=False,
                            min_val=-1 * clip_max,
                            max_val=clip_max,
                            step=learning_rate)
    return fitness, problem
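

# Illustrative sketch (hypothetical helper, not from the source): the state
# size passed to ContinuousOpt above is assumed to be the total number of
# network weights, i.e. the sum of products of consecutive layer sizes.  For
# node_list = [4, 2, 1] that is 4*2 + 2*1 = 10, matching the length used in
# the tests above.
def illustrate_state_size(node_list):
    return sum(n_in * n_out for n_in, n_out in zip(node_list[:-1], node_list[1:]))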