# Example no. 1
# 0
# --- Train the network and report wall-clock timing --------------------
mlp.train(x_data, t_data,
          epochs=2000, batch_size=10,
          epsilon=0.1,
          print_cost=True)

time_end = time.time()

print('Time used in training %f' % (time_end - time_begin))


# Check whether the circles near the middle one are correctly classified.

# Black points should score <= 1/2; anything above counts as misclassified.
mlp.get_activations_and_units(x_black)
wrong_black = (mlp.y > 1/2).squeeze()
print('Points misclassified as black: {}'.format(np.sum(wrong_black)))

# Red points should score >= 1/2; anything below counts as misclassified.
mlp.get_activations_and_units(x_red)
wrong_red = (mlp.y < 1/2).squeeze()
print('Points misclassified as red: {}'.format(np.sum(wrong_red)))


# --- Plot the probability mapping together with the data ---------------

delta = 0.01
x = np.arange(-3, 3, delta)
# NOTE(review): the original assigned y twice in a row; the redundant
# duplicate assignment has been removed (no behavior change).
y = np.arange(-3, 3, delta)

X, Y = np.meshgrid(x, y)

# Flatten the grid into an (n_points, 2) array so the network can
# evaluate every grid location in one forward pass.
x_pts = np.vstack((X.flatten(), Y.flatten())).T
x_middle = (x_black + x_red) / 2  # midpoints between paired black/red points

# Fresh network; train incrementally and redraw the decision surface
# after each short training burst to animate its evolution.
mlp = MLP(K_list, activation_functions, diff_activation_functions)

for k in range(1000):
    mlp.train(x_data, t_data,
              epochs=10, batch_size=10,
              epsilon=0.1,
              print_cost=True)

    mlp.get_activations_and_units(x_pts)
    grid_size = X.shape[0]
    Z = mlp.y.reshape(grid_size, grid_size)

    plt.axis('equal')
    plot_contour = plt.contourf(X, Y, Z, 10)
    plt.scatter(x_black[:, 0], x_black[:, 1], marker='o', color='black')
    plt.scatter(x_red[:, 0], x_red[:, 1], marker='x', color='red')

    plt.scatter(x_middle[:, 0], x_middle[:, 1], marker='.', color='white')

    # Brief pause lets the GUI event loop refresh the animated figure.
    plt.pause(0.01)
    plt.draw()

plt.show()
        # NOTE(review): this fragment is the body of loops over i and j whose
        # headers are not visible here — presumably a grid search over the
        # sizes of the two hidden layers; confirm against the full file.
        K_list = [D, i, j, K]  #list of dimensions of layers

        # Hidden layers use tanh; the output layer uses a sigmoid, matching
        # the binary-cross-entropy cost used below.
        activation_functions = [np.tanh, np.tanh, MLP.sigmoid]

        diff_activation_functions = [MLP.dtanh, MLP.dtanh, MLP.dsigmoid]

        #%%
        # Midpoints between paired black/red points, with target 0.5
        # (maximally uncertain) — used only by the commented-out check below.
        x_middle = (x_black + x_red) / 2
        nb_middle = x_middle.shape[0]
        t_middle = np.asarray([0.5] * nb_middle).reshape(nb_middle, 1)

        mlp = MLP(K_list, activation_functions, diff_activation_functions)

        # Train in 20 bursts of 10 epochs each (200 epochs total).
        for k in range(20):
            mlp.train(x_data,
                      t_data,
                      epochs=10,
                      batch_size=10,
                      epsilon=0.1,
                      print_cost=True)

        # Evaluate on the training data and record the best (i, j) so far.
        # data_min / i_min / j_min are defined outside this fragment.
        mlp.get_activations_and_units(x_data)
        data_cost = mlp.binary_cross_entropy(mlp.y, t_data)
        if data_cost < data_min:
            data_min = data_cost
            i_min = i
            j_min = j
            print(i, " ", j, " ", data_min, "\n")
        #mlp.get_activations_and_units(x_middle)
        #print(mlp.binary_cross_entropy(mlp.y, t_middle))