def test_forward_backward(self):
    """Verify NormalizationLayer's forward mapping and its backward pass.

    First checks that known inputs map to the expected normalized values,
    then cross-checks the analytic backward gradient against a numeric
    Jacobian on a random input.
    """
    layer = NormalizationLayer(
        np.array([0.0, 0.0, -5.0, -2.0]),
        np.array([5.0, 5.0, 5.0, 2.0]),
        np.array([-1.0, -1.0, -1.0, -1.0]),
        np.array([1.0, 1.0, 1.0, 1.0]),
    )

    # Forward: inputs at/inside the source range land at the expected
    # points of the target range.
    out = layer.forward(np.array([5.0, 4.0, -5.0, -1.0]))
    self.assertEqual(out.shape, (4,))
    assert_almost_equal(out, np.array([1.0, 0.6, -1.0, -0.5]))

    # Backward: compare analytic gradient with a numeric estimate.
    sample = np.random.rand(4)
    numeric = layer.numeric_gradient(sample)
    layer.forward(sample)
    delta = layer.backward([1, 1, 1, 1])
    self.assertEqual(delta.shape, (4,))
    # The layer is elementwise, so the analytic gradient should equal
    # the diagonal of the numeric Jacobian.
    assert_almost_equal(np.diag(numeric), delta, decimal=5)
p.draw_decision_surface(10, agent.Q, np.array([[0, 0], [5.0, 5.0]])) plt.show() else: # norm = NormalizationLayer( # np.array([0.0,0.0,0.0,-3.0,-3.0]), # np.array([5.0,5.0,5.0,3.0,3.0]), # np.array([-1.0,-1.0,-1.0,-1.0,-1.0]), # np.array([1.0,1.0,1.0,1.0,1.0]) # ) # norm = NormalizationLayer( # np.array([0.0,0.0,0.0,-3.0]), # np.array([5.0,5.0,5.0,3.0]), # np.array([-1.0,-1.0,-1.0,-1.0]), # np.array([1.0,1.0,1.0,1.0]) # ) norm = NormalizationLayer(np.array([0.0, 0.0]), np.array([5.0, 5.0]), np.array([0.0, 0.0]), np.array([1.0, 1.0])) W1 = utils.SharedWeights('gaussian', 2 + 1, 2) W2 = utils.SharedWeights('gaussian', 2 + 1, 3) Q = Sequential( norm, LinearLayer(2, 2, weights=W1), TanhLayer, LinearLayer(2, 3, weights=W2), # TanhLayer ) W3 = utils.SharedWeights('gaussian', 2 + 1, 2) W4 = utils.SharedWeights('gaussian', 2 + 1, 3) # W3 = utils.SharedWeights(np.array([[10.0,-10.0,0.0],[-10.0,10.0,0.0]]),2+1,2) #W2 = utils.SharedWeights('gaussian',2+1,2) Q_hat = Sequential( norm,
img1 = ax.imshow(image1, cmap=plt.get_cmap('Greys')) ax1 = fig.add_subplot(1, 2, 2) img2 = ax1.imshow(image2, cmap=plt.get_cmap('Greys')) plt.show() train = load_mnist_dataset("training", "mnist") mean_val = [np.zeros(784) for i in range(10)] tot_val = np.zeros(10) for x, t in train: mean_val[np.argmax(t)] += x tot_val[np.argmax(t)] += 1 normalization_net = Sequential( NormalizationLayer(0, 255, -1, 1), SignLayer, ) for i in range(10): mean_val[i] = mean_val[i] / tot_val[i] num = mean_val[i].reshape(28, 28) plt.imshow(normalization_net.forward(num), cmap=plt.get_cmap('Greys')) # plt.imshow(num)) plt.show() hop_net = Hopfield(784) stored_numers = [0, 1] #numbers stored in the network for i in stored_numers:
err = 0 for (img, target) in test: #print str(np.argmax(model.forward(test_data[ind])))+' '+str(np.argmax(test_targets[ind])) if np.argmax(model.forward(img)) != np.argmax(target): err += 1 print(1.0 - err / float(len(test))) * 100.0 if load_net: print "Load Network" model = StoreNetwork.load(name_net) else: print "New Network" #Two layer network model = Sequential([ NormalizationLayer(0, 255, -0.1, 0.1), LinearLayer(784, 10, weights='norm_random'), # TanhLayer, # LinearLayer(50, 10, weights='norm_random'), # TanhLayer, # NormalizationLayer(0,10,0,1), # SigmoidLayer() ]) # display = ShowTraining(epochs_num = epochs) trainer = Trainer(show_training=False) #, show_function = display.show) J_list, dJdy_list, J_test = trainer.learn( model=model, train=train,
784, [ { "size": 32, "output_layer": TanhLayer, "weights": W }, { "size": 784, "output_layer": TanhLayer } #, "weights": W.T()} ]) ae.choose_network([0, 1]) #ae.choose_network() model = Sequential([ NormalizationLayer(0, 255, -0.1, 0.1), ae, NormalizationLayer(-1, 1, 0, 255), ]) plt.figure(12) plt.figure(13) train = [(t / 255.0, t / 255.0) for (t, v) in train[:100]] # train = [(t,t) for (t,v) in train[:100]] display = ShowTraining(epochs_num=epochs) trainer = Trainer(show_training=True, show_function=display.show) J_list, dJdy_list = trainer.learn(