Example #1
import copy

import numpy as np


def test_net(train_set, train_labels, valid_set, valid_labels, test_set, learning_rate,
             decrease_constant, size, l2, l1, function):
    """
    Train and validate the neural net with 
    a given set of parameters.
    Returns the final test output.
    """    
    neuralNet = NeuralNetwork(lr=learning_rate, dc=decrease_constant, sizes=size, L2=l2, L1=l1,
                     seed=5678, tanh=function, n_epochs=10)
    
    n_classes = 10
    
    print "Training..."
    # Early stopping code
    best_val_error = np.inf # Begin with infinite error
    best_it = 0 # Iteration of the best neural net so far wrt valid error
    look_ahead = 5
    n_incr_error = 0
    for current_stage in range(1, 500 + 1):

        # Stop training once the net has not improved for `look_ahead` epochs.
        if n_incr_error >= look_ahead:
            break
        neuralNet.n_epochs = current_stage
        neuralNet.train(train_set, train_labels, n_classes)
        n_incr_error += 1
        
        outputs, errors, accuracy = neuralNet.test(train_set, train_labels)
        print 'Epoch',current_stage,'|',
        print 'Training accuracy: ' + '%.3f'%accuracy+',', ' |',
        outputs, errors, accuracy = neuralNet.test(valid_set, valid_labels)
        print 'Validation accuracy: ' + '%.3f'%accuracy
        
        # Check if this model is better than the previous:
        error = 1.0 - accuracy
        if error < best_val_error:
            best_val_error = error
            best_it = current_stage
            n_incr_error = 0
            best_model = copy.deepcopy(neuralNet) # Save the model.
    
    # TODO: clear the train and valid sets to free memory.
    # Evaluate the best model on the test set
    outputs = best_model.predict(test_set)
    return outputs
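
The early-stopping loop above (and in the next two examples) is a patience-based pattern worth isolating. A minimal, self-contained sketch follows; train_one_epoch and validation_error are hypothetical stand-ins for the NeuralNetwork methods used in these examples:

import copy

import numpy as np

def train_with_early_stopping(model, look_ahead=5, max_epochs=500):
    # Keep the model with the lowest validation error; stop once
    # `look_ahead` consecutive epochs pass without improvement.
    best_val_error = np.inf
    best_model = None
    n_incr_error = 0
    for epoch in range(1, max_epochs + 1):
        if n_incr_error >= look_ahead:
            break
        model.train_one_epoch()           # hypothetical: run one more epoch
        n_incr_error += 1
        error = model.validation_error()  # hypothetical: error on the validation set
        if error < best_val_error:
            best_val_error = error
            best_model = copy.deepcopy(model)
            n_incr_error = 0
    return best_model, best_val_error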
Example #2
import numpy as np


def validate_net(train_set, train_labels, valid_set, valid_labels, learning_rate,
                 decrease_constant, size, l2, l1, function):
    """
    Train and validate the neural net with 
    a given set of parameters.
    Return the best accuracy.
    """    
    neuralNet = NeuralNetwork(lr=learning_rate, dc=decrease_constant, sizes=size, L2=l2, L1=l1,
                     seed=5678, tanh=function, n_epochs=10)
    
    n_classes = 10
    
    print "Training..."
    # Early stopping code @Hugo Larochelle (partially)
    best_val_error = np.inf # Begin with infinite error
    best_it = 0 # Iteration of the best neural net so far wrt valid error
    look_ahead = 5
    n_incr_error = 0
    for current_stage in range(1, 500 + 1):

        # Stop training once the net has not improved for `look_ahead` epochs.
        if n_incr_error >= look_ahead:
            break
        neuralNet.n_epochs = current_stage
        neuralNet.train(train_set, train_labels, n_classes)
        n_incr_error += 1
        
        outputs, errors, train_accuracy = neuralNet.test(train_set, train_labels)
        print 'Epoch',current_stage,'|',
        print 'Training accuracy: ' + '%.3f'%train_accuracy+',', ' |',
        outputs, errors, valid_accuracy = neuralNet.test(valid_set, valid_labels)
        print 'Validation accuracy: ' + '%.3f'%valid_accuracy
        
        # Check if this model is better than the previous:
        error = 1.0 - valid_accuracy
        if error < best_val_error:
            best_val_error = error
            best_train_accuracy = train_accuracy            
            n_incr_error = 0
    
    return 1 - best_val_error, best_train_accuracy
Example #3
myObject = NeuralNetwork(sizes=sizes,
                         seed=seed,
                         parameter_initialization=(pretrained_bs, pretrained_Ws))

print "Fine-tuning..."
# Early stopping code
best_val_error = np.inf
best_it = 0
str_header = 'best_it\t'
look_ahead = 5
n_incr_error = 0
for stage in range(1, 500 + 1):
    if n_incr_error >= look_ahead:
        break
    myObject.n_epochs = stage
    myObject.train(trainset)
    n_incr_error += 1
    outputs, costs = myObject.test(trainset)
    errors = np.mean(costs,axis=0)
    print 'Epoch',stage,'|',
    print 'Training errors: classif=' + '%.3f'%errors[0]+',', 'NLL='+'%.3f'%errors[1] + ' |',
    outputs, costs = myObject.test(validset)
    errors = np.mean(costs,axis=0)
    print 'Validation errors: classif=' + '%.3f'%errors[0]+',', 'NLL='+'%.3f'%errors[1]
    error = errors[0]
    if error < best_val_error:
        best_val_error = error
        best_it = stage
        n_incr_error = 0
        best_model = copy.deepcopy(myObject)
Example #4
import os

import numpy as np


def main(argv):
    if args.seed:
        np.random.seed(args.seed)

    map = Map(MAP_WIDTH, MAP_HEIGHT)
    net = NeuralNetwork(2, args.layer_neurons, 1, args.hidden_layers, args.bias)
    print net
    if args.train:
        # training data
        train_d0, train_d1 = map.dataset(0, MAP_WIDTH + MAP_HEIGHT), \
                             map.dataset(1, MAP_WIDTH + MAP_HEIGHT)
        td0 = np.array([[0]] * train_d0.shape[0], dtype=float)
        td1 = np.array([[1]] * train_d1.shape[0], dtype=float)
        t = np.concatenate((td0, td1), axis=0) # already normalized
        # input
        x = np.concatenate((train_d0, train_d1), axis=0)
        x_normalized = x / np.amax(x, axis=0)
        
        print 'Training...'
        if args.logging:
            with open('training.log', 'w') as f:
                for epoch in xrange(args.epochs):
                    f.write('Epoch {}\n'.format(epoch))
                    f.write("Input:\n{}\n".format(x_normalized.T))
                    f.write("Actual Output:\n{}\n".format(t.T))
                    f.write("Predicted Output:\n{}\n".format(np.round(net.forward(x_normalized).T)))
                    f.write("Loss:\n{}\n\n".format(str(np.mean(np.square(t - net.forward(x_normalized))))))
                    net.train(x_normalized, t, args.alpha, args.train_speed)
        else:
            for epoch in xrange(args.epochs):
                net.train(x_normalized, t, args.alpha, args.train_speed)
        print "Saving weights..."
        net.save_weights(W_PREFIX)
        print 'Done.'
    else:
        train_d0 = train_d1 = np.array([])
        if os.path.exists('{}_0.w.txt'.format(W_PREFIX)):
            print "Loading weights..."
            net.load_weights(W_PREFIX)
            print 'Done.'
        else:
            print "No weights were found!"

    if args.seed:
        np.random.seed(args.seed + 1)
    
    # input
    zds0, zds1 = np.random.randint(2, 20), np.random.randint(2, 20)
    d0, d1 = map.dataset(0, zds0), map.dataset(1, zds1)
    x = np.concatenate((d0, d1), axis=0)
    x_normalized = x / np.amax(x, axis=0)
    # expected data for checking
    td0 = np.array([[0]] * d0.shape[0], dtype=float)
    td1 = np.array([[1]] * d1.shape[0], dtype=float)
    t = np.concatenate((td0, td1), axis=0) # already normalized
    # output
    y = np.round(net.predict(x_normalized))
    if args.verbose:
        print "Input:"
        print x
        print "Output (Expected):"
        print t
        print "Output (Actual):"
        print y

    res = (y == t)
    if res.all():
        print "\nAll Good!"
    else:
        print "{}% are good!".format(res.sum() * 100 / len(res))

    if args.plotting:
        # filter the 'hits' and 'misses'
        good = []
        bad = []
        for i, v in enumerate(res):
            if v:
                good.append(x[i])
            else:
                bad.append(x[i])
        map.plot(np.array(good), np.array(bad), train_d0, train_d1, args.plot_name)
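
The per-feature normalization used twice above (x / np.amax(x, axis=0)) scales each input column into (0, 1] by its own column maximum. A small stand-alone illustration:

import numpy as np

x = np.array([[2.0, 10.0],
              [4.0, 40.0],
              [8.0, 20.0]])
# Each column is divided by its own maximum.
x_normalized = x / np.amax(x, axis=0)
print(x_normalized)  # [[0.25, 0.25], [0.5, 1.0], [1.0, 0.5]]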
Example #5
        trial.training = file_name
        train_pixels = trial.prepare_data(trial.training)[0]
        weights = trial.train(train_pixels)
        weight_values = weights.values()
        for i in weights:
            weights[i] += 10

        with open(model_file, 'w') as myfile:
            for i in weights:
                if i == trial.learner1:
                    myfile.write('%s %.9f\n' % ('learner1', weights[i]))
                if i == trial.learner2:
                    myfile.write('%s %.9f\n' % ('learner2', weights[i]))
    elif model == 'nnet':
        nnet = NeuralNetwork()
        nnet.train(file_name, model_file, epochs=10000)
    elif model == 'best':
        best = Best()
        best.train(file_name, model_file, epochs=10000)
    else:
        print 'Specified model not found!!'
else:
    if model in ('nearest', 'nnet', 'best'):
        if model_file.endswith('.txt'):
            model_file = model_file + '.npy'

    if model == 'nearest':
        knn = Knn()
        knn.test(file_name, model_file)
    elif model == 'nnet':
        nnet = NeuralNetwork()
Example #6
File: main.py Project: blckshrk/IFT6390
    def main(self, algo="KNN", textview=None):
        
        # Remplace "print"
        def print_output(text):
            if textview != None:
                buf = textview.get_buffer()
                buf.insert_at_cursor(text + "\n")
                textview.scroll_mark_onscreen(buf.get_insert())
            else:
                log.info(text)
        
        
        # list of set types
        if self.validation == 1:
            listeTypesSet = ["train", "validation", "test"]
        else:
            listeTypesSet = ["train", "test"]

        # list of the results used for the curves
        listeRes=[]

        # build the trainFile and testFile
        log.debug("Building the training files")
        tools.constructLfwNamesCurrent( self.nbExemples )

        # TODO: this is actually no longer needed
        ( nbClassesLFW, nbClassesORL ) = tools.trainAndTestConstruction( self.pourcentageTrain, self.nbExemples )

        # Load the data
        dataTrain, dataTrainIndices, nClass = tools.loadImageData( "train", self.categorie)
        
        # PCA transform
        print_output("Computing the eigenvectors...")
        pca_model = PCA( dataTrain )
        pca_model.transform() # transform the data into the "eigen space"

        ##### Search with KNN
        if algo == "KNN":
            print_output("Starting the K nearest neighbors algorithm...")
            
            # Build the model for the KNN search
            knn_model = KNN( pca_model.getWeightsVectors(), dataTrainIndices, nClass, self.K )
            
            # Build the model for Parzen windows
            parzen_model = ParzenWindows( pca_model.getWeightsVectors(), dataTrainIndices, nClass, self.Theta )

            ## TEST ###########################
            # TODO: this whole part should be reworked to output
            # train, validation and test curves
            for trainTest in listeTypesSet:
                if trainTest == "train":
                    dataTest, dataTestIndices = dataTrain, dataTrainIndices
                else:
                    ### if no validation were performed, we would concatenate the initial test and validation inputs to obtain the test set:
                    #if "validation" not in listeTypesSet:
                    #    dataTestInitial, dataTestInitialIndices, nClass = tools.loadImageData( "test", self.categorie )
                    #    dataValidation, dataValidationIndices, nClass = tools.loadImageData( "validation", self.categorie )
                    #    dataTest = np.zeros(dataTestInitial.size + dataValidation.size)
                    #    dataTestIndices = np.zeros( dataTest.size )
                    #    dataTest[ : dataTestInitial.size], dataTestIndices[ : dataTestInitial.size] = dataTestInitial, dataTestInitialIndices
                    #    dataTest[dataTestInitial.size : ], dataTestIndices[dataTestInitial.size : ] = dataValidation, dataValidationIndices
                    #else:
                    dataTest, dataTestIndices, nClass = tools.loadImageData( trainTest, self.categorie )
                print_output("Projection des données de test...")
                dataTest_proj = pca_model.getProjection( dataTest )
                

                # counters for correct results
                nbGoodResult = 0
                nbGoodResult2 = 0
                nbGoodResult3 = 0

                t_start = time.clock()
                for i in range(0, int( dataTest.shape[1] )):

                    # k = 1, for reference
                    # force k
                    knn_model.setK( 1 )
                    result1NN = knn_model.compute_predictions( dataTest_proj[:,i] )
                    if(result1NN == dataTestIndices[i]):
                        nbGoodResult += 1

                    # k = n
                    # restore k to its initial value
                    knn_model.setK( self.K )
                    resultKNN = knn_model.compute_predictions( dataTest_proj[:,i] )
                    if(resultKNN == dataTestIndices[i]):
                        nbGoodResult2 += 1

                
                    resultParzen = parzen_model.compute_predictions( dataTest_proj[:,i] )
                    if(resultParzen == dataTestIndices[i]):
                        nbGoodResult3 += 1
     
                    out_str = "Classic method: "+ str( result1NN ) +" | KNN method: "+ str( resultKNN ) +" | KNN+Parzen method: "+ str( resultParzen ) +" | Expected: "+ str( dataTestIndices[i] ) +"\n" # +1 car l'index de la matrice commence a 0
                    print_output(out_str)

                resClassic = (float(nbGoodResult) / float(dataTest.shape[1])) * 100.
                out_str = "\nAccuracy with classic method: %.3f" % resClassic + "%\n"
                resKNN = (nbGoodResult2 / float(dataTest.shape[1])) * 100.
                out_str += "Accuracy with KNN method (k="+ str( self.K ) +"): %.3f" % resKNN + "%\n"
                res = (nbGoodResult3 / float(dataTest.shape[1])) * 100.
                out_str += "Accuracy with KNN + Parzen window method (theta="+ str( self.Theta ) +"): %.3f" % res + "%\n"
                print_output(out_str)
                
                t_stop = time.clock()
                log.info("Total time: %.4fs\n" % float(t_stop-t_start))

                #### collect the final error values
                listeRes.append( 100 - resClassic )
                listeRes.append( 100 - resKNN )
                listeRes.append( 100 - res )

            
        
        #### Search with NNET
        elif algo == "NNET":
            print_output("Starting the multilayer perceptron algorithm...")

            # parameters, data, etc.
            dataTrain = pca_model.getWeightsVectors()
            dataTrainTargets = (dataTrainIndices - 1).reshape(dataTrainIndices.shape[0], -1)
            #! unlike KNN, the NNET takes the feature vectors as rows, not as columns
            train_set = np.concatenate((dataTrain.T, dataTrainTargets), axis=1)

            # fetch the validation data
            dataValidation, dataValidationIndices, nClass = tools.loadImageData( "validation", self.categorie )
            print_output("Projecting the validation data...")
            dataValidation_proj = pca_model.getProjection( dataValidation )
            dataValidationTargets = (dataValidationIndices - 1).reshape(dataValidationIndices.shape[0], -1)
            validation_set = np.concatenate((dataValidation_proj.T, dataValidationTargets), axis=1)

            # fetch the test data
            dataTest, dataTestIndices, nClass = tools.loadImageData( "test", self.categorie )
            print_output("Projecting the test data...")
            dataTest_proj = pca_model.getProjection( dataTest )
            dataTestTargets = (dataTestIndices - 1).reshape(dataTestIndices.shape[0], -1)
            test_set = np.concatenate((dataTest_proj.T, dataTestTargets), axis=1)

            # Build and train the neural network model
            nnet_model = NeuralNetwork( dataTrain.shape[0], self.n_hidden, nClass, self.lr, self.wd )
            if self.validation == 1:
                train_out, valid_out, test_out = nnet_model.train( train_set, self.n_epoch, self.batch_size, valid_set=validation_set, test_set=test_set)
            else:
                train_out, test_out = nnet_model.train( train_set, self.n_epoch, self.batch_size, test_set=test_set)

            # plot the training curves
            x = []
            y = []
            y_err = []
            color = []
            legend = []
            legend_err = []
            filename = IMG_DIR + "Risque__Epoch_"+ str(self.n_epoch) +"_Hidden_"+ str(self.n_hidden) +"_Lr_"+ str(self.lr) +"_L2_"+ str(self.wd) + "_Categorie_" + str(self.categorie) + "_Batch_" + str(self.batch_size) + "_"
            filename_err = IMG_DIR + "Erreur_classification__Epoch_"+ str(self.n_epoch) +"_Hidden_"+ str(self.n_hidden) +"_Lr_"+ str(self.lr) +"_L2_"+ str(self.wd) + "_Categorie_" + str(self.categorie) + "_Batch_" + str(self.batch_size) + "_"

            train_out = np.array(train_out)
            x.append(np.array(xrange(train_out.shape[0])))

            # train curve parameters
            color.append('g-')
            legend.append("R Train")
            filename += "_Train"
            y.append(train_out[:,0])
            y_err.append(train_out[:,1])
            legend_err.append("Err Train")
            filename_err += "_Train"

            # validation curve parameters
            if self.validation == 1:
                valid_out = np.array(valid_out)
                x.append(np.array(xrange(valid_out.shape[0])))
                y.append(valid_out[:,0])
                y_err.append(valid_out[:,1])
                color.append('b-')
                legend.append("R Validation")
                legend_err.append("Err Validation")
                filename += "_Validation"
                filename_err += "_Validation"

            # test curve parameters
            test_out = np.array(test_out)
            x.append(np.array(xrange(test_out.shape[0])))
            y.append(test_out[:,0])
            y_err.append(test_out[:,1])
            color.append('r-')
            legend.append("R Test")
            legend_err.append("Err Test")
            filename += "_Test"
            filename_err += "_Test"

            # display
            title = "\nEpoch: " + str(self.n_epoch) + " - Batch size: " + str(self.batch_size) + " - Hidden neurons: " + str(self.n_hidden) + "\nL2: " + str(self.wd) + " - Learning rate: " + str(self.lr) + " - Category: " + str(self.categorie)
            tools.drawCurves(x, y, color, legend, bDisplay=True, filename=filename, title=title, xlabel="Epoch", ylabel="Regularized risk")
            tools.drawCurves(x, y_err, color, legend_err, bDisplay=True, filename=filename_err, title=title, xlabel="Epoch", ylabel="Classification error")

            #### build a data file for improved curves
            if self.stock == 1:
                fichier = open("curvErrorNNet"+''.join( ''.join( title.split(' ') ).split('\n') ),"w")
                fichier.write("#epoch errorTrain errorValidation errorTest\n")

                if len(x) == 3:
                    for j in range(len( x[0] )):
                        fichier.write(str( x[0][j] )+" "+str( y[0][j] )+" "+str( y[1][j] )+" "+str( y[2][j] )+"\n")

                fichier.close()
                        
			"""
			/!\ Cette partie n'est plus utile car effectué dans le nnet durant le train
			
			## TEST ###########################
			#TODO Toute cette partie est a revoir pour sortir des graphes
			# de train, validation, test
			
			# compteurs de bons résultats   
			nbGoodResult = 0

			for i in range(0, int( dataTest.shape[1] )):

				#
				resultNNET = np.argmax(nnet_model.compute_predictions( dataTest_proj[:,i] ), axis=1)[0]
				if(resultNNET == dataTestTargets[i]):
					nbGoodResult += 1
				out_str = "Result: "+ str( resultNNET ) + " | Expected: "+ str( dataTestTargets[i] ) +"\n" # +1 car l'index de la matrice commence a 0
				print_output(out_str)

			res = (float(nbGoodResult) / float(dataTest.shape[1])) * 100.
			out_str = "\nAccuracy : %.3f" % res + "%\n"
			print_output(out_str)
            """            
   
        return listeRes
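
Example #6 leans on a project-specific PCA class (getWeightsVectors, getProjection). A minimal sketch of the same eigenface-style projection in plain numpy, assuming samples are stored one per column as in the snippet:

import numpy as np

def pca_project(data, n_components):
    # data: one sample per column. Center the data, take the leading
    # left singular vectors (eigenvectors of the covariance), and
    # return the weight vector of each sample in that basis.
    mean = data.mean(axis=1, keepdims=True)
    centered = data - mean
    U, S, Vt = np.linalg.svd(centered, full_matrices=False)
    components = U[:, :n_components]
    weights = components.T.dot(centered)  # shape: (n_components, n_samples)
    return weights, components, mean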
Example #7
    def train(self, file_name, model_file, epochs):
        nnet = NeuralNetwork()
        nnet.train(file_name, model_file, epochs)

X_c = np.copy(X)
Y_c = np.copy(Y)
np.random.shuffle(X_c)
np.random.shuffle(Y_c)
XY_train = np.stack((X_c.ravel(), Y_c.ravel()), axis=-1)
res = f(X_c, Y_c)

Z_train = res.reshape(Xn * Yn, 1)
Z_train = remap(Z_train, np.min(Z_train), np.max(Z_train), 0.01, 0.99)

Z_ideal = f(X, Y)

epochs = 2000
err = []
for i in range(epochs):
    err.append(np.max(network.train(XY_train, Z_train)))
    if i % 100 == 0:
        print("running " + str(i) + " epoch")
print("error on end of train = " + str(err[-1]))

XY_pred = np.stack((X.ravel(), Y.ravel()), axis=-1)
Z_predicted = network.predict(XY_pred).reshape(Xn, Yn)  # XY_train
Z_predicted = remap(Z_predicted, 0.01, 0.99, np.min(Z_train), np.max(Z_train))  # invert the training-time remap

fig = plt.figure(0)
ax = fig.gca(projection='3d')
plot = ax.plot_wireframe(X, Y, Z_ideal, color='red', linewidth=1)
fig.suptitle('Input function', fontsize=16)

fig2 = plt.figure(1)
ax = fig2.gca(projection='3d')
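
The remap helper used in this example is not shown in the snippet. Assuming it is a plain linear rescaling from one interval onto another, a minimal sketch:

import numpy as np

def remap(v, lo, hi, new_lo, new_hi):
    # Linearly map v from [lo, hi] onto [new_lo, new_hi].
    return new_lo + (v - lo) * (new_hi - new_lo) / float(hi - lo)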
Example #9
    y = np.zeros(10) + 0.01
    y[int(label)] = 0.99

    X.append(x)
    Y.append(y)

X = np.array(X)
Y = np.array(Y)

epochs = 80  # more than 30 is not really needed
err = []
for i in range(epochs):
    epoch_err = []
    for train_idx in range(len(Y)):
        epoch_err.append(net.train(X[train_idx], Y[train_idx]))
    err.append(np.max(np.array(epoch_err)))
    # err.append(np.max(net.train(X, Y)) / len(Y))
    if i % 10 == 0:
        print("running " + str(i) + " epoch")

print("error on end of train = " + str(err[-1]))

validate_file = open("../mnist_data/mnist_test_10.csv")
validate_list = validate_file.readlines()
validate_file.close()

scorecard = []
for i in range(len(validate_list)):
    raw_values = validate_list[i].split(',')
    label = raw_values[0]
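
The snippet cuts off inside the scoring loop. A hedged completion following the usual MNIST-CSV pattern the surrounding code suggests; net.predict and the 0.01-1.0 input scaling are assumptions:

    # scale the raw 0-255 pixels into (0.01, 1.0), as for the training data
    inputs = (np.asfarray(raw_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = net.predict(inputs)
    # the index of the strongest output is the network's answer
    scorecard.append(1 if np.argmax(outputs) == int(label) else 0)

print("accuracy = " + str(float(sum(scorecard)) / len(scorecard)))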