Code example #1
    def test_get_random_params(self):
        dlayers = [1024, 32, 64, 47]
        nnet = NeuralNetwork(dlayers)
        params = nnet.get_random_params()

        weights = [n * m for n, m in zip(dlayers, dlayers[1:])]
        biases = dlayers[1:]
        self.assertEqual(len(params), sum(weights) + sum(biases))
        self.assertTrue(all(-GM <= p <= GM for p in params))
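For reference, the length asserted above can be checked by hand for dlayers = [1024, 32, 64, 47] (one weight per connection between consecutive layers, plus one bias per non-input neuron):

\[
\underbrace{1024 \cdot 32 + 32 \cdot 64 + 64 \cdot 47}_{\text{weights}} + \underbrace{32 + 64 + 47}_{\text{biases}} = 37824 + 143 = 37967
\]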
Code example #2
def test_backward():
    n = 100
    p = 4
    x = np.ones([p, n])
    y = np.zeros([1, n])
    net = NeuralNetwork(p, [(10, Sigmoid()), (11, Sigmoid()), (1, Identity())], QuadraticLoss())
    net(x)
    net.backward(y)
    assert True  # smoke test: forward and backward passes completed without raising
Code example #3
    def test_numerical_gradient_checking(self):
        label, image = next(mnist.read())
        ninput = [pixel / 255 for row in image for pixel in row]
        expected = [1 if i == label else 0 for i in range(10)]
        nnet = NeuralNetwork([784, 16, 16, 10])

        epsilon = 1e-5
        numgrad = [np.empty(wmatrix.shape) for wmatrix in nnet.weight]

        for k, wmatrix in enumerate(nnet.weight):
            for i, w in np.ndenumerate(wmatrix):
                wmatrix[i] = w - epsilon
                nnet.feedforward(ninput)
                a = nnet.get_error(expected)
                wmatrix[i] = w + epsilon
                nnet.feedforward(ninput)
                b = nnet.get_error(expected)
                numgrad[k][i] = (b - a) / (2 * epsilon)  # central difference
                wmatrix[i] = w
        error_gradient = nnet.get_error_gradient(expected)

        unit = lambda v: v / norm(v) if (v != 0).any() else np.zeros(v.shape)

        for k in range(len(nnet.weight)):
            ag = error_gradient[k]
            ng = numgrad[k]
            print(f"custom = {norm(unit(ag) - unit(ng))}")
            print(
                f"derived from cs231 = {norm(unit(ag) * norm(ng) - ng) / max(norm(ag), norm(ng))}"
            )
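For reference, the loop above implements the standard central-difference approximation of each partial derivative, and the second printed quantity is a variant of the relative-error metric from the CS231n course notes; with $g_a$ the analytic gradient and $g_n$ the numerical one:

\[
\frac{\partial E}{\partial w} \approx \frac{E(w + \epsilon) - E(w - \epsilon)}{2\epsilon},
\qquad
\text{relative error} = \frac{\lVert g_a - g_n \rVert}{\max(\lVert g_a \rVert,\ \lVert g_n \rVert)}
\]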
Code example #4
def test_forward():
    n = 100
    p = 4
    x = np.ones([p, n])
    net = NeuralNetwork(p, [(10, Sigmoid()), (11, Sigmoid()), (1, Identity())], QuadraticLoss())
    output = net(x)
    assert output.shape == (1, n)
Code example #5
    def test_create_layers(self):
        dlayers = [1024, 32, 64, 47]
        nnet = NeuralNetwork(dlayers)
        params = nnet.get_random_params()
        nnet.create_layers(params)

        weights = [(n + 1) * m for n, m in zip(dlayers, dlayers[1:])]

        # Weights assertions
        self.assertEqual(len(nnet.weight), len(dlayers) - 1)
        self.assertTrue(
            all(w.size == weights[i] for i, w in enumerate(nnet.weight)))
        self.assertTrue(
            all(w.shape == (dlayers[i - 1] + 1, dlayers[i])
                for i, w in enumerate(nnet.weight, 1)))
        self.assertTrue(
            all(-GM <= p <= GM for w in nnet.weight for p in np.nditer(w)))
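Unlike code example #1, this test sizes each weight matrix as (n + 1) x m, folding the layer's biases into an extra input row. The total parameter count works out the same; checking the arithmetic for dlayers = [1024, 32, 64, 47]:

\[
(1024 + 1) \cdot 32 + (32 + 1) \cdot 64 + (64 + 1) \cdot 47 = 32800 + 2112 + 3055 = 37967
\]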
Code example #6
def test_trained(params=None, head=100, tail=100):
    "Tests a network with params against first `head` and last `tail` examples"
    params = params if params is not None else load_params()
    nnet = NeuralNetwork(DLAYERS, params)
    mnist_db = list(mnist.read())
    print("[KNOWN]")
    test_and_report_against(nnet, mnist_db[:head])  # Training dataset
    print("[UNKNOWN]")
    test_and_report_against(nnet, mnist_db[-tail:])  # Unknown dataset
Code example #7
def backpropagation_main():
    label, image = next(mnist.read())
    ninput = [pixel / 255 for row in image for pixel in row]
    expected = [1 if i == label else 0 for i in range(10)]

    nnet = NeuralNetwork(DLAYERS, params=None)
    # nnet = NeuralNetwork(DLAYERS, params=load_params())
    try:
        # Train until the user interrupts with Ctrl-C
        for i in range(1000000000000):
            guess = nnet.feedforward(ninput)
            cost = nnet.get_error(expected)
            print(f"[{i + 1}] cost = {cost}, guess = {guess}")
            nnet.backpropagate(expected)
    except KeyboardInterrupt:
        pass
    guess = nnet.feedforward(ninput)
    cost = nnet.get_error(expected)
    print(f"[{i + 1}] cost = {cost}")
    save_params(nnet.params)
Code example #8
File: game.py Project: ThiagoLira/flappyGANNET
class NeuralAgent():
    """
    This is our neural agent. It picks actions determined by the NeuralNetwork!
    """
    def __init__(self, actions, weights):
        self.actions = actions
        # 8 state variables in FlappyBird
        self.nn = NeuralNetwork([8, 16, 1], weights=weights)

    # normalize inputs and feedforward the NeuralNetwork to pick an action
    def pickAction(self, state, minValues, maxValues):
        stateValues = state.tolist()[0]

        # update the running min/max of each state variable
        for i in range(len(stateValues)):
            if minValues[i] > stateValues[i]:
                minValues[i] = stateValues[i]
            if maxValues[i] < stateValues[i]:
                maxValues[i] = stateValues[i]

        # min-max normalize once, after all bounds are updated
        try:
            output = [(stateValues[i] - minValues[i]) /
                      (maxValues[i] - minValues[i])
                      for i in range(len(stateValues))]
        except ZeroDivisionError:
            print("Divided by zero!")
            output = [1 for _ in range(len(stateValues))]

        out = self.nn.eval(output)

        # threshold the single output neuron
        action = 0 if out[0] > 0.5 else 1
        return self.actions[action]
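The normalization in pickAction is plain min-max scaling over the range observed so far for each state variable:

\[
\hat{x}_i = \frac{x_i - \min_i}{\max_i - \min_i} \in [0, 1]
\]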
Code example #9
File: run_nnet.py Project: BenedicteLC/MLProj3
def test_net(train_set, train_labels, valid_set, valid_labels, test_set, learning_rate, \
            decrease_constant, size, l2, l1, function) :
    """
    Train and validate the neural net with 
    a given set of parameters.
    Returns the final test output.
    """    
    neuralNet = NeuralNetwork(lr=learning_rate, dc=decrease_constant, sizes=size, L2=l2, L1=l1,
                     seed=5678, tanh=function, n_epochs=10)
    
    n_classes = 10
    
    print "Training..."
    # Early stopping code
    best_val_error = np.inf # Begin with infinite error
    best_it = 0 # Iteration of the best neural net so far wrt valid error
    look_ahead = 5
    n_incr_error = 0
    for current_stage in range(1, 500 + 1):

        # Stop training when the net has not improved for `look_ahead` turns.
        if n_incr_error >= look_ahead:
            break
        neuralNet.n_epochs = current_stage
        neuralNet.train(train_set, train_labels, n_classes)
        n_incr_error += 1
        
        outputs, errors, accuracy = neuralNet.test(train_set, train_labels)
        print 'Epoch',current_stage,'|',
        print 'Training accuracy: ' + '%.3f'%accuracy+',', ' |',
        outputs, errors, accuracy = neuralNet.test(valid_set, valid_labels)
        print 'Validation accuracy: ' + '%.3f'%accuracy
        
        # Check if this model is better than the previous:
        error = 1.0 - accuracy
        if error < best_val_error:
            best_val_error = error
            best_it = current_stage
            n_incr_error = 0
            best_model = copy.deepcopy(neuralNet) # Save the model.
    
    #TODO Clear train and valid set to free memory.
    # Evaluate the best model on the test set
    outputs = best_model.predict(test_set)
    return outputs
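The early-stopping pattern used here recurs in code examples #10 and #15. Below is a minimal, framework-agnostic sketch of the same idea; the train_epoch and validate callables are hypothetical stand-ins, not part of run_nnet.py:

import copy

def train_with_early_stopping(model, train_epoch, validate,
                              max_epochs=500, look_ahead=5):
    best_val_error = float("inf")  # begin with infinite error
    best_model = None
    n_incr_error = 0               # epochs since the last improvement
    for epoch in range(1, max_epochs + 1):
        if n_incr_error >= look_ahead:
            break                  # patience exhausted
        train_epoch(model)
        n_incr_error += 1
        error = validate(model)
        if error < best_val_error:
            best_val_error = error
            n_incr_error = 0
            best_model = copy.deepcopy(model)  # snapshot the best model
    return best_model, best_val_error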
Code example #10
File: run_nnet.py Project: BenedicteLC/MLProj3
def validate_net(train_set, train_labels, valid_set, valid_labels, learning_rate, \
            decrease_constant, size, l2, l1, function) :
    """
    Train and validate the neural net with
    a given set of parameters.
    Return the best validation accuracy and the
    corresponding training accuracy.
    """
    neuralNet = NeuralNetwork(lr=learning_rate, dc=decrease_constant, sizes=size, L2=l2, L1=l1,
                     seed=5678, tanh=function, n_epochs=10)
    
    n_classes = 10
    
    print "Training..."
    # Early stopping code @Hugo Larochelle (partially)
    best_val_error = np.inf # Begin with infinite error
    best_it = 0 # Iteration of the best neural net so far wrt valid error
    look_ahead = 5
    n_incr_error = 0
    for current_stage in range(1, 500 + 1):

        # Stop training when the net has not improved for `look_ahead` turns.
        if n_incr_error >= look_ahead:
            break
        neuralNet.n_epochs = current_stage
        neuralNet.train(train_set, train_labels, n_classes)
        n_incr_error += 1
        
        outputs, errors, train_accuracy = neuralNet.test(train_set, train_labels)
        print 'Epoch',current_stage,'|',
        print 'Training accuracy: ' + '%.3f'%train_accuracy+',', ' |',
        outputs, errors, valid_accuracy = neuralNet.test(valid_set, valid_labels)
        print 'Validation accuracy: ' + '%.3f'%valid_accuracy
        
        # Check if this model is better than the previous:
        error = 1.0 - valid_accuracy
        if error < best_val_error:
            best_val_error = error
            best_train_accuracy = train_accuracy            
            n_incr_error = 0
    
    return 1 - best_val_error, best_train_accuracy
Code example #11
File: batch_test.py Project: Soarex16/NeuralNetStudy
import numpy as np
import matplotlib.pyplot as plt
from nnet import NeuralNetwork
import activation_functions as funcs

config = [1, 5, 5, 1]
net = NeuralNetwork(config,
                    learning_rate=0.1,
                    act_func=funcs.sigmoid,
                    df_act_func=funcs.df_sigmoid)


def f(x):
    return x**2


vf = np.vectorize(f)

x = np.array(np.linspace(-1, 1, num=10), ndmin=2)
y = vf(x)

epochs = 5000
err = net.fit_raw(x, y, epochs, True, 10)

test_sample = np.array([0.1])
y_pred = net.predict(test_sample)

plt.plot(np.arange(1, len(err) + 1), np.array(err))  # plot the training error curve
plt.show()
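A quick sanity check on the fit, using only names defined in the script above: the target function is f(x) = x**2, so the prediction at 0.1 should approach f(0.1) = 0.01 after training:

print("expected:", f(0.1))    # 0.01
print("predicted:", y_pred)   # should be close to 0.01 after 5000 epochs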
Code example #12
File: game.py Project: ThiagoLira/flappyGANNET
 def __init__(self, actions, weights):
     self.actions = actions
     #number of state variables in FlappyBird
     self.nn = NeuralNetwork([8, 16, 1], weights=weights)
Code example #13
def main(argv):    
    if args.seed:
        np.random.seed(args.seed)

    map = Map(MAP_WIDTH, MAP_HEIGHT)
    net = NeuralNetwork(2, args.layer_neurons, 1, args.hidden_layers, args.bias)
    print net
    if args.train:
        # training data
        train_d0, train_d1 = map.dataset(0, MAP_WIDTH + MAP_HEIGHT), \
                             map.dataset(1, MAP_WIDTH + MAP_HEIGHT)
        td0 = np.array([[0]] * train_d0.shape[0], dtype=float)
        td1 = np.array([[1]] * train_d1.shape[0], dtype=float)
        t = np.concatenate((td0, td1), axis=0) # already normalized
        # input
        x = np.concatenate((train_d0, train_d1), axis=0)
        x_normalized = x / np.amax(x, axis=0)
        
        print 'Training...'
        if args.logging:
            with open('training.log', 'w') as f:
                for epoch in xrange(args.epochs):
                    f.write('Epoch {}\n'.format(epoch))
                    f.write("Input:\n{}\n".format(x_normalized.T))
                    f.write("Actual Output:\n{}\n".format(t.T))
                    f.write("Predicted Output:\n{}\n".format(np.round(net.forward(x_normalized).T)))
                    f.write("Loss:\n{}\n\n".format(str(np.mean(np.square(t - net.forward(x_normalized))))))
                    net.train(x_normalized, t)
        else:
            for epoch in xrange(args.epochs):
                net.train(x_normalized, t, args.alpha, args.train_speed)
        print "Saving weights..."
        net.save_weights(W_PREFIX)
        print 'Done.'
    else:
        train_d0 = train_d1 = np.array([])
        if os.path.exists('{}_0.w.txt'.format(W_PREFIX)):
            print "Loading weights..."
            net.load_weights(W_PREFIX)
            print 'Done.'
        else:
            print "No weights were found!"

    if args.seed:
        np.random.seed(args.seed + 1)
    
    # input
    zds0, zds1 = np.random.randint(2, 20), np.random.randint(2, 20)
    d0, d1 = map.dataset(0, zds0), map.dataset(1, zds1)
    x = np.concatenate((d0, d1), axis=0)
    x_normalized = x / np.amax(x, axis=0)
    # expected data for verification
    td0 = np.array([[0]] * d0.shape[0], dtype=float)
    td1 = np.array([[1]] * d1.shape[0], dtype=float)
    t = np.concatenate((td0, td1), axis=0) # already normalized
    # output
    y = np.round(net.predict(x_normalized))
    if args.verbose:
        print "Input:"
        print x
        print "Output (Expected):"
        print t
        print "Output (Actual):"
        print y

    res = (y == t)
    if res.all():
        print "\nAll Good!"
    else:
        print "{}% are good!".format(res.sum() * 100 / len(res))

    if args.plotting:
        # filter the 'hits' and the 'misses'
        good = []
        bad = []
        for i, v in enumerate(res):
            if v:
                good.append(x[i])
            else:
                bad.append(x[i])
        map.plot(np.array(good), np.array(bad), train_d0, train_d1, args.plot_name)
Code example #14
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from nnet import NeuralNetwork
import activation_functions as funcs

np.random.seed(1)


def remap(value, low1, high1, low2, high2):
    return low2 + (value - low1) * (high2 - low2) / (high1 - low1)


config = [2, 20, 15, 20, 1]
network = NeuralNetwork(config, learning_rate=0.00005, act_func=funcs.tanh, df_act_func=funcs.df_illogical)

Xn = 30
Yn = 30

X = np.linspace(-0.9, 0.9, Xn).reshape(-1, 1)
Y = np.linspace(-0.9, 0.9, Yn).reshape(-1, 1)


def f(x, y):
    return np.sin(3 * x) * np.cos(2 * y) * 0.5


X, Y = np.meshgrid(X, Y)
XY_train = np.stack((X.ravel(), Y.ravel()), axis=-1)

res = f(X, Y)
Code example #15
                            noise_prob = pretrain_noise_prob,
                            seed=seed
                            )
    new_layer.train(pretraining_trainset)
    print ' DONE'

    pretrained_Ws += [new_layer.W]
    pretrained_bs += [new_layer.b]

pretrained_Ws += [np.zeros((sizes[-1], len(trainset.metadata['targets'])))]
pretrained_bs += [np.zeros(len(trainset.metadata['targets']))]
    
# Construct neural network, with pretrained parameters
myObject = NeuralNetwork(n_epochs=1,
                         lr=lr,
                         dc=dc,
                         sizes=sizes,
                         seed=seed,
                         parameter_initialization=(pretrained_bs,pretrained_Ws))

print "Fine-tuning..."
# Early stopping code
best_val_error = np.inf
best_it = 0
str_header = 'best_it\t'
look_ahead = 5
n_incr_error = 0
for stage in range(1, 500 + 1):
    if n_incr_error >= look_ahead:
        break
    myObject.n_epochs = stage
    myObject.train(trainset)
Code example #16
 def test(self, file_name, model_file, output_file="best_output.txt"):
     nnet = NeuralNetwork()
     nnet.test(file_name, model_file, output_file)
Code example #17
 def train(self, file_name, model_file, epochs):
     nnet = NeuralNetwork()
     nnet.train(file_name, model_file, epochs)
Code example #18
from nnet import NeuralNetwork

print "Verifying gradients with sigmoid activation"
m = NeuralNetwork()
m.L2 = 0
m.L1 = 0
m.tanh = False
m.verify_gradients()

print ""
print "Verifying gradients with tanh activation"
m.tanh = True
m.verify_gradients()

print ""
print "Verifying gradients with L2 regularization"
m.L2 = 0.001
m.verify_gradients()

print ""
print "Verifying gradients with L1 regularization"
m.L2 = 0
m.L1 = 0.001
m.verify_gradients()

Code example #19
File: main.py Project: blckshrk/IFT6390
    def main(self, algo="KNN", textview=None):
        
        # Replaces "print"
        def print_output(text):
            if textview != None:
                buf = textview.get_buffer()
                buf.insert_at_cursor(text + "\n")
                textview.scroll_mark_onscreen(buf.get_insert())
            else:
                log.info(text)
        
        
        # list of set types
        if self.validation == 1:
            listeTypesSet = ["train", "validation", "test"]
        else:
            listeTypesSet = ["train", "test"]

        # list of results used for the curves
        listeRes=[]

        # create the trainFile and testFile
        log.debug("Building the training files")
        tools.constructLfwNamesCurrent( self.nbExemples )   

        #TODO this actually serves no purpose anymore
        ( nbClassesLFW, nbClassesORL ) = tools.trainAndTestConstruction( self.pourcentageTrain, self.nbExemples )

        # Load the data
        dataTrain, dataTrainIndices, nClass = tools.loadImageData( "train", self.categorie)
        
        # PCA transform
        print_output("Computing the eigenvectors...")
        pca_model = PCA( dataTrain )
        pca_model.transform() # project the data into the "eigen space"

        ##### Search with KNN
        if algo == "KNN":
            print_output("Starting the K nearest neighbors algorithm...")
            
            # Build the model for the KNN search
            knn_model = KNN( pca_model.getWeightsVectors(), dataTrainIndices, nClass, self.K )
            
            # Build the Parzen model
            parzen_model = ParzenWindows( pca_model.getWeightsVectors(), dataTrainIndices, nClass, self.Theta )

            ## TEST ###########################
            #TODO This whole part needs rework to produce the train,
            # validation and test graphs
            for trainTest in listeTypesSet:
                if trainTest == "train":
                    dataTest, dataTestIndices = dataTrain, dataTrainIndices
                else:
                    ### if no validation is performed, concatenate the original test and validation inputs to obtain the test set
                    #if "validation" not in listeTypesSet:
                        #dataTestInitial, dataTestInitialIndices, nClass = tools.loadImageData( "test", self.categorie )
                        #dataValidation, dataValidationIndices, nClass = tools.loadImageData( "validation", self.categorie )
                        #dataTest = np.zeros(dataTestInitial.size + dataValidation.size)
                        #dataTestIndices = np.zeros( dataTest.size )
                        #dataTest[ : dataTestInitial.size], dataTestIndices[ : dataTestInitial.size] = dataTestInitial, dataTestInitialIndices
                        #dataTest[dataTestInitial.size : ], dataTestIndices[dataTestInitial.size : ] = dataValidation, dataValidationIndices
                        
                        
                    #else:
                        dataTest, dataTestIndices, nClass = tools.loadImageData( trainTest, self.categorie )
                print_output("Projecting the test data...")
                dataTest_proj = pca_model.getProjection( dataTest )
                

                # counters of correct results
                nbGoodResult = 0
                nbGoodResult2 = 0 
                nbGoodResult3 = 0

                t_start = time.clock()
                for i in range(0, int( dataTest.shape[1] )):

                    # k = 1, for reference
                    # force k
                    knn_model.setK( 1 )
                    result1NN = knn_model.compute_predictions( dataTest_proj[:,i] )
                    if(result1NN == dataTestIndices[i]):
                        nbGoodResult += 1

                    # k = n
                    # restore k to its initial position
                    knn_model.setK( self.K )
                    resultKNN = knn_model.compute_predictions( dataTest_proj[:,i] )
                    if(resultKNN == dataTestIndices[i]):
                        nbGoodResult2 += 1

                
                    resultParzen = parzen_model.compute_predictions( dataTest_proj[:,i] )
                    if(resultParzen == dataTestIndices[i]):
                        nbGoodResult3 += 1
     
                    out_str = "Classic method: "+ str( result1NN ) +" | KNN method: "+ str( resultKNN ) +" | KNN+Parzen method: "+ str( resultParzen ) +" | Expected: "+ str( dataTestIndices[i] ) +"\n" # +1 because the matrix index starts at 0
                    print_output(out_str)

                resClassic = (float(nbGoodResult) / float(dataTest.shape[1])) * 100.
                out_str = "\nAccuracy with classic method: %.3f" % resClassic + "%\n"
                resKNN = (nbGoodResult2 / float(dataTest.shape[1])) * 100.
                out_str += "Accuracy with KNN method (k="+ str( self.K ) +"): %.3f" % resKNN + "%\n"
                res = (nbGoodResult3 / float(dataTest.shape[1])) * 100.
                out_str += "Accuracy with KNN + Parzen window method (theta="+ str( self.Theta ) +"): %.3f" % res + "%\n"
                print_output(out_str)
                
                t_stop = time.clock()
                log.info("Total time: %.4fs\n" % float(t_stop-t_start))

                #### collect the final error values
                listeRes.append( 100 - resClassic )
                listeRes.append( 100 - resKNN )
                listeRes.append( 100 - res )

            
        
        ##### Search with NNET
        elif algo == "NNET":
                        print_output("Starting the multilayer perceptron algorithm...")
			
                        # parameters, data, etc.
			dataTrain = pca_model.getWeightsVectors()
			dataTrainTargets = (dataTrainIndices - 1).reshape(dataTrainIndices.shape[0], -1)
                        #! unlike KNN, the NNET takes the feature vectors as rows, not columns
			train_set = np.concatenate((dataTrain.T, dataTrainTargets), axis=1)

                        # fetch the validation data
                        dataValidation, dataValidationIndices, nClass = tools.loadImageData( "validation", self.categorie )
                        print_output("Projecting the validation data...")
			dataValidation_proj = pca_model.getProjection( dataValidation )
			dataValidationTargets = (dataValidationIndices - 1).reshape(dataValidationIndices.shape[0], -1)
			validation_set = np.concatenate((dataValidation_proj.T, dataValidationTargets), axis=1)

                        # fetch the test data
                        dataTest, dataTestIndices, nClass = tools.loadImageData( "test", self.categorie )
                        print_output("Projecting the test data...")
			dataTest_proj = pca_model.getProjection( dataTest )
			dataTestTargets = (dataTestIndices - 1).reshape(dataTestIndices.shape[0], -1)
			test_set = np.concatenate((dataTest_proj.T, dataTestTargets), axis=1)

                        # Build and train the neural network model
			nnet_model = NeuralNetwork( dataTrain.shape[0], self.n_hidden, nClass, self.lr, self.wd )
                        if self.validation == 1:
                            train_out, valid_out, test_out = nnet_model.train( train_set, self.n_epoch, self.batch_size, valid_set=validation_set, test_set=test_set)
                        else :
                            train_out, test_out = nnet_model.train( train_set, self.n_epoch, self.batch_size, test_set=test_set)

                        # plot the training curves
			x = []
			y = []
			y_err = []
			color = []
			legend = []
			legend_err = []
			filename = IMG_DIR + "Risque__Epoch_"+ str(self.n_epoch) +"_Hidden_"+ str(self.n_hidden) +"_Lr_"+ str(self.lr) +"_L2_"+ str(self.wd) + "_Categorie_" + str(self.categorie) + "_Batch_" + str(self.batch_size) + "_"
			filename_err = IMG_DIR + "Erreur_classification__Epoch_"+ str(self.n_epoch) +"_Hidden_"+ str(self.n_hidden) +"_Lr_"+ str(self.lr) +"_L2_"+ str(self.wd) + "_Categorie_" + str(self.categorie) + "_Batch_" + str(self.batch_size) + "_"

			train_out = np.array(train_out)
			x.append(np.array(xrange(train_out.shape[0])))
		
                        # train curve parameters
			color.append('g-')
			legend.append("R Train")
			filename += "_Train"
			y.append(train_out[:,0])
			y_err.append(train_out[:,1])
			legend_err.append("Err Train")
			filename_err += "_Train"

                        # validation curve parameters
                        if self.validation == 1:
                            valid_out = np.array(valid_out)
                            x.append(np.array(xrange(valid_out.shape[0])))
                            y.append(valid_out[:,0])
                            y_err.append(valid_out[:,1])
                            color.append('b-')
                            legend.append("R Validation")
                            legend_err.append("Err Validation")
                            filename += "_Validation"
                            filename_err += "_Validation"

                        # test curve parameters
			test_out = np.array(test_out)
			x.append(np.array(xrange(test_out.shape[0])))
			y.append(test_out[:,0])
			y_err.append(test_out[:,1])
			color.append('r-')
			legend.append("R Test")
			legend_err.append("Err Test")
			filename += "_Test"
			filename_err += "_Test"

			
                        # display
                        title = u"\nEpochs: " + str(self.n_epoch) + " - Batch size: " + str(self.batch_size) + u" - Hidden neurons: " + str(self.n_hidden) + "\nL2: " + str(self.wd) + " - Learning rate: " + str(self.lr) + u" - Category: " + str(self.categorie)
                        tools.drawCurves(x, y, color, legend, bDisplay=True, filename=filename, title=title, xlabel="Epoch", ylabel=u"Regularized risk")
                        tools.drawCurves(x, y_err, color, legend_err, bDisplay=True, filename=filename_err, title=title, xlabel="Epoch", ylabel="Classification error")

                        #### build a file for the improved curves
                        if self.stock == 1 :
                            fichier = open("curvErrorNNet"+''.join( ''.join( title.split(' ') ).split('\n') ),"w")
                            fichier.write("#epoch errorTrain errorValidation errorTest\n")
                            
                            if len(x) == 3:
                            	for j in range(len( x[0] )):
                            	    fichier.write(str( x[0][j] )+" "+str( y[0][j] )+" "+str( y[1][j] )+" "+str( y[2][j] )+"\n")

                            fichier.close()

                        
			"""
			/!\ This part is no longer useful because it is done in the nnet during training

			## TEST ###########################
			#TODO This whole part needs rework to produce the train,
			# validation and test graphs

			# counters of correct results
			nbGoodResult = 0

			for i in range(0, int( dataTest.shape[1] )):

				#
				resultNNET = np.argmax(nnet_model.compute_predictions( dataTest_proj[:,i] ), axis=1)[0]
				if(resultNNET == dataTestTargets[i]):
					nbGoodResult += 1
				out_str = "Result: "+ str( resultNNET ) + " | Expected: "+ str( dataTestTargets[i] ) +"\n" # +1 because the matrix index starts at 0
				print_output(out_str)

			res = (float(nbGoodResult) / float(dataTest.shape[1])) * 100.
			out_str = "\nAccuracy : %.3f" % res + "%\n"
			print_output(out_str)
            """            
   
        return listeRes
Code example #20
        trial = adaboost.Adaboost(file_name, None)
        trial.training = file_name
        train_pixels = trial.prepare_data(trial.training)[0]
        weights = trial.train(train_pixels)
        weight_values = weights.values()
        for i in weights:
            weights[i] += 10

        with open(model_file, 'w') as myfile:
            for i in weights:
                if i == trial.learner1:
                    myfile.write('%s %.9f\n' % ('learner1', weights[i]))
                if i == trial.learner2:
                    myfile.write('%s %.9f\n' % ('learner2', weights[i]))
    elif model == 'nnet':
        nnet = NeuralNetwork()
        nnet.train(file_name, model_file, epochs=10000)
    elif model == 'best':
        best = Best()
        best.train(file_name, model_file, epochs=10000)
    else:
        print 'Specified model not found!!'
else:
    if model == 'nearest' or model == 'nnet' or model == 'best':
        if model_file.endswith('.txt'):
            model_file = model_file + '.npy'

    if model == 'nearest':
        knn = Knn()
        knn.test(file_name, model_file)
    elif model == 'nnet':
Code example #21
File: genetic.py Project: ThiagoLira/flappyGANNET
def createParent():
    nn = NeuralNetwork([8, 16, 1])
    return nn.weights
Code example #22
File: NeuralDriver.py Project: tizon9804/NNRobot
#from RobotSystem import Robot
#robot=Robot.RobotDriver()

#-------------------------------------------------------------------------
#initialize neuralnetwork
#-------------------------------------------------------------------------
print("initializing neuralnetwork..")
from nnet import NeuralNetwork as NN
type = 'SightConscience'
init.convImage()
print init.convImg.shape
input_layer_size = init.convImg.shape[1]
num_hidden_layer = 3
hidden_layer_size = 100
num_output_Layers = 5
NNSight = NN.CNNET(type, input_layer_size, num_hidden_layer, hidden_layer_size,
                   num_output_Layers)
mind1 = Think.assemble([], NNSight.brain, input_layer_size, num_hidden_layer,
                       hidden_layer_size, num_output_Layers)

persistence.loadNN(type)

typeN = 'NavigatonConscience'
input_layer_size = 229
num_hidden_layer = 3
hidden_layer_size = 25
num_output_Layer = 3
NNNavigation = NN.CNNET(typeN, input_layer_size, num_hidden_layer,
                        hidden_layer_size, num_output_Layer)
mind2 = Think.assemble([], NNNavigation.brain, input_layer_size,
                       num_hidden_layer, hidden_layer_size, num_output_Layer)
Code example #23
File: mnist_test.py Project: Soarex16/NeuralNetStudy
import numpy as np
from nnet import NeuralNetwork
import activation_functions as funcs

np.random.seed(0)


def remap(value, low1, high1, low2, high2):
    return low2 + (value - low1) * (high2 - low2) / (high1 - low1)
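
remap is a straight linear rescale from [low1, high1] to [low2, high2]; as used below to map pixel values from [0, 255] into [0.01, 0.99], a worked mid-range example:

\[
\mathrm{remap}(v) = \mathit{low}_2 + (v - \mathit{low}_1)\,\frac{\mathit{high}_2 - \mathit{low}_2}{\mathit{high}_1 - \mathit{low}_1},
\qquad
\mathrm{remap}(128) = 0.01 + 128 \cdot \frac{0.98}{255} \approx 0.502
\]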


data_file = open("../mnist_data/mnist_train_100.csv")
data_list = data_file.readlines()
data_file.close()

config = [28 * 28, 200, 10]
net = NeuralNetwork(config,
                    learning_rate=0.05,
                    act_func=funcs.sigmoid,
                    df_act_func=funcs.df_sigmoid)

X = []
Y = []
for data in data_list:
    raw_values = data.split(',')
    label = raw_values[0]
    x = np.asfarray(raw_values[1:])
    x = remap(x, 0, 255, 0.01, 0.99)

    y = np.zeros(10) + 0.01
    y[int(label)] = 0.99

    X.append(x)
    Y.append(y)