Code Example #1
def NN_fit(train_set, val_set, nn=5, epochs=10, width=10, layers=2):
    import logging

    from NN import NeuralNetwork
    from sklearn.metrics import mean_squared_error

    logger = logging.getLogger(__name__)

    # Train `nn` networks from scratch and keep the one with the lowest
    # validation MSE.
    best_error = float("inf")
    fnn = None
    for _ in range(nn):
        net = NeuralNetwork()
        net.train(
            train_set,
            val_set,
            epochs=epochs,
            width=width,
            layers=layers,
            batch_size=20,
            learning_rate=0.001,
        )
        predicted_y, _ = net.predict(train_set.X)
        logger.info(
            f"NN train MSE {mean_squared_error(train_set.Y, predicted_y)}")

        predicted_y, _ = net.predict(val_set.X)
        error = mean_squared_error(val_set.Y, predicted_y)
        logger.info(f"NN dev MSE {error}")

        if error < best_error:
            best_error = error
            fnn = net

    return fnn
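A minimal usage sketch for NN_fit, assuming the NN module from the snippet is importable; the Dataset wrapper and the synthetic data below are illustrative assumptions, not part of the original project:

import numpy as np

class Dataset:
    # Hypothetical container matching the train_set/val_set interface
    # that NN_fit reads: inputs in .X, targets in .Y.
    def __init__(self, X, Y):
        self.X = X
        self.Y = Y

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))
Y = X.sum(axis=1, keepdims=True)  # simple synthetic regression target

train_set = Dataset(X[:160], Y[:160])
val_set = Dataset(X[160:], Y[160:])

# Train 3 candidate networks and keep the one with the lowest validation MSE.
best_net = NN_fit(train_set, val_set, nn=3, epochs=5)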
Code Example #2
def train(iterations, rate):
    # These names are module-level globals; a `global` declaration is only
    # needed for assignment, and this function only reads them. Calling
    # train() on the NumberNet instance directly is equivalent to the
    # original unbound NeuralNetwork.train(NumberNet, ...) call.
    NumberNet.train(training_inputs=training_inputs,
                    training_outputs=temp_training_outputs,
                    training_iterations=iterations,
                    learning_rate=rate)
Code Example #3
File: v1.py Project: ruitaiS/vybLab
def generate():
    Digit_NN = NeuralNetwork(no_of_in_nodes=image_pixels,
                             no_of_out_nodes=10,
                             no_of_hidden_nodes=100,
                             learning_rate=0.1)
    '''
    Letter_NN = NeuralNetwork(no_of_in_nodes = image_pixels, 
                        no_of_out_nodes = 26, 
                        no_of_hidden_nodes = 100,
                        learning_rate = 0.1)
    '''

    Meta_NN = NeuralNetwork(Digit_NN.no_of_out_nodes, 2, 100, 0.05)

    #Train Digit NN
    for i in range(len(digits_train_imgs)):
        Digit_NN.train(digits_train_imgs[i], digits_train_labels_one_hot[i])

    #Display Statistics for Digits
    corrects, wrongs = Digit_NN.evaluate(digits_train_imgs,
                                         digits_train_labels)
    print("accuracy train: ", corrects / (corrects + wrongs))
    corrects, wrongs = Digit_NN.evaluate(digits_test_imgs, digits_test_labels)
    print("accuracy: test", corrects / (corrects + wrongs))

    #Train Meta NN on the Digit NN's sorted output vectors
    for i in range(len(mixed_train_imgs)):
        Meta_NN.train(np.sort(Digit_NN.run(mixed_train_imgs[i]).T),
                      mixed_train_labels[i])

    #Display Statistics for Meta
    #TODO: Investigate whether this has redundant code
    corrects, wrongs = Meta_NN.metaEval(Digit_NN, mixed_train_imgs,
                                        mixed_train_labels)
    train_accuracy = corrects / (corrects + wrongs)
    print("Train Accuracy: ", train_accuracy)
    print("Train Confusion Matrix: ")
    print(
        Meta_NN.meta_confusion_matrix(Digit_NN, mixed_train_imgs,
                                      mixed_train_labels, mixed_train_values))

    corrects, wrongs = Meta_NN.metaEval(Digit_NN, mixed_test_imgs,
                                        mixed_test_labels)
    test_accuracy = corrects / (corrects + wrongs)
    print("Test Accuracy: ", test_accuracy)
    print("Test Confusion Matrix: ")
    print(
        Meta_NN.meta_confusion_matrix(Digit_NN, mixed_test_imgs,
                                      mixed_test_labels, mixed_test_values))

    return train_accuracy, test_accuracy, Digit_NN, Meta_NN
Code Example #4
def trainOR():
    Network0 = NeuralNetwork(sturcture=[2, 3, 1], learningRate=0.5)

    # Train on the OR truth table.
    for i in range(1000):
        Network0.train([0, 0], [0])
        Network0.train([0, 1], [1])
        Network0.train([1, 0], [1])
        Network0.train([1, 1], [1])
        print(i, "", round((1 - Network0.costFunction) * 100, 0))

    print(round((1 - Network0.costFunction) * 100, 2))

    Network0.answer([0, 0])

    print(Network0.neurons[2][0].getOutput())
Code Example #5
def cross(Father, Mother, training_inputs, training_groundtruth, test_inputs, test_groundtruth):
    '''
    Crossover step for the genetic algorithm optimization. The inputs are two
    individuals (hyperparameter lists); the function returns the "child" with
    the better training performance.

    Two children are generated first. Each of a child's features comes from
    either the 'mother' or the 'father', chosen by a coin flip. A quick
    training and testing pass then evaluates each child's AUROC score, and
    the child with the better score is returned.
    '''
    Child_1 = []
    Child_2 = []
    for i in range(6):
        coin = rand(-1, 1)
        if coin >= 0:
            Child_1.append(Father[i])
            Child_2.append(Mother[i])
        else:
            Child_1.append(Mother[i])
            Child_2.append(Father[i])
    Child_1 = mutate(Child_1)
    Child_2 = mutate(Child_2)

    # Build the two networks, initialize their weights, and train them.
    NN_1 = NeuralNetwork(input_layer=68, hidden_layer=Child_1[0], output_layer=1,
                         lr=Child_1[1], lr_decay=Child_1[2], iteration=Child_1[5],
                         batch_size=Child_1[4], mf=Child_1[3])

    NN_2 = NeuralNetwork(input_layer=68, hidden_layer=Child_2[0], output_layer=1,
                         lr=Child_2[1], lr_decay=Child_2[2], iteration=Child_2[5],
                         batch_size=Child_2[4], mf=Child_2[3])

    NN_1.make_weights()
    NN_2.make_weights()

    NN_1.train(training_inputs, training_groundtruth)
    NN_2.train(training_inputs, training_groundtruth)

    Score_1 = AUROC_cruve(NN_1, test_inputs, test_groundtruth, Fig=False)
    Score_2 = AUROC_cruve(NN_2, test_inputs, test_groundtruth, Fig=False)
    if Score_1 > Score_2:
        return Child_1, Score_1
    else: 
        return Child_2, Score_2
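The cross function relies on rand and mutate helpers that are not shown in this snippet. A minimal sketch of what they might look like, assuming rand(a, b) draws a uniform float in [a, b) and mutate occasionally jitters one feature; the names come from the snippet, but these bodies, the mutation rate, and the jitter scale are assumptions:

import random

def rand(low, high):
    # Uniform draw in [low, high), matching calls like rand(-1, 1) above.
    return random.uniform(low, high)

def mutate(child, rate=0.1, scale=0.1):
    # Hypothetical mutation: with probability `rate`, perturb one randomly
    # chosen feature by up to +/- `scale` of its value, keeping integer
    # features (hidden_nodes, batch_size, epoch) as integers.
    if random.random() < rate:
        i = random.randrange(len(child))
        jittered = child[i] * (1 + random.uniform(-scale, scale))
        child[i] = max(1, round(jittered)) if isinstance(child[i], int) else jittered
    return child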
Code Example #6
def trainXOR():
    print("\ntrain XOR")
    sturcture = [2, 3, 1]
    Network0 = NeuralNetwork(sturcture=sturcture, learningRate=1)

    for i in range(10000):
        Network0.train([0, 0], [0])
        Network0.train([0, 1], [1])
        Network0.train([1, 0], [1])
        Network0.train([1, 1], [0])

        # Log the output neuron's distance from the target 0 for the last
        # ([1, 1]) pass as a rough convergence indicator.
        print(i, "",
              round(abs(Network0.neurons[len(sturcture) - 1][0].a), 3))

    print(Network0.answer([0, 0]))
    print(Network0.answer([1, 0]))
    print(Network0.answer([0, 1]))
    print(Network0.answer([1, 1]))

Code Example #7
def do_single_run(params):
    import tensorflow as tf  # the TF1-style session API below needs this in scope

    nn = NeuralNetwork(params)
    final_loss = nn.train(
        number_of_batches=params["Run"]["NumberOfBatchesForTraining"],
        learning_rate=params["Run"]["LearningRate"])
    if params["General"]["Run"]["DoEvalAOnGrid"]:
        a_list = nn.eval_a_on_grid(params["Grid"]["x_min"],
                                   params["Grid"]["x_steps"],
                                   params["Grid"]["x_step_size"])

        do_a_graph(params, a_list)

    monte_carlo_price, monte_carlo_std = nn.monte_carlo_price(
        number_of_batches=params["Run"]["NumberOfBatchesForEval"])
    print("real price: ", nn.bachelier_call_price())
    print("monte_carlo_price: ", monte_carlo_price)
    print("monte_carlo_std: ", monte_carlo_std)
    _, _ = nn.eval(number_of_batches=params["Run"]["NumberOfBatchesForEval"])

    if params["General"]["Run"]["DoRobustGraphs"]:
        params["General"]["Run"]["RatioRobustGraphs"] = params["General"][
            "Run"]["RatioNumberOfBatchesForEval"] * params["General"]["Run"][
                "RatioNumberOfBatchesForTraining"]
        do_robust_graphs(params, nn)

    # Finalize and tear down the TF session and graph; one reset is enough.
    nn.sess.graph.finalize()
    nn.sess.close()
    tf.reset_default_graph()

    conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
        params["Conf"], params["Diffusion"]["Type"])
    a_type = "a_" + params["a"]
    file_path_final_loss = (
        f"./figures/{conf_type}/{diffusion_type}/{a_type}/"
        f"graph_{conf_type}_{diffusion_type}_final_loss.txt")
    with open(file_path_final_loss, "w") as f:
        f.write("Var(Z) for single run is: " + str(final_loss))
Code Example #8
File: XOR.py Project: PkBadger/firstNN
import numpy as np
import pygame

from NN import NeuralNetwork

# The snippet begins mid-file; the setup below is assumed from the elided
# part of the original XOR.py (the window size and variable values are
# guesses). training_data is the XOR truth table, reconstructed from the
# surviving tail of the original definition.
pygame.init()
side = 400
screen = pygame.display.set_mode((side, side))
brain = NeuralNetwork(2, 4, 1)
running = 1

training_data = [{
    'input': [0, 0],
    'target': [0]
}, {
    'input': [0, 1],
    'target': [1]
}, {
    'input': [1, 0],
    'target': [1]
}, {
    'input': [1, 1],
    'target': [0]
}]


def drawPrediction():
    resolution = 10
    cols = rows = int(side / resolution)

    for i in range(cols):
        for j in range(rows):
            x1 = i / cols
            x2 = j / rows
            color = int(brain.predict([x1, x2])[0][0] * 255)
            pygame.draw.rect(
                screen, (color, color, color),
                (i * resolution, j * resolution, resolution, resolution))
    pygame.display.flip()


while running:
    event = pygame.event.poll()
    if event.type == pygame.QUIT:
        brain.saveState('XOR.state.json')
        running = 0
    if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
        brain = NeuralNetwork(2, 4, 1)
    for _ in range(1000):
        data = np.random.choice(training_data)
        brain.train(data['input'], data['target'])
    drawPrediction()
Code Example #9
def genetic_algorithm(training_inputs, training_groundtruth, test_inputs, test_groundtruth,
                      num_population, times, invasion, hidden_nodes, lr, lr_decay, mf, batch_size, epoch):
    '''
    Perform a genetic algorithm to find the best combination of hyperparameters for training.

    [training_inputs, training_groundtruth, test_inputs, test_groundtruth] are the training
    and testing sets used to evaluate training performance.
    num_population: the number of random candidates, each with randomly drawn features.
    times: the total number of "cross" rounds in which parents exchange features, aiming to
    produce "progeny" with better performance.
    invasion: the number of new "invading" individuals. Besides the progeny obtained from
    each cross, every round also introduces some fresh individuals with different features,
    so the population as a whole has more information to optimize over.

    [hidden_nodes, lr, lr_decay, mf, batch_size, epoch] are the six hyperparameter features
    to optimize. Each is passed as a two-element list giving its range; the rand function
    draws a random value within that range.
    '''
    
    
    # generate the parent population
    print('generating ' + str(num_population) + ' individuals')

    # make sure the input ranges are valid
    assert hidden_nodes[0] < hidden_nodes[1], 'something went wrong!'
    assert lr[0] < lr[1], 'something went wrong!'
    assert lr_decay[0] < lr_decay[1], 'something went wrong!'
    assert mf[0] < mf[1], 'something went wrong!'
    assert batch_size[0] < batch_size[1], 'something went wrong!'
    assert epoch[0] < epoch[1], 'something went wrong!'
    

    # generate a set of individuals based on the provided ranges
    individuals_genom = []
    individuals_phyno = []
    for i in range(num_population):
        # randomly generate the features
        _hidden_nodes = int(rand(hidden_nodes[0], hidden_nodes[1]))
        _lr = rand(lr[0], lr[1])
        _lr_decay = rand(lr_decay[0], lr_decay[1])
        _mf = rand(mf[0], mf[1])
        _batch_size = int(rand(batch_size[0], batch_size[1]))
        _epoch = int(rand(epoch[0], epoch[1]))

        # build an individual and add it to the population
        individuals_genom.append([_hidden_nodes, _lr, _lr_decay, _mf, _batch_size, _epoch])
        NN = NeuralNetwork(input_layer=68, hidden_layer=_hidden_nodes, output_layer=1,
                           lr=_lr, lr_decay=_lr_decay, iteration=_epoch,
                           batch_size=_batch_size, mf=_mf)
        NN.make_weights()
        NN.train(training_inputs, training_groundtruth)
        # also store each individual's performance in a parallel list
        individuals_phyno.append(AUROC_cruve(NN, test_inputs, test_groundtruth, Fig=False))
        
        
    # keep the best-performing individual for the next generation
    my_phyno = max(individuals_phyno)
    idx = individuals_phyno.index(my_phyno)
    my_genome = individuals_genom[idx]
    
    n = invasion
    # run the crossover for the requested number of rounds
    for t in range(times):
        print('For round ' + str(t) + ' the best candidate and the best result are')
        print(my_genome)
        print(my_phyno)
      
        if t >= 1:
            # add the new invading individuals, keeping the population size even
            if len(individuals_genom) % 2 == 0:
                add = n
            else:
                add = n + 1
            for i in range(add):
                _hidden_nodes = int(rand(hidden_nodes[0], hidden_nodes[1]))
                _lr = rand(lr[0], lr[1])
                _lr_decay = rand(lr_decay[0], lr_decay[1])
                _mf = rand(mf[0], mf[1])
                _batch_size = int(rand(batch_size[0], batch_size[1]))
                _epoch = int(rand(epoch[0], epoch[1]))

                individuals_genom.append([_hidden_nodes, _lr, _lr_decay, _mf, _batch_size, _epoch])
                NN = NeuralNetwork(input_layer=68, hidden_layer=_hidden_nodes, output_layer=1,
                                   lr=_lr, lr_decay=_lr_decay, iteration=_epoch,
                                   batch_size=_batch_size, mf=_mf)
                NN.make_weights()
                NN.train(training_inputs, training_groundtruth)
                individuals_phyno.append(AUROC_cruve(NN, test_inputs, test_groundtruth, Fig=False))

        # seed the next generation with the current best individual
        next_generation_genom = []
        next_generation_phyno = []
        next_generation_genom.append(my_genome)
        next_generation_phyno.append(my_phyno)
        assert len(individuals_genom) % 2 == 0, 'something went wrong!'
        for i in range(len(individuals_genom) // 2):

            child_genom, child_phyno = cross(individuals_genom[i * 2], individuals_genom[i * 2 + 1],
                                             training_inputs, training_groundtruth, test_inputs, test_groundtruth)
            # get the child, put the child into the next generation
            next_generation_genom.append(child_genom)
            next_generation_phyno.append(child_phyno)
          
        # the next generation now becomes the parents
        individuals_genom = next_generation_genom
        individuals_phyno = next_generation_phyno
        # shuffle the population randomly
        index = list(range(len(individuals_genom)))
        random.shuffle(index)
        individuals_phyno = [individuals_phyno[x] for x in index]
        individuals_genom = [individuals_genom[x] for x in index]
        # keep tracking the best performer
        my_phyno = max(individuals_phyno)
        idx = individuals_phyno.index(my_phyno)
        my_genome = individuals_genom[idx]
    
    print('The whole crossing process is done')
    my_phyno = max(individuals_phyno)
    idx = individuals_phyno.index(my_phyno)
    my_genome = individuals_genom[idx]
    print('the best candidate and the best result are')
    print(my_genome)
    print(my_phyno)
    return my_genome, my_phyno
        
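A minimal invocation sketch, assuming feature matrices with 68 columns to match the input_layer=68 used above; the data arrays are synthetic placeholders and the range values are illustrative, not from the original project (note invasion is kept even, which the parity logic above relies on):

import numpy as np

rng = np.random.default_rng(0)
X_train = rng.normal(size=(500, 68))
y_train = rng.integers(0, 2, size=(500, 1))
X_test = rng.normal(size=(100, 68))
y_test = rng.integers(0, 2, size=(100, 1))

# Each hyperparameter is a two-element [low, high] range, as the docstring describes.
genetic_algorithm(X_train, y_train, X_test, y_test,
                  num_population=10, times=5, invasion=2,
                  hidden_nodes=[8, 64], lr=[1e-4, 1e-1], lr_decay=[0.8, 1.0],
                  mf=[0.1, 0.9], batch_size=[8, 64], epoch=[10, 100])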
Code Example #10
# The snippet begins mid-file: `data`, `y`, `NUM_TRAIN`, `NUM_TEST`, `SIZE`,
# `preprocess`, and the NN class are defined earlier in the original file.
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             precision_score, recall_score)

x = preprocess(data)

train_x = x[:NUM_TRAIN]
train_y = y[:NUM_TRAIN]

test_x = x[-NUM_TEST:]
test_y = y[-NUM_TEST:]

params = {
    'train_inputs': train_x,
    'train_targets': train_y,
    'layer_dimentions': [SIZE**2, 450, 250, 50, 10],
    'learning_rate': 1e-3,
    'iterations': 250
}

nn = NN(params)
nn.train()

prediction = nn.predict(test_x)
accuracy = accuracy_score(test_y, prediction)
precision = precision_score(test_y, prediction, average='micro')
recall = recall_score(test_y, prediction, average='micro')
f1 = f1_score(test_y, prediction, average='micro')
conf_matrix = confusion_matrix(test_y, prediction)
print("accuracy:", accuracy)
print("precision:", precision)
print("recall:", recall)
print("f1:", f1)
print("confusion matrix:\n", conf_matrix)
Code Example #11
File: digit-recognizer.py Project: PkBadger/firstNN
import pandas
import numpy
from NN import NeuralNetwork

numbers = pandas.read_csv('processed_train.csv')
#print(numbers.iloc[:,1:])

brain = NeuralNetwork(784, 100, 10, 0.3)

total = numbers.shape[0]

for index, row in numbers.iterrows():
    target = row.iloc[1]   # positional indexing made explicit with .iloc
    inputs = row.iloc[2:]
    # Scaled one-hot target: 0.01 everywhere, 0.99 at the true digit, keeping
    # targets inside the sigmoid's open (0, 1) range.
    targets = numpy.zeros(10) + 0.01
    targets[int(target)] = 0.99

    brain.train(inputs, targets)
    print(str(index) + '/' + str(total))

brain.saveState('digit-recognizer.state.json')