def main():
    #make a neural network with set architecture
    arch = (2, 4, 1)
    nn = Neural_Network(arch)

    #XOR input data
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    #XOR output data
    y_train = np.array([[0], [1], [1], [0]])

    #set max iterations, learning rate, and convergence threshold
    iters, lr, threshold = 5000, 1, 0.00001

    #train the network
    J_Hist = nn.train(X_train, y_train, alpha=lr, maxIter=iters, convergenceThreshold=threshold)

    #forward propagate to get a prediction from the network
    result = nn.forwardProp(X_train)

    #print some nice information
    print("\nUnfiltered Prediction:\n", result)
    print("Final Prediction:\n", result >= 0.5, '\n')
    print("Random init cost: ", round(J_Hist[0], 5), ", Final cost: ", round(J_Hist[-1], 5))
    print("Cost reduction from random init: ", round(J_Hist[0] - J_Hist[-1], 5), '\n')

    #set up subplots for the cost history and decision boundary
    figure, plots = plt.subplots(ncols=2)
    figure.suptitle('Neural Network Learning of XOR')   #supertitle
    figure.tight_layout(pad=2.5, w_pad=1.5, h_pad=0)    #fix margins
    drawCostHistory(J_Hist, plots[0])
    drawDecisionBoundary(nn, plots[1], seperation_coefficient=50, square_size=1, allowNegatives=False)

    #show the cool graphs :)
    plt.show()
def programWorkStation(train_file):
    image_values = read_mat(train_file)[0]                # images
    normalized_images = normalize(image_values)           # normalized images
    expected_classes = read_mat(train_file)[1]            # expected flower types
    expected_outputs = expectedOutputs(expected_classes)  # flatten outputs

    X = normalized_images  #normalized input images
    size_of_one_image = len(normalized_images[0])
    size_of_input = size_of_one_image

    # parameters of neural network
    hidden_node_number = 100
    hidden_layer_number = 2
    size_of_output = 5
    learning_rate = 0.005
    epoch_size = 300
    batch_size = 20

    # neural network object is created here.
    Beauty_Neural_Network = Neural_Network(size_of_input, hidden_node_number, size_of_output, hidden_layer_number)

    deneme_input = X
    size_den_inp = len(deneme_input)
    den_expected = expected_outputs

    # run the code according to epoch and batch sizes.
    epochProcess(epoch_size, batch_size, learning_rate, Beauty_Neural_Network, size_den_inp, deneme_input, den_expected)
def train(args):
    try:
        inputs = []
        targets = []
        y = range(args.start, args.end + 1)
        if (args.db == 'u'):
            dl = Data_Loader()
            i, t = dl.getTargets(y)
        elif (args.db == 'b'):
            dl = Data_Loader()
            i, t = dl.getBalancedTargets(y)
        elif (args.db == 'p'):
            dl = Data_Loader('playoffTeams.csv')
            i, t = dl.getTargets(y)
        elif (args.db == 'o'):
            dl = Data_Loader('balancedData.csv')
            i, t = dl.getTargets(y)
        elif (args.db == 's'):
            dl = Data_Loader()
            i, t = dl.getBLSmoteTargets(y, .25)
            #i,t = dl.getSmoteTargets(y)
        inputs += i
        targets += t

        #create NN
        # if file already exists, build on that training
        if (os.path.exists(args.file)):
            print "file exists"
            nn = Neural_Network.createFromFile(args.file)
        else:
            print "file does not exist"
            nn = Neural_Network.createWithRandomWeights(len(inputs[0]), args.nodes, len(targets[0]))

        #train NN with the given data
        print 'Beginning Training...'
        nn = nn.train(args.epochs, inputs, targets, args.learn_rate)
        nn.saveToFile(args.file)
        print "Neural Network saved to %s" % (args.file)
    except Exception as e:
        print "invalid formatting, consult neural_main.py t --help \n Error: %s" % e
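# --- Hypothetical wiring for the args object used by train(args) above. ---
# neural_main.py itself is not reproduced here; the option names below are
# inferred only from the attributes the function reads and may differ from
# the real CLI.
import argparse

parser = argparse.ArgumentParser(prog='neural_main.py t')
parser.add_argument('--start', type=int, required=True)
parser.add_argument('--end', type=int, required=True)
parser.add_argument('--db', choices=['u', 'b', 'p', 'o', 's'], required=True)
parser.add_argument('--file', required=True)
parser.add_argument('--nodes', type=int, required=True)
parser.add_argument('--epochs', type=int, required=True)
parser.add_argument('--learn_rate', type=float, required=True)

args = parser.parse_args()
train(args)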
def compute(self, simulation, closest_rsu):
    neural_net = Neural_Network()
    X = self.training_data.pop()
    y = self.training_label.pop()
    # print(X)
    # print(y)
    with autograd.record():
        output = self.net(X)
        if cfg['attack'] == 'label' and len(closest_rsu.accumulative_gradients) < cfg['num_faulty_grads']:
            loss = neural_net.loss(output, 9 - y)
        else:
            loss = neural_net.loss(output, y)
    loss.backward()
    grad_collect = []
    for param in self.net.collect_params().values():
        if param.grad_req != 'null':
            grad_collect.append(param.grad().copy())
    self.gradients = grad_collect
def b_2(plot=False, units=[5], eeta=0.1, threshold=1e-6):
    print("\nNeural_Network")
    model = Neural_Network(len(train_data[0]), units, activation="sigmoid")
    print(model)
    model.train(train_data, train_labels, max_iter=5000, eeta=eeta,
                batch_size=len(train_data), threshold=threshold, decay=False)
    pred = model.predict(train_data)
    train_acc = accuracy_score(train_labels, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(test_data)
    test_acc = accuracy_score(test_labels, pred) * 100
    print("Test Set Accuracy: ", test_acc)
    if plot:
        plot_decision_boundary(
            model.predict, np.array(train_data), np.array(train_labels),
            "Neural_Network Train Set\n Units in Hidden layers: %s\nAccuracy: %f"
            % (str(model.hidden_layer_sizes), train_acc))
        plot_decision_boundary(
            model.predict, np.array(test_data), np.array(test_labels),
            "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f"
            % (str(model.hidden_layer_sizes), test_acc))
def b_3(plot=False):
    units = [1, 2, 3, 10, 20, 40]
    lrs = [0.09, 0.09, 0.1, 0.1, 0.1, 0.01]
    # lrs = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    for unit, lr in zip(units, lrs):
        print("\nNeural_Network")
        model = Neural_Network(len(train_data[0]), [unit], activation="sigmoid")
        print(model)
        model.train(train_data, train_labels, max_iter=10000, eeta=lr,
                    batch_size=len(train_data), threshold=1e-6, decay=False)
        pred = model.predict(train_data)
        train_acc = accuracy_score(train_labels, pred) * 100
        print("Train Set Accuracy: ", train_acc)
        pred = model.predict(test_data)
        test_acc = accuracy_score(test_labels, pred) * 100
        print("Test Set Accuracy: ", test_acc)
        if plot:
            plot_decision_boundary(
                model.predict, np.array(test_data), np.array(test_labels),
                "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f"
                % (str(model.hidden_layer_sizes), test_acc))
def test_xor():
    X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    y = np.array(([75], [82], [93]), dtype=float)
    X = X / np.amax(X, axis=0)
    y = y / 100  # Max test score is 100

    X = np.array(([1, 1], [0, 1], [0, 0], [1, 0]), dtype=float)
    y = np.array(([0], [1], [0], [1]), dtype=float)
    NN = Neural_Network()
    train(NN, X, y)

    X = np.array(([1, 1]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for {}: {}'.format(X, yHat))
    X = np.array(([0, 1]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for {}: {}'.format(X, yHat))
    X = np.array(([1, 0]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for {}: {}'.format(X, yHat))
    X = np.array(([0, 0]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for {}: {}'.format(X, yHat))
def test_train_ocr():
    X1 = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    y1 = np.array(([75], [82], [93]), dtype=float)

    a, b, c, c_to_recognized = alphabet()
    inputLayerSize = len(a[0])
    hiddenLayerSize = 3 * inputLayerSize
    outputLayerSize = 1
    NN = Neural_Network(inputLayerSize=inputLayerSize,
                        hiddenLayerSize=hiddenLayerSize,
                        outputLayerSize=outputLayerSize)
    X = np.array((a[0], b[0], c[0]), dtype=float)
    y = np.array((a[1], b[1], c[1]), dtype=float)
    train(NN, X, y)

    X = np.array((c[0]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for good C: {}'.format(yHat))
    X = np.array((c_to_recognized), dtype=float)
    yHat = NN.forward(X)
    print('estimate for bad C: {}'.format(yHat))
def cross_validate(args):
    # import some functions
    encode = Data_Loader().encode
    find_error = NFL_Predictor().compareVector
    try:
        nn = Neural_Network.createFromFile(args.file)
        print "Loaded Neural Network with %i hidden nodes" % len(nn.hidden_nodes)
        totalCorrect = 0.0
        total_tested = 0.0
        for y in range(args.start, args.end + 1):
            classRight = [0, 0, 0, 0, 0, 0]
            correct = incorrect = 0
            if (args.db == 'u'):
                dl = Data_Loader()
                teams = dl.getAllTeams(y)
            elif (args.db == 'b'):
                dl = Data_Loader()
                teams = dl.getAllTeams(y)
            elif (args.db == 'p'):
                dl = Data_Loader('playoffTeams.csv')
                teams = dl.getAllTeams(y)
            elif (args.db == 'o'):
                dl = Data_Loader('balancedData.csv')
                teams = dl.getAllTeams(y)
            total_tested += len(teams)
            total_error = 0.0
            for t in teams:
                t.result = nn.feed_forward(t.stats)
                error = find_error(t.result, encode(t.classification))
                total_error += error ** 2
                if error < .08:
                    correct += 1
                if args.v:
                    print "team %s, results %s, class %s, error %s" % (t.name, t.result, encode(t.classification), error)
            if not args.q:
                print "%d \t within threshold: %d/%d \t error: %s" % (y, correct, len(teams), str(total_error))
            totalCorrect += correct
        print "totalCorrect: %i/%i, %.2f%%" % (totalCorrect, total_tested, (totalCorrect / total_tested) * 100)
    except Exception as e:
        print "invalid formatting, consult neural_main.py c --help \nError: %s" % e
def __init__(self, init_NN=True) -> None:
    if init_NN:
        self.NN: Neural_Network = Neural_Network(SHAPE)
    else:
        self.NN: Neural_Network = None
    self.size: int = int()
    self.time: int = int()

    ##### simulation variables #####
    self.pos: List[int] = [frame_x//2, frame_y//2]
    self.body: List[List[int]] = [[self.pos[0]-10*i, self.pos[1]] for i in range(3)]
    self.length: int = 3
    # controls
    self.direction: str = 'RIGHT'
    self.change_to: str = self.direction
    self.food_pos: List[int] = [r.randrange(1, (frame_x//10)) * 10,
                                r.randrange(1, (frame_y//10)) * 10]
    #self.food_spawn: bool = True
    self.dead: bool = False

    ##### data fed to neural network #####
    self.current_frame: List[List[int]] = None
    self.framebuffer: List[List[int]] = None
    self.reset_framebuffer()
def classify(self, show_output=False):
    """Send the preprocessed images to the NN classifier"""
    print('{0} Numbers to be classified'.format(len(self.cropped_images)))
    return_list = []
    self.apply_cropping(show_output=show_output)

    net = Neural_Network()
    net.load_state_dict(torch.load(TENSOR_LOCATION))
    net.eval()

    for image in self.cropped_images:
        image = Image.fromarray(image)
        # Resizes the number and adds a 10 px border
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize(self.output_size - self.border_size),
            transforms.CenterCrop(self.output_size),
            transforms.ToTensor(),
        ])
        img_tensor = transform(image)
        if show_output:
            plt.imshow(np.array(img_tensor)[0, :, :],
                       cmap=plt.cm.gray_r, interpolation='nearest')
            plt.title('Image used for classification')
            plt.show()
        img_tensor.unsqueeze_(0)
        outputs = net.forward(Variable(img_tensor))
        dummy, predicted_labels = torch.max(outputs.data, 1)
        return_list.append(int(predicted_labels.numpy().max()))
        print('Classified: {0}'.format(predicted_labels.numpy().max()))
    return return_list
def predict(args):
    try:
        nn = Neural_Network.createFromFile(args.file)
        dl = Data_Loader()
        #team = dl.getTeam(args.team, args.year)
        team = dl.getTeam(args.team, args.year)
        result = nn.feed_forward(team.stats)
        print "\n\nPredicting the %i %s..." % (args.year, team_dict.teams[args.team])
        print "RESULTS: %.3f\n" % result[0]
        if args.show_expected:
            print "EXPECTED: %s" % (dl.encode(team.classification))[0]
        results_graph = "\t|" + "".join(repeat("-", int(result[0]/.8*50))) + "|" + "".join(repeat("-", (int((1 - result[0]/.8)*50)))) + "|"
        print " Not in playoffs" + "".join(repeat(" ", 35)) + "Super Bowl Champs"
        print results_graph

        post_processor = NFL_Predictor(nn)
        similar_teams = post_processor.compareWithPastTeams(dl.getEveryTeam(), team, 16)
        print "\nThe 15 most similar teams throughout history:"
        del similar_teams[0]
        for t in similar_teams:
            print "%s \tScore: %f" % t
    except Exception as e:
        print "invalid formatting, consult neural_main.py t --help \n Error: %s" % e
def c_2(plot=False, units=[100], activation="sigmoid", eeta=0.1):
    print("\nNeural_Network MNIST")
    model = Neural_Network(len(mnist_trd[0]), units, activation=activation)
    print(model)
    model.train(mnist_trd, mnist_trl, max_iter=300, eeta=eeta,
                batch_size=100, decay=True, threshold=1e-3)
    pred = model.predict(mnist_trd)
    train_acc = accuracy_score(mnist_trl, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(mnist_ted)
    test_acc = accuracy_score(mnist_tel, pred) * 100
    print("Test Set Accuracy: ", test_acc)
################################################################################
##
## Copyright 2016 Udara Karunarathna (IT13021030) and Supun Sudaraka (IT13019914). All rights reserved.
##
################################################################################

import numpy as np
from scipy import optimize
from StringIO import StringIO
import matplotlib.pyplot as plot
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

from neural_network import Neural_Network
from trainer import Trainer

NeuralNetwork = Neural_Network()

#Pass to Trainer Data:
X, Y = NeuralNetwork.readInputFile("input.txt")
Y = np.reshape(Y, (3, 1))

print ("Input : ")
print (X)
print ("Results : ")
print (Y)

#normalized training data
X = NeuralNetwork.normalize(X)
Y = Y / 100

print ("Input : ")
print (X)
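# A hedged guess at the next step: the script imports Trainer and labels X, Y as
# "Pass to Trainer Data" but stops before using them. Assuming the Trainer wraps
# the network and fits it to the normalized data, the call would look roughly
# like this (the exact Trainer API is not shown in this snippet):
T = Trainer(NeuralNetwork)
T.train(X, Y)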
def get_model(weights=[], bias=[]):
    return Neural_Network(9, 6, 3, weights, bias)
class Classifier:

    def __init__(self, classifier_type, **kwargs):
        """
        Initializer. Classifier_type should be a string which refers to the
        specific algorithm the current classifier is using. Use keyword
        arguments to store parameters specific to the algorithm being used.
        E.g. if you were making a neural net with 30 input nodes, a hidden
        layer with 10 units, and 3 output nodes your initialization might look
        something like this:

        neural_net = Classifier(weights = [], num_input=30, num_hidden=10, num_output=3)

        Here I have the weight matrices being stored in a list called weights (initially empty).
        """
        self.classifier_type = classifier_type
        self.params = kwargs
        """
        The kwargs you passed in simply become a dictionary, so we can save
        that dictionary to be used in other methods.
        """

    def train(self, training_data):
        """
        Data should be nx(m+1) numpy matrix where n is the number of examples
        and m is the number of features (recall that the first element of the
        vector is the label).

        I recommend implementing the specific algorithms in a separate module
        and then determining which method to call based on classifier_type.
        E.g. if you had a module called neural_nets:

        if self.classifier_type == 'neural_net':
            import neural_nets
            neural_nets.train_neural_net(self.params, training_data)

        Note that your training algorithms should be modifying the parameters,
        so make sure that your methods are actually modifying self.params.

        You should print the accuracy, precision, and recall on the training data.
        """
        if self.classifier_type == 'neural_network':
            #change num_input, num_output based upon the data
            self.nn = Neural_Network("neural_network", weights=[],
                                     num_input=self.params['num_input'],
                                     num_hidden=1000,
                                     num_output=self.params['num_output'],
                                     alt_weight=self.params['one'] == '1',
                                     momentum=self.params['two'] == '1')
            self.nn.train(training_data)
        elif self.classifier_type == 'naive_bayes':
            self.nb = Naive_Bayes("naive_bayes")
            self.nb.train(training_data)
        elif self.classifier_type == 'decision_tree':
            self.dt = Decision_Tree("decision_tree",
                                    pruning=self.params['one'] == '1',
                                    info_gain_ratio=self.params['two'] == '1')
            self.dt.train(training_data)

    def predict(self, data):
        """
        Predict the class of a single data vector.
        Data should be 1x(m+1) numpy matrix where m is the number of features
        (recall that the first element of the vector is the label).

        I recommend implementing the specific algorithms in a separate module
        and then determining which method to call based on classifier_type.

        This method should return the predicted label.
        """

    def test(self, test_data):
        """
        Data should be nx(m+1) numpy matrix where n is the number of examples
        and m is the number of features (recall that the first element of the
        vector is the label).

        You should print the accuracy, precision, and recall on the test data.
        """
        #pdb.set_trace()
        #Accuracy, Recall, and Precision
        relevant_and_retrieved, relevant, retrieved, total, hit = 0, 0, 0, 0, 0
        for person in test_data:
            predict = 0
            if self.classifier_type == 'neural_network':
                predict = self.nn.predict(person)
            elif self.classifier_type == 'naive_bayes':
                predict = self.nb.predict(person)
            elif self.classifier_type == 'decision_tree':
                predict = self.dt.predict(person)
            if predict == person[0]:
                if predict == 0:
                    relevant_and_retrieved += 1
                hit += 1
            if person[0] == 0:
                relevant += 1
            if predict == 0:
                retrieved += 1
            total += 1
        accuracy = hit / float(total)
        recall = relevant_and_retrieved / float(relevant)
        precision = relevant_and_retrieved / float(retrieved)
        print "Accuracy: ", accuracy
        print "Precision: ", precision
        print "Recall: ", recall
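# A minimal usage sketch for the Classifier above, following its docstrings.
# The data loader and the num_input/num_output/'one'/'two' values mirror the
# congress-data snippet further below in this collection; treat the exact
# values as placeholders rather than the author's configuration.
import load_data as ld

data = ld.load_congress_data(.85)          # (train, test) split
clf = Classifier('neural_network',
                 weights=[], num_input=16, num_output=2,
                 one='1', two='0')          # 'one'/'two' toggle alt_weight / momentum
clf.train(data[0])
clf.test(data[1])                           # prints accuracy, precision, and recall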
import time

import numpy as np

from neural_network import Neural_Network, X, y

weightsToTry = np.linspace(-5, 5, 1000)
costs = np.zeros(1000)

NN = Neural_Network()
startTime = time.clock()
for i in range(1000):
    NN.W1[0, 0] = weightsToTry[i]
    yHat = NN.forward(X)
    costs[i] = 0.5 * sum((y - yHat) ** 2)
endTime = time.clock()

print(endTime - startTime)
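# A small follow-up sketch: the sweep above fills `costs` for each candidate
# value of W1[0, 0], so the cost curve can be plotted directly (assuming
# matplotlib is available in this environment).
import matplotlib.pyplot as plt

plt.plot(weightsToTry, costs)
plt.xlabel('W1[0, 0]')
plt.ylabel('cost')
plt.title('Cost as a function of a single weight')
plt.show()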
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy

from neural_network import Neural_Network
from plotter import Plotter

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)

#writing a circle of pixels to the array according to middle coordinates of mouse position
plotter = Plotter()
NN = Neural_Network()

if input("Wanna train model? y/N ") == 'y':
    layers = [800, 400]
    NN.initialize(layers)
    NN.train(20)
    if input("Wanna save model? Y/n ") != 'n':
        NN.save_model()
elif input("Wanna load from file? Y/n ") != 'n':
    NN.load_model()
else:
    print("Exit")
              [1, 1, 0],
              [1, 1, 1]), dtype=float)  # 7x3 Tensor

# y = our output of our neural network. This is a supervised method.
y = np.array(([1], [0], [0], [0], [0], [0], [0], [1]), dtype=float)

# what value we want to predict
xPredicted = np.array(([0, 0, 1]), dtype=float)

# Normalize xPredicted
X = X / np.amax(X, axis=0)  # maximum of X input array
# maximum of xPredicted (our input data for the prediction)
xPredicted = xPredicted / np.amax(xPredicted, axis=0)

# set up our Loss file for graphing
lossFile = open("SumSquaredLossList.csv", "w")

myNeuralNetwork = Neural_Network(hidden_layer_size=10)
# trainingEpochs = 1000
trainingEpochs = 100000

for i in range(trainingEpochs):  # train myNeuralNetwork for trainingEpochs iterations
    print("Epoch # " + str(i) + "\n")
    print("Network Input : \n" + str(X))
    print("Expected Output of XOR Gate Neural Network: \n" + str(y))
    print("Actual Output from XOR Gate Neural Network: \n" + str(myNeuralNetwork.feedForward(X)))
    # mean sum squared loss
    Loss = np.mean(np.square(y - myNeuralNetwork.feedForward(X)))
    myNeuralNetwork.saveSumSquaredLossList(i, Loss)
    print("Sum Squared Loss: \n" + str(Loss))
    print("\n")
    myNeuralNetwork.trainNetwork(X, y)

myNeuralNetwork.saveWeights()
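# Hypothetical follow-up: plot the loss history that saveSumSquaredLossList()
# appears to write to SumSquaredLossList.csv. The "epoch,loss" row format is an
# assumption based on the (i, Loss) arguments above, not a documented format.
import csv
import matplotlib.pyplot as plt

epochs, losses = [], []
with open("SumSquaredLossList.csv") as f:
    for row in csv.reader(f):
        if len(row) >= 2:
            epochs.append(float(row[0]))
            losses.append(float(row[1]))

plt.plot(epochs, losses)
plt.xlabel("epoch")
plt.ylabel("sum squared loss")
plt.show()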
from neural_network import Neural_Network
import load_data as ld
import pdb

nb = Neural_Network("neural_network", weights=[], num_input=16, num_hidden=1000, num_output=2)
#neural_net = Classifier(weights = [], num_input=30, num_hidden=10, num_output=3)

data = ld.load_congress_data(.85)
#data = ld.load_iris(.75)
#data = ld.load_monks(3)

classify = nb.train(data[0])
#nb.train(iris[0])
#pdb.set_trace()
#nb.test(congress[1])

tot, hit = 0, 0
ones = 0
zeros = 0
twos = 0
for person in data[1]:
    predict = nb.predict(person)
    if predict == person[0]:
        hit += 1
    tot += 1
    if predict == 1:
        ones += 1
    elif predict == 0:
        zeros += 1
def initialize_network_for_validation(network_file_lines, initial_weights_file_lines,
                                      dataset_file_lines, isTest, network=None):
    #print("[main] Initializing network")
    #print("network_file_lines", network_file_lines)
    #print("initial_weights_file_lines", initial_weights_file_lines)
    #print("dataset_file_lines", dataset_file_lines)
    if (network == None):
        # the first line is the regularization factor
        network_lambda = float(network_file_lines[0])
        # each remaining line is a layer, and its value is the number of neurons
        layers_size = []
        for neurons in network_file_lines[1:]:
            #print("[main] layer with", neurons, "neurons")
            layers_size.append(int(neurons))

        layers = []  # layers
        # read the weights from the initial-weights file passed on the command line
        if (len(initial_weights_file_lines) > 0):
            #print("initial weights vector is not null")
            for line in initial_weights_file_lines:
                neurons = line.split(';')
                v_neurons = []
                for neuron in neurons:
                    weights = neuron.split(',')
                    v_weights = []
                    for weight in weights:
                        # weights of each neuron
                        v_weights.append(float(weight))
                    v_neurons.append(v_weights)
                # each layer holds its neurons, which hold their weights
                layers.append(np.array(v_neurons))
        else:
            # create initial weights randomly between -1 and 1
            #print("initial weights vector is null")
            #print("layer_sizes", layers_size)
            for i, layer in enumerate(layers_size[:-1]):
                v_neurons = []
                for i in range(layers_size[i + 1]):
                    weights_v = []
                    for y in range(layer + 1):  # bias
                        weights_v.append(random.triangular(-1, 1, 0))
                    v_neurons.append(weights_v)
                layers.append(np.array(v_neurons))

        instances = []
        for instance in dataset_file_lines:
            instances.append(instance)

        #print("[main] Regularization factor:", network_lambda)
        #print("[main] Number of layers:", len(layers))
        # overall structure of the network
        neural_network = Neural_Network(network_lambda, layers_size, layers)

        if (isTest):
            networkPlus = Neural_Network(network_lambda, layers_size, layers)
            networkMinus = Neural_Network(network_lambda, layers_size, layers)
            networkClean = Neural_Network(network_lambda, layers_size, layers)
            back_propagation.gradient_verification(network, dataset_file_lines, isTest, alpha,
                                                   networkPlus, networkMinus, networkClean, 0.000001)

        # call the backpropagation algorithm with the network and the training instances
        errorReg, network, fx, D = back_propagation.execute(
            neural_network, dataset_file_lines, isTest, alpha)
    else:
        errorReg, network, fx, D = back_propagation.execute(
            network, dataset_file_lines, isTest, alpha)
    return errorReg, network, fx
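# For reference, the parsing loop above implies an initial-weights file laid out
# like this (the values below are made up): one line per layer, neurons separated
# by ';', and each neuron's weights separated by ','.
example_line = "0.4,0.1,-0.3;0.2,0.6,0.9"   # one layer: two neurons, three weights each
layer = [[float(w) for w in neuron.split(',')] for neuron in example_line.split(';')]
print(layer)   # [[0.4, 0.1, -0.3], [0.2, 0.6, 0.9]]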
DL = Data_Loader()

'''
nn = Neural_Network.createWithRandomWeights(66,40,6)

# train! with learning rate proportional to # of teams in the situations
inputs = []
targets = []
for y in range(2005,2007):
    i,t = DL.getTargets(y)
    inputs += i
    targets += t
#print targets
nn = nn.train(10000,inputs,targets,1.5)
nn.saveToFile("predictortest.txt")
'''

nn = Neural_Network.createFromFile("predictortest.txt")

teams_2011 = DL.getAllTeams(2011)
pats_2011 = filter(lambda t: t.name == "nwe", teams_2011)[0]
all_other_teams = filter(lambda t: t.name != "nwe", teams_2011)

predictor = NFL_Predictor(nn)
similar = predictor.compareWithPastTeams(all_other_teams, pats_2011, 3)
for t, d in similar:
    print t.name + " " + str(d) + "\n"