def b_3(plot=False):
    """Sweep single-hidden-layer widths (with a matched learning rate per width)
    and report train/test accuracy for each."""
    units = [1, 2, 3, 10, 20, 40]
    lrs = [0.09, 0.09, 0.1, 0.1, 0.1, 0.01]
    # lrs = [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
    for unit, lr in zip(units, lrs):
        print("\nNeural_Network")
        model = Neural_Network(len(train_data[0]), [unit], activation="sigmoid")
        print(model)
        model.train(train_data, train_labels, max_iter=10000, eeta=lr,
                    batch_size=len(train_data), threshold=1e-6, decay=False)
        pred = model.predict(train_data)
        train_acc = accuracy_score(train_labels, pred) * 100
        print("Train Set Accuracy: ", train_acc)
        pred = model.predict(test_data)
        test_acc = accuracy_score(test_labels, pred) * 100
        print("Test Set Accuracy: ", test_acc)
        if plot:
            plot_decision_boundary(
                model.predict, np.array(test_data), np.array(test_labels),
                "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f"
                % (str(model.hidden_layer_sizes), test_acc))
def programWorkStation(train_file):
    image_values = read_mat(train_file)[0]                 # images
    normalized_images = normalize(image_values)            # normalized images
    expected_classes = read_mat(train_file)[1]             # expected flower types
    expected_outputs = expectedOutputs(expected_classes)   # flatten outputs
    X = normalized_images                                  # normalized input images
    size_of_one_image = len(normalized_images[0])
    size_of_input = size_of_one_image

    # parameters of neural network
    hidden_node_number = 100
    hidden_layer_number = 2
    size_of_output = 5
    learning_rate = 0.005
    epoch_size = 300
    batch_size = 20

    # neural network object is created here.
    Beauty_Neural_Network = Neural_Network(size_of_input, hidden_node_number,
                                           size_of_output, hidden_layer_number)

    deneme_input = X
    size_den_inp = len(deneme_input)
    den_expected = expected_outputs

    # run the code according to epoch and batch sizes.
    epochProcess(epoch_size, batch_size, learning_rate, Beauty_Neural_Network,
                 size_den_inp, deneme_input, den_expected)
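# epochProcess() is defined elsewhere in this repo. For orientation only, a
# minimal sketch of the mini-batch loop it presumably runs; the
# network.train_batch() call is an assumed API, not the real one.
def epochProcess_sketch(epoch_size, batch_size, learning_rate, network,
                        n_samples, inputs, expected):
    for _ in range(epoch_size):  # one pass over the data per epoch
        for start in range(0, n_samples, batch_size):
            batch_x = inputs[start:start + batch_size]
            batch_y = expected[start:start + batch_size]
            network.train_batch(batch_x, batch_y, learning_rate)  # assumed API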
def test_xor():
    # leftover regression example (test scores); immediately overwritten by
    # the XOR data below and never used
    X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    y = np.array(([75], [82], [93]), dtype=float)
    X = X / np.amax(X, axis=0)
    y = y / 100  # max test score is 100

    # XOR truth table
    X = np.array(([1, 1], [0, 1], [0, 0], [1, 0]), dtype=float)
    y = np.array(([0], [1], [0], [1]), dtype=float)

    NN = Neural_Network()
    train(NN, X, y)

    # query the trained network on each XOR input
    for sample in ([1, 1], [0, 1], [1, 0], [0, 0]):
        X = np.array(sample, dtype=float)
        yHat = NN.forward(X)
        print('estimate for {}: {}'.format(X, yHat))
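# train() is defined elsewhere in this repo. As a point of reference, a
# minimal sketch of such a helper, assuming only attributes seen in these
# files (NN.forward, NN.W1) plus an NN.W2 output-weight matrix (an
# assumption); the real helper likely uses analytic backpropagation rather
# than this slow central-difference gradient.
def train_sketch(NN, X, y, lr=1.0, epochs=1000, eps=1e-4):
    def cost():
        return 0.5 * np.sum((y - NN.forward(X)) ** 2)

    for _ in range(epochs):
        for W in (NN.W1, NN.W2):
            grad = np.zeros_like(W)
            for idx in np.ndindex(W.shape):
                old = W[idx]
                W[idx] = old + eps
                up = cost()
                W[idx] = old - eps
                grad[idx] = (up - cost()) / (2 * eps)  # central difference
                W[idx] = old
            W -= lr * grad  # gradient-descent step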
def b_2(plot=False, units=[5], eeta=0.1, threshold=1e-6):
    print("\nNeural_Network")
    model = Neural_Network(len(train_data[0]), units, activation="sigmoid")
    print(model)
    model.train(train_data, train_labels, max_iter=5000, eeta=eeta,
                batch_size=len(train_data), threshold=threshold, decay=False)
    pred = model.predict(train_data)
    train_acc = accuracy_score(train_labels, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(test_data)
    test_acc = accuracy_score(test_labels, pred) * 100
    print("Test Set Accuracy: ", test_acc)
    if plot:
        plot_decision_boundary(
            model.predict, np.array(train_data), np.array(train_labels),
            "Neural_Network Train Set\n Units in Hidden layers: %s\nAccuracy: %f"
            % (str(model.hidden_layer_sizes), train_acc))
        plot_decision_boundary(
            model.predict, np.array(test_data), np.array(test_labels),
            "Neural_Network Test Set\n Units in Hidden layers: %s\nAccuracy: %f"
            % (str(model.hidden_layer_sizes), test_acc))
def main():
    # make a neural network with set architecture
    arch = (2, 4, 1)
    nn = Neural_Network(arch)

    # XOR input data
    X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    # XOR output data
    y_train = np.array([[0], [1], [1], [0]])

    # set max iterations, learning rate, and convergence threshold
    iters, lr, threshold = 5000, 1, 0.00001

    # train the network
    J_Hist = nn.train(X_train, y_train, alpha=lr, maxIter=iters,
                      convergenceThreshold=threshold)

    # forward propagate to get a prediction from the network
    result = nn.forwardProp(X_train)

    # print some nice information
    print("\nUnfiltered Prediction:\n", result)
    print("Final Prediction:\n", result >= 0.5, '\n')
    print("Random init cost: ", round(J_Hist[0], 5),
          ", Final cost: ", round(J_Hist[-1], 5))
    print("Cost reduction from random init: ",
          round(J_Hist[0] - J_Hist[-1], 5), '\n')

    # set up subplots for the cost history and decision boundary
    figure, plots = plt.subplots(ncols=2)
    figure.suptitle('Neural Network Learning of XOR')   # supertitle
    figure.tight_layout(pad=2.5, w_pad=1.5, h_pad=0)    # fix margins
    drawCostHistory(J_Hist, plots[0])
    drawDecisionBoundary(nn, plots[1], seperation_coefficient=50,
                         square_size=1, allowNegatives=False)

    # show the cool graphs :)
    plt.show()
def c_2(plot=False, units=[100], activation="sigmoid", eeta=0.1):
    print("\nNeural_Network MNIST")
    model = Neural_Network(len(mnist_trd[0]), units, activation=activation)
    print(model)
    model.train(mnist_trd, mnist_trl, max_iter=300, eeta=eeta,
                batch_size=100, decay=True, threshold=1e-3)
    pred = model.predict(mnist_trd)
    train_acc = accuracy_score(mnist_trl, pred) * 100
    print("Train Set Accuracy: ", train_acc)
    pred = model.predict(mnist_ted)
    test_acc = accuracy_score(mnist_tel, pred) * 100
    print("Test Set Accuracy: ", test_acc)
def __init__(self, init_NN=True) -> None:
    if init_NN:
        self.NN: Neural_Network = Neural_Network(SHAPE)
    else:
        self.NN = None
    self.size: int = 0
    self.time: int = 0

    ##### simulation variables #####
    self.pos: List[int] = [frame_x // 2, frame_y // 2]
    self.body: List[List[int]] = [[self.pos[0] - 10 * i, self.pos[1]]
                                  for i in range(3)]
    self.length: int = 3

    # controls
    self.direction: str = 'RIGHT'
    self.change_to: str = self.direction
    self.food_pos: List[int] = [r.randrange(1, (frame_x // 10)) * 10,
                                r.randrange(1, (frame_y // 10)) * 10]
    # self.food_spawn: bool = True
    self.dead: bool = False

    ##### data fed to neural network #####
    self.current_frame: List[List[int]] = None
    self.framebuffer: List[List[int]] = None
    self.reset_framebuffer()
def compute(self, simulation, closest_rsu):
    neural_net = Neural_Network()
    X = self.training_data.pop()
    y = self.training_label.pop()
    # print(X)
    # print(y)
    with autograd.record():
        output = self.net(X)
        # label-flipping attack: while the RSU has accumulated fewer than
        # `num_faulty_grads` gradients, compute the loss against inverted
        # labels (9 - y)
        if cfg['attack'] == 'label' and len(
                closest_rsu.accumulative_gradients) < cfg['num_faulty_grads']:
            loss = neural_net.loss(output, 9 - y)
        else:
            loss = neural_net.loss(output, y)
    loss.backward()

    # collect a copy of every trainable parameter's gradient
    grad_collect = []
    for param in self.net.collect_params().values():
        if param.grad_req != 'null':
            grad_collect.append(param.grad().copy())
    self.gradients = grad_collect
def classify(self, show_output=False):
    """Send the preprocessed images to the NN classifier."""
    print('{0} Numbers to be classified'.format(len(self.cropped_images)))
    return_list = []
    self.apply_cropping(show_output=show_output)

    net = Neural_Network()
    net.load_state_dict(torch.load(TENSOR_LOCATION))
    net.eval()

    for image in self.cropped_images:
        image = Image.fromarray(image)
        # Resizes the number and adds a 10 px border
        transform = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize(self.output_size - self.border_size),
            transforms.CenterCrop(self.output_size),
            transforms.ToTensor(),
        ])
        img_tensor = transform(image)
        if show_output:
            plt.imshow(np.array(img_tensor)[0, :, :],
                       cmap=plt.cm.gray_r, interpolation='nearest')
            plt.title('Image used for classification')
            plt.show()
        img_tensor.unsqueeze_(0)  # add a batch dimension
        outputs = net.forward(Variable(img_tensor))
        _, predicted_labels = torch.max(outputs.data, 1)
        return_list.append(int(predicted_labels.numpy().max()))
        print('Classified: {0}'.format(predicted_labels.numpy().max()))
    return return_list
def test_train_ocr():
    # leftover example data; unused by this test
    X1 = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
    y1 = np.array(([75], [82], [93]), dtype=float)

    a, b, c, c_to_recognized = alphabet()
    inputLayerSize = len(a[0])
    hiddenLayerSize = 3 * inputLayerSize
    outputLayerSize = 1
    NN = Neural_Network(inputLayerSize=inputLayerSize,
                        hiddenLayerSize=hiddenLayerSize,
                        outputLayerSize=outputLayerSize)

    X = np.array((a[0], b[0], c[0]), dtype=float)
    y = np.array((a[1], b[1], c[1]), dtype=float)
    train(NN, X, y)

    X = np.array((c[0]), dtype=float)
    yHat = NN.forward(X)
    print('estimate for good C: {}'.format(yHat))

    X = np.array((c_to_recognized), dtype=float)
    yHat = NN.forward(X)
    print('estimate for bad C: {}'.format(yHat))
def initialize_network_for_validation(network_file_lines, initial_weights_file_lines,
                                      dataset_file_lines, isTest, network=None):
    # print("[main] Initializing network")
    # print("network_file_lines", network_file_lines)
    # print("initial_weights_file_lines", initial_weights_file_lines)
    # print("dataset_file_lines", dataset_file_lines)
    if network is None:
        # the first line is the regularization factor
        network_lambda = float(network_file_lines[0])

        # each remaining line is a layer; its value is the number of neurons
        layers_size = []
        for neurons in network_file_lines[1:]:
            # print("[main] layer with", neurons, "neurons")
            layers_size.append(int(neurons))

        layers = []  # layers
        if len(initial_weights_file_lines) > 0:
            # read the weights from the initial-weights file passed on the command line
            # print("initial weights vector is not null")
            for line in initial_weights_file_lines:
                neurons = line.split(';')
                v_neurons = []
                for neuron in neurons:
                    weights = neuron.split(',')
                    v_weights = []
                    for weight in weights:  # weights of each neuron
                        v_weights.append(float(weight))
                    v_neurons.append(v_weights)
                # each layer has its neurons, which hold their weights
                layers.append(np.array(v_neurons))
        else:
            # create initial weights randomly between -1 and 1
            # print("initial weights vector is null")
            # print("layer_sizes", layers_size)
            for i, layer in enumerate(layers_size[:-1]):
                v_neurons = []
                for j in range(layers_size[i + 1]):
                    weights_v = []
                    for y in range(layer + 1):  # +1 for the bias weight
                        weights_v.append(random.triangular(-1, 1, 0))
                    v_neurons.append(weights_v)
                layers.append(np.array(v_neurons))

        instances = []
        for instance in dataset_file_lines:
            instances.append(instance)

        # print("[main] Regularization factor:", network_lambda)
        # print("[main] Number of layers:", len(layers))

        # overall structure of the network
        neural_network = Neural_Network(network_lambda, layers_size, layers)
        if isTest:
            networkPlus = Neural_Network(network_lambda, layers_size, layers)
            networkMinus = Neural_Network(network_lambda, layers_size, layers)
            networkClean = Neural_Network(network_lambda, layers_size, layers)
            back_propagation.gradient_verification(neural_network, dataset_file_lines,
                                                   isTest, alpha, networkPlus,
                                                   networkMinus, networkClean, 0.000001)
        # run the backpropagation algorithm with the network and the training instances
        errorReg, network, fx, D = back_propagation.execute(
            neural_network, dataset_file_lines, isTest, alpha)
    else:
        errorReg, network, fx, D = back_propagation.execute(
            network, dataset_file_lines, isTest, alpha)
    return errorReg, network, fx
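# For reference: with epsilon = 1e-6 as passed above, gradient verification
# presumably compares each backprop gradient against the central-difference
# estimate (J(w + eps) - J(w - eps)) / (2 * eps), computed via the
# networkPlus / networkMinus copies; the two should agree closely.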
import time
from neural_network import Neural_Network, X, y
import numpy as np

# brute-force search: sweep a single weight over [-5, 5] and record the cost
weightsToTry = np.linspace(-5, 5, 1000)
costs = np.zeros(1000)

NN = Neural_Network()
startTime = time.perf_counter()
for i in range(1000):
    NN.W1[0, 0] = weightsToTry[i]
    yHat = NN.forward(X)
    costs[i] = 0.5 * sum((y - yHat) ** 2)
endTime = time.perf_counter()
print(endTime - startTime)  # elapsed seconds for the sweep
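# Optional follow-up (not in the original script): plot the 1-D cost
# landscape the sweep just computed; assumes matplotlib is available.
import matplotlib.pyplot as plt

plt.plot(weightsToTry, costs)
plt.xlabel('W1[0, 0]')
plt.ylabel('cost')
plt.title('Cost as a single weight is swept over [-5, 5]')
plt.show()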
def get_model(weights=[], bias=[]):
    # 9 inputs, 6 hidden units, 3 outputs; empty weights/bias presumably
    # let Neural_Network fall back to its own random initialization
    return Neural_Network(9, 6, 3, weights, bias)
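# Hypothetical usage sketch (the saved_w / saved_b names are illustrative,
# not from this repo): a fresh model, or one rebuilt from persisted values.
fresh_model = get_model()
# restored_model = get_model(weights=saved_w, bias=saved_b)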
              [1, 1, 0], [1, 1, 1]), dtype=float)  # 8x3 input matrix (one row per 3-bit input)

# y = the expected output of our neural network. This is a supervised method.
y = np.array(([1], [0], [0], [0], [0], [0], [0], [1]), dtype=float)

# the input value we want a prediction for
xPredicted = np.array(([0, 0, 1]), dtype=float)

# Normalize the inputs
X = X / np.amax(X, axis=0)  # maximum of X input array
# maximum of xPredicted (our input data for the prediction)
xPredicted = xPredicted / np.amax(xPredicted, axis=0)

# set up our Loss file for graphing
lossFile = open("SumSquaredLossList.csv", "w")

myNeuralNetwork = Neural_Network(hidden_layer_size=10)

# trainingEpochs = 1000
trainingEpochs = 100000
for i in range(trainingEpochs):  # train for trainingEpochs iterations
    print("Epoch # " + str(i) + "\n")
    print("Network Input : \n" + str(X))
    print("Expected Output of XOR Gate Neural Network: \n" + str(y))
    print("Actual Output from XOR Gate Neural Network: \n"
          + str(myNeuralNetwork.feedForward(X)))
    # mean sum squared loss
    Loss = np.mean(np.square(y - myNeuralNetwork.feedForward(X)))
    myNeuralNetwork.saveSumSquaredLossList(i, Loss)
    print("Sum Squared Loss: \n" + str(Loss))
    print("\n")
    myNeuralNetwork.trainNetwork(X, y)

myNeuralNetwork.saveWeights()
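# Optional follow-up (not part of the original script): once training has
# finished and the file is flushed, read back the loss history and plot it.
# The two-column "epoch,loss" layout of the CSV is an assumption.
import matplotlib.pyplot as plt

epochs, losses = np.loadtxt("SumSquaredLossList.csv", delimiter=",", unpack=True)
plt.plot(epochs, losses)
plt.xlabel("epoch")
plt.ylabel("sum squared loss")
plt.title("XOR network training loss")
plt.show()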