def createNetwork(learningRate):
    input_nodes = 784
    hidden_nodes = 200
    output_nodes = 10
    return neuralnetwork.NeuralNetwork(learningRate=learningRate,
                                       iNodes=input_nodes,
                                       hNodes=hidden_nodes,
                                       oNodes=output_nodes)
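# A minimal usage sketch for the factory above (assumption: the MNIST-style
# 784-200-10 setup shown there; the 0.1 learning rate is illustrative, not a
# value from the source).
net = createNetwork(0.1)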
def runTest():
    nn = neuralnetwork.NeuralNetwork([2, 4, 1], 'tanh')
    minValue = 0
    maxValue = 100
    x_array = [[20, 24], [22, 23], [21, 23], [19, 22], [24, 27],
               [25, 29], [23, 25], [22, 24], [27, 29]]
    # NOTE: y_array has one more entry than x_array.
    y_array = [22, 23, 22, 20, 25, 26, 27, 24, 23, 28]
    predict_array = [[19, 20], [26, 28], [24, 26], [21, 24]]
    print("Network Input:")
    print(x_array)
    print("Network Output:")
    print(y_array)

    # Scale every value into [0, 1] before feeding the network.
    scaled_x_array = []
    scaled_y_array = []
    scaled_predict_array = []
    for item in x_array:
        temp = []
        for i in item:
            temp.append(neuralnetwork._scale_to_binary(i, minValue, maxValue))
        scaled_x_array.append(temp)
    for item in y_array:
        scaled_y_array.append(
            neuralnetwork._scale_to_binary(item, minValue, maxValue))
    for item in predict_array:
        temp = []
        for i in item:
            temp.append(neuralnetwork._scale_to_binary(i, minValue, maxValue))
        scaled_predict_array.append(temp)

    X = np.array(scaled_x_array)  # training inputs
    y = np.array(scaled_y_array)  # training targets
    nn.fit(X, y)

    for count, sample in enumerate(scaled_predict_array):
        result = nn.predict(sample)
        result = neuralnetwork.rescale_from_binary(result, minValue, maxValue)
        print("\nInput values:")
        print(predict_array[count])
        print("Prediction:")
        print(result[0])
def keyPressed(mode, event):
    if not mode.finished:
        if not mode.started:
            if event.key == "R":
                mode.app.setActiveMode(mode.app.startScreen)
            elif event.key == "T":
                mode.getTrainPercent()
            elif event.key == "L":
                mode.getLearningRate()
            elif event.key == "N":
                mode.getNumNeurons()
            elif event.key == "S" and mode.canStart:
                mode.started = True
                mode.training = True
                data = network.preprocessData()
                train, mode.testData, trainDiagnosis, mode.testDiagnosis = \
                    network.partitionTrainAndTest(data, mode.trainingPercent)
                mode.network = network.NeuralNetwork(
                    train, trainDiagnosis, mode.numNeurons, mode.learningRate)
                (mode.finished, mode.accuracies, mode.costs) = mode.network.train()
                mode.epochs = range(len(mode.accuracies))
    else:
        mode.data = csvToDataFrame()
        # plot variables over time
        if event.key == "A":
            plt.clf()
            plt.title("Training Accuracy Over Time")
            plt.xlabel("Epoch")
            plt.ylabel("Training Accuracy")
            plt.plot(mode.epochs, mode.accuracies)
            plt.show()
        elif event.key == "C":
            plt.clf()
            plt.title("Cost Over Time")
            plt.xlabel("Epoch")
            plt.ylabel("Cost")
            plt.plot(mode.epochs, mode.costs)
            plt.show()
        elif event.key == "R":
            plt.clf()
            mode.app.inputScreen.appStarted()
            mode.app.setActiveMode(mode.app.startScreen)
        elif event.key == "T":
            if isinstance(mode.testAccuracy, str):
                mode.testAccuracy, mode.testCost = mode.network.test(
                    mode.testData, mode.testDiagnosis)
                mode.data = addRowToAccuracies(
                    mode.data, mode.trainingPercent, mode.learningRate,
                    mode.numNeurons, mode.testAccuracy, mode.testCost)
                mode.data.to_csv("accuracies.txt")
        elif event.key == "H":
            mode.app.setActiveMode(mode.app.historyScreen)
        elif event.key == "S":
            mode.appStarted()
def neural_net_score(self, matches, word_ids):
    url_ids = [match.url_id for match in matches]
    nn = neuralnetwork.NeuralNetwork(self.con)
    nn.generate_hidden_node(word_ids, url_ids)
    scores = nn.get_result(word_ids, url_ids)
    result = {url_ids[i]: scores[i] for i in range(len(url_ids))}
    print('CHANGE ME')
    # Placeholder training signal: treat the first URL as the only
    # relevant result.
    target = [0.0] * len(url_ids)
    if url_ids:
        target[0] = 1.0
    nn.dump()
    nn.back_propagation(target)
    return self.normalize_scores(result, small_is_better=False)
def test_print_perc_predicted_correctly(self):
    predicted = np.array([[0.6, 0.2], [0.2, 0.9], [0.9, 0.99], [0.8, 0.2]])
    y = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])
    count, percentage = nn.NeuralNetwork().get_predicted_correctly(predicted, y)
    assert count == 1
    assert percentage == 25
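# A minimal sketch of what get_predicted_correctly is expected to compute,
# inferred from the assertions above (argmax of each predicted row compared
# against the one-hot target); this is an assumption, not the project's
# actual implementation.
import numpy as np

def get_predicted_correctly_sketch(predicted, y):
    # A row counts as correct when its highest activation lands on the
    # same index as the 1 in the one-hot label.
    count = int(np.sum(np.argmax(predicted, axis=1) == np.argmax(y, axis=1)))
    percentage = 100.0 * count / len(y)
    return count, percentage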
def __init__(self):
    self.argumentParser = ap.ArgumentParser(self)
    self.fileFinder = ff.FileFinder(self)
    self.inputs = 1
    self.nLayers = self.argumentParser.nLayers()
    self.nNodes = self.argumentParser.nNodes()
    self.outputs = 1
    self.networkType = self.argumentParser.type()
    self.network = nn.NeuralNetwork(self)
    self.network.constructNetwork(inputs=self.inputs,
                                  nNodes=self.nNodes,
                                  nLayers=self.nLayers,
                                  outputs=self.outputs,
                                  networkType=self.networkType)
    self.saver = ckps.CheckpointSaver(self, self.argumentParser().save)
    self.networkTrainer = nt.NetworkTrainer(self, self.saver)

    # Target function: only the last assignment takes effect; the earlier
    # candidates (random noise, bare Lennard-Jones) are kept for reference.
    # self.function = lambda r: r / r * np.random.normal(0, 1)  # + np.sin(7.0*np.pi*r)
    # self.function = lambda r: 1 / r**12 - 1 / r**6
    self.function = lambda r: 4 * (1.0 / r**12 - 1.0 / r**6) \
        - 4 * (1.0 / 2.5**12 - 1.0 / 2.5**6)  # shifted Lennard-Jones

    self.dataGenerator = gen.DataGenerator(0.87, 2.5, self)
    self.dataGenerator.setFunction(self.function)
    if self.argumentParser().file is not None:
        self.dataGenerator.setGeneratorType("file")
    else:
        self.dataGenerator.setGeneratorType("function")
    # self.dataGenerator.setGeneratorType("VMC")
    # self.dataGenerator.setGeneratorType("noise")

    self.dataSize = 9987
    self.numberOfEpochs = 100
    self.batchSize = 500
    self.testSize = self.dataSize  # alternatively 600
    self.testInterval = 5000

    self.printer = printer.Printer(self)
    self.printer.printSetup()
    self.plotter = plotter.Plotter(self)
def squarestest():
    # All 4-bit binary inputs.
    squares = list(map(list, itertools.product([0, 1], repeat=4)))
    print(squares)
    NN = neuralnetwork.NeuralNetwork(height, width, inputSize, outputSize)
    print(NN.calculate([0.25, 0.5, 0.75, 1]))
    print(NN.backpropagate([0.25, 0.5, 0.75, 1], [1], 0.1))
    print("nice")
    for i in range(epochCount):
        for square in squares:
            NN.backpropagate(square, correct(square), learningRate)
    # Spot-check random inputs after training.
    for square in squares:
        test = random.choice(squares)
        print("Actual:")
        print(correct(test))
        print("NN Calculation")
        print(NN.calculate(test))
def main(arglist):
    # Normalize attributes.
    if arglist.dataset.split('.')[1] == 'csv':
        dataset = pd.read_csv(arglist.dataset, sep=';')
    else:
        dataset = pd.read_csv(arglist.dataset, sep='\t')
    normalizedDataset = dataset.apply(
        lambda x: (x - x.min()) / (x.max() - x.min()), axis=0)

    # Cross-validation.
    # The neural network takes a list of [n x 1] vectors (n = number of
    # neurons per layer) and a list of weight vectors for the connections
    # between layers.
    neural = neuralnetwork.NeuralNetwork(np.array([13, 4, 3, 1]), 2, 2)
    neural.train(normalizedDataset)
        if H[i][0] == Y[i][0] and H[i][0] == 1:
            rec[0] += 1  # true positive
        elif H[i][0] == Y[i][0] and H[i][0] == 0:
            rec[1] += 1  # true negative
        elif H[i][0] != Y[i][0] and H[i][0] == 1:
            rec[2] += 1  # false positive
        else:
            rec[3] += 1  # false negative
    return rec


# get the dataset
X_train, Y_train, X_cv, Y_cv, X_test, Y_test = get_dataset()

# train a neural network with the training dataset
nn = NN.NeuralNetwork(2, 1, 5, 2)
print("Initial parameters of the neural network:\n", nn.ini_Theta)
Theta, steps, Js = nn.gradient_descent(X_train, Y_train, nn.ini_Theta, alpha=0.001)

# get the cross-validation hypothesis values from the trained parameters
H_cv, A = nn.forward_propagation(X_cv, Theta)
H_cv = np.array([[int(value + 0.5) for row in H_cv for value in row]]).reshape([-1, 2])

ac = accuracy(H_cv, Y_cv)
rec = recall(H_cv, Y_cv)  # rec = [TP, TN, FP, FN]
precision = rec[0] / (rec[0] + rec[2])
recall = rec[0] / (rec[0] + rec[3])  # NOTE: this rebinding shadows the recall() helper
print("""
===== Analysis result =====
> accuracy: %f,
> true positive: %d,
> true negative: %d,
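# A minimal self-contained sketch of the confusion-count helper whose tail
# appears above (an assumption inferred from its usage: H and Y are (m, 2)
# arrays of 0/1 values judged on column 0, and rec = [TP, TN, FP, FN]).
def recall_sketch(H, Y):
    rec = [0, 0, 0, 0]  # [true pos, true neg, false pos, false neg]
    for h, t in zip(H, Y):
        if h[0] == t[0]:
            rec[0 if h[0] == 1 else 1] += 1
        else:
            rec[2 if h[0] == 1 else 3] += 1
    return rec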
show_last = 5        # how many games to show: 5 = show the best network of the last 5 generations
x = 16               # map width
y = 10               # map height
ticks = 120          # how many ticks the AI plays
bonus_ticks = 80     # how many ticks are gained by eating
visual_delay = 0.1   # seconds between game ticks
score_mult = 5       # points per piece of food
net_arch = [7, 5, 3] # the first element is the number of inputs, the last the
                     # number of outputs, and anything in between is a hidden
                     # layer: [7, 5, 3] is 7 inputs, 3 outputs, and a single
                     # hidden layer of 5 neurons

pop = []
for i in range(population):
    pop.append(neuralnetwork.NeuralNetwork(net_arch))

graphics_enabled = False
delay = 0
generations = 0
best = []
best_nn = []
avg = []
avg_score = []
best_score = []
start = time.time()

while generations < total_generations:
    fitness_array = []
# Alternative toy datasets kept from the original (commented out):
# data_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
# data_outputs = [[0], [1], [1], [0]]
#
# data_inputs, data_outputs = [], []
# for i in range(1000):
#     data_input = [rd.randint(0, 10), rd.randint(0, 10)]
#     data_inputs.append(data_input)
#     data_outputs.append([data_input[0] + data_input[1]])

# NOTE (assumption): the source leaves data_inputs/data_outputs undefined.
# They are reconstructed here to match the stated objective and the
# [4, 8, 8, 1] topology: every 4-bit binary number mapped to its decimal
# value, scaled into [0, 1].
data_inputs = [[b3, b2, b1, b0] for b3 in (0, 1) for b2 in (0, 1)
               for b1 in (0, 1) for b0 in (0, 1)]
data_outputs = [[(8 * b3 + 4 * b2 + 2 * b1 + b0) / 15.0]
                for b3, b2, b1, b0 in data_inputs]

print("Hello, I am an Artificial Neural Network.\n"
      "My goal is to learn to convert binary numbers to decimal.")
np.set_printoptions(suppress=True)
epochs = 60000
learning_rate = 0.001
function_activation = ReLU
rede_neural = nn.NeuralNetwork([4, 8, 8, 1])
rede_neural.predictAll(data_inputs, function_activation)
print("These are my predictions before any learning.")
print("Now, like living beings, I will learn from my mistakes!")
print("\nTrain the neural network?")
input("")
while rede_neural.MSE(data_inputs, data_outputs, function_activation) > 0.004:
    rede_neural.train(epochs, data_inputs, data_outputs, learning_rate,
                      function_activation)
rede_neural.predictAll(data_inputs, function_activation)
print("Take that!")
    # (inside the per-record test loop)
    # correct answer is the first value
    correct_label = int(one_letter[0])
    # scale and shift the inputs
    inputs = (numpy.asfarray(one_letter[1:]) / 255.0 * 0.99) + 0.01
    # query the network
    outputs = n.query(inputs)
    # the index of the highest value corresponds to the label
    label = numpy.argmax(outputs)
    # append correct or incorrect to the scorecard
    if label == correct_label:
        # network's answer matches the correct answer: add 1 to the scorecard
        scorecard.append(1)
    else:
        # network's answer doesn't match the correct answer: add 0
        scorecard.append(0)

# calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)
print("performance = ", scorecard_array.sum() / scorecard_array.size)

for i in range(5):
    n = neuralnetwork.NeuralNetwork(learningRate=learning_rate,
                                    iNodes=input_nodes,
                                    hNodes=hidden_nodes,
                                    oNodes=output_nodes)
    trainAndTest(n)
# Generate the dataset.
seed = 22
m = 500
rdm = np.random.RandomState(seed)
X = rdm.randn(m, 2)
# Label a point 1 when it falls inside the circle x0^2 + x1^2 < 2.
Y = np.array([[int(x0**2 + x1**2 < 2)] for (x0, x1) in X])
Y[0] = [1]  # override the first two labels by hand
Y[1] = [0]
Y_c = ['red' if y[0] else 'blue' for y in Y]
print("X:\n", X)
print("Y:\n", Y)

# Build the neural network.
NN = nn.NeuralNetwork(2, 1, 4, 1)

# Train it twice: without and with regularization.
Theta_nR, steps_nR, Js_nR = NN.gradient_descent(X, Y, NN.ini_Theta, alpha=0.002, lamda=0)
Theta_R, steps_R, Js_R = NN.gradient_descent(X, Y, NN.ini_Theta, alpha=0.002, lamda=1)

# Define the test grid.
xx0, xx1 = np.meshgrid(np.arange(-3, 3, 0.01), np.arange(-3, 3, 0.01))
X0_test = xx0.ravel()
X1_test = xx1.ravel()
h_nR = NN.forward_propagation(np.c_[X0_test, X1_test], Theta_nR)[0]
h_nR = h_nR.reshape(xx0.shape)
h_R = NN.forward_propagation(np.c_[X0_test, X1_test], Theta_R)[0]
h_R = h_R.reshape(xx0.shape)
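# A plausible continuation (an assumption, not shown in the source): the
# ravel/reshape over the meshgrid is the standard setup for drawing the two
# learned decision boundaries with matplotlib.
import matplotlib.pyplot as plt

plt.scatter(X[:, 0], X[:, 1], c=Y_c)
plt.contour(xx0, xx1, h_nR, levels=[0.5], colors='black')  # without regularization
plt.contour(xx0, xx1, h_R, levels=[0.5], colors='green')   # with regularization
plt.show()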
         label='train set')
plt.legend(framealpha=0.5)

# Get the dataset and split it into training, cross-validation, and test sets.
np.random.seed(0)
m = 2000
X, Y_ = make_moons(m, noise=0.20)
Y = np.array(list(map(lambda y: [1, 0] if y == 1 else [0, 1], Y_)))
X_train, Y_train = X[:int(0.6 * m)], Y[:int(0.6 * m)]
X_cv, Y_cv = X[int(0.6 * m):int(0.8 * m)], Y[int(0.6 * m):int(0.8 * m)]
X_test, Y_test = X[int(0.8 * m):], Y[int(0.8 * m):]

# Build the neural network.
NN = nn.NeuralNetwork(2, 4, 4, 2)

# Train the model.
Theta, steps, Js = NN.gradient_descent(X_train, Y_train, NN.ini_Theta,
                                       alpha=0.001, threshold=1e-7)

# Cross-validate and test with the trained parameters.
h_cv, A = NN.forward_propagation(X_cv, Theta)
J_cv = NN.cost_function(Y_cv, h_cv, Theta)
h_test, A = NN.forward_propagation(X_test, Theta)
J_test = NN.cost_function(Y_test, h_test, Theta)

print("===== Fitting Result of Cross Validation =====")
# Specify the layers; for this example the first layer should be 784 and
# the output layer should be 10.
layer_sizes_fwd = (784, 16, 16, 10)
layer_sizes_rev = (10, 16, 16, 784)

# Number of images used for training; the rest is used for testing.
training_set_size = 10000

# Split the training images.
training_set_images = training_images[:training_set_size]
training_set_labels = training_labels[:training_set_size]
test_set_images = training_images[training_set_size:]
test_set_labels = training_labels[training_set_size:]

# Initialize the neural network.
net = nn.NeuralNetwork(layer_sizes_fwd)

# First training session.
net.train_network(training_set_images, training_set_labels, 4, 10, 4.0)

# Evaluate performance after the first training session.
net.print_accuracy(test_set_images, test_set_labels)
net.calculate_average_cost(test_set_images, test_set_labels)

# Second training session.
net.train_network(training_set_images, training_set_labels, 8, 20, 2.0)

# Evaluate performance after the second training session.
net.print_accuracy(test_set_images, test_set_labels)
net.calculate_average_cost(test_set_images, test_set_labels)
""" Runs MNIST data as training and test data for neuralnetwork.py """ import mnist_loader import neuralnetwork training_data, validation_data, test_data = mnist_loader.load_data_wrapper() nn = neuralnetwork.NeuralNetwork([784, 30, 10]) # Use MNIST training_data to train NN over 30 epochs, with mini-batch size of 10, and a learning rate of 3.0 nn.train(training_data, 30, 10, 3.0, test_data=test_data)
import numpy as np
import neuralnetwork as nn
import matplotlib.pyplot as plt


def load_data(inpt, label):
    # Returns a list of (x, y) tuples where x = image and y = label.
    return list(zip(inpt, label))


with np.load('mnist.npz') as data:
    # print(data.files)  # shows the archive's file names
    training_data = load_data(data['training_images'], data['training_labels'])
    test_data = load_data(data['test_images'], data['test_labels'])

'''
# Show an image:
plt.imshow(training_data[0][0].reshape(28, 28), cmap='gray')
plt.show()
input('Press <Enter> to continue')
'''

layer_sizes = (784, 5, 10)
net = nn.NeuralNetwork(layer_sizes)
net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
def main():
    max_retention_rate = 0.5
    min_retention_rate = 0.3
    retention_rate_delta = 0.1
    random_select = 0.001
    mutate = 0.001
    # target = 1.0  # accuracy
    target = 0.0  # rmse
    n_generation = 30
    n_population = 30

    model = N.NeuralNetwork()
    x_train, y_train, x_valid, y_valid = loaddataset()
    p = population(n_population)

    current_retention_rate = max_retention_rate
    best_grade = np.inf
    previous_grade = np.inf
    previous_retention_rate = max_retention_rate
    fitness_history = []
    for i in range(n_generation):
        print('Running generation %d...' % i)
        p = evolve(model, p, x_train, y_train, x_valid, y_valid, target,
                   retain=current_retention_rate,
                   random_select=random_select, mutate=mutate)
        grade_ = grade(model, i, p, x_train, y_train, x_valid, y_valid, target)
        print('Generation=%d grade=%f' % (i, grade_))
        fitness_history.append(grade_)
        # Shrink retention when the grade worsens, grow it when it improves,
        # clamping to [min_retention_rate, max_retention_rate].
        if grade_ > previous_grade:
            current_retention_rate *= (1.0 - retention_rate_delta)
            if current_retention_rate < min_retention_rate:
                current_retention_rate = min_retention_rate
            else:
                print('evolve: grade=%f > previous grade=%f, reducing retention from %f to %f' %
                      (grade_, previous_grade, previous_retention_rate, current_retention_rate))
        else:
            current_retention_rate *= (1.0 + retention_rate_delta)
            if current_retention_rate > max_retention_rate:
                current_retention_rate = max_retention_rate
            else:
                print('evolve: grade=%f < previous grade=%f, increasing retention from %f to %f' %
                      (grade_, previous_grade, previous_retention_rate, current_retention_rate))
        previous_retention_rate = current_retention_rate
        previous_grade = grade_
        if grade_ < best_grade:
            best_grade = grade_

    for i in fitness_history:
        print(i)
    print('Best grade %f' % best_grade)
    best = find_best_individual(model, p, x_train, y_train, x_valid, y_valid, target)

    # Reports
    dict_ = dict(zip(chromosomes.keys(), best[0][1]))
    dict_.update(chromosomes_fixed)
    model.set_params(**dict_)
    print('Best Model:', model)
    print('%s' % chromosomes.keys())
    for item in best:
        print(item)
def do_POST(self):
    if self.path == '/api/v1/setup':
        content_len = int(self.headers.get('Content-Length'))
        post_body = self.rfile.read(content_len).decode("utf-8")
        data = json.loads(post_body)
        xShape = data["xShape"]
        hiddenLayers = data["hiddenLayers"]
        tShape = data["tShape"]
        nnType = data["nnType"]  # classifier, recurring, etc.
        self.nnStatus["nnType"] = nnType
        if nnType == "classifier":
            self.nnStatus["nnet"] = nn.NeuralNetworkClassifier(
                xShape, hiddenLayers, tShape)
        else:
            self.nnStatus["nnet"] = nn.NeuralNetwork(
                xShape, hiddenLayers, tShape)
        self.nnStatus["status"] = "Initialized"
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        rtnData = {"status": self.nnStatus["status"]}
        _data = json.dumps(rtnData)
        self.wfile.write(bytes(_data, 'utf8'))
        return

    elif self.path == '/api/v1/train':
        def trainNet(data):
            epochs = data["epochs"]
            learningRate = data["learningRate"]
            method = data["method"]  # adam or sgd
            x = np.array(data["x"])
            t = np.array(data["t"])
            rtnData["status"] = "Training"
            self.nnStatus["status"] = "Training"
            self.start = time.time()
            self.nnStatus["nnet"].train(x, t, epochs, learningRate,
                                        method=method, verbose=False)
            self.elapsed = time.time() - self.start
            self.nnStatus["status"] = "Ready"

        content_len = int(self.headers.get('Content-Length'))
        post_body = self.rfile.read(content_len).decode("utf-8")
        data = json.loads(post_body)
        rtnData = {}
        print(self.nnStatus["status"])
        if self.nnStatus["status"] in ["Initialized", "Ready"]:
            self.send_response(200)
            self.nnStatus["status"] = "Training"
            # Train on a background thread so the request returns immediately.
            thread = threading.Thread(target=trainNet, kwargs={'data': data})
            thread.start()
        else:
            self.send_response(400)
            rtnData["message"] = ("Not ready for training, must be in "
                                  "initialized or ready status.")
        rtnData["status"] = self.nnStatus["status"]
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        post_data = json.dumps(rtnData)
        self.wfile.write(bytes(post_data, 'utf8'))
        return

    elif self.path == '/api/v1/full':
        def trainFold(data):
            # Collect per-fold inputs, targets, outputs, and accuracy for
            # the train/validate/test splits.
            for split in ("train", "validate", "test"):
                self.stats[split] = {"x": [], "t": [], "y": [], "correct": []}
            for Xtrain, Ttrain, Xvalidate, Tvalidate, Xtest, Ttest in \
                    self.generate_k_fold_cross_validation_sets(X, T, folds, shuffle):
                self.nnStatus["nnet"].train(Xtrain, Ttrain, epochs, learningRate,
                                            method=method, verbose=False)
                Ytrain = self.nnStatus["nnet"].use(Xtrain)
                self.stats["train"]["correct"].append(
                    self.percent_correct(Ytrain[0], Ttrain))
                if self.nnStatus["nnType"] == "classifier":
                    Ytrain = [Ytrain[0].tolist(), Ytrain[1].tolist()]
                else:
                    Ytrain = Ytrain.tolist()
                self.stats["train"]["x"].append(Xtrain.tolist())
                self.stats["train"]["y"].append(Ytrain)
                self.stats["train"]["t"].append(Ttrain.tolist())

                Yvalidate = self.nnStatus["nnet"].use(Xvalidate)
                self.stats["validate"]["correct"].append(
                    self.percent_correct(Yvalidate[0], Tvalidate))
                if self.nnStatus["nnType"] == "classifier":
                    Yvalidate = [Yvalidate[0].tolist(), Yvalidate[1].tolist()]
                else:
                    Yvalidate = Yvalidate.tolist()
                self.stats["validate"]["x"].append(Xvalidate.tolist())
                self.stats["validate"]["y"].append(Yvalidate)
                self.stats["validate"]["t"].append(Tvalidate.tolist())

                Ytest = self.nnStatus["nnet"].use(Xtest)
                self.stats["test"]["correct"].append(
                    self.percent_correct(Ytest[0], Ttest))
                if self.nnStatus["nnType"] == "classifier":
                    Ytest = [Ytest[0].tolist(), Ytest[1].tolist()]
                else:
                    Ytest = Ytest.tolist()
                self.stats["test"]["x"].append(Xtest.tolist())
                self.stats["test"]["y"].append(Ytest)
                self.stats["test"]["t"].append(Ttest.tolist())
            self.nnStatus["status"] = "Ready"
            print(self.nnStatus["status"])

        content_len = int(self.headers.get('Content-Length'))
        post_body = self.rfile.read(content_len).decode("utf-8")
        data = json.loads(post_body)
        hiddenLayers = data["hiddenLayers"]
        nnType = data["nnType"]  # classifier, recurring, etc.
        epochs = int(data["epochs"])
        learningRate = float(data["learningRate"])
        method = data["method"]  # adam or sgd
        dataSet = np.array(data["data"])
        tColumnCount = int(data["tColumnCount"])
        folds = int(data["folds"])
        shuffle = data["shuffle"]
        X = dataSet[:, :-tColumnCount]
        T = dataSet[:, -tColumnCount:]
        self.nnStatus["nnType"] = nnType
        if nnType == "classifier":
            self.nnStatus["nnet"] = nn.NeuralNetworkClassifier(
                X.shape[1], hiddenLayers, len(np.unique(T)))
        else:
            self.nnStatus["nnet"] = nn.NeuralNetwork(
                X.shape[1], hiddenLayers, T.shape[1])
        rtnData = {}
        self.nnStatus["status"] = "Training"
        rtnData["status"] = "Training"
        thread = threading.Thread(target=trainFold, kwargs={'data': data})
        thread.start()
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        post_data = json.dumps(rtnData)
        self.wfile.write(bytes(post_data, 'utf8'))
        return
def test_basic(self):
    net = nn.NeuralNetwork(layers_shape=(2, 3), with_bias=False)
    assert len(net.layers_weights) == 1
    assert net.layers_weights[0].shape == (3, 2)
    assert np.all(-1 <= net.layers_weights[0]) and np.all(net.layers_weights[0] < 1)
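# The assertions above pin down the constructor's contract: one (3, 2) weight
# matrix for layers_shape=(2, 3) without bias, with entries in [-1, 1).
# A minimal initialization sketch that would satisfy them (an assumption
# about the implementation, not the project's code):
import numpy as np

def init_layers_weights(layers_shape):
    # One (fan_out, fan_in) matrix per consecutive layer pair,
    # drawn uniformly from [-1, 1).
    return [np.random.uniform(-1.0, 1.0, size=(layers_shape[i + 1], layers_shape[i]))
            for i in range(len(layers_shape) - 1)]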
import numpy as np
import mnist_loader
import matplotlib.pyplot as plt
import neuralnetwork as nn

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

network = nn.NeuralNetwork([784, 30, 10])
network.SGD(training_data, 30, 10, 3.0, test_data=test_data)
pg.init()
pg.font.init()

p_count = 200  # population size
generation = 0
best_score = 0
p_best_score = 0
best_gen = 0
mlr = 1
olr = 0.5
lr = olr
mr = 0.1
game = fpc.game(p_count, 1000, 400)
screen = pg.display.set_mode((1400, 900))
visualizer = nn.Visualizer(pg.Vector2(600, 900))
console = nn.Console(pg.Vector2(800, 300))
network = nn.NeuralNetwork(0, 4, (6, 5, 5, 1))
nets = ne.NeuroEvolution(network, p_count)


def updateWindow():
    fpd.drawGame(game)
    screen.blit(game.screen, (0, 0))
    screen.blit(visualizer.screen, (800, 0))
    console.drawLog()
    screen.blit(console.screen, (0, 600))
    pg.display.update()


while True:
    spec = 0
    while game.isOver() is False:
def main():
    print('Running', __file__, '...')
    params = {
        'Model': 'neuralnetwork',
        'TrainFile': '../data/train.csv',
        'TestFile': '../data/test.csv',
        'n_fold': 5,
        'TrainSize': .9
    }

    # 1. Generate data
    df = dataset.load_train_data(params)
    train_data = df.values
    # Skip the passenger id
    X_train = train_data[:, 2:]
    Y_train = train_data[:, 0].astype(int)

    # 2. Partition training data
    trainSize = int(params['TrainSize'] * np.size(Y_train))
    x_train, x_valid = X_train[:trainSize, :], X_train[trainSize:, :]
    y_train, y_valid = Y_train[:trainSize], Y_train[trainSize:]

    df = dataset.load_test_data(params)
    X_test = df.values
    x_test_index = X_test[:, 0]
    x_test = X_test[:, 1:]

    print('Analyzing training data', params['Model'],
          'datapoints=', x_train.shape[0], 'features=', x_train.shape[1])

    rng = np.random.RandomState(5000)
    classifier = N.NeuralNetwork()
    param_grid = dict(
        network=[[9, 18, 18, 1], [9, 24, 1], [9, 45, 1]],
        connection_rate=[.6, .7],
        learning_rate=[.07, .1],
        learning_momentum=[.005, .05],
        initial_weight=[.73, .82],
        desired_error=[0.0001],
        epoch=[100],
        hidden_activation=[N.SIGMOID, N.SIGMOID_STEPWISE, N.SIGMOID_SYMMETRIC],
        output_activation=[N.SIGMOID_SYMMETRIC],
        training_algorithm=[N.TRAIN_RPROP],
        show=[500])

    # 3. Search for the best estimator
    cv_ = cv.StratifiedShuffleSplit(y_train, n_iter=params['n_fold'],
                                    train_size=params['TrainSize'],
                                    random_state=rng)
    grid = grid_search.GridSearchCV(classifier, param_grid=param_grid, cv=cv_)
    grid.fit(x_train, y_train)
    best_estimator = grid.best_estimator_
    print('Best estimator:', best_estimator)

    scores = cv.cross_val_score(best_estimator, x_train, y_train,
                                cv=params['n_fold'])
    print('Train: (folds=%d) Score for %s accuracy=%0.5f (+/- %0.5f)' %
          (params['n_fold'], params['Model'], scores.mean(), scores.std()))

    y_valid_pred = best_estimator.predict(x_valid)
    print('Valid: Score for %s accuracy=%0.5f rmse=%0.5f' %
          (params['Model'],
           metrics.accuracy_score(y_valid, y_valid_pred),
           np.sqrt(metrics.mean_squared_error(y_valid, y_valid_pred))))

    # 4. Run the found estimator on the test data.
    print('Analyzing test data', params['Model'],
          'datapoints=', x_test.shape[0], 'features=', x_test.shape[1])
    process_test_data(params, best_estimator, x_test_index, x_test)
def get(self, request, *args, **kwargs):
    hidden_layer = request.GET.get('layers', 3)
    base_value = request.GET.get('base_value', 20)  # Should not be passed; should be stored in the DB
    learning_rate = request.GET.get('lrate', 2)
    function = request.GET.get('function', 'tanh')
    epochs = request.GET.get('epochs', 50000)
    test = request.GET.get('test', False)
    dataformat = request.GET.get('format', 'html')

    epochs = checkIfInt(epochs)
    if not epochs:
        return JsonResponse({'error': 'Incorrect epoch value. Send as integer'})
    hidden_layer = checkIfInt(hidden_layer)
    if not hidden_layer:
        return JsonResponse({'error': 'Incorrect layer value. Send as integer'})
    base_value = checkIfInt(base_value)
    if not base_value:
        return JsonResponse({'error': 'Incorrect base value. Send as integer'})

    examples = TrainingExample.objects.all()
    examples_list = examples.values_list('datainput', flat=True)
    correct_list = examples.values_list('dataoutput', flat=True)
    layers = [1, hidden_layer, 1]
    y = correct_list[:99]
    n = neuralnetwork.NeuralNetwork(layers, function)
    predict = examples_list[99]

    # Scale everything into [0, 1] before training.
    scaled_x = []
    scaled_y = []
    index = 0
    start = time.time()  # when the process started, including scaling
    for item in examples_list[:99]:
        scaled_x.append([self.scale_to_binary(item)])
        scaled_y.append(self.scale_to_binary(y[index]))
        index += 1
    predict = self.scale_to_binary(predict)
    scaled_y = np.array(scaled_y)
    scaled_x = np.array(scaled_x)

    n.fit(scaled_x, scaled_y, learning_rate, epochs)
    prediction = n.predict([predict])
    result = self.scale_from_binary(prediction[0])
    end = time.time()
    seconds = end - start

    if test:
        AnnResult.objects.create(
            prediction=result,
            epochs=epochs,
            hidden_layer_size=hidden_layer,
            seconds=seconds,
            truevalue=correct_list[99],
            function=function,
        )

    results = AnnResult.objects.all()
    def_epoch = 50000
    def_layers = 3
    epoch_tests = []
    layer_tests = []
    epoch_predictions = []
    layer_predictions = []
    for item in results:
        if item.epochs == def_epoch:  # testing layers
            layer_tests.append(item.hidden_layer_size)
            layer_predictions.append(item.prediction)
        if item.hidden_layer_size == def_layers:  # testing epochs
            epoch_tests.append(item.epochs)
            epoch_predictions.append(item.prediction)

    args = {
        'epoch_tests': epoch_tests,
        'epoch_predictions': epoch_predictions,
        'layer_tests': layer_tests,
        'layer_predictions': layer_predictions,
        'correct_answer': correct_list[99],
    }
    if dataformat == 'html':
        return render(request, self.template_name, args)
    return JsonResponse(args)
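# A minimal sketch of the scaling helpers the view relies on (an assumption:
# plain min-max scaling into [0, 1] and back, mirroring the _scale_to_binary /
# rescale_from_binary pattern used elsewhere in this collection; the 0-100
# default range is illustrative).
def scale_to_binary(value, min_value=0, max_value=100):
    return (value - min_value) / float(max_value - min_value)

def scale_from_binary(value, min_value=0, max_value=100):
    return value * (max_value - min_value) + min_value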
end_gen: int = 176

# ---------------------------------------------------------------------------
# POPULATION LOADING / CREATION:
# Load population data from file or create a new population.
filename: str = "gen" + str(start_gen) + ".pkl"
try:
    population = popa.load(filename)
except Exception:
    # Creates a new population if it cannot be loaded from file.
    print("Creating a brand new population. Let there be light!")
    population = [
        nn.NeuralNetwork(layer_sizes, 0) for _ in range(population_size)
    ]
    popa.write(population, filename)
else:
    print("Loading existing population from file: {}".format(filename))

# ---------------------------------------------------------------------------
# EVOLUTION LOOP:
for gen in range(start_gen, end_gen):
    # Reset agent scores so old agents don't get an advantage.
def test_basic_weights(self):
    net = nn.NeuralNetwork(layers_weights=[np.array([1])],
                           regularization_value=5)
    assert net.layers_weights == [np.array([1])]
    assert net.regularization_value == 5
# Import libraries.
import neuralnetwork
import numpy as np

# Construct the XOR dataset.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Define our 2-2-1 neural network and train it.
nn = neuralnetwork.NeuralNetwork([2, 2, 1], alpha=0.5)
nn.fit(X, y, epochs=20000)

for (x, target) in zip(X, y):
    pred = nn.predict(x)[0][0]
    step = 1 if pred > 0.5 else 0
    print("[INFO] data={}, ground-truth={}, pred={:.4f}, step={}".format(
        x, target[0], pred, step))
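# With a successfully trained 2-2-1 network, the loop above typically prints
# predictions near 0 for [0, 0] and [1, 1] and near 1 for [0, 1] and [1, 0],
# so the thresholded "step" column reproduces XOR exactly.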
def rock_paper_scissors():
    def convert(char):
        # Map a move letter to an index.
        if char == 'R':
            return 0
        elif char == 'P':
            return 1
        elif char == 'S':
            return 2

    def go_back(i):
        # Map a move index to the move that beats it.
        if i == 0:
            return 'P'
        elif i == 1:
            return 'S'
        elif i == 2:
            return 'R'

    def to_input(o, t):
        # One-hot encode the last two moves into a 6-element input vector.
        output = []
        for move in (o, t):
            encoding = [0, 0, 0]
            encoding[move] = 1
            output.extend(encoding)
        return output

    def to_output(c):
        # One-hot encode a single move.
        output = [0, 0, 0]
        output[c] = 1
        return output

    def choose(l):
        # Index of the largest activation.
        result = -1
        weight = -1
        for i, item in enumerate(l):
            if item > weight:
                result = i
                weight = item
        return result

    NN = neuralnetwork.NeuralNetwork(height, width, 6, 3)
    print("Rock.. Paper.. Scissors... (R P S)")
    one = convert(input())
    print(go_back(random.randint(0, 2)))
    print("Rock.. Paper.. Scissors... (R P S)")
    two = convert(input())
    print(go_back(random.randint(0, 2)))
    while True:
        print("Rock.. Paper.. Scissors... (R P S)")
        out = convert(input())
        guess = NN.calculate(to_input(one, two))
        print("Guess:")
        print(guess)
        print("Real:")
        print(to_output(out))
        print(go_back(choose(guess)))
        # Learn from the player's actual move, then slide the history window.
        NN.backpropagate(to_input(one, two), to_output(out), 1)
        one = two
        two = out
app = Flask(__name__)

# Initialize the neural net.
# Hyperparameters
inputLayerSize = config.inputLayerSize
outputLayerSize = config.outputLayerSize
hiddenLayerSize = config.hiddenLayerSize
# Regularization
lambd = config.lambd
# Configure the activation function.
useAct = config.useAct

net = nn.NeuralNetwork(inputLayerSize, outputLayerSize, hiddenLayerSize,
                       lambd, useAct)

# Load the trained weights.
with open('../Parameters/active/weights{}.csv'.format(config.suffix), 'r') as f:
    reader = csv.reader(f)
    params = np.array(list(reader), dtype=float)
net.setParams(params)


# Route configurations
@app.route("/forward", methods=['POST'])
def forward():
    json = request.get_json()
    X = np.array(json['X'], dtype=float)
for clf in classificadores:
    if clf == 'knn':
        print("\n============ KNN ============\n")
        classificador = KNeighborsClassifier(n_neighbors=3, metric='euclidean')
    if clf == 'bayes':
        print("\n============ Bayes ============\n")
        classificador = GaussianNB()
    if clf == 'svm':
        print("\n============ SVM ============\n")
        classificador = SVC()
    if clf == 'neuralnetwork':
        print("\n============ NeuralNetwork ============\n")
        classificador = neuralnetwork.NeuralNetwork()

    classificador.fit(xtrain, ytrain)
    pred = classificador.predict(xtest)
    print('Prediction:', pred)
    print('      Real:', ytest)
    print('Confusion matrix:')
    print(metrics.confusion_matrix(ytest, pred))
    print('\nClassification report:')
    print(metrics.classification_report(ytest, pred))
    print('Accuracy:', 100 * metrics.accuracy_score(ytest, pred), '% \n\n')
    time.sleep(0.4)