def test():
    a = NeuralNetwork(3, [[2, 'sigmoid'], [1, 'relu']])
    b = NeuralNetwork(3, [[2, 'sigmoid'], [1, 'sigmoid']])
    print(a.layers)
    print(b.layers)
    print(crossover(a, b).layers)
def test_activation_funcs():
    x_plus_one = lambda x: x + 1
    nn = NeuralNetwork(50, 2, (20, 10, 5), [x_plus_one])
    assert list(nn.activation_funcs) == [x_plus_one] * 4
    x_plus_two = lambda x: x + 2
    nn = NeuralNetwork(50, 2, (20, ), [x_plus_one, x_plus_two])
    assert list(nn.activation_funcs) == [x_plus_one, x_plus_two]
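# The test above implies a single activation function is broadcast across
# every layer (three hidden layers plus the output layer gives the four
# entries). A minimal sketch of that behaviour under that assumption; the
# helper name `broadcast_activation_funcs` is hypothetical:
def broadcast_activation_funcs(funcs, n_layers):
    """Repeat a lone activation function for every layer; otherwise the
    caller must supply exactly one function per layer."""
    funcs = list(funcs)
    if len(funcs) == 1:
        return funcs * n_layers
    assert len(funcs) == n_layers, "need one activation per layer"
    return funcs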
def crossover(self):
    parent_a = self.pick_one()
    parent_b = self.pick_one()
    # DNA.crossover returns the child's weight and bias arrays
    weights, bias = parent_a.DNA.crossover(parent_b.DNA)
    child = NeuralNetwork(5, 8, 1, weights=weights, bias=bias)
    child.mutate(self.mr)
    return child
def test_nn_blanks():
    x, y = load_tictactoe_csv("tic-tac-toeWBlanksTraining.csv")
    nn = NeuralNetwork(x, y, 11, .00066)
    nn.train(100000)
    boards = []
    labels = []
    with open("tic-tac-toeWBlanksValidation.csv") as file:
        for line in file:
            cols = line.strip().split(",")
            board = []
            for s in cols[:-1]:
                if s == "o":
                    board += [0]
                elif s == "x":
                    board += [1]
                else:
                    board += [2]
            label = [0] if cols[-1] == "Owin" else [1]
            labels.append(label)
            boards.append(board)
    lines = np.array(boards)
    outputs = np.array(labels)
    count = 0
    right = 0
    wrong = 0
    for line in lines:
        actual_output = outputs[count]
        calc_output = int(nn.inference(line) + .5)  # rounds to 0 or 1
        if actual_output == calc_output:
            right += 1
        else:
            wrong += 1
        count += 1
    print("Accuracy: " + str(right / (right + wrong)))
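# `load_tictactoe_csv` is not shown in this section. A minimal sketch that
# mirrors the validation-file parsing above ("o" -> 0, "x" -> 1, blank -> 2,
# label 0 for "Owin" and 1 otherwise); the real loader may differ:
import numpy as np

def load_tictactoe_csv(path):
    encoding = {"o": 0, "x": 1}
    boards, labels = [], []
    with open(path) as f:
        for line in f:
            cols = line.strip().split(",")
            boards.append([encoding.get(s, 2) for s in cols[:-1]])
            labels.append([0] if cols[-1] == "Owin" else [1])
    return np.array(boards), np.array(labels)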
def main():
    load_from_file = True
    # number of input, hidden and output nodes
    input_nodes = 784
    hidden_nodes = 200
    output_nodes = 10
    # learning rate
    learning_rate = 0.1
    epochs = 5
    datasets = 60000
    n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
    if not load_from_file:
        train(n, output_nodes, datasets, epochs)
        wih, who = n.get_weights()
        pandas.DataFrame(wih).to_csv(os.path.join(os.getcwd(), 'wih.csv'),
                                     sep=';', header=None, index=False)
        pandas.DataFrame(who).to_csv(os.path.join(os.getcwd(), 'who.csv'),
                                     sep=';', header=None, index=False)
    else:
        wih = pandas.read_csv(os.path.join(os.getcwd(), 'wih.csv'), sep=';', header=None)
        who = pandas.read_csv(os.path.join(os.getcwd(), 'who.csv'), sep=';', header=None)
        n.weights(wih.values, who.values)
    test(n)
    show(n)
def __init__(self, y, u, k, omega, Retau, verbose=False, model=None):
    self.y = np.copy(y)
    ny = np.size(self.y)
    self.verbose = verbose
    self.q = np.zeros(3 * ny, dtype=float)
    self.Retau = Retau
    self.nu = 1e-4
    # state vector interleaves u, k, omega at each grid point
    self.q[0:3*ny:3] = u[:]
    self.q[1:3*ny:3] = k[:]
    self.q[2:3*ny:3] = omega[:]
    self.writedir = "."
    self.tol = 1e-11
    self.ny = ny
    self.n = self.ny * 3
    self.maxiter = 10
    self.dt = 1e6
    self.force_boundary = False
    self.neq = 1
    self.rho = 1.0
    self.dp = calc_dp(self.Retau, self.nu)
    # k-omega model closure constants
    self.sigma_w = 0.5
    self.beta_0 = 0.0708
    self.gamma_w = 13.0 / 25.0
    self.sigma_k = 0.6
    self.beta_s = 0.09
    self.model = model
    if self.model is None or self.model == "linear":
        self.beta = np.ones(ny, dtype=float)
    elif self.model == 'nn':
        self.nn = NeuralNetwork(sizes=[1, 3, 1])
        self.beta = np.random.randn(self.nn.n) * 1e-2
        self.nn.set_from_vector(self.beta)
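# `calc_dp` is defined elsewhere. For turbulent channel flow, the friction
# Reynolds number is Retau = u_tau * delta / nu, and the driving pressure
# gradient balances the wall shear: |dp/dx| = rho * u_tau**2 / delta.
# A sketch assuming unit half-height delta and unit density; the sign
# convention depends on how the source term enters the solver:
def calc_dp(Retau, nu, delta=1.0, rho=1.0):
    u_tau = Retau * nu / delta       # friction velocity
    return rho * u_tau ** 2 / delta  # magnitude of the streamwise pressure gradient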
def test_weight_matrix_without_hidden_layers():
    weights = np.random.uniform(size=200)
    matrix = weights.reshape(50, 4)
    nn = NeuralNetwork(49, 4, [], range(1))
    nn.weights = weights
    assert np.all(np.equal(list(nn.weight_matrices)[0], matrix))
def run_network(learning_rate, hidden_nodes):
    brain = NeuralNetwork(784, hidden_nodes, 10)
    brain.set_learning_rate(learning_rate)
    counter = 0
    for image, label in zip(training_images, training_labels):
        counter += 1
        print(
            f"Backward propagating the {counter} image, which was a {label} with array: {labels_dict[label]}."
        )
        brain.train(image.ravel(), labels_dict[label])
        # if counter > 20_000: break
    # print(brain.weights_ho[2:3, 2:4])
    correct = 0
    for i in range(len(testing_images)):
        output = brain.feedforward(testing_images[i].ravel())
        output = output.ravel()
        # print(testing_images[i].ravel())
        print(output)
        print(
            f"The letter was thought to be {output.argmax()} by the neural network, but was actually {testing_labels[i]}."
        )
        if output.argmax() == testing_labels[i]:
            correct += 1
            print("IT WAS CORRECT!")
    print(correct / len(testing_images))
    return correct / len(testing_images)
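# `labels_dict` maps a digit label to its one-hot target array (the network
# above has 10 outputs). A plausible construction, assumed rather than taken
# from the source:
import numpy as np

labels_dict = {digit: np.eye(10)[digit] for digit in range(10)}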
def main():
    brain = NeuralNetwork(784, 3000, 10)
    brain.set_learning_rate(.001)
    counter = 0
    training_images_norm = normalize_images(training_images)
    testing_images_norm = normalize_images(testing_images)
    for image, label in zip(training_images_norm, training_labels):
        counter += 1
        print(
            f"Backward propagating the {counter} image, which was a {label} with array: {labels_dict[label]}."
        )
        brain.train_tanh(image.ravel(), labels_dict[label])
        # learning_rate_annealing_cyclic(brain, counter)  # brain, iteration
        # print(f"Learning Rate: {brain.learning_rate}")
        if counter >= 1000:
            break
    correct = 0
    for i in range(len(testing_images_norm)):
        output = brain.feedforward_tanh(testing_images_norm[i].ravel())
        output = output.ravel()
        # print(testing_images[i].ravel())
        print(output)
        print(
            f"The letter was thought to be {output.argmax()} by the neural network, but was actually {testing_labels[i]}."
        )
        if output.argmax() == testing_labels[i]:
            correct += 1
            print("IT WAS CORRECT!")
    print(correct / len(testing_images_norm))  # overall test accuracy
def crossover(n1: NeuralNetwork, n2: NeuralNetwork) -> NeuralNetwork:
    if n1.input_size != n2.input_size:
        raise Exception('not compatible networks')
    input_size = n1.input_size
    layers = []
    for l1, l2 in zip(n1.layers, n2.layers):
        a1 = l1[3]
        a2 = l2[3]
        if len(l1[0]) != len(l2[0]):
            raise Exception('not compatible layers')
        s = len(l1[0])
        layer = [s, choose(a1, a2)]
        layers.append(layer)
    result = NeuralNetwork(input_size, layers)
    for l in range(len(result.layers)):
        # inherit each weight from one parent at random, then mutate it
        w = result.layers[l][0]
        for i in range(len(w)):
            for j in range(len(w[0])):
                val = choose(n1.layers[l][0][i][j], n2.layers[l][0][i][j])
                w[i][j] = mutate(val)
        # same for the biases
        b = result.layers[l][1]
        for i in range(len(b)):
            for j in range(len(b[0])):
                val = choose(n1.layers[l][1][i][j], n2.layers[l][1][i][j])
                b[i][j] = mutate(val)
    return result
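# `choose` and `mutate` are not shown here. Plausible minimal versions:
# `choose` picks one parent's gene at random, and `mutate` perturbs a value
# with small probability. Sketches under those assumptions; the mutation
# rate and noise scale below are illustrative, not taken from the source:
import random

def choose(a, b):
    return a if random.random() < 0.5 else b

def mutate(val, rate=0.05, scale=0.1):
    if random.random() < rate:
        return val + random.gauss(0.0, scale)
    return val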
def test_nn_5():
    x, y = load_tictactoe_csv("tic-tac-toeWBlanks.csv")
    nn = NeuralNetwork(x, y, 20, .01)
    nn.train(100000)
    print(nn.loss())
    print(nn.accuracy_calculator())
    assert nn.loss() < .01
def train(XTrain, YTrain, args):
    """
    This function is used for the training phase.

    Parameters
    ----------
    XTrain : numpy matrix
        The matrix containing sample features (not indices) for training.
    YTrain : numpy matrix
        The array containing labels for training.
    args : dict
        The parameters used to set up the NN model.

    Returns
    -------
    NN : NeuralNetwork object
        The trained NN object.
    """
    # 1. Initialize a network object with the given args.
    nn = NeuralNetwork(args["NNodes"], args["activate"],
                       args["deltaActivate"], args["task"])

    # 2. Train the model with "fit".
    # (hint: use the plotDecisionBoundary function to visualize after training)
    nn.fit(XTrain, YTrain, args["learningRate"], args["epochs"],
           args["regLambda"], args["batchSize"])

    # 3. Return the model.
    return nn
def __init__(self, iq, eq, mass, x, id, final_mass, breed_mass_div, breed_chance,
             size_factor, move_brain=None, social_brain=None, parent_id=None):
    self.iq = iq
    self.eq = eq
    if move_brain is not None:
        self.move_brain = move_brain
    else:
        if iq == 1:
            # if the size of the hidden layers is 1, the number of hidden layers doesn't matter
            self.move_brain = NeuralNetwork([3, 1])
        else:
            self.move_brain = NeuralNetwork([3, iq, iq, 1])
    if social_brain is not None:
        self.social_brain = social_brain
    else:
        if eq == 1:
            # if the size of the hidden layers is 1, the number of hidden layers doesn't matter
            self.social_brain = NeuralNetwork([4, eq, 2])
        else:
            self.social_brain = NeuralNetwork([4, eq, eq, 2])
    self.parent_id = parent_id
    self.mass = mass
    self.energy = mass
    self.speed = (1 / mass) * size_factor * G_SPEED_FACTOR
    self.health = mass
    self.final_mass = final_mass
    self.breed_mass_div = breed_mass_div
    self.breed_chance = breed_chance
    self.x = x
    self.id = id
    self.size_factor = size_factor
def task1():
    # binary classification
    net = NeuralNetwork([2, 4, 1], activation='line', softmax_=False)
    train_N = 200
    test_N = 100
    x = np.random.normal(loc=0.0, scale=2.0, size=(train_N, 2))
    a = 1.0
    b = 0.15
    f = lambda x: a * x + b
    plt.figure(1)
    plt.plot(x, f(x), 'g', label='true decision boundary')
    # linearly separate the points against the line
    y = np.zeros([train_N, 1])
    for i in range(train_N):
        if f(x[i, 0]) >= x[i, 1]:  # point below the line
            y[i] = 1
            plt.plot(x[i, 0], x[i, 1], 'bo', markersize=8, label='class 1')
        else:  # point above the line
            y[i] = -1
            plt.plot(x[i, 0], x[i, 1], 'ro', markersize=8, label='class 2')
    plt.legend(labels=['true decision boundary'], loc=1)
    plt.title('random data generation and display')
    plt.show()
    wb = net.train(x, y, epochs=100, lr=0.001, batchsize=8)
    newx = np.random.normal(loc=0.0, scale=2.0, size=(test_N, 2))
    y_preds = np.array(
        list(map(net.forward, newx, (wb for _ in range(len(newx))))))
    plt.figure(2)
    plt.plot(x, f(x), 'g', label='true decision boundary')
    for i in range(test_N):
        if y_preds[i][0] > 0:
            plt.plot(newx[i, 0], newx[i, 1], 'b^', markersize=8, label='class 1 (predicted)')
        else:
            plt.plot(newx[i, 0], newx[i, 1], 'r^', markersize=8, label='class 2 (predicted)')
    plt.legend(labels=['true decision boundary'], loc=1)
    # plt.plot(x, f(x), 'y')
    # plt.legend()
    plt.show()
def test_nn_3():
    x, y = load_tictactoe_csv("tic-tac-toeWBlanksSmall.csv")
    nn = NeuralNetwork(x, y, 10, .004)
    nn.train(10000)
    print("3 " + str(nn.loss()))
    print(nn.accuracy_calculator())
    assert nn.loss() < .1
def test():
    nn = NeuralNetwork(shape)
    print("0", nn.forward([0, 0]))
    print("fitness", fitness(nn), "\n")
    for i in range(10):
        nn.mutate(1)
        print("0", nn.forward([0, 0]))
        print("fitness", fitness(nn), "\n")
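# `shape` and `fitness` are defined elsewhere. Given the two-element input
# above, one plausible fitness is negative squared error over the XOR truth
# table; this is purely an illustrative assumption (as is indexing the
# network output as a one-element sequence):
XOR_CASES = [([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 0)]

def fitness(nn):
    error = sum((nn.forward(inp)[0] - target) ** 2 for inp, target in XOR_CASES)
    return -error  # higher fitness means lower error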
def __init__(self, xPos, yPos, xPos_range, yPos_range, initEmpty,
             vertical_fuel_depletion_rate=0.05, horizontal_fuel_depletion_rate=0.05,
             name='agent', color=(0, 0, 0, 50)):
    # Call the parent's constructor
    super().__init__()
    self.gravity = 0.0
    self.drag = 0.0
    self.lift = -10
    self.push = 2
    self.maxLim_y_velocity = 20
    self.minLim_y_velocity = -20
    self.maxLim_x_velocity = 4
    self.minLim_x_velocity = -4
    self.velocity_y = 0
    self.velocity_x = 0
    self.radius = 20
    self.color = color
    self.current_closest_block = None
    self.fuel = 1.0
    self.failure_meter = 0.0
    self.vertical_fuel_depletion_rate = vertical_fuel_depletion_rate
    self.horizontal_fuel_depletion_rate = horizontal_fuel_depletion_rate
    if xPos_range is not None:
        xPos = np.random.randint(xPos_range[0], xPos_range[1])
    if yPos_range is not None:
        yPos = np.random.randint(yPos_range[0], yPos_range[1])
    self.name = name
    self.image = pygame.Surface([self.radius, self.radius], pygame.SRCALPHA)
    self.image.fill(self.color)
    self.rect = self.image.get_rect()
    self.rect.x = xPos
    self.rect.y = yPos
    self.previous_xPos = self.rect.right
    self.starting_xPos = xPos
    self.starting_yPos = yPos
    self.timeSamplesExperianced = 1
    self.totalDistanceFromGapOverTime = 0
    self.fitness = 0
    self.avgDistFromGap = 0
    msLayerUnits = [12, 7, 2]
    msActFunctions = ["relu", "tanh"]
    self.functional_system = NeuralNetwork(layer_units=msLayerUnits,
                                           activation_func_list=msActFunctions)
    if not initEmpty:
        self.functional_system.init_layers(init_type="he_normal")
    else:
        self.functional_system.init_layers(init_type="zeros")
def test_or_nn(verbose=0):
    x, y = create_or_nn_data()
    nn = NeuralNetwork(x, y, 4, 1)
    nn.feedforward()
    if verbose > 0:
        print("OR 1 " + str(nn.loss()))
        print("NN output " + str(nn._output))
        print(nn.accuracy_precision())
    assert nn.loss() < .04
def task3():
    train_N = 100
    test_N = 100
    # four Gaussian clusters, one per quadrant
    x1 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [-10, 10]
    x2 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [10, 10]
    x3 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [-10, -10]
    x4 = np.random.normal(loc=0.0, scale=4.0, size=(train_N, 2)) + [10, -10]
    y1 = np.array([[1., 0., 0., 0.] for _ in range(train_N)])
    y2 = np.array([[0., 1., 0., 0.] for _ in range(train_N)])
    y3 = np.array([[0., 0., 1., 0.] for _ in range(train_N)])
    y4 = np.array([[0., 0., 0., 1.] for _ in range(train_N)])
    plt.plot(x1[:, 0], x1[:, 1], 'ro')
    plt.plot(x2[:, 0], x2[:, 1], 'yo')
    plt.plot(x3[:, 0], x3[:, 1], 'bo')
    plt.plot(x4[:, 0], x4[:, 1], 'go')
    plt.show()
    x = np.vstack((x1, x2, x3, x4))
    y = np.vstack((y1, y2, y3, y4))
    net = NeuralNetwork([2, 4, 4], activation='relu', softmax_=True)
    wb = net.train(x, y, loss='cross_entropy', epochs=200, lr=0.01, batchsize=2)
    newx1 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [-10, 10]
    newx2 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [10, 10]
    newx3 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [-10, -10]
    newx4 = np.random.normal(loc=0.0, scale=4.0, size=(test_N, 2)) + [10, -10]
    newx = np.vstack((newx1, newx2, newx3, newx4))
    y_preds = np.array(
        list(map(net.forward, newx, (wb for _ in range(len(newx))))))
    # y_preds = np.array([softmax(a) for a in np.squeeze(y_preds)])
    print(y_preds)
    sty = ['r^', 'y^', 'b^', 'g^']
    plt.figure(2)
    # plot every test point, styled by its predicted class
    for i in range(len(newx)):
        plt.plot(newx[i, 0], newx[i, 1], sty[int(np.argmax(y_preds[i]))],
                 markersize=8,
                 label='class %d (predicted)' % (int(np.argmax(y_preds[i])) + 1))
    plt.show()
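# The commented-out line above applies a `softmax` helper to the raw
# outputs. For reference, the standard numerically stable form:
import numpy as np

def softmax(z):
    z = z - np.max(z)  # shift by the max for numerical stability
    e = np.exp(z)
    return e / e.sum()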
def do_train(self, command):
    train_parser = argparse.ArgumentParser(prog=self.prog + ' train')
    train_parser.add_argument('-d', '--data', type=str, required=True, dest="datasets",
                              nargs="*", help="path to the dataset file")
    train_parser.add_argument('-i', '--inodes', type=int, help="Number of input nodes")
    train_parser.add_argument('-o', '--onodes', type=int, help="Number of output nodes")
    train_parser.add_argument('-n', '--hnodes', type=int, required=True,
                              help="Number of hidden nodes")
    train_parser.add_argument('-l', '--l_rate', type=float, default=0.1, help="Learning rate")
    train_parser.add_argument('-e', '--epoch', type=int, default=5,
                              help="Number of times the data set will be iterated over in training")
    train_parser.add_argument('--out', type=str, default="out")
    args = train_parser.parse_args(command)
    self.print_("Creating Network object..", c="magenta", c_attrs=["bold"], trailing=True)
    sys.stdout.flush()
    temp_ds = Dataset.load(args.datasets[0])
    n = NeuralNetwork(temp_ds.input_nodes, args.hnodes, temp_ds.output_nodes, args.l_rate)
    self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)
    # self.print_("Converting Images to arrays...", c="magenta", c_attrs=["bold"])
    # data = []
    # dataset = Dataset(n.input_nodes, n.output_nodes)
    # for i, csv in enumerate(args.fn):
    #     dataset.input_csv(csv, training=True)
    #     sys.stdout.flush()
    #     self.print_("\r\t({}/{})".format(i+1, len(args.fn)), trailing=True, use_prog=False)
    # self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)
    self.print_("Training Network...", c="magenta", c_attrs=["bold"])
    errors = 0
    for dataset_fn in args.datasets:
        dataset = Dataset.load(dataset_fn)
        for j, record in enumerate(dataset.records):
            # for i in xrange(0, len(record.inputs)):
            percent = str((float(j + 1) / len(dataset.records)) * 100)[:5]
            data = n.train(record, args.epoch)
            sys.stdout.flush()
            self.print_("\r\t{}% Error: {}".format(percent, data.output_errors[-1][0]),
                        c_attrs=["bold"], use_prog=False, trailing=True)
    self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)
    self.print_("Saving Network ({}.nn.pkl)...".format(args.out), c="magenta",
                c_attrs=["bold"], trailing=True)
    n.save(args.out)
    self.print_(" OK", c="blue", c_attrs=["bold"], use_prog=False)
    self.print_("ERRORS: {}".format(errors))
def createNN(self):
    inputCount = self.size * self.size
    outputCount = self.size * self.size
    self.neural_network = NeuralNetwork(
        inputCount,
        inputCount * 6,
        outputCount,
        [inputCount * 5, inputCount * 4, inputCount * 3, inputCount * 2])
def test_weights_init():
    # Pass in a list instead of an actual activation for the test
    weight_min = -0.5
    weight_max = 0.5
    nn = NeuralNetwork(10, 2, [], range(1), weight_min=weight_min, weight_max=weight_max)
    assert all(weight_min <= weight <= weight_max for weight in nn.weights)
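# The test implies weights are drawn uniformly from [weight_min, weight_max].
# A minimal sketch of that initialization, assuming the flat weight vector
# used by the weight-matrix tests in this section:
import numpy as np

def init_weights(n_weights, weight_min=-0.5, weight_max=0.5):
    return np.random.uniform(weight_min, weight_max, n_weights)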
def __init__(self, weights=None):
    self.weights = weights
    super().__init__(weights)
    self.network = NeuralNetwork(input_shape=(1, 22))
    self.genome_len = self.network.num_nodes
    if self.weights is not None:
        self.network.weights = self.network.weights_to_ndarray(self.weights)
    else:
        self.weights = self.network.weights_to_list()
    self.genome = self.weights
def graph_annealing():
    space = list(range(24000))
    brain = NeuralNetwork(4, 4, 2)
    x = list()
    y = list()
    for value in space:
        x.append(value)
        y.append(learning_rate_annealing_expo_cyclic(brain, value))
    plt.plot(x, y)
    plt.show()
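# `learning_rate_annealing_expo_cyclic` is defined elsewhere. One common
# reading of the name: exponential decay that restarts every fixed number
# of steps. A sketch with illustrative constants (the cycle length and
# bounds are assumptions, not taken from the source):
def learning_rate_annealing_expo_cyclic(brain, step, max_lr=0.1, min_lr=0.001,
                                        cycle_len=4000):
    phase = (step % cycle_len) / cycle_len     # position within the current cycle
    lr = max_lr * (min_lr / max_lr) ** phase   # exponential decay, then restart
    brain.set_learning_rate(lr)
    return lr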
def __init__(self):
    self.logger = logging.getLogger('predictor.Predictor')
    self.nn = NeuralNetwork()
    with open(self.BRANDS, 'r') as f:
        self.brands = np.array(json.load(f))
    with open(self.CATEGORIES, 'r') as f:
        self.categories = np.array(json.load(f))
    with open(self.TOKENIZER, 'r') as f:
        self.tokenizer = tokenizer_from_json(f.read())
    self.nn.load([self.MODEL_ARCH, self.MODEL_WEIGHTS])
def test_ttt_nn(verbose=0):
    x, y = load_tictactoe_csv("tic-tac-toe-1.csv")
    nn = NeuralNetwork(x, y, 4, .1)
    nn.load_4_layer_ttt_network()
    nn.feedforward()
    if verbose > 0:
        print("NN 1 " + str(nn.loss()))
        print("NN output " + str(nn._output))
        print(nn.accuracy_precision())
    assert nn.loss() < .02
def test_weight_matrix_with_hidden_layers():
    weights_a = np.random.uniform(0, 1, 55)
    weights_b = np.random.uniform(0, 1, 12)
    matrix_a = weights_a.reshape((11, 5))
    matrix_b = weights_b.reshape((6, 2))
    nn = NeuralNetwork(10, 2, (5, ), range(2))
    nn.weights = list(it.chain(weights_a, weights_b))
    for m1, m2 in zip(nn.weight_matrices, [matrix_a, matrix_b]):
        assert np.all(np.equal(m1, m2))
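# Both weight-matrix tests imply the flat weight vector is sliced per layer
# and reshaped to (inputs + 1, outputs), the extra row presumably holding
# bias weights: 49 inputs + 1 by 4 outputs gives the (50, 4) matrix above,
# and 10 -> (5,) -> 2 gives (11, 5) and (6, 2). A sketch of such a property
# under that assumption:
import numpy as np

def weight_matrices(weights, n_inputs, n_outputs, hidden_layers):
    layer_sizes = [n_inputs] + list(hidden_layers) + [n_outputs]
    offset = 0
    for n_in, n_out in zip(layer_sizes, layer_sizes[1:]):
        size = (n_in + 1) * n_out  # +1 row for the bias weights
        yield np.asarray(weights[offset:offset + size]).reshape(n_in + 1, n_out)
        offset += size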
def seek_fp(x):
    nn = NeuralNetwork(connection_matrix, transmission_history_len=10**4)
    nn.set_strengthen_functions(getattr(strengthen_functions, 'PF' + str(pf)))
    nn.initialize_synapses_strength(x, .1)
    nn.strengthen_rate = delta
    strength_stats = []
    for _ in range(200000):
        # stimulate each neuron independently with its given probability
        neurons_stimulated = set(
            np.where(neurons_stimulated_probs > np.random.rand(N))[0])
        nn.propagate_once(neurons_stimulated, transform_func)
        strength_stats.append(nn.stats()['strength'])
    return strength_stats
def train():
    X_train, X_test, Y_train, Y_test = dp.get_data()
    deep_net = NeuralNetwork(
        layers=[Dense(n_neurons=13, activation=Sigmoid()),
                Dense(n_neurons=13, activation=Sigmoid()),
                Dense(n_neurons=1, activation=Linear())],
        loss=MeanSquaredError(),
        seed=80718)
    trainer = Trainer(deep_net, SGD(learning_rate=0.01))
    trainer.train(X_train, Y_train, X_test, Y_test,
                  epochs=1_000, eval_period=100, batch_size=23, seed=80718)
def run_training(set, sep, args, num_classes):
    exist_tr = os.path.isfile("training_images/" + set + "/" + set + sep + "train.pkl")
    exist_test = os.path.isfile("training_images/" + set + "/" + set + sep + "test.pkl")
    if exist_tr and exist_test:
        dataset_train = load_from_pickle(set, sep + "train")
        dataset_test = load_from_pickle(set, sep + "test")
    else:
        dataset_train = load_from_csv(set, sep + "train")
        dataset_test = load_from_csv(set, sep + "test")
    img_size = 28
    img_pixels = img_size * img_size
    print("Training neural network...")
    # parse simple command-line options: -it=<iterations>, -n=<hidden nodes>, -a=<accuracy>
    iter_limit = 100
    for arg in args:
        if '-it=' in arg:
            iter_limit = int(arg.split("=")[1])
    nodes = 100
    for arg in args:
        if '-n=' in arg:
            nodes = int(arg.split("=")[1])
    acc_limit = 0.85
    for arg in args:
        if '-a=' in arg:
            acc_limit = float(arg.split("=")[1])
    if acc_limit >= 1.0:
        print("Accuracy threshold is too high. Reset to 0.85...")
        acc_limit = 0.85
    nn = NeuralNetwork(img_pixels, num_classes, nodes)
    params = None
    if '-r' in args:
        print("Resuming from previous training...")
        params = np.load("trained/" + set + "_params.npy")
    acc = 0.0
    iteration = 0
    while acc < acc_limit and iteration < iter_limit:
        data_train, labels_train = build_dataset(dataset_train, num_classes)
        data_test, labels_test = build_dataset(dataset_test, num_classes)
        params = nn.train(data_train, labels_train, params)
        np.save("trained/" + set + "_params", params)
        acc = nn.test(data_test, labels_test)
        print("Iteration {} completed with {}% accuracy".format(
            iteration, round(acc * 100, 4)))
        iteration += 1