def create_phenotype(chromo):
    num_inputs = chromo.sensors
    num_neurons = len(chromo.node_genes) - num_inputs
    #num_outputs = chromo.actuators

    network = ann.ANN(num_inputs, num_neurons)

    if chromo.node_genes[-1].activation_type == 'tanh':
        network.set_logistic(0)

    # create neurons
    for ng in chromo.node_genes[num_inputs:]:
        if ng.type == 'OUTPUT':
            neuron_type = 1
        else:
            neuron_type = 0
        network.set_neuron(ng.id - num_inputs - 1, ng.bias, ng.response, neuron_type)

    # create connections
    for cg in chromo.conn_genes:
        if cg.enabled:
            if cg.innodeid - 1 < num_inputs:
                # set sensory input
                network.set_sensory_weight(cg.innodeid - 1, cg.outnodeid - num_inputs - 1, cg.weight)
            else:
                # set interneuron connection
                network.set_synapse(cg.innodeid - num_inputs - 1, cg.outnodeid - num_inputs - 1, cg.weight)

    return network
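# A minimal sketch of the index arithmetic above, with assumed numbers: gene
# ids are 1-based and sensor nodes come first (which is what the offsets
# imply), so with num_inputs = 2 the node genes with ids 3..5 become network
# neurons 0..2.
num_inputs = 2
for node_id in (3, 4, 5):
    # same formula as network.set_neuron(ng.id - num_inputs - 1, ...)
    print(node_id, "->", node_id - num_inputs - 1)  # 3 -> 0, 4 -> 1, 5 -> 2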
def find_optimal_hidden_layer(epochs, xin, yin, batch_start, batch_size, min_layer_size, max_layer_size):
    global nn
    outcomes = []  # list of (hidden_nodes, average_accuracy) tuples
    input_nodes = 17
    output_nodes = 26
    tests_per_value = 3  # how many times to repeat each size so the results can be averaged

    # Sweep candidate hidden-layer sizes
    for nodes in range(min_layer_size, max_layer_size):
        print("")
        print("Processing with", nodes, "hidden nodes")
        avg_acc = 0.0
        for i in range(0, tests_per_value):
            print("  Test#", i)
            nn = ann.ANN(input_nodes, nodes, output_nodes, learn)
            run_epochs(epochs, xin, yin, batch_start, batch_size, False)
            # Test against the validation set of 4000
            avg_acc += validate("  Nodes: " + str(nodes) + " - test: " + str(i), split, rows - split, False, False)
        # Store the averaged result in case a single run's random weights skewed the accuracy up or down
        outcomes.append((nodes, avg_acc / tests_per_value))
    return outcomes
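# Picking the winner from the sweep above: `outcomes` holds (hidden_nodes,
# average_accuracy) tuples, so the best size is the entry with the largest
# second element. A usage sketch; X_train and Y_train stand in for the
# caller's training arrays.
outcomes = find_optimal_hidden_layer(50, X_train, Y_train, 0, 500, 1, 100)
best_nodes, best_acc = max(outcomes, key=lambda t: t[1])
print("Best hidden layer size:", best_nodes, "avg accuracy:", best_acc)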
def __init__(self, irc, tpl, annpl, master, animetiming):
    threading.Thread.__init__(self)
    self.irc, self.tpl, self.annpl, self.master, self.animetiming = irc, tpl, annpl, master, animetiming
    self.twitter = twitter.Twitter(self.irc, self.tpl)
    self.ann = ann.ANN(self.irc, self.annpl)
    self.master_online_status = False
    self.namelist = {}
def test():
    plt.ion()
    plt.show()
    datasets = load_data()
    cl = ann.ANN(2, 4, hiddens=[4], lmbd=0.)
    cl.fit(datasets, lr=0.01, batch_size=100, n_epochs=1000)
    print(cl.get_neg_log(data, T.cast(y, 'int32')).mean())
def __init__(self, num_inputs, num_hidden_nodes=20, training_data_ratio=2.0/3.0, iterations=1000, labels=(1, 2, 3)):
    """
    Create an instance of the MultiANN class used to execute the multi-class ANN problem.
    :param num_inputs: Number of input features
    :param num_hidden_nodes: Number of hidden nodes to use in the ANN
    :param training_data_ratio: Ratio of the input data to use as training data
    :param iterations: The total number of iterations to train the ANN
    :param labels: A tuple of labels; the first label in the tuple corresponds to the first output node
    """
    self._training_data_ratio = training_data_ratio
    self._iterations = iterations
    self._multiclass_helper = ann.MulticlassHelper(labels)
    self._network = ann.ANN(num_inputs=num_inputs,
                            num_hidden_nodes=num_hidden_nodes,
                            num_output_nodes=len(labels),
                            learning_rate=0.5)
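# Hypothetical usage of the constructor above: three labels mean three output
# nodes, one per label in tuple order (label 1 -> node 0, label 2 -> node 1, ...).
classifier = MultiANN(num_inputs=4, num_hidden_nodes=20, labels=(1, 2, 3))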
def create_ffphenotype(chromo):
    """ Receives a chromosome and returns its phenotype (a feedforward neural network) """
    num_inputs = chromo.sensors
    num_neurons = len(chromo.node_genes) - num_inputs
    num_outputs = chromo.actuators

    network = ann.ANN(num_inputs, num_neurons)

    if chromo.node_genes[-1].activation_type == 'tanh':
        network.set_logistic(0)

    # build a dict mapping each node in node_order plus each output node to
    # a contiguous neuron index [0, 1, 2, ..., n]
    value = 0
    mapping = {}

    # hidden nodes
    for id in chromo.node_order:
        mapping[id] = value
        network.set_neuron(value, chromo.node_genes[id - 1].bias, chromo.node_genes[id - 1].response, 0)
        value += 1

    # output nodes
    for ng in chromo.node_genes[num_inputs:num_outputs + num_inputs]:
        mapping[ng.id] = value
        network.set_neuron(value, ng.bias, ng.response, 1)
        value += 1

    for cg in chromo.conn_genes:
        if cg.enabled:
            if cg.innodeid - 1 < num_inputs:
                # set sensory input
                network.set_sensory_weight(cg.innodeid - 1, mapping[cg.outnodeid], cg.weight)
            else:
                # set interneuron connection
                network.set_synapse(mapping[cg.innodeid], mapping[cg.outnodeid], cg.weight)

    return network
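# A worked sketch of the `mapping` dict built above, with assumed gene ids:
# two hidden nodes ordered [5, 6] and output genes with ids [3, 4]. Hidden
# nodes are numbered first, outputs after, yielding contiguous neuron indices.
node_order = [5, 6]    # hidden node ids, in activation order
output_ids = [3, 4]    # output node gene ids
mapping = {nid: i for i, nid in enumerate(node_order + output_ids)}
print(mapping)  # {5: 0, 6: 1, 3: 2, 4: 3}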
def __init__(self, num_inputs, threshold=0.5, num_hidden_nodes=20, training_data_ratio=2.0 / 3.0, iterations=1000):
    """
    Create an instance of the BinaryANN class used to execute the binary ANN problem.
    :param num_inputs: Number of input features
    :param num_hidden_nodes: Number of hidden nodes to use in the ANN
    :param threshold: The minimum output value required to classify a sample as 'class 1' (i.e. spam)
    :param training_data_ratio: Ratio of the input data to use as training data
    :param iterations: The total number of iterations to train the ANN
    """
    self._training_data_ratio = training_data_ratio
    self._iterations = iterations
    self._threshold = threshold
    self._network = ann.ANN(num_inputs=num_inputs,
                            num_hidden_nodes=num_hidden_nodes,
                            num_output_nodes=1,
                            learning_rate=0.5)
    self._metrics = metrics.BinaryClassificationMetrics()
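# How the threshold above is typically applied (a sketch, not necessarily the
# class's actual prediction code): the single output node produces a score,
# and anything at or above the threshold is labeled class 1 (spam).
def classify(output, threshold=0.5):
    return 1 if output >= threshold else 0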
    # create connections (tail of create_phenotype, as above)
    for cg in chromo.conn_genes:
        if cg.enabled:
            if cg.innodeid - 1 < num_inputs:
                # set sensory input
                network.set_sensory_weight(cg.innodeid - 1, cg.outnodeid - num_inputs - 1, cg.weight)
            else:
                # set interneuron connection
                network.set_synapse(cg.innodeid - num_inputs - 1, cg.outnodeid - num_inputs - 1, cg.weight)

    return network


if __name__ == "__main__":
    # setting a network manually
    network = ann.ANN(2, 2)
    #network.set_logistic(True)
    network.set_neuron(0, 0.0, 4.924273, 0)
    network.set_neuron(1, 0.0, 4.924273, 1)

    network.set_sensory_weight(1, 1, -0.09569)
    network.set_sensory_weight(0, 0, 1.0)
    network.set_synapse(0, 1, 0.97627)

    for i in range(10):
        print(network.pactivate([1.0, 0.0]))
def test_ann():
    im = array(I.open('./1.JPG').convert('L').resize((252, 142)))
    reconstructor = ann.ANN(prod(im.shape), prod(im.shape), hiddens=[1])
    reconstructor.fit(im, lr=0.1)
# test()
data, y = cPickle.load(open('data.dat', 'rb'))
y = numpy.asarray(y, dtype='int32')
total = len(y)
size = int(total * 0.05)
# data, y = theano.shared(data, borrow=True), T.cast(theano.shared(y, borrow=True), 'int32')

# 3-generation-long memory
memory = [[0] * total] * 3

# randomly sample a training subset
ind = choice(total, size)
cl = ann.ANN(2, 4, hiddens=[4], lmbd=0.)
max_iteration = 10
iteration = 0
while iteration < max_iteration:
    train_set = (theano.shared(data[ind]), theano.shared(y[ind]))

    def plot_in_f2(self):
        plt.clf()
        pred = self.pred(train_set[0])
        plot(train_set[0].get_value(), pred)
        plt.draw()

    cl.fit((train_set, train_set), lr=0.01, batch_size=100, n_epochs=300, plot=plot_in_f2, plot_interval=299)
    iteration += 1
import detector

# Get data
image_handler = ImageHandler(0.0)
train_images, train_targets = image_handler.get_all_train_data()

# Augment data
a.add_invert(train_images, train_targets)

# Select feature extraction methods
method = [e.apply_isodata_threshold]

# Apply methods
train_images = e.extract(method, train_images, True, True, 40)

# Create classifier
a_nn = ann.ANN()

# Train classifier
a_nn.train(train_images, train_targets)

# Read image to perform detection on
detection_image = filehandler.read_detection_image(2)

# Perform sliding window method on image
detector = detector.Detector(detection_image, 1, 20)
windows = detector.sliding_window()

# Apply feature extraction methods to images containing letters
extracted_frames = e.extract(method, deepcopy(windows), True)

# Predict letters
predictions = a_nn.predict(extracted_frames)
"#333333", "#888888", "#330033", "#880088", "#FF00FF", "#008800", "#00FF00", "#000088", "#0000FF", "#000000" ] generations = len(generation_colors) # Random Input for giggles # The input does not really matter in this case # The evolution algorithm is only dependant on the output # It will however optimize the input for the output so it's # rather useful for teaching stuff to play games :) # eg: create a network with one input per possible action # in the game and then evolve the network after each round # depending on the output. network = ann.ANN(1) # Some Layers so that the network can learn network.add_layer(ann.Layer(6)) network.add_layer(ann.Layer(8)) # Output, Should eventually become 0, 0 since that maximizes the function network.add_layer(ann.Layer(2)) # 10 Family members # There's a bunch of other options defaults is: # def __init__(self, family_sz, selection_bias=0.75, verbose=True, # mutation_chance=0.5, mutation_severity=0.4, inheritance=0.4): ga = ann.Genetic(20, verbose=False)
import ann  # importing the C++ module

net = ann.ANN(3, 3)

# bias input
net.set_sensory_weight(0, 0, 1.5)
net.set_sensory_weight(0, 1, 1.5)
net.set_sensory_weight(0, 2, 1.5)

# input 1
net.set_sensory_weight(1, 0, 1.5)
net.set_sensory_weight(1, 1, 1.5)

# input 2
net.set_sensory_weight(2, 0, 1.5)
net.set_sensory_weight(2, 1, 1.5)

# inter-neurons
net.set_synapse(0, 2, 0.5)
net.set_synapse(1, 2, 0.5)
net.set_synapse(2, 1, -0.5)

# neuron's properties: id, bias, response, type
net.set_neuron(0, 0, 1, 0)  # hidden
net.set_neuron(1, 0, 1, 0)  # hidden
net.set_neuron(2, 0, 1, 1)  # output

for i in range(10):
    print(net.sactivate([1.2, 0.2, 0.2]))
    print(net.get_neuron_output(2))
import os
import sys
sys.path.append(os.getcwd() + "/..")

import ann
import numpy as np
from matplotlib import pyplot as plt
import random
import time
import progressbar

np.random.seed(0)
random.seed(0)

NN = ann.ANN([2, 3, 3, 1], [1, 1, 1, 1], "sigmoid", 0.9)

Input = []
Output = []

for ind in range(6):
    Input.append([np.random.normal([-1.0, -1.0], 0.2)])
    Output.append([np.array([1.0])])

for ind in range(6):
    Input.append([np.random.normal([1.0, -1.0], 0.2)])
    Output.append([np.array([1.0])])

for ind in range(6):
    Input.append([np.random.normal([0.0, 1.0], 0.2)])
    Output.append([np.array([1.0])])
    for i in range(l - time_len):
        cur = []
        for i_ in range(time_len):
            # one-hot encode the symbol at position i + i_ over a 16-symbol alphabet
            t = [1 if _ == data[i + i_] else 0 for _ in range(16)]
            cur.extend(t)
        x.append(cur)
        y.append(data[i + time_len])
    print(data)
    x = array(x)
    y = array(y)
    print(x)
    print(y)
    return x, y


if __name__ == '__main__':
    time_len = 7
    x, y = split(a, time_len)
    ind = int(len(x) * 0.7)
    train_x = x[:ind]
    train_y = y[:ind]
    test_x = x[ind:]
    test_y = y[ind:]
    cl = ann.ANN(time_len * 16, 16, hiddens=[40, 40, 40], lmbd=0)
    cl.fit(ann.load_data(([train_x, train_y], [test_x, test_y])), lr=1.0, n_epochs=inf)
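# A tiny worked example of the windowing above, with assumed data: for
# time_len = 2 each row of x is the one-hot encoding of 2 consecutive symbols
# (2 * 16 = 32 values) and y holds the symbol that follows the window.
data = [3, 1, 4, 1, 5]
time_len = 2
x, y = [], []
for i in range(len(data) - time_len):
    cur = []
    for i_ in range(time_len):
        cur.extend([1 if s == data[i + i_] else 0 for s in range(16)])
    x.append(cur)
    y.append(data[i + time_len])
print(y)  # [4, 1, 5]; window [3, 1] -> 4, [1, 4] -> 1, [4, 1] -> 5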
import tensorflow as tf
import pandas as pd
import numpy as np
import constant as ct
#import cnn as model1
import k_means as model2
import ann as model3
import data_transform

if __name__ == "__main__":
    # 1. instantiate model class
    k_means = model2.K_Means()
    ann = model3.ANN()

    # 2. set config
    arg_dict2 = {'dir_model': 'hihi'}
    k_means.set_config(k_means, arg_dict2)
    arg_dict3 = {'tmp': ''}
    ann.set_config(ann, arg_dict3)

    # 3. operate each model
    #    3-1: read data
    #    3-2: set x, y, sequence
    #    3-3: execute model

    ## k-means
    dt = data_transform.Data_transform()
    x2 = dt.read_csv("/root/SMART/in_cluster/nor.csv")
    k_means.set_x(k_means, x2)
def __init__(self, parameters):
    self.params = parameters
    p = self.params
    # network shape: look_back inputs, the configured hidden layers, extrap outputs
    self.net = ann.ANN([p.look_back] + p.hidden_layers + [p.extrap],
                       activation='tanh+ax',
                       learning_rate=p.learning_rate,
                       rand_scale=.1)
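# For concreteness, with assumed parameter values look_back=10,
# hidden_layers=[20, 20], extrap=1, the layer list handed to ann.ANN is:
look_back, hidden_layers, extrap = 10, [20, 20], 1
print([look_back] + hidden_layers + [extrap])  # [10, 20, 20, 1]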
inputs = n
outputs = len(lookupTable)
nodes_layer1 = 500
learning_rate = 0.1
reg_param = 1e-2
threshold_fn = 'logistic'
cost_fn = 'cross_entropy'
epochs = 50
momentum_rate = 0.01
learning_acceleration = 1.05
learning_backup = 0.5

results = []
epoch_list = [1, 5, 10, 20, 50, 100, 500, 1000]
nodes_layer1 = 1000

from sklearn.metrics import accuracy_score

for i in range(len(epoch_list)):
    epochs = epoch_list[i]
    a = ann.ANN(inputs, outputs, [nodes_layer1], epochs, learning_rate,
                momentum_rate, learning_acceleration, learning_backup,
                reg_param, threshold_fn, cost_fn)
    a.fit(trainX, trainY)
    predY = a.predict(cvX)
    acc = accuracy_score(cvY, predY)
    results.append((epochs, acc))
    print("debug#2- epochs:", epochs, ", accuracy:", acc)

results = np.array(results)
np.save('results2.npy', results)
plt.close()

print("Starting Neural Network Training and Validation")
print("********* Runtime parameters ***********")
print("Used Batch Size: " + str(batch_size))
print("Used Epochs: " + str(epochs))
print("Used Learn Rate: " + str(learn))

start_ann = time.time()
print("Loading file and setting up X and Y")
data, rows, cols, split, X, Y, output_nodes = load_and_process_file("./Letters.csv")

print("Creating Neural Network")
nn = ann.ANN(X.shape[1], hidden_nodes, output_nodes, learn)

# finding_optimal tries to identify the best number of hidden nodes to use.
print("Running Training")
if finding_optimal:
    layer_results = find_optimal_hidden_layer(epochs, X[range(0, split), :],
                                              Y[range(0, split), :], 0,
                                              batch_size, 1, 100)
    layer_results = np.asarray(layer_results)
    print(layer_results)
    # column 0 holds the node count, column 1 the averaged accuracy
    print("Best hidden Layer Size:", layer_results[np.argmax(layer_results[:, 1]), 0])
else:
    run_epochs(epochs, X[range(0, split), :], Y[range(0, split), :], 0, batch_size, True)

end_ann = time.time()