def main():
    import time
    start_time = time.time()
    for seed in range(10):
        random.seed(seed)
        stabiter = 10000
        runiter = 1000
        grn = GRN(delta=1)
        #grn.read_genome("moo.dat")
        grn.build_genes()
        grn.add_extra("EXTRA_sineval", 0.0, [0] * 32)
        grn.precalc_matrix()
        grn.regulate_matrix(stabiter)
        onff = 0
        for i in range(0, 50):
            if i % 10 == 0:
                if onff == 1:
                    onff = 0
                else:
                    onff = 1
            inputval = onff
            extra_vals = {'sineval': inputval}
            grn.set_extras(extra_vals)
            grn.regulate_matrix(runiter)
        for conc in grn.conc_list:
            print conc[-1]
        filename = "conc" + str(seed)
        graph.plot_2d(grn.conc_list, filename)
    print "took", str(time.time() - start_time)

def multicore():
    delta = 20
    #set up the evo strategy
    best_list, mut_list = [], []
    evo = popstrat.Evostrategy(5000, 50)
    children = evo.iterate(evo.pop)
    nodes = ("*",)
    job_server = pp.Server(8, ppservers=nodes)
    print "Starting pp with", job_server.get_ncpus(), "workers"
    start_time = time.time()
    for i in range(50):
        run_time = time.time()
        jobs = [(child, job_server.submit(run_grn, (child['genome'], delta),
                                          (), ("grn", "numpy", "math")))
                for child in children]
        for child, result in jobs:
            results, conclist = result()
            bestidx = results.index(max(results))
            child['fitness'] = results[bestidx]
        #plotting the best with colors
        children = evo.iterate(children)
        bestgenome = evo.pop[-1]['genome']
        bestresult, conclist = run_grn(bestgenome, delta)
        bestidx = bestresult.index(max(bestresult))
        filename = "best_gen_" + str("%03d" % i)
        print filename
        colors = []
        simplist = []
        for idx, result in enumerate(bestresult):
            if idx == len(bestresult) - 1:
                simplist.append(conclist[idx])
                colors.append('k')
            elif idx == bestidx:
                colors.append('g')
                simplist.append(conclist[idx])
            # elif result == 0:
            #     colors.append('b')
            # else:
            #     colors.append('r')
        graph.plot_2d(simplist, filename, colors)
        print "gen:", evo.gen_count, "fitness:", evo.pop[-1]['fitness']
        if evo.adaptive:
            evo.adapt_mutation()
        best_list.append(evo.pop[-1]['fitness'])
        mut_list.append(evo.mut_rate)
    mutfile = open('mutrate.txt', 'a')
    mutfile.write(str(mut_list) + '\n')
    mutfile.close()

def main():
    """Read data, train on data, test on data, BAM"""
    esn = ESN(input_size=1, hidden_size=100, output_size=1)
    test_damping(esn, 200)

    #create inputs and training data
    #data = datagen.mackeyglass(900, trunc=300) * 0.5
    data = loadtxt('varsine.dat')
    inputs = empty(300)
    inputs.fill(0.0)
    tmp = empty(300)
    tmp.fill(0.5)
    inputs = hstack((inputs, tmp))
    tmp.fill(1.0)
    inputs = hstack((inputs, tmp))
    graph.plot_2d([inputs, data], "data")

    #train and test the esn
    train_out = esn.train(data, inputs)
    test_out = esn.test(data, inputs)
    graph.plot_2d([train_out, data[100:900]], "train_out")
    graph.plot_2d([test_out, data], "test_out")

    #plot residuals
    train_res = residual(train_out, data[100:])
    test_res = residual(test_out, data)
    graph.plot_2d([train_res], "train_residual")
    graph.plot_2d([test_res], "test_residual")

    #calculate mean square error
    print "Train MSE", mse(train_out, data[100:])
    print "Test MSE", mse(test_out, data)

def graph_fields(self):
    """for all companies"""
    for key in self.fields:
        results_list = []
        for record in self.table:
            result = []
            for column in self.fields[key]:
                result.append(record[column])
            results_list.append(result)
        graph.plot_2d(results_list, key)

def main():
    """comparison code"""
    import random
    random.seed(1)
    genome = [random.randint(0, 1) for _ in range(0, 5000)]
    results, conc_list = run_grn(genome, delta=1, syncsize=1, offset=10)
    if len(conc_list) > 1:
        graph.plot_2d(conc_list, "testdata" + str(1))

def main():
    """comparison code"""
    import random
    random.seed(1)
    genome = [random.randint(0, 1) for _ in range(0, 5000)]
    results, conc_list = run_grn(genome, delta=1, syncsize=100, offset=10)
    minresult = 126 - max(results)
    print "bestfit:", minresult
    if len(conc_list) > 1:
        graph.plot_2d(conc_list, "sinedata" + str(1))

def train(self, target, inputs, trim=100):
    """Calculate weights between hidden layer and output layer for a given
    time series, uses pseudo-inverse training step"""
    acts = zeros((len(target), self.hidden_size))
    summed_acts = []
    #create initial state for the hidden nodes
    for i in range(len(acts[0])):
        acts[0][i] = (random() * 2) - 1
    # create the activations
    for i in range(1, len(target)):
        # turn target into array
        targ, inp = array([target[i-1]]), array([inputs[i-1]])
        # dotting target with back weights as the teacher signal
        activation = tanh(dot(acts[i-1], self.weights['hidden']) +
                          dot(self.weights['back'], targ) +
                          dot(self.weights['input'], inp))
        # leaky integrator: prev state affects current state
        acts[i] = ((1 - self.alpha) * acts[i-1])
        acts[i] += self.alpha * activation
    #trim out the initial 100 activations as they are unstable
    target = target[trim:]
    inputs = inputs[trim:]
    acts = acts[trim:, :]
    #store activations and plot
    self.acts = acts
    graph.plot_2d(acts.T, "training_activations")
    #add the inputs to the activations
    acts = vstack((acts.T, inputs)).T
    # Pseudo-inverse to train the output and setting weights
    tinv = arctanh(target)
    clf = linear_model.RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0])
    #clf = linear_model.Ridge(alpha=0.5)
    #clf = linear_model.LassoCV()
    clf.fit(acts, tinv)
    self.weights['out'] = linear_model.ridge_regression(acts, tinv, alpha=.15)
    self.weights['out'] = clf.coef_
    #self.weights['out'] = linalg.lstsq(acts, tinv)[0]
    #residual = dot(acts, self.weights['out']) - tinv
    graph.bar_plot(self.weights['out'], "weights")
    # checking the output against previous activations
    train_out = []
    for act in acts:
        output = tanh(dot(act, self.weights['out']))
        train_out.append(output)
    return train_out

def graph_fields(table, fields):
    for field in fields:
        print "graphing", field['name']
        results_list = []
        for record in table:
            result = []
            for column in field['cols']:
                result.append(record[column])
            results_list.append(result)
        graph.plot_2d(results_list, "graphs/" + field['name'])

def graph_attrib(table, fields):
    for field in fields:
        print "graphing", field['name']
        results_list = []
        for record in table:
            result = []
            for column in field['cols']:
                result.append(record[column])
            results_list.append(result)
        graph.plot_2d(results_list, field['name'])
        graph.plot_ave(results_list, field['name'])
        graph.boxplot_data(results_list, field['name'])

def test_damping(esn, iterations):
    """Checking if the network stabilises"""
    acts = zeros((iterations, esn.hidden_size))
    #set them to random initial activations
    for i in range(len(acts[0])):
        acts[0][i] = (random() * 2) - 1
    # lets see if it dampens out
    for i in range(1, iterations):
        acts[i] = ((1 - esn.alpha) * acts[i-1])
        acts[i] += esn.alpha * tanh(dot(acts[i-1], esn.weights['hidden']))
    graph.plot_2d(acts.T, "damped_activations")

def main():
    # input not used yet
    esn = ESN(input_size=1, hidden_size=20, output_size=1)
    esn.test_damping(200)
    #gendata = datagen.sine(600) * 0.5
    data = loadtxt('sine2.dat')
    train_data = data[:300]
    test_data = data[:50]
    train_out = esn.train(train_data)
    test_out = esn.test(test_data)
    graph.plot_2d([train_out, data[100:300]], "train_out")
    graph.plot_2d([test_out, data[:50]], "test_out")
    #plot residuals
    train_res = residual(train_out, train_data[100:])
    test_res = residual(test_out, test_data)
    graph.plot_2d([train_res], "train_residual")
    graph.plot_2d([test_res], "test_residual")
    #calculate mse
    train_error = mse(train_out, train_data[100:])
    test_error = mse(test_out, test_data)
    print "Train MSE", train_error
    print "Test MSE", test_error

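# Note: the ESN driver scripts above call residual() and mse(), but neither
# helper is defined in this section. The sketch below is an assumed, minimal
# implementation (elementwise error and mean squared error over arrays); the
# project's actual helpers may differ.
from numpy import asarray

def residual(predicted, target):
    """Elementwise difference between prediction and target (assumed helper)."""
    return asarray(predicted) - asarray(target)

def mse(predicted, target):
    """Mean squared error between prediction and target (assumed helper)."""
    err = asarray(predicted) - asarray(target)
    return float((err ** 2).mean())
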
def multicore(filename, delta, popsize, generations):
    #set up the evo strategy
    evo = popstrat.Evostrategy(5000, popsize)
    children = evo.iterate(evo.pop)
    nodes = ("*", )
    job_server = pp.Server(8, ppservers=nodes)
    print "Starting pp with", job_server.get_ncpus(), "workers"
    start_time = time.time()
    for i in range(generations):
        run_time = time.time()
        jobs = [(child, job_server.submit(run_grn, (child['genome'], delta),
                                          (), ("cgrn as grn", "numpy", "math")))
                for child in children]
        for child, result in jobs:
            results, conclist = result()
            bestidx = results.index(max(results))
            child['fitness'] = results[bestidx]
        #plotting the best with colors
        children = evo.iterate(children)
        bestgenome = evo.pop[-1]['genome']
        bestresult, conclist = run_grn(bestgenome, delta)
        bestidx = bestresult.index(max(bestresult))
        colors = []
        simplist = []
        for idx, result in enumerate(bestresult):
            if idx == len(bestresult) - 1:
                simplist.append(conclist[idx])
                colors.append('k')
            elif idx == bestidx:
                colors.append('g')
                simplist.append(conclist[idx])
            else:
                simplist.append(conclist[idx])
                colors.append('r')
        graph.plot_2d(simplist, filename, colors)
        print "gen:", evo.gen_count, "fitness:", evo.pop[-1]['fitness']
        if evo.adaptive:
            evo.adapt_mutation()
        res_file = open(filename, "a")
        res_file.write(str(evo.pop[-1]) + '\n')
        res_file.close()

def main():
    grn = GRN()
    grn.read_genome("eoinseed1.txt")
    grn.build_genes()
    #grn.add_gene(0.0, "EXTRA")
    grn.precalc_matrix()
    grn.regulate_matrix(10000, False)
    # for i in range(0, 3):
    #     init_concs = []
    #     for conc in grn.conc_list:
    #         init_concs.append(conc[i])
    #     print "ROUND ", i
    #     print init_concs
    graph.plot_2d(grn.conc_list, 0)

def simple_grn(graphname):
    size = 5
    iterations = 10
    edges = np.zeros([size, size])
    nodes = np.empty([size])
    result_array = [[] for i in range(size)]
    """ Initialise the nodes to have equal concentrations and initialise
    the weight array with vals between -1 and 1 with stepsize 0.1 """
    nodes.fill(1.0 / size)
    weightvals = np.arange(-1, 1.1, 0.1)
    for x in np.nditer(edges, op_flags=['readwrite']):
        #x[...] = float(random.randint(-1,1)) / (size)
        x[...] = round(random.choice(weightvals), 2)
    print "nodes\n", nodes, "\nedges\n", edges
    for itr in range(iterations):
        #nudge a node halfway through
        #if itr == (iterations/2):
        #    nodes[0] = nodes[0] * 0.2
        #calculate concentration changes for the nodes
        change = np.empty(size)
        for i in range(len(nodes)):
            change[i] = np.dot(nodes, edges[i])
        # alter concs and prevent them from going below zero
        print "iter:", itr, "conc:", nodes, "change:", change
        nodes = np.add(nodes, change)
        nodes[nodes < 0] = 0.000001
        #normalize
        # total = sum(nodes)
        # nodes = np.divide(nodes, total)
        #record concentrations
        for i in range(size):
            result_array[i].append(nodes[i])
    graph.plot_2d(result_array, "graph" + str(graphname))

def main():
    import time
    if len(sys.argv) != 2:
        print "Usage: " + sys.argv[0] + " <rseed>"
        sys.exit(1)
    seed = int(sys.argv[1])
    random.seed(seed)

    #read in the compustat data for given companies
    delta = 20
    companies = []
    company_list = ['ARCHER-DANIELS-MIDLAND CO', 'ARTHUR J GALLAGHER & CO',
                    'ASCENA RETAIL GROUP INC', 'ASTEC INDUSTRIES INC',
                    'ASTORIA FINANCIAL CORP', 'BLACKBAUD INC',
                    'BMC SOFTWARE INC', 'BOSTON BEER INC -CL A',
                    'BRADY CORP', 'BRIGGS & STRATTON']
    database = csvreader.CSVReader('compustat.csv')
    for company in company_list:
        companies.append(database.get_company(company))

    #set up the evo strategy
    best_list, mut_list = [], []
    evo = popstrat.Evostrategy(5000, 50)
    children = evo.iterate(evo.pop)
    for i in range(50):
        for child in children:
            start_time = time.time()
            fitness = run_grn(child['genome'], companies, delta)
            child['fitness'] = fitness
            print "fitness:", child['fitness'], "time taken", str(time.time() - start_time)
        children = evo.iterate(children)
        if evo.adaptive:
            evo.adapt_mutation()
        best_list.append(evo.pop[-1]['fitness'])
        mut_list.append(evo.mut_rate)
    graph.plot_2d([best_list], 'bestfit')
    graph.plot_2d([mut_list], 'mutrate')

def main():
    best_list, mut_list = [], []
    evo = Evostrategy(5000)
    fitness = evo.onemax_fitness(evo.parent['genome'])
    indiv = {'genome': evo.parent['genome'], 'fitness': fitness}
    child = evo.iterate(indiv)
    for i in range(100):
        child['fitness'] = evo.onemax_fitness(child['genome'])
        child = evo.iterate(child)
        print evo.parent['fitness']
        if evo.parent['fitness'] == 5000:
            break
        if evo.adaptive:
            evo.adapt_mutation()
        best_list.append(evo.parent['fitness'])
        mut_list.append(evo.mut_rate)
    graph.plot_2d([best_list], 'bestfit')
    graph.plot_2d([mut_list], 'mutrate')

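# Note: Evostrategy.onemax_fitness is not shown in this section. Given the
# 5000-bit genomes and the stopping condition fitness == 5000 above, it is
# presumably a OneMax bit count; a hypothetical standalone equivalent:
def onemax_fitness(genome):
    """OneMax: count the 1 bits in a binary genome (assumed behaviour)."""
    return sum(genome)
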
def singlecore(filename, delta, popsize, generations):
    """Run serially"""
    #set up the evo strategy
    best_list, mut_list = [], []
    evo = popstrat.Evostrategy(5000, popsize)
    children = evo.iterate(evo.pop)
    for i in range(generations):
        for child in children:
            results, conclist = run_grn(child['genome'], delta)
            bestidx = results.index(max(results))
            child['fitness'] = results[bestidx]
            print "fitness:", child['fitness']
        children = evo.iterate(children)
        bestgenome = evo.pop[-1]['genome']
        results, conclist = run_grn(bestgenome, delta)
        filename = "best_gen_" + str(i)
        graph.plot_2d(conclist, filename)
        if evo.adaptive:
            evo.adapt_mutation()
        best_list.append(evo.pop[-1]['fitness'])
        mut_list.append(evo.mut_rate)
    print "best overall fitness", evo.pop[-1]['fitness']
    graph.plot_2d([best_list], 'bestfit')
    graph.plot_2d([mut_list], 'mutrate')

def main():
    # random.seed(2)
    # grn = GRN(delta=1)
    # grn.build_genes()
    # grn.add_extra("EXTRA_sineval", 0.0, [0]*32)
    # grn.precalc_matrix()
    # grn.regulate_matrix(5)
    import time
    start_time = time.time()
    for seed in range(10):
        random.seed(seed)
        stabiter = 10000
        runiter = 1000
        grn = GRN(delta=1)
        grn.build_genes()
        grn.add_extra("EXTRA_sineval", 0.0, [0]*32)
        grn.precalc_matrix()
        grn.regulate_matrix(stabiter)
        onff = 0
        for i in range(0, 50):
            if i % 10 == 0:
                if onff == 1:
                    onff = 0
                else:
                    onff = 1
            inputval = onff
            extra_vals = {'sineval': inputval}
            grn.set_extras(extra_vals)
            grn.regulate_matrix(runiter)
        for conc in grn.conc_list:
            print conc[-1]
        filename = "demo" + str(seed)
        graph.plot_2d(grn.conc_list, filename)
    print "took", str(time.time() - start_time)

def test_damping(self, iterations):
    """Checking if the network stabilises"""
    numpy.set_printoptions(precision=4, linewidth=2000)
    acts = zeros((iterations, self.hidden_size))
    acts[0] = [0.949029602769840, -0.195694633071382, -0.737442723592159,
               0.449467774003339, 0.799036414715420, -0.658597370832685,
               -0.913942617588595, -0.0416815501470254, -0.812127430489660,
               0.300099663301922, 0.904555077842031, -0.0845746012151277,
               0.0737612940120569, -0.867025708396808, -0.0122598328349043,
               -0.164918055240550, -0.415485899463317, -0.420672124930247,
               0.507691993375378, -0.806408576103972]
    # lets see if it dampens out
    for i in range(1, iterations):
        #acts[i] = ((1-self.alpha) * acts[i-1])
        #acts[i] += self.alpha * tanh(dot(acts[i-1],
        #                                 self.weights['hidden']))
        product = dot(acts[i - 1], self.weights['hidden'].T)
        # print "product", product
        # print "tanh", tanh(product)
    graph.plot_2d(acts.T, "damped_activations")

def multicore(filename, syncsize, offset, delta, popsize, generations):
    """Uses parallel python to evaluate, PP can also be used to distribute
    evaluations to machines across the network"""
    #set up the evo strategy
    evo = popstrat.Evostrategy(5000, popsize)
    children = evo.iterate(evo.pop)
    nodes = ("*",)
    job_server = pp.Server(ncpus=8, ppservers=nodes)
    print "Starting pp with", job_server.get_ncpus(), "workers"
    for _ in range(generations):
        jobs = [(child, job_server.submit(run_grn,
                                          (child['genome'], delta,
                                           syncsize, offset), (),
                                          ("cgrn as grn", "numpy", "math")))
                for child in children]
        for child, result in jobs:
            results, conclist = result()
            bestidx = results.index(max(results))
            child['fitness'] = results[bestidx]
        print "gen:", evo.gen_count, "fitness:", evo.pop[-1]['fitness']
        children = evo.iterate(children)
        if evo.adaptive:
            evo.adapt_mutation()
        res_file = open(filename, "a")
        res_file.write(str(evo.pop[-1]) + '\n')
        res_file.close()
    restopname = filename + ".rest"
    #plotting the best with colors
    bestgenome = evo.pop[-1]['genome']
    bestresult, conclist = run_grn(bestgenome, delta, syncsize, offset,
                                   restopname)
    bestidx = bestresult.index(max(bestresult))
    fitness = round(max(bestresult), 1)
    colors = []
    for idx, result in enumerate(bestresult):
        # draw the input in black
        if idx == len(bestresult) - 1:
            colors.append('k')
        # draw the best in green
        elif idx == bestidx:
            colors.append('g')
        # draw p_genes in red
        elif result > 0:
            colors.append('r')
        # draw TFs in blue
        else:
            colors.append('b')
    print "colors", colors
    graphname = filename.split('/')[2]
    graphname = graphname[:-4] + "-F" + str(fitness)
    graph.plot_2d(conclist, graphname, colors, (0, 1))

def train(self, target, trim=100):
    """Calculate weights between hidden layer and output layer for a given
    time series, uses pseudo-inverse training step"""
    acts = zeros((len(target), self.hidden_size))
    summed_acts = []
    acts[0] = [0.949029602769840, -0.195694633071382, -0.737442723592159,
               0.449467774003339, 0.799036414715420, -0.658597370832685,
               -0.913942617588595, -0.0416815501470254, -0.812127430489660,
               0.300099663301922, 0.904555077842031, -0.0845746012151277,
               0.0737612940120569, -0.867025708396808, -0.0122598328349043,
               -0.164918055240550, -0.415485899463317, -0.420672124930247,
               0.507691993375378, -0.806408576103972]
    # create the activations
    self.weights['back'] = self.weights['back'].T
    for i in range(1, len(target)):
        # turn target into array, should it be -1?
        t = array([target[i - 1]])
        # dotting target with back weights as the teacher signal
        activation = tanh(dot(acts[i - 1], self.weights['hidden'].T) +
                          dot(self.weights['back'], t))
        #decay activation by alpha
        acts[i] = ((1 - self.alpha) * acts[i - 1])
        acts[i] += self.alpha * activation
        #print i-1, acts[i-1]
    #trim out the initial activations as they are unstable
    target = target[trim:]
    acts = acts[trim:, :]
    #store activations and plot
    self.acts = acts
    graph.plot_2d(acts.T, "sample_activations")
    #print "last", acts[-1]
    # Pseudo-inverse to train the output and setting weights
    tinv = arctanh(target)
    pinv = linalg.pinv(acts)
    aconj = acts.conjugate()
    tconj = tinv.conjugate()
    pinvaconj = linalg.pinv(aconj)
    numpy.set_printoptions(precision=4, linewidth=2000)
    # print "Pseudo inverse"
    #for row in aconj:
    #    print row
    print "acts", aconj.shape, "targ", tconj.shape
    #self.weights['out'] = dot(pinv, tinv)
    #self.weights['out'] = dot(pinvaconj, tconj)
    self.weights['out'] = linalg.lstsq(aconj, tconj)[0]
    print "\nnumpy lstsq", self.weights['out']
    #self.weights['out'] = linalg.lstsq(aconj, tconj)[0]
    #self.weights['out'] = scialg.lsqr(aconj, tconj)[0]
    #self.weights['out'] = array([-2.62751866452261,0.911724638334178,0,0,0,-9.47053869171588,0,0,-3.90801067449960,0,7.05096786026327,11.5512880240272,7.35498461772589,-0.221961288369604,0,0,0.681830006046591,0,0,-0.458162995895149])
    print "\nnew weights", self.weights['out']
    graph.bar_plot(self.weights['out'], "weights")
    # checking the output against previous activations
    train_out = []
    for act in acts:
        output = tanh(dot(act, self.weights['out']))
        train_out.append(output)
    return train_out

def graph_company(self, company_name):
    """all fields for a given company"""
    data = self.get_company(company_name)
    for key in data:
        graph.plot_2d([data[key]], company_name + ' ' + key)

def main():
    # random.seed(2)
    # stabiter = 10000
    # runiter = 1000
    # grn = GRN(delta=1)
    # grn.build_genes()
    # grn.add_extra("EXTRA_sineval", 0.0, [0]*32)
    # grn.precalc_matrix()
    # grn.regulate_matrix(stabiter)
    # onff = 0
    # for i in range(0, 10):
    #     if i % 10 == 0:
    #         if onff == 1:
    #             onff = 0
    #         else:
    #             onff = 1
    #     extra_vals = {'sineval': onff}
    #     grn.set_extras(extra_vals)
    #     grn.regulate_matrix(10)
    # for i in range(len(grn.conc_list[0])):
    #     concs = []
    #     for j in range(len(grn.conc_list)):
    #         concs.append(grn.conc_list[j][i])
    #     print concs
    import time
    start_time = time.time()
    for seed in range(10):
        random.seed(seed)
        stabiter = 10000
        runiter = 1000
        grn = GRN(delta=1)
        grn.build_genes()
        grn.add_extra("EXTRA_sineval", 0.0, [0] * 32)
        grn.precalc_matrix()
        grn.regulate_matrix(stabiter)
        onff = 0
        for i in range(0, 50):
            if i % 10 == 0:
                if onff == 1:
                    onff = 0
                else:
                    onff = 1
            inputval = onff
            extra_vals = {'sineval': inputval}
            grn.set_extras(extra_vals)
            grn.regulate_matrix(runiter)
        print "rows", len(grn.conc_list), "cols", len(grn.conc_list[0])
        for conc in grn.conc_list:
            print conc[-1]
        filename = "demo" + str(seed)
        graph.plot_2d(grn.conc_list, filename)
    print "took", str(time.time() - start_time)
