def insert_sorted_test():
    """Insert 100 randomly-numbered edges and verify that insert_sorted
    keeps the network's edge list ordered by innovation number."""
    net = Network(1, 1, 1)
    for _ in range(100):
        # BUG FIX: random.randint requires both bounds — the original
        # random.randint(1000) raised TypeError on the very first call.
        e = Edge(net.nodes[0], net.nodes[0], random.randint(0, 1000))
        net.insert_sorted(net.edges, e)
    # Edges must come out in non-decreasing innovation order.
    for i in range(1, 100):
        assert (net.edges[i].innv >= net.edges[i - 1].innv)
def trained_model_test():
    """Evolve a NEAT population on the stock environment for 500 epochs.

    Each epoch: pick a random training chunk, compute a momentum-bot
    baseline fitness on that chunk, run one generation, then print
    diagnostics and per-epoch timing before validating.
    """
    # Global topology: 5 features + 1 bias input, 1 output, 20 recurrent nodes.
    Network.setParams(5 + 1, 1, 20)
    pop = Population(100, 5 + 1, 1, 20, stock_environment.Stock_env)
    for epoch in range(500):
        start_time = time.perf_counter()
        # Re-seed the environment so each generation trains on fresh data.
        stock_environment.Stock_env.setRandomStart()
        # Baseline: fitness of a simple momentum strategy on the same chunk.
        baseline = stock_environment.Stock_env.momentum_bot(
            stock_environment.Stock_env.trainingDat[
                stock_environment.Stock_env.random_stock],
            stock_environment.Stock_env.random_chunk_start,
            stock_environment.CHUNK)
        pop.setBaseline(baseline)
        pop.run()
        print(f"Momentum Fitness {baseline}")
        print(f"Perfect Fitness {stock_environment.Stock_env.perfect_bot()}")
        print(f"Chunk size {stock_environment.CHUNK}")
        print(f"Epoch {epoch}")
        print(f"Has {len(pop.population)} # of species")
        print(f"Has {pop.getCurrentPop()} # of members")
        print(f"Edge size {Network.edgeInnv.x}")
        print(f"Node size {Network.nodeInnv.x}")
        print(
            f"Average nodes per network: {np.mean([len([node for node in net.nodes if node.enabled])for species in pop.population for net in species.nets ])}"
        )
        # printNetwork(pop.population[0].nets[0])
        total_time = time.perf_counter() - start_time
        # Anything the named timers didn't capture is reported as 'Untimed'.
        getTimes()['Untimed'] = total_time - totalTime()
        printTimer(scale=total_time)
        print(f"Elapsed time: { total_time}")
        resetTimer()
        # NOTE(review): indentation was lost in this chunk — validate() is
        # assumed to run once per epoch; confirm against the original file.
        pop.validate()
def main(file_name):
    """Run the detector over a saved PSD file and append detections to disk.

    Loads 'Data/<file_name>.npy', normalizes it into [0, 1], slides an
    L x L window over the spectrogram, and writes every detected box to
    'Detections_<file_name>.txt' as CSV rows.
    """
    try:
        data = np.load('Data/' + str(file_name) + '.npy')
        # Clip to the model's expected dynamic range, then scale to [0, 1].
        data = np.clip(data, 0, 5) / 5
    except (OSError, ValueError):
        # Was a bare `except:` — narrowed to load/parse failures so real
        # bugs (KeyboardInterrupt, NameError, ...) are no longer swallowed.
        print("Invalid File Name")
        return
    net = Network()
    net.load()
    # Context manager guarantees the detections file is closed even if
    # prediction raises (the original leaked the handle on error).
    with open('Detections_' + str(file_name) + '.txt', 'a') as f:
        # Step through the recording in non-overlapping L-column chunks.
        for x in np.arange(0, np.shape(data)[1] - config.L, config.L):
            c = data[:, x:x + config.L]
            buffer = ImgBuffer()
            # Slide an L-row window down the chunk one row at a time.
            for n in range(np.shape(c)[0] - config.L):
                currIm = np.reshape(c[n:n + config.L, :],
                                    [1, config.L, config.L])
                pred = net.predict(currIm)
                boxes = process_pred(pred)
                buffer.update_timer()
                buffer.process_new(boxes, n, x)
                buffer.process_existing()
            buffer.clear_buffer()
            np.savetxt(f, buffer.final_array, delimiter=',', newline='\n')
def __init__(self, actions, device, lr=1e-2, gamma=0.99):
    """Set up the policy network, optimizer, and episode memory.

    Args:
        actions: number of discrete actions (policy output size).
        device: torch device the policy runs on.
        lr: Adam learning rate.
        gamma: reward discount factor.
    """
    super(Reinforce, self).__init__()
    self.device = device
    # NOTE(review): input size 4 is hard-coded — presumably the
    # environment's observation size; confirm against the env used.
    self.policy = Network(4, actions).to(device)
    self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
    self.memory = []  # (reward, log_prob) pairs for the current episode
    self.gamma = gamma  # discount factor for returns
    self.max_steps = 10000  # hard cap on episode length
def __init__(self, n_word, turns_to_sleep):
    """Initialize score, sleep schedule, and hearing/speaking networks.

    Args:
        n_word: vocabulary size (length of the one-hot word vectors).
        turns_to_sleep: turns between sleeps; the counter starts at a
            random offset, presumably so agents don't all sleep in sync.
    """
    self.score = 0
    # every turn he goes through +1 when reaching 5 goes to sleep 1 turn
    self.turns_to_sleep = turns_to_sleep
    self.sleep = np.random.randint(0, turns_to_sleep)
    self.now = 0  # 0=ready, 1=sleeping, 2=waiting-other
    # hearing: n_word inputs -> 3 possible actions to do;
    # speaking: 3 action inputs -> n_word outputs (one per word)
    self.hearing = Network([n_word, 1, 3])
    self.speaking = Network([3, 1, n_word])
def main():
    """Stream PSD rows from a USRP over ZMQ and run live detection.

    Maintains a rolling L x L image window; each incoming row scrolls the
    window up by one and the detector is re-run on the updated image.
    """
    file_name = 'tmp'
    USRP_Host = '127.0.0.1'
    data_port = 5678
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect('tcp://%s:%s' % (USRP_Host, data_port))
    socket.setsockopt_string(zmq.SUBSCRIBE, '')
    # BUG FIX: this was named `f` and was silently clobbered by the file
    # handle opened below.
    center_freq = 460
    x = np.linspace(center_freq - 1.25, center_freq + 1.25, 2500).reshape(-1, 1)
    thermalfloor = np.load('ThermalFloorModel.npy')
    relevant_floor = thermalfloor[2245:2245 + 32]
    net = Network()
    net.load()
    f = open('Detections_' + str(file_name) + '.txt', 'a')
    #fig, ax = plt.subplots(1,1)
    #plt.ion()
    imgbuffer = ImgBuffer()
    i = 0
    imgwindow = np.zeros([1, config.L, config.L])
    while True:
        #plt.cla()
        try:
            md = socket.recv_json(flags=0)  # blocks until metadata arrives
            message = socket.recv(flags=zmq.NOBLOCK, copy=True, track=False)
            indata = np.frombuffer(message, dtype=md['dtype'])
            indata = indata.reshape(md['shape'])
            # Scroll the image window up one row...
            for k in range(config.L - 1):
                imgwindow[0, k, :] = imgwindow[0, k + 1, :]
            # ...and append the new row, floor-subtracted and scaled to [0, 1].
            imgwindow[0, -1] = np.clip(
                10. * np.log10(indata) - relevant_floor.T, 0, 5) / 5
            pred = net.predict(imgwindow)
            boxes = process_pred(pred)
            imgbuffer.update_timer()
            imgbuffer.process_new(boxes, i, 2245)
            imgbuffer.process_existing()
            #ax.imshow(imgwindow[0], vmax = 1, vmin = 0)
            #plt.savefig(str(i))
        except zmq.ZMQError:
            # No data ready yet; back off briefly.
            time.sleep(.1)
        except KeyboardInterrupt:
            # BUG FIX: close the detections file too — it was leaked on exit.
            f.close()
            socket.close()
            context.term()
            sys.exit()
        i += 1
class Reinforce(object):
    """REINFORCE (vanilla policy-gradient) agent.

    Stores (reward, log_prob) pairs during a training episode and updates
    the policy with Monte-Carlo discounted returns at episode end.
    """

    def __init__(self, actions, device, lr=1e-2, gamma=0.99):
        super(Reinforce, self).__init__()
        self.device = device
        # Policy network: 4 inputs, one logit per action.
        self.policy = Network(4, actions).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=lr)
        self.memory = []  # (reward, log_prob) pairs for the current episode
        self.gamma = gamma  # discount factor
        self.max_steps = 10000  # hard cap on episode length

    def get_action_and_prob(self, state):
        """Sample an action from the policy; return (action, log_prob)."""
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        action_predictions = self.policy(state).cpu()
        action_probabilities = F.softmax(action_predictions, dim=-1)
        distributions = Categorical(action_probabilities)
        action = distributions.sample()
        return action.item(), distributions.log_prob(action)

    def run_episode(self, env, train):
        """Play one episode; update the policy afterwards when `train`.

        Returns (total_rewards, loss); loss is 0 when not training.
        """
        state = env.reset()
        total_rewards = 0
        loss = 0
        for times_step in range(self.max_steps):
            if not train:
                env.render()
            action, log_prob = self.get_action_and_prob(state)
            state, reward, done, _ = env.step(action)
            total_rewards += reward
            if train:
                self.add_rewards((reward, log_prob))
            if done:
                break
        if train:
            loss = self.train_policy()
        return total_rewards, loss

    def add_rewards(self, data):
        """Append one (reward, log_prob) pair to the episode memory."""
        self.memory.append(data)

    def train_policy(self):
        """One REINFORCE update from the stored episode; returns mean loss."""
        self.policy.train()
        discounted_reward = 0
        self.optimizer.zero_grad()
        policy_loss = []
        # Walk the episode backwards to accumulate discounted returns.
        for reward, prob in reversed(self.memory):
            discounted_reward = reward + self.gamma * discounted_reward
            policy_loss.append(-prob * discounted_reward)
        # BUG FIX: removed a dead `torch.tensor(policy_loss)` statement whose
        # result was discarded (it did nothing but allocate).
        policy_loss = torch.cat(policy_loss)
        policy_loss.sum().backward()
        self.optimizer.step()
        del self.memory[:]  # clear episode memory for the next run
        return policy_loss.mean().item()
def multilayertest():
    """Manual smoke test: add a single edge to a fresh 1-1-1 network and
    print it (the other mutation calls are left commented out for
    experimentation)."""
    n1 = Network(1, 1, 1)
    n1._add_edge(n1.nodes[0], n1.nodes[2], 3)
    # n1._add_edge(n1.nodes[0], n1.nodes[3], 3)
    # n1._add_edge(n1.nodes[1], n1.nodes[2], 3)
    # n1._add_edge(n1.nodes[1], n1.nodes[2], 3)
    # n1._add_edge(n1.nodes[1], n1.nodes[3], 3)
    # n1.mutate_add_edge()
    # n1.mutate_add_node()
    printNetwork(n1)


# NOTE(review): this triple-quote opens a string literal that closes
# somewhere past this chunk — presumably block-commenting the code below.
'''
def crossoverTest():
    """Cross over every pair of 100 randomly-mutated networks and check
    that edge innovation numbers transfer and stay strictly ordered."""
    numInputs = random.randint(1, 6)
    numRnn = random.randint(1, 6)
    numOutputs = random.randint(1, 6)
    numNets = 100
    nets = [None] * numNets
    for netIdx in range(numNets):
        net = Network(numInputs, numOutputs, numRnn, empty=False)
        newNodes = random.randint(1, 6)
        newEdges = random.randint(1, 6)
        for _ in range(newEdges):
            net.mutate_add_edge()
        for _ in range(newNodes):
            net.mutate_add_node()
        nets[netIdx] = net
    for i in range(numNets):
        for j in range(i + 1, numNets):
            newNet = Network.crossover(nets[i], nets[j])
            # Test innovation numbers all transferred over.
            oldInnovation1 = set([edge.innv for edge in nets[i].edges])
            oldInnovation2 = set([edge.innv for edge in nets[j].edges])
            newInnovation = set([edge.innv for edge in newNet.edges])
            assert (len(oldInnovation2) > 0)
            assert (len(oldInnovation1) > 0)
            assert (len(newInnovation) > 0)
            assert (
                oldInnovation1.union(oldInnovation2).issubset(newInnovation))
            # Test edge innovations are in increasing order.
            # BUG FIX: these inner loops used `i` and clobbered the outer
            # pair index, so every later `nets[i]` lookup in the j-loop
            # crossed over the wrong parent. Renamed to `k`.
            for k in range(1, len(newNet.edges)):
                assert (newNet.edges[k].innv > newNet.edges[k - 1].innv)
            for node in newNet.nodes:
                edgesIn = node.edgesIn
                for k in range(1, len(edgesIn)):
                    assert (edgesIn[k].innv > edgesIn[k - 1].innv)
class Agent():  # aka Gino
    """A word-game agent with separate hearing and speaking networks.

    Tracks a score, a sleep counter, and a state flag `now`
    (0 = ready, 1 = sleeping, 2 = waiting for the other agent).
    """

    def __init__(self, n_word, turns_to_sleep):
        self.score = 0
        # Counter gains one per turn; at the threshold the agent sleeps
        # for one turn. Start at a random offset.
        self.turns_to_sleep = turns_to_sleep
        self.sleep = np.random.randint(0, turns_to_sleep)
        self.now = 0  # 0=ready, 1=sleeping, 2=waiting-other
        # Hearing maps an n_word vector to 3 possible actions;
        # speaking maps 3 actions back to the n_word vocabulary.
        self.hearing = Network([n_word, 1, 3])
        self.speaking = Network([3, 1, n_word])

    def reset(self):
        """Return to a fresh ready state with a new random sleep offset."""
        self.score = 0
        self.sleep = np.random.randint(0, self.turns_to_sleep)
        self.now = 0

    def shout_word(self, action_requested):
        """Encode the requested action as a one-hot word vector."""
        spoken = self.speaking.forward_propagation(action_requested)
        return convert_to_onehot(spoken)

    def do_action(self, word, action_requested):
        """True when the heard word decodes to the requested action."""
        decoded = convert_to_onehot(self.hearing.forward_propagation(word))
        return (decoded == action_requested).all()

    def change_score(self, points):
        """Add `points` (may be negative) to the running score."""
        self.score += points

    def check_sleeping(self):
        """Advance the sleep counter; fall asleep at the threshold."""
        if self.sleep >= self.turns_to_sleep:
            # Threshold reached: sleep this turn and restart the counter.
            self.sleep = 0
            self.now = 1
        else:
            self.sleep += 1
            self.now = 0

    def is_ready(self):
        return self.now == 0

    def is_sleeping(self):
        return self.now == 1

    def is_waiting(self):
        return self.now == 2
def run():
    """Round-trip test: a mutated network must produce identical outputs
    after being saved to and reloaded from disk."""
    Network.setParams(5+1,1,2)
    net = Network(5 + 1, 1, 2)
    # Grow the topology with 100 random edge and node mutations.
    for _ in range(100):
        net.mutate_add_edge()
        net.mutate_add_node()
    inputs = [1238, 1238, 1230, 138201, 123]
    NUM_IT = 6
    # Collect NUM_IT consecutive outputs from the original network.
    outputs = [net.feedforward(inputs.copy())[0] for _ in range(NUM_IT)]
    save_model(net, "models/testnet.pkl")
    newModel = load_model("models/testnet.pkl")
    # Collect the same number of outputs from the reloaded copy.
    compareOutputs = [
        newModel.feedforward(inputs.copy())[0] for _ in range(NUM_IT)
    ]
    print(compareOutputs)
    print(outputs)
    assert(compareOutputs == outputs)
def main(file_name):
    """Run the detector over an HDF5 PSD recording.

    Writes raw boxes to a 'detections' dataset (4 x N, one column per box)
    and overlap-merged boxes to 'merged_detections'.
    """
    h5 = h5py.File(file_name, "r+")
    data = np.transpose(h5['psd'])
    data = 10. * np.log10(data)
    # Clip to the model's dynamic range and normalize into [0, 1].
    data = np.clip(data, 0, 5) / 5.
    # Recreate the detections dataset from scratch on every run.
    if 'detections' in h5:
        del h5['detections']
    d = h5.create_dataset("detections", (4, 0),
                          maxshape=(4, None),
                          dtype="float32")
    net = Network()
    net.load()
    for x in np.arange(0, np.shape(data)[1] - config.L, config.L):
        c = data[:, x:x + config.L]
        buffer = ImgBuffer()
        # Slide an L-row window down the chunk, config.f rows at a time.
        for n in range(0, np.shape(c)[0] - config.L, config.f):
            currIm = np.reshape(c[n:n + config.L, :],
                                [1, config.L, config.L])
            pred = net.predict(currIm)
            boxes = process_pred(pred)
            buffer.update_timer()
            buffer.process_new(boxes, n, x)
            buffer.process_existing()
        buffer.clear_buffer()
        # Append this chunk's boxes as new columns of the dataset.
        currsz = d.shape[1]
        d.resize(currsz + len(buffer.final_array), 1)
        for i in np.arange(0, len(buffer.final_array)):
            d[:, i + currsz] = buffer.final_array[i]
    # Merge overlapping boxes with a flood-fill over RectIntersect.
    boxes = np.transpose(h5['detections']).tolist()
    if 'merged_detections' in h5:
        del h5['merged_detections']
    newboxes = []
    while len(boxes):
        new = [boxes.pop()]
        # Iterating `new` while appending grows each group transitively.
        for testbox in new:
            addIdx = []
            for i, box in enumerate(boxes):
                if RectIntersect(testbox, box):
                    new.append(box)
                    addIdx.append(i)
            for i in sorted(addIdx, reverse=True):
                del boxes[i]
        new = np.asarray(new)
        print(new.shape)
        newboxes.append([
            np.min(new[:, 0]),
            np.min(new[:, 1]),
            np.max(new[:, 2]),
            np.max(new[:, 3])
        ])
    newboxes = np.transpose(np.asarray(newboxes))
    print(newboxes.shape)
    if newboxes.shape[0] != 0:
        merged = h5.create_dataset("merged_detections",
                                   newboxes.shape,
                                   dtype="float32")
        merged[:, :] = newboxes
    # BUG FIX: the original `else: del h5['merged_detections']` raised
    # KeyError when no boxes were found — the dataset was already deleted
    # (or never existed) above, so the empty case needs no action.
    h5.close()
def manual_perfect():
    """Hand-wire a minimal network for the MEMORY environment and print
    the fitness its evaluator assigns."""
    env = MEMORY_env
    n = Network(1 + 1, 1, 1, empty=False)
    # NOTE(review): the node indices below depend on Network's internal
    # node ordering (input/bias/output/recurrent) — confirm before reuse.
    n._add_edge(n.nodes[0], n.nodes[4], 1)
    n._add_edge(n.nodes[2], n.nodes[3], 1)
    print(env.evaluate(n))
# biases = set([]) # Check edges don't have same nodes # edges = [] # for species in pop.population: # for net in species.nets: # for edge in net.edges: # if(edge.nodeIn.innv == 2): # biases.add(edge.nodeOut.innv) # edges.append(edge) # print(f"Found {len(biases)} biases") # for e1 in edges: # for e2 in edges: # if (e1.nodeIn.innv == e2.nodeIn.innv and e1.nodeOut.innv == e2.nodeOut.innv): # assert(e1.innv == e2.innv) pop.test() def manual_perfect(): env = MEMORY_env n = Network(1 + 1, 1, 1, empty=False) n._add_edge(n.nodes[0], n.nodes[4], 1) n._add_edge(n.nodes[2], n.nodes[3], 1) print(env.evaluate(n)) Network.setParams(1 + 5, 1, 1) population_test() # manual_perfect()
        # NOTE(review): tail of a function (its `def`, and the loop binding
        # `g`, start before this chunk) — succeed as soon as one network's
        # output on input 0.4 exceeds the threshold n. Indentation assumed.
        if g.forward_propagation(np.array([0.4]))[0] > n:
            return True, g.export(), g
    return False, False


def _find_bests(ginos, n_bests):
    """Return the exported weights of the n_bests highest-scoring networks,
    scored by their output on the fixed input 0.4."""
    results = {}
    for g in ginos:
        # Keyed by score: equal scores collapse to one entry.
        results[g.forward_propagation(np.array([0.4]))[0]] = g.export()
    ret = []
    # sorted() ascending, so the last n_bests keys are the top scorers.
    for key in sorted(results)[-n_bests:]:
        ret.append(list(results[key]))
    return ret


# Evolve a population of 3000 networks until one outputs > 0.999 on input 0.4,
# breeding from the best 800 each generation.
k = 0
n = 0.999
pop = 3000
n_bests = 800
ginos = [Network([1, 5, 1]) for _ in range(pop)]
while not _find_res(ginos, n)[0]:
    k += 1
    bests = _find_bests(ginos, n_bests)
    for i, vect in enumerate(genetic_change(bests, pop)):
        ginos[i]._import(vect)
tmp = _find_res(ginos, n)
print(tmp, tmp[2].forward_propagation(np.array([0.4])), k)
# training data : 60000 samples # reshape and normalize input data X_train = X_train.reshape(X_train.shape[0], 1, 28 * 28) X_train = X_train.astype('float32') X_train /= 255 # encode output which is a number in range [0,9] into a vector of size 10 # e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] y_train = np_utils.to_categorical(y_train) # same for test data : 10000 samples X_test = X_test.reshape(X_test.shape[0], 1, 28 * 28) X_test = X_test.astype('float32') X_test /= 255 y_test = np_utils.to_categorical(y_test) net = Network(Loss.MSE, Loss.MSE_der) net.add(FCLayer(28 * 28, 15)) net.add(ActivationLayer(Activations.Sigmoid, Activations.Sigmoid_der)) net.add(FCLayer(15, 15)) net.add(ActivationLayer(Activations.Sigmoid, Activations.Sigmoid_der)) net.add(FCLayer(15, 10)) net.add(ActivationLayer(Activations.Sigmoid, Activations.Sigmoid_der)) print(X_train.shape) net.fit(X_train, y_train, 0.1, 30) print("Predicted:") rand = np.random.randint(0, X_test.shape[0] - 4) print(net.forward(X_test[rand:rand + 3])) print("True:") print(y_test[rand:rand + 3])
from DataGen import * #from Helper import * from Config import Config from Net import Network from Plotting import * from DataHandle import * import h5py config = Config() #train_data, test_data, train_labels, test_labels = gen_TestTrain(); #plot_25_ims() #plt.show() net = Network() load = False if load: net.load() else: train_data, test_data, train_labels, test_labels = gen_TestTrain() net.train(train_data, train_labels, 100) net.save() datafile = 'Data/1460.npy' exit(0) #try: # data = np.load(datafile) # data = np.clip(data,0,5)/5