def ampli(l, n):
    """Expand the two-way signal ``l`` into an n-element "amplified" vector.

    The first n//2 slots carry max(l) when slot 0 won (else min(l)); the
    remaining slots carry max(l) when slot 1 won (else min(l)).
    """
    hi, lo = max(l), min(l)
    winner = index_max(l)
    first_half_value = hi if winner == 0 else lo
    second_half_value = hi if winner == 1 else lo
    half = n // 2
    return [first_half_value] * half + [second_half_value] * (n - half)
def step_statictics(simu, network, plot, inputs, outputs):
    """Record per-step RMS/perf statistics and accumulate the adaptive
    learning-rate/momentum choices implied by the SoN wager.

    simu: statistics recorder; network: dict of nets ('FoN', 'SoN', 'control');
    plot: mutable counters dict; inputs/outputs: current example.
    """
    # Wager target: [0., 1] when the first-order net answered correctly.
    if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs):
        cell = [0., 1]
    else:
        cell = [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    simu.rms('control', inputs, outputs)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    simu.perf('control', outputs)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    #momentum & lrate
    tmp = list(network['SoN'].stateOutputNeurons)
    # BUG FIX: the original tested `tmp == 0`, comparing a list to an int —
    # always False, so the first branch was unreachable.  The matching
    # step_learn variant in this file uses index_max(tmp) == 0.
    if index_max(tmp) == 0:
        plot['lrate'] += (0.15 + (0.15 - 0.025))
        plot['momentum'] += 0.5
    elif abs(tmp[0] - tmp[1]) <= 0.5:
        plot['lrate'] += 0.15
        plot['momentum'] += 0.5
    else:
        plot['lrate'] += 0.025
        plot['momentum'] += 0.075
def step_statictics(simu, network, plot, inputs, outputs):
    """Record control/SoN statistics, then re-run FoN on SoN's hidden state
    and score the reconstruction."""
    fon = network['FoN']
    son = network['SoN']
    target = index_max(outputs)
    # Wager target encodes whether FoN's answer was right.
    cell = [0., 1] if index_max(fon.stateOutputNeurons) == target else [1, 0.]
    #rms
    simu.rms('control', inputs, outputs)
    simu.rms('SoN', fon.stateHiddenNeurons, cell)
    #err
    simu.perf('control', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(son.stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    # Feed SoN's hidden layer back through FoN; count a hit when it still
    # points at the right answer.
    fon.calc_output(son.stateHiddenNeurons)
    if index_max(fon.stateOutputNeurons) == target:
        plot['FoN_rms'] += 1
    simu.perf('FoN', outputs)
def step_statictics(simu, network, plot, inputs, outputs):
    """FoN/SoN statistics plus a 10-net feedback vote over the concatenated
    hidden layers of both networks."""
    res = [network['feedback'][k].calc_output(
               network['FoN'].stateHiddenNeurons
               + network['SoN'].stateHiddenNeurons)
           for k in range(10)]
    target = index_max(outputs)
    cell = [0., 1] if index_max(network['FoN'].stateOutputNeurons) == target else [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    #feedback
    if index_max(res) == target:
        plot['feedback'] += 1
def step_statictics(simu, network, plot, inputs, outputs):
    """Score the SoN autoencoder section-by-section: its target is the whole
    first-order state (inputs + hidden + outputs)."""
    fon_hidden = network["FoN"].stateHiddenNeurons
    entire_FoN = inputs + fon_hidden + network["FoN"].stateOutputNeurons
    # Section boundaries inside entire_FoN.
    lin = simu.examples.ninputs
    lhi = lin + nbr_hidden
    lout = lhi + simu.examples.noutputs
    # rms
    simu.rms("FoN", inputs, outputs)
    simu.rms("SoN", fon_hidden, entire_FoN)
    # Per-section RMS, weighted by that section's share of the target vector.
    for plot_key, lo, hi in (("SoN_rms_input", 0, lin),
                             ("SoN_rms_hidden", lin, lhi),
                             ("SoN_rms_output", lhi, lout)):
        plot[plot_key] += (network["SoN"].calc_RMS_range(fon_hidden, entire_FoN, lo, hi)
                           / lout * (hi - lo))
    # err
    simu.err("FoN", outputs)
    son_out = network["SoN"].stateOutputNeurons
    if not compare_f(inputs, son_out[0:lin], 0.3):
        plot["SoN_err_input"] += 1
    if not compare_f(fon_hidden, son_out[lin:lhi], 0.3):
        plot["SoN_err_hidden"] += 1
    if index_max(son_out[lhi:lout]) != index_max(network["FoN"].stateOutputNeurons):
        plot["SoN_err_output"] += 1
    # discretize
    simu.discretize("FoN", index_max(outputs), discretize)
def step_statictics(simu, network, plot, inputs, outputs):
    """FoN/SoN statistics plus a 10-net feedback vote fed SoN's hidden layer
    concatenated with its amplified wager output."""
    res = [network['feedback'][k].calc_output(
               network['SoN'].stateHiddenNeurons
               + ampli(network['SoN'].stateOutputNeurons, 6))
           for k in range(10)]
    target = index_max(outputs)
    cell = [0., 1] if index_max(network['FoN'].stateOutputNeurons) == target else [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    #feedback
    if index_max(res) == target:
        plot['feedback'] += 1
def step_statictics(simu, network, plot, inputs, outputs):
    """Record per-step RMS/perf statistics and accumulate the adaptive
    learning-rate/momentum choices implied by the SoN wager.

    simu: statistics recorder; network: dict of nets ('FoN', 'SoN', 'control');
    plot: mutable counters dict; inputs/outputs: current example.
    """
    # Wager target: [0., 1] when the first-order net answered correctly.
    if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs):
        cell = [0., 1]
    else:
        cell = [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    simu.rms('control', inputs, outputs)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    simu.perf('control', outputs)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    #momentum & lrate
    tmp = list(network['SoN'].stateOutputNeurons)
    # BUG FIX: the original tested `tmp == 0` (a list vs an int — always
    # False), making the branch unreachable.  The matching step_learn variant
    # in this file keys the same decision on index_max(tmp) == 0.
    if index_max(tmp) == 0:
        plot['lrate'] += (0.15 + (0.15 - 0.025))
        plot['momentum'] += 0.5
    elif abs(tmp[0] - tmp[1]) <= 0.5:
        plot['lrate'] += 0.15
        plot['momentum'] += 0.5
    else:
        plot['lrate'] += 0.025
        plot['momentum'] += 0.075
def step_statictics(simu, network, plot, inputs, outputs):
    """Control/SoN statistics; FoN is then re-driven from SoN's hidden layer
    and the reconstructed answer is scored."""
    correct = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if correct else [1, 0.]
    #rms
    simu.rms('control', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('control', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 1:
        plot['high_wager'] += 1
    # Reconstruction pass through FoN.
    network['FoN'].calc_output(network['SoN'].stateHiddenNeurons)
    if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs):
        plot['FoN_rms'] += 1
    simu.perf('FoN', outputs)
def step_statictics(simu, network, plot, inputs, outputs):
    """Score the SoN autoencoder per section; hidden width is ninputs // 4."""
    hidden = network['FoN'].stateHiddenNeurons
    entire_FoN = inputs + hidden + network['FoN'].stateOutputNeurons
    # Section boundaries inside entire_FoN.
    lin = simu.examples.ninputs
    lhi = lin + lin // 4
    lout = lhi + simu.examples.noutputs
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', hidden, entire_FoN)
    # Per-section RMS weighted by the section's share of the target vector.
    for key, lo, hi in (('SoN_rms_input', 0, lin),
                        ('SoN_rms_hidden', lin, lhi),
                        ('SoN_rms_output', lhi, lout)):
        plot[key] += (network['SoN'].calc_RMS_range(hidden, entire_FoN, lo, hi)
                      / lout * (hi - lo))
    #err
    simu.err('FoN', outputs)
    son_out = network['SoN'].stateOutputNeurons
    plot['SoN_err_input'] += 1 - compare(inputs, son_out[0:lin])
    if not compare_f(hidden, son_out[lin:lhi], 0.3):
        plot['SoN_err_hidden'] += 1
    if index_max(son_out[lhi:lout]) != index_max(network['FoN'].stateOutputNeurons):
        plot['SoN_err_output'] += 1
    #discretize
    simu.discretize('FoN', index_max(outputs), discretize)
def step_statictics(simu, network, plot, inputs, outputs):
    """Autoencoder scoring variant: SoN reconstructs the whole first-order
    state; RMS and error are tracked per section (input/hidden/output)."""
    fon_hidden = network['FoN'].stateHiddenNeurons
    entire = inputs + fon_hidden + network['FoN'].stateOutputNeurons
    lin = simu.examples.ninputs          # end of the input section
    lhi = lin + lin // 4                 # end of the hidden section
    lout = lhi + simu.examples.noutputs  # end of the output section
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', fon_hidden, entire)
    rms_range = network['SoN'].calc_RMS_range
    plot['SoN_rms_input'] += rms_range(fon_hidden, entire, 0, lin) / lout * (lin - 0)
    plot['SoN_rms_hidden'] += rms_range(fon_hidden, entire, lin, lhi) / lout * (lhi - lin)
    plot['SoN_rms_output'] += rms_range(fon_hidden, entire, lhi, lout) / lout * (lout - lhi)
    #err
    simu.err('FoN', outputs)
    reconstruction = network['SoN'].stateOutputNeurons
    plot['SoN_err_input'] += 1 - compare(inputs, reconstruction[0:lin])
    if not compare_f(fon_hidden, reconstruction[lin:lhi], 0.3):
        plot['SoN_err_hidden'] += 1
    if index_max(reconstruction[lhi:lout]) != index_max(network['FoN'].stateOutputNeurons):
        plot['SoN_err_output'] += 1
    #discretize
    simu.discretize('FoN', index_max(outputs), discretize)
def step_statictics(simu, network, plot, inputs, outputs):
    """Rank-wager statistics: cell one-hots the FoN guess rank (0-9) that
    matched the target; SoN's output picks a rank."""
    target = index_max(outputs)
    fon_out = network['FoN'].stateOutputNeurons
    cell = [0] * 10
    for rank in range(10):
        if index_max_nth(fon_out, rank) == target:
            cell[rank] = 1
            break
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 0:
        plot['high_wager'] += 1
    #feedback: does the rank SoN chose point at the right answer?
    chosen_rank = index_max(network['SoN'].stateOutputNeurons)
    if index_max_nth(fon_out, chosen_rank) == target:
        plot['feedback'] += 1
def step_learn(network, inputs, outputs):
    """One learning step: SoN learns the wager target derived from FoN's
    answer, then FoN learns the example itself."""
    fon_correct = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if fon_correct else [1, 0.]
    #Learning
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    network['FoN'].train(inputs, outputs)
def step_learn(network, inputs, outputs):
    """FoN trains with an amplified SoN wager signal; SoN trains on the
    hidden state captured *before* FoN's weights move."""
    cell = ([0., 1]
            if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
            else [1, 0.])
    #Learning
    hidden_before = list(network['FoN'].stateHiddenNeurons)
    network['FoN'].train(inputs, outputs,
                         ampli(network['SoN'].stateOutputNeurons, 20))
    network['SoN'].train(hidden_before, cell)
    network['control'].train(inputs, outputs)
def step_learn(network, inputs, outputs):
    """Train each of the 10 feedback nets on the concatenated FoN+SoN hidden
    layers, then update SoN (wager) and FoN (example)."""
    for idx in range(10):
        network['feedback'][idx].train(
            network['FoN'].stateHiddenNeurons + network['SoN'].stateHiddenNeurons,
            outputs[idx])
    fon_correct = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if fon_correct else [1, 0.]
    #Learning
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    network['FoN'].train(inputs, outputs)
def step_learn(network, inputs, outputs):
    """Train the 10 feedback nets on SoN's hidden layer plus its amplified
    wager, then update SoN and FoN."""
    for idx in range(10):
        network['feedback'][idx].train(
            network['SoN'].stateHiddenNeurons
            + ampli(network['SoN'].stateOutputNeurons, 6),
            outputs[idx])
    fon_correct = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if fon_correct else [1, 0.]
    #Learning
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    network['FoN'].train(inputs, outputs)
def step_statictics(simu, network, plot, inputs, outputs):
    """Second-guess statistics: a high wager keeps FoN's best answer, a low
    wager substitutes FoN's runner-up answer."""
    target = index_max(outputs)
    cell = [0., 1] if index_max(network['FoN'].stateOutputNeurons) == target else [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    wager = index_max(network['SoN'].stateOutputNeurons)
    #wager ratio
    if wager == 1:
        plot['high_wager'] += 1
    #feedback
    if wager == 1 and index_max(network['FoN'].stateOutputNeurons) == target:
        plot['feedback'] += 1
    if wager == 0 and index_max_nth(network['FoN'].stateOutputNeurons, 1) == target:
        plot['feedback'] += 1
def step_statictics(simu, network, plot, inputs, outputs):
    """Second-guess variant: the SoN wager decides whether FoN's best or
    second-best output is taken as the final answer."""
    fon_hit = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if fon_hit else [1, 0.]
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    son_choice = index_max(network['SoN'].stateOutputNeurons)
    if son_choice == 1:
        #wager ratio
        plot['high_wager'] += 1
        #feedback: confident -> best answer must be right
        if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs):
            plot['feedback'] += 1
    elif son_choice == 0:
        #feedback: doubtful -> runner-up answer counts instead
        if index_max_nth(network['FoN'].stateOutputNeurons, 1) == index_max(outputs):
            plot['feedback'] += 1
def step_learn(network, inputs, outputs):
    """Six SoN gate nets read FoN's hidden layer; their outputs set FoN's
    learning rate and momentum before FoN trains on the example."""
    #Learning
    gates = [network['SoN'][k].calc_output(network['FoN'].stateHiddenNeurons)
             for k in range(6)]
    fon_success = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    # First three gates encode the learning rate, last three the momentum.
    lrate = 0.1 + l_to_lr(gates[0:3]) * 2.5 / 70
    momentum = 0.2 + l_to_lr(gates[3:6]) * 6.5 / 70
    for k in range(6):
        network['SoN'][k].train(fon_success)
    network['FoN'].set_learning_rate(lrate)
    network['FoN'].set_momentum(momentum)
    network['FoN'].train(inputs, outputs)
    network['control'].train(inputs, outputs)
def custom_plot(self, additional):
    """Render the extra plots requested in *additional*.

    Supported flags: Simulation.DISCRETIZE (3-D scatter plus a 2-D cloud of
    the discretized hidden layer per epoch) and Simulation.PROTOTYPE (average
    SoN output image per output class).  Figures are saved under /tmp and
    shown interactively.
    """
    for k in additional:
        if(k == Simulation.DISCRETIZE):
            # Normalise each accumulated discretization by its sample count
            # and by nbDiscre**nbDiscre (presumably the size of the
            # discretized space — TODO confirm).
            for i in range(self.examples.noutputs):
                for j in range(self.nbr_epoch):
                    if(self.plots['discretize_div'][i][j] != 0):
                        self.plots['discretize'][i][j] /= (self.plots['discretize_div'][i][j]
                                                           * (self.nbDiscre ** self.nbDiscre))
            # One color per output class (up to 10 classes).
            colors = [(0.2, 0.8, 0.88), 'b', 'g', 'r', 'c', 'm', 'y', 'k',
                      (0.8, 0.1, 0.8), (0., 0.2, 0.5)]
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            # NOTE: the comprehension variable k shadows the loop variable k
            # lexically, but Python 3 comprehension scope keeps them separate.
            for j in range(self.examples.noutputs):
                ax.scatter([self.plots['discretize'][j][k]
                            for k in self.plots['discretize_valid'][j]],
                           [j] * len(self.plots['discretize_valid'][j]),
                           self.plots['discretize_valid'][j],
                           color=colors[j], marker='x')
            ax.set_xlabel('DISCRETIZED VALUE')
            ax.set_ylabel('SHAPE')
            ax.set_zlabel('EPOCH')
            path = "/tmp/pyplot.%s.%s.png" % (sys.argv[0],
                                              time.strftime("%m-%d-%H-%M-%S", time.localtime()))
            plt.savefig(path)
            plt.show()
            # Second view: 2-D cloud of discretized values over epochs.
            plt.title('Discretize hidden layer')
            plt.ylabel('DISCRETIZED VALUE')
            plt.xlabel("EPOCHS")
            for j in range(self.examples.noutputs):
                plt.plot(self.plots['discretize_valid'][j],
                         [self.plots['discretize'][j][k]
                          for k in self.plots['discretize_valid'][j]],
                         '.', color=colors[j])
            path = "/tmp/pyplot.%s.%s.png" % (sys.argv[0],
                                              time.strftime("%m-%d-%H-%M-%S", time.localtime()))
            # savefig/show can raise ValueError on some backends; degrade to a message.
            try:
                plt.savefig(path)
            except ValueError:
                print('Cannot save discretize_cloud')
            try:
                plt.show()
            except ValueError:
                print('Cannot display discretize_cloud')
        elif(k == Simulation.PROTOTYPE):
            # Sum SoN's reconstruction for every example, bucketed by the
            # example's target class, across all networks.
            lplot = [[0. for _ in range(self.examples.ninputs)]
                     for _ in range(self.examples.noutputs)]
            for network in self.networks:
                for i in range(len(self.examples.inputs)):
                    network['FoN'].calc_output(self.examples.inputs[i])
                    network['SoN'].calc_output(network['FoN'].stateHiddenNeurons)
                    im = index_max(self.examples.outputs[i])
                    for j in range(self.examples.ninputs):
                        lplot[im][j] += network['SoN'].stateOutputNeurons[j]
            fig = plt.figure()
            plt.clf()
            for i in range(self.examples.noutputs):
                rpr.show_repr(lplot[i], self.width, fig, 250 + i, i)
            path = "/tmp/pyplot.%s.%s.png" % (sys.argv[0],
                                              time.strftime("%m-%d-%H-%M-%S", time.localtime()))
            plt.savefig(path)
            plt.show()
def step_learn(network, inputs, outputs):
    """Train SoN to predict which FoN guess rank (0-9) hits the target, then
    train FoN on the example."""
    target = index_max(outputs)
    cell = [0] * 10
    for rank in range(10):
        if index_max_nth(network['FoN'].stateOutputNeurons, rank) == target:
            cell[rank] = 1
            break
    #Learning
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    network['FoN'].train(inputs, outputs)
def step_learn(network, inputs, outputs):
    """SoN's wager (snapshotted before its own update) selects FoN's learning
    rate and momentum for this step."""
    cell = ([0., 1]
            if index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
            else [1, 0.])
    #Learning
    wager = list(network['SoN'].stateOutputNeurons)
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    if index_max(wager) == 0:
        # Doubtful wager: boosted learning rate.
        lrate, momentum = 0.15 + (0.15 - 0.025), 0.5
    elif abs(wager[0] - wager[1]) <= 0.5:
        # Ambiguous wager: moderate settings.
        lrate, momentum = 0.15, 0.5
    else:
        # Confident wager: gentle settings.
        lrate, momentum = 0.025, 0.075
    network['FoN'].set_learning_rate(lrate)
    network['FoN'].set_momentum(momentum)
    network['FoN'].train(inputs, outputs)
    network['control'].train(inputs, outputs)
def step_learn(network, inputs, outputs):
    """Adaptive-rate learning step: the SoN wager (read before SoN trains)
    determines FoN's (learning_rate, momentum) pair."""
    fon_hit = index_max(network['FoN'].stateOutputNeurons) == index_max(outputs)
    cell = [0., 1] if fon_hit else [1, 0.]
    #Learning
    tmp = list(network['SoN'].stateOutputNeurons)
    network['SoN'].train(network['FoN'].stateHiddenNeurons, cell)
    if index_max(tmp) == 0:
        params = (0.15 + (0.15 - 0.025), 0.5)   # doubtful: learn fast
    elif abs(tmp[0] - tmp[1]) <= 0.5:
        params = (0.15, 0.5)                    # ambiguous: moderate
    else:
        params = (0.025, 0.075)                 # confident: learn slowly
    network['FoN'].set_learning_rate(params[0])
    network['FoN'].set_momentum(params[1])
    network['FoN'].train(inputs, outputs)
    network['control'].train(inputs, outputs)
def step_statictics(simu, network, plot, inputs, outputs):
    """Rank-wager statistics: cell one-hots the first FoN guess rank that
    matched; SoN picks a rank and is scored on it."""
    cell = [0 for _ in range(10)]
    k = 0
    while k < 10:
        if index_max_nth(network['FoN'].stateOutputNeurons, k) == index_max(outputs):
            cell[k] = 1
            break
        k += 1
    #rms
    simu.rms('FoN', inputs, outputs)
    simu.rms('SoN', network['FoN'].stateHiddenNeurons, cell)
    #err
    simu.perf('FoN', outputs)
    simu.perf('SoN', cell)
    #wager ratio
    if index_max(network['SoN'].stateOutputNeurons) == 0:
        plot['high_wager'] += 1
    #feedback
    if index_max_nth(network['FoN'].stateOutputNeurons,
                     index_max(network['SoN'].stateOutputNeurons)) == index_max(outputs):
        plot['feedback'] += 1
def sample_landmark(self, landmarks, trajector, usebest=False):
    '''
    Weight by inverse of distance to landmark center and choose
    probabilistically (or take the argmax when usebest is set).
    '''
    probs = self.all_landmark_probs(landmarks, trajector)
    choice = index_max(probs) if usebest else categorical_sample(probs)
    chosen = landmarks[choice]
    head_on = self.get_head_on_viewpoint(chosen)
    self.set_orientations(chosen, head_on)
    return chosen, probs[choice], self.get_entropy(probs), head_on
def sample_relation(self, trajector, bounding_box, perspective, landmark, step=0.02, usebest=False):
    """
    Sample a relation given a trajector and landmark. Evaluate each relation
    and probabilisticaly choose the one that is likely to generate the
    trajector given a landmark.
    """
    rel_probabilities, rel_classes = self.all_relation_probs(trajector, bounding_box, perspective, landmark, step)
    if usebest:
        index = index_max(rel_probabilities)
    else:
        index = categorical_sample(rel_probabilities)
    # NOTE(review): this line unconditionally overwrites `index`, defeating
    # both the usebest branch and categorical_sample above.  It re-samples via
    # an inverse-CDF lookup; rel_probabilities appears to be a numpy array, so
    # `random.sample(1)` is presumably numpy.random.sample — verify, since
    # stdlib random.sample(1) would raise TypeError.  Looks like leftover
    # code; confirm which sampling path is intended before removing either.
    index = rel_probabilities.cumsum().searchsorted( random.sample(1) )[0]
    return rel_classes[index], rel_probabilities[index], self.get_entropy(rel_probabilities)
def sample_landmark(self, landmarks, trajector, usebest=False):
    '''
    Weight by inverse of distance to landmark center; pick the argmax when
    usebest is set, otherwise sample from the categorical distribution.
    '''
    lm_probabilities = self.all_landmark_probs(landmarks, trajector)
    picker = index_max if usebest else categorical_sample
    idx = picker(lm_probabilities)
    sampled_landmark = landmarks[idx]
    # Orient the landmark toward a head-on viewpoint before returning it.
    head_on = self.get_head_on_viewpoint(sampled_landmark)
    self.set_orientations(sampled_landmark, head_on)
    return (sampled_landmark,
            lm_probabilities[idx],
            self.get_entropy(lm_probabilities),
            head_on)
def prototypes(self):
    """Sum the input examples per target class and render each class
    prototype as a 16-wide image."""
    lplot = [[0. for _ in range(self.examples.ninputs)]
             for _ in range(self.examples.noutputs)]
    # Single pass over the examples: each one contributes to the row of its
    # target class (same accumulation order per cell as a class-major scan).
    for k in range(len(self.examples.inputs)):
        cls = index_max(self.examples.outputs[k])
        for j in range(self.examples.ninputs):
            lplot[cls][j] += self.examples.inputs[k][j]
    fig = plt.figure()
    plt.clf()
    for i in range(self.examples.noutputs):
        rpr.show_repr(lplot[i], 16, fig, 250 + i, i)
    path = "/tmp/pyplot.%s.%s.png" % (sys.argv[0],
                                      time.strftime("%m-%d-%H-%M-%S", time.localtime()))
    plt.savefig(path)
    plt.show()
def prototypes(self):
    """Accumulate per-class input prototypes and display one sub-plot per
    output class; the figure is also saved under /tmp."""
    noutputs = self.examples.noutputs
    ninputs = self.examples.ninputs
    lplot = [[0. for _ in range(ninputs)] for _ in range(noutputs)]
    for i in range(noutputs):
        for j in range(ninputs):
            for k in range(len(self.examples.inputs)):
                # Only examples whose target class is i contribute to row i.
                if index_max(self.examples.outputs[k]) != i:
                    continue
                lplot[i][j] += self.examples.inputs[k][j]
    fig = plt.figure()
    plt.clf()
    for i in range(noutputs):
        rpr.show_repr(lplot[i], 16, fig, 250 + i, i)
    path = "/tmp/pyplot.%s.%s.png" % (
        sys.argv[0], time.strftime("%m-%d-%H-%M-%S", time.localtime()))
    plt.savefig(path)
    plt.show()
def sample_relation(self, trajector, bounding_box, perspective, landmark, step=0.02, usebest=False):
    """
    Sample a relation given a trajector and landmark. Evaluate each relation
    and probabilisticaly choose the one that is likely to generate the
    trajector given a landmark.
    """
    rel_probabilities, rel_classes = self.all_relation_probs(
        trajector, bounding_box, perspective, landmark, step)
    if usebest:
        index = index_max(rel_probabilities)
    else:
        index = categorical_sample(rel_probabilities)
    # NOTE(review): `index` is overwritten unconditionally here, so the
    # usebest/categorical_sample choice above has no effect.  This re-samples
    # by inverse CDF; `random.sample(1)` only works if `random` is
    # numpy.random (stdlib random.sample needs two arguments) — verify which
    # sampling path is intended before cleaning this up.
    index = rel_probabilities.cumsum().searchsorted(random.sample(1))[0]
    return rel_classes[index], rel_probabilities[index], self.get_entropy(
        rel_probabilities)
#learning for epoch in range(nbEpoch): err_one_network = {'first_order': 0., 'high_order_10': 0.} for network in networks: l_exx = list(range(len(examples.inputs))) shuffle(l_exx) for ex in l_exx[0:nbTry]: resf1 = [ network['first_order'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs) ] network['high_order_10'].calc_output(examples.inputs[ex]) if (index_max(resf1) != index_max(examples.outputs[ex])): err_one_network['first_order'] += 1 if (index_max(network['high_order_10'].stateOutputNeurons) != index_max(examples.outputs[ex])): err_one_network['high_order_10'] += 1 network['high_order_10'].train(examples.inputs[ex], examples.outputs[ex]) for i in range(nbOutputs): network['first_order'][i].train(examples.inputs[ex], examples.outputs[ex][i]) #add plot err_plot['first_order'].append(err_one_network['first_order'] / (nbTry * nbr_network))
def newtask(l):
    """Move the active (1-hot) output to the mirrored position, in place."""
    winner = index_max(l)
    mirrored = nbOutputs - 1 - winner
    l[winner] = 0.
    l[mirrored] = 1.
def newtask(l):
    """Swap the 1-hot target to its mirror slot (index nbOutputs-1-imax)."""
    imax = index_max(l)
    l[imax], l[nbOutputs - 1 - imax] = 0.0, 1.0
def perf(self, key, outputs):
    """Count a correct top-1 answer for network *key* in its '_perf' plot."""
    net = self.lnetwork[key]
    if index_max(net.stateOutputNeurons) == index_max(outputs):
        self.lavg_plot[key + '_perf'] += 1
def custom_plot(self, additional):
    """Render the extra plots requested in *additional*.

    Supported flags: Simulation.DISCRETIZE (3-D scatter and a 2-D cloud of
    the discretized hidden layer per epoch) and Simulation.PROTOTYPE (average
    SoN output image per output class).  Figures are saved under /tmp and
    shown interactively.
    """
    for k in additional:
        if (k == Simulation.DISCRETIZE):
            # Normalise each accumulated discretization by its sample count
            # and by nbDiscre**nbDiscre (presumably the size of the
            # discretized space — TODO confirm).
            for i in range(self.examples.noutputs):
                for j in range(self.nbr_epoch):
                    if (self.plots['discretize_div'][i][j] != 0):
                        self.plots['discretize'][i][j] /= (
                            self.plots['discretize_div'][i][j] *
                            (self.nbDiscre**self.nbDiscre))
            # One color per output class (up to 10 classes).
            colors = [(0.2, 0.8, 0.88), 'b', 'g', 'r', 'c', 'm', 'y', 'k',
                      (0.8, 0.1, 0.8), (0., 0.2, 0.5)]
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            # NOTE: the comprehension variable k shadows the loop variable k
            # lexically, but Python 3 comprehension scope keeps them separate.
            for j in range(self.examples.noutputs):
                ax.scatter([
                    self.plots['discretize'][j][k]
                    for k in self.plots['discretize_valid'][j]
                ], [j] * len(self.plots['discretize_valid'][j]),
                           self.plots['discretize_valid'][j],
                           color=colors[j],
                           marker='x')
            ax.set_xlabel('DISCRETIZED VALUE')
            ax.set_ylabel('SHAPE')
            ax.set_zlabel('EPOCH')
            path = "/tmp/pyplot.%s.%s.png" % (
                sys.argv[0], time.strftime("%m-%d-%H-%M-%S",
                                           time.localtime()))
            plt.savefig(path)
            plt.show()
            # Second view: 2-D cloud of discretized values over epochs.
            plt.title('Discretize hidden layer')
            plt.ylabel('DISCRETIZED VALUE')
            plt.xlabel("EPOCHS")
            for j in range(self.examples.noutputs):
                plt.plot(self.plots['discretize_valid'][j], [
                    self.plots['discretize'][j][k]
                    for k in self.plots['discretize_valid'][j]
                ], '.', color=colors[j])
            path = "/tmp/pyplot.%s.%s.png" % (
                sys.argv[0], time.strftime("%m-%d-%H-%M-%S",
                                           time.localtime()))
            # savefig/show may raise ValueError on some backends; degrade to a message.
            try:
                plt.savefig(path)
            except ValueError:
                print('Cannot save discretize_cloud')
            try:
                plt.show()
            except ValueError:
                print('Cannot display discretize_cloud')
        elif (k == Simulation.PROTOTYPE):
            # Sum SoN's reconstruction for every example, bucketed by the
            # example's target class, across all networks.
            lplot = [[0. for _ in range(self.examples.ninputs)]
                     for _ in range(self.examples.noutputs)]
            for network in self.networks:
                for i in range(len(self.examples.inputs)):
                    network['FoN'].calc_output(self.examples.inputs[i])
                    network['SoN'].calc_output(
                        network['FoN'].stateHiddenNeurons)
                    im = index_max(self.examples.outputs[i])
                    for j in range(self.examples.ninputs):
                        lplot[im][j] += network['SoN'].stateOutputNeurons[j]
            fig = plt.figure()
            plt.clf()
            for i in range(self.examples.noutputs):
                rpr.show_repr(lplot[i], self.width, fig, 250 + i, i)
            path = "/tmp/pyplot.%s.%s.png" % (
                sys.argv[0], time.strftime("%m-%d-%H-%M-%S",
                                           time.localtime()))
            plt.savefig(path)
            plt.show()
# One training epoch over the samples: first_order reconstructs the input and
# two high_order gates classify the 100-component residual.
for epoch in range(nbEpoch):
    l_exx = list(range(len(samples.inputs)))
    shuffle(l_exx)
    rms_ss = 0.
    rms_ss2 = 0.
    for ex in l_exx:
        first_order.calc_output(samples.inputs[ex])
        #compara
        # Residual between the input and its reconstruction.
        compara = []
        for i in range(100):
            compara.append((samples.inputs[ex][i] - first_order.stateOutputNeurons[i]))
        res2 = [high_order[i].calc_output(list(compara)) for i in range(2)]
        if(index_max(res2) == 0):
            err += 1
        i = index_max(first_order.stateOutputNeurons)
        j = index_max(samples.outputs[ex])
        # "Success" when output and target agree on an active unit, or when
        # both are inactive (all components <= 0.5).
        if ((first_order.stateOutputNeurons[i] > 0.5 and samples.outputs[ex][j] > 0.5 and i == j) \
                or(first_order.stateOutputNeurons[i] <= 0.5 and samples.outputs[ex][j] <= 0.5)) :
            res = [high_order[i].calc_output(compara) for i in range(2)]
            if(index_max(res) == 0):
                rms_ss += 1
            # NOTE(review): this chunk appears truncated — the similar script
            # later in this file also trains high_order[1] and has an else
            # branch; confirm against the full source.
            high_order[0].train(compara, 1.)
'high_order_h' : [], 'high_order_l': []}  # tail of a dict literal opened before this chunk
#learning
for epoch in range(nbEpoch):
    perfo = {'first_order' : 0. , 'high_order_h' : 0., 'high_order_l': 0.}
    for network in networks:
        # Re-seed per (epoch, network) so every run sees the same example order.
        seed(100+epoch*(nbr_network+1)+networks.index(network))
        l_exx = list(range(len(examples.inputs)))
        shuffle(l_exx)
        for ex in l_exx[0:nbTry]:
            network['first_order'].calc_output(examples.inputs[ex])
            # cell = 1 when first_order answered this example correctly.
            cell = 1 \
                if index_max(network['first_order'].stateOutputNeurons) == index_max(examples.outputs[ex]) \
                else 0
            # high_order_h wagers on first_order's success from its hidden layer.
            res = network['high_order_h'].calc_output(network['first_order'].stateHiddenNeurons)
            if(index_max(network['first_order'].stateOutputNeurons) == index_max(examples.outputs[ex])):
                perfo['first_order'] += 1
            if(res == cell):
                perfo['high_order_h'] += 1
            if(res == 1):
                perfo['high_order_l'] += 1
            #learn
            network['high_order_h'].train(res == cell)
def perf(self, key, outputs):
    """Increment *key*'s '_perf' counter when its top output matches the target."""
    predicted = index_max(self.lnetwork[key].stateOutputNeurons)
    if predicted == index_max(outputs):
        self.lavg_plot[key + '_perf'] += 1
# Per-network evaluation pass: accumulate MS errors and top-1 hit counts for
# first_order and both high-order wager nets over ten shuffled examples.
perfo = {'first_order' : [] , 'high_order_h' : [], 'high_order_l': []}
for network in networks:
    perfo_i = {'first_order' : 0. , 'high_order_h' : 0., 'high_order_l': 0.}
    l_exx = list(range(10))
    shuffle(l_exx)
    for ex in l_exx:
        sum_rms['first_order'] += network['first_order'].calc_MS(
            examples.inputs[ex], examples.outputs[ex])
        # Wager target: [mode, 1] when first_order was right, else [1, mode].
        cell = [mode, 1] \
            if index_max(network['first_order'].stateOutputNeurons) == index_max(examples.outputs[ex]) \
            else [1, mode]
        sum_rms['high_order_h'] += network['high_order_h'].calc_MS(
            network['first_order'].stateHiddenNeurons, cell)
        sum_rms['high_order_l'] += network['high_order_l'].calc_MS(
            network['first_order'].stateHiddenNeurons, cell)
        if(index_max(network['first_order'].stateOutputNeurons) == index_max(examples.outputs[ex])):
            perfo_i['first_order'] += 1
        if(index_max(network['high_order_h'].stateOutputNeurons) == index_max(cell)):
            perfo_i['high_order_h'] += 1
        if(index_max(network['high_order_l'].stateOutputNeurons) == index_max(cell)):
            perfo_i['high_order_l'] += 1
def err(self, key, outputs):
    """Count a wrong top-1 answer for network *key* in its '_err' plot."""
    net = self.lnetwork[key]
    if index_max(net.stateOutputNeurons) != index_max(outputs):
        self.lavg_plot[key + '_err'] += 1
def newtask3(l):
    """Shift the active output one slot to the left, wrapping around."""
    winner = index_max(l)
    l[winner] = 0.0
    prev_slot = nbOutputs - 1 if winner == 0 else winner - 1
    l[prev_slot] = 1
def newtask2(l):
    """Rotate the active output one slot to the right, wrapping around."""
    winner = index_max(l)
    l[winner] = 0.0
    nxt = 0 if winner == nbOutputs - 1 else winner + 1
    l[nxt] = 1
seed(100) #learning for epoch in range(nbEpoch): err_one_network = {'first_order' : 0. , 'high_order_10' : 0.} for network in networks: l_exx = list(range(len(examples.inputs))) shuffle(l_exx) for ex in l_exx[0:nbTry]: resf1 = [network['first_order'][i].calc_output(examples.inputs[ex]) for i in range(nbOutputs)] network['high_order_10'].calc_output(examples.inputs[ex]) if(index_max(resf1) != index_max(examples.outputs[ex])): err_one_network['first_order'] += 1 if(index_max(network['high_order_10'].stateOutputNeurons) != index_max(examples.outputs[ex])): err_one_network['high_order_10'] += 1 network['high_order_10'].train(examples.inputs[ex], examples.outputs[ex]) for i in range(nbOutputs): network['first_order'][i].train(examples.inputs[ex], examples.outputs[ex][i]) #add plot err_plot['first_order'].append(err_one_network['first_order'] / (nbTry * nbr_network))
def newtask3(l):
    """Shift the 1-hot target one position backward, modulo nbOutputs."""
    imax = index_max(l)
    l[imax] = 0.
    # (imax - 1) % nbOutputs wraps 0 back to the last slot.
    l[(imax - 1) % nbOutputs] = 1
examples.outputs[ex])  # continuation of a call opened before this chunk
# Full first-order state is the autoencoder target for both high-order nets.
entire_first_order = examples.inputs[ex] + \
    network['first_order'].stateHiddenNeurons + \
    network['first_order'].stateOutputNeurons
sum_rms['high_order_10'] += network['high_order_10'].calc_RMS(
    network['first_order'].stateHiddenNeurons, entire_first_order)
sum_rms['high_order_5'] += network['high_order_5'].calc_RMS(
    network['first_order'].stateHiddenNeurons, entire_first_order)
#error
if(index_max(network['first_order'].stateOutputNeurons) != index_max(examples.outputs[ex])):
    err_one_network['first_order'] += 1
# Slots 25:35 of the reconstructions hold the output section — presumably
# ninputs=25 and nhidden follows; confirm against the network layout.
if(index_max(network['high_order_5'].stateOutputNeurons[25:35]) != index_max(network['first_order'].stateOutputNeurons)):
    err_one_network['high_order_5'] += 1
if(index_max(network['high_order_10'].stateOutputNeurons[25:35]) != index_max(network['first_order'].stateOutputNeurons)):
    err_one_network['high_order_10'] += 1
#learn
network['high_order_10'].train(network['first_order'].stateHiddenNeurons, entire_first_order)
network['high_order_5'].train(network['first_order'].stateHiddenNeurons, entire_first_order)
network['first_order'].train(examples.inputs[ex], examples.outputs[ex])
def newtask2(l):
    """Shift the 1-hot target one position forward, modulo nbOutputs."""
    imax = index_max(l)
    l[imax] = 0.
    # (imax + 1) % nbOutputs wraps the last slot back to 0.
    l[(imax + 1) % nbOutputs] = 1
examples.inputs[ex], examples.outputs[ex])  # continuation of a call opened before this chunk
# Full first-order state is the autoencoder target for both high-order nets.
entire_first_order = examples.inputs[ex] + \
    network['first_order'].stateHiddenNeurons + \
    network['first_order'].stateOutputNeurons
sum_rms['high_order_10'] += network['high_order_10'].calc_RMS(
    network['first_order'].stateHiddenNeurons, entire_first_order)
sum_rms['high_order_5'] += network['high_order_5'].calc_RMS(
    network['first_order'].stateHiddenNeurons, entire_first_order)
#error
if (index_max(network['first_order'].stateOutputNeurons) !=
        index_max(examples.outputs[ex])):
    err_one_network['first_order'] += 1
# Slots 25:35 of the reconstructions hold the output section — presumably
# ninputs=25 and nhidden follows; confirm against the network layout.
if (index_max(
        network['high_order_5'].stateOutputNeurons[25:35]) !=
        index_max(network['first_order'].stateOutputNeurons)):
    err_one_network['high_order_5'] += 1
if (index_max(
        network['high_order_10'].stateOutputNeurons[25:35]) !=
        index_max(network['first_order'].stateOutputNeurons)):
    err_one_network['high_order_10'] += 1
#learn
network['high_order_10'].train(
    network['first_order'].stateHiddenNeurons, entire_first_order)
# One epoch over 80 training patterns: first_order reconstructs the pattern
# and two high_order gates classify the 48-component residual.
for epoch in range(nbEpoch):
    l_exx = list(range(80))
    shuffle(l_exx)
    rms_ss = 0.
    rms_ss2 = 0.
    for ex in l_exx:
        first_order.calc_output(ptrain_pattern[ex][0])
        #compara
        # Residual between the pattern and its reconstruction.
        compara = []
        for i in range(48):
            compara.append(ptrain_pattern[ex][0][i] -
                           first_order.stateOutputNeurons[i])
        res2 = [high_order[i].calc_output(compara) for i in range(2)]
        if (index_max(res2) == 0):
            err += 1
        # Gate training: when the decoded reconstruction matches the target
        # pattern, teach gate 0 "success" and gate 1 "failure".
        if (pattern_to_list(
                first_order.stateOutputNeurons) == pattern_to_list(
                    ptrain_pattern[ex][1])):
            res = [high_order[i].calc_output(compara) for i in range(2)]
            if (index_max(res) == 0):
                rms_ss += 1
            high_order[0].train(compara, 1.)
            high_order[1].train(compara, 0.)
        else:
            the += 1
# Per-network pass: record which discretized hidden state each target class
# lands in, and train the high-order autoencoder on the full first-order state.
for network in networks:
    l_exx = list(range(len(examples.inputs)))
    shuffle(l_exx)
    for ex in l_exx[0:nbTry]:
        #RMS
        network['first_order'].calc_output(examples.inputs[ex])
        entire_first_order = examples.inputs[ex] + \
            network['first_order'].stateHiddenNeurons + \
            network['first_order'].stateOutputNeurons
        network['high_order_10'].calc_output(network['first_order'].stateHiddenNeurons)
        im = index_max(examples.outputs[ex])
        # Histogram: discretized hidden state x target class.
        learned[discretis(network['first_order'].stateHiddenNeurons)][im] += 1
        div[im][epoch] += 1
        dis[im][epoch] += discretis(network['first_order'].stateHiddenNeurons)
        dis2[im][epoch] += index_max(network['first_order'].stateOutputNeurons)
        # Record each epoch at most once per class.
        if(len(valid[im]) == 0):
            valid[im].append(epoch)
        elif(valid[im][len(valid[im])-1] != epoch):
            valid[im].append(epoch)
        #learn
        network['high_order_10'].train(network['first_order'].stateHiddenNeurons, entire_first_order)
def pattern_to_list(pat, nblocks=8, width=6):
    """Decode a concatenated one-hot pattern into per-block winner indices.

    pat: flat sequence of nblocks consecutive blocks, each `width` long.
    Returns a list of nblocks indices, each in range(width).
    Defaults (8 blocks of 6) preserve the original hard-coded behavior;
    callers with other layouts can now pass their own dimensions.
    """
    return [index_max(pat[i * width:(i + 1) * width]) for i in range(nblocks)]
def err(self, key, outputs):
    """Increment *key*'s '_err' counter when its top output misses the target."""
    wrong = index_max(self.lnetwork[key].stateOutputNeurons) != index_max(outputs)
    if wrong:
        self.lavg_plot[key + '_err'] += 1
#trials wins = [] wagers = [] last_output = [ [0 for _ in range(5)] for _ in range(nbr_network)] for epoch in range(nbr_epoch): nbr_win = 0 nbr_gwager = 0 for net in range(nbr_network): for trial in range(nbr_trial): for i in range(len(last_output[net])): last_output[net][i] += randmm(0.01, 0.03) bet = index_max(first_order[net].calc_output(last_output[net])) for i in range(len(first_order[net].stateHiddenNeurons)): first_order[net].stateHiddenNeurons[i] += randmm(0.01, 0.03) wager = index_max(high_order[net].calc_output(first_order[net].stateHiddenNeurons)) win = deck(bet) if((win[0] and wager == 0)): nbr_gwager += 1 if(win[0]): high_order[net].train(first_order[net].stateHiddenNeurons, [1. , grid]) outputs = [grid for _ in range(4)] + [1.]
# Epoch loop: record which discretized hidden state each target class lands
# in; formatted twin of the per-network pass above.
for epoch in range(nbEpoch):
    for network in networks:
        l_exx = list(range(len(examples.inputs)))
        shuffle(l_exx)
        for ex in l_exx[0:nbTry]:
            #RMS
            network['first_order'].calc_output(examples.inputs[ex])
            entire_first_order = examples.inputs[ex] + \
                network['first_order'].stateHiddenNeurons + \
                network['first_order'].stateOutputNeurons
            network['high_order_10'].calc_output(
                network['first_order'].stateHiddenNeurons)
            im = index_max(examples.outputs[ex])
            # Histogram: discretized hidden state x target class.
            learned[discretis(
                network['first_order'].stateHiddenNeurons)][im] += 1
            div[im][epoch] += 1
            dis[im][epoch] += discretis(
                network['first_order'].stateHiddenNeurons)
            dis2[im][epoch] += index_max(
                network['first_order'].stateOutputNeurons)
            # Record each epoch at most once per class.
            if (len(valid[im]) == 0):
                valid[im].append(epoch)
            elif (valid[im][len(valid[im]) - 1] != epoch):
                valid[im].append(epoch)
high_order.append(ho) #trials wins = [] wagers = [] last_output = [[0 for _ in range(5)] for _ in range(nbr_network)] for epoch in range(nbr_epoch): nbr_win = 0 nbr_gwager = 0 for net in range(nbr_network): for trial in range(nbr_trial): # for i in range(len(last_output[net])): # last_output[net][i] += randmm(0.01, 0.03) bet = index_max(first_order[net].calc_output(last_output[net])) # for i in range(len(first_order[net].stateHiddenNeurons)): # first_order[net].stateHiddenNeurons[i] += randmm(0.01, 0.03) wager = index_max(high_order[net].calc_output( first_order[net].stateHiddenNeurons)) win = deck(bet) if ((win[1] and wager == 0) or ((not win[1]) and wager == 1)): nbr_gwager += 1 if (win[1]): high_order[net].train(first_order[net].stateHiddenNeurons, [1., grid])