def __init__(self, name, nodes):
    """Create a network holding an array of nodes.  An 'X' Origin
    is automatically created which concatenates the values of each
    internal element's 'X' Origin.

    This object is meant to be created using :func:`nef.Network.make_array()`,
    allowing for the efficient creation of neural groups that can represent
    large vectors.  For example, the following code creates a NetworkArray
    consisting of 50 ensembles of 1000 neurons, each of which represents
    10 dimensions, resulting in a total of 500 dimensions represented::

        net = nef.Network('Example Array')
        A = net.make_array('A', neurons=1000, length=50, dimensions=10, quick=True)

    The resulting NetworkArray object can be treated like a normal ensemble,
    except for the fact that when computing nonlinear functions, you cannot
    use values from different ensembles in the computation, as per NEF theory.

    :param string name: the name of the NetworkArray to create
    :param nodes: the nodes to combine together
    :type nodes: list of NEFEnsembles
    """
    NetworkImpl.__init__(self)
    self.name = name
    self.dimension = len(nodes) * nodes[0].dimension
    self._nodes = nodes
    self._origins = {}
    self.neurons = 0
    for n in nodes:
        self.addNode(n)
        self.neurons += n.neurons
    self.multidimensional = nodes[0].dimension > 1
    self.createEnsembleOrigin('X')
    self.setUseGPU(True)
def __init__(self, name = "MotorTransform", mtr_filepath = "", valid_strs = [], vis_dim = 0, \ mtr_dim = 0, neurons_per_dim = 50, inhib_scale = 10.0, tau_in = 0.05, tau_inhib = 0.05, \ in_strs = None, quick = True): NetworkImpl.__init__(self) self.setName(name) net = nef.Network(self, quick) self.dimension = mtr_dim in_terms = [] inhib_terms = [] out_relay = SimpleNEFEns("Output", mtr_dim, pstc = 0.0001, input_name = "") net.add(out_relay) for i,in_str in enumerate(in_strs): x_transform = read_csv(mtr_filepath + in_str + "_x.csv") y_transform = read_csv(mtr_filepath + in_str + "_y.csv") xy_transform = [] transform_w = len(x_transform[0]) transform_h = len(x_transform) for row in range(transform_h): xy_transform.append(x_transform[row]) xy_transform.append(y_transform[row]) # append ignore rows for row in range(mtr_dim - (transform_h * 2)): xy_transform.append(zeros(1,transform_w)) inhib_vec = num_vector(-inhib_scale, 1, len(valid_strs)) inhib_vec[valid_strs.index(in_str)] = 0 ens = net.make_array(in_str, neurons_per_dim, mtr_dim, 1, max_rate = (100,200), quick = quick, storage_code = "%d") ens.addDecodedTermination("Input", xy_transform, tau_in, False) ens.addTermination("Inhib", [[inhib_vec] * neurons_per_dim] * mtr_dim, tau_inhib, False) out_relay.addDecodedTermination(in_str, None, 0.0001, False) net.connect(ens.getOrigin("X"), out_relay.getTermination(in_str)) in_terms.append(ens.getTermination("Input")) inhib_terms.append(ens.getTermination("Inhib")) in_term = EnsembleTermination(net.network, "Input", in_terms) inhib_term = EnsembleTermination(net.network, "Inhib", inhib_terms) net.network.exposeTermination(in_term, "Input") net.network.exposeTermination(inhib_term, "Inhib") net.network.exposeOrigin(out_relay.getOrigin("X"), "X")
def __init__(self, name, N, d, vocab):
    NetworkImpl.__init__(self)
    self.name = name

    scaleFactor = 0.1
    smallN = int(math.ceil(float(N) / d))
    tauPSC = 0.007

    ef1 = RPMutils.defaultEnsembleFactory()
    ef1.nodeFactory.tauRef = 0.001

    test = ef1.make("hypothesis", 1, d)
    test.setMode(SimulationMode.DIRECT)  #since this is just a relay ensemble for modularity
    test.fixMode()
    test.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
    self.addNode(test)
    self.exposeTermination(test.getTermination("input"), "hypothesis")

    combine = ef1.make("combine", 800, 8)
#    combine.setMode(SimulationMode.DIRECT)  #since this is just a relay ensemble for modularity
#    combine.fixMode()
#    combine.collectSpikes(True)
    self.addNode(combine)

    inputVec = [[0] for x in range(8)]

    #create a population for each possible answer
    for i in range(8):
        ans = ef1.make("ans_" + str(i), smallN, 1)
        ans.addDecodedTermination("input", [vocab[i]], tauPSC, False)
        self.addNode(ans)
        self.addProjection(test.getOrigin("X"), ans.getTermination("input"))

        inputVec[i] = [scaleFactor]
        combine.addDecodedTermination("in_" + str(i), inputVec, tauPSC, False)
        inputVec[i] = [0]
        self.addProjection(ans.getOrigin("X"), combine.getTermination("in_" + str(i)))

    self.exposeOrigin(combine.getOrigin("X"), "result")

    if RPMutils.USE_PROBES:
        self.simulator.addProbe("combine", "X", True)
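# What the network above computes, in the ideal case: each "ans_i" population
# decodes the dot product of the hypothesis with vocab[i], and "combine"
# concatenates the eight results, each scaled by scaleFactor.  A plain-Python
# sketch (hypothetical helper, not part of the model):
def ideal_similarity(hypothesis, vocab, scaleFactor=0.1):
    return [scaleFactor * sum(h * v for h, v in zip(hypothesis, vec)) for vec in vocab]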
def __init__(self, name, N, d):
    NetworkImpl.__init__(self)
    self.name = name
    tauPSC = 0.007

    ef = RPMutils.defaultEnsembleFactory()
    netef = networkensemble.NetworkEnsemble(ef)

    #create the approximate inverse matrix
    inv = RPMutils.eye(d, 1)
    for i in range(d / 2):
        tmp = inv[i + 1]
        inv[i + 1] = inv[d - i - 1]
        inv[d - i - 1] = tmp

    #create the two input populations
    Ainv = netef.make("Ainv", N, tauPSC, [inv], None)
    self.addNode(Ainv)

    B = netef.make("B", N, tauPSC, [RPMutils.eye(d, 1)], None)
    self.addNode(B)

    #create circular convolution network
    corr = cconv.Cconv("corr", N, d)
    self.addNode(corr)

    self.addProjection(Ainv.getOrigin("X"), corr.getTermination("A"))
    self.addProjection(B.getOrigin("X"), corr.getTermination("B"))

    #average result
    T = average.Average("T", N, d)
    self.addNode(T)

    self.addProjection(corr.getOrigin("X"), T.getTermination("input"))

    if RPMutils.USE_PROBES:
        self.simulator.addProbe("T", "X", True)
        self.simulator.addProbe("corr", "X", True)
        self.simulator.addProbe("Ainv", "X", True)
        self.simulator.addProbe("B", "X", True)

    self.exposeOrigin(T.getOrigin("X"), "T")
    self.exposeTermination(Ainv.getTermination("in_0"), "A")
    self.exposeTermination(B.getTermination("in_0"), "B")
#    self.exposeTermination(T.getTermination("lrate"), "lrate")
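# The row swaps above build the HRR involution: element 0 stays put and
# elements 1..d-1 are reversed end-for-end.  Convolving with the involution
# of A approximates circular correlation, which undoes a binding A (*) T.
# A standard-library sketch (hypothetical helpers; cconv_ref is a literal
# circular convolution, not the spiking network):
def involution(v):
    return [v[0]] + v[:0:-1]

def cconv_ref(a, b):
    d = len(a)
    return [sum(a[j] * b[(i - j) % d] for j in range(d)) for i in range(d)]

# For unit-length random vectors, cconv_ref(involution(a), cconv_ref(a, t))
# is approximately t, which is why feeding A through `inv` before the Cconv
# network recovers the transformation T.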
def __init__(self, name, N, d):
    NetworkImpl.__init__(self)
    self.name = name
    tauPSC = 0.007

    ef = RPMutils.defaultEnsembleFactory()
    netef = networkensemble.NetworkEnsemble(ef)

    #scale input to integrator (adaptive learning rate)
#    scaler = eprod.Eprod("scaler", N, d, oneDinput=True)
#    self.addNode(scaler)

    #new idea, try a constant scale
    #constant scale on input: 0.4
    #constant scale on recurrent connection: 0.8 (forget rate 0.2)

    #create integrator
    #we scale the input by 1/stepsize because we want the integrator to reach
    #the target value in stepsize*1s, not 1s
    int = integrator.Integrator("int", N, d, inputScale=0.4, forgetRate=0.2,
                                stepsize=RPMutils.STEP_SIZE)
    self.addNode(int)

#    self.addProjection(scaler.getOrigin("X"), int.getTermination("input"))

    if RPMutils.USE_PROBES:
        self.simulator.addProbe("int", "X", True)
#        self.simulator.addProbe("scaler", "X", True)

    self.exposeOrigin(int.getOrigin("X"), "X")
#    self.exposeTermination(scaler.getTermination("A"), "input")
#    self.exposeTermination(scaler.getTermination("B"), "lrate")
    self.exposeTermination(int.getTermination("input"), "input")
def getTerminations(self):
    terminations = NetworkImpl.getTerminations(self)

    decodedTerminations = []
    nonDecodedTerminations = []
    for term in terminations:
        if isinstance(term, NetworkImpl.TerminationWrapper):
            baseTermination = term.getBaseTermination()
        else:
            baseTermination = term

        nodeTerminations = baseTermination.getNodeTerminations()
        if nodeTerminations and isinstance(nodeTerminations[0], DecodedTermination):
            decodedTerminations.append(term)
        elif nodeTerminations and isinstance(nodeTerminations[0], EnsembleTermination):
            nonDecodedTerminations.append(term)

    result = nonDecodedTerminations
    result.extend(decodedTerminations)
    return result
def getUseGPU(self):
    for node in self._nodes:
        if not node.getUseGPU():
            return False
    return NetworkImpl.getUseGPU(self)
def __init__(self, name = "Cleanup Memory", \ in_vec_list = None, out_vec_list = None, tau_in = 0.005, in_scale = 1.0, \ en_inhib = False, tau_inhib = 0.005, tau_smooth = 0.0001, inhib_scale = 2.0, \ en_mut_inhib = False, mut_inhib_scale = 2.0, \ num_neurons_per_vec = 10, threshold = 0.3, \ N_out_vec = None, en_X_out = False, input_name = "Input", \ sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = None, **params): NetworkImpl.__init__(self) self.setName(name) if( mut_inhib_scale <= 0 ): en_mut_inhib = False if( out_vec_list is None ): out_vec_list = in_vec_list self.dimension = len(out_vec_list[0]) if( isinstance(mut_inhib_scale, (int,float)) ): mut_inhib_scale = [mut_inhib_scale] * len(in_vec_list) if( isinstance(inhib_scale, (int,float)) ): inhib_scale = [inhib_scale] * len(in_vec_list) if( isinstance(threshold, (int,float)) ): threshold = [threshold] * len(in_vec_list) in_vec_list = [[in_vec_list[i][d] * in_scale for d in range(len(in_vec_list[i]))] \ for i in range(len(in_vec_list))] self.i_list = [] self.in_vec_list = [] if( str(sim_mode).lower() == 'ideal' ): node = CleanupMemoryNode(name, in_vec_list, out_vec_list, tau_in, en_inhib, tau_inhib, \ threshold = sum(threshold) / len(threshold), en_wta = en_mut_inhib, \ N_out_vec = N_out_vec) self.addNode(node) self.exposeTermination(node.getTermination("Input"), "Input") if( en_inhib ): self.exposeTermination(node.getTermination("Inhib"), "Inhib") self.exposeOrigin(node.getOrigin("Output"), "X") if( en_X_out ): self.exposeOrigin(node.getOrigin("X"), "x0") else: net = nef.Network(self, quick) enss = [] num_items = 0 for out_vec in out_vec_list: if( out_vec is None ): continue else: num_items += 1 in_terms = [] inhib_terms = [] origins = [] en_N_out = not (N_out_vec is None) out_relay = SimpleNEFEns("Output", self.dimension, pstc = tau_smooth) net.add(out_relay) if( en_X_out ): x_relay = SimpleNEFEns("X", num_items + en_N_out, pstc = tau_smooth) net.add(x_relay) for i,in_vec in enumerate(in_vec_list): if( out_vec_list[i] is None ): continue self.in_vec_list.append(in_vec) self.i_list.append(i) pdf = IndicatorPDF(threshold[i] + 0.1, 1) eval_points = [[pdf.sample()[0]] for _ in range(1000)] intercepts = [threshold[i] + n * (1-threshold[i])/(num_neurons_per_vec) for n in range(num_neurons_per_vec)] if( sim_mode == SimulationMode.DIRECT ): ens = SimpleNEFEns("Item" + str(i), 1, input_name = "") net.add(ens) else: ens = net.make("Item" + str(i), num_neurons_per_vec, 1, eval_points = eval_points, \ encoders = [[1]] * num_neurons_per_vec, intercept = intercepts, \ max_rate = (100,200), seed = rand_seed) if( input_name != "" and not input_name is None ): ens.addDecodedTermination(input_name, [in_vec], tau_in, False) in_terms.append(ens.getTermination(input_name)) ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = threshold[i], \ step_val = out_vec_list[i][d]) for d in range(self.dimension)], \ "AXON") enss.append(ens) out_relay.addDecodedTermination("Item" + str(i), None, tau_smooth, False) out_relay.addNeuronCount(ens.getNeuronCount()) net.connect(ens.getOrigin("Output"), out_relay.getTermination("Item" + str(i))) if( en_X_out ): ens.removeDecodedOrigin("X") ens.addDecodedOrigin("X", [FilteredStepFunction(shift = threshold[i])], "AXON") x_relay.addDecodedTermination("Item" + str(i), transpose(delta(num_items + en_N_out, i)), tau_smooth, False) x_relay.addNeuronCount(ens.getNeuronCount()) net.connect(ens.getOrigin("X"), x_relay.getTermination("Item" + str(i))) if( en_inhib ): ens.addTermination("Inhib", [[-inhib_scale[i]]] * 
num_neurons_per_vec, tau_inhib, False) inhib_terms.append(ens.getTermination("Inhib")) if( not N_out_vec is None ): N_threshold = min(threshold) pdf = IndicatorPDF(-0.1, N_threshold - 0.1) eval_points = [[pdf.sample()[0]] for _ in range(1000)] intercepts = [-(n * (N_threshold)/(num_neurons_per_vec)) for n in range(num_neurons_per_vec)] if( sim_mode == SimulationMode.DIRECT ): ens = SimpleNEFEns("ItemN", 1, input_name = "") net.add(ens) else: ens = net.make("ItemN", num_neurons_per_vec, 1, eval_points = eval_points, \ encoders = [[-1]] * num_neurons_per_vec, intercept = intercepts, \ max_rate = (300,400), seed = rand_seed) for i in range(len(in_vec_list)): ens.addDecodedTermination("Item" + str(i), [[1]], 0.005, False) net.connect(enss[i].getOrigin("X"), ens.getTermination("Item" + str(i))) ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = N_threshold, \ step_val = N_out_vec[d], mirror = True) for d in range(self.dimension)], \ "AXON") out_relay.addDecodedTermination("ItemN", None, tau_smooth, False) out_relay.addNeuronCount(ens.getNeuronCount()) net.connect(ens.getOrigin("Output"), out_relay.getTermination("ItemN")) if( en_X_out ): ens.removeDecodedOrigin("X") ens.addDecodedOrigin("X", [FilteredStepFunction(shift = N_threshold, mirror = True)], "AXON") x_relay.addDecodedTermination("ItemN", transpose(delta(num_items + en_N_out, num_items)), tau_smooth, False) x_relay.addNeuronCount(ens.getNeuronCount()) net.connect(ens.getOrigin("X"), x_relay.getTermination("ItemN")) if( en_inhib ): ens.addTermination("Inhib", [[-inhib_scale[i]]] * num_neurons_per_vec, tau_inhib, False) inhib_terms.append(ens.getTermination("Inhib")) if( en_mut_inhib ): for n in range(num_items): for i in range(num_items): if( n != i): enss[i].addTermination("Inhib" + str(n), [[-mut_inhib_scale[i]]] * num_neurons_per_vec, 0.005, False) net.connect(enss[n].getOrigin("X"), enss[i].getTermination("Inhib" + str(n))) if( len(in_terms) > 0 ): in_term = EnsembleTermination(net.network, "Input", in_terms) net.network.exposeTermination(in_term, "Input") net.network.exposeOrigin(out_relay.getOrigin("X"), "X") if( en_X_out ): self.exposeOrigin(x_relay.getOrigin("X"), "x0") if( en_inhib ): inhib_term = EnsembleTermination(net.network, "Inhib", inhib_terms) net.network.exposeTermination(inhib_term, "Inhib") # Reset random seed if( not seed is None ): seed() self.releaseMemory() if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT NetworkImpl.setMode(self, sim_mode) if( sim_mode == SimulationMode.DIRECT ): self.fixMode()
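# Ideal (non-spiking) sketch of the cleanup operation assembled above,
# assuming standard-library Python only (hypothetical helper): each stored
# in_vec is matched against the input by dot product, and every match over
# its threshold contributes its out_vec to the output.  The neural version
# replaces the hard threshold with FilteredStepFunction decoders.
def ideal_cleanup(x, in_vec_list, out_vec_list, threshold=0.3):
    out = [0.0] * len(out_vec_list[0])
    for in_vec, out_vec in zip(in_vec_list, out_vec_list):
        if sum(a * b for a, b in zip(in_vec, x)) > threshold:
            out = [o + v for o, v in zip(out, out_vec)]
    return out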
def __init__(self, name = "Gated Integrator", num_dim = 1, neurons_per_dim = 25, \ tau_fb = 0.05, tau_in = 0.010, tau_buf_in = 0.01, tau_inhib = 0.005, \ in_scale = 1.0, fb_scale = 1.00, inhib_scale = 2.0, input_name = "Input", \ en_reset = False, reset_vec = None, en_cyc_in = True, cyc_opt = 0, \ sim_mode = SimulationMode.DEFAULT, quick = True, mode = 1, rand_seed = None, \ cleanup_vecs = None): self.dimension = num_dim NetworkImpl.__init__(self) self.setName(name) if( not reset_vec is None ): en_reset = True self.init_opt = True else: self.init_opt = False if( str(sim_mode).lower() == 'ideal' ): node = GatedIntNode(name, num_dim, tau_in, en_reset, reset_vec, en_cyc_in, cyc_opt) self.addNode(node) if( not(input_name is None or input_name == "") ): self.exposeTermination(node.getTermination("Input"), input_name) else: node.removeTermination("Input") self.exposeTermination(node.getTermination("Cycle"), "Cycle") if( en_reset ): self.exposeTermination(node.getTermination("Reset"), "Reset") if( not en_cyc_in ): self.exposeTermination(node.getTermination("CycleN"), "CycleN") self.exposeOrigin(node.getOrigin("X"), "X") ## TODO if( cleanup_vecs is None ): print("GINT - Cleanupvecs not implemented yet") else: net = nef.Network(self, quick) nn_per_dim = neurons_per_dim if( mode == 1 ): radius = 1/sqrt(num_dim) * 3.5 else: radius = 1 if( mode == -1 ): eval_points = [[1 - random() * 0.6 + 0.15] for _ in range(2000)] encoders = [[1]] intercept = (0.25,1) else: eval_points = None encoders = None intercept = (-1,1) params = dict(max_rate = (100,200), radius = radius, quick = quick, \ intercept = intercept, encoders = encoders, eval_points = eval_points, \ seed = rand_seed) if( sim_mode == SimulationMode.DIRECT ): inhib_mat = [[-inhib_scale]] if( cleanup_vecs is None ): buffer = SimpleNEFEns("buffer", num_dim, input_name = "") else: buffer = CleanupMem("buffer", cleanup_vecs, num_neurons_per_vec = 1, \ tau_in = tau_buf_in, tau_inhib = tau_inhib, \ en_mut_inhib = True, inhib_scale = inhib_scale, \ en_inhib = en_reset and not self.init_opt, \ threshold = 0.5, sim_mode = sim_mode) feedback = SimpleNEFEns("feedback", num_dim, input_name = "") net.add(buffer) net.add(feedback) else: inhib_mat = [[[-inhib_scale]] * nn_per_dim] * num_dim if( cleanup_vecs is None ): buffer = net.make_array("buffer", nn_per_dim, num_dim, 1, **params) else: buffer = CleanupMem("buffer", cleanup_vecs, num_neurons_per_vec = nn_per_dim, \ tau_in = tau_buf_in, tau_inhib = tau_inhib, \ en_mut_inhib = True, inhib_scale = inhib_scale, \ en_inhib = en_reset and not self.init_opt, threshold = 0.5, \ sim_mode = sim_mode, rand_seed = rand_seed, quick = quick) net.add(buffer) feedback = net.make_array("feedback", nn_per_dim, num_dim, 1, **params) if( cleanup_vecs is None ): buffer.addDecodedTermination("Input", eye(num_dim), tau_buf_in, False) buffer.addDecodedTermination("Feedback", eye(num_dim), 0.005, False) if( en_reset and not self.init_opt ): if( cleanup_vecs is None ): buffer.addTermination("Inhib", inhib_mat, tau_inhib, False) net.network.exposeTermination(buffer.getTermination("Inhib"), "Reset") feedback.addDecodedTermination("Input", diag(num_dim, value = fb_scale), tau_fb, False) feedback.addTermination("Inhib", inhib_mat, tau_inhib, False) if( input_name is None or input_name == "" ): self.num_inputs = 0 else: self.num_inputs = 1 if( not self.init_opt ): if( sim_mode == SimulationMode.DIRECT ): gate = SimpleNEFEns("gate" , num_dim, input_name = "") net.add(gate) else: gate = net.make_array("gate", nn_per_dim, num_dim, 1, **params) 
if( self.num_inputs ): gate.addDecodedTermination("Input", diag(num_dim, value = in_scale), tau_in, False) net.network.exposeTermination(gate.getTermination("Input"), input_name) gate.addTermination("Inhib", inhib_mat, tau_inhib, False) gate_inhib_name = "Inhib" else: gate = Selector("gate", num_dim, nn_per_dim, num_dim, tau_in = [0.005,tau_in], in_scale = in_scale, \ inhib_scale = inhib_scale, **params) gate.addSuppressTerminations([1]) feedback.addTermination("Reset", inhib_mat, 0.005, False) reset_net = Detector("Reset", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed) net.add(reset_net) net.add(gate) net.network.exposeTermination(reset_net.getTermination("Input"), "Reset") if( self.num_inputs ): net.network.exposeTermination(gate.getTermination("Input 1"), input_name) init_val_in = net.make_input("init_val", reset_vec) net.connect(init_val_in , gate.getTermination("Input 2")) net.connect(reset_net.getOrigin("Reset") , gate.getTermination("Suppress 1_2")) net.connect(reset_net.getOrigin("ResetN"), gate.getTermination("Suppress 2")) net.connect(reset_net.getOrigin("Reset") , feedback.getTermination("Reset")) gate_inhib_name = "Suppress 1" net.connect(gate.getOrigin("X") , buffer.getTermination("Input")) net.connect(buffer.getOrigin("X") , feedback.getTermination("Input")) net.connect(feedback.getOrigin("X"), buffer.getTermination("Feedback")) net.network.exposeOrigin(buffer.getOrigin("X"), "X") if( cyc_opt ): gate_inhib_str = ("CycleN") fb_inhib_str = ("Cycle") else: gate_inhib_str = ("Cycle") fb_inhib_str = ("CycleN") if( en_cyc_in ): cyc_net = Detector("Cycle", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed) net.add(cyc_net) net.connect(cyc_net.getOrigin(gate_inhib_str), gate.getTermination(gate_inhib_name)) net.connect(cyc_net.getOrigin(fb_inhib_str) , feedback.getTermination("Inhib")) net.network.exposeTermination(cyc_net.getTermination("Input"), "Cycle") else: net.network.exposeTermination(gate.getTermination(gate_inhib_name), gate_inhib_str) net.network.exposeTermination(feedback.getTermination("Inhib") , fb_inhib_str) # Reset random seed if( not seed is None ): seed() self.releaseMemory() if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT NetworkImpl.setMode(self, sim_mode) if( sim_mode == SimulationMode.DIRECT ): self.fixMode()
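# Behavioural caricature of the gate/buffer/feedback loop built above
# (hypothetical helper, ignoring synaptic dynamics and the cyc_opt polarity):
# while the cycle signal leaves the gate open the buffer tracks the input,
# and while the gate is inhibited the feedback path sustains the stored value.
def gated_int_step(stored, x, gate_open, in_scale=1.0, fb_scale=1.0):
    if gate_open:
        return in_scale * x    # gate open: buffer follows the input
    return fb_scale * stored   # gate closed: feedback holds the value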
class Network:
    """Wraps a Nengo network with a set of helper functions for simplifying
    the creation of Nengo models.

    This system is meant to allow short, concise code to create Nengo models.
    For example, we can make a communication channel like this::

        import nef
        net=nef.Network('Test Network')
        input=net.make_input('input',values=[0])
        A=net.make('A',neurons=100,dimensions=1)
        B=net.make('B',neurons=100,dimensions=1)
        net.connect(input,A)
        net.connect(A,B)
        net.add_to_nengo()

    This will automatically create the necessary origins, terminations,
    ensemble factories, and so on needed to create this network.
    """

    serialVersionUID=1

    def __init__(self,name,quick=False):
        """
        :param name: If a string, create and wrap a new NetworkImpl with the
                     given *name*.  If an existing NetworkImpl, then create a
                     wrapper around that network.
        :type name: string or NetworkImpl
        :param boolean quick: Default setting for the *quick* parameter in
                              :func:`nef.Network.make()`
        """
        if isinstance(name,NetworkImpl):
            self.network=name
        else:
            self.network=NetworkImpl()
            self.network.name=name
        self.defaults=dict(quick=quick)

    def make(self,name,neurons,dimensions,
             tau_rc=0.02,tau_ref=0.002,
             max_rate=(200,400),intercept=(-1,1),
             radius=1,encoders=None,
             decoder_noise=0.1,
             eval_points=None,
             noise=None,noise_frequency=1000,
             mode='spike',add_to_network=True,
             node_factory=None,
             decoder_sign=None,
             quick=None,storage_code=''):
        """Create and return an ensemble of neurons.

        :param string name: name of the ensemble (must be unique)
        :param integer neurons: number of neurons in the ensemble
        :param integer dimensions: number of dimensions to represent
        :param float tau_rc: membrane time constant
        :param float tau_ref: refractory period
        :param max_rate: range for uniform selection of maximum firing rate in Hz
                         (as a 2-tuple) or a list of maximum rate values to use
        :type max_rate: tuple or list
        :param intercept: normalized range for uniform selection of tuning curve
                          x-intercept (as 2-tuple) or a list of intercept values to use
        :type intercept: tuple or list
        :param float radius: representational range
        :param list encoders: list of encoder vectors to use (if None, uniform
                              distribution around unit sphere).  The provided encoders
                              will be automatically normalized to unit length.
        :param float decoder_noise: amount of noise to assume when calculating decoders
        :param list eval_points: list of points to do optimization over
        :param float noise: current noise to inject, chosen uniformly from (-noise,noise)
        :param float noise_frequency: sampling rate (how quickly the noise changes)
        :param string mode: simulation mode ('direct', 'rate', or 'spike')
        :param ca.nengo.model.impl.NodeFactory node_factory: a factory to use instead
                              of the default LIF factory (for creating ensembles with
                              neurons other than LIF)
        :param decoder_sign: +1 for positive decoders, -1 for negative decoders.
                             Set to None to allow both.
        :type decoder_sign: None, +1, or -1
        :param quick: if True, saves data from a created ensemble and will re-use it
                      in the future when creating an ensemble with the same parameters
                      as this one.  If None, uses the Network default setting.
        :type quick: boolean or None
        :param string storage_code: an extra parameter to allow different quick files
                      even if all other parameters are the same
        :param boolean add_to_network: flag to indicate if created ensemble should be
                      added to the network
        :returns: the newly created ensemble
        """
        if( neurons == 0 ):
            raise Exception("nef_core.make - Num neurons = 0")
        if quick is None: quick=self.defaults['quick']
        if quick:
            storage_name='quick_%s_%d_%d_%1.3f_%1.3f'%(storage_code,neurons,dimensions,tau_rc,tau_ref)
            if type(max_rate) is tuple and len(max_rate)==2:
                storage_name+='_%1.1f_%1.1f'%max_rate
            else:
                storage_name+='_%08x'%hash(tuple(max_rate))
            if type(intercept) is tuple and len(intercept)==2:
                storage_name+='_%1.3f_%1.3f'%intercept
            else:
                storage_name+='_%08x'%hash(tuple(intercept))
            if isinstance(radius,list):
                storage_name+='_(%s)_%1.3f'%(''.join(['%1.3f'%x for x in radius]),decoder_noise)
            else:
                storage_name+='_%1.3f_%1.3f'%(radius,decoder_noise)
            if encoders is not None:
                storage_name+='_enc%08x'%hash(tuple([tuple(x) for x in encoders]))
            if decoder_sign is not None:
                storage_name+='_sign%d'%decoder_sign
            if eval_points is not None:
                storage_name+='_eval%08x'%hash(tuple([tuple(x) for x in eval_points]))
            if node_factory is not None:
                storage_name+='_node%s'%node_factory.__class__.__name__
            if not java.io.File(storage_name+'.'+FileManager.ENSEMBLE_EXTENSION).exists():
                dir=java.io.File('quick')
                if not dir.exists(): dir.mkdirs()
                storage_name='quick'+java.io.File.separator+storage_name
        else:
            storage_name=''
        ef=NEFEnsembleFactoryImpl()
        if node_factory is not None:
            ef.nodeFactory=node_factory
        else:
            if type(max_rate) is tuple and len(max_rate)==2:
                mr=IndicatorPDF(max_rate[0],max_rate[1])
            else:
                mr=pdfs.ListPDF(max_rate)
            if type(intercept) is tuple and len(intercept)==2:
                it=IndicatorPDF(intercept[0],intercept[1])
            else:
                it=pdfs.ListPDF(intercept)
            ef.nodeFactory=LIFNeuronFactory(tauRC=tau_rc,tauRef=tau_ref,maxRate=mr,intercept=it)
        if encoders is not None:
            try:
                ef.encoderFactory=generators.FixedVectorGenerator(encoders)
            except:
                raise Exception('encoders must be a matrix where each row is a non-zero preferred direction vector')
        if decoder_sign is not None:
            if decoder_sign<0:
                ef.approximatorFactory=GradientDescentApproximator.Factory(GradientDescentApproximator.CoefficientsSameSign(False),False)
            elif decoder_sign>0:
                ef.approximatorFactory=GradientDescentApproximator.Factory(GradientDescentApproximator.CoefficientsSameSign(True),False)
        else:
            ef.approximatorFactory.noise=decoder_noise
        if eval_points is not None:
            ef.evalPointFactory=generators.FixedEvalPointGenerator(eval_points)
        if isinstance(radius,list):
            r=radius
        else:
            r=[radius]*dimensions
        n=ef.make(name,neurons,r,storage_name,False)
        if noise is not None:
            for nn in n.nodes:
                nn.noise=NoiseFactory.makeRandomNoise(noise_frequency,IndicatorPDF(-noise,noise))
        if mode=='rate' or mode==SimulationMode.RATE:
            n.mode=SimulationMode.RATE
        elif mode=='direct' or mode==SimulationMode.DIRECT:
            n.mode=SimulationMode.DIRECT
        if add_to_network: self.network.addNode(n)
        return n

    def make_array(self,name,neurons,length,dimensions=1,**args):
        """Create and return an array of ensembles.  This acts like a
        high-dimensional ensemble, but actually consists of many sub-ensembles,
        each one representing a separate dimension.  This tends to be much
        faster to create and can be more accurate than having one huge
        high-dimensional ensemble.  However, since the neurons represent
        different dimensions separately, we cannot compute nonlinear
        interactions between those dimensions.

        .. note::
           When forming neural connections from an array to another ensemble
           (or another array), any specified function to be computed will be
           computed on each ensemble individually (with the results
           concatenated together).

        For example, the following code creates an array and then computes
        the sum of the squares of each value within it::

            net=nef.Network('Squaring Array')
            input=net.make_input('input',[0,0,0,0,0])
            A=net.make_array('A',neurons=100,length=5)
            B=net.make('B',neurons=100,dimensions=1)
            net.connect(input,A)
            def square(x):
                return x[0]*x[0]
            net.connect(A,B,transform=[1,1,1,1,1],func=square)

        All of the parameters from :py:func:`nef.Network.make()` can also be used.

        :param string name: name of the ensemble array (must be unique)
        :param integer neurons: number of neurons in the ensemble
        :param integer length: number of ensembles in the array
        :param integer dimensions: number of dimensions each ensemble represents
        :returns: the newly created :class:`nef.array.NetworkArray`
        """
        nodes=[]
        storage_code=args.get('storage_code','')
        for i in range(length):
            if '%' in storage_code: args['storage_code']=storage_code%i
            n=self.make('%d'%i,neurons,dimensions,add_to_network=False,**args)
            nodes.append(n)
        ensemble=array.NetworkArray(name,nodes)
        self.network.addNode(ensemble)
        ensemble.mode=ensemble.nodes[0].mode
        return ensemble

    def make_input(self,name,values,zero_after_time=None):
        """Create and return a FunctionInput of dimensionality ``len(values)``
        with *values* as its constants.  Python functions can be provided
        instead of fixed values.

        :param string name: name of created node
        :param values: numerical values for the function.  If a list, can
                       contain a mixture of floats and functions (floats are
                       fixed input values, and functions are called with the
                       current time and must return a single float).  If values
                       is a function, it will be called with the current time
                       and can return either a single float or a list of floats.
        :type values: list or function
        :param zero_after_time: if not None, any fixed input value will change
                                to 0 after this amount of time
        :type zero_after_time: float or None
        :returns: the created FunctionInput
        """
        funcs=[]
        if callable(values):
            d=values(0)
            if isinstance(d,(tuple,list)):
                for i in range(len(d)):
                    funcs.append(functions.PythonFunction(lambda x,i=i:values(x)[i],time=True))
            else:
                funcs.append(functions.PythonFunction(lambda x:values(x),time=True))
        else:
            for v in values:
                if callable(v):
                    f=functions.PythonFunction(v,time=True)
                elif zero_after_time is None:
                    f=ConstantFunction(1,v)
                else:
                    f=PiecewiseConstantFunction([zero_after_time],[v,0])
                funcs.append(f)
        input=FunctionInput(name,funcs,Units.UNK)
        self.network.addNode(input)
        return input

    def make_fourier_input(self,name,dimensions=None,base=1,high=10,power=0.5,seed=None):
        """Create and return a FunctionInput that randomly varies.  The
        variation is generated by randomly generating fourier components with
        frequencies that are multiples of ``base`` up to ``high``, normalized
        to have an rms power of ``power``.

        :param string name: name of created node
        :param dimensions: dimensionality of the input.  If None, will default
                           to the longest of any of the lists given in the other
                           parameters, or 1 if there are no lists.
        :type dimensions: int or None
        :param base: fundamental (lowest) frequency for the fourier series.
                     If a list, will use different values for each dimension.
                     Default is 1Hz.
        :type base: float or list
        :param high: maximum frequency for the fourier series.  If a list,
                     will use different values for each dimension.
                     Default is 10Hz.
        :type high: float or list
        :param power: RMS power for the random function.  If a list, will use
                      different values for each dimension.  Default is 0.5.
        :type power: float or list
        :param seed: random number seed to use.  If a list, will use different
                     values for each dimension.  If None, a random seed will
                     be chosen.
        :type seed: int or list or None
        :returns: the created FunctionInput
        """
        def fix(x):
            if isinstance(x,(tuple,list)): return x
            else: return [x]
        high=fix(high)
        base=fix(base)
        power=fix(power)
        seed=fix(seed)

        if dimensions is None: dimensions=max(len(base),len(high),len(power),len(seed))

        funcs=[]
        for i in range(dimensions):
            s=seed[i%len(seed)]
            if s is None: s=random.randint(0,0x7ffffff)
            f=FourierFunction(base[i%len(base)],high[i%len(high)],power[i%len(power)],s)
            funcs.append(f)
        input=FunctionInput(name,funcs,Units.UNK)
        self.network.addNode(input)
        return input

    def _parse_pre(self,pre,func,origin_name):
        if isinstance(pre,Origin):
            if func is not None:
                raise Exception('Cannot compute a function from a specified Origin')
            return pre
        elif isinstance(pre,FunctionInput):
            if func is not None:
                raise Exception('Cannot compute a function from a FunctionInput')
            return pre.getOrigin('origin')
        elif isinstance(pre,NEFEnsemble) or (hasattr(pre,'getOrigin') and hasattr(pre,'addDecodedOrigin')):
            if func is not None:
                if isinstance(func,Function):
                    if origin_name is None:
                        fname=func.__class__.__name__
                        if '.' in fname: fname=fname.split('.')[-1]
                    else:
                        fname=origin_name
                    origin=pre.addDecodedOrigin(fname,[func],'AXON')
                else:
                    if origin_name is None:
                        fname=func.__name__
                    else:
                        fname=origin_name
                    try:
                        origin=pre.getOrigin(fname)
                    except StructuralException:
                        origin=None
                    if origin is None:
                        if isinstance(pre,array.NetworkArray):
                            dim=pre._nodes[0].dimension
                        else:
                            dim=pre.dimension
                        value=func([0]*dim)
                        if isinstance(value,(int,float)):
                            origin=pre.addDecodedOrigin(fname,[functions.PythonFunction(func,dim)],'AXON')
                        else:
                            funcs=[functions.PythonFunction(func,dim,use_cache=True,index=i)
                                   for i in range(len(value))]
                            origin=pre.addDecodedOrigin(fname,funcs,'AXON')
                return origin
            else:
                return pre.getOrigin('X')
        else:
            raise Exception('Unknown object to connect from')

    def compute_transform(self,dim_pre,dim_post,
                          weight=1,index_pre=None,index_post=None):
        """Helper function used by :func:`nef.Network.connect()` to create a
        *dim_pre* by *dim_post* matrix.  All values are either 0 or *weight*.
        *index_pre* and *index_post* are used to determine which values are
        non-zero, and indicate which dimensions of the pre-synaptic ensemble
        should be routed to which dimensions of the post-synaptic ensemble.

        For example, with ``dim_pre=2`` and ``dim_post=3``,
        ``index_pre=[0,1],index_post=[0,1]`` means to take the first two
        dimensions of pre and send them to the first two dimensions of post,
        giving a transform matrix of ``[[1,0],[0,1],[0,0]]``.

        If an index is None, the full range [0,1,2,...,N] is assumed, so the
        above example could just be ``index_post=[0,1]``.

        :param integer dim_pre: first dimension of transform matrix
        :param integer dim_post: second dimension of transform matrix
        :param float weight: the non-zero value to put into the matrix
        :param index_pre: the indexes of the pre-synaptic dimensions to use
        :type index_pre: list of integers or a single integer
        :param index_post: the indexes of the post-synaptic dimensions to use
        :type index_post: list of integers or a single integer
        :returns: a two-dimensional transform matrix performing the requested routing
        """
        t=[[0]*dim_pre for i in range(dim_post)]
        if index_pre is None: index_pre=range(dim_pre)
        elif isinstance(index_pre,int): index_pre=[index_pre]
        if index_post is None: index_post=range(dim_post)
        elif isinstance(index_post,int): index_post=[index_post]

        for i in range(max(len(index_pre),len(index_post))):
            pre=index_pre[i%len(index_pre)]
            post=index_post[i%len(index_post)]
            t[post][pre]=weight
        return t

    def connect(self,pre,post,
                transform=None,weight=1,index_pre=None,index_post=None,
                pstc=0.01,func=None,weight_func=None,origin_name=None,
                modulatory=False,plastic_array=False,create_projection=True):
        """Connect two nodes in the network.

        *pre* and *post* can be strings giving the names of the nodes, or they
        can be the nodes themselves (FunctionInputs and NEFEnsembles are
        supported).  They can also be actual Origins or Terminations, or any
        combination of the above.  If *post* is set to an integer or None, an
        origin will be created on the *pre* population, but no other action
        will be taken.

        pstc is the post-synaptic time constant of the new Termination.

        If transform is not None, it is used as the transformation matrix for
        the new termination.  You can also use *weight*, *index_pre*, and
        *index_post* to define a transformation matrix instead.  *weight*
        gives the value, and *index_pre* and *index_post* identify which
        dimensions to connect (see :func:`nef.Network.compute_transform()`
        for more details).  For example::

            net.connect(A,B,weight=5)

        with both A and B as 2-dimensional ensembles, will use
        ``[[5,0],[0,5]]`` as the transform.  Also, you can do::

            net.connect(A,B,index_pre=2,index_post=5)

        to connect the 3rd element in A to the 6th in B.  You can also do::

            net.connect(A,B,index_pre=[0,1,2],index_post=[5,6,7])

        to connect multiple elements.

        If *func* is not None, a new Origin will be created on the
        pre-synaptic ensemble that will compute the provided function.  The
        name of this origin will be taken from the name of the function, or
        from *origin_name*, if provided.  If an origin with that name already
        exists, the existing origin will be used rather than creating a new one.

        If *weight_func* is not None, the connection will be made using a
        synaptic connection weight matrix rather than a DecodedOrigin and a
        DecodedTermination.  The computed weight matrix will be passed to the
        provided function, which is then free to modify any values in that
        matrix, returning a new one that will actually be used.  This allows
        for direct control over the connection weights, rather than just using
        the ones computed via the NEF methods.

        :param pre: The item to connect from.  Can be a string (the name of
                    the ensemble), an Ensemble (made via
                    :func:`nef.Network.make()`), an array of Ensembles (made
                    via :func:`nef.Network.make_array()`), a FunctionInput
                    (made via :func:`nef.Network.make_input()`), or an Origin.
        :param post: The item to connect to.  Can be a string (the name of the
                     ensemble), an Ensemble (made via
                     :func:`nef.Network.make()`), an array of Ensembles (made
                     via :func:`nef.Network.make_array()`), or a Termination.
        :param transform: The linear transform matrix to apply across the
                          connection.  If *transform* is T and *pre*
                          represents ``x``, then the connection will cause
                          *post* to represent ``Tx``.  Should be an M by N
                          matrix, where M is the dimensionality of *post* and
                          N is the dimensionality of *pre*, but a
                          1-dimensional array can be given if either N or M is 1.
        :type transform: array of floats
        :param float pstc: post-synaptic time constant for the
                           neurotransmitter/receptor implementing this connection
        :param float weight: scaling factor for a transformation defined with
                             *index_pre* and *index_post*.  Ignored if
                             *transform* is not None.
                             See :func:`nef.Network.compute_transform()`
        :param index_pre: the indexes of the pre-synaptic dimensions to use.
                          Ignored if *transform* is not None.
                          See :func:`nef.Network.compute_transform()`
        :type index_pre: list of integers or a single integer
        :param index_post: the indexes of the post-synaptic dimensions to use.
                           Ignored if *transform* is not None.
                           See :func:`nef.Network.compute_transform()`
        :type index_post: list of integers or a single integer
        :param function func: function to be computed by this connection.  If
                              None, computes ``f(x)=x``.  The function takes a
                              single parameter x, which is the current value of
                              the *pre* ensemble, and must return either a
                              float or an array of floats.  For example::

                                  def square(x):
                                      return x[0]*x[0]
                                  net.connect(A,B,func=square)

                                  def powers(x):
                                      return x[0],x[0]**2,x[0]**3
                                  net.connect(A,B,func=powers)

                                  def product(x):
                                      return x[0]*x[1]
                                  net.connect(A,B,func=product)

        :param string origin_name: The name of the origin to create to compute
                                   the given function.  Ignored if func is None.
                                   If an origin with this name already exists,
                                   the existing origin is used instead of
                                   creating a new one.
        :param weight_func: if not None, converts the connection to use an
                            explicit connection weight matrix between each
                            neuron in the ensembles.  This is mathematically
                            identical to the default method (which simply uses
                            the stored encoders and decoders for the ensembles),
                            but much slower, since we are no longer taking
                            advantage of the factorable weight matrix.
                            However, using weight_func also allows explicit
                            control over the individual connection weights, as
                            the computed weight matrix is passed to
                            *weight_func*, which can make changes to the matrix
                            before returning it.
        :type weight_func: function or None
        :param boolean modulatory: whether the created connection should be
                                   marked as modulatory, meaning that it does
                                   not directly affect the input current to the
                                   neurons, but instead may affect internal
                                   parameters in the neuron model.
        :param boolean plastic_array: configure the connection to be learnable.
                                      See :func:`nef.Network.learn()`.
        :param boolean create_projection: flag to disable actual creation of
                                          the connection.  If False, any needed
                                          Origin and/or Termination will be
                                          created, and the return value will be
                                          the tuple ``(origin,termination)``
                                          rather than the created projection
                                          object.
        :returns: the created Projection, or ``(origin,termination)`` if
                  *create_projection* is False.
        """
        if isinstance(pre,str):
            pre=self.network.getNode(pre)
        if isinstance(post,str):
            post=self.network.getNode(post)

        # check if pre and post are set if a projection is to be created
        if( create_projection ):
            msg_str=""
            if( pre is None ):
                msg_str+="(pre is not defined)"
            if( post is None ):
                msg_str+="(post is not defined)"
            if( len(msg_str)>0 ):
                raise Exception("nef_core.connect create_projection - "+msg_str)

        # determine the origin and its dimensions
        origin=self._parse_pre(pre,func,origin_name)
        dim_pre=origin.dimensions

        # check for the special case of being given a pre-existing termination
        if isinstance(post,Termination):
            self.network.addProjection(origin,post)
            return

        if isinstance(post,int):
            dim_post=post
        elif post is None:
            dim_post=1
        else:
            dim_post=post.dimension

        if transform is None:
            transform=self.compute_transform(dim_pre,dim_post,weight,index_pre,index_post)
        else:
            # handle 1-d transform vectors by changing to 1xN or Nx1
            if isinstance(transform[0],(int,float)):
                if dim_pre==1:
                    transform=[[x] for x in transform]
                elif dim_post==1:
                    transform=[transform]
                else:
                    raise Exception("Don't know how to turn %s into a %sx%s matrix"%(transform,dim_post,dim_pre))
            elif len(transform)!=dim_post and len(transform[0])!=dim_pre:
                raise Exception("transform must be a %dx%d matrix"%(dim_post,dim_pre))

        if plastic_array:
            suffix=''
            attempts=1
            while attempts<100:
                try:
                    if hasattr(origin,'decoders'):
                        term=post.addPlasticTermination(pre.name+suffix,transform,pstc,
                                                        origin.decoders,weight_func)
                    else:
                        term=post.addPlasticTermination(pre.name+suffix,transform,pstc,
                                                        [[0.0]*pre.dimension]*pre.neurons,weight_func)
                    break
                except StructuralException,e:
                    exception=e
                    attempts+=1
                    suffix='(%d)'%attempts
            else:
                raise exception  #StructuralException('cannot create termination %s'%pre.name)
            return self.network.addProjection(pre.getOrigin('AXON'),term)

        if weight_func is not None:
            # calculate weights and pass them to the given function
            decoder=origin.decoders
            encoder=post.encoders
            w=MU.prod(encoder,MU.prod(transform,MU.transpose(decoder)))  #gain is handled elsewhere
            w=weight_func(w)
            term=post.addPESTermination(pre.name,w,pstc,False)
            if not create_projection: return pre.getOrigin('AXON'),term
            return self.network.addProjection(pre.getOrigin('AXON'),term)
        elif (post is not None) and (not isinstance(post,int)):
            suffix=''
            attempts=1
            while attempts<100:
                try:
                    term=post.addDecodedTermination(pre.name+suffix,transform,pstc,modulatory)
                    break
                except StructuralException,e:
                    exception=e
                    attempts+=1
                    suffix='(%d)'%attempts
            else:
                raise exception  #StructuralException('cannot create termination %s'%pre.name)

        if post is None or isinstance(post,int): return origin
        if not create_projection: return origin,term
        return self.network.addProjection(origin,term)
def makePopulation(self, name, N, tauPSC, matrices, outputfuncs):
    """Create a network ensemble that doesn't split by dimension (just used
    to save memory when running in direct mode)."""
    numin = len(matrices)
    d = len(matrices[0])

    pop = self.ef.make(name, N, d)

    Main = NetworkImpl()
    Main.name = name
    Main.addNode(pop)

    for i in range(numin):
        pop.addDecodedTermination("in_" + str(i), matrices[i], tauPSC, False)
        Main.exposeTermination(pop.getTermination("in_" + str(i)), "in_" + str(i))

    if outputfuncs != None:
        pop.addDecodedOrigin("output", outputfuncs, "AXON")
        Main.exposeOrigin(pop.getOrigin("output"), "X")
    else:
        Main.exposeOrigin(pop.getOrigin("X"), "X")

    return Main
def makeNetwork(self, name, N, tauPSC, matrices, outputfuncs):
    """Create a network ensemble that splits by dimension."""
    Main = NetworkImpl()
    Main.name = name

    numin = len(matrices)  #number of inputs
    din = [0 for i in range(numin)]  #dimension of each input
    for i in range(numin):
        din[i] = len(matrices[i][0])
    dout = len(matrices[0])  #dimension of output

    smallN = int(math.ceil(float(N) / dout))  #neurons per population
    defef = RPMutils.defaultEnsembleFactory()

    #create input populations (just relay nodes)
    inputs = []
    for i in range(numin):
        inputs = inputs + [defef.make("in_" + str(i), 1, din[i])]
        inputs[i].addDecodedTermination("input", RPMutils.eye(din[i], 1), 0.0001, False)
        Main.exposeTermination(inputs[i].getTermination("input"), "in_" + str(i))
        Main.addNode(inputs[i])
        inputs[i].setMode(SimulationMode.DIRECT)
        inputs[i].fixMode()

    #output population (another relay node)
    output = defef.make("output", 1, dout)
    Main.exposeOrigin(output.getOrigin("X"), "X")
    Main.addNode(output)
    output.setMode(SimulationMode.DIRECT)
    output.fixMode()

    resultTerm = [[0] for x in range(dout)]

    #create dimension populations
    for i in range(dout):
        pop = self.ef.make("mid_" + str(i), smallN, 1)
        Main.addNode(pop)

        for j in range(numin):
            pop.addDecodedTermination("in_" + str(j), [matrices[j][i]], tauPSC, False)
            Main.addProjection(inputs[j].getOrigin("X"), pop.getTermination("in_" + str(j)))

        resultTerm[i] = [1]
        output.addDecodedTermination("in_" + str(i), resultTerm, 0.0001, False)
        resultTerm[i] = [0]

        if outputfuncs == None:
            Main.addProjection(pop.getOrigin("X"), output.getTermination("in_" + str(i)))
        else:
            pop.addDecodedOrigin("output", [outputfuncs[i]], "AXON")
            Main.addProjection(pop.getOrigin("output"), output.getTermination("in_" + str(i)))

    return Main
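# Ideal sketch of what the split network computes (standard-library Python;
# hypothetical helper): output dimension i is f_i applied to the weighted sum
# of the inputs through row i of each matrix, so no population ever has to
# represent more than one dimension.
def ideal_split(inputs, matrices, outputfuncs=None):
    dout = len(matrices[0])
    mids = [sum(sum(m[i][k] * x[k] for k in range(len(x)))
                for m, x in zip(matrices, inputs))
            for i in range(dout)]
    if outputfuncs is not None:
        mids = [f(m) for f, m in zip(outputfuncs, mids)]
    return mids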
def __init__(self, name = "Vision", file_path = "", in_dim = 28*28, tau_psc = 0.0001, \ mu_filename = "", output_vecs = None, mut_inhib_scale = 2, net = None, \ en_bypass = False, en_norm = False, en_neuron_rep = False, \ sim_mode = SimulationMode.DEFAULT, sim_mode_cleanup = None, quick = True): if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT if( sim_mode == SimulationMode.DIRECT ): N = 1 N2 = 1 else: N = 3 # Number of neurons for layer 1 to 3 N2 = 10 # Number of neurons for layer 4 N3 = 20 # Number of neurons (per dim) for layer N NN = 20 R1 = 7#50#10 R2 = 7#20#2 R3 = 7#15#5 R4 = 1.5#2 RN = 1.5 I1 = (-0.3,1)#(-0.1,0.5) I2 = (-0.3,1)#(-0.125,0.75) I3 = (-0.3,1)#(-0.15,0.8) I4 = (-1,1) IN = (-1,1) E1 = [[1]] E2 = [[1]] E3 = [[1]] E4 = None EN = None NetworkImpl.__init__(self) if( net is None ): net = nef.Network(self, quick) self.setName(name) def transform(x): return 1.0/(1 + exp(-x[0])) # params = dict(max_rate = (100,200), quick = quick, encoders=[[1]], intercept=(0,0.8), mode = sim_mode) # params = dict(max_rate = (50,100), quick = quick, encoders=[[1]], intercept=(-0.1,0.8), mode = sim_mode) # params = dict(max_rate = (50,60), mode = sim_mode, tau_ref=0.005) params = dict(max_rate = (50,60), mode = SimulationMode.DIRECT, tau_ref=0.005) ## HARDCODED DIRECT SIM MODE in_ens = net.make('Input', 1, in_dim) net.network.exposeTermination(in_ens.addDecodedTermination("Input", eye(in_dim), 0.0001, False), "Input") in_ens.setMode(SimulationMode.DIRECT) in_ens.fixMode() w1 = read_csv(file_path + 'mat_1_w.csv') b1 = read_csv(file_path + 'mat_1_b.csv') layer1 = net.make_array('layer1', N, len(w1[0]), radius = R1, intercept = I1, encoders = E1, **params) bias1 = net.make_input('bias1', b1[0]) net.connect(bias1, layer1) net.connect(in_ens, layer1, transform = numeric.array(w1).T, pstc = tau_psc) w2 = read_csv(file_path + 'mat_2_w.csv') b2 = read_csv(file_path + 'mat_2_b.csv') layer2 = net.make_array('layer2', N, len(w2[0]), radius = R2, intercept = I2, encoders = E2, **params) bias2 = net.make_input('bias2', b2[0]) net.connect(bias2, layer2) net.connect(layer1, layer2, func = transform, transform = numeric.array(w2).T, pstc = tau_psc) w3 = read_csv(file_path + 'mat_3_w.csv') b3 = read_csv(file_path + 'mat_3_b.csv') layer3 = net.make_array('layer3', N, len(w3[0]), radius = R3, intercept = I3, encoders = E3, **params) bias3 = net.make_input('bias3', b3[0]) net.connect(bias3, layer3) net.connect(layer2, layer3, func = transform, transform = numeric.array(w3).T, pstc = tau_psc) w4 = read_csv(file_path + 'mat_4_w.csv') b4 = read_csv(file_path + 'mat_4_b.csv') layer4 = net.make_array('layer4', N2, len(w4[0]), radius = R4, intercept = I4, encoders = E4, **params) bias4 = net.make_input('bias4', b4[0]) net.connect(bias4, layer4) net.connect(layer3, layer4, func = transform, transform = numeric.array(w4).T, pstc = tau_psc) if( en_norm ): if( sim_mode == SimulationMode.DIRECT ): sim_mode_N = SimulationMode.RATE else: sim_mode_N = sim_mode # layerN = net.make('layerN', N3 * len(w4[0]), len(w4[0]), max_rate = (50,60), radius = RN, intercept = IN, encoders = EN, quick = quick, mode = sim_mode_N) layerN = net.make('layerN', N3 * len(w4[0]), len(w4[0]), radius = RN, intercept = IN, encoders = EN, quick = quick, **params) net.connect(layer4, layerN, pstc = tau_psc) layerN.fixMode() else: layerN = layer4 if( en_neuron_rep ): layerNeur = net.make_array('layerNeur', NN * len(w4[0]), len(w4[0]), \ radius = RN, intercept = IN, encoders = EN, quick = quick, \ max_rate = (100,200), mode = 
SimulationMode.DEFAULT, tau_ref=0.005) net.connect(layer4, layerNeur, pstc = tau_psc) layerNeur.fixMode() if( output_vecs is None or mu_filename == "" ): net.network.exposeOrigin(layerN.getOrigin("X"), "X") self.dimension = len(w4[0]) else: if( sim_mode_cleanup is None ): sim_mode_cleanup = sim_mode visual_am = make_VisHeir_AM(net, "Vision Assoc Mem", file_path, mu_filename, output_vecs, \ mut_inhib_scale, sim_mode_cleanup, quick) net.connect(layerN.getOrigin("X"), visual_am.getTermination("Input")) if( en_bypass ): net.network.exposeOrigin(layerN.getOrigin("X"), "Vis Raw") net.network.exposeOrigin(visual_am.getOrigin("X"), "X") self.dimension = len(output_vecs[0]) self.setMode(sim_mode) if( sim_mode == SimulationMode.DIRECT): self.fixMode() self.releaseMemory()
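# Plain-Python sketch of the feedforward pass the layers above implement
# (hypothetical helper; in the model the logistic is decoded neurally on each
# inter-layer connection, and the final layer stays linear).  Each W here has
# shape (in_dim, out_dim), matching the numeric.array(w).T transforms:
def mlp_forward(x, weights, biases):
    from math import exp
    a = x
    for layer, (W, b) in enumerate(zip(weights, biases)):
        z = [sum(W[k][j] * a[k] for k in range(len(a))) + b[j] for j in range(len(b))]
        # sigmoid between layers; the last layer's value is read out linearly
        a = z if layer == len(biases) - 1 else [1.0 / (1.0 + exp(-v)) for v in z]
    return a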
def setMode(self, sim_mode):
    if( sim_mode == SimulationMode.DIRECT ):
        sim_mode = SimulationMode.RATE
    NetworkImpl.setMode(self, sim_mode)
def __init__(self, name = "Memory Block", num_dim = 1, neurons_per_dim = 25, tau_in = 0.005, \ in_scale = 1.0, fb_scale = 1.00, inhib_scale = 2.0, input_name = "Input", \ reset_opt = 0, reset_vec = None, cyc_opt = 0, en_gint_out = False, tau_buf_in = 0.01,\ sim_mode = SimulationMode.DEFAULT, quick = True, mode = 1, rand_seed = 0, cleanup_vecs = None): # mode: 1 - hrr mode (radius is scaled to num_dim), 0 - normal mode, # -1 - aligned mode (eval points chosen around 1 and 0) self.dimension = num_dim self.sim_mode = sim_mode NetworkImpl.__init__(self) self.setName(name) if( not reset_vec is None ): reset_opt = reset_opt | 1 if( str(sim_mode).lower() == 'ideal' ): node = MemBlockNode(name, num_dim, tau_in, reset_opt, reset_vec, cyc_opt, en_gint_out, in_scale) self.addNode(node) if( not(input_name is None or input_name == "") ): self.exposeTermination(node.getTermination("Input"), input_name) self.exposeTermination(node.getTermination("Cycle"), "Cycle") if( not reset_opt == 0 ): self.exposeTermination(node.getTermination("Reset"), "Reset") self.exposeOrigin(node.getOrigin("X"), "X") if( en_gint_out ): self.exposeOrigin(node.getOrigin("GINT1"), "GINT1") self.exposeOrigin(node.getOrigin("GINT2"), "GINT2") else: net = nef.Network(self, quick) gint1 = GatedInt("GINT1", num_dim, neurons_per_dim, in_scale = in_scale, fb_scale = fb_scale, tau_in = tau_in, \ inhib_scale = inhib_scale, en_reset = reset_opt & 1, reset_vec = reset_vec, en_cyc_in = False, \ cyc_opt = cyc_opt, mode = mode, quick = quick, rand_seed = rand_seed, input_name = input_name, \ sim_mode = sim_mode, tau_buf_in = tau_buf_in, cleanup_vecs = cleanup_vecs) gint2 = GatedInt("GINT2", num_dim, neurons_per_dim, fb_scale = fb_scale, inhib_scale = inhib_scale, \ en_reset = reset_opt & 2, en_cyc_in = False, cyc_opt = cyc_opt, mode = mode, \ quick = quick, rand_seed = rand_seed, sim_mode = sim_mode, cleanup_vecs = cleanup_vecs) net.add(gint1) net.add(gint2) net.connect(gint1.getOrigin("X"), gint2.getTermination("Input")) if( not(input_name is None or input_name == "") ): net.network.exposeTermination(gint1.getTermination("Input"), input_name) net.network.exposeOrigin(gint2.getOrigin("X"), "X") if( en_gint_out ): net.network.exposeOrigin(gint1.getOrigin("X"), "GINT1") net.network.exposeOrigin(gint2.getOrigin("X"), "GINT2") if( reset_opt > 0 ): rst_terms = [] if( reset_opt & 1 ): rst_terms.append(gint1.getTermination("Reset")) if( reset_opt & 2 ): rst_terms.append(gint2.getTermination("Reset")) rst_term = EnsembleTermination(net.network, "Reset", rst_terms) net.network.exposeTermination(rst_term, "Reset") cyc_net = Detector("Cycle", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed) net.add(cyc_net) net.connect(cyc_net.getOrigin("Cycle") , gint1.getTermination("Cycle")) net.connect(cyc_net.getOrigin("Cycle") , gint2.getTermination("CycleN")) net.connect(cyc_net.getOrigin("CycleN"), gint1.getTermination("CycleN")) net.connect(cyc_net.getOrigin("CycleN"), gint2.getTermination("Cycle")) net.network.exposeTermination(cyc_net.getTermination("Input"), "Cycle") self.releaseMemory() if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT self.setMode(sim_mode) if( sim_mode == SimulationMode.DIRECT ): self.fixMode()
def __init__(self, name, N, d):
    NetworkImpl.__init__(self)
    self.name = name
    tauPSC = 0.007

    # DFT matrices for transforming the inputs into the frequency domain
    Wr = self.calcWreal(d)
    Wi = self.calcWimag(d)

    halfd = int(d / 2) + 1
    halfN = int(math.ceil(float(N) * halfd / d))

    ef = RPMutils.defaultEnsembleFactory()
    netef = networkensemble.NetworkEnsemble(ef)

    #create input populations
    A = ef.make("A", 1, d)
    A.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
    A.setMode(SimulationMode.DIRECT) #since this is just a relay ensemble for modularity
    A.fixMode()
    self.addNode(A)

    B = ef.make("B", 1, d)
    B.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
    B.setMode(SimulationMode.DIRECT) #since this is just a relay ensemble for modularity
    B.fixMode()
    self.addNode(B)

    #this is the new method, where we collapse the fft into the eprod
    #populations to calculate the element-wise product of our vectors so far

    #note: we scale the output of the eprods by d/2, which we will undo at the
    #end, to keep the overall length of each dimension around 1
    #(the average value of each dimension of a normalized d dimensional vector is 1/sqrt(d),
    #so 1/sqrt(d)*1/sqrt(d) = 1/d, so when we add the scale the resulting average dimension
    #should be around d/2d i.e. 1/2)
    #the 2 is added to give us a bit of a buffer, better to have the dimensions too small
    #than too large and run into saturation problems
    multscale = float(d) / 2.0

    eprods = []
    eprods.append(eprod.Eprod("eprod0", halfN, halfd, scale=multscale, weights=[Wr, Wr], maxinput=2.0 / math.sqrt(d)))
    eprods.append(eprod.Eprod("eprod1", halfN, halfd, scale=multscale, weights=[Wi, Wi], maxinput=2.0 / math.sqrt(d)))
    eprods.append(eprod.Eprod("eprod2", halfN, halfd, scale=multscale, weights=[Wi, Wr], maxinput=2.0 / math.sqrt(d)))
    eprods.append(eprod.Eprod("eprod3", halfN, halfd, scale=multscale, weights=[Wr, Wi], maxinput=2.0 / math.sqrt(d)))

    for i in range(4):
        self.addNode(eprods[i])
        self.addProjection(A.getOrigin("X"), eprods[i].getTermination("A"))
        self.addProjection(B.getOrigin("X"), eprods[i].getTermination("B"))

    #negative identity matrix (for subtraction)
    negidentity = [[0 for x in range(d)] for x in range(d)]
    for i in range(d):
        negidentity[i][i] = -1

    #note: all this halfd/expansion stuff is because the fft of a real value
    #is symmetrical, so we do all our computations on just one half and then
    #add in the symmetrical other half at the end

    #matrix for expanding real half-vectors (with negative for subtraction)
    expand = RPMutils.eye(halfd, 1)
    negexpand = RPMutils.eye(halfd, -1)

    #matrix for expanding imaginary half-vectors
    imagexpand = RPMutils.eye(halfd, 1)

    midpoint = halfd - 1 - (d + 1) % 2
    for i in range(int(math.ceil(d / 2.0) - 1)):
        expand = expand + [expand[midpoint - i]]
        negexpand = negexpand + [negexpand[midpoint - i]]
        imagexpand = imagexpand + [[-x for x in imagexpand[midpoint - i]]]

    #multiply real components
    rprod = netef.make("rprod", N, tauPSC, [expand, negexpand], None)
    self.addNode(rprod)
    self.addProjection(eprods[0].getOrigin("X"), rprod.getTermination("in_0"))
    self.addProjection(eprods[1].getOrigin("X"), rprod.getTermination("in_1"))

    #multiply imaginary components
    iprod = netef.make("iprod", N, tauPSC, [imagexpand, imagexpand], None)
    self.addNode(iprod)
    self.addProjection(eprods[2].getOrigin("X"), iprod.getTermination("in_0"))
    self.addProjection(eprods[3].getOrigin("X"), iprod.getTermination("in_1"))

    #now calculate IFFT of Z = (rprod) + (iprod)i
    #we only need to calculate the real part, since we know the imaginary component is 0
    Winvr = self.calcInvWreal(d)
    Winvi = self.calcInvWimag(d)

    #undo the multscale scaling that was applied in the eprods
    for i in range(d):
        for j in range(d):
            Winvr[i][j] = Winvr[i][j] * (1.0 / multscale)
            Winvi[i][j] = Winvi[i][j] * (1.0 / multscale)

    negWinvi = [[0 for x in range(d)] for x in range(d)]
    for i in range(d):
        for j in range(d):
            negWinvi[i][j] = -Winvi[i][j]

    result = netef.make("result", N, tauPSC, [Winvr, negWinvi], None)
    self.addNode(result)
    self.addProjection(rprod.getOrigin("X"), result.getTermination("in_0"))
    self.addProjection(iprod.getOrigin("X"), result.getTermination("in_1"))

    if RPMutils.USE_PROBES:
        self.simulator.addProbe("A", "X", True)
        self.simulator.addProbe("B", "X", True)
        self.simulator.addProbe("eprod0", "X", True)
        self.simulator.addProbe("eprod1", "X", True)
        self.simulator.addProbe("eprod2", "X", True)
        self.simulator.addProbe("eprod3", "X", True)
        self.simulator.addProbe("rprod", "X", True)
        self.simulator.addProbe("iprod", "X", True)
        self.simulator.addProbe("result", "X", True)

    self.exposeTermination(A.getTermination("input"), "A")
    self.exposeTermination(B.getTermination("input"), "B")
    self.exposeOrigin(result.getOrigin("X"), "X")
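# Usage sketch (hypothetical): binding two d-dimensional HRR vectors with this
# circular-convolution network; `parent`, `vecA`, and `vecB` are assumed to be
# an enclosing NetworkImpl and two d-dimensional sources within it.
#
#   conv = cconv.Cconv("conv", N, d)
#   parent.addNode(conv)
#   parent.addProjection(vecA.getOrigin("X"), conv.getTermination("A"))
#   parent.addProjection(vecB.getOrigin("X"), conv.getTermination("B"))
#   # conv.getOrigin("X") then carries the circular convolution of A and B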
def __init__(self, N, d, matrix):
    NetworkImpl.__init__(self)
    self.name = "SequenceSolver"
    self.N = N
    self.d = d

    ef1 = RPMutils.defaultEnsembleFactory()

    #load matrix data from file
    matrixData = self.loadSequenceMatrix(matrix)

    #the two input signals, A and B, representing the sequence of example pairs
    Ain = matrixData[0]
    Bin = matrixData[1]
    self.addNode(Ain)
    self.addNode(Bin)

    #the adaptive learning rate
    # lrate = matrixData[2]
    # self.addNode(lrate)

    #calculate the T for the current A and B
    calcT = transform.Transform("calcT", N, d)
    self.addNode(calcT)
    self.addProjection(Ain.getOrigin("origin"), calcT.getTermination("A"))
    self.addProjection(Bin.getOrigin("origin"), calcT.getTermination("B"))
    # self.addProjection(lrate.getOrigin("origin"), calcT.getTermination("lrate"))

    if RPMutils.USE_CLEANUP:
        #run T through cleanup memory
        cleanT = memory.Memory("cleanT", N, d)
        self.addNode(cleanT)
        self.addProjection(calcT.getOrigin("T"), cleanT.getTermination("dirty"))

    #calculate the result of applying T to the second last cell
    secondLast = matrixData[3]
    self.addNode(secondLast)

    calcLast = cconv.Cconv("calcLast", N, d)
    self.addNode(calcLast)
    self.addProjection(secondLast.getOrigin("origin"), calcLast.getTermination("A"))

    if RPMutils.USE_CLEANUP:
        self.addProjection(cleanT.getOrigin("clean"), calcLast.getTermination("B"))
    else:
        self.addProjection(calcT.getOrigin("T"), calcLast.getTermination("B"))

    if RPMutils.LOAD_RULES:
        self.removeProjection(calcLast.getTermination("B"))
        rulesig = matrixData[len(matrixData) - 1]
        self.addNode(rulesig)
        self.addProjection(rulesig.getOrigin("origin"), calcLast.getTermination("B"))

    #compare the result to the possible answers to determine which is most similar
    if not RPMutils.RUN_WITH_CONTROLLER:
        testSimilarity = similarity.Similarity("testSimilarity", N, d, matrixData[4:])
        self.addNode(testSimilarity)
        self.addProjection(calcLast.getOrigin("X"), testSimilarity.getTermination("hypothesis"))
        self.simulator.addProbe("testSimilarity", "result", True)

    if RPMutils.USE_CLEANUP:
        Tprobe = self.simulator.addProbe("cleanT", "clean", True)
    else:
        Tprobe = self.simulator.addProbe("calcT", "T", True)
    answerprobe = self.simulator.addProbe("calcLast", "X", True)

    if RPMutils.USE_CLEANUP and RPMutils.DYNAMIC_MEMORY:
        self.simulator.addSimulatorListener(
            memorylistener.MemoryManagementListener(RPMutils.cleanupDataFile(),
                                                    RPMutils.cleanupFile(d, RPMutils.VOCAB_SIZE)))

    if RPMutils.RUN_WITH_CONTROLLER:
        self.simulator.addSimulatorListener(
            proberecorder.ProbeRecorder(Tprobe, RPMutils.resultFile("sequencesolver"), 0.05))
        self.simulator.addSimulatorListener(
            proberecorder.ProbeRecorder(answerprobe, RPMutils.hypothesisFile("sequencesolver"), 0.05))

    self.setMode(RPMutils.SIMULATION_MODE)
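# Usage sketch (hypothetical): `matrix` is whatever loadSequenceMatrix expects,
# assumed here to be a path to a matrix description file; the run call is an
# assumed Nengo Simulator invocation (startTime, endTime, stepSize).
#
#   solver = SequenceSolver(N = 200, d = 128, matrix = "matrices/sample.txt")
#   solver.simulator.run(0.0, 1.0, 0.001)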
def __init__(self, name = "Detector", detect_vec = None, inhib_vec = None, tau_in = 0.005, \ en_inhib = False, en_inhibN = None, tau_inhib = 0.005, in_scale = 1.0, inhib_scale = 2.0,\ en_out = True, en_N_out = False, en_X_out = False, num_neurons = 20, detect_threshold = 0.4, \ sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = 0, net = None, input_name = "Input"): self.dimension = 1 NetworkImpl.__init__(self) ens_name = name if( not isinstance(net, nef.Network) ): if( not net is None ): net = nef.Network(net, quick) else: ens_name = "detect" net = nef.Network(self, quick) self.setName(name) if( detect_vec is None ): detect_vec = [1] vec_dim = len(detect_vec) detect_vec_scale = [detect_vec[n] * in_scale for n in range(vec_dim)] if( en_inhib ): if( inhib_vec is None ): inhib_vec = [1] inhib_dim = len(inhib_vec) if( en_inhibN is None ): en_inhibN = en_inhib max_rate = (100,200) max_rateN = (300,400) detect_threshold = max(min(detect_threshold, 0.8), 0.2) intercepts = [detect_threshold + n * (1-detect_threshold)/(num_neurons) for n in range(num_neurons)] interceptsN = [-(n * (detect_threshold)/(num_neurons)) for n in range(num_neurons)] params = dict(intercept = intercepts , max_rate = max_rate , quick = quick) paramsN = dict(intercept = interceptsN, max_rate = max_rateN, quick = quick) out_func = FilteredStepFunction(shift = detect_threshold, mirror = False) out_funcN = FilteredStepFunction(shift = detect_threshold, mirror = True) if( rand_seed >= 0 ): PDFTools.setSeed(rand_seed) seed(rand_seed) params["encoders"] = [[1]] * num_neurons paramsN["encoders"] = [[-1]] * num_neurons pdf = IndicatorPDF(detect_threshold + 0.1, 1.1) pdfN = IndicatorPDF(-0.1, detect_threshold - 0.1) params["eval_points"] = [[pdf.sample()[0]] for _ in range(1000)] paramsN["eval_points"] = [[pdfN.sample()[0]] for _ in range(1000)] if( en_out ): if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ): detect = SimpleNEFEns(ens_name, 1, input_name = "") net.add(detect) else: detect = net.make(ens_name, num_neurons, 1, **params) if( not input_name is None ): detect.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False) if( en_inhib ): inhib_vec_scale = [inhib_vec[n] * -inhib_scale for n in range(inhib_dim)] detect.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False) detect.removeDecodedOrigin("X") detect.addDecodedOrigin("X", [out_func], "AXON") if( en_X_out ): detect.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON") self.exposeOrigin(detect.getOrigin("x0"), "x0") if( en_N_out ): if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ): detectN = SimpleNEFEns(ens_name + "N", 1, input_name = "") net.add(detectN) else: detectN = net.make(ens_name + "N", num_neurons, 1, **paramsN) if( not input_name is None ): detectN.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False) if( en_inhibN ): detectN.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False) detectN.removeDecodedOrigin("X") detectN.addDecodedOrigin("X", [out_funcN], "AXON") if( en_X_out ): detectN.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON") self.exposeOrigin(detectN.getOrigin("x0"), "x0N") input_terms = [] inhib_terms = [] if( en_out ): if( not input_name is None ): input_terms.append(detect.getTermination(input_name)) self.exposeOrigin(detect.getOrigin("X"), name) if( en_inhib ): inhib_terms.append(detect.getTermination("Inhib")) if( en_N_out ): if( not input_name is None ): 
input_terms.append(detectN.getTermination(input_name)) self.exposeOrigin(detectN.getOrigin("X"), str(name + "N")) if( en_inhibN ): inhib_terms.append(detectN.getTermination("Inhib")) if( len(input_terms) > 0 ): input_term = EnsembleTermination(self, input_name, input_terms) self.exposeTermination(input_term, input_name) if( len(inhib_terms) > 0 ): inhib_term = EnsembleTermination(self, "Inhib", inhib_terms) self.exposeTermination(inhib_term, "Inhib") if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT NetworkImpl.setMode(self, sim_mode) if( sim_mode == SimulationMode.DIRECT ): self.fixMode() self.releaseMemory()
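# Usage sketch (hypothetical): a detector that fires when the input's
# projection onto `target_vec` crosses 0.4, with the negated output enabled;
# `parent_net` and `src` are assumed to exist.
#
#   det = Detector("SeenA", detect_vec = target_vec, detect_threshold = 0.4,
#                  en_N_out = True)
#   parent_net.add(det)
#   parent_net.connect(src.getOrigin("X"), det.getTermination("Input"))
#   # det exposes origins "SeenA" and "SeenAN"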
def __init__(self, name = "Selector", num_dim = 1, neurons_per_dim = 25, ens_per_array = 0, num_items = 2, in_scale = 1.0, \ tau_in = 0.005, tau_inhib = 0.005, inhib_scale = 2.0, \ inhib_vecs = [], en_sum_out = True, \ sim_mode = SimulationMode.DEFAULT, quick = True, **params): # Note: tau_in and tau_inhib and inhib_scale can be lists. self.dimension = num_dim self.num_items = num_items if( ens_per_array == 0 ): ens_per_array = 8 max_ens_per_array = sqrt(num_dim) while( ens_per_array < max_ens_per_array ): if( num_dim % ens_per_array == 0 ): break else: ens_per_array += 1 if( str(sim_mode).lower() == 'ideal' ): sim_mode = SimulationMode.DIRECT # Deepcopy inhib_vec list... otherwise append function will affect other classes. inhib_vecs = [inhib_vecs[n] for n in range(len(inhib_vecs))] len_diff = num_items - len(inhib_vecs) if( len_diff != 0 ): if( len(inhib_vecs) > 0 ): print(len(inhib_vecs)) print(str(inhib_vecs)) print("Selector.__init__ [" + name + "] - inhib_vec length and num_item mismatch") for n in range(len_diff): inhib_vecs.append([1]) inhib_dim = len(inhib_vecs[0]) NetworkImpl.__init__(self) net = nef.Network(self, quick) self.setName(name) self.ens_per_array = min(num_dim, ens_per_array) self.dim_per_ens = num_dim / ens_per_array self.neurons_per_ens = neurons_per_dim * self.dim_per_ens self.make_mode = sim_mode enss = [] if( en_sum_out ): out_relay = SimpleNEFEns("Output", self.dimension, pstc = 0.0001, input_name = "") net.add(out_relay) if( not isinstance(tau_in, list) ): tau_in = [tau_in] * num_items if( not isinstance(tau_inhib, list) ): tau_inhib = [tau_inhib] * num_items if( not isinstance(inhib_scale, list) ): inhib_scale = [inhib_scale] * num_items self.inhib_scale = inhib_scale self.tau_inhib = tau_inhib self.tau_in = tau_in if( not "max_rate" in params ): params["max_rate"] = (100,200) if( not "quick" in params ): params["quick"] = quick for item in range(num_items): inhib_vec_scale = [inhib_vecs[item][n] * -inhib_scale[item] for n in range(inhib_dim)] if( sim_mode == SimulationMode.DIRECT ): ens = SimpleNEFEns("Item " + str(item+1), self.dimension, pstc = tau_in[item], input_name = None) net.add(ens) inhib_mat = [inhib_vec_scale] else: ens = net.make_array("Item " + str(item+1), self.neurons_per_ens, self.ens_per_array, \ self.dim_per_ens, **params) inhib_mat = [[inhib_vec_scale] * self.neurons_per_ens] * self.ens_per_array in_term = ens.addDecodedTermination("Input", diag(num_dim, value = in_scale), tau_in[item], False) inhib_term = ens.addTermination("Inhib", inhib_mat, tau_inhib[item], False) enss.append(ens) net.network.exposeTermination(in_term, "Input " + str(item+1)) net.network.exposeTermination(inhib_term, "Suppress " + str(item+1)) if( not en_sum_out ): net.network.exposeOrigin(ens.getOrigin("X"), "Output " + str(item+1)) else: out_relay.addDecodedTermination("Item" + str(item+1), None, 0.0001, False) out_relay.addNeuronCount(ens.getNeuronCount()) net.connect(ens.getOrigin("X"), out_relay.getTermination("Item" + str(item+1))) if( en_sum_out ): net.network.exposeOrigin(out_relay.getOrigin("X"), "X") NetworkImpl.setMode(self, sim_mode) if( sim_mode == SimulationMode.DIRECT ): self.fixMode() if( not seed is None ): seed() self.releaseMemory()
def __init__(self, stateN, stateD, state_encoders, actions, learningrate,
             stateradius=1.0, Qradius=1.0, load_weights=None):
    NetworkImpl.__init__(self)
    self.name = "QNetwork"
    net = nef.Network(self, seed=HRLutils.SEED, quick=False)

    N = 50
    statelength = math.sqrt(2 * stateradius**2)
    tauPSC = 0.007
    num_actions = len(actions)
    init_Qs = 0.0
    weight_save = 600.0 #period to save weights (realtime, not simulation time)

    #set up relays
    state_relay = net.make("state_relay", 1, stateD, mode="direct")
    state_relay.fixMode()
    state_relay.addDecodedTermination("input", MU.I(stateD), 0.001, False)

    #create state population
    state_fac = HRLutils.node_fac()
    state_fac.setIntercept(IndicatorPDF(0, 1))

    state_pop = net.make("state_pop", stateN, stateD,
                         radius=statelength,
                         node_factory=state_fac,
                         encoders=state_encoders)
#                         eval_points=MU.I(stateD))
#    state_pop = net.make_array("state_pop", stateN/stateD, stateD,
#                               node_factory=state_fac)
    state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])

    net.connect(state_relay, state_pop, pstc=tauPSC)

    #create population tied to previous state (to be used in learning)
    saved_state = memory.Memory("saved_state", N*4, stateD, inputscale=50,
                                radius=stateradius, direct_storage=True)
    net.add(saved_state)

    net.connect(state_relay, saved_state.getTermination("target"))

    old_state_pop = net.make("old_state_pop", stateN, stateD,
                             radius=statelength,
                             node_factory=state_fac,
                             encoders=state_encoders)
#                             eval_points=MU.I(stateD))
#    old_state_pop = net.make_array("old_state_pop", stateN/stateD, stateD,
#                                   node_factory=state_fac)
    old_state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])

    net.connect(saved_state, old_state_pop, pstc=tauPSC)

    #set up action nodes
    decoders = state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD, init_Qs)], "AXON").getDecoders()
    actionvals = actionvalues.ActionValues("actionvals", N, stateN, actions, learningrate,
                                           Qradius=Qradius, init_decoders=decoders)
    net.add(actionvals)

    decoders = old_state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD, init_Qs)], "AXON").getDecoders()
    old_actionvals = actionvalues.ActionValues("old_actionvals", N, stateN, actions, learningrate,
                                               Qradius=Qradius, init_decoders=decoders)
    net.add(old_actionvals)

    net.connect(state_pop.getOrigin("AXON"), actionvals.getTermination("state"))
    net.connect(old_state_pop.getOrigin("AXON"), old_actionvals.getTermination("state"))

    if load_weights is not None:
        self.loadWeights(load_weights)

    #find error between old_actionvals and actionvals
    valdiff = net.make_array("valdiff", N, num_actions, node_factory=HRLutils.node_fac())
    net.connect(old_actionvals, valdiff, transform=MU.diag([2]*num_actions), pstc=tauPSC)
    net.connect(actionvals, valdiff, transform=MU.diag([-2]*num_actions), pstc=tauPSC)
    #doubling values to get a bigger error signal

    #calculate diff between curr_state and saved_state and use that to gate valdiff
    statediff = net.make_array("statediff", N, stateD, intercept=(0.2, 1))
    net.connect(state_relay, statediff, pstc=tauPSC)
    net.connect(saved_state, statediff, transform=MU.diag([-1]*stateD), pstc=tauPSC)

    net.connect(statediff, valdiff, func=lambda x: [abs(v) for v in x],
                transform=[[-10]*stateD for _ in range(valdiff.getNeurons())], pstc=tauPSC)

    net.connect(valdiff, actionvals.getTermination("error"))

    #periodically save the weights
    class WeightSaveThread(threading.Thread):
        def __init__(self, func, prefix, period):
            threading.Thread.__init__(self)
            self.func = func
            self.prefix = prefix
            self.period = period

        def run(self):
            while True:
                time.sleep(self.period)
                self.func(self.prefix)

    wsn = WeightSaveThread(self.saveWeights, os.path.join("weights", "tmp"), weight_save)
    wsn.start()

    self.exposeTermination(state_relay.getTermination("input"), "state")
    self.exposeTermination(old_actionvals.getTermination("error"), "error")
    self.exposeTermination(saved_state.getTermination("transfer"), "save_state")
    self.exposeOrigin(actionvals.getOrigin("X"), "vals")
    self.exposeOrigin(old_actionvals.getOrigin("X"), "old_vals")
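# Usage sketch (hypothetical): wiring the QNetwork into an agent network,
# where `agent_net`, `env_state`, `error_calc`, and `encoders` are assumed to
# exist and `actions` is the agent's action list.
#
#   qnet = QNetwork(stateN = 400, stateD = 2, state_encoders = encoders,
#                   actions = actions, learningrate = 9e-10)
#   agent_net.add(qnet)
#   agent_net.connect(env_state.getOrigin("X"), qnet.getTermination("state"))
#   agent_net.connect(error_calc.getOrigin("X"), qnet.getTermination("error"))
#   # qnet exposes "vals" / "old_vals" origins and a "save_state" termination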