def add(self, key, v):
    """Add HRR vector *v* to this vocabulary under the name *key*.

    Updates self.hrr / self.keys and grows the self.vectors matrix by one
    row.  If pair tracking is active, also records the circular-convolution
    binding of the new key with every existing key.

    Raises TypeError if *v* is not an HRR instance.
    """
    # Perform checks
    if (isinstance(v, HRR)):
        self.hrr[key] = v
        self.keys.append(key)
        if self.vectors is None:
            # First entry: start the matrix with a single row.
            self.vectors = numeric.array([self.hrr[key].v])
        else:
            # Grow the matrix by one row and write the new vector into it.
            self.vectors = numeric.resize(
                self.vectors, (len(self.keys), self.dimensions))
            self.vectors[-1, :] = self.hrr[key].v
        # Generate vector pairs (bindings of the new key with all prior keys)
        if (self.include_pairs or self.vector_pairs is not None):
            for k in self.keys[:-1]:
                self.key_pairs.append('%s*%s' % (k, key))
                # NOTE: `v` is deliberately reused here for the bound vector;
                # the original argument is already stored in self.hrr[key].
                v = (self.hrr[k] * self.hrr[key]).v
                if self.vector_pairs is None:
                    self.vector_pairs = numeric.array([v])
                else:
                    self.vector_pairs = numeric.resize(
                        self.vector_pairs,
                        (len(self.key_pairs), self.dimensions))
                    self.vector_pairs[-1, :] = v
    else:
        raise TypeError(
            'hrr.Vocabulary.add() Type error: Argument provided not of HRR type'
        )
def input_transform( Ashape, Bshape, FFTshape, first ):
    """Build the input-side transform for a 2-D FFT-based convolution network.

    Ashape/Bshape are the (rows, cols) shapes of the two operands and
    FFTshape is the padded (M, N) FFT size.  *first* selects whether the
    transform is generated for operand A (True) or operand B (False).

    Each complex FFT coefficient is expanded into four output rows
    (real/imag pairs); `i/4` below relies on Python 2 integer division to
    map the 4*M*N output rows back to the M*N coefficients.
    """
    am,an = Ashape
    bm,bn = Bshape
    M,N = FFTshape
    DM = DFT( M ) # get DFT matrix for rows
    DN = DFT( N ) # get DFT matrix for cols
    if( first ):
        c = am*an
        W = unpaddedTransform( DM[:,:am], DN[:an,:] )
    else:
        c = bm*bn
        W = unpaddedTransform( DM[:,:bm], DN[:bn,:] )
    T = []
    for i in range(4*M*N):
        if( first ):
            # Operand A occupies the first half of each concatenated row;
            # alternate real/imag every other row.
            if( i % 2 == 0 ):
                T.extend( array( [W[i/4,:].real, zeros(c)] ) )
            else:
                T.extend( array( [W[i/4,:].imag, zeros(c)] ) )
        else:
            # Operand B occupies the second half; rows 0 and 3 of each group
            # of four take the real part, rows 1 and 2 the imaginary part.
            if( i % 4 == 0 or i % 4 == 3 ):
                T.extend( array( [zeros(c), W[i/4,:].real] ) )
            else:
                T.extend( array( [zeros(c), W[i/4,:].imag] ) )
    return array(T)
def create(self,net,N=50,dimensions=8,randomize=False):
    """Build the network: one array per node, vocabulary-based connections,
    optional mutual inhibition / self excitation, and 'and' gate populations.

    N is neurons per dimension for each array; *dimensions* is the minimum
    vector dimensionality (bumped up when randomize is False so every term
    fits).  Returns the (mutated) net.
    """
    vocab={}
    for k in self.nodes.keys():
        node=net.get(k,None)
        if node is None:
            dim=dimensions
            # Non-random vocabularies need one dimension per term plus one.
            if randomize is False and len(self.nodes[k])+1>dim:
                dim=len(self.nodes[k])+1
            node=net.make_array(k,N,dim)
        # Reuse a registered vocabulary for this node if one exists.
        if not hrr.Vocabulary.registered.has_key(id(node)):
            v=hrr.Vocabulary(node.dimension,randomize=randomize)
            v.register(node)
        vocab[k]=hrr.Vocabulary.registered[id(node)]
    # ensure all terms are parsed before starting
    for k,v in self.connect.items():
        pre_name,post_name=k
        for pre_term,post_term in v:
            pre=vocab[pre_name].parse(pre_term).v
            post=vocab[post_name].parse(post_term).v
    for k,v in self.connect.items():
        pre_name,post_name=k
        # Transform maps pre vocabulary space into post vocabulary space.
        t=numeric.zeros((vocab[post_name].dimensions,vocab[pre_name].dimensions),typecode='f')
        for pre_term,post_term in v:
            pre=vocab[pre_name].parse(pre_term).v
            post=vocab[post_name].parse(post_term).v
            # Outer product post x pre adds this term mapping.
            t+=numeric.array([pre*bb for bb in post])
        if pre_name==post_name:
            if pre_name in self.inhibit:
                # Each term inhibits every *other* term in the same node.
                for pre_term in vocab[pre_name].keys:
                    pre=vocab[pre_name].parse(pre_term).v*self.inhibit[pre_name]
                    post_value=numeric.zeros(vocab[post_name].dimensions,typecode='f')
                    for post_term in vocab[pre_name].keys:
                        if pre_term!=post_term:
                            post_value+=vocab[post_name].parse(post_term).v
                    t+=numeric.array([pre*bb for bb in post_value])
            if pre_name in self.excite:
                # Self-excitation: scaled identity added to the transform.
                t+=numeric.eye(len(t))*self.excite[pre_name]
        net.connect(net.get(pre_name),net.get(post_name),transform=t)
    for i,(pre,post) in enumerate(self.ands):
        D=len(pre)
        node=net.make('and%02d'%i,D*N,D)
        for j,p in enumerate(pre):
            # Route each antecedent term onto its own dimension of the gate.
            t=numeric.zeros((D,vocab[p[0]].dimensions),typecode='f')
            t[j,:]=vocab[p[0]].parse(p[1]).v*math.sqrt(D)
            net.connect(net.get(p[0]),node,transform=t)
        def result(x,v=vocab[post[0]].parse(post[1]).v):
            # Output the consequent term only if all inputs exceed threshold.
            for xx in x:
                if xx<0.4: return [0]*len(v) #TODO: This is pretty arbitrary....
            return v
        net.connect(node,net.get(post[0]),func=result)
    return net
def tick(self): s = np.array(self.dx.get()) + self.lambd*(np.array(self.x.get()) - np.array(self.x_desired.get())) # shuld be ddx_r, which in this case is -lambda*dx Y = np.array([self.ddx.get()[0], self.dx.get()[0]*abs(self.dx.get()[0]), math.sin(self.x.get()[0])]) # ddx_r = -lambd * dx self.u.set(sum(self.a * Y) - self.kappa * s) self.s.set(s) self.a_val.set(self.a) dt = 0.001 self.a -= self.rho*(s*Y)*dt
def tick(self):
    """Apply one decoder-learning update every LEARNING_PERIOD ticks.

    Computes da = Y * delta (outer product of activities with the scaled
    error signal s) and adds it in place to the origin's decoders.
    """
    self.counter += 1
    if self.counter % LEARNING_PERIOD == 0: #10 FIXME
        #t_start = time.time()
        # Learning-rate scaling: rho * 1e-5 * 0.1 (hand-tuned constants)
        delta = -rho * np.array(self.s.get())*0.00001 *.1
        Y = np.array(list(self.Y.get()))
        Y.shape = 300,1  # 300 neuron activities as a column vector
        #Y.shape = 150,1
        da = np.dot(Y, delta)
        da.shape = 300,1 #Bug fix
        ###decoder = np.array(self.origin.decoders)
        ###self.origin.decoders = decoder + da #FIXME: this line takes 50ms to run
        self.origin.decoders += da #FIXME: attempt to make it faster
def tick(self):
    """Decoder-learning update, run every 10 ticks.

    Same rule as the in-place variant elsewhere in this file, but here the
    decoders are copied, updated, and assigned back (slower but avoids
    mutating the original array).
    """
    self.counter += 1
    if self.counter%10 == 0:
        # Scaled error signal; constants are hand-tuned learning rates.
        delta = -rho * np.array(self.s.get())*0.00001 * .1
        Y = np.array(list(self.Y.get()))
        Y.shape = 300,1  # activities as a column vector
        da = np.dot(Y, delta)
        da.shape = 300,1 #Bug fix
        decoder = np.array(self.origin.decoders)
        #print( "%.9f, %.9f, %.9f, %.9f" % ( decoder[0][0], decoder[13][0],
        #                                    decoder[26][0], decoder[85][0] ) )
        #print( decoder.shape )
        #print( da.shape )
        #print( (decoder + da).shape )
        self.origin.decoders = decoder + da
def tick(self):
    """Drain the inbound socket to the newest odometry message, publish the
    roll angle/rate to the model, and send a torque command back to the
    simulator every CONTROL_PERIOD ticks.
    """
    # Burn through old messages, and only use the latest ones
    # This will allow the controller to keep working if the simulation slows
    # down. In the future they should be synchronized
    while True:
        if self.read_socket( self.sock_in ):
            try:
                data_in = json.loads( self.odom_str )
            except ValueError:
                # FIX: json.loads raises ValueError on malformed input
                # (``JSONDecodeError`` was an undefined name in this module's
                # Python 2 environment, so a bad message raised NameError).
                # Keep the previous self.sensor_data and stop draining.
                break
            self.sensor_data = data_in
        else:
            break
    self.position.set([self.sensor_data["roll"]])
    self.velocity.set([self.sensor_data["wx"]])
    self.counter += 1
    if self.counter % CONTROL_PERIOD == 0:
        torque = np.array(self.torque.get())[0]
        # Send the command to the simulator as a torque
        data_out = '{"force":[0,0,0],"torque":[%f,0,0]}\n' % torque
        self.sock_out.send( data_out )
def __init__(self, N=None, data=None):
    """Construct an HRR vector.

    Exactly one of *data* (explicit coefficients) or *N* (dimensionality of
    a freshly randomized vector) must be given; *data* wins if both are.
    """
    if data is None and N is None:
        raise Exception('Must specify size or data for HRR')
    if data is not None:
        self.v = array(data)
    else:
        self.randomize(N)
def calc_weights(self,encoder,decoder):
    """Prepare per-dimension spike-routing tables from encoder/decoder pairs.

    decoder is D x N1 (one decoder row per dimension), encoder is N2 x D.
    Builds self.tables (output lookup tables) and self.histograms (per input
    neuron, a (sign, cdf) pair for sampling), then returns the full
    N2 x N1 weight matrix encoder.decoder.
    """
    self.N1=len(decoder[0])
    self.D=len(decoder)
    self.N2=len(encoder)
    self.getTermination('input').setDimensions(self.N1)
    self.getOrigin('output').setDimensions(self.N2)
    self.tables=[]
    self.histograms=[]
    for dim in range(self.D):
        cdfs=[]
        self.tables.append(make_output_table([e[dim] for e in encoder]))
        for i in range(self.N1):
            # Normalize the decoder value and factor out its sign so the
            # histogram is built from a non-negative magnitude.
            d=decoder[dim][i]/spike_strength
            if d<0:
                decoder_sign=-1
                d=-d
            else:
                decoder_sign=1
            histogram=compute_histogram(d,[e[dim] for e in encoder])
            cdf=compute_cdf(histogram)
            cdfs.append((decoder_sign,cdf))
        self.histograms.append(cdfs)
    return numeric.array(MU.prod(encoder,decoder))
def make(net, name='System', neurons=100, A=[[0]], tau_feedback=0.1):
    """Create a linear dynamical system population dx/dt = A x.

    The recurrent transform is A' = tau*A + I (standard NEF conversion for a
    feedback synapse with time constant tau_feedback).  The trailing metadata
    blocks record the template parameters for the (Java/Jython) GUI.
    """
    A = numeric.array(A)
    assert len(A.shape) == 2
    assert A.shape[0] == A.shape[1]  # A must be square
    dimensions = A.shape[0]
    state = net.make(name, neurons, dimensions)
    Ap = A * tau_feedback + numeric.identity(dimensions)
    net.connect(state, state, transform=Ap, pstc=tau_feedback)
    # Record template parameters in the network metadata for the UI.
    if net.network.getMetaData("linear") == None:
        net.network.setMetaData("linear", HashMap())
    linears = net.network.getMetaData("linear")
    linear = HashMap(4)
    linear.put("name", name)
    linear.put("neurons", neurons)
    linear.put("A", MU.clone(A))
    linear.put("tau_feedback", tau_feedback)
    linears.put(name, linear)
    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(name)
    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(name, name)
def calc_weights(self, encoder, decoder):
    """Build spike-routing tables (duplicate of the compact variant above).

    Sets N1/D/N2 from the decoder (D x N1) and encoder (N2 x D) shapes,
    resizes the input termination / output origin accordingly, fills
    self.tables and self.histograms, and returns encoder.decoder.
    """
    self.N1 = len(decoder[0])
    self.D = len(decoder)
    self.N2 = len(encoder)
    self.getTermination('input').setDimensions(self.N1)
    self.getOrigin('output').setDimensions(self.N2)
    self.tables = []
    self.histograms = []
    for dim in range(self.D):
        cdfs = []
        self.tables.append(make_output_table([e[dim] for e in encoder]))
        for i in range(self.N1):
            # Split decoder value into sign and magnitude before histogramming.
            d = decoder[dim][i] / spike_strength
            if d < 0:
                decoder_sign = -1
                d = -d
            else:
                decoder_sign = 1
            histogram = compute_histogram(d, [e[dim] for e in encoder])
            cdf = compute_cdf(histogram)
            cdfs.append((decoder_sign, cdf))
        self.histograms.append(cdfs)
    return numeric.array(MU.prod(encoder, decoder))
def __init__(self, N=None, data=None):
    """Initialize from explicit *data*, or randomize an *N*-dimensional vector.

    Raises Exception when neither argument is supplied.
    """
    if data is not None:
        self.v = array(data)
        return
    if N is not None:
        self.randomize(N)
        return
    raise Exception('Must specify size or data for HRR')
def __init__(self,name,dims,trials_per_block=40,block_rewards=[[0.21,0.63],[0.63,0.21],[0.12,0.72],[0.72,0.12]]):
    """Set up a bandit-style task node: reward schedule, trial state machine
    (starting in 'delay'), and a random unit-norm cortical context vector.

    block_rewards gives per-block reward probabilities, one value per action
    dimension; its width must equal *dims*.
    NOTE: the default block_rewards list is mutable but only read here.
    """
    # parameters
    self.dims = dims
    self.trials_per_block = trials_per_block
    if len(block_rewards[0]) != dims:
        raise Exception('block_reward dimensionality must match dims')
    self.block_rewards = block_rewards
    # vars and constants
    self.trial_num = 0
    self.delay_t = 0.0       # time spent in the 'delay' state
    self.approach_t = 0.0    # time spent in the 'approach' state
    self.reward_t = 0.0      # time spent in the 'reward' state
    self.reward = [0.0] * dims
    self.thalamus_sum = [0.0] * dims
    self.thalamus_choice = 0
    self.rewarded = 0
    self.reward_val = 1.0
    self.gate_val = [0.9]
    self.vstr_gate_val = [1.0]
    self.data_log = []
    # generate random state_d-d unit vector (state_d is a module-level constant)
    self.ctx_val = array([random.gauss(0,1) for i in range(state_d)])
    self.ctx_val /= norm(self.ctx_val)
    self.state = 'delay'
    nef.SimpleNode.__init__(self,name)
def make(net,name='System',neurons=100,A=[[0]],tau_feedback=0.1):
    """Linear-system template (compact duplicate): recurrent population with
    transform tau*A + I approximating dx/dt = A x, plus GUI metadata.
    """
    A=numeric.array(A)
    assert len(A.shape)==2
    assert A.shape[0]==A.shape[1]  # square dynamics matrix required
    dimensions=A.shape[0]
    state=net.make(name,neurons,dimensions)
    Ap=A*tau_feedback+numeric.identity(dimensions)
    net.connect(state,state,transform=Ap,pstc=tau_feedback)
    # Record template parameters in the network metadata for the UI.
    if net.network.getMetaData("linear") == None:
        net.network.setMetaData("linear", HashMap())
    linears = net.network.getMetaData("linear")
    linear=HashMap(4)
    linear.put("name", name)
    linear.put("neurons", neurons)
    linear.put("A", MU.clone(A))
    linear.put("tau_feedback", tau_feedback)
    linears.put(name, linear)
    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(name)
    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(name, name)
def create(self):
    """Build the rule-application pipeline: bind sensor inputs to the stored
    rule via two circular convolutions, clean up the intermediate position
    and the resulting action / action-data, and expose the outputs.
    """
    stored_rule = self.net.make_input("StoredRule", rule_info)
    sensor_in = self.net.make("SENSOR IN", 1, nd, mode = 'direct')
    sensor_data_in = self.net.make("S_DATA IN", 1, nd, mode = 'direct')
    self.add_sink(sensor_in, "sensor_in") ##>]##
    self.add_sink(sensor_data_in, "sensor_data_in") ##>]##
    # Combine sensor and sensor-data inputs, each pre-bound with its role
    # vector (ANTxSENSOR / ANTxS_DATA) and weighted 0.4 / 0.6.
    ant_add = self.net.make("ANT ADD", 1, nd, mode = 'direct')
    ant_add.addDecodedTermination("SENSOR", np.array(vocab.hrr["ANTxSENSOR"].get_transform_matrix()) * 0.4, 0.001, False)
    ant_add.addDecodedTermination("S_DATA", np.array(vocab.hrr["ANTxS_DATA"].get_transform_matrix()) * 0.6, 0.001, False)
    self.net.connect("SENSOR IN", ant_add.getTermination("SENSOR"))
    self.net.connect("S_DATA IN", ant_add.getTermination("S_DATA"))
    # Deconvolve the combined antecedent out of the stored rule to find the
    # rule position, then clean it up.
    cconv_pos_out = self.net.make("CConv Pos Out", 1, nd, mode = 'direct')
    cconv_pos_ens = make_convolution(self.net, "CCONV -> POS", None, None, cconv_pos_out, nn_cconv, radius = 6, \
                                     invert_second = True, quick = True)
    self.net.connect("StoredRule", cconv_pos_ens.getTermination("A"))
    self.net.connect("ANT ADD", cconv_pos_ens.getTermination("B"))
    cleanup_pos = CleanupMem("CleanupPos", pos_list, en_mut_inhib = True, tau_in = pstc_base,
                             in_scale = 1.0, threshold = 0.4)
    self.net.add(cleanup_pos)
    self.net.connect(cconv_pos_out.getOrigin("X"), cleanup_pos.getTermination("Input"))
    # Deconvolve the cleaned-up position out of the stored rule to get the
    # rule's consequent bundle.
    cconv_rule_out = self.net.make("CConv Rule Out", 1, nd, mode = 'direct')
    cconv_rule_ens = make_convolution(self.net, "CCONV -> RULE", None, None, cconv_rule_out, nn_cconv, radius = 6, \
                                      invert_second = True, quick = True)
    self.net.connect("StoredRule", cconv_rule_ens.getTermination("A"))
    self.net.connect(cleanup_pos.getOrigin("X"), cconv_rule_ens.getTermination("B"))
    # Extract and clean up the ACTION component of the consequent.
    action = self.net.make("ACTION", 1, nd, mode = 'direct')
    action.addDecodedTermination("Input", vocab.hrr["~(CONSxACTION)"].get_transform_matrix(), 0.001, False)
    action_cu = CleanupMem("CleanupAction", action_list, en_mut_inhib = True, tau_in = pstc_base, \
                           threshold = 0.15, in_scale = 1.2, tau_smooth = pstc_base)
    self.net.add(action_cu)
    self.net.connect(cconv_rule_out, action.getTermination("Input"))
    self.net.connect(action, action_cu.getTermination("Input"))
    self.add_source(action_cu.getOrigin("X"), "act_out") ##]>##
    # Extract and clean up the ACTION DATA component of the consequent.
    action_data = self.net.make("ACTION DATA", 1, nd, mode = 'direct')
    action_data.addDecodedTermination("Input", vocab.hrr["~(CONSxA_DATA)"].get_transform_matrix(), 0.001, False)
    action_data_cu = CleanupMem("CleanupActionData", action_data_list, en_mut_inhib = True, tau_in = pstc_base, \
                                threshold = 0.15, in_scale = 1.2, tau_smooth = pstc_base)
    self.net.add(action_data_cu)
    self.net.connect(cconv_rule_out, action_data.getTermination("Input"))
    self.net.connect(action_data, action_data_cu.getTermination("Input"))
    self.add_source(action_data_cu.getOrigin("X"), "act_data_out") ##]>##
def alen(a):
    """Return the length of a Python object interpreted as an array of
    at least 1 dimension.

    Objects without a usable len() (e.g. scalars) are promoted to a
    1-dimensional array first, so they report a length of 1.
    """
    try:
        length = len(a)
    except TypeError:
        length = len(array(a, ndmin=1))
    return length
def _init(self, dtype):
    """Populate machine limits (eps, max/min, mantissa/exponent sizes, ...)
    for the given floating-point *dtype* by probing it with MachAr.

    Returns self so it can be chained from the constructor.
    Raises ValueError for unsupported dtypes.
    """
    self.dtype = numeric.dtype(dtype)
    if dtype is ntypes.double:
        itype = ntypes.int64
        fmt = '%24.16e'
        precname = 'double'
    elif dtype is ntypes.single:
        itype = ntypes.int32
        fmt = '%15.7e'
        precname = 'single'
    elif dtype is ntypes.longdouble:
        itype = ntypes.longlong
        fmt = '%s'
        precname = 'long double'
    elif dtype is ntypes.half:
        itype = ntypes.int16
        fmt = '%12.5e'
        precname = 'half'
    else:
        # FIX: use call syntax instead of the Python-2-only
        # ``raise ValueError, repr(dtype)`` form -- this parses on both
        # Python 2 and 3 and matches the other copies of this routine.
        raise ValueError(repr(dtype))
    machar = MachAr(lambda v: array([v], dtype),
                    lambda v: _frz(v.astype(itype))[0],
                    lambda v: array(_frz(v)[0], dtype),
                    lambda v: fmt % array(_frz(v)[0], dtype),
                    'numpy %s precision floating point number' % precname)
    # Copy the integer-valued properties straight over...
    for word in ['precision', 'iexp', 'maxexp', 'minexp', 'negep', 'machep']:
        setattr(self, word, getattr(machar, word))
    # ...and unwrap the 0-d array properties to scalars.
    for word in ['tiny', 'resolution', 'epsneg']:
        setattr(self, word, getattr(machar, word).flat[0])
    self.max = machar.huge.flat[0]
    self.min = -self.max
    self.eps = machar.eps.flat[0]
    self.nexp = machar.iexp
    self.nmant = machar.it
    self.machar = machar
    self._str_tiny = machar._str_xmin.strip()
    self._str_max = machar._str_xmax.strip()
    self._str_epsneg = machar._str_epsneg.strip()
    self._str_eps = machar._str_eps.strip()
    self._str_resolution = machar._str_resolution.strip()
    return self
def _init(self, dtype):
    """Probe *dtype* with MachAr and cache its machine limits on self.

    Sets eps, tiny, max/min, mantissa and exponent sizes, and the
    preformatted string representations.  Returns self for chaining;
    raises ValueError for unsupported dtypes.
    """
    self.dtype = numeric.dtype(dtype)
    if dtype is ntypes.double:
        itype = ntypes.int64
        fmt = '%24.16e'
        precname = 'double'
    elif dtype is ntypes.single:
        itype = ntypes.int32
        fmt = '%15.7e'
        precname = 'single'
    elif dtype is ntypes.longdouble:
        itype = ntypes.longlong
        fmt = '%s'
        precname = 'long double'
    elif dtype is ntypes.half:
        itype = ntypes.int16
        fmt = '%12.5e'
        precname = 'half'
    else:
        raise ValueError(repr(dtype))
    # MachAr needs conversion callbacks between the float type and a
    # same-width integer type to do its bit-level probing.
    machar = MachAr(lambda v: array([v], dtype),
                    lambda v: _frz(v.astype(itype))[0],
                    lambda v: array(_frz(v)[0], dtype),
                    lambda v: fmt % array(_frz(v)[0], dtype),
                    'numpy %s precision floating point number' % precname)
    # Integer-valued properties are copied directly...
    for word in [
            'precision', 'iexp', 'maxexp', 'minexp', 'negep', 'machep'
    ]:
        setattr(self, word, getattr(machar, word))
    # ...array-valued properties are unwrapped to scalars.
    for word in ['tiny', 'resolution', 'epsneg']:
        setattr(self, word, getattr(machar, word).flat[0])
    self.max = machar.huge.flat[0]
    self.min = -self.max
    self.eps = machar.eps.flat[0]
    self.nexp = machar.iexp
    self.nmant = machar.it
    self.machar = machar
    self._str_tiny = machar._str_xmin.strip()
    self._str_max = machar._str_xmax.strip()
    self._str_epsneg = machar._str_epsneg.strip()
    self._str_eps = machar._str_eps.strip()
    self._str_resolution = machar._str_resolution.strip()
    return self
def connect(self, and_neurons=50):
    """Wire all declared rule connections between SPA sources and sinks.

    Builds one outer-product transform per (pre, post) pair, adds optional
    mutual inhibition and self-excitation for recurrent pairs, and creates a
    thresholded 'and' population per conjunction rule.
    """
    # ensure all terms are parsed before starting
    for k,v in self.connections.items():
        pre_name,post_name=k
        for pre_term,post_term in v:
            pre=self.spa.sources[pre_name].parse(pre_term).v
            post=self.spa.sinks[post_name].parse(post_term).v
    for k,v in self.connections.items():
        pre_name,post_name=k
        # Transform maps the source vocabulary into the sink vocabulary.
        t=numeric.zeros((self.spa.sinks[post_name].dimensions,self.spa.sources[pre_name].dimensions),typecode='f')
        for pre_term,post_term in v:
            pre=self.spa.sources[pre_name].parse(pre_term).v
            post=self.spa.sinks[post_name].parse(post_term).v
            # Accumulate the outer product post x pre for this term pair.
            t+=numeric.array([pre*bb for bb in post])
        if pre_name==post_name:
            if pre_name in self.inhibit:
                # Every term inhibits all *other* terms of the same module.
                for pre_term in self.spa.sources[pre_name].keys:
                    pre=self.spa.sources[pre_name].parse(pre_term).v*self.inhibit[pre_name]
                    post_value=numeric.zeros(self.spa.sources[post_name].dimensions,typecode='f')
                    for post_term in self.spa.sources[pre_name].keys:
                        if pre_term!=post_term:
                            post_value+=self.spa.sources[post_name].parse(post_term).v
                    t+=numeric.array([pre*bb for bb in post_value])
            if pre_name in self.excite:
                # Self-excitation: scaled identity on the recurrent transform.
                t+=numeric.eye(len(t))*self.excite[pre_name]
        self.spa.net.connect('source_'+pre_name,'sink_'+post_name,transform=t)
    for i,(pre,post) in enumerate(self.ands):
        D=len(pre)
        aname='and%02d'%i
        self.net.make(aname,D*and_neurons,D)
        for j,p in enumerate(pre):
            # Route each antecedent term onto its own gate dimension.
            t=numeric.zeros((D,self.spa.sources[p[0]].dimensions),typecode='f')
            t[j,:]=self.spa.sources[p[0]].parse(p[1]).v*math.sqrt(D)
            self.spa.net.connect('source_'+p[0],self.name+'.'+aname,transform=t)
        def result(x,v=self.spa.sinks[post[0]].parse(post[1]).v):
            # Emit the consequent only when all gate inputs pass threshold.
            for xx in x:
                if xx<0.4: return [0]*len(v) #TODO: This is pretty arbitrary....
            return v
        self.spa.net.connect(self.name+'.'+aname,'sink_'+post[0],func=result)
def process_projection(self, p):
    """Classify one Nengo projection and record it.

    Unwraps array-exposed origins/terminations, then splits network-array
    endpoints into their per-node sub-projections.  FunctionInput sources
    are recorded as inputs; ensemble-to-ensemble links become Projection
    objects; anything else is reported as unknown.
    """
    origin = p.origin
    termination = p.termination
    # Unwrap to the underlying origin/termination if these are wrappers.
    if hasattr(origin, 'baseOrigin'):
        origin = origin.baseOrigin
    if hasattr(termination, 'baseTermination'):
        termination = termination.baseTermination
    pre = origin.node
    post = termination.node
    if isinstance(pre, ca.nengo.model.impl.NetworkArrayImpl) and isinstance(
            post, ca.nengo.model.impl.NetworkArrayImpl):
        # Array-to-array: slice the transform column-wise per source node.
        for term in termination.nodeTerminations:
            transform = np.array(term.transform)
            index = 0
            for i, n in enumerate(origin.nodeOrigins):
                t = transform[:, index:index + n.dimensions]
                index += n.dimensions
                if not iszero(t):
                    self.projections.append(
                        Projection(self, n, term, transform=t))
    elif isinstance(pre, ca.nengo.model.impl.NetworkArrayImpl):
        # Array source only: one sub-projection per source node.
        transform = np.array(termination.transform)
        index = 0
        for i, n in enumerate(origin.nodeOrigins):
            t = transform[:, index:index + n.dimensions]
            index += n.dimensions
            self.projections.append(
                Projection(self, n, termination, transform=t))
    elif isinstance(post, ca.nengo.model.impl.NetworkArrayImpl):
        # Array target only: one sub-projection per target node termination.
        for t in termination.nodeTerminations:
            if isinstance(pre, ca.nengo.model.impl.FunctionInput):
                self.inputs.append(str(self.populations[t.node].name))
            else:
                self.projections.append(Projection(self, origin, t))
    elif isinstance(pre, ca.nengo.model.impl.FunctionInput):
        self.inputs.append(str(self.populations[post].name))
    elif pre in self.populations and post in self.populations:
        self.projections.append(Projection(self, origin, termination))
    else:
        print 'WARNING: Unknown projection', p
        print '     pre: ', pre
        print '    post: ', post
def _init(self, dtype):
    """MachAr-based machine-limits probe for *dtype* (double-quoted variant).

    Fills eps/tiny/max/min, mantissa and exponent sizes, and preformatted
    string forms on self; returns self.  Raises ValueError for unsupported
    dtypes.
    """
    self.dtype = numeric.dtype(dtype)
    if dtype is ntypes.double:
        itype = ntypes.int64
        fmt = "%24.16e"
        precname = "double"
    elif dtype is ntypes.single:
        itype = ntypes.int32
        fmt = "%15.7e"
        precname = "single"
    elif dtype is ntypes.longdouble:
        itype = ntypes.longlong
        fmt = "%s"
        precname = "long double"
    elif dtype is ntypes.half:
        itype = ntypes.int16
        fmt = "%12.5e"
        precname = "half"
    else:
        raise ValueError(repr(dtype))
    # Callbacks convert between the float dtype and a same-width int type
    # so MachAr can probe the representation bit by bit.
    machar = MachAr(
        lambda v: array([v], dtype),
        lambda v: _frz(v.astype(itype))[0],
        lambda v: array(_frz(v)[0], dtype),
        lambda v: fmt % array(_frz(v)[0], dtype),
        "numpy %s precision floating point number" % precname,
    )
    # Integer properties copied directly; array properties unwrapped below.
    for word in ["precision", "iexp", "maxexp", "minexp", "negep", "machep"]:
        setattr(self, word, getattr(machar, word))
    for word in ["tiny", "resolution", "epsneg"]:
        setattr(self, word, getattr(machar, word).flat[0])
    self.max = machar.huge.flat[0]
    self.min = -self.max
    self.eps = machar.eps.flat[0]
    self.nexp = machar.iexp
    self.nmant = machar.it
    self.machar = machar
    self._str_tiny = machar._str_xmin.strip()
    self._str_max = machar._str_xmax.strip()
    self._str_epsneg = machar._str_epsneg.strip()
    self._str_eps = machar._str_eps.strip()
    self._str_resolution = machar._str_resolution.strip()
    return self
def output_transform(dimensions):
    """Build the inverse-DFT output transform for an FFT-based convolution.

    Only the first dimensions/2 + 1 complex coefficients are kept (the rest
    are conjugate-redundant for real signals); each kept coefficient expands
    into four rows matching the 4-way real/imag encoding used on the input
    side.  Relies on Python 2 integer division for the index arithmetic.
    Returns the transposed matrix.
    """
    ifft=np.array(discrete_fourier_transform_inverse(dimensions))
    def makeifftrow(D,i):
        # DC and Nyquist rows have no conjugate partner.
        if i==0 or i*2==D: return ifft[i]
        # Fold the conjugate-symmetric partner row into this one.
        if i<=D/2: return ifft[i]+ifft[-i].real-ifft[-i].imag*1j
        return np.zeros(dimensions)
    ifftm=np.array([makeifftrow(dimensions,i) for i in range(dimensions/2+1)])
    ifftm2=[]
    for i in range(dimensions/2+1):
        # Four rows per coefficient; signs/pairing presumably match the
        # 4-row real/imag layout produced by input_transform -- TODO confirm.
        ifftm2.append(ifftm[i].real)
        ifftm2.append(-ifftm[i].real)
        ifftm2.append(-ifftm[i].imag)
        ifftm2.append(-ifftm[i].imag)
    ifftm2=np.array(ifftm2)
    return ifftm2.T
def calc_input_transform(self,buffer,vocab):
    """Return the n_productions x dimensions LHS matrix for one buffer.

    Each production contributes the parsed vector of its LHS term for this
    buffer scaled by lhs_scale, or an all-zero row when the production has
    no LHS entry for the buffer.
    """
    rows = []
    for production in self.productions:
        term = production.lhs.get(buffer, None)
        if term is None:
            rows.append([0] * vocab.dimensions)
        else:
            rows.append(vocab.parse(term).v * production.lhs_scale)
    return numeric.array(rows)
def calc_output_gates(self,buffer,vocab):
    """Return a 1 x n_productions row of gate values for one buffer.

    A production whose RHS maps this buffer to True contributes -1 (gate
    opened); every other production contributes 0.  The ``== True``
    comparison is kept deliberately to preserve the original's equality
    (not identity) semantics.
    """
    gates = [[-1] if p.rhs.get(buffer, None) == True else [0]
             for p in self.productions]
    return numeric.array(gates).T
def input_transform(dimensions, first, invert=False):
    """Build the DFT input transform for one operand of a circular convolution.

    Each of the dimensions/2 + 1 retained complex coefficients expands to
    four rows.  *first* selects whether the operand fills the first or the
    second half of each concatenated row; *invert* uses the negated index
    (fft[-(i/4)]), which corresponds to correlation instead of convolution.
    ``i / 4`` relies on Python 2 integer division.
    """
    fft = array(discrete_fourier_transform(dimensions))
    M = []
    for i in range((dimensions / 2 + 1) * 4):
        if invert:
            row = fft[-(i / 4)]
        else:
            row = fft[i / 4]
        if first:
            # First operand: alternate real/imag every other row.
            if i % 2 == 0:
                row2 = array([row.real, zeros(dimensions)])
            else:
                row2 = array([row.imag, zeros(dimensions)])
        else:
            # Second operand: rows 0 and 3 of each group of four take the
            # real part, rows 1 and 2 the imaginary part.
            if i % 4 == 0 or i % 4 == 3:
                row2 = array([zeros(dimensions), row.real])
            else:
                row2 = array([zeros(dimensions), row.imag])
        M.extend(row2)
    return M
def rhs_route(self, source, sink, conv, weight):
    """Return a 1 x n_rules gate row for one routed RHS connection.

    An entry is -1 (gate opened) for each rule whose rhs_route maps
    (source, sink, conv) to exactly this *weight*, and 0 otherwise.

    FIX: removed the unused local ``vocab = self.spa.sinks[sink]`` from the
    original -- the sink vocabulary was looked up but never used.
    """
    gates = []
    for name in self.names:
        rule = self.rules[name]
        if rule.rhs_route.get((source, sink, conv), None) == weight:
            gates.append([-1])
        else:
            gates.append([0])
    return numeric.array(gates).T
def rhs_direct(self, sink_name):
    """Return the dimensions x n_rules matrix of direct RHS outputs.

    Each rule contributes one column: the parsed vector of its rhs_direct
    entry for this sink, or zeros when the rule has no entry.
    """
    vocab = self.spa.sinks[sink_name]
    zero = [0] * vocab.dimensions
    rows = []
    for name in self.names:
        spec = self.rules[name].rhs_direct.get(sink_name, None)
        rows.append(zero if spec is None else vocab.parse(spec).v)
    return numeric.array(rows).T
def rhs_route(self,source,sink,conv,weight):
    """Gate row (1 x n_rules): -1 where a rule routes (source, sink, conv)
    with exactly this weight, else 0.

    NOTE(review): ``vocab`` is unused; the lookup is kept so an unknown
    sink still raises KeyError exactly as before.
    """
    vocab = self.spa.sinks[sink]
    key = (source, sink, conv)
    gates = [[-1] if self.rules[name].rhs_route.get(key, None) == weight
             else [0]
             for name in self.names]
    return numeric.array(gates).T
def input_transform(dimensions,first,invert=False):
    """DFT input transform for one convolution operand (np-prefixed variant).

    Keeps dimensions/2 + 1 coefficients, four rows per coefficient; *first*
    chooses which half of each concatenated row the operand occupies, and
    *invert* indexes the DFT with -(i/4) (correlation).  Python 2 integer
    division is relied on for ``i/4``.
    """
    fft=np.array(discrete_fourier_transform(dimensions))
    M=[]
    for i in range((dimensions/2+1)*4):
        if invert:
            row=fft[-(i/4)]
        else:
            row=fft[i/4]
        if first:
            # First operand: real/imag alternate every other row.
            if i%2==0:
                row2=np.array([row.real,np.zeros(dimensions)])
            else:
                row2=np.array([row.imag,np.zeros(dimensions)])
        else:
            # Second operand: real on rows 0 and 3 of each group of four.
            if i%4==0 or i%4==3:
                row2=np.array([np.zeros(dimensions),row.real])
            else:
                row2=np.array([np.zeros(dimensions),row.imag])
        M.extend(row2)
    return M
def make(net, name='System', neurons=100, A=[[0]], tau_feedback=0.1):
    """Create a population approximating the linear system dx/dt = A x.

    The recurrent transform is A' = tau*A + I, the standard NEF conversion
    of continuous dynamics for a feedback synapse with time constant
    tau_feedback.  A must be a square matrix.
    """
    A = numeric.array(A)
    assert len(A.shape) == 2
    assert A.shape[0] == A.shape[1]
    dim = A.shape[0]
    population = net.make(name, neurons, dim)
    feedback = tau_feedback * A + numeric.identity(dim)
    net.connect(population, population, transform=feedback, pstc=tau_feedback)
def make(net, name='System', neurons=100, A=[[0]], tau_feedback=0.1):
    """Recurrent linear-system template.

    Connects a population back onto itself with transform tau*A + I so it
    approximates dx/dt = A x (NEF principle 3).  A must be square.
    """
    matrix = numeric.array(A)
    assert len(matrix.shape) == 2
    assert matrix.shape[0] == matrix.shape[1]
    n_dim = matrix.shape[0]
    state = net.make(name, neurons, n_dim)
    net.connect(state, state,
                transform=matrix * tau_feedback + numeric.identity(n_dim),
                pstc=tau_feedback)
def rhs_direct(self,sink_name):
    """dimensions x n_rules matrix of direct RHS outputs for *sink_name*.

    A rule's column is its parsed rhs_direct vector for the sink, or all
    zeros when the rule does not write this sink directly.
    """
    vocab = self.spa.sinks[sink_name]
    def column(rule):
        spec = rule.rhs_direct.get(sink_name, None)
        if spec is None:
            return [0] * vocab.dimensions
        return vocab.parse(spec).v
    return numeric.array([column(self.rules[n]) for n in self.names]).T
def add_input(self, origin, transform, learn=False):
    """Project *origin* into the basal-ganglia input populations.

    Creates three projections with the same transform scaled per target:
    (1+lg) into StrD1, (1-lg) into StrD2, and unscaled into STN.  Each
    projection is exposed on this sub-network and wired up at the SPA
    network level.  A None transform means nothing to connect.
    """
    if transform is None: return
    lg = self.get_param('lg')
    pstc_input = self.get_param('pstc_input')
    # Inputs coming from the match sub-network use a faster synapse.
    if self.match is not None and origin.node is self.match.net.network:
        pstc_input = pstc_input / 2
    o1, t1 = self.net.connect(origin,
                              self.net.network.getNode('StrD1'),
                              transform=(1 + lg) * numeric.array(transform),
                              pstc=pstc_input,
                              plastic_array=learn,
                              create_projection=False)
    tname = t1.name + '_D1'
    self.net.network.exposeTermination(t1, tname)
    self.spa.net.network.addProjection(
        o1, self.net.network.getTermination(tname))
    o1, t1 = self.net.connect(origin,
                              self.net.network.getNode('StrD2'),
                              transform=(1 - lg) * numeric.array(transform),
                              pstc=pstc_input,
                              plastic_array=learn,
                              create_projection=False)
    tname = t1.name + '_D2'
    self.net.network.exposeTermination(t1, tname)
    self.spa.net.network.addProjection(
        o1, self.net.network.getTermination(tname))
    o1, t1 = self.net.connect(origin,
                              self.net.network.getNode('STN'),
                              transform=transform,
                              pstc=pstc_input,
                              plastic_array=learn,
                              create_projection=False)
    tname = t1.name + '_STN'
    self.net.network.exposeTermination(t1, tname)
    self.spa.net.network.addProjection(
        o1, self.net.network.getTermination(tname))
def process_projection(self, p):
    """Classify one projection (compact duplicate of the formatted variant).

    Unwraps base origins/terminations, expands NetworkArray endpoints into
    per-node sub-projections (slicing the transform column blocks), records
    FunctionInput sources as inputs, and warns on anything unrecognized.
    """
    origin = p.origin
    termination = p.termination
    # Unwrap to the underlying origin/termination if these are wrappers.
    if hasattr(origin, 'baseOrigin'):
        origin = origin.baseOrigin
    if hasattr(termination, 'baseTermination'):
        termination = termination.baseTermination
    pre = origin.node
    post = termination.node
    if isinstance(pre, ca.nengo.model.impl.NetworkArrayImpl) and isinstance(post, ca.nengo.model.impl.NetworkArrayImpl):
        # Array-to-array: slice each node termination's transform by the
        # source node's dimension block; skip all-zero blocks.
        for term in termination.nodeTerminations:
            transform = np.array(term.transform)
            index = 0
            for i,n in enumerate(origin.nodeOrigins):
                t = transform[:,index:index+n.dimensions]
                index += n.dimensions
                if not iszero(t):
                    self.projections.append(Projection(self, n, term, transform=t))
    elif isinstance(pre, ca.nengo.model.impl.NetworkArrayImpl):
        # Array source only: one sub-projection per source node.
        transform = np.array(termination.transform)
        index = 0
        for i,n in enumerate(origin.nodeOrigins):
            t = transform[:,index:index+n.dimensions]
            index += n.dimensions
            self.projections.append(Projection(self, n, termination, transform=t))
    elif isinstance(post, ca.nengo.model.impl.NetworkArrayImpl):
        # Array target only: one entry per target node termination.
        for t in termination.nodeTerminations:
            if isinstance(pre, ca.nengo.model.impl.FunctionInput):
                self.inputs.append(str(self.populations[t.node].name))
            else:
                self.projections.append(Projection(self, origin, t))
    elif isinstance(pre, ca.nengo.model.impl.FunctionInput):
        self.inputs.append(str(self.populations[post].name))
    elif pre in self.populations and post in self.populations:
        self.projections.append(Projection(self, origin, termination))
    else:
        print 'WARNING: Unknown projection', p
        print '     pre: ',pre
        print '    post: ',post
def transform_to(self, other, keys=None):
    """Build the (other.dimensions x self.dimensions) matrix mapping this
    vocabulary into *other*.

    The result is the sum over the chosen keys of the outer product
    other[k] (x) self[k].  When *keys* is omitted, the union of both
    vocabularies' key lists is used (this vocabulary's keys first).
    """
    if keys is None:
        # Default: every key known to either vocabulary.
        keys = list(self.keys)
        for extra in other.keys:
            if extra not in keys:
                keys.append(extra)
    result = numeric.zeros((other.dimensions, self.dimensions), typecode='f')
    for key in keys:
        src = self[key].v
        dst = other[key].v
        result += array([src * component for component in dst])
    return result
def output_transform(dimensions):
    """Inverse-DFT output transform (bare-name duplicate of the np variant).

    Keeps dimensions/2 + 1 coefficients (conjugate symmetry for real
    signals), folds each coefficient's conjugate partner into its row, and
    expands every coefficient into four rows.  Python 2 integer division is
    relied on throughout.  Returns the transposed matrix.
    """
    ifft = array(discrete_fourier_transform_inverse(dimensions))
    def makeifftrow(D, i):
        # DC and Nyquist coefficients have no conjugate partner.
        if i == 0 or i * 2 == D: return ifft[i]
        # Fold in the conjugate-symmetric partner row.
        if i <= D / 2: return ifft[i] + ifft[-i].real - ifft[-i].imag * 1j
        return zeros(dimensions)
    ifftm = array(
        [makeifftrow(dimensions, i) for i in range(dimensions / 2 + 1)])
    ifftm2 = []
    for i in range(dimensions / 2 + 1):
        # Four rows per coefficient; signs/pairing presumably mirror the
        # four-row encoding used by input_transform -- TODO confirm.
        ifftm2.append(ifftm[i].real)
        ifftm2.append(-ifftm[i].real)
        ifftm2.append(-ifftm[i].imag)
        ifftm2.append(-ifftm[i].imag)
    ifftm2 = array(ifftm2)
    return ifftm2.T
def transform_to(self,other,keys=None):
    """Return sum_k outer(other[k], self[k]) as an
    (other.dimensions x self.dimensions) matrix.

    With *keys* omitted, uses this vocabulary's keys followed by any keys
    that only *other* knows about.
    """
    if keys is None:
        keys = list(self.keys)
        keys.extend(k for k in other.keys if k not in keys)
    total = numeric.zeros((other.dimensions, self.dimensions), typecode='f')
    for k in keys:
        source_vec = self[k].v
        target_vec = other[k].v
        # One outer-product contribution per shared key.
        total += array([source_vec * bb for bb in target_vec])
    return total
def __init__(self,
             name,
             dims,
             learners,
             trials_per_block=10,
             environments=[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1],
             learn=[
                 True, True, True, True, True, True, True, True, False,
                 False, False, False, False, False, False, False
             ],
             block_rewards=[[0.12, 0.72], [0.72, 0.12], [0.12, 0.72],
                            [0.72, 0.12], [0.12, 0.72], [0.72, 0.12],
                            [0.12, 0.72], [0.72, 0.12], [1.0, 0.0],
                            [1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0],
                            [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]):
    """Multi-environment bandit task node.

    *environments* selects which cortical context vector is active in each
    block, *learn* flags whether learning is enabled per block, and
    *block_rewards* gives the per-action reward probabilities (width must
    equal *dims*; one entry per block).
    NOTE: the default lists are mutable but are only read here.
    """
    # parameters
    self.dims = dims
    self.trials_per_block = trials_per_block
    if len(block_rewards[0]) != dims:
        raise Exception('block_reward dimensionality must match dims')
    if len(environments) != len(block_rewards):
        raise Exception('must specify environment for each block')
    self.block_rewards = block_rewards
    # vars and constants
    self.trial_num = 0
    self.delay_t = 0.0       # time spent in the 'delay' state
    self.approach_t = 0.0    # time spent in the 'approach' state
    self.reward_t = 0.0      # time spent in the 'reward' state
    self.reward = [0.0] * dims
    self.thalamus_sum = [0.0] * dims
    self.thalamus_choice = 0
    self.rewarded = 0
    self.reward_val = 1.0
    self.gate_val = [0.9]
    self.vstr_gate_val = [0.9]
    self.data_log = []
    self.learners = learners
    self.learn = learn
    # generate random cortical states, one unit vector per distinct
    # environment id (state_d is a module-level constant)
    self.environments = environments
    self.ctx_states = []
    for i in range(max(self.environments) + 1):
        state = array([random.gauss(0, 1) for i in range(state_d)])
        state /= norm(state)
        self.ctx_states.append(state)
    self.ctx_state = self.ctx_states[self.environments[0]]
    self.state = 'delay'
    nef.SimpleNode.__init__(self, name)
def connect(self, lg=0.2, pstc_input=0.002, verbose=False, N_match=150, pstc_match=0.002):
    """Wire rule LHS inputs into the basal-ganglia populations.

    For every pair of sources compared by a match rule, builds a 2-D
    product array (encoders on the diagonals) fed by interleaved dimensions
    of the two sources, and projects the product into StrD1/StrD2/STN with
    the usual (1+lg)/(1-lg)/1 scaling.  Then connects each plain source via
    its LHS transform.
    """
    if verbose: print '  parsing rules'
    self.rules.initialize(self.spa)
    # Store rules in the documentation comment for this network for use in the interactive mode view
    # TODO: Figure out a different way to do this, as this line is pretty much the only Nengo-specific
    #       bit of code in here.
    self.net.network.documentation = 'BG: ' + ','.join(self.rules.names)
    for (a,b) in self.rules.get_lhs_matches():
        t=self.rules.lhs_match(a,b)
        name='match_%s_%s'%(a,b)
        vocab1 = self.spa.sources[a]
        vocab2 = self.spa.sources[b]
        assert vocab1==vocab2  # matched sources must share a vocabulary
        dim = vocab1.dimensions
        # 2-D sub-ensembles with diagonal encoders, suited to computing x*y.
        self.net.make_array(name,N_match,dim,dimensions=2,encoders=[[1,1],[1,-1],[-1,-1],[-1,1]],radius=1.4)
        # Interleave the two sources onto even/odd input dimensions.
        t1=numeric.zeros((dim*2,dim),typecode='f')
        t2=numeric.zeros((dim*2,dim),typecode='f')
        for i in range(dim):
            t1[i*2,i]=1.0
            t2[i*2+1,i]=1.0
        self.spa.net.connect('source_'+a, self.name+'.'+name, transform=t1, pstc=pstc_match)
        self.spa.net.connect('source_'+b, self.name+'.'+name, transform=t2, pstc=pstc_match)
        # Replicate the rule row for every product dimension.
        transform=numeric.array([t for i in range(dim)]).T
        def product(x):
            return x[0]*x[1]
        self.net.connect(name, 'StrD1', transform=(1+lg)*transform, pstc=pstc_input, func=product)
        self.net.connect(name, 'StrD2', transform=(1-lg)*transform, pstc=pstc_input, func=product)
        self.net.connect(name, 'STN', transform=transform, pstc=pstc_input, func=product)
    # TODO: add support for matches (do this with a subnetwork, not a separate module)
    #if len(self.rules.get_lhs_matches())>0:
    #    self.match=spa.match.Match(self,pstc_match=self.p.pstc_input/2)
    #    self.spa.add_module(self.name+'_match',self.match,create=True,connect=True)
    for source in self.spa.sources.keys():
        if verbose: print '  connecting core inputs from',source
        transform=self.rules.lhs(source)
        if transform is None: continue
        # Standard striatal scaling: D1 gets (1+lg), D2 gets (1-lg).
        self.spa.net.connect('source_'+source, self.name+'.StrD1',
                             transform=(1+lg)*transform, pstc=pstc_input)
        self.spa.net.connect('source_'+source, self.name+'.StrD2',
                             transform=(1-lg)*transform, pstc=pstc_input)
        self.spa.net.connect('source_'+source, self.name+'.STN',
                             transform=transform, pstc=pstc_input)
def add(self,key,v):
    """Add HRR *v* to the vocabulary as *key* (compact duplicate).

    Appends to self.keys, grows the self.vectors matrix by one row, and --
    if pair tracking is active -- records the binding of the new key with
    every existing key.  Raises TypeError for non-HRR arguments.
    """
    # Perform checks
    if(isinstance(v,HRR)):
        self.hrr[key] = v
        self.keys.append(key)
        if self.vectors is None:
            # First entry starts the matrix.
            self.vectors=numeric.array([self.hrr[key].v])
        else:
            # Grow by one row, then write the new vector into it.
            self.vectors=numeric.resize(self.vectors,(len(self.keys),self.dimensions))
            self.vectors[-1,:]=self.hrr[key].v
        # Generate vector pairs
        if(self.include_pairs or self.vector_pairs is not None):
            for k in self.keys[:-1]:
                self.key_pairs.append('%s*%s'%(k,key))
                # NOTE: `v` is reused for the bound vector; the argument is
                # already stored in self.hrr[key].
                v=(self.hrr[k]*self.hrr[key]).v
                if self.vector_pairs is None:
                    self.vector_pairs=numeric.array([v])
                else:
                    self.vector_pairs=numeric.resize(self.vector_pairs,(len(self.key_pairs),self.dimensions))
                    self.vector_pairs[-1,:]=v
    else:
        raise TypeError('hrr.Vocabulary.add() Type error: Argument provided not of HRR type')
def add_projection(self, origin, dim, target, transform, tau, weights=False):
    """Record one projection entry per source dimension of *origin*.

    Each non-zero column i of *transform* appends (target, column, tau) to
    self.projections under the key '<origin>.<i>'.  When *weights* is True
    the column must be uniform and negative (an inhibitory gate): the
    target name gets a '*' suffix and only the first element is kept.

    FIX: the original mutated the ``target`` parameter inside the loop, so
    a second non-zero weighted column was recorded as 'name**', a third as
    'name***', and so on; the suffix is now applied to a per-iteration
    local instead.
    """
    matrix = numeric.array(transform)  # hoisted out of the loop
    for i in range(dim):
        name = '%s.%d' % (origin, i)
        if name not in self.projections:
            self.projections[name] = []
        trans = matrix[:, i]
        if is_zero(trans):
            continue
        dest = target
        if weights:
            # Inhibitory weight columns must be uniform and negative.
            assert max(trans) == min(trans)
            assert max(trans) < 0
            dest = target + '*'
            trans = trans[:1]
        self.projections[name].append(('%s' % dest, trans, tau))
def calc_input_same_transform(self,buffer,vocab):
    """Column (n_productions x 1) of scalar LHS weights for one buffer.

    Mapping per production: True -> +lhs_scale, False -> -lhs_scale, a
    number -> number*lhs_scale, anything else (including no entry) -> 0.
    The True/False identity checks come first on purpose, since bools are
    also instances of int.
    """
    def weight(production):
        entry = production.lhs.get(buffer, None)
        if entry is True:
            return 1 * production.lhs_scale
        if entry is False:
            return -1 * production.lhs_scale
        if isinstance(entry, (float, int)):
            return entry * production.lhs_scale
        return 0
    return numeric.array([[weight(p)] for p in self.productions])
def handle_projection(origin, termination, data, prefix, transform_start=None, transform_end=None):
    """Recursively resolve one origin->termination projection down to
    concrete ensembles and record it via add_projection.

    transform_start/transform_end, when given, select the column slice of
    a decoded termination's transform that belongs to the current
    sub-origin of a network array.
    """
    node1 = origin.node
    node2 = termination.node
    # Unwrap network-level origins/terminations until both ends refer to
    # nodes that are not themselves networks.
    while node1 in data.networks:
        origin = origin.getWrappedOrigin()
        node1 = origin.node
    while node2 in data.networks:
        termination = termination.getWrappedTermination()
        node2 = termination.node
    if node1 in data.inputs:
        # Projections originating from external inputs are ignored here.
        pass
    elif node1 in data.arrays:
        # Source is a network array: recurse once per sub-origin, tracking
        # which columns of the downstream transform each one occupies.
        start=0
        for o in origin.getWrappedOrigin().getNodeOrigins():
            end = start + o.dimensions
            handle_projection(o, termination, data, prefix, transform_start=start, transform_end=end)
            start += o.dimensions
    elif node2 in data.arrays:
        # Target is a network array: recurse once per sub-termination,
        # keeping the same column slice.
        for t in termination.getWrappedTermination().getNodeTerminations():
            handle_projection(origin, t, data, prefix, transform_start=transform_start, transform_end=transform_end)
    elif node1 in data.ensembles and node2 in data.ensembles:
        # Base case: a direct ensemble-to-ensemble projection.
        if termination.__class__.__name__ in ['DecodedTermination']:
            trans = numeric.array(termination.transform)
            # Keep only the columns driven by this particular sub-origin.
            if transform_start is not None:
                trans = trans[:,transform_start:transform_end]
            data.population[data.ensembles[node1]].add_projection(origin.name, origin.dimensions, data.ensembles[node2], trans, termination.tau)
        elif termination.__class__.__name__ in ['EnsembleTermination']:
            # Neuron-level termination: gather the raw per-neuron weight
            # rows and flag them with weights=True for add_projection.
            weights = numeric.array([t.weights for t in termination.getNodeTerminations()])
            data.population[data.ensembles[node1]].add_projection(origin.name, origin.dimensions, data.ensembles[node2], weights, termination.tau, weights=True)
        else:
            print 'Unknown projection', prefix, node1, node2
    else:
        print 'Unknown projection', prefix, node1, node2
def connect(self):
    """Wire the vision pathway: integrator feedback, thresholding, cleanup,
    and projection of the cleaned-up activity onto the direction semantic
    pointers."""
    # The integrator drives the threshold and recurrently sustains itself.
    self.net.connect('Vision Integrator', 'Threshold')
    self.net.connect('Vision Integrator', 'Vision Integrator')
    # Make sure the direction semantic pointers exist in this module's
    # vocabulary before reading them out.
    vocab = self.spa.sources[self.name]
    vocab.parse('RIGHT+LEFT+FWD+BACK')
    # Columns of sp_transform are the preferred-direction vectors.
    sp_transform = np.array(
        [vocab[item].v.tolist() for item in ['RIGHT', 'LEFT', 'FWD', 'BACK']]).T
    self.net.connect('Threshold', 'Cleanup',
                     transform=[[0, 1], [-1, 0], [1, 0], [-1, 0]])
    self.net.connect('Cleanup', 'Visual SP', transform=sp_transform)
def __init__(self, name, lambd=lambd, kappa=kappa, rho=rho, D=1):
    """Controller node with D-dimensional state inputs and outputs.

    Gain defaults come from the module-level lambd/kappa/rho constants.
    """
    nef.Node.__init__(self, name)
    # Controller gains.
    self.lambd = lambd
    self.kappa = kappa
    self.rho = rho
    # State inputs: position, velocity, acceleration, and desired position.
    self.x = self.make_input('x', dimensions=D)
    self.dx = self.make_input('dx', dimensions=D)
    self.ddx = self.make_input('ddx', dimensions=D)
    self.x_desired = self.make_input('desired', dimensions=D)
    # Outputs: control signal u, signal s, and the 3-element parameter
    # vector a (exposed as output 'a').
    self.u = self.make_output('u', dimensions=D)
    self.s = self.make_output('s', dimensions=D)
    self.a_val = self.make_output('a', dimensions=3)
    # Internal 3-element parameter estimate; presumably updated by the
    # node's tick/step method elsewhere — TODO confirm.
    self.a = np.array([0.0, 0.0, 0.0])
def __new__(subtype, data, dtype=None, copy=True):
    """Construct a matrix from an existing matrix, an ndarray, a string,
    or any array-like, coercing the result to 2 dimensions.

    :param data: source object; strings go through _convert_from_string
    :param dtype: target dtype, or None to keep the source's dtype
    :param copy: if False, reuse the source's data where possible
    :raises ValueError: if the data has more than 2 dimensions
    """
    # Already a matrix: return it (or a dtype-converted copy) directly.
    if isinstance(data, matrix):
        dtype2 = data.dtype
        if (dtype is None):
            dtype = dtype2
        # Same dtype and no copy requested: hand back the original object.
        if (dtype2 == dtype) and (not copy):
            return data
        return data.astype(dtype)

    # Plain ndarray: re-view it as this subtype, converting/copying only
    # when the dtype differs or a copy was requested.
    if isinstance(data, N.ndarray):
        if dtype is None:
            intype = data.dtype
        else:
            intype = N.dtype(dtype)
        new = data.view(subtype)
        if intype != data.dtype:
            return new.astype(intype)
        if copy:
            return new.copy()
        else:
            return new

    # MATLAB-style string input, e.g. '1 2; 3 4'.
    if isinstance(data, str):
        data = _convert_from_string(data)

    # now convert data to an array
    arr = N.array(data, dtype=dtype, copy=copy)
    ndim = arr.ndim
    shape = arr.shape
    if (ndim > 2):
        # Python 2 raise syntax; this block predates Python 3.
        raise ValueError, "matrix must be 2-dimensional"
    elif ndim == 0:
        # Scalars become 1x1; 1-D input becomes a single row.
        shape = (1, 1)
    elif ndim == 1:
        shape = (1, shape[0])

    # Preserve Fortran (column-major) layout when the source had it.
    order = False
    if (ndim == 2) and arr.flags.fortran:
        order = True

    # ndarray.__new__ needs a contiguous buffer to wrap.
    if not (order or arr.flags.contiguous):
        arr = arr.copy()

    ret = N.ndarray.__new__(subtype, shape, arr.dtype,
                            buffer=arr,
                            order=order)
    return ret
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays; inputs that
    already have one or more dimensions are preserved (no copy is made
    unless conversion requires one).

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([ 1.])

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]

    """
    converted = [array(ary, copy=False, subok=True, ndmin=1) for ary in arys]
    return converted[0] if len(converted) == 1 else converted
def output_transform(Ashape, Bshape, FFTshape):
    """Build the linear transform taking frequency-domain coefficients back
    to the centered am x an spatial output.

    Bshape is unused here; presumably kept so the signature mirrors
    input_transform — TODO confirm.
    """
    am, an = Ashape
    M, N = FFTshape
    # Inverse-DFT matrices for the row and column dimensions.
    ZM = DFTinverse(M)
    ZN = DFTinverse(N)
    # Offsets that center the am x an region inside the padded M x N grid.
    row_off = (M - am) // 2
    col_off = (N - an) // 2
    W = unpaddedTransform(ZM[row_off:row_off + am, :],
                          ZN[:, col_off:col_off + an])
    # Four rows per frequency bin, matching the 4-phase component layout
    # produced by input_transform.
    # NOTE(review): the pattern is (+real, -real, -imag, -imag) — confirm
    # the repeated -imag (rather than +imag/-imag) is intentional.
    rows = []
    for j in range(M * N):
        col = W[:, j]
        rows.extend([col.real, -col.real, -col.imag, -col.imag])
    return array(rows).T
def generate_pairs(self):
    """Build the bound-pair keys and vectors for an existing vocabulary.

    This mirrors the pair bookkeeping that add() performs incrementally,
    but runs in a single pass over the current keys without adding any new
    base vectors. It is intended for vocabularies created without
    include_pairs that later need the pairs (for graphing in interactive
    plots, for example). Any previously stored pairs are discarded first.
    """
    self.key_pairs = []
    self.vector_pairs = None
    # For every unordered pair (first, second) with first added earlier,
    # record the key 'first*second' and the bound vector.
    for idx in range(1, len(self.keys)):
        second = self.keys[idx]
        for first in self.keys[:idx]:
            self.key_pairs.append('%s*%s' % (first, second))
            bound = (self.hrr[first] * self.hrr[second]).v
            if self.vector_pairs is None:
                self.vector_pairs = numeric.array([bound])
            else:
                self.vector_pairs = numeric.resize(
                    self.vector_pairs,
                    (len(self.key_pairs), self.dimensions))
                self.vector_pairs[-1, :] = bound
def connect(self):
    """Create and wire a product network for every lhs match pair of the
    basal-ganglia rules, then feed the summed match values into the BG.

    For each matched source pair (a,b), a network computes the dot product
    of the two source vectors (either directly in 'direct' mode when
    match_neurons is 0, or with an array of 2-D product ensembles) and its
    output is added as a BG input weighted by the match transform t.
    """
    self.bg.rules.initialize(self.spa)
    N=self.p.match_neurons
    for (a,b) in self.bg.rules.get_lhs_matches():
        t=self.bg.rules.lhs_match(a,b)
        name='%s_%s'%(a,b)
        dim=self.spa.sources[a].dimensions
        if N==0:
            # Direct mode: a single non-neural node computes the full dot
            # product over the interleaved 2*dim input.
            m=self.net.make(name,1,dim*2,quick=True,mode='direct')
            def dotproduct(x):
                # NOTE: len(x)/2 relies on Python 2 integer division.
                return sum([x[2*i]*x[2*i+1] for i in range(len(x)/2)])
            funcs=[nef.functions.PythonFunction(dotproduct)]
            m.addDecodedOrigin('product',funcs,'AXON')
        else:
            # Neural mode: dim 2-D ensembles, each computing one pairwise
            # product. Diagonal encoders suit the product function; radius
            # 1.4 is presumably ~sqrt(2) to cover the unit square — TODO
            # confirm.
            m=self.net.make_array(name,N,dim,dimensions=2,encoders=[[1,1],[1,-1],[-1,-1],[-1,1]],quick=True,radius=1.4,storage_code="%d")
            def product(x):
                return x[0]*x[1]
            m.addDecodedOrigin('product',[nef.functions.PythonFunction(product,dim)],'AXON')
        self.net.network.exposeOrigin(m.getOrigin('product'),name)
        # Interleave the two dim-D sources into dim 2-D pairs: even
        # components from source a, odd components from source b.
        t1=numeric.zeros((dim*2,dim),typecode='f')
        t2=numeric.zeros((dim*2,dim),typecode='f')
        for i in range(dim):
            t1[i*2,i]=1.0
            t2[i*2+1,i]=1.0
        va=self.spa.vocab(a)
        vb=self.spa.vocab(b)
        # If the sources use different vocabularies, remap b's vectors
        # into a's vocabulary before comparing.
        if va is not vb:
            t2=numeric.dot(t2,vb.transform_to(va))
        m.addDecodedTermination(a,t1,self.p.pstc_match,False)
        m.addDecodedTermination(b,t2,self.p.pstc_match,False)
        self.net.network.exposeTermination(m.getTermination(a),name+'_1')
        self.net.network.exposeTermination(m.getTermination(b),name+'_2')
        self.spa.net.connect(self.spa.sources[a],self.net.network.getTermination(name+'_1'))
        self.spa.net.connect(self.spa.sources[b],self.net.network.getTermination(name+'_2'))
        # Direct mode emits a single scalar; neural mode emits one product
        # per dimension, each weighted by the same match row t.
        if N==0:
            transform=[t for i in range(1)]
        else:
            transform=[t for i in range(dim)]
        self.bg.add_input(self.net.network.getOrigin(name),numeric.array(transform).T)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``. Copies are avoided where
        possible.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[ 3.]])

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]

    """
    promoted = [array(ary, copy=False, subok=True, ndmin=2) for ary in arys]
    return promoted[0] if len(promoted) == 1 else promoted
def add_projection(self, origin, dim, target, transform, tau, weights=False):
    """Record projections from each dimension of `origin` onto `target`.

    Dimension i is keyed as '<origin>.<i>'; its entry holds the target
    name, the i-th column of `transform`, and the time constant `tau`.
    Columns that are entirely zero are not recorded. With weights=True the
    column must be uniform and negative; it is collapsed to one value and
    the target name is suffixed with '*'.

    NOTE(review): this appears to duplicate an identical add_projection
    defined earlier in this file — consider consolidating.
    """
    for idx in range(dim):
        key = '%s.%d' % (origin, idx)
        if key not in self.projections:
            self.projections[key] = []
        col = numeric.array(transform)[:, idx]
        if not is_zero(col):
            if weights:
                # Uniform negative weight expected for raw-weight columns.
                assert max(col) == min(col)
                assert max(col) < 0
                target = target + '*'
                col = col[:1]
            self.projections[key].append(('%s' % target, col, tau))
def lhs(self, source_name):
    """Assemble the left-hand-side weight matrix for `source_name`.

    Returns one row per rule: string entries are parsed against the
    source's vocabulary and scaled by the rule's scale; rules that do not
    mention the source contribute an all-zero row. Returns None when no
    rule mentions the source at all.

    Raises Exception if two rules supply rows of different lengths.
    """
    rows = []
    width = None
    for rule_name in self.names:
        rule = self.rules[rule_name]
        entry = rule.lhs.get(source_name, None)
        if isinstance(entry, str):
            # Symbolic entry: turn it into a scaled vocabulary vector.
            vocab = self.spa.sources[source_name]
            entry = vocab.parse(entry).v * rule.scale
        rows.append(entry)
        if entry is not None:
            if width is None:
                width = len(entry)
            elif len(entry) != width:
                raise Exception(
                    'Rows of different lengths connecting from %s'
                    % source_name)
    if width is None:
        # No rule refers to this source.
        return None
    # Fill in zero rows for rules that skipped this source.
    rows = [row if row is not None else [0] * width for row in rows]
    return numeric.array(rows)