Code Example #1
File: cleanup.py Project: mcchong/nengo_1.4
    def complete(self,
                 N_per_D=30,
                 scaling=1,
                 min_intercept=0.1,
                 mutual_inhibit=0,
                 feedback=0,
                 pstc_feedback=0.01):
        vocab = self.spa.sources[self.name]

        self.net.make_array('cleanup',
                            50,
                            len(vocab.keys),
                            intercept=(min_intercept, 1),
                            encoders=[[1]])
        transform = [vocab.parse(k).v for k in vocab.keys]
        self.net.connect('input', 'cleanup', transform=transform, pstc=0.001)

        t = numeric.zeros((vocab.dimensions, len(vocab.keys)), typecode='f')
        for i in range(len(vocab.keys)):
            t[:, i] += vocab.parse(vocab.keys[i]).v * scaling
        self.net.connect('cleanup', 'output', transform=t,
                         pstc=0.001)  #, func=lambda x: 1)

        if mutual_inhibit != 0 or feedback != 0:
            t = (numeric.eye(len(vocab.keys)) - 1) * mutual_inhibit
            t += numeric.eye(len(vocab.keys)) * feedback
            self.net.connect('cleanup',
                             'cleanup',
                             transform=t,
                             pstc=pstc_feedback)
Code Example #2
File: integrator.py Project: tbekolay/nef_jython
def make(net, name='Integrator', neurons=100, dimensions=1, tau_feedback=0.1, tau_input=0.01, scale=1):
    if (dimensions<8):
        integrator=net.make(name,neurons,dimensions)
    else:
        integrator=net.make_array(name, int(neurons/dimensions),dimensions, quick=True)
    net.connect(integrator,integrator,pstc=tau_feedback)
    integrator.addDecodedTermination('input',numeric.eye(dimensions)*tau_feedback*scale,tau_input,False)
    if net.network.getMetaData("integrator") == None:
        net.network.setMetaData("integrator", HashMap())
    integrators = net.network.getMetaData("integrator")

    integrator=HashMap(6)
    integrator.put("name", name)
    integrator.put("neurons", neurons)
    integrator.put("dimensions", dimensions)
    integrator.put("tau_feedback", tau_feedback)
    integrator.put("tau_input", tau_input)
    integrator.put("scale", scale)

    integrators.put(name, integrator)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(name)

    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(name, name)

Code Example #3
File: latch.py Project: w6hu/nengo
    def create(self,dimensions,N_per_D=30,pstc_feedback=0.01,latch_inhibit=2,
               pstc_latch_inhibit=0.006,neurons_detect=100,latch_detect_threshold=0.7,use_array=True,
               pstc_latch=0.01,feedback=1,compete=0,input_weight=1,subdimensions=None):

        #TODO: support subdimensions
        input=self.net.make('input',N_per_D*dimensions,dimensions,quick=True)
        if use_array:
            buffer=self.net.make_array('buffer',N_per_D,dimensions,encoders=[[1]],intercept=(0,1),quick=True)
        else:    
            buffer=self.net.make('buffer',N_per_D*dimensions,dimensions,encoders=numeric.eye(dimensions),intercept=(0,1),quick=True)
        feed=self.net.make('feedback',N_per_D*dimensions,dimensions,quick=True)
        feed.addTermination('gate',[[-latch_inhibit]]*feed.neurons,pstc_latch_inhibit,False)
        detect=self.net.make('detect',neurons_detect,1,intercept=(latch_detect_threshold,1),encoders=[[1]],quick=True)
        def length(x):
            s=sum([xx*xx for xx in x])
            return math.sqrt(s)
        self.net.connect(input,detect,func=length,pstc=pstc_latch)
        self.net.connect(detect,feed.getTermination('gate'))
        self.net.connect(buffer,feed,pstc=pstc_latch)
        self.net.connect(input,buffer,pstc=pstc_latch,weight=input_weight)

        t=numeric.eye(dimensions)*feedback
        if compete>0:
            t2=numeric.eye(dimensions)-1
            t2=t2*(compete*feedback)
            t=t+t2
        self.net.connect(feed,buffer,pstc=pstc_latch,transform=t)

        self.add_source(buffer.getOrigin('X'))
        self.add_sink(input)
Code Example #4
    def create(self,
               rule_neurons=40,
               rule_threshold=0.2,
               bg_output_weight=-3,
               pstc_output=0.015,
               mutual_inhibit=1,
               pstc_inhibit=0.008,
               pstc_to_gate=0.002,
               pstc_gate=0.008,
               N_per_D=30,
               pstc_route_input=0.002,
               pstc_route_output=0.002,
               neurons_gate=25,
               route_scale=1,
               pstc_input=0.01):
        D = self.bg.rules.rule_count

        self.bias = self.net.make_input('bias', [1])
        self.rules = self.net.make_array('rules',
                                         rule_neurons,
                                         D,
                                         intercept=(rule_threshold, 1),
                                         encoders=[[1]],
                                         quick=True,
                                         storage_code="%d")
        self.net.connect(self.bias, self.rules)

        self.net.network.exposeOrigin(self.rules.getOrigin('X'), 'rules')

        if mutual_inhibit > 0:
            self.net.connect(self.rules,
                             self.rules, (numeric.eye(D) - 1) * mutual_inhibit,
                             pstc=pstc_inhibit)

        spa.view.rule_watch.add(self.net.network, self.bg.rules.names)
Code Example #5
File: cleanup.py Project: Elhamahm/nengo_1.4
    def complete(self, N_per_D=30, scaling=1, min_intercept=0.1, mutual_inhibit=0, feedback=0, pstc_feedback=0.01):
        vocab=self.spa.sources[self.name]

        self.net.make_array('cleanup', 50, len(vocab.keys), intercept=(min_intercept,1), encoders=[[1]])
        transform=[vocab.parse(k).v for k in vocab.keys]
        self.net.connect('input','cleanup',transform=transform, pstc=0.001)

        t=numeric.zeros((vocab.dimensions,len(vocab.keys)),typecode='f')
        for i in range(len(vocab.keys)):
            t[:,i]+=vocab.parse(vocab.keys[i]).v*scaling
        self.net.connect('cleanup','output',transform=t, pstc=0.001)#, func=lambda x: 1)

        if mutual_inhibit!=0 or feedback!=0:
            t=(numeric.eye(len(vocab.keys))-1)*mutual_inhibit
            t+=numeric.eye(len(vocab.keys))*feedback
            self.net.connect('cleanup','cleanup',transform=t, pstc=pstc_feedback)
Code Example #6
File: basalganglia.py Project: Elhamahm/nengo_1.4
def make(net,name='Basal Ganglia', dimensions=1, neurons=100, pstc=0.01, netbg=None, same_neurons=True, tau_ampa=0.002, tau_gaba=0.008, radius=1.5):

    if netbg is None:
        netbg=nef.Network(name)
    input=netbg.make('input',1,dimensions,quick=True,mode='direct')
    output=netbg.make('output',1,dimensions,quick=True,mode='direct')
    nps.basalganglia.make_basal_ganglia(netbg,input,output, dimensions=dimensions, neurons=neurons, same_neurons=same_neurons, tau_ampa=0.002, tau_gaba=0.008, radius=radius)

    input.addDecodedTermination('input',numeric.eye(dimensions),pstc,False)
    netbg.network.exposeTermination(input.getTermination('input'),'input')
    netbg.network.exposeOrigin(output.getOrigin('X'),'output')
    
    if net is not None:
        net.add(netbg.network)
    
        if net.network.getMetaData("BasalGanglia") == None:
            net.network.setMetaData("BasalGanglia", HashMap())
        bgs = net.network.getMetaData("BasalGanglia")

        bg=HashMap(5)
        bg.put("name", name)
        bg.put("dimensions", dimensions)
        bg.put("neurons", neurons)
        bg.put("pstc", pstc)
        bg.put("same_neurons", same_neurons)

        bgs.put(name, bg)

        if net.network.getMetaData("templates") == None:
            net.network.setMetaData("templates", ArrayList())
        templates = net.network.getMetaData("templates")
        templates.add(name)

    return netbg
Code Example #7
File: integrator.py Project: hunse/nengo_1.4
def make(net,name='Integrator',neurons=100,dimensions=1,tau_feedback=0.1,tau_input=0.01,scale=1):
    if (dimensions<8):
        integrator=net.make(name,neurons,dimensions)
    else:
        integrator=net.make_array(name, int(neurons/dimensions),dimensions, quick=True)
    net.connect(integrator,integrator,pstc=tau_feedback)
    integrator.addDecodedTermination('input',numeric.eye(dimensions)*tau_feedback*scale,tau_input,False)
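
For context, a minimal usage sketch of this template from a Nengo 1.4 Jython script; the `integrator` module name is an assumption (import whatever file the template above lives in), and the rest uses only the standard `nef` scripting API:

import nef
import integrator  # assumption: the template file shown above is importable under this name

net = nef.Network('Integrator demo')
drive = net.make_input('drive', [0.5])              # constant drive signal
integrator.make(net, name='Integrator', neurons=300,
                dimensions=1, tau_feedback=0.1)     # builds the ensemble plus its recurrent connection
integ = net.get('Integrator')
net.connect(drive, integ.getTermination('input'))   # feed the drive into the scaled 'input' termination
net.add_to_nengo()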
Code Example #8
def connect(net,A,B,transform=None):
    dartboard=DartboardConnection('Dartboard:%s:%s'%(A.name,B.name))
    o,t=net.connect(A,B,transform=transform,weight_func=lambda e,d: dartboard.calc_weights(e,d),create_projection=False)
    net.add(dartboard)
    net.connect(o,dartboard.getTermination('input'))
    B.removeTermination(t.name)
    t2=B.addTermination(t.name,numeric.eye(dartboard.N2),t.tau,False)
    net.connect(dartboard.getOrigin('output'),t2)
Code Example #9
File: flow.py Project: Sophrinix/nengo
    def create(self,net,N=50,dimensions=8,randomize=False):
        vocab={}
        for k in self.nodes.keys():
            node=net.get(k,None)
            if node is None:
                dim=dimensions
                if randomize is False and len(self.nodes[k])+1>dim:
                    dim=len(self.nodes[k])+1
                node=net.make_array(k,N,dim)
            if not hrr.Vocabulary.registered.has_key(id(node)):
                v=hrr.Vocabulary(node.dimension,randomize=randomize)
                v.register(node)
            vocab[k]=hrr.Vocabulary.registered[id(node)]

        # ensure all terms are parsed before starting
        for k,v in self.connect.items():
            pre_name,post_name=k
            for pre_term,post_term in v:
                pre=vocab[pre_name].parse(pre_term).v
                post=vocab[post_name].parse(post_term).v
        
        for k,v in self.connect.items():
            pre_name,post_name=k
            
            t=numeric.zeros((vocab[post_name].dimensions,vocab[pre_name].dimensions),typecode='f')
            for pre_term,post_term in v:
                pre=vocab[pre_name].parse(pre_term).v
                post=vocab[post_name].parse(post_term).v
                t+=numeric.array([pre*bb for bb in post])

            if pre_name==post_name:         
                if pre_name in self.inhibit:
                    for pre_term in vocab[pre_name].keys:
                        pre=vocab[pre_name].parse(pre_term).v*self.inhibit[pre_name]
                        post_value=numeric.zeros(vocab[post_name].dimensions,typecode='f')
                        for post_term in vocab[pre_name].keys:
                            if pre_term!=post_term:
                                post_value+=vocab[post_name].parse(post_term).v
                        t+=numeric.array([pre*bb for bb in post_value])
                if pre_name in self.excite:
                    t+=numeric.eye(len(t))*self.excite[pre_name]
                    
            net.connect(net.get(pre_name),net.get(post_name),transform=t)    
        
        for i,(pre,post) in enumerate(self.ands):
            D=len(pre)
            node=net.make('and%02d'%i,D*N,D)
            for j,p in enumerate(pre):
                t=numeric.zeros((D,vocab[p[0]].dimensions),typecode='f')
                t[j,:]=vocab[p[0]].parse(p[1]).v*math.sqrt(D)
                net.connect(net.get(p[0]),node,transform=t)                
            def result(x,v=vocab[post[0]].parse(post[1]).v):
                for xx in x:
                    if xx<0.4: return [0]*len(v)  #TODO: This is pretty arbitrary....
                return v
            net.connect(node,net.get(post[0]),func=result)    
                
        return net    
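
The transform accumulated in the loops above is a sum of outer products between post and pre vocabulary vectors; a minimal NumPy sketch of the same construction, for illustration only (NumPy stands in for Nengo's bundled `numeric` module, and the helper name is made up here):

import numpy as np

def association_transform(pairs, post_dims, pre_dims):
    """Sum of outer products mapping each pre vector onto its paired post vector."""
    t = np.zeros((post_dims, pre_dims), dtype='f')
    for pre_vec, post_vec in pairs:
        t += np.outer(post_vec, pre_vec)   # row i is pre_vec scaled by post_vec[i]
    return t

# A unit-length pre vector `a` is mapped onto its paired post vector `b`:
a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
print(association_transform([(a, b)], 3, 3).dot(a))   # -> [0. 1. 0.]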
Code Example #10
File: thalamus.py Project: Elhamahm/nengo_1.4
    def init(self, rule_neurons=40, rule_threshold=0.2, mutual_inhibit=1, pstc_mutual=0.008):
        D = self.bg.rules.count()

        self.net.make_input("bias", [1])
        self.net.make_array("rules", rule_neurons, D, intercept=(rule_threshold, 1), encoders=[[1]])
        self.net.connect("bias", "rules")

        if mutual_inhibit > 0:
            self.net.connect("rules", "rules", (np.eye(D) - 1) * mutual_inhibit, pstc=pstc_mutual)
Code Example #11
def connect(net, A, B, transform=None):
    dartboard = DartboardConnection('Dartboard:%s:%s' % (A.name, B.name))
    o, t = net.connect(A,
                       B,
                       transform=transform,
                       weight_func=lambda e, d: dartboard.calc_weights(e, d),
                       create_projection=False)
    net.add(dartboard)
    net.connect(o, dartboard.getTermination('input'))
    B.removeTermination(t.name)
    t2 = B.addTermination(t.name, numeric.eye(dartboard.N2), t.tau, False)
    net.connect(dartboard.getOrigin('output'), t2)
Code Example #12
File: hrr.py Project: mcchong/nengo_1.4
    def __init__(self,dimensions,randomize=True,unitary=False,max_similarity=0.1,include_pairs=False):
        self.dimensions=dimensions
        self.randomize=randomize
        self.unitary=unitary
        self.max_similarity=max_similarity
        self.hrr={}
        self.hrr['I']=HRR(data=numeric.eye(dimensions)[0])
        self.keys=[]
        self.key_pairs=[]
        self.vectors=None
        self.vector_pairs=None
        self.include_pairs=include_pairs
        Vocabulary.defaults[dimensions]=self
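
The identity symbol 'I' stored above is the first standard basis vector, `numeric.eye(dimensions)[0]`. A short NumPy sketch (illustration only, not part of the hrr module) of why that vector acts as the identity under circular convolution, the HRR binding operation:

import numpy as np

def cconv(a, b):
    """Circular convolution via FFT: the binding operation used by HRRs."""
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))

d = 16
identity = np.eye(d)[0]                     # [1, 0, 0, ...] as in the constructor above
v = np.random.randn(d)
print(np.allclose(cconv(v, identity), v))   # True: binding with 'I' leaves v unchanged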
Code Example #13
def make(net,
         name='Basal Ganglia',
         dimensions=1,
         neurons=100,
         pstc=0.01,
         netbg=None,
         same_neurons=True,
         tau_ampa=0.002,
         tau_gaba=0.008,
         radius=1.5):

    if netbg is None:
        netbg = nef.Network(name)
    input = netbg.make('input', 1, dimensions, quick=True, mode='direct')
    output = netbg.make('output', 1, dimensions, quick=True, mode='direct')
    nps.basalganglia.make_basal_ganglia(netbg,
                                        input,
                                        output,
                                        dimensions=dimensions,
                                        neurons=neurons,
                                        same_neurons=same_neurons,
                                        tau_ampa=0.002,
                                        tau_gaba=0.008,
                                        radius=radius)

    input.addDecodedTermination('input', numeric.eye(dimensions), pstc, False)
    netbg.network.exposeTermination(input.getTermination('input'), 'input')
    netbg.network.exposeOrigin(output.getOrigin('X'), 'output')

    if net is not None:
        net.add(netbg.network)

        if net.network.getMetaData("BasalGanglia") == None:
            net.network.setMetaData("BasalGanglia", HashMap())
        bgs = net.network.getMetaData("BasalGanglia")

        bg = HashMap(5)
        bg.put("name", name)
        bg.put("dimensions", dimensions)
        bg.put("neurons", neurons)
        bg.put("pstc", pstc)
        bg.put("same_neurons", same_neurons)

        bgs.put(name, bg)

        if net.network.getMetaData("templates") == None:
            net.network.setMetaData("templates", ArrayList())
        templates = net.network.getMetaData("templates")
        templates.add(name)

    return netbg
Code Example #14
File: thalamus.py Project: cognitiongroup/nengo
    def connect(self, weight_GPi=-3, pstc_GPi=0.008, pstc_output=0.01, neurons_gate=40, gate_threshold=0.3, pstc_to_gate=0.002, pstc_gate=0.008, channel_N_per_D=50, pstc_channel=0.01):
        self.bg.rules.initialize(self.spa)

        # Store rules in the documentation comment for this network for use in the interactive mode view    
        self.net.network.documentation = 'THAL: ' + ','.join(self.bg.rules.names)
                
        self.spa.net.connect(self.bg.name+'.GPi', self.name+'.rule', weight=weight_GPi, pstc=pstc_GPi, func=self.bg.get_output_function())


        # make direct outputs
        for name in self.spa.sinks.keys():
            t=self.bg.rules.rhs_direct(name)
            if t is not None:
                self.spa.net.connect(self.name+'.rule', 'sink_'+name, t, pstc_output)

        # make gated outputs
        for source, sink, conv, weight in self.bg.rules.get_rhs_routes():
            t=self.bg.rules.rhs_route(source,sink,conv, weight)
            
            gname='gate_%s_%s'%(source,sink)
            if weight!=1: gname+='(%1.1f)'%weight
            gname=gname.replace('.','_')
            
            self.net.make(gname, neurons_gate, 1, encoders=[[1]], intercept=(gate_threshold, 1))
            self.net.connect('rule', gname, transform=t, pstc=pstc_to_gate)
            self.net.connect('bias', gname)
            
            cname='channel_%s_%s'%(source,sink)
            if weight!=1: cname+='(%1.1f)'%weight
            cname=cname.replace('.','_')
            
            vocab1=self.spa.sources[source]
            vocab2=self.spa.sinks[sink]
            
            self.net.make(cname, channel_N_per_D*vocab2.dimensions, vocab2.dimensions)
            
            if vocab1 is vocab2: 
                transform=None            
            else:
                transform=vocab1.transform_to(vocab2)
                
            if conv is None:
                transform2=np.eye(vocab2.dimensions)*weight
            else:
                transform2=vocab2.parse(conv).get_transform_matrix()*weight    
                
            self.spa.net.connect('source_'+source, self.name+'.'+cname, transform=transform, pstc=pstc_channel)        
            self.spa.net.connect(self.name+'.'+cname, 'sink_'+sink, pstc=pstc_channel, transform=transform2)
            
        
            self.net.connect(gname, cname, encoders=-10, pstc=pstc_gate)
Code Example #15
File: basalganglia.py Project: hunse/nengo_1.4
def make(net,name='Basal Ganglia',dimensions=1,pstc=0.01,netbg=None,same_neurons=True):

    if netbg is None:
        netbg=nef.Network(name)
    input=netbg.make('input',1,dimensions,quick=True,mode='direct')
    output=netbg.make('output',1,dimensions,quick=True,mode='direct')
    nps.basalganglia.make_basal_ganglia(netbg,input,output,dimensions,same_neurons=same_neurons)

    input.addDecodedTermination('input',numeric.eye(dimensions),pstc,False)
    netbg.network.exposeTermination(input.getTermination('input'),'input')
    netbg.network.exposeOrigin(output.getOrigin('X'),'output')
    
    if net is not None:
        net.add(netbg.network)
Code Example #16
File: thalamus.py Project: Sophrinix/nengo
    def create(self,rule_neurons=40,rule_threshold=0.2,bg_output_weight=-3,
               pstc_output=0.015,mutual_inhibit=1,pstc_inhibit=0.008,
               pstc_to_gate=0.002,pstc_gate=0.008,N_per_D=30,
               pstc_route_input=0.002,pstc_route_output=0.002,neurons_gate=25,
               route_scale=1,pstc_input=0.01):
        D=self.bg.rules.rule_count
        
        self.bias=self.net.make_input('bias',[1])
        self.rules=self.net.make_array('rules',rule_neurons,D,intercept=(rule_threshold,1),encoders=[[1]],quick=True,storage_code="%d")
        self.net.connect(self.bias,self.rules)



        self.net.network.exposeOrigin(self.rules.getOrigin('X'),'rules')

        if mutual_inhibit>0:
            self.net.connect(self.rules,self.rules,(numeric.eye(D)-1)*mutual_inhibit,pstc=pstc_inhibit)
Code Example #17
File: flow.py Project: mcchong/nengo_1.4
    def connect(self, and_neurons=50):

        # ensure all terms are parsed before starting
        for k,v in self.connections.items():
            pre_name,post_name=k
            for pre_term,post_term in v:
                pre=self.spa.sources[pre_name].parse(pre_term).v
                post=self.spa.sinks[post_name].parse(post_term).v
        
        for k,v in self.connections.items():
            pre_name,post_name=k
            
            t=numeric.zeros((self.spa.sinks[post_name].dimensions,self.spa.sources[pre_name].dimensions),typecode='f')
            for pre_term,post_term in v:
                pre=self.spa.sources[pre_name].parse(pre_term).v
                post=self.spa.sinks[post_name].parse(post_term).v
                t+=numeric.array([pre*bb for bb in post])

            if pre_name==post_name:         
                if pre_name in self.inhibit:
                    for pre_term in self.spa.sources[pre_name].keys:
                        pre=self.spa.sources[pre_name].parse(pre_term).v*self.inhibit[pre_name]
                        post_value=numeric.zeros(self.spa.sources[post_name].dimensions,typecode='f')
                        for post_term in self.spa.sources[pre_name].keys:
                            if pre_term!=post_term:
                                post_value+=self.spa.sources[post_name].parse(post_term).v
                        t+=numeric.array([pre*bb for bb in post_value])
                if pre_name in self.excite:
                    t+=numeric.eye(len(t))*self.excite[pre_name]
                    
            self.spa.net.connect('source_'+pre_name,'sink_'+post_name,transform=t)    
        
        for i,(pre,post) in enumerate(self.ands):
            D=len(pre)
            aname='and%02d'%i
            self.net.make(aname,D*and_neurons,D)
            for j,p in enumerate(pre):
                t=numeric.zeros((D,self.spa.sources[p[0]].dimensions),typecode='f')
                t[j,:]=self.spa.sources[p[0]].parse(p[1]).v*math.sqrt(D)
                self.spa.net.connect('source_'+p[0],self.name+'.'+aname,transform=t)                
            def result(x,v=self.spa.sinks[post[0]].parse(post[1]).v):
                for xx in x:
                    if xx<0.4: return [0]*len(v)  #TODO: This is pretty arbitrary....
                return v
            self.spa.net.connect(self.name+'.'+aname,'sink_'+post[0],func=result)    
Code Example #18
File: integrator.py Project: w6hu/nengo
def make(net,
         name='Integrator',
         neurons=100,
         dimensions=1,
         tau_feedback=0.1,
         tau_input=0.01,
         scale=1):
    if (dimensions < 8):
        integrator = net.make(name, neurons, dimensions)
    else:
        integrator = net.make_array(name,
                                    int(neurons / dimensions),
                                    dimensions,
                                    quick=True)
    net.connect(integrator, integrator, pstc=tau_feedback)
    integrator.addDecodedTermination(
        'input',
        numeric.eye(dimensions) * tau_feedback * scale, tau_input, False)
Code Example #19
File: thalamus.py Project: mcchong/nengo_1.4
    def init(self,
             rule_neurons=40,
             rule_threshold=0.2,
             mutual_inhibit=1,
             pstc_mutual=0.008):
        D = self.bg.rules.count()

        self.net.make_input('bias', [1])
        self.net.make_array('rule',
                            rule_neurons,
                            D,
                            intercept=(rule_threshold, 1),
                            encoders=[[1]])
        self.net.connect('bias', 'rule')

        if mutual_inhibit > 0:
            self.net.connect('rule',
                             'rule', (np.eye(D) - 1) * mutual_inhibit,
                             pstc=pstc_mutual)
Code Example #20
File: thalamus.py Project: Elhamahm/nengo_1.4
def make(net,name='Network Array', neurons=50, dimensions=2, inhib_scale=3, tau_inhib=.005, useQuick=True, mutual_inhib = False, mutual_inhib_weight = 1, pstc_mutual_inhib = 0.008):
    thalamus = net.make_array(name, neurons, dimensions, max_rate=(100,300), intercept=(-1, 0), radius=1, encoders=[[1]], quick=useQuick)    

    # setup inhibitory scaling matrix
    inhib_scaling_matrix = [[0]*dimensions for i in range(dimensions)]
    for i in range(dimensions):
        inhib_scaling_matrix[i][i] = -inhib_scale
    # setup inhibitory matrix
    inhib_matrix = []
    for i in range(dimensions):
        inhib_matrix_part = [[inhib_scaling_matrix[i]] * neurons]
        inhib_matrix.append(inhib_matrix_part[0])

    thalamus.addTermination('bg_input', inhib_matrix, tau_inhib, False)

    def addOne(x):
        return [x[0]+1]            
    net.connect(thalamus, None, func=addOne, origin_name='xBiased', create_projection=False)

    if mutual_inhib:
        net.connect(thalamus.getOrigin("xBiased"), thalamus, (numeric.eye(dimensions)-1) * mutual_inhib_weight, pstc = pstc_mutual_inhib)
    
    if net.network.getMetaData("Thalamus") == None:
        net.network.setMetaData("Thalamus", HashMap())
    thals = net.network.getMetaData("Thalamus")

    thal=HashMap(6)
    thal.put("name", name)
    thal.put("neurons", neurons)
    thal.put("dimensions", dimensions)
    thal.put("inhib_scale", inhib_scale)
    thal.put("tau_inhib", tau_inhib)
    thal.put("useQuick", useQuick)

    thals.put(name, thal)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(name)

    return thalamus
Code Example #21
File: integrator.py Project: mcchong/nengo_1.4
def make(net,
         name='Integrator',
         neurons=100,
         dimensions=1,
         tau_feedback=0.1,
         tau_input=0.01,
         scale=1):
    if (dimensions < 8):
        integrator = net.make(name, neurons, dimensions)
    else:
        integrator = net.make_array(name,
                                    int(neurons / dimensions),
                                    dimensions,
                                    quick=True)
    net.connect(integrator, integrator, pstc=tau_feedback)
    integrator.addDecodedTermination(
        'input',
        numeric.eye(dimensions) * tau_feedback * scale, tau_input, False)
    if net.network.getMetaData("integrator") == None:
        net.network.setMetaData("integrator", HashMap())
    integrators = net.network.getMetaData("integrator")

    integrator = HashMap(6)
    integrator.put("name", name)
    integrator.put("neurons", neurons)
    integrator.put("dimensions", dimensions)
    integrator.put("tau_feedback", tau_feedback)
    integrator.put("tau_input", tau_input)
    integrator.put("scale", scale)

    integrators.put(name, integrator)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(name)

    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(name, name)
Code Example #22
def make(net,errName='error', N_err=50, preName='pre', postName='post', rate=5e-7):

    # get pre and post ensembles from their names
    pre = net.network.getNode(preName)
    post = net.network.getNode(postName)
    
    # modulatory termination (find unused termination)
    count=0
    while 'mod_%02d'%count in [t.name for t in post.terminations]:
        count=count+1
    modname = 'mod_%02d'%count
    post.addDecodedTermination(modname, numeric.eye(post.dimension), 0.005, True)
    
    # random weight matrix to initialize projection from pre to post
    def rand_weights(w):
        for i in range(len(w)):
            for j in range(len(w[0])):
                w[i][j] = random.uniform(-1e-3,1e-3)
        return w
    weight = rand_weights(numeric.zeros((post.neurons, pre.neurons)).tolist())
    
    # non-decoded termination (to learn transformation)
    count = 0
    prename = pre.getName()
    while '%s_%02d'%(prename,count) in [t.name for t in post.terminations]:
        count=count+1
    prename = '%s_%02d'%(prename, count)

    post.addPESTermination(prename, weight, 0.005, False)
    
    # Create error ensemble
    error = net.make(errName, N_err, post.dimension)
    
    # Add projections
    net.connect(error.getOrigin('X'),post.getTermination(modname))
    net.connect(pre.getOrigin('AXON'),post.getTermination(prename))

    # Set learning rule on the non-decoded termination
    net.learn(post,prename,modname,rate=rate)
Code Example #23
def make(net,
         name='Basal Ganglia',
         dimensions=1,
         pstc=0.01,
         netbg=None,
         same_neurons=True):

    if netbg is None:
        netbg = nef.Network(name)
    input = netbg.make('input', 1, dimensions, quick=True, mode='direct')
    output = netbg.make('output', 1, dimensions, quick=True, mode='direct')
    nps.basalganglia.make_basal_ganglia(netbg,
                                        input,
                                        output,
                                        dimensions,
                                        same_neurons=same_neurons)

    input.addDecodedTermination('input', numeric.eye(dimensions), pstc, False)
    netbg.network.exposeTermination(input.getTermination('input'), 'input')
    netbg.network.exposeOrigin(output.getOrigin('X'), 'output')

    if net is not None:
        net.add(netbg.network)
Code Example #24
File: thalamus.py Project: Elhamahm/nengo_1.4
    def connect(
        self,
        weight_GPi=-3,
        pstc_GPi=0.008,
        pstc_output=0.01,
        neurons_gate=40,
        gate_threshold=0.3,
        pstc_to_gate=0.002,
        pstc_gate=0.008,
        channel_N_per_D=30,
        pstc_channel=0.01,
        array_dimensions=16,
        verbose=False,
    ):
        self.bg.rules.initialize(self.spa)

        # Store rules in the documentation comment for this network for use in the interactive mode view
        self.net.network.documentation = "THAL: " + ",".join(self.bg.rules.names)

        self.spa.net.connect(
            self.bg.name + ".GPi",
            self.name + ".rules",
            weight=weight_GPi,
            pstc=pstc_GPi,
            func=self.bg.get_output_function(),
        )

        if verbose:
            print "  making direct connections to:"
        # make direct outputs
        for name in self.spa.sinks.keys():
            if verbose:
                print "      " + name
            t = self.bg.rules.rhs_direct(name)
            if t is not None:
                self.spa.net.connect(self.name + ".rules", "sink_" + name, t, pstc_output)

        used_names = []
        if verbose:
            print "  making gated connections:"
        # make gated outputs
        for source, sink, conv, weight in self.bg.rules.get_rhs_routes():
            t = self.bg.rules.rhs_route(source, sink, conv, weight)
            if verbose:
                print "      %s->%s" % (source, sink)

            index = 0
            name = "%s_%s" % (source, sink)
            if weight != 1:
                name += "(%1.1f)" % weight
                name = name.replace(".", "_")
            while name in used_names:
                index += 1
                name = "%s_%s_%d" % (source, sink, index)
                if weight != 1:
                    name += "(%1.1f)" % weight
                    name = name.replace(".", "_")
            used_names.append(name)

            gname = "gate_%s" % (name)

            self.net.make(gname, neurons_gate, 1, encoders=[[1]], intercept=(gate_threshold, 1))
            self.net.connect("rules", gname, transform=t, pstc=pstc_to_gate)
            self.net.connect("bias", gname)

            cname = "channel_%s" % (name)

            vocab1 = self.spa.sources[source]
            vocab2 = self.spa.sinks[sink]

            if array_dimensions is None:
                self.net.make(cname, channel_N_per_D * vocab2.dimensions, vocab2.dimensions)
            else:
                self.net.make_array(
                    cname,
                    channel_N_per_D * array_dimensions,
                    length=vocab2.dimensions / array_dimensions,
                    dimensions=array_dimensions,
                )

            if vocab1 is vocab2:
                transform = None
            else:
                transform = vocab1.transform_to(vocab2)

            if conv is None:
                transform2 = np.eye(vocab2.dimensions) * weight
            else:
                transform2 = vocab2.parse(conv).get_transform_matrix() * weight

            self.spa.net.connect("source_" + source, self.name + "." + cname, transform=transform, pstc=pstc_channel)
            self.spa.net.connect(self.name + "." + cname, "sink_" + sink, pstc=pstc_channel, transform=transform2)

            self.net.connect(gname, cname, encoders=-10, pstc=pstc_gate)

        for source, sink, conv, weight in self.bg.rules.get_rhs_route_convs():
            t = self.bg.rules.rhs_route_conv(source, sink, conv, weight)
            if verbose:
                print "      %s*%s->%s" % (source, conv, sink)

            index = 0
            name = "%s_%s_%s" % (source, conv, sink)
            if weight != 1:
                name += "(%1.1f)" % weight
                name = name.replace(".", "_")
            while name in used_names:
                index += 1
                name = "%s_%s_%d" % (source, sink, index)
                if weight != 1:
                    name += "(%1.1f)" % weight
                    name = name.replace(".", "_")
            used_names.append(name)

            gname = "gate_%s" % (name)

            self.net.make(gname, neurons_gate, 1, encoders=[[1]], intercept=(gate_threshold, 1))
            self.net.connect("rules", gname, transform=t * 2, pstc=pstc_to_gate)
            self.net.connect("bias", gname)

            cname = "channel_%s" % (name)

            inv1 = False
            if source[0] == "~":
                source = source[1:]
                inv1 = True
            inv2 = False
            if conv[0] == "~":
                conv = conv[1:]
                inv2 = True

            vocab1 = self.spa.sources[source]
            vocab2 = self.spa.sources[conv]
            vocab3 = self.spa.sinks[sink]

            assert vocab1 == vocab2
            assert vocab1 == vocab3

            convolution.connect(
                self.spa.net,
                self.name + "." + cname,
                vocab1.dimensions,
                "source_" + source,
                "source_" + conv,
                "sink_" + sink,
                invert1=inv1,
                invert2=inv2,
            )

            self.net.connect(gname, cname, encoders=-10, pstc=pstc_gate)
Code Example #25
def make(net,errName='error', N_err=50, preName='pre', postName='post', rate=5e-4, oja=False):

    # get pre and post ensembles from their names
    pre = net.network.getNode(preName)
    post = net.network.getNode(postName)
    
    # modulatory termination (find unused termination)
    count=0
    while 'mod_%02d'%count in [t.name for t in post.terminations]:
        count=count+1
    modname = 'mod_%02d'%count
    post.addDecodedTermination(modname, numeric.eye(post.dimension), 0.005, True)
    
    # random weight matrix to initialize projection from pre to post
    def rand_weights(w):
        for i in range(len(w)):
            for j in range(len(w[0])):
                w[i][j] = random.uniform(-1e-3,1e-3)
        return w
    weight = rand_weights(numeric.zeros((post.neurons, pre.neurons)).tolist())
    
    # non-decoded termination (to learn transformation)
    count = 0
    prename = pre.getName()
    while '%s_%02d'%(prename,count) in [t.name for t in post.terminations]:
        count=count+1
    prename = '%s_%02d'%(prename, count)

    post.addPESTermination(prename, weight, 0.005, False)
    
    # Create error ensemble
    try:
        net.get(errName) # if it already exists
    except StructuralException:
        net.make(errName, N_err, post.dimension)

    # Add projections
    net.connect(errName, post.getTermination(modname))
    net.connect(pre.getOrigin('AXON'),post.getTermination(prename))

    # Set learning rule on the non-decoded termination
    net.learn(post,prename,modname,rate=rate,oja=oja)

    if net.network.getMetaData("learnedterm") == None:
        net.network.setMetaData("learnedterm", HashMap())
    learnedterms = net.network.getMetaData("learnedterm")

    learnedterm=HashMap(5)
    learnedterm.put("errName", errName)
    learnedterm.put("N_err", N_err)
    learnedterm.put("preName", preName)
    learnedterm.put("postName", postName)
    learnedterm.put("rate", rate)

    learnedterms.put(errName, learnedterm)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(errName)

    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(errName, postName)
    templateproj.put(preName, postName)
Code Example #26
    def add_mutual_inhibition(self,weight=1):
        N=self.production_count
        self.net.connect('prod','prod',(numeric.eye(N)-1)*weight)
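
The `(numeric.eye(N) - 1) * weight` expression used here (and in the cleanup and thalamus examples above) builds a mutual-inhibition transform: zero on the diagonal, -weight between every pair of distinct elements. A minimal NumPy sketch for illustration:

import numpy as np

def mutual_inhibition(n, weight=1.0):
    """No self-connection; -weight between every pair of distinct elements."""
    return (np.eye(n) - 1) * weight

print(mutual_inhibition(3, 2.0))
# [[ 0. -2. -2.]
#  [-2.  0. -2.]
#  [-2. -2.  0.]]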
Code Example #27
def make(net,
         errName='error',
         N_err=50,
         preName='pre',
         postName='post',
         rate=5e-4,
         oja=False):

    # get pre and post ensembles from their names
    pre = net.network.getNode(preName)
    post = net.network.getNode(postName)

    # modulatory termination (find unused termination)
    count = 0
    while 'mod_%02d' % count in [t.name for t in post.terminations]:
        count = count + 1
    modname = 'mod_%02d' % count
    post.addDecodedTermination(modname, numeric.eye(post.dimension), 0.005,
                               True)

    # random weight matrix to initialize projection from pre to post
    def rand_weights(w):
        for i in range(len(w)):
            for j in range(len(w[0])):
                w[i][j] = random.uniform(-1e-3, 1e-3)
        return w

    weight = rand_weights(numeric.zeros((post.neurons, pre.neurons)).tolist())

    # non-decoded termination (to learn transformation)
    count = 0
    prename = pre.getName()
    while '%s_%02d' % (prename, count) in [t.name for t in post.terminations]:
        count = count + 1
    prename = '%s_%02d' % (prename, count)

    post.addPESTermination(prename, weight, 0.005, False)

    # Create error ensemble
    try:
        net.get(errName)  # if it already exists
    except StructuralException:
        net.make(errName, N_err, post.dimension)

    # Add projections
    net.connect(errName, post.getTermination(modname))
    net.connect(pre.getOrigin('AXON'), post.getTermination(prename))

    # Set learning rule on the non-decoded termination
    net.learn(post, prename, modname, rate=rate, oja=oja)

    if net.network.getMetaData("learnedterm") == None:
        net.network.setMetaData("learnedterm", HashMap())
    learnedterms = net.network.getMetaData("learnedterm")

    learnedterm = HashMap(5)
    learnedterm.put("errName", errName)
    learnedterm.put("N_err", N_err)
    learnedterm.put("preName", preName)
    learnedterm.put("postName", postName)
    learnedterm.put("rate", rate)

    learnedterms.put(errName, learnedterm)

    if net.network.getMetaData("templates") == None:
        net.network.setMetaData("templates", ArrayList())
    templates = net.network.getMetaData("templates")
    templates.add(errName)

    if net.network.getMetaData("templateProjections") == None:
        net.network.setMetaData("templateProjections", HashMap())
    templateproj = net.network.getMetaData("templateProjections")
    templateproj.put(errName, postName)
    templateproj.put(preName, postName)
Code Example #28
File: thalamus.py Project: mcchong/nengo_1.4
    def connect(self,
                weight_GPi=-3,
                pstc_GPi=0.008,
                pstc_output=0.01,
                neurons_gate=40,
                gate_threshold=0.3,
                pstc_to_gate=0.002,
                pstc_gate=0.008,
                channel_N_per_D=50,
                pstc_channel=0.01):
        self.bg.rules.initialize(self.spa)

        # Store rules in the documentation comment for this network for use in the interactive mode view
        self.net.network.documentation = 'THAL: ' + ','.join(
            self.bg.rules.names)

        self.spa.net.connect(self.bg.name + '.GPi',
                             self.name + '.rule',
                             weight=weight_GPi,
                             pstc=pstc_GPi,
                             func=self.bg.get_output_function())

        # make direct outputs
        for name in self.spa.sinks.keys():
            t = self.bg.rules.rhs_direct(name)
            if t is not None:
                self.spa.net.connect(self.name + '.rule', 'sink_' + name, t,
                                     pstc_output)

        # make gated outputs
        for source, sink, conv, weight in self.bg.rules.get_rhs_routes():
            t = self.bg.rules.rhs_route(source, sink, conv, weight)

            gname = 'gate_%s_%s' % (source, sink)
            if weight != 1: gname += '(%1.1f)' % weight
            gname = gname.replace('.', '_')

            self.net.make(gname,
                          neurons_gate,
                          1,
                          encoders=[[1]],
                          intercept=(gate_threshold, 1))
            self.net.connect('rule', gname, transform=t, pstc=pstc_to_gate)
            self.net.connect('bias', gname)

            cname = 'channel_%s_%s' % (source, sink)
            if weight != 1: cname += '(%1.1f)' % weight
            cname = cname.replace('.', '_')

            vocab1 = self.spa.sources[source]
            vocab2 = self.spa.sinks[sink]

            self.net.make(cname, channel_N_per_D * vocab2.dimensions,
                          vocab2.dimensions)

            if vocab1 is vocab2:
                transform = None
            else:
                transform = vocab1.transform_to(vocab2)

            if conv is None:
                transform2 = np.eye(vocab2.dimensions) * weight
            else:
                transform2 = vocab2.parse(conv).get_transform_matrix() * weight

            self.spa.net.connect('source_' + source,
                                 self.name + '.' + cname,
                                 transform=transform,
                                 pstc=pstc_channel)
            self.spa.net.connect(self.name + '.' + cname,
                                 'sink_' + sink,
                                 pstc=pstc_channel,
                                 transform=transform2)

            self.net.connect(gname, cname, encoders=-10, pstc=pstc_gate)
Code Example #29
    def create(self, net, N=50, dimensions=8, randomize=False):
        vocab = {}
        for k in self.nodes.keys():
            node = net.get(k, None)
            if node is None:
                dim = dimensions
                if randomize is False and len(self.nodes[k]) + 1 > dim:
                    dim = len(self.nodes[k]) + 1
                node = net.make_array(k, N, dim)
            if not hrr.Vocabulary.registered.has_key(id(node)):
                v = hrr.Vocabulary(node.dimension, randomize=randomize)
                v.register(node)
            vocab[k] = hrr.Vocabulary.registered[id(node)]

        # ensure all terms are parsed before starting
        for k, v in self.connect.items():
            pre_name, post_name = k
            for pre_term, post_term in v:
                pre = vocab[pre_name].parse(pre_term).v
                post = vocab[post_name].parse(post_term).v

        for k, v in self.connect.items():
            pre_name, post_name = k

            t = numeric.zeros(
                (vocab[post_name].dimensions, vocab[pre_name].dimensions),
                typecode='f')
            for pre_term, post_term in v:
                pre = vocab[pre_name].parse(pre_term).v
                post = vocab[post_name].parse(post_term).v
                t += numeric.array([pre * bb for bb in post])

            if pre_name == post_name:
                if pre_name in self.inhibit:
                    for pre_term in vocab[pre_name].keys:
                        pre = vocab[pre_name].parse(
                            pre_term).v * self.inhibit[pre_name]
                        post_value = numeric.zeros(vocab[post_name].dimensions,
                                                   typecode='f')
                        for post_term in vocab[pre_name].keys:
                            if pre_term != post_term:
                                post_value += vocab[post_name].parse(
                                    post_term).v
                        t += numeric.array([pre * bb for bb in post_value])
                if pre_name in self.excite:
                    t += numeric.eye(len(t)) * self.excite[pre_name]

            net.connect(net.get(pre_name), net.get(post_name), transform=t)

        for i, (pre, post) in enumerate(self.ands):
            D = len(pre)
            node = net.make('and%02d' % i, D * N, D)
            for j, p in enumerate(pre):
                t = numeric.zeros((D, vocab[p[0]].dimensions), typecode='f')
                t[j, :] = vocab[p[0]].parse(p[1]).v * math.sqrt(D)
                net.connect(net.get(p[0]), node, transform=t)

            def result(x, v=vocab[post[0]].parse(post[1]).v):
                for xx in x:
                    if xx < 0.4:
                        return [0] * len(
                            v)  #TODO: This is pretty arbitrary....
                return v

            net.connect(node, net.get(post[0]), func=result)

        return net