예제 #1
0
파일: array.py 프로젝트: hunse/nengo_1.4
 def __init__(self,name,nodes):
     """Build a network wrapping an array of NEF ensembles.

     The combined network exposes an 'X' Origin that concatenates the
     'X' Origins of every internal node, so the array can be used like a
     single large ensemble.  Instances are normally produced via
     :func:`nef.Network.make_array()`; e.g. 50 ensembles of 1000 neurons,
     each representing 10 dimensions, yields a 500-dimensional array::

       net=nef.Network('Example Array')
       A=net.make_array('A',neurons=1000,length=50,dimensions=10,quick=True)

     Nonlinear functions may only be computed within a single internal
     ensemble, as per NEF theory.

     :param string name: the name of the NetworkArray to create
     :param nodes: the nodes to combine together
     :type nodes: list of NEFEnsembles
     """
     NetworkImpl.__init__(self)
     self.name=name
     self._nodes=nodes
     self._origins={}
     # Total represented dimension: every node contributes nodes[0].dimension
     # values to the concatenated output.
     node_dim=nodes[0].dimension
     self.dimension=len(nodes)*node_dim
     total_neurons=0
     for node in nodes:
         self.addNode(node)
         total_neurons+=node.neurons
     self.neurons=total_neurons
     self.multidimensional=node_dim>1
     self.createEnsembleOrigin('X')
     self.setUseGPU(True)
예제 #2
0
 def __init__(self, name, nodes):
     """Create a network holding an array of nodes.  An 'X' Origin
     is automatically created which concatenates the values of each
     internal element's 'X' Origin.
     
     This object is meant to be created using :func:`nef.Network.make_array()`, allowing for the
     efficient creation of neural groups that can represent large vectors.  For example, the
     following code creates a NetworkArray consisting of 50 ensembles of 1000 neurons, each of 
     which represents 10 dimensions, resulting in a total of 500 dimensions represented::
     
       net=nef.Network('Example Array')
       A=net.make_array('A',neurons=1000,length=50,dimensions=10,quick=True)
       
     The resulting NetworkArray object can be treated like a normal ensemble, except for the
     fact that when computing nonlinear functions, you cannot use values from different
     ensembles in the computation, as per NEF theory.
     
     :param string name: the name of the NetworkArray to create
     :param nodes: the nodes to combine together
     :type nodes: list of NEFEnsembles
     """
     NetworkImpl.__init__(self)
     self.name = name
     # Overall dimension is the concatenation of all node outputs.
     # Assumes every node has the same dimension as nodes[0] -- TODO confirm.
     self.dimension = len(nodes) * nodes[0].dimension
     self._nodes = nodes
     self._origins = {}
     self.neurons = 0
     # Register each node with the parent network and tally neuron counts.
     for n in nodes:
         self.addNode(n)
         self.neurons += n.neurons
     # Whether each internal element represents more than one dimension.
     self.multidimensional = nodes[0].dimension > 1
     # Expose a combined 'X' origin concatenating the nodes' 'X' origins.
     self.createEnsembleOrigin('X')
     self.setUseGPU(True)
예제 #3
0
파일: motor_node.py 프로젝트: mirror/spaun
    def __init__(self, name = "MotorTransform", mtr_filepath = "", valid_strs = [], vis_dim = 0, \
                 mtr_dim = 0, neurons_per_dim = 50, inhib_scale = 10.0, tau_in = 0.05, tau_inhib = 0.05, \
                 in_strs = None, quick = True):
        """Create a network mapping recognized strings to motor (x,y) outputs.

        For each string in ``in_strs`` an ensemble array is built whose input
        termination is the interleaved x/y transform loaded from
        ``<mtr_filepath><in_str>_x.csv`` and ``..._y.csv``, and whose
        inhibitory termination suppresses the array unless its string is the
        selected one.  All arrays feed a summing relay exposed as origin "X".

        :param name: name of this network
        :param mtr_filepath: path prefix for the per-string CSV transform files
        :param valid_strs: list of selectable strings; indexes the inhib vector
        :param vis_dim: NOTE(review): accepted but never used in this constructor
        :param mtr_dim: dimension of the motor output
        :param neurons_per_dim: neurons per represented dimension
        :param inhib_scale: strength of the inhibitory gating
        :param tau_in: PSC time constant of the input terminations
        :param tau_inhib: PSC time constant of the inhibitory terminations
        :param in_strs: strings for which transform ensembles are created
        :param quick: passed to nef.Network for quick ensemble creation
        """
        
        NetworkImpl.__init__(self)
        self.setName(name)
        net = nef.Network(self, quick)
        
        self.dimension = mtr_dim
        
        in_terms = []
        inhib_terms = []

        # Relay ensemble that sums the outputs of all per-string ensembles.
        out_relay = SimpleNEFEns("Output", mtr_dim, pstc = 0.0001, input_name = "")
        net.add(out_relay)

        for i,in_str in enumerate(in_strs):
            x_transform = read_csv(mtr_filepath + in_str + "_x.csv")
            y_transform = read_csv(mtr_filepath + in_str + "_y.csv")
            xy_transform = []
            
            transform_w = len(x_transform[0])
            transform_h = len(x_transform)

            # Interleave x and y rows: [x0, y0, x1, y1, ...].
            for row in range(transform_h):
                xy_transform.append(x_transform[row])
                xy_transform.append(y_transform[row])
            # append ignore rows
            for row in range(mtr_dim - (transform_h * 2)):
                xy_transform.append(zeros(1,transform_w))

            # Inhibit on every valid string except the one this ensemble handles.
            inhib_vec = num_vector(-inhib_scale, 1, len(valid_strs))
            inhib_vec[valid_strs.index(in_str)] = 0

            ens = net.make_array(in_str, neurons_per_dim, mtr_dim, 1, max_rate = (100,200), quick = quick, storage_code = "%d")
            ens.addDecodedTermination("Input", xy_transform, tau_in, False)
            ens.addTermination("Inhib", [[inhib_vec] * neurons_per_dim] * mtr_dim, tau_inhib, False)

            out_relay.addDecodedTermination(in_str, None, 0.0001, False)
            net.connect(ens.getOrigin("X"), out_relay.getTermination(in_str))

            in_terms.append(ens.getTermination("Input"))
            inhib_terms.append(ens.getTermination("Inhib"))

        # Bundle the per-string terminations into single exposed interfaces.
        in_term    = EnsembleTermination(net.network, "Input", in_terms)
        inhib_term = EnsembleTermination(net.network, "Inhib", inhib_terms)

        net.network.exposeTermination(in_term, "Input")
        net.network.exposeTermination(inhib_term, "Inhib")
        net.network.exposeOrigin(out_relay.getOrigin("X"), "X")
    def __init__(self, name, N, d, vocab):
        """Create a network comparing a hypothesis vector to eight answers.

        A direct-mode relay broadcasts the hypothesis to one small population
        per candidate answer; each population decodes the similarity (dot
        product) with its vocab vector, and an 8-D "combine" ensemble gathers
        the scaled similarities into the "result" origin.

        :param name: name of this network
        :param N: neuron budget, divided across the answer populations
        :param d: dimension of the hypothesis/vocab vectors
        :param vocab: candidate answer vectors (indices 0..7 are used)
        """
        NetworkImpl.__init__(self)
        self.name = name

        scaleFactor = 0.1
        # Split the neuron budget over the d input dimensions.
        smallN = int(math.ceil(float(N) / d))
        tauPSC = 0.007

        ef1 = RPMutils.defaultEnsembleFactory()
        ef1.nodeFactory.tauRef = 0.001

        test = ef1.make("hypothesis", 1, d)
        test.setMode(SimulationMode.DIRECT
                     )  #since this is just a relay ensemble for modularity
        test.fixMode()
        test.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
        self.addNode(test)
        self.exposeTermination(test.getTermination("input"), "hypothesis")

        combine = ef1.make("combine", 800, 8)
        #        combine.setMode(SimulationMode.DIRECT) #since this is just a relay ensemble for modularity
        #        combine.fixMode()
        #        combine.collectSpikes(True)
        self.addNode(combine)

        inputVec = [[0] for x in range(8)]

        #create a population for each possible answer
        for i in range(8):
            ans = ef1.make("ans_" + str(i), smallN, 1)
            # Termination decodes similarity to the i-th vocab vector.
            ans.addDecodedTermination("input", [vocab[i]], tauPSC, False)
            self.addNode(ans)

            self.addProjection(test.getOrigin("X"),
                               ans.getTermination("input"))

            # Route this answer into slot i of "combine": set slot i, register
            # the termination, then clear the slot again.
            # NOTE(review): assumes addDecodedTermination copies inputVec --
            # confirm, otherwise all terminations would share one matrix.
            inputVec[i] = [scaleFactor]
            combine.addDecodedTermination("in_" + str(i), inputVec, tauPSC,
                                          False)
            inputVec[i] = [0]

            self.addProjection(ans.getOrigin("X"),
                               combine.getTermination("in_" + str(i)))

        self.exposeOrigin(combine.getOrigin("X"), "result")

        if RPMutils.USE_PROBES:
            self.simulator.addProbe("combine", "X", True)
예제 #5
0
    def __init__(self, name, N, d):
        """Create a network computing a transform T from example pair (A, B).

        A's approximate inverse (an index-reversal permutation of elements
        1..d-1) is circularly convolved with B, and the result is averaged
        over time to give T.

        :param name: name of this network
        :param N: number of neurons per sub-network
        :param d: dimension of the vectors
        """
        NetworkImpl.__init__(self)
        self.name = name
        
        tauPSC = 0.007
        
        ef = RPMutils.defaultEnsembleFactory()
        netef = networkensemble.NetworkEnsemble(ef)
        
        #create the approximate inverse matrix
        # Swap rows i+1 and d-i-1, reversing the order of elements 1..d-1.
        # NOTE: d/2 relies on Python-2/Jython integer division.
        inv = RPMutils.eye(d, 1)
        for i in range(d/2):
            tmp = inv[i+1]
            inv[i+1] = inv[d-i-1]
            inv[d-i-1] = tmp
        
        #create the two input populations
        Ainv = netef.make("Ainv", N, tauPSC, [inv], None)
        self.addNode(Ainv)
        
        B = netef.make("B", N, tauPSC, [RPMutils.eye(d, 1)], None)
        self.addNode(B)
          
        #create circular convolution network
        corr = cconv.Cconv("corr", N, d)
        self.addNode(corr)
        
        self.addProjection(Ainv.getOrigin("X"), corr.getTermination("A"))
        self.addProjection(B.getOrigin("X"), corr.getTermination("B"))
        
        #average result
        T = average.Average("T", N, d)
        self.addNode(T)
        
        self.addProjection(corr.getOrigin("X"), T.getTermination("input"))
        
        if RPMutils.USE_PROBES:
            self.simulator.addProbe("T", "X", True)
            self.simulator.addProbe("corr", "X", True)
            self.simulator.addProbe("Ainv", "X", True)
            self.simulator.addProbe("B", "X", True)
        
        self.exposeOrigin(T.getOrigin("X"), "T")
        self.exposeTermination(Ainv.getTermination("in_0"), "A")
        self.exposeTermination(B.getTermination("in_0"), "B")
#        self.exposeTermination(T.getTermination("lrate"), "lrate")
예제 #6
0
    def __init__(self, name, N, d):
        """Create a memory network: an integrator that accumulates its input.

        The integrator uses a constant input scale (0.4) and forget rate
        (0.2); an earlier design (see project history) scaled the input with
        an adaptive learning-rate Eprod network instead.  The input is
        effectively scaled by 1/stepsize so the integrator reaches its target
        value in stepsize*1s rather than 1s.

        Exposes the integrator's "X" origin as "X" and its "input"
        termination as "input".

        :param name: name of this network
        :param N: number of neurons in the integrator
        :param d: dimension of the stored vector
        """
        NetworkImpl.__init__(self)
        self.name = name

        # Renamed from `int` to avoid shadowing the builtin; the node itself
        # keeps its original name "int" so probes and callers are unaffected.
        integ = integrator.Integrator("int",
                                      N,
                                      d,
                                      inputScale=0.4,
                                      forgetRate=0.2,
                                      stepsize=RPMutils.STEP_SIZE)
        self.addNode(integ)

        if RPMutils.USE_PROBES:
            self.simulator.addProbe("int", "X", True)

        self.exposeOrigin(integ.getOrigin("X"), "X")
        self.exposeTermination(integ.getTermination("input"), "input")
예제 #7
0
파일: selector.py 프로젝트: tcstewar/parser
    def __init__(self, name = "Selector", num_dim = 1, neurons_per_dim = 25,
                 ens_per_array = 0, num_items = 2, in_scale = 1.0, \
                 tau_in = 0.005, tau_inhib = 0.005, inhib_scale = 2.0, \
                 inhib_vecs = [], en_sum_out = True, \
                 sim_mode = SimulationMode.DEFAULT, quick = True, **params):
        """Create a network that selects one of num_items inputs by inhibition.

        Each item gets its own ensemble (array) with an exposed
        "Input <i>" termination and an inhibitory "Suppress <i>" termination;
        suppressing the non-selected items leaves only the selected item's
        value on the summed "X" origin (or on per-item "Output <i>" origins
        when en_sum_out is False).
        """
        # Note: tau_in and tau_inhib and inhib_scale can be lists.

        self.dimension = num_dim
        self.num_items = num_items
        
        # Pick a divisor of num_dim (starting from 8) as ensembles per array.
        if( ens_per_array == 0 ):
            ens_per_array = 8
            max_ens_per_array = sqrt(num_dim)
            while( ens_per_array < max_ens_per_array ):
                if( num_dim % ens_per_array == 0 ):
                    break
                else:
                    ens_per_array += 1
        
        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT

        # Deepcopy inhib_vec list... otherwise append function will affect other classes.
        inhib_vecs = [inhib_vecs[n] for n in range(len(inhib_vecs))] 
        len_diff = num_items - len(inhib_vecs)
        if( len_diff != 0 ):
            # Pad missing inhib vectors with [1] (inhibit on any signal).
            if( len(inhib_vecs) > 0 ):
                print(len(inhib_vecs))
                print(str(inhib_vecs))
                print("Selector.__init__ [" + name + "] - inhib_vec length and num_item mismatch")
            for n in range(len_diff):
                inhib_vecs.append([1])
        inhib_dim = len(inhib_vecs[0])

        NetworkImpl.__init__(self)
        net = nef.Network(self, quick)
        self.setName(name)

        # NOTE: integer division (Jython/Python 2) -- assumes ens_per_array
        # evenly divides num_dim.
        self.ens_per_array   = min(num_dim, ens_per_array)
        self.dim_per_ens     = num_dim / ens_per_array
        self.neurons_per_ens = neurons_per_dim * self.dim_per_ens

        self.make_mode         = sim_mode

        enss = []
        if( en_sum_out ):
            # Relay ensemble summing all item outputs into one "X" origin.
            out_relay = SimpleNEFEns("Output", self.dimension, pstc = 0.0001, input_name = "")
            net.add(out_relay)

        # Broadcast scalar time constants / scales to per-item lists.
        if( not isinstance(tau_in, list) ):
            tau_in = [tau_in] * num_items
        if( not isinstance(tau_inhib, list) ):
            tau_inhib = [tau_inhib] * num_items
        if( not isinstance(inhib_scale, list) ):
            inhib_scale = [inhib_scale] * num_items

        self.inhib_scale = inhib_scale
        self.tau_inhib   = tau_inhib
        self.tau_in      = tau_in

        if( not "max_rate" in params ):
            params["max_rate"] = (100,200)
        if( not "quick" in params ):
            params["quick"] = quick

        for item in range(num_items):
            inhib_vec_scale = [inhib_vecs[item][n] * -inhib_scale[item] for n in range(inhib_dim)]
            if( sim_mode == SimulationMode.DIRECT ):
                ens = SimpleNEFEns("Item " + str(item+1), self.dimension, pstc = tau_in[item], input_name = None)
                net.add(ens)
                inhib_mat = [inhib_vec_scale]
            else:
                ens = net.make_array("Item " + str(item+1), self.neurons_per_ens, self.ens_per_array, \
                                     self.dim_per_ens, **params)
                inhib_mat = [[inhib_vec_scale] * self.neurons_per_ens] * self.ens_per_array
            in_term = ens.addDecodedTermination("Input", diag(num_dim, value = in_scale), tau_in[item], False)
            inhib_term = ens.addTermination("Inhib", inhib_mat, tau_inhib[item], False)
            enss.append(ens)

            net.network.exposeTermination(in_term, "Input " + str(item+1))
            net.network.exposeTermination(inhib_term, "Suppress " + str(item+1))
            if( not en_sum_out ):
                net.network.exposeOrigin(ens.getOrigin("X"), "Output " + str(item+1))
            else:
                out_relay.addDecodedTermination("Item" + str(item+1), None, 0.0001, False)
                out_relay.addNeuronCount(ens.getNeuronCount())
                net.connect(ens.getOrigin("X"), out_relay.getTermination("Item" + str(item+1)))

        if( en_sum_out ):
            net.network.exposeOrigin(out_relay.getOrigin("X"), "X")

        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()
        # NOTE(review): `seed` is not defined in this constructor; presumably a
        # module-level import (e.g. random.seed) -- confirm.
        if( not seed is None ):
            seed()
        self.releaseMemory()
    def __init__(self, N, d, matrix):
        """Create the top-level solver for a sequence (Raven's-style) matrix.

        Builds the pipeline: input signals A/B -> transform calculation
        (calcT) -> optional cleanup memory -> circular convolution of the
        second-last cell with T to predict the final cell, plus probes and
        (optionally) a similarity test against the candidate answers.

        :param N: number of neurons per sub-network
        :param d: dimension of the vectors
        :param matrix: matrix specification passed to loadSequenceMatrix
        """
        NetworkImpl.__init__(self)
        self.name = "SequenceSolver"
        self.N = N
        self.d = d

        ef1 = RPMutils.defaultEnsembleFactory()

        #load matrix data from file
        matrixData = self.loadSequenceMatrix(matrix)

        #the two input signals, A and B, representing the sequence of example pairs
        Ain = matrixData[0]
        Bin = matrixData[1]
        self.addNode(Ain)
        self.addNode(Bin)

        #the adaptive learning rate
        #        lrate = matrixData[2]
        #        self.addNode(lrate)

        #calculate the T for the current A and B
        calcT = transform.Transform("calcT", N, d)
        self.addNode(calcT)

        self.addProjection(Ain.getOrigin("origin"), calcT.getTermination("A"))
        self.addProjection(Bin.getOrigin("origin"), calcT.getTermination("B"))
        #        self.addProjection(lrate.getOrigin("origin"), calcT.getTermination("lrate"))

        if RPMutils.USE_CLEANUP:
            #run T through cleanup memory
            cleanT = memory.Memory("cleanT", N, d)
            self.addNode(cleanT)

            self.addProjection(calcT.getOrigin("T"),
                               cleanT.getTermination("dirty"))

        #calculate the result of applying T to the second last cell
        secondLast = matrixData[3]
        self.addNode(secondLast)

        calcLast = cconv.Cconv("calcLast", N, d)
        self.addNode(calcLast)

        self.addProjection(secondLast.getOrigin("origin"),
                           calcLast.getTermination("A"))

        # Feed calcLast either the cleaned or the raw transform T.
        if RPMutils.USE_CLEANUP:
            self.addProjection(cleanT.getOrigin("clean"),
                               calcLast.getTermination("B"))
        else:
            self.addProjection(calcT.getOrigin("T"),
                               calcLast.getTermination("B"))

        # Optionally override T with a pre-loaded rule signal.
        if RPMutils.LOAD_RULES:
            self.removeProjection(calcLast.getTermination("B"))
            rulesig = matrixData[len(matrixData) - 1]
            self.addNode(rulesig)
            self.addProjection(rulesig.getOrigin("origin"),
                               calcLast.getTermination("B"))

        #compare the result to the possible answers to determine which is most similar
        if not RPMutils.RUN_WITH_CONTROLLER:
            testSimilarity = similarity.Similarity("testSimilarity", N, d,
                                                   matrixData[4:])
            self.addNode(testSimilarity)

            self.addProjection(calcLast.getOrigin("X"),
                               testSimilarity.getTermination("hypothesis"))
            self.simulator.addProbe("testSimilarity", "result", True)

        if RPMutils.USE_CLEANUP:
            Tprobe = self.simulator.addProbe("cleanT", "clean", True)
        else:
            Tprobe = self.simulator.addProbe("calcT", "T", True)
        answerprobe = self.simulator.addProbe("calcLast", "X", True)

        if RPMutils.USE_CLEANUP and RPMutils.DYNAMIC_MEMORY:
            self.simulator.addSimulatorListener(
                memorylistener.MemoryManagementListener(
                    RPMutils.cleanupDataFile(),
                    RPMutils.cleanupFile(d, RPMutils.VOCAB_SIZE)))

        # When run under the controller, record T and the predicted answer.
        if RPMutils.RUN_WITH_CONTROLLER:
            self.simulator.addSimulatorListener(
                proberecorder.ProbeRecorder(
                    Tprobe, RPMutils.resultFile("sequencesolver"), 0.05))
            self.simulator.addSimulatorListener(
                proberecorder.ProbeRecorder(
                    answerprobe, RPMutils.hypothesisFile("sequencesolver"),
                    0.05))

        self.setMode(RPMutils.SIMULATION_MODE)
예제 #9
0
파일: mem_block.py 프로젝트: mirror/spaun
    def __init__(self, name = "Memory Block", num_dim = 1, neurons_per_dim = 25, tau_in = 0.005, \
                 in_scale = 1.0, fb_scale = 1.00, inhib_scale = 2.0, input_name = "Input", \
                 reset_opt = 0, reset_vec = None, cyc_opt = 0, en_gint_out = False, tau_buf_in = 0.01,\
                 sim_mode = SimulationMode.DEFAULT, quick = True, mode = 1, rand_seed = 0, cleanup_vecs = None):
        """Create a memory block from two gated integrators in series.

        GINT1 latches the input while the Cycle signal is high and GINT2
        latches GINT1's value on the opposite phase, so the pair acts as a
        master/slave register clocked by "Cycle".  In 'ideal' sim_mode a
        single MemBlockNode replaces the neural implementation.

        reset_opt is a bit mask: bit 0 resets GINT1, bit 1 resets GINT2.
        """
        # mode: 1 - hrr mode (radius is scaled to num_dim), 0 - normal mode, 
        #      -1 - aligned mode (eval points chosen around 1 and 0)

        self.dimension = num_dim
        self.sim_mode = sim_mode
        NetworkImpl.__init__(self)
        self.setName(name)

        # Providing a reset vector implies resetting GINT1 (bit 0).
        if( not reset_vec is None ):
            reset_opt = reset_opt | 1

        if( str(sim_mode).lower() == 'ideal' ):
            node = MemBlockNode(name, num_dim, tau_in, reset_opt, reset_vec, cyc_opt, en_gint_out, in_scale)
            self.addNode(node)
            if( not(input_name is None or input_name == "") ):
                self.exposeTermination(node.getTermination("Input"), input_name)
            self.exposeTermination(node.getTermination("Cycle"), "Cycle")
            if( not reset_opt == 0 ):
                self.exposeTermination(node.getTermination("Reset"), "Reset")
            self.exposeOrigin(node.getOrigin("X"), "X")
            if( en_gint_out ):
                self.exposeOrigin(node.getOrigin("GINT1"), "GINT1")
                self.exposeOrigin(node.getOrigin("GINT2"), "GINT2")
        else:
            net = nef.Network(self, quick)
            
            gint1 = GatedInt("GINT1", num_dim, neurons_per_dim, in_scale = in_scale, fb_scale = fb_scale, tau_in = tau_in, \
                             inhib_scale = inhib_scale, en_reset = reset_opt & 1, reset_vec = reset_vec, en_cyc_in = False, \
                             cyc_opt = cyc_opt, mode = mode, quick = quick, rand_seed = rand_seed, input_name = input_name, \
                             sim_mode = sim_mode, tau_buf_in = tau_buf_in, cleanup_vecs = cleanup_vecs)
            gint2 = GatedInt("GINT2", num_dim, neurons_per_dim, fb_scale = fb_scale, inhib_scale = inhib_scale, \
                             en_reset = reset_opt & 2, en_cyc_in = False, cyc_opt = cyc_opt, mode = mode, \
                             quick = quick, rand_seed = rand_seed, sim_mode = sim_mode, cleanup_vecs = cleanup_vecs)
            
            net.add(gint1)
            net.add(gint2)
            net.connect(gint1.getOrigin("X"), gint2.getTermination("Input"))
        
            if( not(input_name is None or input_name == "") ):
                net.network.exposeTermination(gint1.getTermination("Input"), input_name)
            net.network.exposeOrigin(gint2.getOrigin("X"), "X")

            if( en_gint_out ):
                net.network.exposeOrigin(gint1.getOrigin("X"), "GINT1")
                net.network.exposeOrigin(gint2.getOrigin("X"), "GINT2")
    
            # Expose a combined Reset termination for whichever stages reset.
            if( reset_opt > 0 ):
                rst_terms = []
                if( reset_opt & 1 ):
                    rst_terms.append(gint1.getTermination("Reset"))
                if( reset_opt & 2 ):
                    rst_terms.append(gint2.getTermination("Reset"))              
                rst_term = EnsembleTermination(net.network, "Reset", rst_terms)
                net.network.exposeTermination(rst_term, "Reset")

            # Clock distribution: GINT1 and GINT2 get opposite cycle phases,
            # giving the master/slave latch behavior.
            cyc_net  = Detector("Cycle", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed)
            net.add(cyc_net)
            net.connect(cyc_net.getOrigin("Cycle") , gint1.getTermination("Cycle"))
            net.connect(cyc_net.getOrigin("Cycle") , gint2.getTermination("CycleN"))
            net.connect(cyc_net.getOrigin("CycleN"), gint1.getTermination("CycleN"))
            net.connect(cyc_net.getOrigin("CycleN"), gint2.getTermination("Cycle"))
            net.network.exposeTermination(cyc_net.getTermination("Input"), "Cycle")

            self.releaseMemory()

        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        self.setMode(sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()
예제 #10
0
    def __init__(self, name = "Cleanup Memory", \
                 in_vec_list = None, out_vec_list = None, tau_in = 0.005, in_scale = 1.0, \
                 en_inhib = False, tau_inhib = 0.005, tau_smooth = 0.0001, inhib_scale = 2.0, \
                 en_mut_inhib = False, mut_inhib_scale = 2.0, \
                 num_neurons_per_vec = 10, threshold = 0.3, \
                 N_out_vec = None, en_X_out = False, input_name = "Input", \
                 sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = None, **params):
        """Create an associative cleanup memory.

        One small thresholded population is built per input vector; when the
        input's similarity to that vector exceeds its threshold, the matching
        output vector is emitted on origin "X".  Optional features: an
        "ItemN" population that fires when no item matches (N_out_vec),
        mutual (winner-take-all) inhibition between items, a global "Inhib"
        termination, and a per-item match indicator origin "x0".  In 'ideal'
        sim_mode a single CleanupMemoryNode replaces the neural network.
        """
        
        NetworkImpl.__init__(self)
        self.setName(name)

        if( mut_inhib_scale <= 0 ):
            en_mut_inhib = False

        # Default to an auto-associative memory (output == input vectors).
        if( out_vec_list is None ):
            out_vec_list = in_vec_list
        self.dimension = len(out_vec_list[0])

        # Broadcast scalar parameters to one value per input vector.
        if( isinstance(mut_inhib_scale, (int,float)) ):
            mut_inhib_scale = [mut_inhib_scale] * len(in_vec_list)
        if( isinstance(inhib_scale, (int,float)) ):
            inhib_scale = [inhib_scale] * len(in_vec_list)
        if( isinstance(threshold, (int,float)) ):
            threshold = [threshold] * len(in_vec_list)
        # Pre-scale the input vectors by in_scale (also copies the list).
        in_vec_list = [[in_vec_list[i][d] * in_scale for d in range(len(in_vec_list[i]))] \
                      for i in range(len(in_vec_list))]
        
        self.i_list = []
        self.in_vec_list = []

        if( str(sim_mode).lower() == 'ideal' ):
            node = CleanupMemoryNode(name, in_vec_list, out_vec_list, tau_in, en_inhib, tau_inhib, \
                                     threshold = sum(threshold) / len(threshold), en_wta = en_mut_inhib, \
                                     N_out_vec = N_out_vec)
            self.addNode(node)
            self.exposeTermination(node.getTermination("Input"), "Input")
            if( en_inhib ):
                self.exposeTermination(node.getTermination("Inhib"), "Inhib")
            self.exposeOrigin(node.getOrigin("Output"), "X")
            if( en_X_out ):
                self.exposeOrigin(node.getOrigin("X"), "x0")
        else:
            net = nef.Network(self, quick)

            enss = []
            
            # Count the items that actually produce an output vector.
            num_items = 0
            for out_vec in out_vec_list:
                if( out_vec is None ):
                    continue
                else:
                    num_items += 1

            in_terms = []
            inhib_terms = []
            origins = []

            en_N_out = not (N_out_vec is None)

            # Relay summing every item's output vector into origin "X".
            out_relay = SimpleNEFEns("Output", self.dimension, pstc = tau_smooth)
            net.add(out_relay)

            if( en_X_out ):
                x_relay = SimpleNEFEns("X", num_items + en_N_out, pstc = tau_smooth)
                net.add(x_relay)

            for i,in_vec in enumerate(in_vec_list):
                if( out_vec_list[i] is None ):
                    continue

                self.in_vec_list.append(in_vec)
                self.i_list.append(i)
                
                # Evaluation points and intercepts chosen above threshold so
                # the population only responds to a sufficiently strong match.
                pdf = IndicatorPDF(threshold[i] + 0.1, 1)
                eval_points = [[pdf.sample()[0]] for _ in range(1000)]
                intercepts = [threshold[i] + n * (1-threshold[i])/(num_neurons_per_vec) for n in range(num_neurons_per_vec)]
                if( sim_mode == SimulationMode.DIRECT ):
                    ens = SimpleNEFEns("Item" + str(i), 1, input_name = "")
                    net.add(ens)
                else:
                    ens = net.make("Item" + str(i), num_neurons_per_vec, 1, eval_points = eval_points, \
                                   encoders = [[1]] * num_neurons_per_vec, intercept = intercepts, \
                                   max_rate = (100,200), seed = rand_seed)
                
                if( input_name != "" and not input_name is None ):
                    # Termination decodes similarity to this item's input vector.
                    ens.addDecodedTermination(input_name, [in_vec], tau_in, False)
                    in_terms.append(ens.getTermination(input_name))
                # Step output: emits out_vec_list[i] once the match passes threshold.
                ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = threshold[i], \
                                     step_val = out_vec_list[i][d]) for d in range(self.dimension)], \
                                     "AXON")
                enss.append(ens)
                
                out_relay.addDecodedTermination("Item" + str(i), None, tau_smooth, False)
                out_relay.addNeuronCount(ens.getNeuronCount())
                net.connect(ens.getOrigin("Output"), out_relay.getTermination("Item" + str(i)))
                
                if( en_X_out ):
                    # Binary match indicator, routed to slot i of the x relay.
                    ens.removeDecodedOrigin("X")
                    ens.addDecodedOrigin("X", [FilteredStepFunction(shift = threshold[i])], "AXON")
                    x_relay.addDecodedTermination("Item" + str(i), transpose(delta(num_items + en_N_out, i)), tau_smooth, False)
                    x_relay.addNeuronCount(ens.getNeuronCount())
                    net.connect(ens.getOrigin("X"), x_relay.getTermination("Item" + str(i)))

                if( en_inhib ):
                    ens.addTermination("Inhib", [[-inhib_scale[i]]] * num_neurons_per_vec, tau_inhib, False)
                    inhib_terms.append(ens.getTermination("Inhib"))
            
            # "None-of-the-above" population: active only when every item is
            # below its threshold; emits N_out_vec in that case.
            if( not N_out_vec is None ):
                N_threshold = min(threshold)
                pdf = IndicatorPDF(-0.1, N_threshold - 0.1)
                eval_points = [[pdf.sample()[0]] for _ in range(1000)]
                intercepts  = [-(n * (N_threshold)/(num_neurons_per_vec)) for n in range(num_neurons_per_vec)]
                if( sim_mode == SimulationMode.DIRECT ):
                    ens = SimpleNEFEns("ItemN", 1, input_name = "")
                    net.add(ens)
                else:
                    ens = net.make("ItemN", num_neurons_per_vec, 1, eval_points = eval_points, \
                                   encoders = [[-1]] * num_neurons_per_vec, intercept = intercepts, \
                                   max_rate = (300,400), seed = rand_seed)

                for i in range(len(in_vec_list)):
                    ens.addDecodedTermination("Item" + str(i), [[1]], 0.005, False)
                    net.connect(enss[i].getOrigin("X"), ens.getTermination("Item" + str(i)))

                ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = N_threshold, \
                                     step_val = N_out_vec[d], mirror = True) for d in range(self.dimension)], \
                                     "AXON")
                
                out_relay.addDecodedTermination("ItemN", None, tau_smooth, False)
                out_relay.addNeuronCount(ens.getNeuronCount())
                net.connect(ens.getOrigin("Output"), out_relay.getTermination("ItemN"))
                
                if( en_X_out ):
                    ens.removeDecodedOrigin("X")
                    ens.addDecodedOrigin("X", [FilteredStepFunction(shift = N_threshold, mirror = True)], "AXON")
                    x_relay.addDecodedTermination("ItemN", transpose(delta(num_items + en_N_out, num_items)), tau_smooth, False)
                    x_relay.addNeuronCount(ens.getNeuronCount())
                    net.connect(ens.getOrigin("X"), x_relay.getTermination("ItemN"))

                if( en_inhib ):
                    # NOTE(review): `i` here is the stale index left over from
                    # the loop above (len(in_vec_list)-1); confirm whether a
                    # dedicated scale was intended for the ItemN population.
                    ens.addTermination("Inhib", [[-inhib_scale[i]]] * num_neurons_per_vec, tau_inhib, False)
                    inhib_terms.append(ens.getTermination("Inhib"))
             

            # Winner-take-all: every item inhibits every other item.
            if( en_mut_inhib ):
                for n in range(num_items):
                    for i in range(num_items):
                        if( n != i):
                            enss[i].addTermination("Inhib" + str(n), [[-mut_inhib_scale[i]]] * num_neurons_per_vec, 0.005, False)
                            net.connect(enss[n].getOrigin("X"), enss[i].getTermination("Inhib" + str(n)))
            
            if( len(in_terms) > 0 ):
                in_term = EnsembleTermination(net.network, "Input", in_terms)
                net.network.exposeTermination(in_term, "Input")
            net.network.exposeOrigin(out_relay.getOrigin("X"), "X")

            if( en_X_out ):
                self.exposeOrigin(x_relay.getOrigin("X"), "x0")

            if( en_inhib ):
                inhib_term = EnsembleTermination(net.network, "Inhib", inhib_terms)
                net.network.exposeTermination(inhib_term, "Inhib")
            
            # Reset random seed
            # NOTE(review): `seed` is assumed to be a module-level name (e.g.
            # random.seed); it is not defined in this constructor -- confirm.
            if( not seed is None ):
                seed()
            self.releaseMemory()

        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()
예제 #11
0
    def __init__(self, name = "Gated Integrator", num_dim = 1, neurons_per_dim = 25, \
                 tau_fb = 0.05, tau_in = 0.010, tau_buf_in = 0.01, tau_inhib = 0.005, \
                 in_scale = 1.0, fb_scale = 1.00, inhib_scale = 2.0, input_name = "Input", \
                 en_reset = False, reset_vec = None, en_cyc_in = True, cyc_opt = 0, \
                 sim_mode = SimulationMode.DEFAULT, quick = True, mode = 1, rand_seed = None, \
                 cleanup_vecs = None):
        """Build a gated integrator network.

        Input passes through a 'gate' population into a 'buffer' population
        whose value is recirculated through a 'feedback' population.  A
        'Cycle' signal alternately inhibits the gate and the feedback path, so
        the network either loads new input or holds its current value.

        :param name: name of this network
        :param num_dim: dimensionality of the integrated value
        :param neurons_per_dim: neurons per dimension in each population
        :param tau_fb: synaptic time constant of the feedback connection
        :param tau_in: synaptic time constant of the gate input
        :param tau_buf_in: synaptic time constant of the buffer input
        :param tau_inhib: synaptic time constant of inhibitory connections
        :param in_scale: gain on the external input
        :param fb_scale: gain on the feedback (recirculation) connection
        :param inhib_scale: gain on inhibitory connections
        :param input_name: name of the exposed input termination; None or ""
               disables the external input entirely
        :param en_reset: expose a "Reset" termination that inhibits the buffer
        :param reset_vec: if given, reset loads this vector instead of zero
               (implies en_reset; uses the Selector-based gating path)
        :param en_cyc_in: build an internal Cycle detector; when False the raw
               Cycle/CycleN gate terminations are exposed instead
        :param cyc_opt: selects which phase of the cycle signal opens the gate
               vs. holds the buffer (see wiring below)
        :param sim_mode: simulation mode; 'ideal' builds an algorithmic
               GatedIntNode instead of neural populations
        :param quick: passed through to ensemble creation
        :param mode: ensemble-parameter preset (1: radius scaled for 1-D
               slices of a num_dim vector; -1: positive-only encoders with
               eval points biased away from zero)
        :param rand_seed: random seed passed to ensemble creation
        :param cleanup_vecs: optional vocabulary vectors; the buffer becomes a
               CleanupMem over these vectors instead of a plain ensemble array
        """
        self.dimension = num_dim
        NetworkImpl.__init__(self)
        self.setName(name)
        
        # A non-None reset_vec means "reset to this vector" rather than to
        # zero; init_opt selects the Selector-based gating path below.
        if( not reset_vec is None ):
            en_reset = True
            self.init_opt = True
        else:
            self.init_opt = False

        if( str(sim_mode).lower() == 'ideal' ):
            # Ideal mode: delegate all behaviour to an algorithmic node.
            node = GatedIntNode(name, num_dim, tau_in, en_reset, reset_vec, en_cyc_in, cyc_opt)
            self.addNode(node)
            if( not(input_name is None or input_name == "") ):
                self.exposeTermination(node.getTermination("Input"), input_name)
            else:
                node.removeTermination("Input")
            self.exposeTermination(node.getTermination("Cycle"), "Cycle")
            if( en_reset ):
                self.exposeTermination(node.getTermination("Reset"), "Reset")
            if( not en_cyc_in ):
                self.exposeTermination(node.getTermination("CycleN"), "CycleN")
            self.exposeOrigin(node.getOrigin("X"), "X")

            ## TODO
            if( cleanup_vecs is None ):
                print("GINT - Cleanupvecs not implemented yet")
        else:
            net = nef.Network(self, quick)
            nn_per_dim = neurons_per_dim

            # mode 1: each ensemble represents one dimension of a (roughly
            # unit-length) num_dim vector, so scale the radius by
            # 1/sqrt(num_dim) (with a 3.5x safety margin).
            if( mode == 1 ):
                radius = 1/sqrt(num_dim) * 3.5
            else:
                radius = 1
            if( mode == -1 ):
                # Positive-only representation, eval points biased away from 0.
                eval_points = [[1 - random() * 0.6 + 0.15] for _ in range(2000)]
                encoders    = [[1]]
                intercept   = (0.25,1)
            else:
                eval_points = None
                encoders    = None
                intercept   = (-1,1)

            params = dict(max_rate = (100,200), radius = radius, quick = quick, \
                          intercept = intercept, encoders = encoders, eval_points = eval_points, \
                          seed = rand_seed)
            
            if( sim_mode == SimulationMode.DIRECT ):
                # DIRECT mode: lightweight ideal ensembles (1 "neuron" each).
                inhib_mat = [[-inhib_scale]]
                if( cleanup_vecs is None ):
                    buffer = SimpleNEFEns("buffer", num_dim, input_name = "")
                else:
                    buffer = CleanupMem("buffer", cleanup_vecs, num_neurons_per_vec = 1, \
                                        tau_in = tau_buf_in, tau_inhib = tau_inhib, \
                                        en_mut_inhib = True, inhib_scale = inhib_scale, \
                                        en_inhib = en_reset and not self.init_opt, \
                                        threshold = 0.5, sim_mode = sim_mode)
                feedback   = SimpleNEFEns("feedback", num_dim, input_name = "")
                net.add(buffer)
                net.add(feedback)
            else:
                inhib_mat = [[[-inhib_scale]] * nn_per_dim] * num_dim
                if( cleanup_vecs is None ):
                    buffer = net.make_array("buffer", nn_per_dim, num_dim, 1, **params)
                else:
                    buffer = CleanupMem("buffer", cleanup_vecs, num_neurons_per_vec = nn_per_dim, \
                                        tau_in = tau_buf_in, tau_inhib = tau_inhib, \
                                        en_mut_inhib = True, inhib_scale = inhib_scale, \
                                        en_inhib = en_reset and not self.init_opt, threshold = 0.5, \
                                        sim_mode = sim_mode, rand_seed = rand_seed, quick = quick)
                    net.add(buffer)
                feedback   = net.make_array("feedback", nn_per_dim, num_dim, 1, **params)
            
            # Buffer terminations.  NOTE(review): when cleanup_vecs is given,
            # CleanupMem is presumed to create its own "Input" (and, when
            # en_inhib was set above, "Inhib") terminations -- confirm.
            if( cleanup_vecs is None ):
                buffer.addDecodedTermination("Input", eye(num_dim), tau_buf_in, False)
            buffer.addDecodedTermination("Feedback", eye(num_dim), 0.005, False)
            if( en_reset and not self.init_opt ):
                if( cleanup_vecs is None ):
                    buffer.addTermination("Inhib", inhib_mat, tau_inhib, False)
                net.network.exposeTermination(buffer.getTermination("Inhib"), "Reset")

            feedback.addDecodedTermination("Input", diag(num_dim, value = fb_scale), tau_fb, False)
            feedback.addTermination("Inhib", inhib_mat, tau_inhib, False)
            
            if( input_name is None or input_name == "" ):
                self.num_inputs = 0
            else:
                self.num_inputs = 1

            if( not self.init_opt ):
                # Standard gating: a single gate population that the cycle
                # signal inhibits to close.
                if( sim_mode == SimulationMode.DIRECT ):
                    gate = SimpleNEFEns("gate"  , num_dim, input_name = "")
                    net.add(gate)
                else:
                    gate = net.make_array("gate", nn_per_dim, num_dim, 1, **params)
                if( self.num_inputs ):
                    gate.addDecodedTermination("Input", diag(num_dim, value = in_scale), tau_in, False)
                    net.network.exposeTermination(gate.getTermination("Input"), input_name)
                gate.addTermination("Inhib", inhib_mat, tau_inhib, False)
                gate_inhib_name = "Inhib"
            else:
                # Reset-to-vector gating: a Selector switches between the
                # external input (Input 1) and the constant reset vector
                # (Input 2), driven by a Reset detector; the feedback loop is
                # suppressed while the reset value is being loaded.
                gate = Selector("gate", num_dim, nn_per_dim, num_dim, tau_in = [0.005,tau_in], in_scale = in_scale, \
                                inhib_scale = inhib_scale, **params)
                gate.addSuppressTerminations([1])
                feedback.addTermination("Reset", inhib_mat, 0.005, False)
                reset_net = Detector("Reset", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed)
                net.add(reset_net)
                net.add(gate)
                net.network.exposeTermination(reset_net.getTermination("Input"), "Reset")
                if( self.num_inputs ):
                    net.network.exposeTermination(gate.getTermination("Input 1"), input_name)
                init_val_in = net.make_input("init_val", reset_vec)
                net.connect(init_val_in                  , gate.getTermination("Input 2"))
                net.connect(reset_net.getOrigin("Reset") , gate.getTermination("Suppress 1_2"))
                net.connect(reset_net.getOrigin("ResetN"), gate.getTermination("Suppress 2"))
                net.connect(reset_net.getOrigin("Reset") , feedback.getTermination("Reset"))
                gate_inhib_name = "Suppress 1"

            # Core integrator loop: gate -> buffer -> feedback -> buffer.
            net.connect(gate.getOrigin("X")    , buffer.getTermination("Input"))
            net.connect(buffer.getOrigin("X")  , feedback.getTermination("Input"))
            net.connect(feedback.getOrigin("X"), buffer.getTermination("Feedback"))

            net.network.exposeOrigin(buffer.getOrigin("X"), "X")

            # cyc_opt flips which phase of the cycle signal inhibits the gate
            # (load) versus the feedback path (hold).
            if( cyc_opt ):
                gate_inhib_str = ("CycleN")
                fb_inhib_str = ("Cycle")
            else:
                gate_inhib_str = ("Cycle")
                fb_inhib_str = ("CycleN")
                
            if( en_cyc_in ):
                # Internal cycle detector drives both inhibitory connections.
                cyc_net  = Detector("Cycle", en_N_out = True, sim_mode = sim_mode, rand_seed = rand_seed)
                net.add(cyc_net)
                net.connect(cyc_net.getOrigin(gate_inhib_str), gate.getTermination(gate_inhib_name))
                net.connect(cyc_net.getOrigin(fb_inhib_str)  , feedback.getTermination("Inhib"))
                net.network.exposeTermination(cyc_net.getTermination("Input"), "Cycle")
            else:
                # Expose the raw inhibitory terminations for external gating.
                net.network.exposeTermination(gate.getTermination(gate_inhib_name), gate_inhib_str)
                net.network.exposeTermination(feedback.getTermination("Inhib")    , fb_inhib_str)

            # Reset random seed
            # NOTE(review): `seed` here is the module-level seeding function,
            # so this condition is always true and seed() re-seeds from system
            # entropy; possibly seed(rand_seed) was intended -- confirm.
            if( not seed is None ):
                seed()
            self.releaseMemory()

        # 'ideal' collapses to DIRECT mode for the container network itself.
        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()
예제 #12
0
    def __init__(self, name = "Detector", detect_vec = None, inhib_vec = None, tau_in = 0.005, \
                 en_inhib = False, en_inhibN = None, tau_inhib = 0.005, in_scale = 1.0, inhib_scale = 2.0,\
                 en_out = True, en_N_out = False, en_X_out = False, num_neurons = 20, detect_threshold = 0.4, \
                 sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = 0, net = None, input_name = "Input"):
        """Build a threshold-detector network.

        Creates up to two small populations: 'detect' outputs a step-like 1
        when the (scaled) projection of the input onto detect_vec exceeds
        detect_threshold, and 'detectN' is its complement (1 below threshold).
        Both decoded origins use FilteredStepFunction to shape the output.

        :param name: network name; also the name of the exposed detect origin
               (the complement origin is exposed as name + "N")
        :param detect_vec: vector to detect (default [1])
        :param inhib_vec: vector weighting the inhibitory termination
               (default [1]; used when en_inhib or en_inhibN is set)
        :param tau_in: input synaptic time constant
        :param en_inhib: add an "Inhib" termination to the detect population
        :param en_inhibN: add an "Inhib" termination to the detectN
               population; defaults to mirroring en_inhib
        :param tau_inhib: inhibition synaptic time constant
        :param in_scale: gain applied to detect_vec
        :param inhib_scale: gain applied (negated) to inhib_vec
        :param en_out / en_N_out: build the detect / detectN populations
        :param en_X_out: additionally expose raw decoded values as x0 / x0N
        :param num_neurons: neurons per detector population
        :param detect_threshold: detection threshold, clamped to [0.2, 0.8]
        :param sim_mode: simulation mode; 'ideal' behaves as DIRECT
        :param quick: passed through to ensemble creation
        :param rand_seed: deterministic seed; negative disables seeding
        :param net: parent nef.Network (or a name for a new one) to build
               into; when None a network wrapping self is created
        :param input_name: name of the exposed input termination (None
               disables the input termination entirely)
        """
        self.dimension = 1
        NetworkImpl.__init__(self)
        ens_name = name
        if( not isinstance(net, nef.Network) ):
            if( not net is None ):
                net = nef.Network(net, quick)
            else:
                # Building standalone: wrap self and use a generic inner name.
                ens_name = "detect"
                net = nef.Network(self, quick)
        self.setName(name)

        if( detect_vec is None ):
            detect_vec = [1]

        vec_dim = len(detect_vec)
        detect_vec_scale = [detect_vec[n] * in_scale for n in range(vec_dim)]

        # en_inhibN mirrors en_inhib unless explicitly overridden.
        if( en_inhibN is None ):
            en_inhibN = en_inhib
        # BUGFIX: build the scaled inhibition vector whenever *either*
        # population needs it.  It was previously computed only inside the
        # en_out/en_inhib branch, so en_inhibN = True with en_inhib = False
        # (or en_out = False) raised a NameError when wiring detectN's
        # "Inhib" termination below.
        if( en_inhib or en_inhibN ):
            if( inhib_vec is None ):
                inhib_vec = [1]
            inhib_dim = len(inhib_vec)
            inhib_vec_scale = [inhib_vec[n] * -inhib_scale for n in range(inhib_dim)]

        max_rate  = (100,200)
        max_rateN = (300,400)
        # Clamp the threshold so the intercept ranges below stay well-formed.
        detect_threshold = max(min(detect_threshold, 0.8), 0.2)
        intercepts  = [detect_threshold + n * (1-detect_threshold)/(num_neurons) for n in range(num_neurons)]
        interceptsN = [-(n * (detect_threshold)/(num_neurons)) for n in range(num_neurons)]
        params  = dict(intercept = intercepts , max_rate = max_rate , quick = quick)
        paramsN = dict(intercept = interceptsN, max_rate = max_rateN, quick = quick)

        # Step-shaped decoded outputs centered on the detection threshold.
        out_func  = FilteredStepFunction(shift = detect_threshold, mirror = False)
        out_funcN = FilteredStepFunction(shift = detect_threshold, mirror = True)
        
        if( rand_seed >= 0 ):
            PDFTools.setSeed(rand_seed)
            seed(rand_seed)

        params["encoders"]  = [[1]] * num_neurons
        paramsN["encoders"] = [[-1]] * num_neurons

        # Evaluation points drawn only from each population's operating range
        # (above threshold for detect, below it for detectN).
        pdf  = IndicatorPDF(detect_threshold + 0.1, 1.1)
        pdfN = IndicatorPDF(-0.1, detect_threshold - 0.1)
        params["eval_points"]  = [[pdf.sample()[0]] for _ in range(1000)]
        paramsN["eval_points"] = [[pdfN.sample()[0]] for _ in range(1000)]
        
        if( en_out ):
            if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ):
                detect = SimpleNEFEns(ens_name, 1, input_name = "")
                net.add(detect)
            else:
                detect = net.make(ens_name, num_neurons, 1, **params)
            if( not input_name is None ):
                detect.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False)
            if( en_inhib ):
                detect.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False)
            
            # Replace the default decoded origin with the step output.
            detect.removeDecodedOrigin("X")
            detect.addDecodedOrigin("X", [out_func], "AXON")

            if( en_X_out ):
                detect.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON")
                self.exposeOrigin(detect.getOrigin("x0"), "x0")

        if( en_N_out ):
            if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ):
                detectN = SimpleNEFEns(ens_name + "N", 1, input_name = "")
                net.add(detectN)
            else:
                detectN = net.make(ens_name + "N", num_neurons, 1, **paramsN)
            if( not input_name is None ):
                detectN.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False)
            if( en_inhibN ):
                detectN.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False)
        
            detectN.removeDecodedOrigin("X")
            detectN.addDecodedOrigin("X", [out_funcN], "AXON")

            if( en_X_out ):
                detectN.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON")
                self.exposeOrigin(detectN.getOrigin("x0"), "x0N")
                
        # Collect the per-population terminations and expose each group as a
        # single combined termination on this network.
        input_terms = []
        inhib_terms = []
        
        if( en_out ):
            if( not input_name is None ):
                input_terms.append(detect.getTermination(input_name))
            self.exposeOrigin(detect.getOrigin("X"), name)
            if( en_inhib ):
                inhib_terms.append(detect.getTermination("Inhib"))                
        if( en_N_out ):
            if( not input_name is None ):
                input_terms.append(detectN.getTermination(input_name))
            self.exposeOrigin(detectN.getOrigin("X"), str(name + "N"))
            if( en_inhibN ):
                inhib_terms.append(detectN.getTermination("Inhib"))

        if( len(input_terms) > 0 ):
            input_term = EnsembleTermination(self, input_name, input_terms)
            self.exposeTermination(input_term, input_name)
        if( len(inhib_terms) > 0 ):
            inhib_term = EnsembleTermination(self, "Inhib", inhib_terms)
            self.exposeTermination(inhib_term, "Inhib")

        # 'ideal' collapses to DIRECT mode for the container network.
        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()

        self.releaseMemory()
예제 #13
0
    def __init__(self, name, N, d):
        """Build a circular-convolution network.

        Computes the circular convolution of two d-dimensional inputs by
        taking the (real/imaginary) DFT of each, multiplying element-wise in
        the frequency domain, and applying the inverse DFT to the product.
        Exposes terminations "A" and "B" (d-dimensional inputs) and origin
        "X" (the d-dimensional result).

        :param name: name of this network
        :param N: number of neurons used for each product network
        :param d: dimensionality of the input/output vectors
        """
        NetworkImpl.__init__(self)
        self.name = name

        tauPSC = 0.007

        # Real and imaginary parts of the DFT matrix.
        Wr = self.calcWreal(d)
        Wi = self.calcWimag(d)

        # The DFT of a real vector is conjugate-symmetric, so only the first
        # halfd coefficients are computed; halfN scales the neuron count to
        # match the reduced dimensionality.
        halfd = int(d / 2) + 1
        halfN = int(math.ceil(float(N) * halfd / d))

        ef = RPMutils.defaultEnsembleFactory()
        netef = networkensemble.NetworkEnsemble(ef)

        #create input populations
        A = ef.make("A", 1, d)
        A.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
        A.setMode(SimulationMode.DIRECT
                  )  #since this is just a relay ensemble for modularity
        A.fixMode()
        self.addNode(A)

        B = ef.make("B", 1, d)
        B.addDecodedTermination("input", RPMutils.eye(d, 1), 0.0001, False)
        B.setMode(SimulationMode.DIRECT
                  )  #since this is just a relay ensemble for modularity
        B.fixMode()
        self.addNode(B)

        #this is the new method, where we collapse the fft into the eprod
        #populations to calculate the element-wise product of our vectors so far
        eprods = []

        #note: we scale the output of the eprods by d/2, which we will undo at the
        #end, to keep the overall length of each dimension around 1
        #(the average value of each dimension of a normalized d dimensional vector is 1/sqrt(d),
        #so 1/sqrt(d)*1/sqrt(d) = 1/d, so when we add the scale the resulting average dimension
        #should be around d/2d i.e. 1/2)
        #the 2 is added to give us a bit of a buffer, better to have the dimensions too small
        #than too large and run into saturation problems
        multscale = float(d) / 2.0
        # Four element-wise products cover the cross terms of the complex
        # multiply: (Ar*Br), (Ai*Bi), (Ai*Br), (Ar*Bi).
        eprods = eprods + [
            eprod.Eprod("eprod0",
                        halfN,
                        halfd,
                        scale=multscale,
                        weights=[Wr, Wr],
                        maxinput=2.0 / math.sqrt(d))
        ]
        eprods = eprods + [
            eprod.Eprod("eprod1",
                        halfN,
                        halfd,
                        scale=multscale,
                        weights=[Wi, Wi],
                        maxinput=2.0 / math.sqrt(d))
        ]
        eprods = eprods + [
            eprod.Eprod("eprod2",
                        halfN,
                        halfd,
                        scale=multscale,
                        weights=[Wi, Wr],
                        maxinput=2.0 / math.sqrt(d))
        ]
        eprods = eprods + [
            eprod.Eprod("eprod3",
                        halfN,
                        halfd,
                        scale=multscale,
                        weights=[Wr, Wi],
                        maxinput=2.0 / math.sqrt(d))
        ]

        # Both inputs feed every product network.
        for i in range(4):
            self.addNode(eprods[i])

            self.addProjection(A.getOrigin("X"), eprods[i].getTermination("A"))
            self.addProjection(B.getOrigin("X"), eprods[i].getTermination("B"))

        #negative identity matrix (for subtraction)
        negidentity = [[0 for x in range(d)] for x in range(d)]
        for i in range(d):
            negidentity[i][i] = -1

        #note: all this halfd/expansion stuff is because the fft of a real value
        #is symmetrical, so we do all our computations on just one half and then
        #add in the symmetrical other half at the end

        #matrix for expanding real half-vectors (with negative for subtraction)
        expand = RPMutils.eye(halfd, 1)
        negexpand = RPMutils.eye(halfd, -1)

        #matrix for expanding imaginary half-vectors
        imagexpand = RPMutils.eye(halfd, 1)

        # Mirror the interior coefficients back to full length (conjugate
        # symmetry: real part is even, imaginary part is odd, hence negation).
        midpoint = halfd - 1 - (d + 1) % 2
        for i in range(int(math.ceil(d / 2.0) - 1)):
            expand = expand + [expand[midpoint - i]]
            negexpand = negexpand + [negexpand[midpoint - i]]

            imagexpand = imagexpand + [[-x for x in imagexpand[midpoint - i]]]

        #multiply real components
        rprod = netef.make("rprod", N, tauPSC, [expand, negexpand], None)
        self.addNode(rprod)
        self.addProjection(eprods[0].getOrigin("X"),
                           rprod.getTermination("in_0"))
        self.addProjection(eprods[1].getOrigin("X"),
                           rprod.getTermination("in_1"))

        #multiply imaginary components
        iprod = netef.make("iprod", N, tauPSC, [imagexpand, imagexpand], None)
        self.addNode(iprod)
        self.addProjection(eprods[2].getOrigin("X"),
                           iprod.getTermination("in_0"))
        self.addProjection(eprods[3].getOrigin("X"),
                           iprod.getTermination("in_1"))

        #now calculate IFFT of Z = (rprod) + (iprod)i
        #we only need to calculate the real part, since we know the imaginary component is 0
        Winvr = self.calcInvWreal(d)
        Winvi = self.calcInvWimag(d)

        # Undo the multscale factor applied by the eprods above.
        for i in range(d):
            for j in range(d):
                Winvr[i][j] = Winvr[i][j] * (1.0 / multscale)
                Winvi[i][j] = Winvi[i][j] * (1.0 / multscale)

        negWinvi = [[0 for x in range(d)] for x in range(d)]
        for i in range(d):
            for j in range(d):
                negWinvi[i][j] = -Winvi[i][j]

        result = netef.make("result", N, tauPSC, [Winvr, negWinvi], None)

        self.addNode(result)

        self.addProjection(rprod.getOrigin("X"), result.getTermination("in_0"))
        self.addProjection(iprod.getOrigin("X"), result.getTermination("in_1"))

        if RPMutils.USE_PROBES:
            self.simulator.addProbe("A", "X", True)
            self.simulator.addProbe("B", "X", True)
            self.simulator.addProbe("eprod0", "X", True)
            self.simulator.addProbe("eprod1", "X", True)
            self.simulator.addProbe("eprod2", "X", True)
            self.simulator.addProbe("eprod3", "X", True)
            self.simulator.addProbe("rprod", "X", True)
            self.simulator.addProbe("iprod", "X", True)
            self.simulator.addProbe("result", "X", True)

        self.exposeTermination(A.getTermination("input"), "A")
        self.exposeTermination(B.getTermination("input"), "B")
        self.exposeOrigin(result.getOrigin("X"), "X")
예제 #14
0
파일: visual_heir.py 프로젝트: mirror/spaun
    def __init__(self, name = "Vision", file_path = "", in_dim = 28*28, tau_psc = 0.0001, \
                 mu_filename = "", output_vecs = None, mut_inhib_scale = 2, net = None, \
                 en_bypass = False, en_norm = False, en_neuron_rep = False, \
                 sim_mode = SimulationMode.DEFAULT, sim_mode_cleanup = None, quick = True):
        """Build the visual-hierarchy network.

        Constructs a four-layer feedforward network whose weights and biases
        are read from CSV files (mat_<k>_w.csv / mat_<k>_b.csv under
        file_path), with a logistic-sigmoid transfer function between layers.
        Optionally attaches an associative-memory cleanup on the final layer's
        output (when output_vecs and mu_filename are given).

        :param name: network name
        :param file_path: directory prefix for the weight/bias CSV files
        :param in_dim: dimensionality of the raw input (default 28*28 image)
        :param tau_psc: synaptic time constant between layers
        :param mu_filename: mean-vector file for the associative memory
        :param output_vecs: output vocabulary for the associative memory;
               None exposes the raw layer-4 output instead
        :param mut_inhib_scale: mutual-inhibition gain for the assoc memory
        :param net: parent nef.Network; a new one wrapping self when None
        :param en_bypass: also expose the raw pre-cleanup output as "Vis Raw"
        :param en_norm: add an extra neural layer (layerN) after layer 4
        :param en_neuron_rep: add a spiking copy of layer 4 (probe-friendly)
        :param sim_mode: simulation mode; 'ideal' behaves as DIRECT
        :param sim_mode_cleanup: sim mode for the assoc memory (defaults to
               sim_mode)
        :param quick: passed through to ensemble creation
        """
        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        if( sim_mode == SimulationMode.DIRECT ):
            N = 1
            N2 = 1
        else:
            N  = 3  # Number of neurons for layer 1 to 3
            N2 = 10 # Number of neurons for layer 4
        N3 = 20 # Number of neurons (per dim) for layer N
        NN = 20

        # Per-layer radii, intercepts, and encoders (older values kept as
        # trailing comments).
        R1 = 7#50#10
        R2 = 7#20#2
        R3 = 7#15#5
        R4 = 1.5#2
        RN = 1.5

        I1 = (-0.3,1)#(-0.1,0.5)
        I2 = (-0.3,1)#(-0.125,0.75)
        I3 = (-0.3,1)#(-0.15,0.8)
        I4 = (-1,1)
        IN = (-1,1)

        E1 = [[1]]
        E2 = [[1]]
        E3 = [[1]]
        E4 = None
        EN = None
        
        NetworkImpl.__init__(self)
        if( net is None ):
            net = nef.Network(self, quick)
        self.setName(name)

        # Logistic sigmoid used as the inter-layer transfer function.
        def transform(x):
            return 1.0/(1 + exp(-x[0]))

#        params = dict(max_rate = (100,200), quick = quick, encoders=[[1]], intercept=(0,0.8), mode = sim_mode)
#        params = dict(max_rate = (50,100), quick = quick, encoders=[[1]], intercept=(-0.1,0.8), mode = sim_mode)

#        params = dict(max_rate = (50,60), mode = sim_mode, tau_ref=0.005)
        params = dict(max_rate = (50,60), mode = SimulationMode.DIRECT, tau_ref=0.005)   ## HARDCODED DIRECT SIM MODE

        # Relay ensemble exposing the raw image input.
        in_ens = net.make('Input', 1, in_dim)
        net.network.exposeTermination(in_ens.addDecodedTermination("Input", eye(in_dim), 0.0001, False), "Input")
        in_ens.setMode(SimulationMode.DIRECT)
        in_ens.fixMode()

        # Layer 1: weights applied on the connection (transposed), bias as a
        # constant input.  Layers 2-4 repeat the same pattern with the
        # sigmoid applied to the previous layer's output.
        w1 = read_csv(file_path + 'mat_1_w.csv')
        b1 = read_csv(file_path + 'mat_1_b.csv')

        layer1 = net.make_array('layer1', N, len(w1[0]), radius = R1, intercept = I1, encoders = E1, **params)
        bias1  = net.make_input('bias1', b1[0])
        net.connect(bias1, layer1)
        net.connect(in_ens, layer1, transform = numeric.array(w1).T, pstc = tau_psc)

        w2 = read_csv(file_path + 'mat_2_w.csv')
        b2 = read_csv(file_path + 'mat_2_b.csv')

        layer2 = net.make_array('layer2', N, len(w2[0]), radius = R2, intercept = I2, encoders = E2, **params)
        bias2  = net.make_input('bias2', b2[0])
        net.connect(bias2, layer2)
        net.connect(layer1, layer2, func = transform, transform = numeric.array(w2).T, pstc = tau_psc)

        w3 = read_csv(file_path + 'mat_3_w.csv')
        b3 = read_csv(file_path + 'mat_3_b.csv')

        layer3 = net.make_array('layer3', N, len(w3[0]), radius = R3, intercept = I3, encoders = E3, **params)
        bias3  = net.make_input('bias3', b3[0])
        net.connect(bias3, layer3)
        net.connect(layer2, layer3, func = transform, transform = numeric.array(w3).T, pstc = tau_psc)

        w4 = read_csv(file_path + 'mat_4_w.csv')
        b4 = read_csv(file_path + 'mat_4_b.csv')

        layer4 = net.make_array('layer4', N2, len(w4[0]), radius = R4, intercept = I4, encoders = E4, **params)
        bias4  = net.make_input('bias4', b4[0])
        net.connect(bias4, layer4)
        net.connect(layer3, layer4, func = transform, transform = numeric.array(w4).T, pstc = tau_psc)

        if( en_norm ):
            # Extra neural layer after layer 4.  NOTE(review): sim_mode_N is
            # computed but not used below (params carries its own mode) --
            # confirm whether layerN was meant to run in sim_mode_N.
            if( sim_mode == SimulationMode.DIRECT ):
                sim_mode_N = SimulationMode.RATE
            else:
                sim_mode_N = sim_mode
    #        layerN = net.make('layerN', N3 * len(w4[0]), len(w4[0]), max_rate = (50,60), radius = RN, intercept = IN, encoders = EN, quick = quick, mode = sim_mode_N)
            layerN = net.make('layerN', N3 * len(w4[0]), len(w4[0]), radius = RN, intercept = IN, encoders = EN, quick = quick, **params)
            net.connect(layer4, layerN, pstc = tau_psc)
            layerN.fixMode()
        else:
            layerN = layer4
        
        if( en_neuron_rep ):
            # Spiking copy of layer 4 output (e.g. for neuron-level probing).
            layerNeur = net.make_array('layerNeur', NN * len(w4[0]), len(w4[0]), \
                                       radius = RN, intercept = IN, encoders = EN, quick = quick, \
                                       max_rate = (100,200), mode = SimulationMode.DEFAULT, tau_ref=0.005)
            net.connect(layer4, layerNeur, pstc = tau_psc)
            layerNeur.fixMode()

        if( output_vecs is None or mu_filename == "" ):
            # No cleanup: expose the raw final-layer output.
            net.network.exposeOrigin(layerN.getOrigin("X"), "X")
            self.dimension = len(w4[0])
        else:
            if( sim_mode_cleanup is None ):
                sim_mode_cleanup = sim_mode
            
            # Associative-memory cleanup mapping layer output to output_vecs.
            visual_am = make_VisHeir_AM(net, "Vision Assoc Mem", file_path, mu_filename, output_vecs, \
                                        mut_inhib_scale, sim_mode_cleanup, quick)

            net.connect(layerN.getOrigin("X"), visual_am.getTermination("Input"))
            if( en_bypass ):
                net.network.exposeOrigin(layerN.getOrigin("X"), "Vis Raw")
    
            net.network.exposeOrigin(visual_am.getOrigin("X"), "X")
            self.dimension = len(output_vecs[0])

        self.setMode(sim_mode)
        if( sim_mode == SimulationMode.DIRECT):
            self.fixMode()
        self.releaseMemory()
예제 #15
0
    def __init__(self, stateN, stateD, state_encoders, actions, learningrate,
                 stateradius=1.0, Qradius=1.0, load_weights=None):
        """Build the Q-value network for hierarchical reinforcement learning.

        Wires a current-state population and a remembered previous-state
        population into two ActionValues networks (current and old Q-values),
        computes a learning-error signal from their difference, gates that
        error off while the state is changing, and periodically saves the
        learned weights from a background thread.

        :param stateN: number of neurons in each state population
        :param stateD: dimensionality of the state vector
        :param state_encoders: encoders for the state populations
        :param actions: list of available actions (sets the Q-vector length)
        :param learningrate: learning rate for the ActionValues networks
        :param stateradius: representational radius of the state
        :param Qradius: representational radius of the Q values
        :param load_weights: optional weight-file prefix to restore from
        """
        NetworkImpl.__init__(self)
        self.name = "QNetwork"
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)
        
        N = 50
        # Worst-case vector length for a stateD vector of radius stateradius
        # (2-D diagonal; assumes stateD-independent bound -- see usage).
        statelength = math.sqrt(2*stateradius**2)
        tauPSC = 0.007
        num_actions = len(actions)
        init_Qs = 0.0
        weight_save = 600.0 #period to save weights (realtime, not simulation time)
        
        #set up relays
        state_relay = net.make("state_relay", 1, stateD, mode="direct")
        state_relay.fixMode()
        state_relay.addDecodedTermination("input", MU.I(stateD), 0.001, False)
        
        #create state population
        state_fac = HRLutils.node_fac()
        state_fac.setIntercept(IndicatorPDF(0,1))
            
        state_pop = net.make("state_pop", stateN, stateD, 
                              radius=statelength,
                              node_factory=state_fac,
                              encoders=state_encoders)
#                              eval_points=MU.I(stateD))
#        state_pop = net.make_array("state_pop", stateN/stateD, stateD,
#                                   node_factory=state_fac)
        state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        
        net.connect(state_relay, state_pop, pstc=tauPSC)
        
        #create population tied to previous state (to be used in learning)
        saved_state = memory.Memory("saved_state", N*4, stateD, inputscale=50, radius=stateradius,
                                    direct_storage=True)
        net.add(saved_state)
        
        net.connect(state_relay, saved_state.getTermination("target"))
        
        old_state_pop = net.make("old_state_pop", stateN, stateD, 
                              radius=statelength,
                              node_factory=state_fac,
                              encoders=state_encoders)
#                              eval_points=MU.I(stateD))
#        old_state_pop = net.make_array("old_state_pop", stateN/stateD, stateD,
#                                   node_factory=state_fac)
        old_state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        
        net.connect(saved_state, old_state_pop, pstc=tauPSC)
        
        #set up action nodes
        # Zero-initialized decoders give all actions an initial Q of init_Qs.
        decoders = state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD,init_Qs)], "AXON").getDecoders()
        actionvals = actionvalues.ActionValues("actionvals", N, stateN, actions, learningrate, Qradius=Qradius, init_decoders=decoders)
        net.add(actionvals)
        
        decoders = old_state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD,init_Qs)], "AXON").getDecoders()
        old_actionvals = actionvalues.ActionValues("old_actionvals", N, stateN, actions, learningrate, Qradius=Qradius, init_decoders=decoders)
        net.add(old_actionvals)
        
        net.connect(state_pop.getOrigin("AXON"), actionvals.getTermination("state"))
        net.connect(old_state_pop.getOrigin("AXON"), old_actionvals.getTermination("state"))
        
        if load_weights != None:
            self.loadWeights(load_weights)
        
            #find error between old_actionvals and actionvals
        valdiff = net.make_array("valdiff", N, num_actions, node_factory = HRLutils.node_fac())
        net.connect(old_actionvals, valdiff, transform=MU.diag([2]*num_actions), pstc=tauPSC)
        net.connect(actionvals, valdiff, transform=MU.diag([-2]*num_actions), pstc=tauPSC)
            #doubling values to get a bigger error signal
        
            #calculate diff between curr_state and saved_state and use that to gate valdiff
        statediff = net.make_array("statediff", N, stateD, intercept=(0.2,1))
        net.connect(state_relay, statediff, pstc=tauPSC)
        net.connect(saved_state, statediff, transform=MU.diag([-1]*stateD), pstc=tauPSC)
        
        # While the state is changing (|statediff| large), strongly inhibit
        # valdiff so no learning error is emitted.
        net.connect(statediff, valdiff, func=lambda x: [abs(v) for v in x], 
                    transform = [[-10]*stateD for _ in range(valdiff.getNeurons())], pstc=tauPSC)
        
        net.connect(valdiff, actionvals.getTermination("error"))
        
        #periodically save the weights
        # NOTE(review): non-daemon thread with an infinite loop; the process
        # may not exit cleanly until killed -- confirm this is intended.
        class WeightSaveThread(threading.Thread):
            def __init__(self, func, prefix, period):
                threading.Thread.__init__(self)
                self.func = func
                self.prefix = prefix
                self.period = period
                
            def run(self):
                while True:
                    time.sleep(self.period)
                    self.func(self.prefix)
        wsn = WeightSaveThread(self.saveWeights, os.path.join("weights","tmp"), weight_save)
        wsn.start()
        
        self.exposeTermination(state_relay.getTermination("input"), "state")
        self.exposeTermination(old_actionvals.getTermination("error"), "error")
        self.exposeTermination(saved_state.getTermination("transfer"), "save_state")
        self.exposeOrigin(actionvals.getOrigin("X"), "vals")
        self.exposeOrigin(old_actionvals.getOrigin("X"), "old_vals")