def genVectors(self, N, d):
        if d != 1:
            print "Error, trying to use RangedEvalPointGenerator when d > 1"


#        if not self.low2:
#            return [[PDFTools.sampleFloat(IndicatorPDF(self.low1, self.high1))] for i in range(N)]
#
#        vecs = []
#        for i in range(N):
#            val = PDFTools.sampleFloat(IndicatorPDF(self.low1, self.high2))
#            while not ((val >= self.low1 and val <= self.high1) or (val >= self.low2 and val <= self.high2)):
#                val = PDFTools.sampleFloat(IndicatorPDF(self.low1, self.high2))
#            vecs = vecs + [[val]]
        # distribute the N points evenly across the ranges
        Nperrange = N // self.numranges
        vecs = []
        for i in range(self.numranges):
            low = self.ranges[i][0]
            high = self.ranges[i][1]
            for j in range(Nperrange):
                val = PDFTools.sampleFloat(IndicatorPDF(low, high))
                vecs.append([val])

        # sample any leftover points (when N is not evenly divisible by the
        # number of ranges) from the last range
        for j in range(N - (Nperrange * self.numranges)):
            val = PDFTools.sampleFloat(IndicatorPDF(low, high))
            vecs.append([val])

        return vecs
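
# A rough, self-contained sketch of the same stratified sampling in plain
# Python (random.uniform stands in for IndicatorPDF sampling; the example
# ranges below are illustrative assumptions):
import random

def ranged_eval_points(N, ranges):
    # draw an equal share of the N points from each (low, high) range,
    # topping up any remainder from the last range
    per_range = N // len(ranges)
    points = []
    for low, high in ranges:
        points += [[random.uniform(low, high)] for _ in range(per_range)]
    points += [[random.uniform(low, high)]
               for _ in range(N - per_range * len(ranges))]
    return points

# e.g. ranged_eval_points(10, [(-1.0, -0.5), (0.5, 1.0)]) draws 5 one-element
# points from each band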
Example #2
def defaultEnsembleFactory():
    ef = NEFMorePoints()
    ef.nodeFactory.tauRC = 0.02
    ef.nodeFactory.tauRef = 0.002
    ef.nodeFactory.maxRate = IndicatorPDF(200, 500)
    ef.nodeFactory.intercept = IndicatorPDF(-1, 1)
    ef.beQuiet()
    return ef
Example #3
    def __init__(self, N, d, name="PositiveBias"):
        """Builds the PositiveBias network.

        :param N: base number of neurons
        :param d: dimension of input signal
        :param name: name for network
        """

        self.name = name
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)

        tauPSC = 0.007
        biaslevel = 0.03  # the value to be output for negative inputs

        # threshold the input signal to detect positive values
        nfac = HRLutils.node_fac()
        nfac.setIntercept(IndicatorPDF(0, 0.1))
        neg_thresh = net.make_array("neg_thresh",
                                    N,
                                    d,
                                    encoders=[[1]],
                                    node_factory=nfac)
        neg_thresh.addDecodedTermination("input", MU.I(d), tauPSC, False)

        # create a population that tries to output biaslevel across
        # all dimensions
        bias_input = net.make_input("bias_input", [biaslevel])
        bias_pop = net.make_array(
            "bias_pop",
            N,
            d,
            node_factory=HRLutils.node_fac(),
            eval_points=[[x * 0.01] for x in range(int(biaslevel * 200))])

        net.connect(bias_input, bias_pop, pstc=tauPSC)

        # the individual dimensions of bias_pop are then inhibited by the
        # output of neg_thresh (so any positive values don't get the bias)
        net.connect(neg_thresh,
                    bias_pop,
                    pstc=tauPSC,
                    func=lambda x: [1.0] if x[0] > 0 else [0.0],
                    transform=[[-10 if i == k else 0 for k in range(d)]
                               for i in range(d)
                               for _ in range(bias_pop.getNeurons() / d)])

        # the whole population is inhibited by the learn signal, so that it
        # outputs 0 if the system isn't supposed to be learning
        bias_pop.addTermination("learn",
                                [[-10] for _ in range(bias_pop.getNeurons())],
                                tauPSC, False)

        self.exposeTermination(neg_thresh.getTermination("input"), "input")
        self.exposeTermination(bias_pop.getTermination("learn"), "learn")
        self.exposeOrigin(bias_pop.getOrigin("X"), "X")
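
# A rough plain-Python sketch of the function this network approximates (an
# illustrative reading of the code above; learn_inhib stands in for the
# inhibitory "learn" signal): each output dimension is biaslevel when its
# input is non-positive, and 0 otherwise.
def positive_bias_ideal(x, learn_inhib=0.0, biaslevel=0.03):
    if learn_inhib > 0:  # the "learn" termination inhibits all output
        return [0.0] * len(x)
    return [biaslevel if xi <= 0 else 0.0 for xi in x]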
Example #4

    def genVectors(self, N, d):
        if d != len(self.dir):
            print "Error, direction dimension does not equal requested dimension in DirectedEvalPointGenerator"

        vectors = []
        for i in range(N):
            # scale the direction vector by a random length in [0, 1]
            scale = PDFTools.sampleFloat(IndicatorPDF(0.0, 1.0))
            vec = [float(self.dir[j]) * scale for j in range(d)]
            vectors.append(vec)

        return vectors
Example #5
    def __init__(self, seed=None):
        import random
        from ca.nengo.math.impl import IndicatorPDF
        from ca.nengo.model.neuron.impl import ALIFNeuronFactory
        from ca.nengo.model.neuron.impl import LIFNeuronFactory
        maxrate = IndicatorPDF(10, 50)
        intercept = IndicatorPDF(-1, 1)
        tau_rc = 0.02
        tau_ref = 0.001
        self.lif_factory = LIFNeuronFactory(
            tauRC=tau_rc, tauRef=tau_ref,
            maxRate=maxrate, intercept=intercept)
        tau_adapt = 0.01
        inc_adapt = IndicatorPDF(0.001, 0.02)
        self.alif_factory = ALIFNeuronFactory(
            maxRate=maxrate, intercept=intercept, tauRC=tau_rc,
            tauRef=tau_ref, incN=inc_adapt, tauN=tau_adapt)

        # If no seed passed in, we'll generate one
        if seed is None:
            seed = random.randint(0, 1000)  # That's plenty
        self.seed = seed

        self.net = None
Example #6
def qnetwork(stateN, stateD, state_encoders, actions, learningrate,
                 stateradius=1.0, Qradius=1.0, load_weights=None):
    net = nef.Network("QNetwork")
    with declarative_syntax(net):
        N = 50
        statelength = math.sqrt(2*stateradius**2)
        tauPSC = 0.007
        num_actions = len(actions)
        init_Qs = 0.0
        weight_save = 600.0  # period to save weights (realtime, not simulation time)

        # set up relays
        direct_mode('state_relay', 1, dimension=stateD)
        add_decoded_termination('state_relay', 'input', MU.I(stateD), .001, False)

        # create state population
        ensemble('state_pop', neurons=LIF(stateN),
                 dimensions=stateD,
                 radius=statelength,
                 encoders=state_encoders,
                )
        connect('state_relay', 'state_pop', filter=tauPSC)

        memory('saved_state', neurons=LIF(N * 4), dimension=stateD,
               inputscale=50,
               radius=stateradius,
               direct_storage=True)

        # N.B. the "." syntax refers to an ensemble created by the `memory` macro
        connect('state_relay', 'saved_state.target')

        ensemble('old_state_pop', neurons=LIF(stateN),
                 dimensions=stateD,
                 radius=statelength,
                 encoders=state_encoders)

        connect('saved_state', 'old_state_pop', filter=tauPSC)

        # mess with the intercepts ?
        for name in 'state_pop', 'old_state_pop':
            set_intercepts(name, IndicatorPDF(0, 1))

        fixMode('state_relay')
        fixMode('state_pop', ['default', 'rate'])
        fixMode('old_state_pop', ['default', 'rate'])
Example #7
    def __init__(self, name = "Detector", detect_vec = None, inhib_vec = None, tau_in = 0.005, \
                 en_inhib = False, en_inhibN = None, tau_inhib = 0.005, in_scale = 1.0, inhib_scale = 2.0,\
                 en_out = True, en_N_out = False, en_X_out = False, num_neurons = 20, detect_threshold = 0.4, \
                 sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = 0, net = None, input_name = "Input"):
   
        self.dimension = 1
        NetworkImpl.__init__(self)
        ens_name = name
        if( not isinstance(net, nef.Network) ):
            if( not net is None ):
                net = nef.Network(net, quick)
            else:
                ens_name = "detect"
                net = nef.Network(self, quick)
        self.setName(name)

        if( detect_vec is None ):
            detect_vec = [1]

        vec_dim = len(detect_vec)
        detect_vec_scale = [detect_vec[n] * in_scale for n in range(vec_dim)]
        if( en_inhibN is None ):
            en_inhibN = en_inhib
        if( en_inhib or en_inhibN ):
            if( inhib_vec is None ):
                inhib_vec = [1]
            inhib_dim = len(inhib_vec)
            # compute the scaled inhibitory vector up front so that it is
            # available to both the detect and detectN populations below
            inhib_vec_scale = [inhib_vec[n] * -inhib_scale for n in range(inhib_dim)]

        max_rate  = (100,200)
        max_rateN = (300,400)
        detect_threshold = max(min(detect_threshold, 0.8), 0.2)  # clamp to [0.2, 0.8]
        intercepts  = [detect_threshold + n * (1-detect_threshold)/(num_neurons) for n in range(num_neurons)]
        interceptsN = [-(n * (detect_threshold)/(num_neurons)) for n in range(num_neurons)]
        params  = dict(intercept = intercepts , max_rate = max_rate , quick = quick)
        paramsN = dict(intercept = interceptsN, max_rate = max_rateN, quick = quick)

        out_func  = FilteredStepFunction(shift = detect_threshold, mirror = False)
        out_funcN = FilteredStepFunction(shift = detect_threshold, mirror = True)
        
        if( rand_seed >= 0 ):
            PDFTools.setSeed(rand_seed)
            seed(rand_seed)

        params["encoders"]  = [[1]] * num_neurons
        paramsN["encoders"] = [[-1]] * num_neurons

        pdf  = IndicatorPDF(detect_threshold + 0.1, 1.1)
        pdfN = IndicatorPDF(-0.1, detect_threshold - 0.1)
        params["eval_points"]  = [[pdf.sample()[0]] for _ in range(1000)]
        paramsN["eval_points"] = [[pdfN.sample()[0]] for _ in range(1000)]
        
        if( en_out ):
            if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ):
                detect = SimpleNEFEns(ens_name, 1, input_name = "")
                net.add(detect)
            else:
                detect = net.make(ens_name, num_neurons, 1, **params)
            if( not input_name is None ):
                detect.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False)
            if( en_inhib ):
                detect.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False)
            
            detect.removeDecodedOrigin("X")
            detect.addDecodedOrigin("X", [out_func], "AXON")

            if( en_X_out ):
                detect.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON")
                self.exposeOrigin(detect.getOrigin("x0"), "x0")

        if( en_N_out ):
            if( sim_mode == SimulationMode.DIRECT or str(sim_mode).lower() == 'ideal' ):
                detectN = SimpleNEFEns(ens_name + "N", 1, input_name = "")
                net.add(detectN)
            else:
                detectN = net.make(ens_name + "N", num_neurons, 1, **paramsN)
            if( not input_name is None ):
                detectN.addDecodedTermination(input_name, [detect_vec_scale], tau_in, False)
            if( en_inhibN ):
                detectN.addTermination("Inhib", [inhib_vec_scale] * num_neurons, tau_inhib, False)
        
            detectN.removeDecodedOrigin("X")
            detectN.addDecodedOrigin("X", [out_funcN], "AXON")

            if( en_X_out ):
                detectN.addDecodedOrigin("x0", [PostfixFunction("x0", 1)], "AXON")
                self.exposeOrigin(detectN.getOrigin("x0"), "x0N")
                
        input_terms = []
        inhib_terms = []
        
        if( en_out ):
            if( not input_name is None ):
                input_terms.append(detect.getTermination(input_name))
            self.exposeOrigin(detect.getOrigin("X"), name)
            if( en_inhib ):
                inhib_terms.append(detect.getTermination("Inhib"))                
        if( en_N_out ):
            if( not input_name is None ):
                input_terms.append(detectN.getTermination(input_name))
            self.exposeOrigin(detectN.getOrigin("X"), str(name + "N"))
            if( en_inhibN ):
                inhib_terms.append(detectN.getTermination("Inhib"))

        if( len(input_terms) > 0 ):
            input_term = EnsembleTermination(self, input_name, input_terms)
            self.exposeTermination(input_term, input_name)
        if( len(inhib_terms) > 0 ):
            inhib_term = EnsembleTermination(self, "Inhib", inhib_terms)
            self.exposeTermination(inhib_term, "Inhib")

        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()

        self.releaseMemory()
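
# A rough plain-Python sketch of the ideal response the detector's "X"
# origin approximates (assuming FilteredStepFunction implements a step at
# its shift value, mirrored for the detectN population):
def detector_ideal(x, detect_vec, in_scale=1.0, detect_threshold=0.4,
                   mirror=False):
    val = sum(v * xi for v, xi in zip(detect_vec, x)) * in_scale
    if mirror:  # the "N" population responds to sub-threshold inputs
        return 1.0 if val < detect_threshold else 0.0
    return 1.0 if val > detect_threshold else 0.0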
Example #8
    def __init__(self, discount, rewardradius=1.0, Qradius=1.0):
        """Builds the ErrorCalc2 network.

        :param discount: discount factor, controls rate of integration
        :param rewardradius: expected radius of reward value
        :param Qradius: expected radius of Q values
        """

        self.name = "ErrorCalc"
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)

        tauPSC = 0.007
        intPSC = 0.1
        N = 50

        # relay for current Q input
        currQ = net.make("currQ", 1, 1, node_factory=HRLutils.node_fac(),
                         mode="direct", radius=Qradius)
        currQ.fixMode()
        currQ.addDecodedTermination("input", [[1]], 0.001, False)

        # input population for resetting the network
        reset_nodefac = HRLutils.node_fac()
        reset_nodefac.setIntercept(IndicatorPDF(0.3, 1.0))
        reset = net.make("reset", N, 1, encoders=[[1]],
                         node_factory=reset_nodefac)
        reset.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        # this population will begin outputting a value once the reset
        # signal exceeds the threshold, and that output will then be
        # used to reset the rest of the network

        reset.addDecodedTermination("input", [[1]], tauPSC, False)

        # relay for stored previous value of Q
        storeQ = net.make("storeQ", 1, 1, node_factory=HRLutils.node_fac(),
                          mode="direct", radius=Qradius)
        storeQ.fixMode()
        storeQ.addDecodedTermination("input", [[1]], 0.001, False)

        # calculate "discount" by integrating output of storeQ
        acc_storeQ = memory.Memory("acc_storeQ", N * 8, 1, inputscale=50)
        net.add(acc_storeQ)

        zero_input = net.make_input("zero_input", [0])

        net.connect(zero_input, acc_storeQ.getTermination("target"))
        net.connect(reset, acc_storeQ.getTermination("transfer"))

        # threshold storeQ value so it won't go below zero.  that is, if we
        # have negative Q values, we don't want to have a negative discount,
        # or that will just drive the highest (negative) Q value upwards, and
        # it will always be selected.  negative Q values are instead pushed
        # upwards by the PositiveBias mechanism.
        Qthresh = net.make("Qthresh", N * 2, 1, encoders=[[1]],
                           eval_points=[[x * 0.001] for x in range(1000)],
                           radius=Qradius, intercept=(0, 1))
        net.connect(storeQ, Qthresh, pstc=tauPSC)
        net.connect(Qthresh, acc_storeQ, pstc=intPSC,
                    transform=[[discount * intPSC]],
                    func=lambda x: max(x[0], 0.0))

        # accumulate reward
        reward = memory.Memory("reward", N * 4, 1, radius=rewardradius,
                               inputscale=50)
        net.add(reward)

        reward.addDecodedTermination("input", [[intPSC]], intPSC, False)

        net.connect(zero_input, reward.getTermination("target"))
        net.connect(reset, reward.getTermination("transfer"))

        # put reward, currQ, storeQ, and discount together to calculate error
        error = net.make("error", N * 2, 1, node_factory=HRLutils.node_fac())

        net.connect(currQ, error, pstc=tauPSC)
        net.connect(reward, error, pstc=tauPSC)
        net.connect(storeQ, error, pstc=tauPSC, transform=[[-1]])
        net.connect(acc_storeQ, error, pstc=tauPSC, transform=[[-1]])

        self.exposeTermination(reward.getTermination("input"), "reward")
        self.exposeTermination(reset.getTermination("input"), "reset")
        self.exposeTermination(currQ.getTermination("input"), "currQ")
        self.exposeTermination(storeQ.getTermination("input"), "storeQ")
        self.exposeOrigin(error.getOrigin("X"), "X")
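
# A rough discrete-time sketch of the quantities combined at the "error"
# population (an illustrative reading of the connections above; dt stands
# in for intPSC): the two accumulators integrate until reset, and
# error = currQ + accumulated reward - storeQ - accumulated discount.
def errorcalc2_step(currQ, storeQ, reward, acc, discount, dt=0.1):
    acc["reward"] += reward * dt
    acc["discount"] += discount * max(storeQ, 0.0) * dt  # thresholded storeQ
    return currQ + acc["reward"] - storeQ - acc["discount"]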
Example #9
    def __init__(self, name = "Cleanup Memory", \
                 in_vec_list = None, out_vec_list = None, tau_in = 0.005, in_scale = 1.0, \
                 en_inhib = False, tau_inhib = 0.005, tau_smooth = 0.0001, inhib_scale = 2.0, \
                 en_mut_inhib = False, mut_inhib_scale = 2.0, \
                 num_neurons_per_vec = 10, threshold = 0.3, \
                 N_out_vec = None, en_X_out = False, input_name = "Input", \
                 sim_mode = SimulationMode.DEFAULT, quick = True, rand_seed = None, **params):
        
        NetworkImpl.__init__(self)
        self.setName(name)

        if( mut_inhib_scale <= 0 ):
            en_mut_inhib = False

        if( out_vec_list is None ):
            out_vec_list = in_vec_list
        self.dimension = len(out_vec_list[0])

        if( isinstance(mut_inhib_scale, (int,float)) ):
            mut_inhib_scale = [mut_inhib_scale] * len(in_vec_list)
        if( isinstance(inhib_scale, (int,float)) ):
            inhib_scale = [inhib_scale] * len(in_vec_list)
        if( isinstance(threshold, (int,float)) ):
            threshold = [threshold] * len(in_vec_list)
        in_vec_list = [[in_vec_list[i][d] * in_scale for d in range(len(in_vec_list[i]))] \
                      for i in range(len(in_vec_list))]
        
        self.i_list = []
        self.in_vec_list = []

        if( str(sim_mode).lower() == 'ideal' ):
            node = CleanupMemoryNode(name, in_vec_list, out_vec_list, tau_in, en_inhib, tau_inhib, \
                                     threshold = sum(threshold) / len(threshold), en_wta = en_mut_inhib, \
                                     N_out_vec = N_out_vec)
            self.addNode(node)
            self.exposeTermination(node.getTermination("Input"), "Input")
            if( en_inhib ):
                self.exposeTermination(node.getTermination("Inhib"), "Inhib")
            self.exposeOrigin(node.getOrigin("Output"), "X")
            if( en_X_out ):
                self.exposeOrigin(node.getOrigin("X"), "x0")
        else:
            net = nef.Network(self, quick)

            enss = []
            
            num_items = 0
            for out_vec in out_vec_list:
                if( out_vec is None ):
                    continue
                else:
                    num_items += 1

            in_terms = []
            inhib_terms = []
            origins = []

            en_N_out = not (N_out_vec is None)

            out_relay = SimpleNEFEns("Output", self.dimension, pstc = tau_smooth)
            net.add(out_relay)

            if( en_X_out ):
                x_relay = SimpleNEFEns("X", num_items + en_N_out, pstc = tau_smooth)
                net.add(x_relay)

            for i,in_vec in enumerate(in_vec_list):
                if( out_vec_list[i] is None ):
                    continue

                self.in_vec_list.append(in_vec)
                self.i_list.append(i)
                
                pdf = IndicatorPDF(threshold[i] + 0.1, 1)
                eval_points = [[pdf.sample()[0]] for _ in range(1000)]
                intercepts = [threshold[i] + n * (1-threshold[i])/(num_neurons_per_vec) for n in range(num_neurons_per_vec)]
                if( sim_mode == SimulationMode.DIRECT ):
                    ens = SimpleNEFEns("Item" + str(i), 1, input_name = "")
                    net.add(ens)
                else:
                    ens = net.make("Item" + str(i), num_neurons_per_vec, 1, eval_points = eval_points, \
                                   encoders = [[1]] * num_neurons_per_vec, intercept = intercepts, \
                                   max_rate = (100,200), seed = rand_seed)
                
                if( input_name != "" and not input_name is None ):
                    ens.addDecodedTermination(input_name, [in_vec], tau_in, False)
                    in_terms.append(ens.getTermination(input_name))
                ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = threshold[i], \
                                     step_val = out_vec_list[i][d]) for d in range(self.dimension)], \
                                     "AXON")
                enss.append(ens)
                
                out_relay.addDecodedTermination("Item" + str(i), None, tau_smooth, False)
                out_relay.addNeuronCount(ens.getNeuronCount())
                net.connect(ens.getOrigin("Output"), out_relay.getTermination("Item" + str(i)))
                
                if( en_X_out ):
                    ens.removeDecodedOrigin("X")
                    ens.addDecodedOrigin("X", [FilteredStepFunction(shift = threshold[i])], "AXON")
                    x_relay.addDecodedTermination("Item" + str(i), transpose(delta(num_items + en_N_out, i)), tau_smooth, False)
                    x_relay.addNeuronCount(ens.getNeuronCount())
                    net.connect(ens.getOrigin("X"), x_relay.getTermination("Item" + str(i)))

                if( en_inhib ):
                    ens.addTermination("Inhib", [[-inhib_scale[i]]] * num_neurons_per_vec, tau_inhib, False)
                    inhib_terms.append(ens.getTermination("Inhib"))
            
            if( not N_out_vec is None ):
                N_threshold = min(threshold)
                pdf = IndicatorPDF(-0.1, N_threshold - 0.1)
                eval_points = [[pdf.sample()[0]] for _ in range(1000)]
                intercepts  = [-(n * (N_threshold)/(num_neurons_per_vec)) for n in range(num_neurons_per_vec)]
                if( sim_mode == SimulationMode.DIRECT ):
                    ens = SimpleNEFEns("ItemN", 1, input_name = "")
                    net.add(ens)
                else:
                    ens = net.make("ItemN", num_neurons_per_vec, 1, eval_points = eval_points, \
                                   encoders = [[-1]] * num_neurons_per_vec, intercept = intercepts, \
                                   max_rate = (300,400), seed = rand_seed)

                for i in range(len(in_vec_list)):
                    ens.addDecodedTermination("Item" + str(i), [[1]], 0.005, False)
                    net.connect(enss[i].getOrigin("X"), ens.getTermination("Item" + str(i)))

                ens.addDecodedOrigin("Output", [FilteredStepFunction(shift = N_threshold, \
                                     step_val = N_out_vec[d], mirror = True) for d in range(self.dimension)], \
                                     "AXON")
                
                out_relay.addDecodedTermination("ItemN", None, tau_smooth, False)
                out_relay.addNeuronCount(ens.getNeuronCount())
                net.connect(ens.getOrigin("Output"), out_relay.getTermination("ItemN"))
                
                if( en_X_out ):
                    ens.removeDecodedOrigin("X")
                    ens.addDecodedOrigin("X", [FilteredStepFunction(shift = N_threshold, mirror = True)], "AXON")
                    x_relay.addDecodedTermination("ItemN", transpose(delta(num_items + en_N_out, num_items)), tau_smooth, False)
                    x_relay.addNeuronCount(ens.getNeuronCount())
                    net.connect(ens.getOrigin("X"), x_relay.getTermination("ItemN"))

                if( en_inhib ):
                    # the "ItemN" population reuses the last item's inhibition scale
                    ens.addTermination("Inhib", [[-inhib_scale[-1]]] * num_neurons_per_vec, tau_inhib, False)
                    inhib_terms.append(ens.getTermination("Inhib"))
             

            if( en_mut_inhib ):
                for n in range(num_items):
                    for i in range(num_items):
                        if( n != i):
                            enss[i].addTermination("Inhib" + str(n), [[-mut_inhib_scale[i]]] * num_neurons_per_vec, 0.005, False)
                            net.connect(enss[n].getOrigin("X"), enss[i].getTermination("Inhib" + str(n)))
            
            if( len(in_terms) > 0 ):
                in_term = EnsembleTermination(net.network, "Input", in_terms)
                net.network.exposeTermination(in_term, "Input")
            net.network.exposeOrigin(out_relay.getOrigin("X"), "X")

            if( en_X_out ):
                self.exposeOrigin(x_relay.getOrigin("X"), "x0")

            if( en_inhib ):
                inhib_term = EnsembleTermination(net.network, "Inhib", inhib_terms)
                net.network.exposeTermination(inhib_term, "Inhib")
            
            # Reset the random seed if one was set
            if( rand_seed is not None ):
                seed()
            self.releaseMemory()

        if( str(sim_mode).lower() == 'ideal' ):
            sim_mode = SimulationMode.DIRECT
        NetworkImpl.setMode(self, sim_mode)
        if( sim_mode == SimulationMode.DIRECT ):
            self.fixMode()
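
# A rough plain-Python sketch of the ideal cleanup operation this network
# approximates (an illustrative reading of the code above): every stored
# item whose match against the input exceeds its threshold contributes its
# output vector.
def cleanup_ideal(x, in_vecs, out_vecs, thresholds):
    out = [0.0] * len(out_vecs[0])
    for in_v, out_v, th in zip(in_vecs, out_vecs, thresholds):
        match = sum(a * b for a, b in zip(in_v, x))
        if match > th:
            out = [o + v for o, v in zip(out, out_v)]
    return out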
Example #10

from ca.nengo.math.impl import IndicatorPDF
# imports for the factory classes used below (assumed package paths)
from ca.nengo.model.nef.impl import NEFEnsembleFactoryImpl
from ca.nengo.model.neuron.impl import ALIFNeuronFactory

# Get access to the Nengo objects using nef_core.py script
import nef

try:
    world.remove(network)
except:
    pass

# Create the network
net = nef.Network('Working Memory')

# Set up a factory to generate neural populations of aLIF neurons
ef = NEFEnsembleFactoryImpl()
maxRate = IndicatorPDF(20, 100)
intercept = IndicatorPDF(-1, 1)
incAdapt = IndicatorPDF(.001, .200)  # wide variety of adaptation increments
nf = ALIFNeuronFactory(maxRate, intercept, incAdapt, .001, .020, .01)
ef.setNodeFactory(nf)

# Make neural populations
Freq_pop = ef.make("Frequency", 200, 1)
Time_pop = ef.make("Time", 200, 1)
Mem_pop = ef.make("2D Memory", 100, 2)

net.add(Freq_pop)
net.add(Time_pop)
net.add(Mem_pop)

# Make the input, 500ms step function
Example #11
    def __init__(self,
                 name,
                 N,
                 d,
                 scale=1.0,
                 weights=None,
                 maxinput=1.0,
                 oneDinput=False):
        """Builds the Eprod (element-wise product) network.

        :param scale: scale on the output of the multiplication,
            output = (input1 .* input2) * scale
        :param weights: optional matrices applied to each input,
            output = (C1*input1 .* C2*input2) * scale
        :param maxinput: maximum expected value of any dimension of the
            inputs; this is used to scale the inputs internally so that the
            lengths of the vectors in the intermediate populations are not
            too small (which would result in a lot of noise in the
            calculations)
        :param oneDinput: indicates that the second input is one
            dimensional, and is just a scale on the first input rather than
            an element-wise product
        """

        self.name = name
        tauPSC = 0.007

        # the size of the intermediate populations
        smallN = int(math.ceil(float(N) / d))

        # the maximum value of the vectors represented by the intermediate
        # populations. the vector is at most [maxinput maxinput], so the length
        # of that is sqrt(maxinput**2 + maxinput**2)
        maxlength = math.sqrt(2 * maxinput**2)

        if weights is not None and len(weights) != 2:
            print "Warning, other than 2 matrices given to eprod"

        if weights is None:
            weights = [MU.I(d), MU.I(d)]

        inputd = len(weights[0][0])

        ef = HRLutils.defaultEnsembleFactory()

        # create input populations
        in1 = ef.make("in1", 1, inputd)
        in1.addDecodedTermination("input", MU.I(inputd), 0.001, False)
        self.addNode(in1)
        in1.setMode(SimulationMode.DIRECT)  # since this is just a relay
        in1.fixMode()

        in2 = ef.make("in2", 1, inputd)
        if not oneDinput:
            in2.addDecodedTermination("input", MU.I(inputd), 0.001, False)
        else:
            # if it is a 1-D input we just expand it to a full vector of that
            # value so that we can treat it as an element-wise product
            in2.addDecodedTermination("input", [[1] for i in range(inputd)],
                                      0.001, False)
        self.addNode(in2)
        in2.setMode(SimulationMode.DIRECT)  # since this is just a relay
        in2.fixMode()

        # ensemble for intermediate populations
        multef = NEFEnsembleFactoryImpl()
        multef.nodeFactory.tauRC = 0.05
        multef.nodeFactory.tauRef = 0.002
        multef.nodeFactory.maxRate = IndicatorPDF(200, 500)
        multef.nodeFactory.intercept = IndicatorPDF(-1, 1)
        multef.encoderFactory = (
            vectorgenerators.MultiplicationVectorGenerator())
        multef.beQuiet()

        result = ef.make("result", 1, d)
        result.setMode(SimulationMode.DIRECT)  # since this is just a relay
        result.fixMode()
        self.addNode(result)

        resultTerm = [[0] for _ in range(d)]
        zeros = [0 for _ in range(inputd)]

        for e in range(d):
            # create a 2D population for each input dimension which will
            # combine the components from one dimension of each of the input
            # populations
            mpop = multef.make('mpop_' + str(e), smallN, 2)

            # make two connections that will select one component from each
            # of the input pops
            # we divide by maxlength to ensure that the maximum length of the
            # 2D vector is 1
            # remember that (for some reason) the convention in Nengo is that
            # the input matrices are transpose of what they would be
            # mathematically
            mpop.addDecodedTermination('a',
                                       [[(1.0 / maxlength) * weights[0][e][i]
                                         for i in range(inputd)], zeros],
                                       tauPSC, False)
            mpop.addDecodedTermination('b', [
                zeros,
                [(1.0 / maxlength) * weights[1][e][i] for i in range(inputd)]
            ], tauPSC, False)

            # multiply the two selected components together
            mpop.addDecodedOrigin("output", [PostfixFunction('x0*x1', 2)],
                                  "AXON")

            self.addNode(mpop)
            self.addProjection(in1.getOrigin('X'), mpop.getTermination('a'))
            self.addProjection(in2.getOrigin('X'), mpop.getTermination('b'))

            # combine the 1D results back into one vector.
            # we scaled each input by 1/maxlength, then multiplied them
            # together for a total scale of 1/maxlength**2, so to undo we
            # multiply by maxlength**2
            resultTerm[e] = [maxlength**2 * scale]
            result.addDecodedTermination('in_' + str(e), resultTerm, 0.001,
                                         False)
            resultTerm[e] = [0]

            self.addProjection(mpop.getOrigin('output'),
                               result.getTermination('in_' + str(e)))

        self.exposeTermination(in1.getTermination("input"), "A")
        self.exposeTermination(in2.getTermination("input"), "B")
        self.exposeOrigin(result.getOrigin("X"), "X")
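
# A rough plain-Python sketch of the computation and the scaling trick used
# above (an illustrative reading of the code): each input component is
# shrunk by 1/maxlength so the 2-D intermediate vectors stay within unit
# length, and the product is re-expanded by maxlength**2.
import math

def eprod_ideal(a, b, scale=1.0, maxinput=1.0):
    maxlength = math.sqrt(2 * maxinput ** 2)
    out = []
    for x, y in zip(a, b):
        prod = (x / maxlength) * (y / maxlength)  # what each mpop computes
        out.append(prod * maxlength ** 2 * scale)  # undo the scaling
    return out

# e.g. eprod_ideal([1.0, 0.5], [0.5, 0.5]) == [0.5, 0.25]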
Example #12
def make_basal_ganglia(net, input, output, dimensions, neurons=100,
                       tau_ampa=0.002, tau_gaba=0.008, input_transform=None,
                       output_weight=1, noise=None, same_neurons=True,
                       radius=1.5, learn=False, bistable=False,
                       bistable_gain=1.0, quick=True):

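        # note: this function relies on module-level constants of the
        # Gurney-Prescott-Redgrave model (e, ep, ee, eg, mm, mp, me, mg,
        # ws, wt, wm, wg, wp, we, le, lg), and on array (from numeric),
        # defined elsewhere in the original module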
        # create the necessary neural ensembles
        if same_neurons: code=''
        else: code='%d'
        if bistable:
            from ca.nengo.model.neuron.impl import GruberNeuronFactory
            from ca.nengo.math.impl import IndicatorPDF
            str_factory=GruberNeuronFactory(IndicatorPDF(1*bistable_gain,3*bistable_gain),IndicatorPDF(-1*bistable_gain, 0.5*bistable_gain))
            code+='bsg%1.2f'%bistable_gain
        else:
            str_factory=None
            
        StrD1=net.make_array('StrD1',neurons,dimensions,intercept=(e,1),
                             encoders=[[1]],quick=quick,noise=noise,
                             storage_code='bgStrD1'+code,radius=radius,
                             node_factory=str_factory,decoder_sign=1)
        StrD2=net.make_array('StrD2',neurons,dimensions,intercept=(e,1),
                             encoders=[[1]],quick=quick,noise=noise,
                             storage_code='bgStrD2'+code,radius=radius,
                             node_factory=str_factory,decoder_sign=1)
        
        if bistable:
            from ca.nengo.model.impl import EnsembleTermination
            dopD1 = EnsembleTermination(StrD1,'dopamine',[n.getTermination('dopamine') for n in StrD1.getNodes()])
            StrD1.exposeTermination(dopD1,'dopamine')
            dopD2 = EnsembleTermination(StrD2,'dopamine',[n.getTermination('dopamine') for n in StrD2.getNodes()])
            StrD2.exposeTermination(dopD2,'dopamine')
        
        
        STN=net.make_array('STN',neurons,dimensions,intercept=(ep,1),
                           encoders=[[1]],quick=quick,noise=noise,
                           storage_code='bgSTN'+code,radius=radius,
                           decoder_sign=1)
        GPi=net.make_array('GPi',neurons,dimensions,intercept=(eg,1),
                           encoders=[[1]],quick=quick,noise=noise,
                           storage_code='bgGPi'+code,radius=radius,
                           decoder_sign=1)
        GPe=net.make_array('GPe',neurons,dimensions,intercept=(ee,1),
                           encoders=[[1]],quick=quick,noise=noise,
                           storage_code='bgGPe'+code,radius=radius,
                           decoder_sign=1)

        # connect the input to the striatum and STN (excitatory)
        if not isinstance(input,(list,tuple)):
            input=[input]
        for i in range(len(input)):
            if input_transform is None:
                net.connect(input[i],StrD1,weight=ws*(1+lg),pstc=tau_ampa,plastic_array=learn)
                net.connect(input[i],StrD2,weight=ws*(1-le),pstc=tau_ampa,plastic_array=learn)
                net.connect(input[i],STN,weight=wt,pstc=tau_ampa)
            else:
                net.connect(input[i],StrD1,transform=ws*(1+lg)*array(input_transform[i]),pstc=tau_ampa,plastic_array=learn)
                net.connect(input[i],StrD2,transform=ws*(1-le)*array(input_transform[i]),pstc=tau_ampa,plastic_array=learn)
                net.connect(input[i],STN,transform=array(input_transform[i])*wt,pstc=tau_ampa)


        # connect the striatum to the GPi and GPe (inhibitory)
        def func_str(x):
            if x[0]<e: return 0
            return mm*(x[0]-e)
        net.connect(StrD1,GPi,func=func_str,weight=-wm,pstc=tau_gaba)
        net.connect(StrD2,GPe,func=func_str,weight=-wm,pstc=tau_gaba)

        # connect the STN to GPi and GPe (broad and excitatory)
        def func_stn(x):
            if x[0]<ep: return 0
            return mp*(x[0]-ep)
    
        tr=[[wp]*dimensions for i in range(dimensions)]    
        net.connect(STN,GPi,func=func_stn,transform=tr,pstc=tau_ampa)
        net.connect(STN,GPe,func=func_stn,transform=tr,pstc=tau_ampa)        

        # connect the GPe to GPi and STN (inhibitory)
        def func_gpe(x):
            if x[0]<ee: return 0
            return me*(x[0]-ee)
        net.connect(GPe,GPi,func=func_gpe,weight=-we,pstc=tau_gaba)
        net.connect(GPe,STN,func=func_gpe,weight=-wg,pstc=tau_gaba)

        # connect GPi to output (inhibitory)
        def func_gpi(x):
            if x[0]<eg: return 0
            return mg*(x[0]-eg)
        net.connect(GPi,output,func=func_gpi,pstc=tau_gaba,weight=output_weight)
Example #13
    def __init__(self, gamma, rewardradius=1.0):
        """Builds the ErrorCalc network.

        :param gamma: discount factor
        :param rewardradius: expected radius of reward values
        """

        self.name = "ErrorCalc"
        tauPSC = 0.007
        intPSC = 0.1
        N = 50

        ef = HRLutils.defaultEnsembleFactory()

        # current Q input
        currQ = ef.make("currQ", 1, 1)
        currQ.addDecodedTermination("input", [[1]], 0.001, False)
        self.addNode(currQ)
        currQ.setMode(SimulationMode.DIRECT)
        currQ.fixMode()
        self.exposeTermination(currQ.getTermination("input"), "currQ")

        # input population for resetting the network
        resetef = HRLutils.defaultEnsembleFactory()
        resetef.setEncoderFactory(
            vectorgenerators.DirectedVectorGenerator([1]))
        resetef.getNodeFactory().setIntercept(IndicatorPDF(0.3, 1.0))
        reset = resetef.make("reset", N, 1)
        reset.addDecodedTermination("input", [[1]], tauPSC, False)
        self.addNode(reset)
        reset.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.exposeTermination(reset.getTermination("input"), "reset")

        # store previous value of Q
        storeQ = memory.Memory("storeQ", N * 4, 1, inputscale=50)
        self.addNode(storeQ)
        self.addProjection(reset.getOrigin("X"),
                           storeQ.getTermination("transfer"))
        self.addProjection(currQ.getOrigin("X"),
                           storeQ.getTermination("target"))

        # calculate discount
        biasInput = FunctionInput("biasinput", [ConstantFunction(1, 1)],
                                  Units.UNK)
        self.addNode(biasInput)

        discount = memory.Memory("discount",
                                 N * 4,
                                 1,
                                 inputscale=50,
                                 recurweight=gamma)
        self.addNode(discount)
        self.addProjection(biasInput.getOrigin("origin"),
                           discount.getTermination("target"))
        self.addProjection(reset.getOrigin("X"),
                           discount.getTermination("transfer"))

        # accumulate discounted reward
        # do we really need gamma to make this all work? if it proves to be a
        # problem, could try removing it, and just use un-discounted reward.
        # we can just use the fact that the reward integrator will saturate to
        # prevent rewards from going to infinity
        discountreward = eprod.Eprod("discountreward",
                                     N * 4,
                                     1,
                                     weights=[[[1.0 / rewardradius]], [[1.0]]],
                                     oneDinput=True)
        self.addNode(discountreward)
        self.exposeTermination(discountreward.getTermination("A"), "reward")
        self.addProjection(discount.getOrigin("X"),
                           discountreward.getTermination("B"))

        reward = ef.make("reward", N * 4, 1)
        reward.addDecodedTermination("input", [[intPSC]], intPSC, False)
        reward.addDecodedTermination("feedback", [[1]], intPSC, False)
        reward.addTermination("gate",
                              [[-8] for _ in range(reward.getNodeCount())],
                              intPSC, False)
        self.addNode(reward)
        reward.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.addProjection(reward.getOrigin("X"),
                           reward.getTermination("feedback"))
        self.addProjection(discountreward.getOrigin("X"),
                           reward.getTermination("input"))
        self.addProjection(reset.getOrigin("X"), reward.getTermination("gate"))

        # weight currQ by discount
        discountcurrQ = eprod.Eprod("discountcurrQ", N * 4, 1, oneDinput=True)
        self.addNode(discountcurrQ)
        self.addProjection(currQ.getOrigin("X"),
                           discountcurrQ.getTermination("A"))
        self.addProjection(discount.getOrigin("X"),
                           discountcurrQ.getTermination("B"))

        # error calculation
        # radius of 2 since max error = maxQ + maxreward - 0 (unless we let Q
        # values go negative)
        error = ef.make("error", N * 2, [2])  # one-element radii list: 1-D with radius 2
        error.addDecodedTermination("currQ", [[1]], tauPSC, False)
        error.addDecodedTermination("reward", [[1]], tauPSC, False)
        error.addDecodedTermination("storeQ", [[-1]], tauPSC, False)
        self.addNode(error)
        self.addProjection(discountcurrQ.getOrigin("X"),
                           error.getTermination("currQ"))
        self.addProjection(reward.getOrigin("X"),
                           error.getTermination("reward"))
        self.addProjection(storeQ.getOrigin("X"),
                           error.getTermination("storeQ"))
        self.exposeOrigin(error.getOrigin("X"), "X")
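
# A rough discrete-time sketch of how the "discount" memory evolves (an
# illustrative reading of recurweight=gamma): the stored value starts at 1
# (loaded from the bias input on reset) and is multiplied by gamma on each
# recurrent pass, so after t passes it holds roughly gamma**t.
def discount_trace(gamma, steps):
    val, trace = 1.0, []
    for _ in range(steps):
        trace.append(val)
        val *= gamma  # recurrent connection with weight gamma
    return trace

# e.g. discount_trace(0.95, 4) -> [1.0, 0.95, 0.9025, 0.857375]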
Example #14
def node_fac():
    tauRC = 0.02
    tauRef = 0.002
    return LIFNeuronFactory(tauRC, tauRef, IndicatorPDF(100, 200),
                            IndicatorPDF(-0.9, 0.9))
Example #15
    def __init__(self, stateN, stateD, state_encoders, actions, learningrate,
                 stateradius=1.0, Qradius=1.0, load_weights=None):
        NetworkImpl.__init__(self)
        self.name = "QNetwork"
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)
        
        N = 50
        statelength = math.sqrt(2*stateradius**2)
        tauPSC = 0.007
        num_actions = len(actions)
        init_Qs = 0.0
        weight_save = 600.0  # period to save weights (realtime, not simulation time)
        
        # set up relays
        state_relay = net.make("state_relay", 1, stateD, mode="direct")
        state_relay.fixMode()
        state_relay.addDecodedTermination("input", MU.I(stateD), 0.001, False)
        
        # create state population
        state_fac = HRLutils.node_fac()
        state_fac.setIntercept(IndicatorPDF(0,1))
            
        state_pop = net.make("state_pop", stateN, stateD, 
                              radius=statelength,
                              node_factory=state_fac,
                              encoders=state_encoders)
#                              eval_points=MU.I(stateD))
#        state_pop = net.make_array("state_pop", stateN/stateD, stateD,
#                                   node_factory=state_fac)
        state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        
        net.connect(state_relay, state_pop, pstc=tauPSC)
        
        # create population tied to previous state (to be used in learning)
        saved_state = memory.Memory("saved_state", N*4, stateD, inputscale=50, radius=stateradius,
                                    direct_storage=True)
        net.add(saved_state)
        
        net.connect(state_relay, saved_state.getTermination("target"))
        
        old_state_pop = net.make("old_state_pop", stateN, stateD, 
                              radius=statelength,
                              node_factory=state_fac,
                              encoders=state_encoders)
#                              eval_points=MU.I(stateD))
#        old_state_pop = net.make_array("old_state_pop", stateN/stateD, stateD,
#                                   node_factory=state_fac)
        old_state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        
        net.connect(saved_state, old_state_pop, pstc=tauPSC)
        
        # set up action nodes
        decoders = state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD,init_Qs)], "AXON").getDecoders()
        actionvals = actionvalues.ActionValues("actionvals", N, stateN, actions, learningrate, Qradius=Qradius, init_decoders=decoders)
        net.add(actionvals)
        
        decoders = old_state_pop.addDecodedOrigin("init_decoders", [ConstantFunction(stateD,init_Qs)], "AXON").getDecoders()
        old_actionvals = actionvalues.ActionValues("old_actionvals", N, stateN, actions, learningrate, Qradius=Qradius, init_decoders=decoders)
        net.add(old_actionvals)
        
        net.connect(state_pop.getOrigin("AXON"), actionvals.getTermination("state"))
        net.connect(old_state_pop.getOrigin("AXON"), old_actionvals.getTermination("state"))
        
        if load_weights is not None:
            self.loadWeights(load_weights)

        # find error between old_actionvals and actionvals
        # (doubling the values to get a bigger error signal)
        valdiff = net.make_array("valdiff", N, num_actions, node_factory=HRLutils.node_fac())
        net.connect(old_actionvals, valdiff, transform=MU.diag([2]*num_actions), pstc=tauPSC)
        net.connect(actionvals, valdiff, transform=MU.diag([-2]*num_actions), pstc=tauPSC)

        # calculate diff between curr_state and saved_state and use that to gate valdiff
        statediff = net.make_array("statediff", N, stateD, intercept=(0.2,1))
        net.connect(state_relay, statediff, pstc=tauPSC)
        net.connect(saved_state, statediff, transform=MU.diag([-1]*stateD), pstc=tauPSC)
        
        net.connect(statediff, valdiff, func=lambda x: [abs(v) for v in x], 
                    transform = [[-10]*stateD for _ in range(valdiff.getNeurons())], pstc=tauPSC)
        
        net.connect(valdiff, actionvals.getTermination("error"))
        
        # periodically save the weights
        class WeightSaveThread(threading.Thread):
            def __init__(self, func, prefix, period):
                threading.Thread.__init__(self)
                self.setDaemon(True)  # don't block interpreter exit
                self.func = func
                self.prefix = prefix
                self.period = period

            def run(self):
                while True:
                    time.sleep(self.period)
                    self.func(self.prefix)
        wsn = WeightSaveThread(self.saveWeights,
                               os.path.join("weights", "tmp"), weight_save)
        wsn.start()
        
        self.exposeTermination(state_relay.getTermination("input"), "state")
        self.exposeTermination(old_actionvals.getTermination("error"), "error")
        self.exposeTermination(saved_state.getTermination("transfer"), "save_state")
        self.exposeOrigin(actionvals.getOrigin("X"), "vals")
        self.exposeOrigin(old_actionvals.getOrigin("X"), "old_vals")
Example #16
    def __init__(self, actions, Qradius=1, noiselevel=0.03):
        """Builds the BGNetwork.

        :param actions: actions available to the system
            :type actions: list of tuples (action_name,action_vector)
        :param Qradius: expected radius of Q values
        :param noiselevel: standard deviation of noise added to Q values for
            exploration
        """

        self.name = "BGNetwork"
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)

        self.N = 50
        self.d = len(actions)
        self.mut_inhib = 1.0  # mutual inhibition between actions
        self.tauPSC = 0.007

        # make basal ganglia
        netbg = nef.Network("bg")

        bginput = netbg.make("bginput", 1, self.d, mode="direct")
        bginput.fixMode()
        bginput.addDecodedTermination("input",
                                      MU.diag([1.0 / Qradius for _ in
                                               range(self.d)]), 0.001, False)
        # divide by Q radius to get values back into 0 -- 1 range

        bgoutput = netbg.make("bgoutput", 1, self.d, mode="direct")
        bgoutput.fixMode()

        basalganglia.make_basal_ganglia(netbg, bginput, bgoutput,
                                        dimensions=self.d, neurons=200)
        bg = netbg.network
        net.add(bg)
        bg.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])

        bg.exposeTermination(bginput.getTermination("input"), "input")
        bg.exposeOrigin(bgoutput.getOrigin("X"), "X")

        # insert noise (used to give some randomness to drive exploration)
        noiselevel = net.make_input("noiselevel", [noiselevel])

        noise = noisenode.NoiseNode(1, dimension=len(actions))
        net.add(noise)

        net.connect(noiselevel, noise.getTermination("scale"))
        net.connect(noise.getOrigin("noise"), "bg.bginput", pstc=0.001)

        # add bias to shift everything up to 0.5--1.5
        biasinput = net.make_input("biasinput", [0.5])
        net.connect(biasinput, "bg.bginput",
                    transform=[[1] for _ in range(self.d)], pstc=0.001)

        # invert BG output (so the "selected" action will have a positive value
        # and the rest zero)
        invert = thalamus.make(net, name="invert", neurons=self.N,
                               dimensions=self.d, useQuick=False)
        invert.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        net.connect(bg, invert.getTermination("bg_input"))

        # add mutual inhibition
        net.connect(invert.getOrigin("xBiased"), invert, pstc=self.tauPSC,
                    transform=[[0 if i == j else -self.mut_inhib
                                for j in range(self.d)]
                               for i in range(self.d)])

        # threshold output values so that you get a nice clean 0 for
        # non-selected and 1 for selected
        threshf = HRLutils.node_fac()
        threshold = 0.1
        threshf.setIntercept(IndicatorPDF(threshold, 1.0))
        val_threshold = net.make_array("val_threshold", self.N * 2, self.d,
                                       node_factory=threshf, encoders=[[1]])
        val_threshold.addDecodedOrigin(
            "output",
            [PiecewiseConstantFunction([threshold], [0, 1])
             for _ in range(self.d)], "AXON", True)

        net.connect(invert.getOrigin("xBiased"), val_threshold,
                    pstc=self.tauPSC)

        # output action (action vectors weighted by BG output)
        weight_actions = net.make_array("weight_actions", 50,
                                        len(actions[0][1]), intercept=(0, 1))
        net.connect(val_threshold.getOrigin("output"), weight_actions,
                    transform=MU.transpose([actions[i][1]
                                            for i in range(self.d)]),
                    pstc=0.007)

        # save the BG output (selected action and selected action value)
        save_relay = net.make("save_relay", 1, 1, mode="direct")
        save_relay.fixMode()
        save_relay.addDecodedTermination("input", [[1]], 0.001, False)

        saved_action = memory.Memory("saved_action", self.N * 2,
                                     len(actions[0][1]), inputscale=75)
        net.add(saved_action)
        net.connect(weight_actions, saved_action.getTermination("target"))
        net.connect(save_relay, saved_action.getTermination("transfer"))

        saved_vals = memory.Memory("saved_values", self.N * 2, self.d,
                                   inputscale=75)
        net.add(saved_vals)
        net.connect(val_threshold.getOrigin("output"),
                    saved_vals.getTermination("target"))
        net.connect(save_relay, saved_vals.getTermination("transfer"))

        # put the saved values through a threshold (we want a nice clean
        # zero for non-selected values)
        nfac = HRLutils.node_fac()
        nfac.setIntercept(IndicatorPDF(0.2, 1))
        saved_vals_threshold = net.make_array("saved_vals_threshold", self.N,
                                              self.d, node_factory=nfac,
                                              encoders=[[1]])
        saved_vals_threshold.addDecodedOrigin(
            "output", [PiecewiseConstantFunction([0.3], [0, 1])
                       for _ in range(self.d)], "AXON", True)

        net.connect(saved_vals, saved_vals_threshold, pstc=self.tauPSC)

        self.exposeTermination(bg.getTermination("input"), "input")
        self.exposeTermination(save_relay.getTermination("input"),
                               "save_output")
        self.exposeOrigin(val_threshold.getOrigin("output"), "curr_vals")
        self.exposeOrigin(weight_actions.getOrigin("X"), "curr_action")
        self.exposeOrigin(saved_vals_threshold.getOrigin("output"),
                          "saved_vals")
        self.exposeOrigin(saved_action.getOrigin("X"), "saved_action")
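
# A rough plain-Python sketch of the selection this network approximates (an
# illustrative reading of the code above): Q values are scaled and biased
# into the BG's 0.5--1.5 operating range, the BG suppresses all but the
# largest, and the inverted, thresholded output is a one-hot action vector.
def bg_select_ideal(qvals, Qradius=1.0):
    scaled = [q / Qradius + 0.5 for q in qvals]
    best = max(range(len(scaled)), key=lambda i: scaled[i])
    return [1.0 if i == best else 0.0 for i in range(len(scaled))]

# e.g. bg_select_ideal([0.2, 0.7, 0.4]) -> [0.0, 1.0, 0.0]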
Example #17
    def __init__(self,
                 stateN,
                 stateD,
                 state_encoders,
                 actions,
                 learningrate,
                 stateradius=1.0,
                 Qradius=1.0,
                 load_weights=None,
                 state_evals=None,
                 state_threshold=(0.0, 1.0),
                 statediff_threshold=0.2,
                 init_Qs=None):
        """Builds the QNetwork.

        :param stateN: number of neurons to use to represent state
        :param stateD: dimension of state vector
        :param state_encoders: encoders to use for neurons in state population
        :param actions: actions available to the system
            :type actions: list of tuples (action_name,action_vector)
        :param learningrate: learningrate for action value learning rule
        :param stateradius: expected radius of state values
        :param Qradius: expected radius of Q values
        :param load_weights: filename to load Q value weights from
        :param state_evals: evaluation points to use for state population.
            This is used when initializing the Q values (may be necessary if
            the input states don't tend to fall in the hypersphere).
        :param state_threshold: threshold range of state neurons
        :param statediff_threshold: maximum state difference for dual training
        :param init_Qs: initial Q values
        """

        self.name = "QNetwork"
        net = nef.Network(self, seed=HRLutils.SEED, quick=False)

        N = 50
        tauPSC = 0.007
        num_actions = len(actions)
        init_Qs = [0.2] * num_actions if init_Qs is None else init_Qs

        # if True, use neuron--neuron weight learning, otherwise, use decoder
        # learning
        self.neuron_learning = False

        # set up relays
        state_relay = net.make("state_relay", 1, stateD, mode="direct")
        state_relay.fixMode()
        state_relay.addDecodedTermination("input", MU.I(stateD), 0.001, False)

        # create state population
        state_fac = HRLutils.node_fac()
        if isinstance(state_threshold, (float, int)):
            state_threshold = (state_threshold, 1.0)
        state_fac.setIntercept(
            IndicatorPDF(state_threshold[0], state_threshold[1]))

        state_pop = net.make("state_pop",
                             stateN,
                             stateD,
                             radius=stateradius,
                             node_factory=state_fac,
                             encoders=state_encoders,
                             eval_points=state_evals)
        state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])

        net.connect(state_relay, state_pop, pstc=tauPSC)

        # store the state value (used to drive population encoding previous
        # state)
        saved_state = memory.Memory("saved_state",
                                    N * 4,
                                    stateD,
                                    inputscale=50,
                                    radius=stateradius,
                                    direct_storage=True)
        net.add(saved_state)

        net.connect(state_relay, saved_state.getTermination("target"))

        # create population representing previous state
        old_state_pop = net.make("old_state_pop",
                                 stateN,
                                 stateD,
                                 radius=stateradius,
                                 node_factory=state_fac,
                                 encoders=state_encoders,
                                 eval_points=state_evals)
        old_state_pop.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])

        net.connect(saved_state, old_state_pop, pstc=tauPSC)

        # set up action nodes
        if self.neuron_learning:
            # use ActionValues network to compute Q values

            # current Q values
            decoders = state_pop.addDecodedOrigin(
                "init_decoders", [ConstantFunction(stateD, init_Qs)],
                "AXON").getDecoders()
            actionvals = actionvalues.ActionValues("actionvals",
                                                   N,
                                                   stateN,
                                                   actions,
                                                   learningrate,
                                                   Qradius=Qradius,
                                                   init_decoders=decoders)
            net.add(actionvals)

            net.connect(state_pop.getOrigin("AXON"),
                        actionvals.getTermination("state"))

            # Q values of previous state
            decoders = old_state_pop.addDecodedOrigin(
                "init_decoders", [ConstantFunction(stateD, init_Qs)],
                "AXON").getDecoders()
            old_actionvals = actionvalues.ActionValues("old_actionvals",
                                                       N,
                                                       stateN,
                                                       actions,
                                                       learningrate,
                                                       Qradius=Qradius,
                                                       init_decoders=decoders)
            net.add(old_actionvals)

            net.connect(old_state_pop.getOrigin("AXON"),
                        old_actionvals.getTermination("state"))
        else:
            # just use decoder on state population to compute Q values

            # current Q values
            origin = state_pop.addDecodedOrigin("vals", [
                ConstantFunction(num_actions, init_Qs[i])
                for i in range(num_actions)
            ], "AXON")
            state_dlnode = decoderlearningnode.DecoderLearningNode(
                state_pop,
                origin,
                learningrate,
                num_actions,
                name="state_learningnode")
            net.add(state_dlnode)

            # just a little relay node, so that things match up for the rest of
            # the script when you have the neuron -- neuron learning
            actionvals = net.make("actionvals", 1, num_actions, mode="direct")
            actionvals.fixMode()
            net.connect(origin, actionvals, pstc=0.001)

            # Q values of previous state
            origin = old_state_pop.addDecodedOrigin("vals", [
                ConstantFunction(num_actions, init_Qs[i])
                for i in range(num_actions)
            ], "AXON")
            old_state_dlnode = decoderlearningnode.DecoderLearningNode(
                old_state_pop,
                origin,
                learningrate,
                num_actions,
                name="old_state_learningnode")
            net.add(old_state_dlnode)

            old_actionvals = net.make("old_actionvals",
                                      1,
                                      num_actions,
                                      mode="direct")
            old_actionvals.fixMode()
            net.connect(origin, old_actionvals, pstc=0.001)

        if load_weights is not None:
            self.loadParams(load_weights)

        # find error between old_actionvals and actionvals (this will be used
        # to drive learning on the new actionvals)
        valdiff = net.make_array("valdiff",
                                 N,
                                 num_actions,
                                 node_factory=HRLutils.node_fac())
        # doubling the values to get a bigger error signal
        net.connect(old_actionvals,
                    valdiff,
                    transform=MU.diag([2] * num_actions),
                    pstc=tauPSC)
        net.connect(actionvals,
                    valdiff,
                    transform=MU.diag([-2] * num_actions),
                    pstc=tauPSC)

        # calculate diff between curr_state and saved_state and use that to
        # gate valdiff (we only want to train the curr state based on previous
        # state when the two have similar values)
        # note: threshold > 0 so that there is a deadzone in the middle (when
        # the states are similar) where there will be no output inhibition
        statediff = net.make_array("statediff",
                                   N,
                                   stateD,
                                   intercept=(statediff_threshold, 1))

        net.connect(state_relay, statediff, pstc=tauPSC)
        net.connect(saved_state,
                    statediff,
                    transform=MU.diag([-1] * stateD),
                    pstc=tauPSC)

        net.connect(statediff,
                    valdiff,
                    func=lambda x: [abs(v) for v in x],
                    transform=[[-10] * stateD
                               for _ in range(valdiff.getNeurons())],
                    pstc=tauPSC)

        # connect up valdiff to the error signal for current Q values, and
        # expose the error signal for the previous Q values to the external
        # error
        if self.neuron_learning:
            net.connect(valdiff, actionvals.getTermination("error"))
            self.exposeTermination(old_actionvals.getTermination("error"),
                                   "error")
        else:
            net.connect(valdiff, state_dlnode.getTermination("error"))
            self.exposeTermination(old_state_dlnode.getTermination("error"),
                                   "error")

        self.exposeTermination(state_relay.getTermination("input"), "state")
        self.exposeTermination(saved_state.getTermination("transfer"),
                               "save_state")
        self.exposeOrigin(actionvals.getOrigin("X"), "vals")
        self.exposeOrigin(old_actionvals.getOrigin("X"), "old_vals")
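
# A rough plain-Python sketch of the state-difference gating described above
# (an illustrative reading of the code): the doubled value-difference error
# is suppressed whenever the current and saved states differ by more than
# the deadzone threshold.
def gated_valdiff(old_q, new_q, state, saved_state, threshold=0.2):
    diff = sum(abs(s - o) for s, o in zip(state, saved_state))
    if diff > threshold:  # states too different: inhibit training
        return [0.0] * len(new_q)
    return [2 * (o - n) for o, n in zip(old_q, new_q)]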
Example #18
from ca.nengo.math import Function
from ca.nengo.math.impl import FourierFunction
from ca.nengo.math.impl import IndicatorPDF
from ca.nengo.math.impl import ConstantFunction
from ca.nengo.model import SimulationMode
from ca.nengo.plot import Plotter
from ca.nengo.util import MU
import math

# filter networks assumed to be defined earlier in the original script
networks = [interneuron, dualTC, adapting, depressing, butterworth, interneuronFeedback]

tau = [.005, .01, .05, .1, .2, .5]
signalBandwidth = 15
frequencies = MU.makeVector(.1, .1, signalBandwidth)
componentRMS = math.sqrt(1.0 / len(frequencies))
signal = FourierFunction(frequencies,
                         MU.uniform(1, len(frequencies), componentRMS / .707)[0],
                         MU.random(1, len(frequencies), IndicatorPDF(-.5, .5))[0])

noiseBandwidth = 500

for network in networks:
    network.setMode(SimulationMode.DIRECT)
    network.setStepSize(.0005)
    signalPower = []
    noisePower = []

    for t in tau:
        network.setTau(t)

        network.setInputFunction(signal)
        network.clearErrors()
        network.reset(0)