Example #1
    def buildNetwork(self, network):
        network.name = "Integrator"

        # Util.debugMsg("Network building started")

        f = ConstantFunction(1, 1.0)
        input = FunctionInput("input", [f], Units.UNK)

        # uiViewer.addNeoNode(uiInput);

        ef = NEFEnsembleFactoryImpl()
        integrator = ef.make("integrator", 500, 1, "integrator1", False)
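        # NOTE: 'tau' (the PSC time constant used by the two decoded terminations
        # below) is assumed to be defined elsewhere in the enclosing script,
        # e.g. tau = 0.1.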
        interm = integrator.addDecodedTermination("input", [[tau]], tau, False)
        fbterm = integrator.addDecodedTermination("feedback", [[1.0]], tau, False)

        network.addNode(integrator)
        time.sleep(1)
        network.addNode(input)
        time.sleep(1)
        # UINEFEnsemble uiIntegrator = new UINEFEnsemble(integrator);
        # uiViewer.addNeoNode(uiIntegrator);
        # uiIntegrator.collectSpikes(true);

        # UITermination uiInterm =
        # uiIntegrator.showTermination(interm.getName());
        # UITermination uiFbterm =
        # uiIntegrator.showTermination(fbterm.getName());

        network.addProjection(input.getOrigin(FunctionInput.ORIGIN_NAME), interm)
        time.sleep(0.5)
        network.addProjection(integrator.getOrigin(NEFEnsemble.X), fbterm)
        time.sleep(0.5)

        # Test removing projections
        network.removeProjection(interm)
        time.sleep(0.5)
        # add the projection back
        network.addProjection(input.getOrigin(FunctionInput.ORIGIN_NAME), interm)
        time.sleep(0.5)
        # Add probes
        integratorXProbe = network.simulator.addProbe("integrator", NEFEnsemble.X, True)
        time.sleep(0.5)
        # Test adding removing probes
        network.simulator.removeProbe(integratorXProbe)
        time.sleep(0.5)
        # add the probe back
        network.simulator.addProbe("integrator", NEFEnsemble.X, True)
        time.sleep(0.5)

        SwingUtilities.invokeLater(make_runnable(self.doPostUIStuff))
Example #2
def makeInputVector(name, d, randomSeed=None):
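    """Sample a random d-dimensional Gaussian vector, normalize it to unit length,
    and return it wrapped in a FunctionInput of ConstantFunctions (one per dimension)."""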
    vec = []

    if randomSeed is None:
        randomSeed = long(time.clock() * 100000000000000000)

    if randomSeed > -1:
        PDFTools.setSeed(randomSeed)

    length = 0
    for i in range(d):
        tmp = PDFTools.sampleFloat(GaussianPDF())
        vec = vec + [tmp]
        length = length + tmp**2

    length = math.sqrt(length)

    f = []
    for i in range(d):
        vec[i] = vec[i] / length
        f = f + [ConstantFunction(1, vec[i])]

    if randomSeed > -1:
        PDFTools.setSeed(long(time.clock() * 1000000000000000))

    print vec

    return (FunctionInput(name, f, Units.UNK))
Example #3
File: 1annea.py  Project: jvitku/designer
def buildExperiment(ind):
    net=nef.Network('EA designed recurrent SNN')  
    net.add_to_nengo();
    # generator: two band-limited noise inputs
    # (FourierFunction args: base freq, max freq [rad/s], RMS, seed)
    generator=FunctionInput('generator',[FourierFunction(.1, 5,.3, 12),
        FourierFunction(.5, 7, .7, 112)],Units.UNK)
    net.add(generator);

    # model
    model= buildModel(ind);
    net.add(model.network); 

    # plant
    plant = net.add(modules.OR('Fuzzy OR'));
    #plant = net.add(mathNodes.SUMABS('SUM'));
    # error estimation
    err = net.make('error',1,1,mode='direct');
    mse = net.add(modules.mse('MSE'));

    # wiring
    modelIn = model.get('inputs');  # get the input model
    model.network.exposeTermination(modelIn.getTermination('in'),'subInput'); # expose its termination
    net.connect(generator,model.network.getTermination('subInput'),weight=1)  # generator to termination
    net.connect(generator,plant.getTermination('inputs'))                     # generator to plant

    modelOut = model.get('outputt');
    model.network.exposeOrigin(modelOut.getOrigin('out'),'subOutput');        # expose its origin
    net.connect(model.network.getOrigin('subOutput'), err, weight=-1)

    net.connect(plant.getOrigin('output'),err)  
    net.connect(err,mse.getTermination('error'))
    return net;
Example #4
def makeInputVectors(names, vectors):
    return [
        FunctionInput(names[i], [ConstantFunction(1, x)
                                 for x in vec], Units.UNK)
        for i, vec in enumerate(vectors)
    ]
Example #5
    def loadSequenceMatrix(self, cell):
        """Load a matrix in HRR vector format from a file and create corresponding output functions."""

        d = len(cell[0])
        discontinuities = [
            RPMutils.STEP_SIZE, 2 * RPMutils.STEP_SIZE, 3 * RPMutils.STEP_SIZE,
            4 * RPMutils.STEP_SIZE, 5 * RPMutils.STEP_SIZE
        ]
        values1 = [[0 for x in range(6)] for x in range(d)]
        values2 = [[0 for x in range(6)] for x in range(d)]

        # schedule (one switch per STEP_SIZE):
        # timestep:  0       1       2       3       4
        # signal A:  cell1   cell2   cell4   cell5   cell7
        # signal B:  cell2   cell3   cell5   cell6   cell8

        #values for 0th timestep
        for i in range(d):
            values1[i][0] = cell[0][i]
        for i in range(d):
            values2[i][0] = cell[1][i]

        #values for 1st timestep
        for i in range(d):
            values1[i][1] = cell[1][i]
        for i in range(d):
            values2[i][1] = cell[2][i]

        #values for 2nd timestep
        for i in range(d):
            values1[i][2] = cell[3][i]
        for i in range(d):
            values2[i][2] = cell[4][i]

        #values for 3rd timestep
        for i in range(d):
            values1[i][3] = cell[4][i]
        for i in range(d):
            values2[i][3] = cell[5][i]

        #values for 4th timestep
        for i in range(d):
            values1[i][4] = cell[6][i]
        for i in range(d):
            values2[i][4] = cell[7][i]

        for i in range(d):
            values1[i][5] = 0
            values2[i][5] = 0

        #create signalA
        f = []
        for i in range(d):
            f = f + [PiecewiseConstantFunction(discontinuities, values1[i])]
        sigA = FunctionInput("sigA", f, Units.UNK)

        #create signal B
        f = []
        for i in range(d):
            f = f + [PiecewiseConstantFunction(discontinuities, values2[i])]
        sigB = FunctionInput("sigB", f, Units.UNK)

        #create signal for adaptive learning rate
        rates = [1.0, 1.0 / 2.0, 1.0 / 3.0, 1.0 / 4.0, 1.0 / 5.0, 0.0]
        lrate = FunctionInput(
            "lrate", [PiecewiseConstantFunction(discontinuities, rates)],
            Units.UNK)

        #create signal for second last cell
        f = []
        for i in range(d):
            f = f + [ConstantFunction(1, cell[7][i])]
        secondLast = FunctionInput("secondLast", f, Units.UNK)

        #load rule signal from file
        rulesig = []
        if RPMutils.LOAD_RULES:
            rulefile = open(RPMutils.ruleFile())
            lines = rulefile.readlines()
            rulefile.close()
            mod, rule = lines[0].split(":")
            if mod == "sequencesolver":
                rule = RPMutils.str2floatlist(rule.strip())
            else:
                rule = [0.0 for i in range(self.d)]
            rulesig = RPMutils.makeInputVectors("rulesig", [rule])

        if RPMutils.RUN_WITH_CONTROLLER:
            return ([sigA, sigB, lrate, secondLast] + rulesig)
        else:
            #create signals for answers
            ans = []
            for i in range(8):
                ans = ans + [cell[8 + i]]

            return ([sigA, sigB, lrate, secondLast] + ans + rulesig)
Example #6
    def __init__(self, gamma, rewardradius=1.0):
        """Builds the ErrorCalc network.

        :param gamma: discount factor
        :param rewardradius: expected radius of reward values
        """

        self.name = "ErrorCalc"
        tauPSC = 0.007
        intPSC = 0.1
        N = 50

        ef = HRLutils.defaultEnsembleFactory()

        # current Q input
        currQ = ef.make("currQ", 1, 1)
        currQ.addDecodedTermination("input", [[1]], 0.001, False)
        self.addNode(currQ)
        currQ.setMode(SimulationMode.DIRECT)
        currQ.fixMode()
        self.exposeTermination(currQ.getTermination("input"), "currQ")

        # input population for resetting the network
        resetef = HRLutils.defaultEnsembleFactory()
        resetef.setEncoderFactory(vectorgenerators.DirectedVectorGenerator([1
                                                                            ]))
        resetef.getNodeFactory().setIntercept(IndicatorPDF(0.3, 1.0))
        reset = resetef.make("reset", N, 1)
        reset.addDecodedTermination("input", [[1]], tauPSC, False)
        self.addNode(reset)
        reset.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.exposeTermination(reset.getTermination("input"), "reset")

        # store previous value of Q
        storeQ = memory.Memory("storeQ", N * 4, 1, inputscale=50)
        self.addNode(storeQ)
        self.addProjection(reset.getOrigin("X"),
                           storeQ.getTermination("transfer"))
        self.addProjection(currQ.getOrigin("X"),
                           storeQ.getTermination("target"))

        # calculate discount
        biasInput = FunctionInput("biasinput", [ConstantFunction(1, 1)],
                                  Units.UNK)
        self.addNode(biasInput)

        discount = memory.Memory("discount",
                                 N * 4,
                                 1,
                                 inputscale=50,
                                 recurweight=gamma)
        self.addNode(discount)
        self.addProjection(biasInput.getOrigin("origin"),
                           discount.getTermination("target"))
        self.addProjection(reset.getOrigin("X"),
                           discount.getTermination("transfer"))

        # accumulate discounted reward
        # do we really need gamma to make this all work? if it proves to be a
        # problem, could try removing it, and just use un-discounted reward.
        # we can just use the fact that the reward integrator will saturate to
        # prevent rewards from going to infinity
        discountreward = eprod.Eprod("discountreward",
                                     N * 4,
                                     1,
                                     weights=[[[1.0 / rewardradius]], [[1.0]]],
                                     oneDinput=True)
        self.addNode(discountreward)
        self.exposeTermination(discountreward.getTermination("A"), "reward")
        self.addProjection(discount.getOrigin("X"),
                           discountreward.getTermination("B"))

        reward = ef.make("reward", N * 4, 1)
        reward.addDecodedTermination("input", [[intPSC]], intPSC, False)
        reward.addDecodedTermination("feedback", [[1]], intPSC, False)
        reward.addTermination("gate",
                              [[-8] for _ in range(reward.getNodeCount())],
                              intPSC, False)
        self.addNode(reward)
        reward.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.addProjection(reward.getOrigin("X"),
                           reward.getTermination("feedback"))
        self.addProjection(discountreward.getOrigin("X"),
                           reward.getTermination("input"))
        self.addProjection(reset.getOrigin("X"), reward.getTermination("gate"))

        # weight currQ by discount
        discountcurrQ = eprod.Eprod("discountcurrQ", N * 4, 1, oneDinput=True)
        self.addNode(discountcurrQ)
        self.addProjection(currQ.getOrigin("X"),
                           discountcurrQ.getTermination("A"))
        self.addProjection(discount.getOrigin("X"),
                           discountcurrQ.getTermination("B"))

        # error calculation
        # radius of 2 since max error = maxQ + maxreward - 0 (unless we let Q
        # values go negative)
        error = ef.make("error", N * 2, [2])
        error.addDecodedTermination("currQ", [[1]], tauPSC, False)
        error.addDecodedTermination("reward", [[1]], tauPSC, False)
        error.addDecodedTermination("storeQ", [[-1]], tauPSC, False)
        self.addNode(error)
        self.addProjection(discountcurrQ.getOrigin("X"),
                           error.getTermination("currQ"))
        self.addProjection(reward.getOrigin("X"),
                           error.getTermination("reward"))
        self.addProjection(storeQ.getOrigin("X"),
                           error.getTermination("storeQ"))
        self.exposeOrigin(error.getOrigin("X"), "X")
Example #7
from ca.nengo.math.impl import FourierFunction
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model import Units

random.seed(37)

net = nef.Network('Learn Product')  #Create the network object

# Create input and output populations.
A = net.make('pre', N, D)  # Make a population with N neurons and D dimensions
B = net.make('post', N, 1)  # Make a 1-dimensional population with N neurons

# Create a random function input.
input = FunctionInput(
    'input', [FourierFunction(.1, 8, .4, i, 0) for i in range(D)],
    Units.UNK)  # White noise input: .1 base freq, max freq 8 rad/s, RMS .4;
                # 'i' makes one function per dimension; 0 is the seed
net.add(input)  #Add the input node to the network
net.connect(input, A)

# Create a modulated connection between the 'pre' and 'post' ensembles.
learning.make(
    net, errName='error', N_err=100, preName='pre', postName='post',
    rate=5e-7)  # Make an error population with 100 neurons and a learning
                # rate of 5e-7


# Set the modulatory signal to compute the desired function
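The snippet is cut off here. In the learn-product style demo this script appears to follow (it assumes N, D and the nef/learning imports from an earlier, unshown preamble), the modulatory error is typically computed as the desired value (the product of the input dimensions) minus the actual output of 'post'. A minimal sketch, assuming a 2-D 'pre' population and the func= keyword of net.connect:

def product(x):
    return x[0] * x[1]

net.connect('pre', 'error', func=product)  # desired value: product of the inputs
net.connect('post', 'error', weight=-1)    # subtract the learned output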
Example #8
from ca.nengo.math.impl import FourierFunction
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model import Units

random.seed(27)

net=nef.Network('Learn Communication') #Create the network object

# Create input and output populations.
A = net.make('pre', N, D)   # Make a population with N neurons and D dimensions
B = net.make('post', N, D)  # Make a population with N neurons and D dimensions

# Create a random function input.
input = FunctionInput('input', [FourierFunction(.1, 10, .5, 12)],
                      Units.UNK)  # White noise input: .1 base freq, max
                                  # freq 10 rad/s, RMS .5; 12 is the seed
net.add(input) #Add the input node to the network
net.connect(input,A)

# Create a modulated connection between the 'pre' and 'post' ensembles.
learning.make(net,errName='error', N_err=100, preName='pre', postName='post',
    rate=5e-7) #Make an error population with 100 neurons, and a learning 
               #rate of 5e-7

# Set the modulatory signal.
net.connect('pre', 'error')
net.connect('post', 'error', weight=-1)

# Add a gate to turn learning on and off.
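The snippet is cut off after this comment. A minimal sketch of the gating step, assuming the gate template distributed alongside the learning template (the module path and parameter names below are assumptions, not confirmed by this snippet):

import nef.templates.gate as gating  # assumed module path

net.make_input('switch', [0])  # 0 = learning off, 1 = learning on
gating.make(net, name='Gate', gated='error', neurons=40, pstc=0.01)
net.connect('switch', 'Gate')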
Example #9
    def __init__(self, gamma, rewardradius=1.0):
        """Builds the ErrorCalc network.

        :param gamma: discount factor
        :param rewardradius: expected radius of reward values
        """

        self.name = "ErrorCalc"
        tauPSC = 0.007
        intPSC = 0.1
        N = 50

        ef = HRLutils.defaultEnsembleFactory()

        #current Q input
        currQ = ef.make("currQ", 1, 1)
        currQ.addDecodedTermination("input", [[1]], 0.001, False)
        self.addNode(currQ)
        currQ.setMode(SimulationMode.DIRECT)
        currQ.fixMode()
        self.exposeTermination(currQ.getTermination("input"), "currQ")

        #input population for resetting the network
        resetef = HRLutils.defaultEnsembleFactory()
        resetef.setEncoderFactory(vectorgenerators.DirectedVectorGenerator([1]))
        resetef.getNodeFactory().setIntercept(IndicatorPDF(0.3, 1.0))
        reset = resetef.make("reset", N, 1)
        reset.addDecodedTermination("input", [[1]], tauPSC, False)
        self.addNode(reset)
        reset.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.exposeTermination(reset.getTermination("input"), "reset")

        #store previous value of Q
        storeQ = memory.Memory("storeQ", N * 4, 1, inputscale=50)
        self.addNode(storeQ)
        self.addProjection(reset.getOrigin("X"), storeQ.getTermination("transfer"))
        self.addProjection(currQ.getOrigin("X"), storeQ.getTermination("target"))

        #calculate discount
        biasInput = FunctionInput("biasinput", [ConstantFunction(1, 1)], Units.UNK)
        self.addNode(biasInput)

        discount = memory.Memory("discount", N * 4, 1, inputscale=50, recurweight=gamma)
        self.addNode(discount)
        self.addProjection(biasInput.getOrigin("origin"), discount.getTermination("target"))
        self.addProjection(reset.getOrigin("X"), discount.getTermination("transfer"))

        #accumulate discounted reward
        #do we really need gamma to make this all work? if it proves to be a problem, could
        #try removing it, and just use un-discounted reward. we can just use the fact that
        #the reward integrator will saturate to prevent rewards from going to infinity
        discountreward = eprod.Eprod("discountreward", N * 4, 1, weights=[[[1.0 / rewardradius]], [[1.0]]], oneDinput=True)
        self.addNode(discountreward)
        self.exposeTermination(discountreward.getTermination("A"), "reward")
        self.addProjection(discount.getOrigin("X"), discountreward.getTermination("B"))

        reward = ef.make("reward", N * 4, 1)
        reward.addDecodedTermination("input", [[intPSC]], intPSC, False)
        reward.addDecodedTermination("feedback", [[1]], intPSC, False)
        reward.addTermination("gate", [[-8] for _ in range(reward.getNodeCount())], intPSC, False)
        self.addNode(reward)
        reward.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])
        self.addProjection(reward.getOrigin("X"), reward.getTermination("feedback"))
        self.addProjection(discountreward.getOrigin("X"), reward.getTermination("input"))
        self.addProjection(reset.getOrigin("X"), reward.getTermination("gate"))

        #weight currQ by discount
        discountcurrQ = eprod.Eprod("discountcurrQ", N * 4, 1, oneDinput=True)
        self.addNode(discountcurrQ)
        self.addProjection(currQ.getOrigin("X"), discountcurrQ.getTermination("A"))
        self.addProjection(discount.getOrigin("X"), discountcurrQ.getTermination("B"))

        #error calculation
        error = ef.make("error", N * 2, [2]) #radius of 2 since max error = maxQ + maxreward - 0 (unless we let Q values go negative)
        error.addDecodedTermination("currQ", [[1]], tauPSC, False)
        error.addDecodedTermination("reward", [[1]], tauPSC, False)
        error.addDecodedTermination("storeQ", [[-1]], tauPSC, False)
        self.addNode(error)
        self.addProjection(discountcurrQ.getOrigin("X"), error.getTermination("currQ"))
        self.addProjection(reward.getOrigin("X"), error.getTermination("reward"))
        self.addProjection(storeQ.getOrigin("X"), error.getTermination("storeQ"))
        self.exposeOrigin(error.getOrigin("X"), "X")
Example #10
import random

from ca.nengo.math.impl import FourierFunction
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model import Units

random.seed(27)

net = nef.Network('Learning (pre-built)')

# Create input and output populations.
net.make('pre', N, D)
net.make('post', N, D)

# Create a random function input.
input = FunctionInput('input', [FourierFunction(.1, 10, .5, 1)], Units.UNK)
net.add(input)
net.connect(input, 'pre')

# Create a modulated connection between the 'pre' and 'post' ensembles.
learning.make(net,
              errName='error',
              N_err=100,
              preName='pre',
              postName='post',
              rate=5e-7)

# Set the modulatory signal.
net.connect('pre', 'error')
net.connect('post', 'error', weight=-1)
Example #11
#
# by Jaroslav Vitku [[email protected]]

import nef
from ca.nengo.math.impl import FourierFunction
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model import Units
from rosnodes import temp_logic_gates  # import jython helper

net = nef.Network(
    'Project Template script - copied just the triangular fuzzy membership function'
)
net.add_to_nengo()  # delete the old (top-level) network and replace it with the newly created one

gen1 = FunctionInput('Randomized input 1', [FourierFunction(.1, 10, 3, 12)],
                     Units.UNK)
net.add(gen1)

net.make_input('alpha1', [-0.5])
net.make_input('beta1', [0])
net.make_input('gamma1', [0.5])

# Add it
triangle = temp_logic_gates.fuzzyMemTriangle("Triangle")
net.add(triangle)

# Wire inputs
net.connect(gen1, triangle.getTermination('logic/gates/ina'))

net.connect('alpha1', triangle.getTermination('logic/gates/confa'))
net.connect('beta1', triangle.getTermination('logic/gates/confb'))
Example #12
net = nef.Network(
    'Demo of NeuralModule which implements discrete Q-learning with eligibility trace'
)
net.add_to_nengo()

#RosUtils.setAutorun(False)      # Do we want to autorun roscore and rxgraph? (true by default)
#RosUtils.prefferJroscore(True)  # prefer jroscore over roscore?

finderA = rl_sarsa.qlambdaASMConfigured("RL", net, 2,
                                        4)  # 2 state variables, 4 actions

#Create a white noise input function with params: baseFreq, maxFreq [rad/s], RMS, seed
# first dimension is reward, do not generate signal (ignored in the connection matrix)
generator = FunctionInput('StateGenerator', [
    FourierFunction(0, 0, 0, 12),
    FourierFunction(.5, 11, 1.6, 17),
    FourierFunction(.2, 21, 1.1, 11)
], Units.UNK)

# first dimension is reward, do not generate states (these are ignored in the connection matrix)
reward = FunctionInput('RewardGenerator', [
    FourierFunction(.1, 10, 1, 12),
    FourierFunction(0, 0, 0, 17),
    FourierFunction(0, 0, 0, 17),
], Units.UNK)

net.add(generator)
net.add(reward)

tx = [[0 for j in range(3)] for i in range(3)]
tx[1][1] = 1
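The listing is truncated here. tx is a 3x3 transform matrix that, so far, passes only dimension 1 through; in the nef scripting API such a matrix is normally handed to net.connect via its transform argument. A minimal sketch (the termination name on the RL node is an assumption):

# Sketch: route the generator's state dimensions into the RL node, leaving the
# reward dimension (index 0) to come from the separate RewardGenerator.
net.connect(generator, finderA.getTermination('states'), transform=tx)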