Example #1
def sync_timeaware_node(name):
	g = NodeGroup("SynMinMaxFloat", True);        			
	g.addNode(time, "SynMinMaxFloat", "java");     		
	module = NeuralModule(name+'_SynMinMaxFloat', g)    	
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)  			
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "float", 2)    		
	return module
Example #2
def fuzzynot_node(name):
	g = NodeGroup("FuzzyNOT", True);
	g.addNode(fNOT, "FuzzyNOT", "java");
	module = NeuralModule(name+'_FuzzyNOT', g)
	module.createEncoder("logic/gates/ina", "float", 1)
	module.createDecoder("logic/gates/outa", "float", 1)
	return module
Example #3
def async_minmaxfloat_node(name):
	g = NodeGroup("AsynMinMaxFloat", True);        			
	g.addNode(mmf, "AsynMinMaxFloat", "java");     		
	module = NeuralModule(name+'_AsynMinMaxFloat', g, False)    	
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)  			
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "float", 2)    		
	return module
Example #4
def async_timeaware_node(name):
	g = NodeGroup("AsynTimeAwareNode", True);        			
	g.addNode(time, "AsynTimeAwareNode", "java");     		
	module = NeuralModule(name+'_AsynTimeAwareNode', g, False)    	
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)			
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "float", 2)
	return module
Example #5
def sync_minmaxint_node(name):
	g = NodeGroup("SynMinMaxInt", True);        			
	g.addNode(minmaxint, "SynMinMaxInt", "java");     		
	module = NeuralModule(name+'_SynMinMaxInt', g)    	# here: Nengo waits each step for response from node
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)  			
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "int", 2)    			
	return module
Example #6
def sync_minmaxfloat_node(name):
	g = NodeGroup("SynMinMaxFloat", True);        			
	g.addNode(mmf, "SynMinMaxFloat", "java");     		
	module = NeuralModule(name+'_SynMinMaxFloat', g)    	# here: Nengo waits each step for response from node
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)  			
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "float", 2)  # add FLOAT publisher
	return module
Example #7
def not_node(name):
	g = NodeGroup("NOT", True);
	g.addNode(NOT, "NOT", "java");
	module = NeuralModule(name+'_NOT', g)
	module.createEncoder("logic/gates/ina", "bool", 1)
	module.createDecoder("logic/gates/outa", "bool", 1)
	return module
Example #8
def qlambdaMOO(name,
               noStateVars=2,
               noActions=4,
               noValues=5,
               logPeriod=100,
               maxDelay=1):
    command = [
        classMOO, '_' + QLambda.noInputsConf + ':=' + str(noStateVars),
        '_' + QLambda.noOutputsConf + ':=' + str(noActions),
        '_' + QLambda.sampleCountConf + ':=' + str(noValues),
        '_' + QLambda.logPeriodConf + ':=' + str(logPeriod),
        '_' + QLambda.filterConf + ':=' + str(maxDelay)
    ]
    g = NodeGroup("RL", True)
    g.addNode(command, "RL", "java")
    module = NeuralModule(name + '_QLambda', g, False)
    module.createEncoder(QLambda.topicAlpha, "float", 1)       # alpha config
    module.createEncoder(QLambda.topicGamma, "float", 1)
    module.createEncoder(QLambda.topicLambda, "float", 1)
    module.createEncoder(QLambda.topicImportance, "float", 1)
    module.createDecoder(QLambda.topicProsperity, "float", 3)  # float[]{prosperity, coverage, reward/step}
    module.createDecoder(QLambda.topicDataOut, "float", noActions)        # decode actions
    module.createEncoder(QLambda.topicDataIn, "float", noStateVars + 1)   # encode states (first is reward)
    return module
Example #9
def or_node(name):
	g = NodeGroup("OR", True);
	g.addNode(OR, "OR", "java");
	module = NeuralModule(name+'_OR', g)
	module.createEncoder("logic/gates/ina", "bool", 1)
	module.createEncoder("logic/gates/inb", "bool", 1)
	module.createDecoder("logic/gates/outa", "bool", 1)
	return module
Example #10
def fuzzyand_node(name):
	g = NodeGroup("FuzzyAND", True);
	g.addNode(fAND, "FuzzyAND", "java");
	module = NeuralModule(name+'_FuzzyAND', g)
	module.createEncoder("logic/gates/ina", "float", 1)
	module.createEncoder("logic/gates/inb", "float", 1)
	module.createDecoder("logic/gates/outa", "float", 1)
	return module
Example #11
def nand_node(name):
	g = NodeGroup("NAND", True);
	g.addNode(NAND, "NAND", "java");
	module = NeuralModule(name+'_NAND', g)
	module.createEncoder("logic/gates/ina", "bool", 1)
	module.createEncoder("logic/gates/inb", "bool", 1)
	module.createDecoder("logic/gates/outa", "bool", 1)
	return module
Example #12
def fuzzyMemIncLin(name):									# ____|----
	g = NodeGroup("FuzzyMemIncLin", True);
	g.addNode(flininc,"FuzzyMemIncLin","java");
	module = NeuralModule(name+'_FuzzyMemIncLin', g)
	module.createEncoder("logic/gates/ina", "float", 1)		# x
	module.createEncoder("logic/gates/confa", "float", 1) 	# alpha
	module.createEncoder("logic/gates/confb", "float", 1) 	# beta
	module.createDecoder("logic/gates/outa", "float", 1)	# y
	return module
Example #13
def fuzzyAsynMemTriangle(name):
    g = NodeGroup("AsynFuzzyMemTriangle", True)
    g.addNode(ftriangle, "AsynFuzzyMemTriangle", "java")
    module = AsynNeuralModule(name + '_AsynFuzzyMemTriangle', g)
    module.createEncoder("logic/gates/ina", "float", 1)  # x
    module.createEncoder("logic/gates/confa", "float", 1)  # alpha
    module.createEncoder("logic/gates/confb", "float", 1)  # beta
    module.createEncoder("logic/gates/confc", "float", 1)  # gamma
    module.createDecoder("logic/gates/outa", "float", 1)  # y
    return module
Example #14
def make(net,name='NeuralModule which interfaces demoSubscriber with the Nengo simulator', independent=True, useQuick=True):

    node = "org.hanns.demonodes.pubsub.DemoSubscriber";
    
    g = NodeGroup(name, independent);    			# Create a group of nodes (represented as a SimpleNode in the GUI)
    g.addNode(node, "subscriber", "java");  	    # start the Java node and name it subscriber in the ROS network
    module = NeuralModule('Subscriber_'+name, g, False) 	# Construct the neural module in asynchronous mode
    module.createEncoder("org/hanns/demonodes/pubsub", "float", 7) # Define IO: Termination = input of the module (7 floats)

    many=net.add(module)                    		# add it into Nengo simulator
Example #15
def fuzzyMemTriangle(name):									# ____|\____
	g = NodeGroup("FuzzyMemTriangle", True);
	g.addNode(ftriangle,"FuzzyMemTriangle","java");
	module = NeuralModule(name+'_FuzzyMemTriangle', g)
	module.createEncoder("logic/gates/ina", "float", 1)		# x
	module.createEncoder("logic/gates/confa", "float", 1) 	# alpha
	module.createEncoder("logic/gates/confb", "float", 1) 	# beta
	module.createEncoder("logic/gates/confc", "float", 1) 	# gamma
	module.createDecoder("logic/gates/outa", "float", 1)	# y
	return module
Example #16
def async_minmaxint_node(name):
	g = NodeGroup("AsynMinMaxInt", True);        			# create independent (True) group called..
	g.addNode(minmaxint, "AsynMinMaxInt", "java");     		# start java node and name it finder
	module = NeuralModule(name+'_AsynMinMaxInt', g, False)   # construct the Neural Module
	# add encoder to the module (input)
	# It is output on modem which is connected to input (topic name) to the ROS node
	module.createEncoder("org/hanns/demonodes/pubsub/IN", "float", 4)  	# ..called TERMINATION of SimpleNode
	# Add decoder to the module (output):
	# It is input if modem which is connected to output (topic name) of the ROS node
	module.createDecoder("org/hanns/demonodes/pubsub/OUT", "int", 2)    # ..called ORIGIN of SimpleNode
	return module
Example #17
def qlambdaASM(name, noStateVars=2, noActions=4, noValues=5, logPeriod=100, maxDelay=1,
classname="org.hanns.rl.discrete.ros.sarsa.config.QlambdaCoverageReward",prospLen=3,synchronous=True):
	"""Method that returns synchronous NeuralModule implementing the QLambda algorithm with 
	the ASM inbuilt. Configuration parameters of the node can be found in 
	the javadoc. Number of data inputs (size of the vector) to the module 
	is determined as 1+*noStateVars*, where the first element is the reward value.
	
	The parameter *maxDelay* describes the number of time-steps in the closed-loop learning, 
	that is: how many time steps the module should wait before change of the state
	to evalueate that the action had no-effect (state of the world has not changed).
	
	Note: if the configEncoders (config INs) are not connected, the default values
	are sent to the ROS node instead of zeros. So non-configured QLambda module
	will learn with use of predefined parameters. 
	
	:param string mame: name of the neural module (TODO) to be created
	:param integer noStateVars: number of state variables to be taken into account 
	:param integer noActions: number of actions the node can produce (encoding 1ofN is used)
	:param integer noValues: the number of values that is expected for each state variable (interval [0,1] is sampled)
	:param integer logPeriod: how often to print out the data
	:param integer maxDelay: max delay in the closed-loop learning
	:param string classname: full className of the ROS node to be launched
	:param integer prospLen: size of the vector expected from the nodes prosperity publisher
	:returns: NeuralModule that should be added into the network, the node represents the QLambda ROS node 
	"""
	# this command is used to launch the ROSjava node
	command = [classname, '_'+QLambda.noInputsConf+ ':=' + str(noStateVars), 
	'_'+QLambda.noOutputsConf+':='+str(noActions),
	'_'+QLambda.sampleCountConf+':='+str(noValues),
	'_'+QLambda.logPeriodConf+':='+str(logPeriod),
	'_'+QLambda.filterConf+':='+str(maxDelay)]

	# represent the ROS node by means of Neural Module
	g = NodeGroup("RL", True);
	g.addNode(command, "RL", "java");
	module = NeuralModule(name+'_QLambda', g, synchronous)

	# create config IO
	module.createConfigEncoder(QLambda.topicAlpha,"float",QLambda.DEF_ALPHA); 	# alpha config input, def. value is DEF_ALPHA
	module.createConfigEncoder(QLambda.topicGamma,"float",QLambda.DEF_GAMMA);
	module.createConfigEncoder(QLambda.topicLambda,"float",QLambda.DEF_LAMBDA);
	module.createEncoder(QLambda.topicImportance,"float",1);					# default value is 0

	# QLambdaCoverageReward classname => float[]{prosperity, coverage, reward/step}
	module.createDecoder(QLambda.topicProsperity,"float", prospLen);			

	# create data IO
	module.createDecoder(QLambda.topicDataOut, "float", noActions)  	# decode actions
	module.createEncoder(QLambda.topicDataIn, "float", noStateVars+1) 	# encode states (first is reward)

	return module
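
A minimal usage sketch for the module above, assuming an existing nef.Network named net, the standard nef scripting helpers (net.add, net.make_input, net.connect), and Terminations/Origins named after their ROS topics as the comments in these examples state; the constant input values are placeholders:

rl = net.add(qlambdaASM('rl', noStateVars=2, noActions=4))
state = net.make_input('state', [0.0, 0.5, 0.5])            # reward + noStateVars state variables
net.connect(state, rl.getTermination(QLambda.topicDataIn))  # data Termination named after the topic
actions = rl.getOrigin(QLambda.topicDataOut)                # 1-of-N float vector with the selected action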
Example #18
def qlambdaASM(name, noStateVars=2, noActions=4, noValues=5, logPeriod=100, maxDelay=1,
classname="org.hanns.rl.discrete.ros.sarsa.config.QlambdaCoverageReward", prospLen=3,synchronous=True):
	"""Method that returns synchronous NeuralModule implementing the QLambda algorithm with 
	the ASM inbuilt. Configuration parameters of the node can be found in 
	the javadoc. Number of data inputs (size of the vector) to the module 
	is determined as 1+*noStateVars*, where the first element is the reward value.
	
	The parameter *maxDelay* describes the number of time-steps in the closed-loop learning, 
	that is: how many time steps the module should wait before change of the state
	to evalueate that the action had no-effect (state of the world has not changed).
	
	Note: if the configEncoders (config INs) are not connected, the default values
	are sent to the ROS node instead of zeros. So non-configured QLambda module
	will learn with use of predefined parameters. 
	
	:param string mame: name of the neural module (TODO) to be created
	:param integer noStateVars: number of state variables to be taken into account 
	:param integer noActions: number of actions the node can produce (encoding 1ofN is used)
	:param integer noValues: the number of values that is expected for each state variable (interval [0,1] is sampled)
	:param integer logPeriod: how often to print out the data
	:param integer maxDelay: max delay in the closed-loop learning
	:param string classname: full className of the ROS node to be launched
	:param integer prospLen: size of the vector expected from the nodes prosperity publisher
	:returns: NeuralModule that should be added into the network, the node represents the QLambda ROS node 
	"""
	# this command is used to launch the ROSjava node
	command = [classname, '_'+QLambda.noInputsConf+ ':=' + str(noStateVars), 
	'_'+QLambda.noOutputsConf+':='+str(noActions),
	'_'+QLambda.sampleCountConf+':='+str(noValues),
	'_'+QLambda.logPeriodConf+':='+str(logPeriod),
	'_'+QLambda.filterConf+':='+str(maxDelay)]

	# represent the ROS node by means of Neural Module
	g = NodeGroup("RL", True);
	g.addNode(command, "RL", "java");
	module = NeuralModule(name+'_QLambda', g, synchronous)

	# create config IO
	module.createConfigEncoder(QLambda.topicAlpha,"float",QLambda.DEF_ALPHA); 	# alpha config input, def. value is DEF_ALPHA
	module.createConfigEncoder(QLambda.topicGamma,"float",QLambda.DEF_GAMMA);
	module.createConfigEncoder(QLambda.topicLambda,"float",QLambda.DEF_LAMBDA);
	module.createEncoder(QLambda.topicImportance,"float",1);					# default value is 0

	# QLambdaCoverageReward classname => float[]{prosperity, coverage, reward/step}
	module.createDecoder(QLambda.topicProsperity, "float", prospLen);			

	# create data IO
	module.createDecoder(QLambda.topicDataOut, "float", noActions)  	# decode actions
	module.createEncoder(QLambda.topicDataIn, "float", noStateVars+1) 	# encode states (first is reward)

	return module
Example #19
def benchmarkTwoR(name, mapName="benchmark", logPeriod=200, synchronous=True):

    command = [bench, "_" + World.logPeriodConf + ":=" + str(logPeriod)]

    noActions = 4
    # hardcoded
    noStateVars = 2

    g = NodeGroup(mapName, True)
    g.addNode(command, name, "java")
    module = NeuralModule(name + "_GridWorld", g, synchronous)
    module.createEncoder(QLambda.topicDataOut, "float", noActions)  # decode actions
    module.createDecoder(QLambda.topicDataIn, "float", noStateVars + 2)  # encode states (first is reward)
    return module
Example #20
def make(net,
         name='NeuralModule which implements RL SARSA algorithm',
         independent=True,
         useQuick=True,
         prospLen=3,
         noStateVars=2,
         noActions=4,
         sampleCount=30,
         logPeriod=100,
         maxDelay=1,
         synchronous=True):

    # full name of the rosjava node to be started
    classname = "org.hanns.rl.discrete.ros.sarsa.config.QlambdaCoverageReward"

    #command to launch and configure the RL rosjava node
    command = [
        classname, '_' + QLambda.noInputsConf + ':=' + str(noStateVars),
        '_' + QLambda.noOutputsConf + ':=' + str(noActions),
        '_' + QLambda.sampleCountConf + ':=' + str(sampleCount),
        '_' + QLambda.logPeriodConf + ':=' + str(logPeriod),
        '_' + QLambda.filterConf + ':=' + str(maxDelay)
    ]

    # create a group with the given name
    g = NodeGroup(name, independent)
    g.addNode(command, "rl_sarsa", "java")                     # start and configure the rosjava node
    module = NeuralModule(name + '_QLambda', g, synchronous)   # create the neural module representing the node

    # create config IO
    module.createConfigEncoder(QLambda.topicAlpha, "float", QLambda.DEF_ALPHA)    # alpha config input, default value is DEF_ALPHA
    module.createConfigEncoder(QLambda.topicGamma, "float", QLambda.DEF_GAMMA)
    module.createConfigEncoder(QLambda.topicLambda, "float", QLambda.DEF_LAMBDA)
    module.createEncoder(QLambda.topicImportance, "float", 1)                     # default value is 0

    # QLambdaCoverageReward classname => float[]{prosperity, coverage, reward/step}
    module.createDecoder(QLambda.topicProsperity, "float", prospLen)

    # create data IO
    module.createDecoder(QLambda.topicDataOut, "float",
                         noActions)  # decode actions
    module.createEncoder(QLambda.topicDataIn, "float",
                         noStateVars + 1)  # encode states (first is reward)

    many = net.add(module)  # add it into the network
Example #21
def make(net,name='NeuralModule which implements logical NOT operation', 
independent=True, useQuick=True):

    finder = "org.hanns.logic.crisp.gates.impl.NOT";

    # create a group with the given name
    g = NodeGroup(name, independent);    	# create an independent group
    g.addNode(finder, "logic_crisp_gates_NOT", "java");      # start the Java node and name it logic_crisp_gates_NOT
    neuron = NeuralModule(name+"_logic_crisp_gates_not", g) # construct the neural module

    neuron.createEncoder("logic/gates/ina", "bool",1)   # Termination = input of the module (1 bool)
    neuron.createDecoder("logic/gates/outa", "bool",1)  # Origin = output of the module (1 bool)

    many=net.add(neuron)                    # add it into the network
Example #22
def benchmarkTwoR(name, mapName="benchmark", logPeriod=200, synchronous=True):

    command = [bench, '_' + World.logPeriodConf + ':=' + str(logPeriod)]

    noActions = 4
    # hardcoded
    noStateVars = 2

    g = NodeGroup(mapName, True)
    g.addNode(command, name, "java")
    module = NeuralModule(name + '_GridWorld', g, synchronous)
    module.createEncoder(QLambda.topicDataOut, "float",
                         noActions)  # decode actions
    module.createDecoder(QLambda.topicDataIn, "float",
                         noStateVars + 2)  # encode states (first is reward)
    return module
Example #23
def example(name, logPeriod=200):

    command = [nodep, "_" + World.logPeriodConf + ":=" + str(logPeriod)]

    noActions = 4
    # hardcoded
    noStateVars = 2

    g = NodeGroup("GridWorld", True)
    g.addNode(command, "GridWorld", "java")
    module = NeuralModule(name + "_GridWorld", g, True)

    module.createEncoder(QLambda.topicDataOut, "float", noActions)  # decode actions
    module.createDecoder(QLambda.topicDataIn, "float", noStateVars + 1)  # encode states (first is reward)

    return module
Example #24
def buildModule(name):
    minmaxint =   	"ctu.nengoros.testsuit.demo.nodes.minmax.F2FPubSub"
    g = NodeGroup("MinMaxFloat", True);        			# create independent (True) group called..
    g.addNode(minmaxint, "MinMaxFloatNode", "java");     	# start java node and name it finder
    module = NeuralModule(name+'_MinMaxFloat', g, True)  # construct the Neural Module and run it as synchrnonous (True)
    
    # Add an Encoder to the module (input), which receives 4 floats and sends them to the ROS node 
    # This adds one (correspondingly named) Termination to the NeuralModule 
    module.createEncoder(F2FPubSub.ann2ros, "float", 4) 
    
    # Add a Decoder to the module (output)
    # The Decoder receives ROS messages (topic name==first parameter), decodes them into Nengo RealOutputImpl data,
    # and places values on the (correspondingly named) Origin
    # In case of synchronous usage of NeuralModule, the Nengo simulator waits for new ROS messages each sim. step
    module.createDecoder(F2FPubSub.ros2ann, "float", 2)
    return module
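
A minimal usage sketch for buildModule above, assuming an existing nef.Network named net and the F2FPubSub topic constants referenced in the code; net.add returning the added node and net.make_input follow the nef scripting usage shown in the other examples:

module = net.add(buildModule('demo'))
inp = net.make_input('inp', [1.0, 2.0, 3.0, 4.0])           # 4 floats fed to the Encoder (Termination)
net.connect(inp, module.getTermination(F2FPubSub.ann2ros))
out = module.getOrigin(F2FPubSub.ros2ann)                   # 2 floats decoded from the ROS node's output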
Example #25
def qlambdaMOO(name, noStateVars=2, noActions=4, noValues=5, logPeriod=100, maxDelay=1):
	command = [classMOO, '_'+QLambda.noInputsConf+ ':=' + str(noStateVars), 
	'_'+QLambda.noOutputsConf+':='+str(noActions),
	'_'+QLambda.sampleCountConf+':='+str(noValues),
	'_'+QLambda.logPeriodConf+':='+str(logPeriod),
	'_'+QLambda.filterConf+':='+str(maxDelay)]
	g = NodeGroup("RL", True);
	g.addNode(command, "RL", "java");
	module = NeuralModule(name+'_QLambda', g, False)
	module.createEncoder(QLambda.topicAlpha,"float",1); 				# alpha config
	module.createEncoder(QLambda.topicGamma,"float",1);
	module.createEncoder(QLambda.topicLambda,"float",1);
	module.createEncoder(QLambda.topicImportance,"float",1);
	module.createDecoder(QLambda.topicProsperity,"float", 3);			# float[]{prosperity, coverage, reward/step}
	module.createDecoder(QLambda.topicDataOut, "float", noActions)  	# decode actions
	module.createEncoder(QLambda.topicDataIn, "float", noStateVars+1) 	# encode states (first is reward)
	return module
Example #26
def make(net,name='NeuralModule which implements FuzzyMembership function - Increasing Linear', 
independent=True, useQuick=True):

    finder = "org.hanns.logic.fuzzy.membership.impl.IncreasingLinear";

    # create group with a name
    g = NodeGroup(name, independent);   
    g.addNode(finder, "FuzzyMemLinInc", "java");     

    neuron = NeuralModule(name+"_FuzzyMemLinInc", g) 
    neuron.createEncoder("logic/gates/ina", "float",1)   	# termination = data input x 
    neuron.createEncoder("logic/gates/confa", "float",1)	# termination - config input alpha
    neuron.createEncoder("logic/gates/confb", "float",1)	# termination - config input beta
    neuron.createDecoder("logic/gates/outa", "float",1)  	# origin = output of neuron = data output y


    many=net.add(neuron)                    # add it into the network
Example #27
def initVivae(numsensors):
    modem  = "ctu.nengoros.comm.nodeFactory.modem.impl.DefaultModem";   
    server = "vivae.ros.simulator.server.SimulatorServer"        		# start the simulator server in own thread
    # Or it is possible to call the Vivae as a standalone application in the similar way:
    #server = ["./sb/../../../../simulators/vivae/build/install/vivae/bin/vivae","vivae.ros.simulatorControlsServer.ControlsServer"]

    # create group of nodes
    g = NodeGroup("vivae", True);               # create default group of nodes
    g.addNode(server, "SimulatorServer", "java");   # run the simulator..
    g.addNode(modem,"modem","modem")              # add default modem..
    g.startGroup()                              # start group normally

    #time.sleep(3)    # if the process is native, it takes longer time to init the services !!      TODO           
    simulator = NeuralModule('VivaeSimulator', g)  # create NeuralModule which is able to add/remove agents

    sc = simulator.getControls();     # this starts the control services..
    sc.callSetVisibility(True);              # make simulation window visible..
    many=net.add(simulator)                 # add it to the Nengo network

    sc.callLoadMap('data/scenarios/test/walls.svg')  

    #addAgent(name,numSensors, maxDistance, frictionSensor) 
    sc.addAgent('a',2*numsensors,    120          ,0)
    sc.callStartSimulation()
    return simulator;
Example #28
def initVivae(numsensors):
    modem  = "ctu.nengoros.comm.nodeFactory.modem.impl.DefaultModem";   # custom modem here
    server = "vivae.ros.simulatorControlsServer.ControlsServer"        # call Vivae as a thread in Java from this process
    # Call Vivae as an external process
    #server = ["./sb/../../../../simulators/vivae/build/install/vivae/bin/vivae","vivae.ros.simulatorControlsServer.ControlsServer"]

    # create group of nodes
    g = NodeGroup("vivae", True);               # create default group of nodes
    g.addNode(server, "vivaeSimulator", "java");   # run the simulator..
    g.addNode(modem,"modem","modem")              # add default modem..
    g.startGroup()                              # start group normally

    #time.sleep(3)    # if the process is native, it takes longer time to init the services !!                 
    simulator = NeuralModule('VivaeSimulator', g)  # create NeuralModule which is able to add/remove agents

    vivae = simulator.getControls();     # this starts the control services..
    vivae.setVisible(True);              # make simulation window visible..
    many=net.add(simulator)                 # add it to the Nengo network

    vivae.loadMap('data/scenarios/test/walls.svg')  

    #addAgent(name,numSensors, maxDistance, frictionSensor) 
    vivae.addAgent('a',2*numsensors,    120          ,0)
    vivae.start()
    return simulator;
Example #29
def example(name, logPeriod=200):

    command = [nodep, '_' + World.logPeriodConf + ':=' + str(logPeriod)]

    noActions = 4
    # hardcoded
    noStateVars = 2

    g = NodeGroup("GridWorld", True)
    g.addNode(command, "GridWorld", "java")
    module = NeuralModule(name + '_GridWorld', g, True)

    module.createEncoder(QLambda.topicDataOut, "float",
                         noActions)  # decode actions
    module.createDecoder(QLambda.topicDataIn, "float",
                         noStateVars + 1)  # encode states (first is reward)

    return module
Example #30
def make(net,name='NeuralModule which implements FuzzyMembership function - Triangular - projectTemplate', 
independent=True, useQuick=True):

    finder = "org.hanns.myPackage.fuzzy.membership.impl.Triangular";

    # create group with a name
    g = NodeGroup(name, independent);   
    g.addNode(finder, "temp_FuzzyMemTriangular", "java");     

    neuron = NeuralModule(name+"_temp_FuzzyMemTriangular", g) 
    neuron.createEncoder("logic/gates/ina", "float",1)   	# termination = data input x 
    neuron.createEncoder("logic/gates/confa", "float",1)	# termination - config input alpha
    neuron.createEncoder("logic/gates/confb", "float",1)	# termination - config input betaa
    neuron.createEncoder("logic/gates/confc", "float",1)	# termination - config input gamma
    neuron.createDecoder("logic/gates/outa", "float",1)  	# origin = output of neuron = data output y


    many=net.add(neuron)                    # add it into the network
Example #31
def basic(name, noInputs=Motivation.DEF_NOINPUTS, decay=Motivation.DEF_DECAY, logPeriod=Motivation.DEF_LOGPERIOD,
rewVal=Motivation.DEF_REWARD, rewThr=Motivation.DEF_REWTHRESHOLD, synchronous=True):

	# configure the node at startup via command-line arguments
	command = [classs, '_'+Motivation.noInputsConf+ ':=' + str(noInputs), 
	'_'+Motivation.decayConf+':='+str(decay),
	'_'+Motivation.logPeriodConf+':='+str(logPeriod)]

	g = NodeGroup("Motivation", True);
	g.addNode(command, "Motivation", "java");
	module = NeuralModule(name+'_Motivation', g, synchronous)

	# connect the decay parameter to the Nengoros network (can be changed online)
	module.createConfigEncoder(Motivation.topicDecay,"float", 1); 		# decay config (unconnected=Motivation.DEF_DECAY)

	module.createDecoder(Motivation.topicDataOut, "float", 2)           # decode float[]{reward,motivation}
	module.createEncoder(Motivation.topicDataIn, "float", noInputs) 	# encode input data (sum rewards here)
	
	module.createDecoder(Motivation.topicProsperity,"float", 1);		# float[]{prosperity}  = MSD from the limbo area

	return module
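
A minimal sketch of overriding the decay parameter online, assuming an existing nef.Network named net; the decay value 0.01 is a placeholder:

mot = net.add(basic('motivation'))
decay = net.make_input('decay', [0.01])
net.connect(decay, mot.getTermination(Motivation.topicDecay))  # an unconnected config input falls back to DEF_DECAY
out = mot.getOrigin(Motivation.topicDataOut)                   # float[]{reward, motivation}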
Example #32
def make(net,name='Vivae Simulator', mapName='arena1.svg', numSensors=4,maxdist=30,frictdist=50,independent=True, useQuick=True):
    
    numSensors = numSensors*2
    mn = 'data/scenarios/'+mapName

    # note that the simulator is started externally, since the SVG loaded in Vivae otherwise hung for an unknown reason..
    modem  = "ctu.nengoros.comm.nodeFactory.modem.impl.DefaultModem";   # custom modem here
    server = "vivae.ros.simulatorControlsServer.ControlsServer"         # call Vivae as a thread in Java from this process
    vv  = ["./sb/../../../../simulators/vivae/build/install/vivae/bin/vivae","vivae.ros.simulatorControlsServer.ControlsServer"]
    vvj = ["vivae.ros.simulatorControlsServer.ControlsServer"]
    
    # create group of nodes
    g = NodeGroup("vivae", True);           # if nameSpace not defined, create independent group
    #g.addNode(server,"vivaeSimulator", "java");   # run the simulator..
    #g.addNode(vv, "vivaeSimulator", "native");  # run the simulator..
    g.addNode(vvj, "vivaeSimulator", "java");  # run the simulator..
    g.addNode(modem,"modem","modem")              # add default modem..
    g.startGroup()                              # start group normally

    #modem = g.getModem()
    #time.sleep(3)    # if the process is native, it takes longer time to init the services !!      
    
    simulator = NeuralModule('VivaeSimulator',g)  # create NeuralModule which is able to add/remove agents
    
    vivae = simulator.getControls();     # this starts the control services..
    vivae.setVisible(True);              # make simulation window visible..
    many=net.add(simulator)                 # add it to the Nengo network
    """
    vivae.loadMap('data/scenarios/test/walls.svg')  

    #addAgent(name,numSensors, maxDistance, frictionSensor) 
    vivae.addAgent('a',2*numsensors,    120          ,0)
    vivae.start()
    """
    print 'loading'
    vivae.loadMap(mn)  
    agentNames = ['a','b','c','d','e','f','g','h','i','j','k','l','m']
    # run as many agents as map can hold (up to a-m)  
    for i in range(0, len(agentNames)):
        vivae.addAgent(agentNames[i], numSensors, maxdist, frictdist)
    print 'starting'
    vivae.start()
    
    """
    vivae.loadMap(mn)  
    agentNames = ['a','b','c','d','e','f','g','h','i','j','k','l','m']
    # run as many agents as map can hold (up to a-m)  
    for i in range(0, len(agentNames)):
        Controls.addAgent(agentNames[i], numSensors, maxdist, frictdist)    
    """
    #    vivae.start()
    print 'Vivae is ready.'
Example #33
def init(net, mapName=Sim.Maps.DEFAULT, visible=False):
	
    # create group of nodes
    g = NodeGroup("vivae", True);               	# create default group of nodes
    g.addNode(server, "SimulatorServer", "java");   # run the simulator..
    g.addNode(modem,"modem","modem")              	# add default modem..
    g.startGroup()                              	# start group normally

    simulator = NeuralModule('VivaeSimulator',g)  	# create NeuralModule which is able to add/remove agents

    vivae = simulator.getControls();     			# this starts the control services..
    vivae.callSetVisibility(visible);              	# make simulation window visible..
    result=net.add(simulator)                 		# add it to the Nengo network

    vivae.callLoadMap(mapName)  					# loads the map into the simulator and waits for start or agents
    return simulator;
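
A minimal sketch of using init() above, assuming an existing nef.Network named net; the agent parameters follow the addAgent(name, numSensors, maxDistance, frictionSensor) convention noted in the other Vivae examples:

sim = init(net)
controls = sim.getControls()
controls.addAgent('a', 8, 120, 0)       # addAgent(name, numSensors, maxDistance, frictionSensor)
controls.callStartSimulation()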
Example #34
def initVivae(numsensors, visible=False):
    modem  = "ctu.nengoros.comm.nodeFactory.modem.impl.DefaultModem";   
    server = "vivae.ros.simulator.server.SimulatorServer"        		# start the simulator server in own thread

    # create group of nodes
    g = NodeGroup("vivae", True);               # create default group of nodes
    g.addNode(server, "SimulatorServer", "java");   # run the simulator..
    g.addNode(modem,"modem","modem")              # add default modem..
    g.startGroup()                              # start group normally

    simulator = NeuralModule('VivaeSimulator',g)  # create NeuralModule which is able to add/remove agents

    vivae = simulator.getControls();     # this starts the control services..
    vivae.callSetVisibility(visible);              # make simulation window visible..
    many=net.add(simulator)                 # add it to the Nengo network

    vivae.callLoadMap('data/scenarios/test/wallsII.svg')  

    #addAgent(name, numSensors, maxDistance, frictionSensor)  (note that you will actually get numSensors+1 floats, plus the speed)
    vivae.addAgent('a',2*numsensors,    120          ,0)
    vivae.callStartSimulation()
    return simulator;
Example #35
import nef
from ctu.nengoros.comm.nodeFactory import NodeGroup as NodeGroup
from ctu.nengoros.comm.rosutils import RosUtils as RosUtils
# NeuralModule import path assumed here (it is not part of the original snippet):
from ctu.nengoros.modules.impl import DefaultNeuralModule as NeuralModule

pub = "org.hanns.demonodes.pubsub.DemoPublisher"
sub = "org.hanns.demonodes.pubsub.DemoSubscriber"

################# setup the ROS utils (optional) 
#RosUtils.setAutorun(False)     # Do we want to autorun roscore and rxgraph? (true by default)
#RosUtils.prefferJroscore(True)  # prefer jroscore over roscore

# create the NEF network and add it to Nengo (this must come first in the script) 
net=nef.Network('Neural module - sub-network of ROS nodes - independent of Nengo simulation')
net.add_to_nengo()  # here: the old (top-level) network is deleted and replaced with the newly created one;
					# any potentially running ROS nodes are stopped here

################ Run a group of nodes which is independent of the Nengo simulator 
# In this case, nodes are started after being added into the simulator (as usual) 
# and stopped after being removed from the network. Their communication is not affected by Nengo.
#
# This provides a handy alternative for launching arbitrary ROS nodes.
#g = NodeGroup("PubSubDemo", False);					# will not "push" the nodes' communication topics into its own namespace
g = NodeGroup("PubSubDemo", True);
g.addNode(pub, "IndependentPublisher", "java");     	# add the publisher node
g.addNode(sub, "IndependentSubscriber", "java");     	# add the subscriber node
module = NeuralModule("Independent_PubSubDemo", g)    		

net.add(module)	# nodes are launched here

print 'Configuration complete, nodes launched and already communicating, see the command line.'
print 'Try deleting the node from the GUI.'
Example #36
net=nef.Network('Create a simple Neural Module which is configured by this Jython script')
net.add_to_nengo()  # here: the old (top-level) network is deleted and replaced with the newly created one

################# setup the ROS utils (optional) 
#RosUtils.setTimeIgnore() 	
#RosUtils.setTimeMaster() 	# used by default
#RosUtils.setTimeSlave() 	# note: experimental, TODO
#RosUtils.setAutorun(False)     # Do we want to autorun roscore and rxgraph? (true by default)
#RosUtils.prefferJroscore(True)  # prefer jroscore over roscore? 


################# add ROS node with configuration stored in `command`

rosparam =   	"org.hanns.demonodes.privateParams.SetPrivateParameters"
command = [rosparam, '_a:=2', '_b:=3', 'shouldLog:=true']


g = NodeGroup("Sinus", True);        			# create independent (True) group called..
g.addNode(command, "Sinus", "java");     		# start java node and name it finder
module = NeuralModule("Configured Neural Module", g)# construct the Neural Module

# connect output
module.createDecoder("org/hanns/demonodes/params/", "float", 1)    # ..called ORIGIN of SimpleNode
many = net.add(module)

# create a population of 50 neurons which approximates its input 
A=net.make('A',neurons=50,dimensions=2,radius=8)    				# identity transformation implemented by a population of neurons
net.connect(many.getOrigin('org/hanns/demonodes/params/'),A)		# connect the Origin of our module to the population

print 'Configuration complete.'