# Nolan B. 1/8/16
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Runtime
from time import sleep

# ---------------------------------- Web Gui --------------------------
webgui = Runtime.create("webgui", "WebGui")
webgui.autoStartBrowser(False)
Runtime.start("webgui", "WebGui")

# --------------------------------- Create Services ----------------------
arduino = Runtime.createAndStart("arduino", "Arduino")
joystick = Runtime.createAndStart("joystick", "Joystick")
motorL = Runtime.start("motorL", "Motor")
motorR = Runtime.start("motorR", "Motor")
log = Runtime.start("log", "Log")

# ---------------------- Connect Peripherals -----------------------------------
joystick.setController(0)  # PC only - Pi needs new
joystick.addInputListener(python)
arduino.connect("/dev/ttyACM0")

# switch the joystick polling on
joystick.startPolling()

# attach the left motor to the Arduino (pwm pin 5, direction pin 4)
arduino.motorAttach("motorL", 5, 4)
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# system specific variables - current servo positions
actservox = 90
actservoy = 90

# one Pid controller per axis
xpid = Runtime.createAndStart("xpid", "Pid")
ypid = Runtime.createAndStart("ypid", "Pid")

xpid.setMode(1)
xpid.setOutputRange(-3, 3)
xpid.setPID(10.0, 0, 1.0)
xpid.setControllerDirection(1)
xpid.setSetpoint(160)  # we want the target in the middle of the x

ypid.setMode(1)
ypid.setOutputRange(-3, 3)
ypid.setPID(10.0, 0, 1.0)
ypid.setControllerDirection(1)
ypid.setSetpoint(120)

arduino = Runtime.createAndStart("arduino", "Arduino")
pan = Runtime.createAndStart("pan", "Servo")
tilt = Runtime.createAndStart("tilt", "Servo")
##############################################
# This script creates the parts of a ROFI and
# attaches them to MRL
#from org.myrobotlab.service import UltrasonicSensor
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import GUIService
from org.myrobotlab.service import Speech
from time import sleep

# dictionary of arrays holding per-servo/joint calibration data
calib = {}

# create the services
speech = Runtime.createAndStart("speech", "Speech")  # For voice feedback
#ear = Runtime.createAndStart("listen","Sphinx")  # For hearing spoken commands
gui = Runtime.start("gui", "GUIService")
keyboard = Runtime.start("keyboard", "Keyboard")  # For getting user confirmation
#keyboard.addKeyListener(python)

# Arduino to connect everything to like a spinal cord
arduino = Runtime.createAndStart("arduino", "Arduino")
#sr04 = Runtime.start("sr04", "UltrasonicSensor")  # For an ultrasonic view of the world

# 6 joints in the Right leg
rAnkle = Runtime.createAndStart("R Ankle", "Servo")
# create the InMoov2 sub-services
i01_chatBot_search = Runtime.start('i01.chatBot.search', 'GoogleSearch')
MoveHeadTimer = Runtime.start('MoveHeadTimer', 'Clock')
#gui = Runtime.start('gui', 'SwingGui')
vi01_left = Runtime.start('vi01.left', 'VirtualArduino')
vi01_right = Runtime.start('vi01.right', 'VirtualArduino')
i01_head_rothead = Runtime.start('i01.head.rothead', 'Servo')
i01_leftArm = Runtime.start('i01.leftArm', 'InMoov2Arm')
openWeatherMap = Runtime.start('openWeatherMap', 'OpenWeatherMap')
i01_ear = Runtime.start('i01.ear', 'WebkitSpeechRecognition')
i01_head_neck = Runtime.start('i01.head.neck', 'Servo')
i01_head_eyelidLeft = Runtime.start('i01.head.eyelidLeft', 'Servo')
i01_head_eyelidRight = Runtime.start('i01.head.eyelidRight', 'Servo')
i01_leftArm_shoulder = Runtime.start('i01.leftArm.shoulder', 'Servo')
i01_rightHand_wrist = Runtime.start('i01.rightHand.wrist', 'Servo')
i01_mouth = Runtime.start('i01.mouth', 'MarySpeech')
AudioPlayer = Runtime.createAndStart("AudioPlayer", "AudioFile")

#############################################################
## Needs fixing in InMoov2.java
#i01_pir = Runtime.start('i01.pir', 'Pir')
#i01_ultraSonicRight = Runtime.start('i01.ultraSonicRight', 'UltrasonicSensor')
#i01_ultraSonicLeft = Runtime.start('i01.ultraSonicLeft', 'UltrasonicSensor')
#i01_neopixel = Runtime.start('i01.neopixel', 'Neopixel')
#i01_opencv = Runtime.start('i01.opencv', 'OpenCV')

##############################################################
## creating client connections connections ####
##############################################################
## configuring services ####
# Servo Config : i01_leftArm_rotate
# sets initial position of servo before moving
# in theory this is the position of the servo when this file was created
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino", "Arduino")
servo01 = Runtime.createAndStart("servo01", "Servo")

# initialize arduino
arduino.connect("/dev/ttyUSB0")

# TODO - set limits
# attach the servo to pin 9
arduino.servoAttach(servo01.getName(), 9)

# fast sweep between the extremes, twice
for target in (179, 10, 179, 10):
    servo01.moveTo(target)
    sleep(0.5)
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime

# create ear and mouth
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","MarySpeech")

# start listening for the words we are interested in
# (addComfirmations is the service's own spelling)
ear.addComfirmations("yes","correct","ya")
ear.addNegations("no","wrong","nope","nah")
#ear.startListening("hello world|happy monkey|go forward|stop|yes|correct|ya|no|wrong|nope|nah")
ear.startListening("hello world|happy monkey|go forward|stop")
ear.addCommand("hello world", "python", "helloWorld")

# set up a message route from the ear --to--> python method "heard"
# ear.addListener("recognized", python.name, "heard");
# ear.addComfirmations("yes","correct","yeah","ya")
# ear.addNegations("no","wrong","nope","nah")
# ear.addCommand("hello world", "python", "helloworld")
# set up a message route from the ear --to--> python method "heard"
# ear.addListener("recognized", python.name, "heard");
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint

global posx
global posy
global newx
global newy

# build the OpenCV pipeline: shrink, detect, then find contours
opencv = Runtime.createAndStart("opencv","OpenCV")
opencv.publishState()
opencv.addFilter("pd","PyramidDown")
opencv.setDisplayFilter("pd")
opencv.addFilter("detector","Detector")
opencv.setDisplayFilter("detector")
opencv.addFilter("findcontours","FindContours")
opencv.setDisplayFilter("findcontours")

# tracker plus all of its peer services
tracker = Runtime.create("tracker","Tracking")
rotation = Runtime.create("rotation","Servo")
neck = Runtime.create("neck","Servo")
arduino = Runtime.create("arduino","Arduino")
xpid = Runtime.create("xpid","PID")
ypid = Runtime.create("ypid","PID")
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import WebServer
from org.myrobotlab.net import BareBonesBrowserLaunch

# create and start the webserver service
webserver = Runtime.createAndStart("webserver", "WebServer")

# Launch your web browser to the URL of localhost, by default port is 19191
# helloworl.htm file is in your MRL main folder
# you can add files there and change the URL below as you desire
BareBonesBrowserLaunch.openURL("http://localhost:19191/helloworld.htm")
from jarray import array from java.lang import String from java.lang import Class from org.myrobotlab.service import Runtime from org.myrobotlab.framework import Message catcher = Runtime.createAndStart("catcher","TestCatcher") thrower = Runtime.createAndStart("thrower","TestThrower") def input(): # python catches data from thrower - then throws to catcher print 'thrower sent me ', msg_thrower_send.data[0] print 'modifying the ball' msg_thrower_send.data[0]='throw from python->catcher' print 'throwing to catcher now' python.send('catcher', 'catchString', msg_thrower_send.data[0]) # thrower sends data to python thrower.throwString('python', 'input', 'throw from thrower->python');
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep

# web gui + webkit speech recognition -> ProgramAB -> html filter -> Acapela TTS
holygrail = Runtime.createAndStart("holygrail", "WebGui")
wksr = Runtime.createAndStart("webkitspeechrecognition", "WebkitSpeechRecognition")
simple = Runtime.createAndStart("simple", "ProgramAB")
simple.startSession("default", "simple")
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")

voices = acapelaSpeech.getVoices()
# BUG FIX: the original looped over every available voice and re-set
# "Ryan" on each iteration; selecting the voice once is the intent.
acapelaSpeech.setVoice("Ryan")

# wire the pipeline: speech recognition -> chatbot -> filter -> TTS
wksr.addTextListener(simple)
simple.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)
######################################
# Example Document Processing pipeline
######################################
from org.myrobotlab.service import Runtime
from org.myrobotlab.document.transformer import WorkflowConfiguration
from org.myrobotlab.document.transformer import StageConfiguration

# create the pipeline service
pipeline = Runtime.createAndStart("docproc", "DocumentPipeline")
# create a pipeline
# pipeline.workflowName = "default";

# build the workflow that will be loaded into the pipeline service
workflowConfig = WorkflowConfiguration()
workflowConfig.setName("default")

# stage 1: statically assign the value "MRL" to the field "table"
staticFieldStageConfig = StageConfiguration()
staticFieldStageConfig.setStageClass(
    "org.myrobotlab.document.transformer.SetStaticFieldValue")
staticFieldStageConfig.setStageName("SetTableField")
staticFieldStageConfig.setStringParam("table", "MRL")
workflowConfig.addStage(staticFieldStageConfig)

# stage 2: run OpenNLP over the description field
openNLPConfig = StageConfiguration()
openNLPConfig.setStageClass("org.myrobotlab.document.transformer.OpenNLP")
openNLPConfig.setStageName("OpenNLP")
openNLPConfig.setStringParam("textField", "description")
workflowConfig.addStage(openNLPConfig)

# stage 3: send the document on to solr
sendToSolrConfig = StageConfiguration()
sendToSolrConfig.setStageClass(
    "org.myrobotlab.document.transformer.SendToSolr")
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import VideoStreamer
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch

# create a video source (opencv) & a video streamer
opencv = Runtime.createAndStart("opencv","OpenCV")
streamer = Runtime.createAndStart("streamer","VideoStreamer")

# attach them
streamer.attach(opencv)

# pyramid-down + gray filters to minimize the data
opencv.addFilter("pyramidDown", "PyramidDown")
opencv.addFilter("gray", "Gray")

# start the camera
opencv.capture()

# give opencv time to "warm up" the cam
sleep(3)

# go to http://localhost:9090/output
BareBonesBrowserLaunch.openURL("http://localhost:9090")
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino", "Arduino")
servo01 = Runtime.createAndStart("servo01", "Servo")

# initialize arduino
arduino.connect("/dev/ttyUSB0")

# TODO - set limits
# attach the servo on pin 9
arduino.servoAttach(servo01.getName(), 9)

# fast sweep back and forth, twice
for position in (179, 10, 179, 10):
    servo01.moveTo(position)
    sleep(0.5)
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import AudioCapture
from time import sleep

audiocapture = Runtime.createAndStart("audiocapture", "AudioCapture")

# start capturing audio
audiocapture.captureAudio()
# record for 5 seconds
sleep(5)
# stop recording
audiocapture.stopAudioCapture()
# play back what was recorded
audiocapture.playAudio()
##############################################
# This script creates 2 servos of a pan / tilt kit
# attaches them to an Arduino and attaches
# a joystick to control the servos
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino","Arduino")
pan = Runtime.createAndStart("pan","Servo")
tilt = Runtime.createAndStart("tilt","Servo")
joystick = Runtime.createAndStart("joystick","Joystick")

arduino.connect("COM10", 57600, 8, 1, 0)
sleep(2)

# attach servos to Arduino pins 9 and 10
pan.attach(arduino.getName(), 9)
tilt.attach(arduino.getName(), 10)

# map joystick axes to the servos
joystick.attach(pan, Joystick.Z_AXIS)
joystick.attach(tilt, Joystick.Z_ROTATION)
joystick.setController(2)
joystick.startPolling()
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import AudioCapture
from time import sleep

audiocapture = Runtime.createAndStart("audiocapture","AudioCapture")

# begin capturing audio
audiocapture.captureAudio()
# let it record for 5 seconds
sleep(5)
# stop the recording
audiocapture.stopAudioCapture()
# play the recorded audio back
audiocapture.playAudio()
# the best interface to do finely controlled actuation. But, it is very
# convenient to express high-level (e.g. go to center of the room) commands
#
# FYI - The memory requirements for Sphinx are a bit hefty and depending on the
# platform additional JVM arguments might be necessary e.g. -Xmx256m

# create an ear
ear = Runtime.create("ear","Sphinx")

# create the grammar you would like recognized
# this must be done before the service is started
ear.createGrammar("all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two ")
ear.startService()

# start the mouth
mouth = Runtime.createAndStart("mouth","Speech")

# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass())

# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach("mouth")

# create an Arduino & name arduino & index
# Nolan B. 1/8/16
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Runtime
from time import sleep

# ---------------------------------- Web Gui --------------------------
webgui = Runtime.create("webgui", "WebGui")
webgui.autoStartBrowser(False)
Runtime.start("webgui", "WebGui")

# --------------------------------- Create Services ----------------------
arduino = Runtime.createAndStart("arduino","Arduino")
joystick = Runtime.createAndStart("joystick","Joystick")
motorL = Runtime.start("motorL","Motor")
motorR = Runtime.start("motorR","Motor")
log = Runtime.start("log","Log")

# ---------------------- Connect Peripherals -----------------------------------
joystick.setController(0)  # PC only - Pi needs new
joystick.addInputListener(python)
arduino.connect("/dev/ttyACM0")

# tell the joystick to turn on
joystick.startPolling()
##############################################
# This script creates 2 servos of a pan / tilt kit
# attaches them to an Arduino and attaches
# a joystick to control the servos
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino", "Arduino")
pan = Runtime.createAndStart("pan", "Servo")
tilt = Runtime.createAndStart("tilt", "Servo")
joystick = Runtime.createAndStart("joystick", "Joystick")

arduino.connect("COM10", 57600, 8, 1, 0)
sleep(2)

# wire the servos onto Arduino pins 9 / 10
pan.attach(arduino.getName(), 9)
tilt.attach(arduino.getName(), 10)

# drive the servos from the joystick axes
joystick.attach(pan, Joystick.Z_AXIS)
joystick.attach(tilt, Joystick.Z_ROTATION)
joystick.setController(2)
joystick.startPolling()
from jarray import array from java.lang import String from java.lang import Class from org.myrobotlab.service import Runtime from org.myrobotlab.framework import Message catcher = Runtime.createAndStart("catcher", "TestCatcher") thrower = Runtime.createAndStart("thrower", "TestThrower") def input(): # python catches data from thrower - then throws to catcher print "thrower sent me ", msg_thrower_send.data[0] print "modifying the ball" msg_thrower_send.data[0] = "throw from python->catcher" print "throwing to catcher now" python.send("catcher", "catchString", msg_thrower_send.data[0]) # thrower sends data to python thrower.throwString("python", "input", "throw from thrower->python")
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import WebServer
from org.myrobotlab.net import BareBonesBrowserLaunch

# create and start the webserver service
webserver = Runtime.createAndStart("webserver","WebServer")

# Launch your web browser to the URL of localhost, by default port is 19191
# helloworl.htm file is in your MRL main folder
# you can add files there and change the URL below as you desire
BareBonesBrowserLaunch.openURL("http://localhost:19191/helloworld.htm")
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
from ispyMotors import Motors

m = Motors()
#connect to the Tank Control port
m.init_connection()
#once we're connected
if m.is_connected:
    ear = Runtime.createAndStart("ear", "Sphinx")
    # start listening for the words we are interested in
    ear.startListening(
        "go forward|go backward|turn left|turn right|head up|head down|stop")
    # set up a message route from the ear --to--> python method "heard"
    ear.addListener("recognized", python.name, "heard", String().getClass())

# this method is invoked when something is
# recognized by the ear
def heard(phrase):
    print "heard ", phrase
    if phrase == "go forward":
        m.moveForward()
    elif phrase == "go backward":
        # NOTE(review): source appears truncated here - the remaining
        # command branches are missing from this chunk
# This is a little speech recognition script.
# Use "Led On" or "Led off" to control the led .
# The led is connected on pin 13 by default
# Change the value of the variable "ledPin" if you connect the led on an other pin
# Set the right com port in the variable " comPort " and the right arduino board

# pin driving the led
ledPin = 13
# serial port of the arduino
comPort = "COM4"

# create ear and mouth
ear = Runtime.createAndStart("ear", "Sphinx")
mouth = Runtime.createAndStart("mouth", "Speech")

# create an Arduino service named arduino
arduino = Runtime.createAndStart("arduino", "Arduino")
arduino.setBoard("atmega328")  # atmega168 | mega2560 | etc
arduino.setSerialDevice(comPort, 57600, 8, 1, 0)
sleep(1)  # give it a second for the serial device to get ready

# update the gui with configuration changes
arduino.publishState()
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch

# create the services: data fetcher, web gui, webkit speech recognition,
# ProgramAB chatbot, html filter and Acapela TTS
wdf = Runtime.createAndStart("wikiDataFetcher", "WikiDataFetcher")
holygrail = Runtime.createAndStart("holygrail", "WebGui")
wksr = Runtime.createAndStart("webkitspeechrecognition", "WebkitSpeechRecognition")
elias = Runtime.createAndStart("elias", "ProgramAB")
elias.startSession("elias")
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")
voices = acapelaSpeech.getVoices()
# NOTE(review): this loop re-sets the same voice on every iteration;
# presumably a single setVoice("Graham") call was intended - confirm
for voice in voices:
    acapelaSpeech.setVoice("Graham")
# wire the pipeline: recognition -> chatbot -> filter -> TTS
wksr.addTextListener(elias)
elias.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)

# auto-respond after a pause of c seconds (b - a computed elsewhere)
def BT():
    global c
    c = b - a
    sleep(2)
    print c
    if (c == 2):
        resp = elias.getResponse("AUTORESPOND1")
    if (c == 4):
        # NOTE(review): source appears truncated here
##############################################
# This script creates the parts of a ROFI and
# attaches them to MRL
#from org.myrobotlab.service import UltrasonicSensor
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import GUIService
from org.myrobotlab.service import Speech
from time import sleep

# calibration data per servo/joint, keyed by name
calib = {}

# create the services
speech = Runtime.createAndStart("speech","Speech")  # For voice feedback
#ear = Runtime.createAndStart("listen","Sphinx")  # For hearing spoken commands
gui = Runtime.start("gui", "GUIService")
keyboard = Runtime.start("keyboard", "Keyboard")  # For getting user confirmation
#keyboard.addKeyListener(python)

# Arduino to connect everything to like a spinal cord
arduino = Runtime.createAndStart("arduino","Arduino")
#sr04 = Runtime.start("sr04", "UltrasonicSensor")  # For an ultrasonic view of the world

# 6 joints in the Right leg
rAnkle = Runtime.createAndStart("R Ankle","Servo")
rLowLeg = Runtime.createAndStart("R Low Leg","Servo")
######################################
# Example Document Processing pipeline
######################################
from org.myrobotlab.service import Runtime
from org.myrobotlab.document.transformer import WorkflowConfiguration
from org.myrobotlab.document.transformer import StageConfiguration

# create the pipeline service
pipeline = Runtime.createAndStart("docproc", "DocumentPipeline")
# create a pipeline
# pipeline.workflowName = "default";

# workflow definition that the pipeline service will load
workflowConfig = WorkflowConfiguration()
workflowConfig.setName("default")

# stage: statically assign the value of "MRL" to the field "table"
staticFieldStageConfig = StageConfiguration()
staticFieldStageConfig.setStageClass("org.myrobotlab.document.transformer.SetStaticFieldValue")
staticFieldStageConfig.setStageName("SetTableField")
staticFieldStageConfig.setStringParam("table", "MRL")
workflowConfig.addStage(staticFieldStageConfig)

# stage: run OpenNLP over the "description" field
openNLPConfig = StageConfiguration()
openNLPConfig.setStageClass("org.myrobotlab.document.transformer.OpenNLP")
openNLPConfig.setStageName("OpenNLP")
openNLPConfig.setStringParam("textField","description")
workflowConfig.addStage(openNLPConfig)

# stage: ship the document off to solr
sendToSolrConfig = StageConfiguration()
sendToSolrConfig.setStageClass("org.myrobotlab.document.transformer.SendToSolr")
sendToSolrConfig.setStageName("SendToSolr")
sendToSolrConfig.setStringParam("solrUrl", "http://www.skizatch.org:8983/solr/graph")
from org.myrobotlab.service import Runtime

# serial link to the RGB controller
serial = Runtime.createAndStart("serial", "Serial")
serial.connect('COM4')

joystick = Runtime.start("joystick", "Joystick")
joystick.setController(0)

# route joystick events to onJoystickInput below
python.subscribe("joystick", "publishJoystickInput")

def onJoystickInput(data):
    # R / G / B buttons each send a full-intensity color triplet
    pressed = data.value == 1.0
    if pressed and data.id == "R":
        serial.write("255,0,0")
    if pressed and data.id == "G":
        serial.write("0,255,0")
    if pressed and data.id == "B":
        serial.write("0,0,255")
try: mp3file = urllib2.urlopen('http://www.inmoov.fr/wp-content/uploads/2015/05/starting-mouth.mp3') output = open(soundfilename,'wb') output.write(mp3file.read()) output.close() except IOError: print "Check access right on the directory" except Exception: print "Can't get the sound File ! Check internet Connexion" leftPort = "COM20" #modify port according to your board i01 = Runtime.createAndStart("i01", "InMoov") i01.startEar() mouth = Runtime.createAndStart("mouth","Speech") i01.startMouth() ############## torso = i01.startTorso("COM20") #modify port according to your board # tweaking default torso settings torso.topStom.setMinMax(0,180) torso.topStom.map(0,180,67,110) torso.midStom.setMinMax(0,180) torso.midStom.map(0,180,60,120) #torso.lowStom.setMinMax(0,180) #torso.lowStom.map(0,180,60,110) #torso.topStom.setRest(90) #torso.midStom.setRest(90)
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# system specific variables - current servo positions
actservox = 90
actservoy = 90

# PID controllers, one per axis
xpid = Runtime.createAndStart("xpid","PID")
ypid = Runtime.createAndStart("ypid","PID")

xpid.setMode(1)
xpid.setOutputRange(-3, 3)
xpid.setPID(10.0, 0, 1.0)
xpid.setControllerDirection(1)
xpid.setSetpoint(160)  # we want the target in the middle of the x

ypid.setMode(1)
ypid.setOutputRange(-3, 3)
ypid.setPID(10.0, 0, 1.0)
ypid.setControllerDirection(1)
ypid.setSetpoint(120)

arduino = Runtime.createAndStart("arduino","Arduino")
pan = Runtime.createAndStart("pan","Servo")
tilt = Runtime.createAndStart("tilt","Servo")
from org.myrobotlab.service import Arduino
# This is a little speech recognition script.
# Use "Led On" or "Led off" to control the led .
# The led is connected on pin 13 by default
# Change the value of the variable "ledPin" if you connect the led on an other pin
# Set the right com port in the variable " comPort " and the right arduino board

# led pin and serial port configuration
ledPin = 13
comPort = "COM4"

# create ear and mouth
ear = Runtime.createAndStart("ear", "Sphinx")
mouth = Runtime.createAndStart("mouth", "Speech")

# create an Arduino service named arduino
arduino = Runtime.createAndStart("arduino", "Arduino")
arduino.setBoard("atmega328")  # atmega168 | mega2560 | etc
arduino.setSerialDevice(comPort, 57600, 8, 1, 0)
sleep(1)  # give it a second for the serial device to get ready

# update the gui with configuration changes
arduino.publishState()
#from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import os

# gmail credentials (placeholders - fill in before running)
gmail_user = "******"
# BUG FIX: the original line was an unterminated string literal
# (gmail_pwd = ") - a syntax error; restored as a placeholder matching
# the gmail_user convention above.
gmail_pwd = "******"

# create ear, mouth and camera
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","Speech")
opencv = Runtime.createAndStart("opencv","OpenCV")
opencv.addFilter("pdown","PyramidDown")
opencv.setDisplayFilter("pdown")
opencv.capture()
mouth.setLanguage("it")

# start listening for the words we are interested in
ear.startListening("hello robot|take photo|send email")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass())

def mail(to, subject, text, attach):
    # build a multipart email with an attachment
    msg = MIMEMultipart()
# sayThings.py
# example script for MRL showing various methods
# of the Speech Service
# http://myrobotlab.org/doc/org/myrobotlab/service/Speech.html
#
# The preferred method for creating services is
# through the Runtime. This will allow
# the script to be rerun without creating a new
# Service each time. The initial delay from Service
# creation and caching voice files can be large, however
# after creation and caching methods should return
# immediately

# Create a running instance of the Speech Service named "speech".
speech = Runtime.createAndStart("speech","Speech")

# Speak with initial defaults - Google en
speech.speak("hello brave new world")

# Google must have network connectivity - the back-end caches a sound
# file once it is pulled from Google, so the first call is slow but
# subsequent calls are quick and work offline.
speech.setBackendType("GOOGLE")
speech.setLanguage("en")
speech.speak("Hello World From Google.")
speech.setLanguage("pt")  # Google supports some language codes
speech.speak("Hello World From Google.")
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from org.myrobotlab.opencv import OpenCVFilterInRange
from com.googlecode.javacv.cpp.opencv_core import CvPoint

global posx
global posy
global newx
global newy

opencv = Runtime.createAndStart("opencv", "OpenCV")
opencv.publishState()
opencv.addFilter("pd", "PyramidDown")

# configure an HSV in-range filter
InRange = OpenCVFilterInRange("InRange")
InRange.useHue = True
InRange.hueMinValue = 22
InRange.hueMaxValue = 47
InRange.useSaturation = True
InRange.saturationMinValue = 88
InRange.saturationMaxValue = 246
InRange.useValue = True
InRange.valueMinValue = 178
# BUG FIX: the original assigned valueMinValue twice (178 then 255),
# leaving the upper bound unset; the second assignment was clearly
# meant to be the max, matching the hue/saturation min-max pairs above.
InRange.valueMaxValue = 255

opencv.addFilter(InRange)
opencv.setDisplayFilter("InRange")
opencv.addFilter("findcontours", "FindContours")
from org.myrobotlab.service import Runtime

# serial link to the RGB controller
serial = Runtime.createAndStart("serial","Serial")
serial.connect('COM4')

joystick = Runtime.start("joystick","Joystick")
joystick.setController(0)

# subscribe this python service to joystick input events
python.subscribe("joystick","publishJoystickInput")

def onJoystickInput(data):
    # button press (value 1.0) on R/G/B selects the matching color
    if data.value == 1.0:
        if data.id == "R":
            serial.write("255,0,0")
        if data.id == "G":
            serial.write("0,255,0")
        if data.id == "B":
            serial.write("0,0,255")
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime

# create ear, arduino and servo
ear = Runtime.createAndStart("ear","Sphinx")
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM4")
servo = Runtime.createAndStart("servo","Servo")
servo.attach(arduino, 10)

# start listening for the words we are interested in
ear.startListening("go forward|go backwards|stop")

# route recognized speech to the python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass())

# invoked when the ear recognizes a phrase - drive the servo accordingly
def heard(phrase):
    print("I heard ", phrase)
    targets = {"go forward": 170, "go backwards": 10, "stop": 90}
    if phrase in targets:
        servo.moveTo(targets[phrase])
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# PID controllers for x and y tracking
xpid = Runtime.createAndStart("xpid", "PID")
ypid = Runtime.createAndStart("ypid", "PID")

xpid.setMode(1)
xpid.setOutputRange(-1, 1)
xpid.setPID(5.0, 0.0, 1.0)
xpid.setControllerDirection(0)
# setpoint is 80 instead of 160 because of 2 PD filters
xpid.setSetpoint(80)

ypid.setMode(1)
ypid.setOutputRange(-1, 1)
ypid.setPID(5.0, 0.0, 1.0)
ypid.setControllerDirection(0)
# setpoint is 60 instead of 120 because of 2 PD filters
ypid.setSetpoint(60)

xpid.invert()
#ypid.invert()

arduino = Runtime.createAndStart("arduino", "Arduino")
speech = Runtime.createAndStart("speech", "Speech")
pan = Runtime.createAndStart("pan", "Servo")
from time import sleep
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Runtime

# Starting the required Services
serial = Runtime.createAndStart("serial","Serial")
chessgame = Runtime.createAndStart("chessgame", "ChessGame")
log = Runtime.createAndStart("log", "Log")
speech = Runtime.create("speech","Speech")

# Configuring the Speech Service
speech.startService()
speech.setLanguage("en")
speech.speak("Game Begins!")

count = 0
chessMove = ""

# Connect the Arduino via Serial (only if not already connected)
if not serial.isConnected():
    serial.connect("COM9")

# Adding Listeners
serial.addListener("publishByte", python.name, "input")
chessgame.addListener("computerMoved", python.name, "voice")
chessgame.addListener("computerMoved", log.name, "log")
chessgame.addListener("makeMove", serial.name, "write")
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
from ispyMotors import Motors

m = Motors()
#connect to the Tank Control port
m.init_connection()
#once we're connected
if m.is_connected:
    ear = Runtime.createAndStart("ear","Sphinx")
    # start listening for the words we are interested in
    ear.startListening("go forward|go backward|turn left|turn right|head up|head down|stop")
    # set up a message route from the ear --to--> python method "heard"
    ear.addListener("recognized", python.name, "heard", String().getClass())

# this method is invoked when something is
# recognized by the ear
def heard(phrase):
    print "heard ", phrase
    if phrase == "go forward":
        m.moveForward()
    elif phrase == "go backward":
        m.moveBackward()
    elif phrase == "turn left":
        # NOTE(review): source appears truncated here - the remaining
        # command branches are missing from this chunk
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch

# create the services: data fetcher, web gui, webkit speech recognition,
# ProgramAB chatbot, html filter and Acapela TTS
wdf = Runtime.createAndStart("wikiDataFetcher", "WikiDataFetcher")
holygrail = Runtime.createAndStart("holygrail", "WebGui")
wksr = Runtime.createAndStart("webkitspeechrecognition", "WebkitSpeechRecognition")
elias = Runtime.createAndStart("elias", "ProgramAB")
elias.startSession("elias")
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")
voices = acapelaSpeech.getVoices()
# NOTE(review): this loop re-sets the same voice on every iteration;
# presumably a single setVoice("Graham") call was intended - confirm
for voice in voices:
    acapelaSpeech.setVoice("Graham")
# wire the pipeline: recognition -> chatbot -> filter -> TTS
wksr.addTextListener(elias)
elias.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)

# auto-respond based on the elapsed time c (b - a computed elsewhere)
def BT():
    global c
    c = b - a
    sleep(2)
    print c
    if (c == 2):
        resp = elias.getResponse("AUTORESPOND1")
    if (c == 4):
        resp = elias.getResponse("AUTORESPOND2")
    if (c == 6):
        # NOTE(review): source appears truncated here
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino","Arduino")
mouth = Runtime.createAndStart("mouth","Servo")
eyes_ud = Runtime.createAndStart("eyes_ud","Servo")
eyes_lr = Runtime.createAndStart("eyes_lr","Servo")
# BUG FIX: the original used lowercase `runtime.createAndStart`,
# inconsistent with every other service creation in this script;
# use the Runtime class like the rest of the file.
joystick = Runtime.createAndStart("joystick","Joystick")
joystick.setController(3)  # Set controller index
joystick.startPolling()    # Start the polling of the device

# initialize arduino
arduino.setSerialDevice("COM4", 115200, 8, 1, 0)

# attach servos: mouth pin 10, eyes up/down pin 11, eyes left/right pin 9
arduino.attach("mouth",10)
arduino.attach("eyes_ud",11)
arduino.attach("eyes_lr",9)

# servo positions at start
mouth.moveTo(120)
sleep(0.5)
eyes_ud.moveTo(90)
sleep(0.5)
eyes_lr.moveTo(90)
sleep(0.5)