コード例 #1
0
ファイル: Servo.py プロジェクト: DarkRebel/myrobotlab
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime

from time import sleep

# create the services

# create the Arduino controller and the servo service
arduino = Runtime.createAndStart("arduino","Arduino")
servo01 = Runtime.createAndStart("servo01","Servo")

# initialize arduino (Linux-style serial device name)
arduino.connect("/dev/ttyUSB0")

# TODO - set limits

# attach the servo to the arduino on pin 9
arduino.servoAttach(servo01.getName(), 9)

# fast sweep: two full swings between near-max (179) and near-min (10)
servo01.moveTo(179)
sleep(0.5)

servo01.moveTo(10)
sleep(0.5)

servo01.moveTo(179)
sleep(0.5)

servo01.moveTo(10)
sleep(0.5)
コード例 #2
0
  sleep(0.5)

tilt.moveTo(90)


# //////////OPENCV////////////////////////////////////////

from java.lang import String
from java.lang import Class
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from com.googlecode.javacv.cpp.opencv_core import CvPoint;
from org.myrobotlab.service import OpenCV

# create or get a handle to an OpenCV service
# create or get a handle to an OpenCV service
opencv = Runtime.createAndStart("opencv","OpenCV")

# add the desired filters
# pipeline order: shrink frame -> HSV threshold -> dilate -> find contours
opencv.addFilter("PyramidDown1", "PyramidDown")
opencv.addFilter("InRange1", "InRange")
opencv.addFilter("Dilate1", "Dilate")
opencv.addFilter("FindContours1", "FindContours")

# NOTE(review): configType is defined earlier in the full script -- not
# visible in this excerpt
if configType == 'GroG':
  # GroG is looking for a purple balloon; HSV bounds for that target
  opencv.setFilterCFG("InRange1","hueMin", "30")
  opencv.setFilterCFG("InRange1","hueMax", "54")
  opencv.setFilterCFG("InRange1","saturationMin", "70")
  opencv.setFilterCFG("InRange1","saturationMax", "241")
  opencv.setFilterCFG("InRange1","valueMin", "56")
  opencv.setFilterCFG("InRange1","valueMax", "89")
コード例 #3
0
from time import sleep
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Runtime
 
#Starting the required Services

serial = Runtime.createAndStart("serial","Serial")
chessgame = Runtime.createAndStart("chessgame", "ChessGame")
log = Runtime.createAndStart("log", "Log")
speech = Runtime.create("speech","Speech")

#Configuring the Speech service

speech.startService()
speech.setLanguage("en")
speech.speak("Game Begins!")

# move counter and last-move buffer (used later in the full script)
count = 0
chessMove = ""

#Connecting Arduino via Serial

if not serial.isConnected():
    serial.connect("COM9")

# Adding Listeners: route MRL events to python methods and services
# (python is the global handle to the MRL Python service)
serial.addListener("publishByte", python.name, "input")
chessgame.addListener("computerMoved", python.name, "voice")
chessgame.addListener("computerMoved", log.name, "log")
chessgame.addListener("makeMove", serial.name, "write")
 
コード例 #4
0
from jarray import array
from java.lang import String
from java.lang import Class
from org.myrobotlab.service import Runtime
from org.myrobotlab.framework import Message

catcher = Runtime.createAndStart("catcher","TestCatcher")
thrower = Runtime.createAndStart("thrower","TestThrower")

def input():
	# python catches data from thrower - then throws to catcher
    	print 'thrower sent me ', msg_thrower_send.data[0]
    	print 'modifying the ball'
    	msg_thrower_send.data[0]='throw from python->catcher'
    	print 'throwing to catcher now'
    	python.send('catcher', 'catchString', msg_thrower_send.data[0])

# thrower sends data to python
thrower.throwString('python', 'input', 'throw from thrower->python');
コード例 #5
0
# can be found ./audioFile/google/<language code>/audrey/phrase.mpe
#
# A message route is created to NOT recognize speech when MRL is talking.
# Otherwise, initially amusing scenarios can occur such as infinite loops of
# the robot recognizing "hello", then saying "hello", then recognizing "hello"...
#
# The recognized phrase can easily be hooked to additional function such as
# changing the mode of the robot, or moving it.  Speech recognition is not
# the best interface to do finely controlled actuation.  But, it is very
# convenient to express high-level (e.g. go to center of the room) commands
#
# FYI - The memory requirements for Sphinx are a bit hefty and depending on the
# platform additional JVM arguments might be necessary e.g. -Xmx256m

# start an ear (Sphinx speech recognition)
ear = Runtime.create("ear","Sphinx")
# create the grammar you would like recognized
# this must be done before the service is started
ear.createGrammar("go | stop | left | right | back")
ear.startService()

# start the mouth (speech synthesis)
mouth = Runtime.create("mouth","Speech")
mouth.startService()

# create a magabot
magabot = Runtime.create("magabot","MagaBot")
magabot.init("COM8");  # initialize arduino on port specified to 9600 8n1

# flag used by recognition callbacks later in the full script to avoid
# hearing our own speech
speaking = False
コード例 #6
0
ファイル: Roomba.py プロジェクト: BlenderCN-Org/inmoov
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import Roomba
from time import sleep
 
# create the Roomba service and connect over serial
roomba = Runtime.create("roomba","Roomba")
roomba.connect( "COM9" )
roomba.startService()
# enter startup then control mode before issuing movement commands
roomba.startup()
roomba.control()
# beep: note 72, duration 10 (units per the Roomba SCI spec -- TODO confirm)
roomba.playNote( 72, 10 )
roomba.sleep( 200 )
# short demo drive: forward, backward, spin -- 1 second each
roomba.goForward()
roomba.sleep( 1000 )
roomba.goBackward()
roomba.sleep( 1000 )
roomba.spinRight()
roomba.sleep( 1000 )
コード例 #7
0
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import MagaBot
from org.myrobotlab.service import Runtime

from time import sleep

# Create a running instance of the MagaBot Service.

magabot = Runtime.create("magabot", "MagaBot")
magabot.startService()
magabot.init("COM8")  # initialize arduino on port specified to 9600 8n1

# send order 'a' 20 times, twice a second
for x in range(0, 20):
    magabot.sendOrder('a')
    sleep(0.5)

# manual (unrolled) equivalent kept for reference
#magabot.sendOrder('a')
#sleep(0.5)
#magabot.sendOrder('a')
#sleep(0.5)
#magabot.sendOrder('a')
#sleep(0.5)
コード例 #8
0
#from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import os

# gmail credentials -- redacted in the published example; replace with
# real values (prefer loading from a config file over hard-coding)
gmail_user = "******"
gmail_pwd = "******"  # fix: original line was an unterminated string literal


# create ear (recognition), mouth (synthesis) and a live camera feed
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","Speech")
opencv = Runtime.createAndStart("opencv","OpenCV")
opencv.addFilter("pdown","PyramidDown")
opencv.setDisplayFilter("pdown")
opencv.capture()
mouth.setLanguage("it")
# start listening for the words we are interested in
ear.startListening("hello robot|take photo|send email")


# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass())

def mail(to, subject, text, attach):
            msg = MIMEMultipart()
コード例 #9
0
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import MagaBot
from org.myrobotlab.service import Runtime

from time import sleep

# Create a running instance of the MagaBot Service.

magabot = Runtime.create("magabot","MagaBot")
magabot.startService()
magabot.init("COM8")  # initialize arduino on port specified to 9600 8n1

# send order 'a' 20 times, twice a second
for x in range(0,20):
  magabot.sendOrder('a')
  sleep(0.5)

# manual (unrolled) equivalent kept for reference
#magabot.sendOrder('a')
#sleep(0.5)
#magabot.sendOrder('a')
#sleep(0.5)
#magabot.sendOrder('a')
#sleep(0.5)


コード例 #10
0
#script to control the TrashyBot platform through remote control via Xbox 360 wireless remote
#
#
#Nolan B. 1/8/16


from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Runtime
from time import sleep

#----------------------------------Web Gui--------------------------
webgui = Runtime.create("webgui", "WebGui")
webgui.autoStartBrowser(False)  # serve the UI without popping a browser
Runtime.start("webgui", "WebGui")


#---------------------------------Create Services----------------------
arduino = Runtime.createAndStart("arduino","Arduino")
joystick = Runtime.createAndStart("joystick","Joystick")
motorL = Runtime.start("motorL","Motor")
motorR = Runtime.start("motorR","Motor")
log = Runtime.start("log","Log")



#----------------------Connect Peripherals-----------------------------------
joystick.setController(0); #PC only - Pi needs new
joystick.addInputListener(python)  # route joystick events into this script
arduino.connect("/dev/ttyACM0");
# Tell the joystick to turn on
# Tell the joystick to turn on
コード例 #11
0
ファイル: 4PID.Tracking.py プロジェクト: BlenderCN-Org/inmoov
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# names used by tracking callbacks later in the full script
# (global at module level is a no-op; kept for documentation value)
global servox
global servoy
global actservox
global actservoy

# current (actual) servo positions, start centered at 90
actservox = 90
actservoy = 90

tracker = Runtime.create("tracker", "Tracking")

# create all the peer services for the first pan/tilt head
rotation = Runtime.create("rotation", "Servo")
neck = Runtime.create("neck", "Servo")
arduino = Runtime.create("arduino", "Arduino")
xpid = Runtime.create("xpid", "PID")
ypid = Runtime.create("ypid", "PID")

# second head ("b" suffix) -- same peer set
rotationb = Runtime.create("rotationb", "Servo")
neckb = Runtime.create("neckb", "Servo")
xpidb = Runtime.create("xpidb", "PID")
ypidb = Runtime.create("ypidb", "PID")

# adjust values
arduino.connect("COM3")
コード例 #12
0
# sayThings.py
# example script for MRL showing various methods
# of the Speech Service
# http://myrobotlab.org/doc/org/myrobotlab/service/Speech.html

# The preferred method for creating services is
# through the Runtime. This will allow
# the script to be rerun without creating a new
# Service each time. The initial delay from Service
# creation and caching voice files can be large, however
# after creation and caching methods should return
# immediately

# Create a running instance of the Speech Service.
# Name it "speech".
speech = Runtime.create("speech", "Speech")
speech.startService()

# Speak with initial defaults - Google en
speech.speak("hello brave new world")

# Google must have network connectivity
# the back-end will cache a sound file
# once it is pulled from Google.  So the
# first time it is slow but subsequent times its very
# quick and can be run without network connectivity.
speech.setBackendType("GOOGLE")
speech.setLanguage("en")
speech.speak("Hello World From Google.")
speech.setLanguage("pt")  # Google supports some language codes
speech.speak("Hello World From Google.")
コード例 #13
0
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import AudioCapture
from time import sleep
# record 5 seconds of audio, then play it back
audiocapture = Runtime.createAndStart("audiocapture","AudioCapture")
# start capturing audio
audiocapture.captureAudio()
# record for 5 seconds
sleep(5)
# stop recording audio
audiocapture.stopAudioCapture()
# play back the audio just recorded
audiocapture.playAudio()
コード例 #14
0
##############################################
# This script creates the parts of a ROFI and
# attaches them to MRL
#from org.myrobotlab.service import UltrasonicSensor
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import GUIService
from org.myrobotlab.service import Speech
from time import sleep

# ROFI body construction (truncated excerpt): speech, GUI, keyboard,
# spinal-cord Arduino, and the right-leg servos
# we need a dictionary of arrays which store calibration data for each servo/joint
calib = {}

# create the services
speech = Runtime.createAndStart("speech", "Speech")  # For voice feedback
#ear = Runtime.createAndStart("listen","Sphinx") # For hearing spoken commands

gui = Runtime.start("gui", "GUIService")
keyboard = Runtime.start("keyboard",
                         "Keyboard")  # For getting user confirmation

#keyboard.addKeyListener(python)

# Arduino to connect everything to like a spinal cord
arduino = Runtime.createAndStart("arduino", "Arduino")

#sr04 = Runtime.start("sr04", "UltrasonicSensor") # For an ultrasonic view of the world

# 6 joints in the Right leg
rAnkle = Runtime.createAndStart("R Ankle", "Servo")
コード例 #15
0
from jarray import array
from java.lang import String
from java.lang import Class
from org.myrobotlab.service import Runtime
from org.myrobotlab.framework import Message

# test services: catcher receives strings, thrower emits them
catcher = Runtime.createAndStart("catcher", "TestCatcher")
thrower = Runtime.createAndStart("thrower", "TestThrower")


def input():
    # python catches data from thrower - then throws to catcher
    # msg_thrower_send is injected by the MRL Python service on
    # message arrival -- TODO confirm naming against the MRL docs
    print "thrower sent me ", msg_thrower_send.data[0]
    print "modifying the ball"
    msg_thrower_send.data[0] = "throw from python->catcher"
    print "throwing to catcher now"
    python.send("catcher", "catchString", msg_thrower_send.data[0])


# thrower sends data to python, which triggers input() above
thrower.throwString("python", "input", "throw from thrower->python")
コード例 #16
0
#script to control the TrashyBot platform through remote control via Xbox 360 wireless remote
#
#
#Nolan B. 1/8/16

from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Runtime
from time import sleep

#----------------------------------Web Gui--------------------------
webgui = Runtime.create("webgui", "WebGui")
webgui.autoStartBrowser(False)  # serve the UI without popping a browser
Runtime.start("webgui", "WebGui")

#---------------------------------Create Services----------------------
arduino = Runtime.createAndStart("arduino", "Arduino")
joystick = Runtime.createAndStart("joystick", "Joystick")
motorL = Runtime.start("motorL", "Motor")
motorR = Runtime.start("motorR", "Motor")
log = Runtime.start("log", "Log")

#----------------------Connect Peripherals-----------------------------------
joystick.setController(0)
#PC only - Pi needs new
joystick.addInputListener(python)  # route joystick events into this script
arduino.connect("/dev/ttyACM0")
# Tell the joystick to turn on
joystick.startPolling()

# attach servos to Arduino

# attach servos to Arduino
コード例 #17
0
# NOTE(review): sleep and Arduino are imported earlier in the full
# script -- not visible in this excerpt
# these are D6,D7 for 2 PWM motor control
pwmPin = 6
dirPin = 7
# this is "A7" on the Arduino Mega
potPin = 61

# port = "/dev/ttyACM0"
port = "COM31"

# to help avoid buffer overruns when sampling data from the arduino
sampleRate = 25

# Runtime.start("webgui", "WebGui")
# Start the Arduino
arduino = Runtime.start("arduino", "Arduino")
arduino.connect(port)
# a slight pause to let the arduino connect (maybe not necessary)
sleep(2)
# set up the motor control pins to be output
arduino.pinMode(pwmPin, Arduino.OUTPUT)
arduino.pinMode(dirPin, Arduino.OUTPUT)
# set the analog pin to be an input pin
arduino.pinMode(potPin, Arduino.INPUT)
# set the sample rate for the analog pins
arduino.setSampleRate(sampleRate)
arduino.analogReadPollingStart(potPin)
# start the motor service and attach it to the arduino
m1 = Runtime.start("m1", "Motor")
m1.setTypeSimple(pwmPin, dirPin)
m1.attach(arduino)
コード例 #18
0
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from org.myrobotlab.opencv import OpenCVFilterInRange
from com.googlecode.javacv.cpp.opencv_core import CvPoint

# names used by the tracking callbacks later in the full script
# (global at module level is a no-op; kept for documentation value)
global posx
global posy
global newx
global newy

opencv = Runtime.createAndStart("opencv", "OpenCV")
opencv.publishState()
opencv.addFilter("pd", "PyramidDown")

# HSV threshold filter tuned for the tracked object
InRange = OpenCVFilterInRange("InRange")
InRange.useHue = True
InRange.hueMinValue = 22
InRange.hueMaxValue = 47
InRange.useSaturation = True
InRange.saturationMinValue = 88
InRange.saturationMaxValue = 246
InRange.useValue = True
InRange.valueMinValue = 178
# fix: the original assigned valueMinValue twice (178 then 255), leaving
# valueMaxValue unset -- the second assignment must set the max bound
InRange.valueMaxValue = 255
opencv.addFilter(InRange)
opencv.setDisplayFilter("InRange")
opencv.addFilter("findcontours", "FindContours")
コード例 #19
0
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
from ispyMotors import Motors

# motor helper from the project-local ispyMotors module
m = Motors()

#connect to the Tank Control port
m.init_connection()

#once we're connected, wire up speech recognition
if m.is_connected:
    ear = Runtime.createAndStart("ear", "Sphinx")

    # start listening for the words we are interested in
    ear.startListening(
        "go forward|go backward|turn left|turn right|head up|head down|stop")

    # set up a message route from the ear --to--> python method "heard"
    ear.addListener("recognized", python.name, "heard",
                    String().getClass())

    # this method is invoked when something is
    # recognized by the ear
    def heard(phrase):

        print "heard ", phrase

        if phrase == "go forward":
            m.moveForward()
        elif phrase == "go backward":
コード例 #20
0
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime

from time import sleep

# servo control pin and serial port for this machine
servoPin = 4
# comPort = "/dev/ttyUSB0"
comPort = "COM15"

# create the services
arduino = Runtime.start("arduino","Arduino")
servo01 = Runtime.start("servo01","Servo")

# initialize arduino
# arduino.connect("/dev/ttyUSB0")
arduino.connect(comPort)

# constrain the servo to the full 0-180 range
servo01.setMinMax(0, 180)

# attach servo
servo01.attach(arduino.getName(), servoPin)

# fast sweep
servo01.moveTo(179)
sleep(0.5)

servo01.moveTo(10)
sleep(0.5)
コード例 #21
0
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch
# pipeline: webkit speech -> ProgramAB (elias) -> html filter -> Acapela
wdf = Runtime.createAndStart("wikiDataFetcher", "WikiDataFetcher")
holygrail = Runtime.createAndStart("holygrail", "WebGui")
wksr = Runtime.createAndStart("webkitspeechrecognition",
                              "WebkitSpeechRecognition")
elias = Runtime.createAndStart("elias", "ProgramAB")
elias.startSession("elias")
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")
voices = acapelaSpeech.getVoices()
# fix: the original looped over every voice calling setVoice("Graham")
# each iteration -- one call selects the voice
acapelaSpeech.setVoice("Graham")
wksr.addTextListener(elias)
elias.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)


def BT():
    global c
    c = b - a
    sleep(2)
    print c
    if (c == 2):
        resp = elias.getResponse("AUTORESPOND1")
    if (c == 4):
コード例 #22
0
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# PID loops for x (pan) and y (tilt) tracking; output clamped to [-1, 1]
xpid = Runtime.createAndStart("xpid", "PID")
ypid = Runtime.createAndStart("ypid", "PID")
xpid.setMode(1)
xpid.setOutputRange(-1, 1)
xpid.setPID(5.0, 0.0, 1.0)
xpid.setControllerDirection(0)
xpid.setSetpoint(
    80)  #setpoint now is 80 instead of 160 because of 2 PD filters
ypid.setMode(1)
ypid.setOutputRange(-1, 1)
ypid.setPID(5.0, 0.0, 1.0)
ypid.setControllerDirection(0)
ypid.setSetpoint(
    60)  #set point is now 60 instead of 120 because of 2 PD filters
xpid.invert()
#ypid.invert()
arduino = Runtime.createAndStart("arduino", "Arduino")
speech = Runtime.createAndStart("speech", "Speech")
pan = Runtime.createAndStart("pan", "Servo")
コード例 #23
0
######################################
# Example Document Processing pipeline
######################################
from org.myrobotlab.service import Runtime
from org.myrobotlab.document.transformer import WorkflowConfiguration
from org.myrobotlab.document.transformer import StageConfiguration
# create the pipeline service
pipeline = Runtime.createAndStart("docproc", "DocumentPipeline")
# create a pipeline
# pipeline.workflowName = "default";
# create a workflow to load into that pipeline service
workflowConfig = WorkflowConfiguration();
workflowConfig.setName("default");
staticFieldStageConfig = StageConfiguration();
staticFieldStageConfig.setStageClass("org.myrobotlab.document.transformer.SetStaticFieldValue");
staticFieldStageConfig.setStageName("SetTableField");
# statically assign the value of "MRL" to the field "table" on the document
staticFieldStageConfig.setStringParam("table", "MRL");
workflowConfig.addStage(staticFieldStageConfig);
# a stage that sends a document to solr

# OpenNLP transformer stage, reading text from the "description" field
openNLPConfig = StageConfiguration()
openNLPConfig.setStageClass("org.myrobotlab.document.transformer.OpenNLP")
openNLPConfig.setStageName("OpenNLP")
openNLPConfig.setStringParam("textField","description")
workflowConfig.addStage(openNLPConfig)

# indexing stage: push each processed document to the Solr URL below
sendToSolrConfig = StageConfiguration();
sendToSolrConfig.setStageClass("org.myrobotlab.document.transformer.SendToSolr")
sendToSolrConfig.setStageName("SendToSolr")
sendToSolrConfig.setStringParam("solrUrl", "http://www.skizatch.org:8983/solr/graph")
コード例 #24
0
ファイル: start.py プロジェクト: Bodo11/pyrobotlab
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch
# pipeline: webkit speech -> ProgramAB (elias) -> html filter -> Acapela
wdf = Runtime.createAndStart("wikiDataFetcher", "WikiDataFetcher")
holygrail = Runtime.createAndStart("holygrail", "WebGui")
wksr = Runtime.createAndStart("webkitspeechrecognition", "WebkitSpeechRecognition")
elias = Runtime.createAndStart("elias", "ProgramAB")
elias.startSession("elias")
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")
voices = acapelaSpeech.getVoices()
# fix: the original looped over every voice calling setVoice("Graham")
# each iteration -- one call selects the voice
acapelaSpeech.setVoice("Graham")
wksr.addTextListener(elias)
elias.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)

def BT():
    global c
    c = b - a
    sleep(2)
    print c
    if (c == 2):
       resp = elias.getResponse("AUTORESPOND1")          
    if (c == 4):
	  resp = elias.getResponse("AUTORESPOND2")           
    if (c == 6):
コード例 #25
0
# download the startup sound clip; best-effort -- the script continues
# on failure with only a console message
# NOTE(review): urllib2 and soundfilename are defined earlier in the
# full script -- not visible in this excerpt
try:
	mp3file = urllib2.urlopen('http://www.inmoov.fr/wp-content/uploads/2015/05/starting-mouth.mp3')
	output = open(soundfilename,'wb')
	output.write(mp3file.read())
	output.close()
except IOError:
	print "Check access right on the directory"
except Exception:
	print "Can't get the sound File ! Check internet Connexion"



leftPort = "COM20"  #modify port according to your board

i01 = Runtime.createAndStart("i01", "InMoov")
i01.startEar()

mouth = Runtime.createAndStart("mouth","Speech")
i01.startMouth()
##############
torso = i01.startTorso("COM20")  #modify port according to your board
# tweaking default torso settings: clamp logical 0-180 input to the
# physical ranges below via map()
torso.topStom.setMinMax(0,180)
torso.topStom.map(0,180,67,110)
torso.midStom.setMinMax(0,180)
torso.midStom.map(0,180,60,120)
#torso.lowStom.setMinMax(0,180)
#torso.lowStom.map(0,180,60,110)
#torso.topStom.setRest(90)
#torso.midStom.setRest(90)
コード例 #26
0
from org.myrobotlab.framework import Message

# inputTest.py
# example script for MRL showing Python Service
# input method.  Input is a hook which allows
# other services to send data to your script.
# This script will also show a "Message" which
# is the basic form of communication between all
# Services in MRL.  Additionally it will show how to 
# extract data from the message to be used in the 
# script.

# Create a running instance of the Clock Service.
# (<<URL>> placeholders below are left over from the original tutorial)
# Name it "clock".
clock = Runtime.create("clock","Clock")
clock.startService()
# Create a running instance of the Log Service.
# <<URL>>
# Name it "log".
log = Runtime.create("log","Log")
log.startService()

# ----------------------------------
# input
# ----------------------------------
# the "input" method is a Message input sink 
# currently String is the only
# parameter supported - possibly the future
# non-primitive Java data types could be dynamically
# constructed through reflection and sent to the 
コード例 #27
0
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# create ear and mouth
ear = Runtime.createAndStart("ear", "Sphinx")
mouth = Runtime.createAndStart("mouth", "Speech")
mouth.setLanguage("it")  # Italian speech output

# single-phrase grammar
ear.startListening("find me")

tracker = Runtime.create("tracker", "Tracking")


def heard():
    # callback wired to the ear's "recognized" event elsewhere in the
    # full script; msg_ear_recognized is injected by the Python service
    data = msg_ear_recognized.data[0]
    print "heard ", data
    if (data == "find me"):
        mouth.speak("Ciao Alessandro.Ti ho trovato.Ti vedo")
        # newx/newy are expected to be set by vision code -- not
        # visible in this excerpt
        tracker.trackPoint(newx, newy)
コード例 #28
0
##############################################
# This script creates the parts of a ROFI and 
# attaches them to MRL
#from org.myrobotlab.service import UltrasonicSensor
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import GUIService
from org.myrobotlab.service import Speech
from time import sleep

# ROFI body construction (truncated excerpt): speech, GUI, keyboard,
# spinal-cord Arduino, and the right-leg servos
# we need a dictionary of arrays which store calibration data for each servo/joint
calib = {}

# create the services
speech = Runtime.createAndStart("speech","Speech") # For voice feedback
#ear = Runtime.createAndStart("listen","Sphinx") # For hearing spoken commands

gui = Runtime.start("gui", "GUIService")
keyboard = Runtime.start("keyboard", "Keyboard") # For getting user confirmation

#keyboard.addKeyListener(python)

# Arduino to connect everything to like a spinal cord
arduino = Runtime.createAndStart("arduino","Arduino")

#sr04 = Runtime.start("sr04", "UltrasonicSensor") # For an ultrasonic view of the world

# 6 joints in the Right leg
rAnkle = Runtime.createAndStart("R Ankle","Servo")
rLowLeg = Runtime.createAndStart("R Low Leg","Servo")
コード例 #29
0
ファイル: faceDetector.py プロジェクト: DarkRebel/myrobotlab
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint;
from org.myrobotlab.service import OpenCV

# create or get a handle to an OpenCV service
opencv = Runtime.create("opencv","OpenCV")
opencv.startService()
# reduce the size - face tracking doesn't need much detail
# the smaller the faster
opencv.addFilter("PyramidDown1", "PyramidDown")
# add the face detect filter
opencv.addFilter("FaceDetect1", "FaceDetect")

# ----------------------------------
# input
# ----------------------------------
# the "input" method is where Messages are sent to
# from other Services. The data from these messages can
# be accessed on based on these rules:
# Details of a Message structure can be found here
# http://myrobotlab.org/doc/org/myrobotlab/framework/Message.html 
# When a message comes in - the input function will be called
# the name of the message will be msg_+<sending service name>+_+<sending method name>
# In this particular case when the service named "opencv" finds a face it will publish
# a CvPoint.  The CvPoint can be access by msg_opencv_publish.data[0]
def input():
コード例 #30
0
from org.myrobotlab.service import Runtime

# serial link to the RGB LED controller
serial = Runtime.createAndStart("serial", "Serial")
serial.connect('COM4')

# joystick events are routed into this script by the subscription below
joystick = Runtime.start("joystick", "Joystick")
joystick.setController(0)
python.subscribe("joystick", "publishJoystickInput")


def onJoystickInput(data):
    # callback for publishJoystickInput: an input with id "R"/"G"/"B"
    # and value 1.0 selects the matching LED color
    if (data.id == "R" and data.value == 1.0):
        serial.write("255,0,0")

    if (data.id == "G" and data.value == 1.0):
        serial.write("0,255,0")

    if (data.id == "B" and data.value == 1.0):
        serial.write("0,0,255")
コード例 #31
0
from org.myrobotlab.service import Arduino

# This is a little speech recognition script.
# Use "Led On" or "Led off" to control the led .
# The led is connected on pin 13 by default
# Change the value of the variable "ledPin" if you connect the led on an other pin
# Set the right com port in the variable " comPort " and the right arduino board

# set the pin for the led
ledPin = 13

# set the com port for the arduino
comPort = "COM4"

# create ear and mouth
ear = Runtime.createAndStart("ear", "Sphinx")
mouth = Runtime.createAndStart("mouth", "Speech")

# create an Arduino service named arduino
arduino = Runtime.createAndStart("arduino", "Arduino")

# set the board type
arduino.setBoard("atmega328")  # atmega168 | mega2560 | etc

# set serial device at 57600 baud (remaining args presumably
# data bits / stop bits / parity -- TODO confirm against the MRL API)
arduino.setSerialDevice(comPort, 57600, 8, 1, 0)
sleep(1)  # give it a second for the serial device to get ready

# update the gui with configuration changes
arduino.publishState()
コード例 #32
0
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint;
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# system specific variables: start both servos centered at 90
actservox = 90
actservoy = 90

# PID loops for x/y tracking; output clamped to [-3, 3]
xpid = Runtime.createAndStart("xpid","PID")
ypid = Runtime.createAndStart("ypid","PID")
xpid.setMode(1)
xpid.setOutputRange(-3, 3)
xpid.setPID(10.0, 0, 1.0)
xpid.setControllerDirection(1)
xpid.setSetpoint(160) # we want the target in the middle of the x
ypid.setMode(1)
ypid.setOutputRange(-3, 3)
ypid.setPID(10.0, 0, 1.0)
ypid.setControllerDirection(1)
ypid.setSetpoint(120)

arduino = Runtime.createAndStart("arduino","Arduino")
pan 	= Runtime.createAndStart("pan","Servo")
tilt	= Runtime.createAndStart("tilt","Servo")
コード例 #33
0
ファイル: sayThings.py プロジェクト: BlenderCN-Org/inmoov
# sayThings.py
# example script for MRL showing various methods
# of the Speech Service 
# http://myrobotlab.org/doc/org/myrobotlab/service/Speech.html

# The preferred method for creating services is
# through the Runtime. This will allow
# the script to be rerun without creating a new
# Service each time. The initial delay from Service
# creation and caching voice files can be large, however
# after creation and caching methods should return 
# immediately

# Create a running instance of the Speech Service.
# Name it "speech". And start it right away.
speech = Runtime.createAndStart("speech","Speech")


# Speak with initial defaults - Google en
speech.speak("hello brave new world")

# Google must have network connectivity
# the back-end will cache a sound file
# once it is pulled from Google.  So the
# first time it is slow but subsequent times its very
# quick and can be run without network connectivity.
speech.setBackendType("GOOGLE")
speech.setLanguage("en")
speech.speak("Hello World From Google.")
speech.setLanguage("pt") # Google supports some language codes
speech.speak("Hello World From Google.")
コード例 #34
0
# can be found ./audioFile/google/<language code>/audrey/phrase.mpe
#
# A message route is created to NOT recognize speech when MRL is talking.
# Otherwise, initially amusing scenarios can occur such as infinite loops of
# the robot recognizing "hello", then saying "hello", then recognizing "hello"...
#
# The recognized phrase can easily be hooked to additional function such as
# changing the mode of the robot, or moving it.  Speech recognition is not
# the best interface to do finely controlled actuation.  But, it is very
# convenient to express high-level (e.g. go to center of the room) commands
#
# FYI - The memory requirements for Sphinx are a bit hefty and depending on the
# platform additional JVM arguments might be necessary e.g. -Xmx256m

# start an ear (Sphinx speech recognition)
ear = Runtime.create("ear", "Sphinx")
# create the grammar you would like recognized
# this must be done before the service is started
ear.createGrammar("go | stop | left | right | back")
ear.startService()

# start the mouth (speech synthesis)
mouth = Runtime.create("mouth", "Speech")
mouth.startService()

# create a magabot
magabot = Runtime.create("magabot", "MagaBot")
magabot.init("COM8")
# initialize arduino on port specified to 9600 8n1

# flag used by recognition callbacks later in the full script to avoid
# hearing our own speech
speaking = False
コード例 #35
0
ファイル: RGBSeriel.py プロジェクト: sola1993/inmoov
from org.myrobotlab.service import Runtime

serial = Runtime.createAndStart("serial","Serial")
serial.connect('COM4')

joystick = Runtime.start("joystick","Joystick")
joystick.setController(0)
python.subscribe("joystick","publishJoystickInput")

def onJoystickInput(data):
    # Joystick callback (wired via python.subscribe above).
    # On a button-press event (value 1.0) send the RGB payload that
    # matches the pressed button id out the serial port.
    if data.value != 1.0:
        return

    if data.id == "R":
        serial.write("255,0,0")
    elif data.id == "G":
        serial.write("0,255,0")
    elif data.id == "B":
        serial.write("0,0,255")
コード例 #36
0
ファイル: panTilt.py プロジェクト: ADVALAIN596/pyrobotlab
##############################################
# This script creates 2 servos of a pan / tilt kit
# attaches them to an Arduino and attaches
# a joystick to control the servos

from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino","Arduino")
pan 	= Runtime.createAndStart("pan","Servo")
tilt	= Runtime.createAndStart("tilt","Servo")
joystick = Runtime.createAndStart("joystick","Joystick")

# connect on COM10 at 57600 baud (remaining args presumably
# data bits / stop bits / parity — confirm with Arduino.connect docs)
arduino.connect("COM10", 57600, 8, 1, 0)

# give the serial connection a moment to settle
sleep(2)

# attach servos to Arduino pins 9 (pan) and 10 (tilt)
pan.attach(arduino.getName(), 9)
tilt.attach(arduino.getName(), 10)

# attach joystick axes to the servos:
# Z axis drives pan, Z rotation drives tilt
joystick.attach(pan, Joystick.Z_AXIS)
joystick.attach(tilt, Joystick.Z_ROTATION)
# controller index 2 — verify against the locally attached devices
joystick.setController(2);
joystick.startPolling();	
コード例 #37
0
import random
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime
from java.lang import String
from time import sleep

# WebGui hosts the browser UI (WebkitSpeechRecognition runs in the browser)
holygrail = Runtime.createAndStart("holygrail", "WebGui")
# speech-to-text via the browser's webkit speech API
wksr = Runtime.createAndStart("webkitspeechrecognition",
                              "WebkitSpeechRecognition")
# ProgramAB chatbot; start a session for user "default" with bot "simple"
simple = Runtime.createAndStart("simple", "ProgramAB")
simple.startSession("default", "simple")
# presumably strips HTML markup from bot replies before they are spoken
htmlfilter = Runtime.createAndStart("htmlfilter", "HtmlFilter")
# text-to-speech back-end
acapelaSpeech = Runtime.createAndStart("speech", "AcapelaSpeech")
# Select the TTS voice.  The original code looped over every available
# voice and called setVoice("Ryan") once per voice — the argument never
# depended on the loop variable, so the repeated calls were redundant.
# Fetch the voice list once (kept for inspection) and set the voice once.
voices = acapelaSpeech.getVoices()
acapelaSpeech.setVoice("Ryan")
# wire the pipeline: recognized text -> chatbot -> html filter -> speech
wksr.addTextListener(simple)
simple.addTextListener(htmlfilter)
htmlfilter.addTextListener(acapelaSpeech)
コード例 #38
0
from org.myrobotlab.framework import Message

# inputTest.py
# example script for MRL showing Python Service
# input method.  Input is a hook which allows
# other services to send data to your script.
# This script will also show a "Message" which
# is the basic form of communication between all
# Services in MRL.  Additionally it will show how to
# extract data from the message to be used in the
# script.

# Create a running instance of the Clock Service.
# <<URL>>
# Name it "clock".
# (create + startService is the two-step form of the
# Runtime.createAndStart call used elsewhere in this file)
clock = Runtime.create("clock", "Clock")
clock.startService()
# Create a running instance of the Log Service.
# <<URL>>
# Name it "log".
log = Runtime.create("log", "Log")
log.startService()

# ----------------------------------
# input
# ----------------------------------
# the "input" method is a Message input sink
# currently String is the only
# parameter supported - possibly the future
# non-primitive Java data types could be dynamically
# constructed through reflection and sent to the
コード例 #39
0
ファイル: minibot.py プロジェクト: vadimostanin/myrobotlab
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Runtime

from time import sleep

# You should want a differential drive service associated with 
# an Arduino
# as well as anything which can do Position Encoding (interface)

# create (but do not start) the Arduino service
arduino = Runtime.create("arduino","Arduino")

# FIXME - re-entrant and auto-save functionality
# configure serial port and baud rate before connecting
arduino.setPort("COM8")
arduino.setBaudBase(115200);

# if not loaded - load with PDE :P

# move

# report back sensor readings

# report back to simulator?

コード例 #40
0
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
  
# create ear, arduino and servo services
ear = Runtime.createAndStart("ear","Sphinx")
arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM4")
servo = Runtime.createAndStart("servo","Servo")
# servo signal wire on Arduino pin 10
servo.attach(arduino, 10)
  
# start listening for the words we are interested in
ear.startListening("go forward|go backwards|stop")
  
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass()); 
  
# this method is invoked when something is 
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
def heard(phrase):
    # Invoked via the "recognized" message route whenever the Sphinx ear
    # matches one of the phrases it is listening for; drives the servo
    # to the position associated with that phrase.
    print("I heard ", phrase)
    # phrase -> target servo position (degrees)
    for command, position in (("go forward", 170),
                              ("go backwards", 10),
                              ("stop", 90)):
        if phrase == command:
            servo.moveTo(position)
            break
コード例 #41
0
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint
from org.myrobotlab.service import OpenCV

# create or get a handle to an OpenCV service
opencv = Runtime.create("opencv", "OpenCV")
opencv.startService()
# reduce the size - face tracking doesn't need much detail
# the smaller the faster
opencv.addFilter("PyramidDown1", "PyramidDown")
# add the face detect filter
opencv.addFilter("FaceDetect1", "FaceDetect")


# ----------------------------------
# input
# ----------------------------------
# the "input" method is where Messages are sent to
# from other Services. The data from these messages can
# be accessed based on these rules:
# Details of a Message structure can be found here
# http://myrobotlab.org/doc/org/myrobotlab/framework/Message.html
# When a message comes in - the input function will be called
# the name of the message will be msg_+<sending service name>+_+<sending method name>
# In this particular case when the service named "opencv" finds a face it will publish
# a CvPoint.  The CvPoint can be accessed via msg_opencv_publish.data[0]
コード例 #42
0
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime

# create ear and mouth
ear = Runtime.createAndStart("ear","Sphinx")
mouth = Runtime.createAndStart("mouth","MarySpeech")

# register confirmation / negation words
# ("addComfirmations" is spelled this way by the Sphinx service API itself)
ear.addComfirmations("yes","correct","ya")
ear.addNegations("no","wrong","nope","nah")
#ear.startListening("hello world|happy monkey|go forward|stop|yes|correct|ya|no|wrong|nope|nah")
# start listening for the words we are interested in
ear.startListening("hello world|happy monkey|go forward|stop")

# when "hello world" is recognized, invoke the python method "helloWorld"
ear.addCommand("hello world", "python", "helloWorld")

# set up a message route from the ear --to--> python method "heard"
# ear.addListener("recognized", python.name, "heard");

# ear.addComfirmations("yes","correct","yeah","ya")
# ear.addNegations("no","wrong","nope","nah")

# ear.addCommand("hello world", "python", "helloworld")

# set up a message route from the ear --to--> python method "heard"
# ear.addListener("recognized", python.name, "heard");


# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
コード例 #43
0
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import VideoStreamer
from time import sleep
from org.myrobotlab.net import BareBonesBrowserLaunch

# create a video source (opencv) & a video streamer
opencv = Runtime.createAndStart("opencv","OpenCV")
streamer = Runtime.createAndStart("streamer","VideoStreamer")

# attach them
streamer.attach(opencv)

# add a pyramid down filter and gray to minimize the data
opencv.addFilter("pyramidDown", "PyramidDown");
opencv.addFilter("gray", "Gray");

# start the camera
opencv.capture();
# added sleep in order to give opencv the time to "warm up" the cam
sleep(3)


# open the stream in the default browser
# go to http://localhost:9090/output
BareBonesBrowserLaunch.openURL("http://localhost:9090")
コード例 #44
0
ファイル: inmoovServo.py プロジェクト: MyRobotLab/pyrobotlab
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Runtime

from time import sleep

# servo pin assignments — only servo1Pin is used below; the others are
# kept for convenience when wiring additional servos
servo1Pin = 12
servo2Pin = 13
servo3Pin = 26
servo4Pin = 4

# comPort = "/dev/ttyUSB0"
comPort = "COM6"

# create the services
arduino = Runtime.start("arduino","Arduino")
servo01 = Runtime.start("servo01","Servo")

# initialize arduino
# arduino.connect("/dev/ttyUSB0")
arduino.connect(comPort)

# constrain the servo's travel to the 60-120 degree range
servo01.setMinMax(60, 120)
  
# attach servo
servo01.attach(arduino.getName(), servo1Pin)

# move to the mid position
servo01.moveTo(90)
コード例 #45
0
ファイル: main.py プロジェクト: chromecide/MRL_ISpyWifiTank
from java.lang import String
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
from ispyMotors import Motors

# motor wrapper from the project-local ispyMotors module
m = Motors()

#connect to the Tank Control port
m.init_connection()

#once we're connected
if m.is_connected:
	ear = Runtime.createAndStart("ear","Sphinx")

	# start listening for the words we are interested in
	ear.startListening("go forward|go backward|turn left|turn right|head up|head down|stop")
	
	# set up a message route from the ear --to--> python method "heard"
	ear.addListener("recognized", python.name, "heard", String().getClass()); 
	  
	# this method is invoked when something is 
	# recognized by the ear
	def heard(phrase):
	
	      print "heard ", phrase
	      
	      if phrase == "go forward":
	          m.moveForward()
	      elif phrase == "go backward":
	      	m.moveBackward()
	      elif phrase == "turn left":
コード例 #46
0
##############################################
# This script creates 2 servos of a pan / tilt kit
# attaches them to an Arduino and attaches
# a joystick to control the servos

from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Joystick
from org.myrobotlab.service import Runtime
from time import sleep

# create the services
arduino = Runtime.createAndStart("arduino", "Arduino")
pan = Runtime.createAndStart("pan", "Servo")
tilt = Runtime.createAndStart("tilt", "Servo")
joystick = Runtime.createAndStart("joystick", "Joystick")

# connect on COM10 at 57600 baud (remaining args presumably
# data bits / stop bits / parity — confirm with Arduino.connect docs)
arduino.connect("COM10", 57600, 8, 1, 0)

# give the serial connection a moment to settle
sleep(2)

# attach servos to Arduino pins 9 (pan) and 10 (tilt)
pan.attach(arduino.getName(), 9)
tilt.attach(arduino.getName(), 10)

# attach joystick axes to the servos:
# Z axis drives pan, Z rotation drives tilt
joystick.attach(pan, Joystick.Z_AXIS)
joystick.attach(tilt, Joystick.Z_ROTATION)
# controller index 2 — verify against the locally attached devices
joystick.setController(2)
joystick.startPolling()
コード例 #47
0
import org.myrobotlab.framework.Platform as Platform
import org.myrobotlab.service.Runtime as Runtime

# InMoov2 Config : i01
# run against virtual (simulated) hardware — no physical controllers needed
Platform.setVirtual(True)
# NOTE(review): i01 is referenced but not created in this fragment;
# presumably the InMoov2 service already exists — confirm in the full script
i01.setVirtual(True)
i01.setMute(False)
##############################################################
## creating 69 services ####
# Although Runtime.start(name,type) both creates and starts services it might be desirable on creation to
# substitute peers, types or references of other sub services before the service is "started"
# e.g. i01 = Runtime.create('i01', 'InMoov') # this will "create" the service and config could be manipulated before starting 
# e.g. i01_left = Runtime.create('i01.left', 'Ssc32UsbServoController')
##############################################################

runtime = Runtime.getInstance()
# force a single locale across all services
runtime.setAllLocales('en-US')
#i01_jme = Runtime.start('i01.jme', 'JMonkeyEngine')
# start the individual InMoov sub-services (servo mixer, servos, hands, ...)
i01_servomixer = Runtime.start('i01.servomixer', 'ServoMixer')
i01_leftArm_rotate = Runtime.start('i01.leftArm.rotate', 'Servo')
i01_torso_topStom = Runtime.start('i01.torso.topStom', 'Servo')
i01_rightArm_bicep = Runtime.start('i01.rightArm.bicep', 'Servo')
security = Runtime.start('security', 'Security')
i01_head_rollNeck = Runtime.start('i01.head.rollNeck', 'Servo')
i01_leftHand = Runtime.start('i01.leftHand', 'InMoov2Hand')
i01_rightHand_ringFinger = Runtime.start('i01.rightHand.ringFinger', 'Servo')
MoveEyesTimer = Runtime.start('MoveEyesTimer', 'Clock')
i01_leftHand_majeure = Runtime.start('i01.leftHand.majeure', 'Servo')
i01_leftHand_index = Runtime.start('i01.leftHand.index', 'Servo')
i01_rightHand_majeure = Runtime.start('i01.rightHand.majeure', 'Servo')
i01_head_eyeX = Runtime.start('i01.head.eyeX', 'Servo')
コード例 #48
0
ファイル: MotorPotPID.py プロジェクト: ADVALAIN596/pyrobotlab
# these are D6,D7 for 2 PWM motor control
pwmPin = 6
dirPin = 7
# this is "A7" on the Arduino Mega
potPin = 61

# port = "/dev/ttyACM0"
port = "COM31"

# to help avoid buffer overruns when sampling data from the arduino
sampleRate = 25

# Runtime.start("webgui", "WebGui")
# Start the Arduino
arduino = Runtime.start("arduino", "Arduino")
arduino.connect(port)
# a slight pause to let the arduino connect (maybe not necessary)
sleep(2)
# set up the motor control pins to be output
arduino.pinMode(pwmPin, Arduino.OUTPUT)
arduino.pinMode(dirPin, Arduino.OUTPUT)
# set the analog pin to be an input pin
arduino.pinMode(potPin, Arduino.INPUT)
# set the sample rate for the analog pins
arduino.setSampleRate(sampleRate)
# begin streaming analog reads of the potentiometer pin
arduino.analogReadPollingStart(potPin)
# start the motor service and attach it to the arduino
m1 = Runtime.start("m1", "Motor");
# "simple" motor type: one PWM (speed) pin plus one direction pin
m1.setTypeSimple(pwmPin, dirPin);
m1.attach(arduino)
コード例 #49
0
# This is a little speech recognition script.
# Use "Led On" or "Led off" to control the led .
# The led is connected on pin 13 by default
# Change the value of the variable "ledPin" if you connect the led on another pin
# Set the right com port in the variable "comPort" and the right arduino board


# set the pin for the led (used later in the script, not visible here)
ledPin = 13

# set the com port for the arduino
comPort = "COM4"

# create ear and mouth
ear = Runtime.createAndStart("ear", "Sphinx")
mouth = Runtime.createAndStart("mouth", "Speech")

# create an Arduino service named arduino
arduino = Runtime.createAndStart("arduino", "Arduino")

# set the board type
arduino.setBoard("atmega328")  # atmega168 | mega2560 | etc

# set serial device on comPort at 57600 baud
arduino.setSerialDevice(comPort, 57600, 8, 1, 0)
sleep(1)  # give it a second for the serial device to get ready

# update the gui with configuration changes
arduino.publishState()
コード例 #50
0
from java.lang import String
from java.lang import Class
from java.awt import Rectangle
from org.myrobotlab.service import Runtime
from org.myrobotlab.service import OpenCV
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint;
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep
 
 
# Tracking service plus the peer services it is built from
tracker = Runtime.create("tracker","Tracking")

# create all the peer services (pan servo, tilt servo, controller, PIDs)
rotation = Runtime.create("rotation","Servo")
neck = Runtime.create("neck","Servo")
arduino = Runtime.create("arduino","Arduino")
xpid = Runtime.create("xpid","PID");
ypid = Runtime.create("ypid","PID");

# adjust values
arduino.connect("COM3")
# two OpenCV services are created; both select camera index 1
eye = Runtime.create("eye","OpenCV")
opencv = Runtime.create("opencv","OpenCV")
opencv.startService()
opencv.setCameraIndex(1)
eye.setCameraIndex(1)

# i'm wondering if these are working??? they are by default...
コード例 #51
0
from org.myrobotlab.opencv import OpenCVData
from com.googlecode.javacv.cpp.opencv_core import CvPoint;
from org.myrobotlab.service import OpenCV
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from time import sleep

# NOTE(review): `global` at module level is a no-op — these declarations
# have no effect here (global is only meaningful inside a function body)
global servox
global servoy
global actservox
global actservoy

# presumably the current servo positions; 90 is typical servo mid-range
actservox = 90
actservoy = 90

tracker = Runtime.create("tracker","Tracking")

# create all the peer services (pan servo, tilt servo, controller, PIDs)
rotation = Runtime.create("rotation","Servo")
neck = Runtime.create("neck","Servo")
arduino = Runtime.create("arduino","Arduino")
xpid = Runtime.create("xpid","Pid");
ypid = Runtime.create("ypid","Pid");

# a second set of peers, presumably for a second pan/tilt pair
rotationb = Runtime.create("rotationb","Servo")
neckb = Runtime.create("neckb","Servo")
xpidb = Runtime.create("xpidb","Pid");
ypidb = Runtime.create("ypidb","Pid");

# adjust values
arduino.connect("COM3")