def __init__(self, name):
    self.quit = False
    self.detectLock = False
    self.standing = False
    # Register our module with NAOqi
    ALModule.__init__(self, name)
    self.motion = ALProxy("ALMotion")
    self.posture = ALProxy("ALRobotPosture")
    self.tts = ALProxy("ALTextToSpeech")
    # Register the landmark detection module
    self.memory = ALProxy("ALMemory")
    self.ld = ALProxy("ALLandMarkDetection")
    self.ld.subscribe("NaoMarkNavigator")
    # Subscribe the "onMarkDetected" function to "LandmarkDetected".
    self.memory.subscribeToEvent("LandmarkDetected",
                                 "NaoMarkNavigator",
                                 "onMarkDetected")
    self.tts.say("Show me NAOMarks to control my movement.")
    # Start detections.
    self.startDetection()
def __init__(self, name):
    ALModule.__init__(self, name)
    # No need for IP and port here because
    # we have our Python broker connected to the NAOqi broker.
    self.word_from_nao = ["No output", 0.0]
    # Create a proxy to ALTextToSpeech for later use.
    self.tts = ALProxy("ALTextToSpeech")
    self.tts.setVolume(0.5)
    self.asr = ALProxy("ALSpeechRecognition")
    self.asr.pause(True)
    # vocabulary = ["yes", "no", "please", "hello"]
    # self.asr.setVocabulary(vocabulary, False)
    # self.asr.pause(False)
    self.asr.subscribe("ASR_External_Python")
    # Subscribe to the WordRecognized event:
    global memory
    self.memory = ALProxy("ALMemory")
    self.memory.subscribeToEvent("WordRecognized",
                                 "SpeachRec",
                                 "onWordDetected")
    self.asr.pause(True)
def __init__(self, name): ALModule.__init__(self, name) self.tts = ALProxy("ALTextToSpeech") global memory memory = ALProxy("ALMemory") #开始识别 memory.raiseEvent("GLOABALSTATE",1) #订阅语音识别和语义理解事件 memory.subscribeToEvent("SpeechResult", "SpeechInstance", "onSpeechResult") #订阅识别超时事件 memory.subscribeToEvent("TimeOut", "SpeechInstance", "onTimeOut") #订阅云端返回错误事件 memory.subscribeToEvent("ErrorEvent", "SpeechInstance", "onErrorEventHandle") #订阅头部中间按键监听事件 memory.subscribeToEvent("MiddleTactilTouched", "SpeechInstance", "onMiddleHeadTouched") #订阅头后部按键监听事件 memory.subscribeToEvent("RearTactilTouched", "SpeechInstance", "onRearHeadTouched")
def __init__(self, ip, port, _myBroker): ALModule.__init__(self, "BasicMotions") myBroker=_myBroker bModulePresent = myBroker.isModulePresent("BasicMotions") print("BasicMotions module status:", bModulePresent) self.tts = ALProxy("ALTextToSpeech") global memory memory = ALProxy("ALMemory") self.SubscribeAllTouchEvent() self.NAOip = ip self.NAOport = port self.createEyeGroup() self.eyeColor={'happy': 0x0000FF00, 'sad': 0x00600088, 'scared1': 0x00000060, 'scared2': 0x00000060, 'fear': 0x00000060, 'hope': 0x00FFB428, 'anger': 0x00FF0000} self.eyeShape={'happy': "EyeTop", 'sad': "EyeBottom", 'scared1': "EyeNone", 'scared2': "EyeNone", 'fear': "EyeBottom", 'hope': "EyeTop", 'anger': "EyeTopBottom"} self.bScared = False
def __init__(self, name, robot_controller, color="red", diameter=0.2): ALModule.__init__(self, name) self.name = name self.blobDetector = robot_controller.blobProxy if color == "red": pprint("Looking for a red ball...") self.blobDetector.setColor(*self.red) elif color == "blue": pprint("Looking for a blue ball...") self.blobDetector.setColor(*self.blue) elif color == "yellow": pprint("Looking for a yellow ball...") self.blobDetector.setColor(*self.yellow) else: pwrite(">> Warning << Invalid color set in BallDetector! ") print("Defaulting to red...") self.blobDetector.setColor(*self.red) self.blobDetector.setObjectProperties(20, diameter, "Circle") self.memoryProxy = robot_controller.memoryProxy self.memoryProxy.subscribeToEvent("ALTracker/ColorBlobDetected", self.name, "onBlobDetection") self.motionProxy = robot_controller.motionProxy self.camProxy = robot_controller.camProxy pprint("BallDetector initialized!") self.lock = threading.Lock() self.info = None
def __init__(self, name): """ Initialise module. """ ALModule.__init__(self, name) # Globals for proxies global touchProxy global memoryProxy self.rightHandFlag = False self.leftHandFlag = False self.subscriptionListRight = [ "HandRightBackTouched", "HandRightLeftTouched", "HandRightRightTouched" ] self.subscriptionListLeft = [ "HandLeftBackTouched", "HandLeftLeftTouched", "HandLeftRightTouched" ] # Setup proxies try: touchProxy = ALProxy("ALTouch") except Exception, e: print "Could not create proxy to ALTouch. Error: ", e
def __init__(self, name):
    # Base class initialisation.
    ALModule.__init__(self, name)
    self.name = name  # Record the instance name; needed when subscribing to events.

    # ----------> Class member variables <----------
    # Assign a code to each of the three touch areas on the robot's head:
    #   Head/Touch/Front  = 1
    #   Head/Touch/Middle = 2
    #   Head/Touch/Rear   = 3
    self.HEAD_FRONT = 1
    self.HEAD_MIDDLE = 2
    self.HEAD_REAR = 3
    # Password sequence: verification only succeeds if the robot is touched in exactly
    # this order. Each element is a single digit/character that the robot later reads
    # back as voice feedback.
    self.password = [1, 3, 2, 3, 1, 2]
    # Records the password entered by the user, compared against the correct one at the end.
    self.input_passwd = []
    # Verification flag; set to True when the correct password is entered. Defaults to False.
    self.verify_flag = False

    # naoqi.ALProxy
    try:
        # Voice feedback
        self.tts = ALProxy("ALTextToSpeech")
        # Touch event subscription
        self.memory = ALProxy("ALMemory")
    except Exception, e:
        print "Could not create proxy by ALProxy in Class MP3player"
        print "Error: ", e
def __init__(self):
    ALModule.__init__(self, 'sensorModule')
    modules.append(self)
    self.memory = ALProxy('ALMemory')
    try:
        self.memory.subscribeToEvent("FrontTactilTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("RearTactilTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandLeftLeftTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandLeftRightTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandLeftBackTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandRightLeftTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandRightRightTouched", "sensorModule", "onTactilTouched")
        self.memory.subscribeToEvent("HandRightBackTouched", "sensorModule", "onTactilTouched")
    except Exception, e:
        print e
def __init__(self, name):
    ALModule.__init__(self, name)
    rospy.init_node('acore_state_server')
    self.moduleName = name
    self.connectNaoQi()
    self.startSubscriber()
    self.dataNamesList = ["DCM/Time",
                          "Device/SubDeviceList/InertialSensor/AngleX/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/AngleY/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/AngleZ/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/GyroscopeX/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/GyroscopeY/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/GyroscopeZ/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/AccelerometerX/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/AccelerometerY/Sensor/Value",
                          "Device/SubDeviceList/InertialSensor/AccelerometerZ/Sensor/Value"]
    self.FSRdataList = ["Device/SubDeviceList/LFoot/FSR/FrontLeft/Sensor/Value",
                        "Device/SubDeviceList/LFoot/FSR/FrontRight/Sensor/Value",
                        "Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value",
                        "Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value",
                        "Device/SubDeviceList/RFoot/FSR/FrontLeft/Sensor/Value",
                        "Device/SubDeviceList/RFoot/FSR/FrontRight/Sensor/Value",
                        "Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value",
                        "Device/SubDeviceList/RFoot/FSR/RearRight/Sensor/Value"]
    self.MsgsInit()
def __init__(self, name): self.quit = False global memory # Register our module with NAOqi ALModule.__init__(self, name) memory = ALProxy("ALMemory") self.posture = ALProxy("ALRobotPosture") self.tts = ALProxy("ALTextToSpeech") # Disable autonomous movement. self.am = ALProxy("ALAutonomousMoves") self.am.setExpressiveListeningEnabled(False) # Register speech recognition self.asr = ALProxy("ALSpeechRecognition") self.asr.setLanguage("English") self.asr.setVisualExpression(True) self.stopListening() self.asr.setVocabulary(["sit", "stand", "goodbye"], False) # Subscribe to the speech recognition events. self.asr.subscribe("SpeechRecognition") # Start speech recognition engine. self.startListening() # Subscribe the "onWordRecognized" function to "WordRecognized". memory.subscribeToEvent("WordRecognized", "SpeechRecognition", "onWordRecognized")
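# A minimal companion callback for the subscription above, sketched here as an
# assumption (it is not part of the original snippet): NAOqi delivers the
# "WordRecognized" value as a word/confidence list, so the handler filters on
# confidence before acting on the proxies created in __init__.
def onWordRecognized(self, key, value, message):
    # Hypothetical handler: value is typically a list like ["sit", 0.62, ...].
    word, confidence = value[0], value[1]
    if confidence < 0.4:
        return
    if word == "sit":
        self.posture.goToPosture("Sit", 0.5)
    elif word == "stand":
        self.posture.goToPosture("Stand", 0.5)
    elif word == "goodbye":
        self.tts.say("Goodbye")
        self.quit = True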
def __init__(self, name): ALModule.__init__(self, name) print "Inicializacija" self.pname = rospy.get_param('/pepper_name') self.pub_sound = rospy.Publisher(self.pname + "/sound/located", SoundLocated, queue_size=10)
def __init__(self, name):
    self.touched = False
    ALModule.__init__(self, name)
    # Subscribe to the TouchChanged event:
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("TouchChanged",
                            "ReactToTouch",
                            "onTouched")
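# Companion callback, sketched as an assumption (it does not appear in the original
# snippet, but mirrors the standard NAOqi "ReactToTouch" pattern): unsubscribe while
# handling so the callback is not re-entered, then resubscribe.
def onTouched(self, strVarName, value):
    global memory
    memory.unsubscribeToEvent("TouchChanged", "ReactToTouch")
    self.touched = True
    memory.subscribeToEvent("TouchChanged",
                            "ReactToTouch",
                            "onTouched")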
def __init__(self, name):
    # Base class initialisation.
    ALModule.__init__(self, name)
    self.name = name  # Record the instance name; needed when subscribing to events.

    # ----------> Class member variables <----------
    # Assign a code to each of the three touch areas on the robot's head:
    #   Head/Touch/Front  = 1
    #   Head/Touch/Middle = 2
    #   Head/Touch/Rear   = 3
    self.HEAD_FRONT = 1
    self.HEAD_MIDDLE = 2
    self.HEAD_REAR = 3
    # Password sequence: verification only succeeds if the robot is touched in exactly
    # this order. Each element is a single digit/character that the robot later reads
    # back as voice feedback.
    self.password = [1, 3, 2, 3, 1, 2]
    # Records the password entered by the user, compared against the correct one at the end.
    self.input_passwd = []
    # Verification flag; set to True when the correct password is entered. Defaults to False.
    self.verify_flag = False

    # naoqi.ALProxy
    try:
        # Voice feedback
        self.tts = ALProxy("ALTextToSpeech")
        # Touch event subscription
        self.memory = ALProxy("ALMemory")
    except Exception, e:
        print "Could not create proxy by ALProxy in Class MP3player"
        print "Error: ", e
def __init__(self, name):
    ALModule.__init__(self, name)
    # self.BIND_PYTHON(self.getName(), "callback")
    self.tts = ALProxy("ALTextToSpeech", PEPPER_IP, 9559)
    # print("PepperModule Initialyze")
def __init__(self, name, memory):
    ALModule.__init__(self, name)
    self.__name = name
    self.__memory = memory
    self.__faceDetectedEvent = "FaceDetected"
    self.__faceDetectedFunction = "onFaceDetected"
    self.__pictureDetectedEvent = "PictureDetected"
    self.__pictureDetectedFunction = "onPictureDetected"
    self.__speechDetectedEvent = "SpeechDetected"
    self.__speechDetectedFunction = "onSpeechDetected"
    self.__wordRecognizedEvent = "WordRecognized"
    self.__onWordRecognizedFunction = "onWordRecognized"
    self.__lastWordRecognizedEvent = "LastWordRecognized"
    self.__onLastWordRecognizedFunction = "onLastWordRecognized"
    self.__handRightBackTouched = "HandRightBackTouched"
    self.__handRightLeftTouched = "HandRightLeftTouched"
    self.__handRightRightTouched = "HandRightRightTouched"
    self.__handLeftBackTouched = "HandLeftBackTouched"
    self.__handLeftLeftTouched = "HandLeftLeftTouched"
    self.__handLeftRightTouched = "HandLeftRightTouched"
    self.__frontTactilTouched = "FrontTactilTouched"
    self.__middleTactilTouched = "MiddleTactilTouched"
    self.__rearTactilTouched = "RearTactilTouched"
    self.__onTactileEventFunction = "onTactileEvent"
def __init__(self, name):
    ALModule.__init__(self, name)
    try:
        redBallDetectionProxy = ALProxy("ALRedBallDetection", NAO_IP, 9559)
    except Exception, e:
        print "Couldn't create proxy to ALRedBallDetection.\nError was:", e
def __init__(self, name): ALModule.__init__(self, name) self.name = name self.tts = ALProxy("ALTextToSpeech") self.posture = ALProxy("ALRobotPosture") self.memory = ALProxy("ALMemory") self.memory.subscribeToEvent("HandDetectedEvent", name, "handleDetection")
def __init__(self, ip, port, publisher): # Get a (unique) name for naoqi module which is based on the node name # and is a valid Python identifier (will be useful later) self.naoqi_name = "ros" + rospy.get_name().replace("/", "_") #Start ALBroker (needed by ALModule) self.broker = ALBroker( self.naoqi_name + "_broker", "0.0.0.0", # listen to anyone 0, # find a free port and use it ip, # parent broker IP port # parent broker port ) #Init superclassALModule ALModule.__init__(self, self.naoqi_name) self.memory = ALProxy("ALMemory") self.proxy = ALProxy("ALSpeechRecognition") #Keep publisher to send word recognized self.pub = publisher #Install global variables needed by Naoqi self.install_naoqi_globals()
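# install_naoqi_globals() is called above but its body is not shown; the following is
# a plausible sketch, assuming the common convention for NAOqi Python modules: the
# bindings look the module up by name in the main module's globals, so the instance
# and its broker are exported there under self.naoqi_name.
def install_naoqi_globals(self):
    # Hypothetical implementation: expose the module and broker under the
    # names NAOqi expects when dispatching ALMemory callbacks.
    globals()[self.naoqi_name] = self
    globals()[self.naoqi_name + "_broker"] = self.broker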
def __init__(self, name):
    ALModule.__init__(self, name)
    self.callBack = None
    self.globalObjectName = name
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("FaceDetected", self.globalObjectName, "onFaceDetected")
def __init__(self, name):
    ALModule.__init__(self, name)
    # No need for IP and port here because
    # we have our Python broker connected to the NAOqi broker.
    # Create proxies for later use.
    self.motionProxy = ALProxy("ALMotion")
    self.postureProxy = ALProxy("ALRobotPosture")
    self.ttsProxy = ALProxy("ALTextToSpeech")
    self.ledsProxy = ALProxy("ALLeds")
    self.videoRecorderProxy = ALProxy("ALVideoRecorder")
    self.audioRecorderProxy = ALProxy("ALAudioRecorder")
    self.checkRecorder = True
    # Subscribe to the LeftBumperPressed event:
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("LeftBumperPressed",
                            "ReactToTouch",
                            "onTouchedLeft")
    # Subscribe to the RightBumperPressed event:
    memory.subscribeToEvent("RightBumperPressed",
                            "ReactToTouch",
                            "onTouchedRight")
def __init__(self, name):
    ALModule.__init__(self, name)
    # No need for IP and port here because
    # we have our Python broker connected to the NAOqi broker.
    head_touch = 0
    right_touch = 0
    left_touch = 0
    # Subscribe to the head and hand touch events:
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("MiddleTactilTouched",
                            "ReactToTouch",
                            "onheadTouched")
    # memory.subscribeToEvent("FrontTactilTouched",
    #                         "ReactToTouch",
    #                         "onheadTouched")
    # memory.subscribeToEvent("RearTactilTouched",
    #                         "ReactToTouch",
    #                         "onheadTouched")
    memory.subscribeToEvent("HandRightBackTouched",
                            "ReactToTouch",
                            "onrightTouched")
    # memory.subscribeToEvent("HandRightLeftTouched",
    #                         "ReactToTouch",
    #                         "onrightTouched")
    # memory.subscribeToEvent("HandRightRightTouched",
    #                         "ReactToTouch",
    #                         "onrightTouched")
    memory.subscribeToEvent("HandLeftBackTouched",
                            "ReactToTouch",
                            "onleftTouched")
def __init__(self, ip, port, publisher): # Get a (unique) name for naoqi module which is based on the node name # and is a valid Python identifier (will be useful later) self.naoqi_name = "ros" + rospy.get_name().replace("/", "_") #Start ALBroker (needed by ALModule) self.broker = ALBroker(self.naoqi_name + "_broker", "0.0.0.0", # listen to anyone 0, # find a free port and use it ip, # parent broker IP port # parent broker port ) #Init superclassALModule ALModule.__init__( self, self.naoqi_name ) self.memory = ALProxy("ALMemory") self.proxy = ALProxy("ALSpeechRecognition") #Keep publisher to send word recognized self.pub = publisher #Install global variables needed by Naoqi self.install_naoqi_globals()
def __init__(self, name): ALModule.__init__(self, name) self.tts = ALProxy("ALTextToSpeech") global memory memory = ALProxy("ALMemory") memory.subsribeToEvent("FaceDetected", "HumanGreeters", "reactToFaces")
def __init__(self, id):
    # The id passed to the constructor must be the same id provided during event subscription.
    ALModule.__init__(self, id)
    self.logger = Logger()
    self.logger.logInfo('INFO', "Initializing module")
    # No need for IP and port here because
    # we have our Python broker connected to the NAOqi broker.
    self.id = id
    self.photo_counter = 0
    self.last_photo_taken = -1
    self.is_uploading_photo = False
    # Stop NAO from moving around.
    self.awareness = ALProxy('ALBasicAwareness')
    self.motion = ALProxy('ALMotion')
    self.ttsProxy = ALProxy("ALTextToSpeech")
    self.photo_capture_proxy = ALProxy("ALPhotoCapture", NAO_IP, 9559)
    # Resolutions:
    #   0: qqvga =  160 x 120
    #   1: qvga  =  320 x 240
    #   2: vga   =  640 x 480
    #   3: xvga  = 1280 x 960
    self.photo_capture_proxy.setResolution(3)
    self.photo_capture_proxy.setPictureFormat("jpg")
    self.logger.logInfo('INFO', "Module initialized")
def __init__(self, name):
    global memory
    self.blockInput = False
    # Register our module with NAOqi
    ALModule.__init__(self, name)
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("RightBumperPressed",
                            "FiniteStateMachine",
                            "onRightBumperPressed")
    memory.subscribeToEvent("LeftBumperPressed",
                            "FiniteStateMachine",
                            "onLeftBumperPressed")
    self.leds = ALProxy("ALLeds")
    # States of the FSM
    self.earLedsOn = False
    self.eyeColor = EYE_COLOR_A
    # Turn the ear and face LEDs off.
    self.leds.fadeRGB("EarLeds", 0x000000, 0)
    self.leds.fadeRGB("FaceLeds", 0x000000, 0)
def __init__(self, name): self.IP = "169.254.67.213" self.PORT = 9559 ALModule.__init__(self, name) try: global memory memory = ALProxy("ALMemory", self.IP, self.PORT) except Exception as e: print("Error: ", e) try: global faceProxy faceProxy = ALProxy("ALFaceDetection", self.IP, self.PORT) except Exception as e: print("Error: ", e) try: global asr asr = ALProxy("ALSpeechRecognition", self.IP, self.PORT) except Exception as e: print("Error: ", e) try: global tts tts = ALProxy("ALTextToSpeech", self.IP, self.PORT) except Exception as e: print("Error: ", e) vocabulary = ["x"] asr.pause(True) asr.setLanguage("English") asr.setVocabulary(vocabulary, False) asr.pause(False) memory.subscribeToEvent("FaceDetected", "FaceDet", "onFaceDetected") tts.setVolume(0.9)
def __init__(self): print("开始") ALModule.__init__(self, "humanEventWatcher") global memory memory = ALProxy("ALMemory", ip_robot, port_robot) memory.subscribeToEvent("ALBasicAwareness/HumanTracked", "humanEventWatcher", "onHumanTracked") memory.subscribeToEvent("ALBasicAwareness/PeopleLeft", "humanEventWatcher", "onPeopleLeft") memory.subscribeToEvent("SpeechDetected", "humanEventWatcher", "onSpeechDetected") memory.subscribeToEvent("ALSpeechRecognition / IsRunning", "humanEventWatcher", "onALSpeechDetected") memory.subscribeToEvent("FrontTactilTouched", "humanEventWatcher", "onFrontTactilTouched") memory.subscribeToEvent('WordRecognized', "humanEventWatcher", 'wordRecognized') self.speech_reco = ALProxy("ALSpeechRecognition", ip_robot, port_robot) self.text_to_speech=ALProxy("ALTextToSpeech", ip_robot, port_robot) self.is_speech_reco_started = False self.photo_apture=ALProxy("ALPhotoCapture", ip_robot, port_robot) self.cameraMap = { 'Top': 0, 'Bottom': 1 } self.camera_id=0 self.recordFolder = "/home/nao/recordings/cameras/" self.flag=True
def __init__(self, name): ALModule.__init__(self, name) self.name = name self.memory = ALProxy("ALMemory") self.motion = ALProxy("ALMotion") self.memory.subscribeToEvent("ColorDetectedEvent", name, "handleDetection")
def __init__(self, name): ALModule.__init__(self, name) # Create proxies for the instance. global memory memory = ALProxy("ALMemory") self.tts = ALProxy("ALTextToSpeech") self.leds = ALProxy("ALLeds") self.motion = ALProxy("ALMotion") # Write empty valence and arousal values to memory. valence = 0 arousal = 0 param1 = 'null' current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (param1, "param2")] memory.insertData("Emotion/Current", current_emotion) # Disable ALAutonomousLife to better demonstrate emotional actions. self.autonomous_life = ALProxy("ALAutonomousLife") if (self.autonomous_life.getState() != "disabled"): self.autonomous_life.setState("disabled") time.sleep(1.0) self.motion.wakeUp() # Run behaviour when a tactile touched. memory.subscribeToEvent("VAChanged", self.getName(), "express_current_emotion")
def __init__(self, name): ALModule.__init__(self, name) self.name = name self.tts = ALProxy("ALTextToSpeech") self.memory = ALProxy("ALMemory") self.memory.subscribeToEvent("RedBallDetectedEvent", name, "handleBallDetection")
def __init__(self, name):
    ALModule.__init__(self, name)
    # No need for IP and port here because
    # we have our Python broker connected to the NAOqi broker.
    # HUE service
    self._hue = None
    self._teller = None
    # Create a proxy to ALTextToSpeech for later use
    global tts
    tts = ALProxy("ALTextToSpeech", NAO_IP, 9559)
    # Subscribe to the MiddleTactilTouched event:
    global memory
    self.leds = ALProxy("ALLeds", NAO_IP, 9559)
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("MiddleTactilTouched",
                            "HumanGreeter",
                            "onMiddleTouchSensed")
    # memory.unsubscribeToEvent("WordRecognized",
    #                           "HumanGreeter")
    speechrecog = ALProxy("ALSpeechRecognition")
    speechrecog.setLanguage("French")
    wordList = ["bleu", "rouge", "vert", "jaune", "porte", "température", "meteo"]
    try:
        speechrecog.setVocabulary(wordList, True)
    except Exception as ex:
        _logger.warning("Got exception: %s", ex)
    tts.say("Je suis prêt à recevoir des ordres")
def __init__(self, NAME):
    ALModule.__init__(self, NAME)
    try:
        UpgradedBot().memory.unsubscribeToEvent("WordRecognized", "GETVOICE")
    except BaseException, e:
        print(str(e))
def __init__(self, name): ALModule.__init__(self, name) self.memory = ALProxy("ALMemory") self.subscriptions = [] print "... initialized " + self.getName()
def __init__(self, name):
    ALModule.__init__(self, name)
    self.name = name
    # self.tts = ALProxy("ALTextToSpeech")
    self.memory = ALProxy("ALMemory")
    self.motion = ALProxy("ALMotion")
    self.memory.subscribeToEvent("RedBallDetectedEvent", name, "handleBallDetection")
def __init__(self, name): ALModule.__init__(self, name) try: self.asr = ALProxy("ALSpeechRecognition") except Exception as e: self.asr = None self.memory = ALProxy("ALMemory")
def __init__(self, name):
    ALModule.__init__(self, name)
    self.name = name
    self.PEPPER_IP = "172.18.33.122"
    self.state = 0
    self.fps = 30
    self.resolution = vision_definitions.kVGA
    self.colorSpace = vision_definitions.kRGBColorSpace
    self.session = None
    self.autonomousLifeProxy = None
    self.memoryProxy = None
    self.tabletProxy = None
    self.tabletResolution = (1280, 800)
    self.cameraProxy = None
    self.cameraClient = None
    self.lastImage = None
    self.emotionsArray = []
    self.classes = ['angry', 'disgusted', 'fearful', 'happy',
                    'sad', 'surprised', 'neutral']
    self.tts = None
    self.faceService = None
    self.trackingEnabled = True
def _validate(self, context): """ Component validated """ _logger.debug("Validating speech...") # Register the module as a global in __main__ constants.register_almodule(self._name, self) # Initialize the module ALModule.__init__(self, self._name) # Get the "memory" proxy, to register to callbacks self._memory = ALProxy("ALMemory") # Just to be sure... try: self._memory.unsubscribeToEvent("WordRecognized", self._name) except: _logger.debug("Speech wasn't yet registered") # Create the proxy self._recog = ALProxy("ALSpeechRecognition") self._recog.setLanguage("French") # We're ready self._can_recog.set() _logger.debug("Speech ready")
def __init__(self, name, broker):
    ALModule.__init__(self, name)
    self.object_gesture = ALProxy('NAOObjectGesture', broker)
    self.memory = ALProxy('ALMemory', broker)
    self.data = []
    self.time_start = time.time()
def __init__(self, name):
    # Constructor of the class; self refers to the instance, name is a string.
    ALModule.__init__(self, name)  # Call the constructor of ALModule.
    self.tts = ALProxy("ALTextToSpeech", ip, 9559)       # Proxy to the TTS module.
    self.asr = ALProxy("ALSpeechRecognition", ip, 9559)  # Proxy to the ASR module.
    self.memory = ALProxy("ALMemory", ip, 9559)          # Proxy to the memory module.
    self.num1 = random.randint(1, 10)  # Two integers randomly selected from 1 to 10.
    self.num2 = random.randint(1, 10)
    # Randomly choose the operator that is then applied to the equation.
    self.operator = random.choice(["+", "-"])
    self.tts.setLanguage("English")  # Set the language NAO uses for talking.
    if self.operator == "-":
        # NAO is programmed to create equations with a positive result,
        # so the numbers are compared to ensure the larger one comes first.
        if self.num1 > self.num2:
            # The result is evaluated and converted to a string so NAO can say it.
            self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))
            self.operator = " minus "  # And so is the operator.
            # The question is created.
            self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"
        else:
            self.result = str(eval(str(self.num2) + self.operator + str(self.num1)))
            self.operator = " minus "
            self.question = "What is the result of " + str(self.num2) + self.operator + str(self.num1) + "?"
    else:
        self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))
        self.operator = " plus "
        self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"
    print self.question  # The question is printed to the terminal.
    print self.result    # The result is printed to the terminal.
    self.tts.say(self.question)  # NAO says the question.
    self.speech_recognition()    # Call the speech_recognition method.
def __init__(self, name):
    global memory
    self.codeProgression = 0
    self.quit = False
    # Register our module with NAOqi
    ALModule.__init__(self, name)
    memory = ALProxy("ALMemory")
    self.tts = ALProxy("ALTextToSpeech")
    self.motion = ALProxy("ALMotion")
    # Subscribe to the rear tactile sensor touch event.
    memory.subscribeToEvent("RearTactilTouched",
                            "BallTracker",
                            "onRearTactilTouched")
    # Register speech recognition
    self.asr = ALProxy("ALSpeechRecognition")
    self.asr.setLanguage("English")
    self.asr.setVisualExpression(True)
    # Subscribe to speech detection
    memory.subscribeToEvent("WordRecognized",
                            "BallTracker",
                            "onWordRecognized")
    # Create the tracker.
    self.redBallTracker = ALProxy("ALRedBallTracker")
    self.startListening()
def __init__(self, name, state):
    ALModule.__init__(self, name)
    self.state = state
    self.leds = ALProxy("ALLeds")
    self.logs.display("Subscribed to an ALLeds proxy", "Good")
def __init__(self, name, Nao):
    ALModule.__init__(self, name)
    self.Nao = Nao
    # Event subscriptions
    self.Nao.memoryProxy.subscribeToEvent("NaoMultimediaRequest",
                                          "NaoMultimedia",
                                          "onNaoMultimediaRequest")
def __init__(self, name): ALModule.__init__(self, name) self.tts = ALProxy("ALTextToSpeech") self.motionProxy = ALProxy("ALMotion") # Subscribe to TouchChanged event: global memory memory = ALProxy("ALMemory") memory.subscribeToEvent("TouchChanged", "ReactToTouch", "onTouched")
def __init__(self, name, spelling):
    self.al = ALNetworkManager()
    self.tts = ALProxy("ALTextToSpeech", NAO_IP, 9559)
    self.memory = ALProxy("ALMemory")
    self.module_name = name
    self.spelling = spelling
    self.connected = False
    ALModule.__init__(self, name)
def __init__(self, name, event_root_path):
    ALModule.__init__(self, name)
    self.memory = ALProxy("ALMemory")
    self.name = name
    self.event_root_path = event_root_path
    self.status = "stop"
    self.status_handler = None
    self.subscribed_on_received_status = False
def __init__(self, name, interaction_manager):
    ALModule.__init__(self, name)
    self._interaction_manager = interaction_manager
    self._name = name
    self._speech_count = 0
    # Register all necessary event handlers.
    self._register_event_handler()
def __init__(self, _name):
    self.name = _name
    ALModule.__init__(self, _name)
    self.memory = ALProxy("ALMemory")
    self.speechRecognizer = ALProxy("ALSpeechRecognition")
    # Unsubscribe any existing ASR clients before setting a new vocabulary.
    for subscriber in self.speechRecognizer.getSubscribersInfo():
        self.speechRecognizer.unsubscribe(subscriber[0])
    vocabulary = ["bravo"]
    self.speechRecognizer.setVocabulary(vocabulary, False)
def __init__(self, ip, port, _myBroker):
    self.NAOip = ip
    self.NAOport = port

    # ======== SETUP FOR MOTION ========
    ALModule.__init__(self, "BasicMotions")
    self.myBroker = _myBroker
    bModulePresent = self.myBroker.isModulePresent("BasicMotions")
    print("BasicMotions module status:", bModulePresent)
    global memory
    memory = ALProxy("ALMemory")
    self.SubscribeAllTouchEvent()
    self.createEyeGroup()
    self.eyeColor = {'happy': 0x0000FF00,
                     'sad': 0x00600088,
                     'scared1': 0x00000060,
                     'scared2': 0x00000060,
                     'fear': 0x00000060,
                     'hope': 0x00FFB428,
                     'anger': 0x00FF0000}
    self.eyeShape = {'happy': "EyeTop",
                     'sad': "EyeBottom",
                     'scared1': "EyeNone",
                     'scared2': "EyeNone",
                     'fear': "EyeBottom",
                     'hope': "EyeTop",
                     'anger': "EyeTopBottom"}
    self.bScared = False

    # ======== SETUP FOR VOICE ========
    self.tts = ALProxy("ALTextToSpeech")
    audioProxy = ALProxy("ALAudioDevice")
    audioProxy.setOutputVolume(100)
    # Pitch tags; valid value: 50 to 200
    self.ttsPitch = {'default': "\\vct=100\\",
                     'happy': "\\vct=120\\",
                     'sad': "\\vct=50\\",
                     'scared': "\\vct=150\\",
                     'fear': "\\vct=60\\",
                     'hope': "\\vct=100\\",
                     'anger': "\\vct=60\\"}
    # Speed tags; valid value: 50 to 400
    self.ttsSpeed = {'default': "\\rspd=100\\",
                     'happy': "\\rspd=100\\",
                     'sad': "\\rspd=70\\",
                     'scared': "\\rspd=130\\",
                     'fear': "\\rspd=100\\",
                     'hope': "\\rspd=100\\",
                     'anger': "\\rspd=110\\"}
    # Volume tags; valid value: 0 to 100
    self.ttsVolume = {'default': "\\vol=050\\",
                      'happy': "\\vol=060\\",
                      'sad': "\\vol=035\\",
                      'scared': "\\vol=060\\",
                      'fear': "\\vol=050\\",
                      'hope': "\\vol=050\\",
                      'anger': "\\vol=060\\"}

    self.createDialog()
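# Hypothetical usage helper (not part of the original snippet): the \vct, \rspd and
# \vol entries defined above are inline ALTextToSpeech tags, so an emotional utterance
# can be built by prefixing them to the text before calling say().
def sayWithEmotion(self, text, emotion='default'):
    # Concatenate the pitch, speed and volume tags for the chosen emotion.
    self.tts.say(self.ttsPitch[emotion] +
                 self.ttsSpeed[emotion] +
                 self.ttsVolume[emotion] +
                 text)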
def __init__(self, name): ALModule.__init__(self, name) try: self.__sr = ALProxy("ALSpeechRecognition") self.__sr.setLanguage("French") except Exception, e: print("Impossible de définir la restriction de vocabulaire") print("Probable utilisation d'un robot virtuel, on continue") self.__sr = None
def __init__(self, strName, address, port):
    ALModule.__init__(self, strName)
    self.outfile = None
    self.outfiles = [None] * 3
    self.count = 9999999
    self.check = False
    self.audio = ALProxy("ALAudioDevice", address, port)
    self.audio.setClientPreferences(self.getName(), 48000, [1, 1, 1, 1], 0, 0)
def __init__(self, name): ALModule.__init__(self, name) self.tts = ALProxy("ALTextToSpeech") global memory memory = ALProxy("ALMemory") memory.subscribeToEvent("ALTextToSpeech/TextDone", "haiku_guru", "on_tts_text_done")
def __init__(self, name, message):
    ALModule.__init__(self, name)
    self.message = message
    self.logs = logs.logs()
    self.tts = ALProxy("ALTextToSpeech")
    self.logs.display("Subscribed to an ALTextToSpeech proxy", "Good")
def __init__(self, name): ALModule.__init__(self, name) self.name = name self.tts = ALProxy("ALTextToSpeech") self.memory = ALProxy("ALMemory") self.motion = ALProxy("ALMotion") self.faceDetection = ALProxy("ALFaceDetection") self.posture = ALProxy("ALRobotPosture") self.seenFace = False self.onCompleteCallback = None
def __init__(self, name, motionMaestro):
    ALModule.__init__(self, name)
    self.motionMaestro = motionMaestro
    self.name = name
    self.tts = ALProxy("ALTextToSpeech")
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("SonarLeftDetected", name, "sonarLeftHandler")
    memory.subscribeToEvent("SonarRightDetected", name, "sonarRightHandler")
    print "Subscribed to the left and right sonar events"
def __init__(self, name): ALModule.__init__(self, name) self.tts = ALProxy("ALTextToSpeech") self.ears = ALProxy("ALSpeechRecognition") self.memory = ALProxy("ALMemory") self.log = ALProxy("ALLogger") self.ears.subscribe("Conversation") self.memory.subscribeToEvent("WordRecognized", "Conversation", "onWordRecognized")
def __init__(self, name): ALModule.__init__(self, name) # Create proxies for the instance. self.motion = ALProxy("ALMotion") # Run behaviour when a tactile touched. global memory memory = ALProxy("ALMemory") memory.subscribeToEvent("TouchChanged", self.getName(), "emotive_motion")