def connect_to_robot(self, IPAddress, port=9559):
    """Connect to the robot over TCP/IP (wifi or ethernet) and create NAOqi proxies.

    Creates text-to-speech, motion and posture proxies, wakes the robot up,
    and marks the connection as established.

    :param IPAddress: the IP address of the robot
    :param port: the NAOqi port of the robot (default 9559)
    :raises RuntimeError: if any proxy could not be created or wake-up failed
    """
    self.__ip_address = IPAddress
    self.__port = port
    # Proxy setup
    try:
        self.__speechProxy = naoqi.ALProxy("ALTextToSpeech", self.__ip_address, self.__port)
        self.__motionProxy = naoqi.ALProxy("ALMotion", self.__ip_address, self.__port)
        self.__postureProxy = naoqi.ALProxy("ALRobotPosture", self.__ip_address, self.__port)
        # self.__photoCaptureProxy = naoqi.ALProxy("ALPhotoCapture", self.__ip_address, self.__port)
        self.__motionProxy.wakeUp()
        self.connected = True
    except Exception:
        # The original raised a plain string, which is illegal (exceptions
        # must derive from BaseException); raise a real exception instead.
        raise RuntimeError(
            "Something failed. Please check the IP Address and port of the robot, and try again"
        )
def left_arm_ik_continuous(verbose=False):
    """Drive the left arm through IK-tracked paths around its work pose.

    First traces an out-and-back pattern along the y and z axes, then a
    circle in the y-z plane, repeatedly solving inverse kinematics from the
    currently measured joint angles and commanding the result.

    NOTE(review): relies on module-level ``k`` (kinematics helper), ``host``
    and ``port`` — confirm they exist at module scope. ``verbose`` is
    accepted but unused.
    """
    work_pose = k.left_arm_work_pose
    # Cartesian position/orientation of the hand at the work pose.
    pos, ori = k.left_arm_get_position(work_pose)
    poss = []
    d = 32   # number of samples around the circle
    r = 0.05 # path radius — presumably metres; TODO confirm units
    time.sleep(5)
    poss = []  # NOTE(review): re-initialised; the assignment above is dead
    # Build out-and-back waypoints: +y, back, -y, back, -z, back, +z, back.
    # NOTE(review): this mutates the centre point before building the path —
    # confirm the offset centre is intended.
    pos[1] = pos[1] + r
    poss.append(np.copy(pos))
    p = np.copy(pos)
    p[1] = p[1] + r
    poss.append(p)
    poss.append(np.copy(pos))
    p = np.copy(pos)
    p[1] = p[1] - r
    poss.append(p)
    poss.append(np.copy(pos))
    p = np.copy(pos)
    p[2] = p[2] - r
    poss.append(p)
    poss.append(np.copy(pos))
    p = np.copy(pos)
    p[2] = p[2] + r
    poss.append(p)
    poss.append(np.copy(pos))
    motion = naoqi.ALProxy("ALMotion", host, port)
    print k.left_arm_work_pose
    # Move to the work pose before starting the pattern.
    motion.setAngles(k.left_arm_tags, work_pose, 1.0)
    time.sleep(1.0)
    for i in range(2):
        for p in poss:
            # Solve IK from the *measured* angles (True = read sensors).
            angles = motion.getAngles(k.left_arm_tags, True)
            target_angles = k.left_arm_set_position(angles, p, ori)
            a = [a for a in target_angles]
            motion.setAngles(k.left_arm_tags, a, 1.0)
            time.sleep(0.5)
    # Second pattern: circle of radius r in the y-z plane, d samples.
    poss = []
    for i in range(d):
        p = np.copy(pos)
        th = math.pi * 2 / d * i
        p[1] = p[1] + r * math.cos(th)
        p[2] = p[2] + r * math.sin(th)
        poss.append(p)
    motion = naoqi.ALProxy("ALMotion", host, port)
    print k.left_arm_work_pose
    # Return to the work pose, then trace the circle five times.
    motion.setAngles(k.left_arm_tags, work_pose, 1.0)
    time.sleep(1.0)
    for i in range(5):
        for p in poss:
            angles = motion.getAngles(k.left_arm_tags, True)
            target_angles = k.left_arm_set_position(angles, p, ori)
            a = [a for a in target_angles]
            motion.setAngles(k.left_arm_tags, a, 1.0)
            time.sleep(0.1)
def __init__(self, strModuleName, strNaoIp, strNaoPort):
    """Register this module with NAOqi and create its proxies.

    ALSpeechRecognition may be unavailable (e.g. on a simulator); in that
    case ``self.asr`` is left as None and the module keeps running without
    speech recognition. ALMemory is required and not guarded.

    :param strModuleName: name under which this module is registered
    :param strNaoIp: robot IP address
    :param strNaoPort: robot NAOqi port
    """
    naoqi.ALModule.__init__(self, strModuleName )
    self.strNaoIp = strNaoIp
    self.strNaoPort = strNaoPort
    try:
        self.asr = naoqi.ALProxy("ALSpeechRecognition", strNaoIp, strNaoPort)
    except Exception as e:
        # Best-effort: ASR is optional.
        self.asr = None
    self.memory = naoqi.ALProxy("ALMemory", strNaoIp, strNaoPort)
    self.onLoad()
def __init__(self, pepper_ip):
    """Set up speech recognition: a ROS publisher plus a NAOqi event subscription.

    Subscribes ``speechCallBack`` to ALMemory's "WordRecognized" event and
    creates a publisher on the /speech ROS topic.

    :param pepper_ip: IP address of the Pepper robot (NAOqi port 9559 assumed)
    """
    self.setName("SpeechRecogniser")
    self.publisher = rospy.Publisher("/speech", String, queue_size=10)
    self.memory = naoqi.ALProxy("ALMemory", pepper_ip, 9559)
    # NAOqi will invoke self.speechCallBack whenever a word is recognised.
    self.memory.subscribeToEvent("WordRecognized", "SpeechRecogniser", "speechCallBack")
    self.proxy = naoqi.ALProxy("ALSpeechRecognition", pepper_ip, 9559)
    #self.proxy.setVocabulary(["Pepper"], False)
    # Recogniser from the speech_recognition package (presumably used for
    # offline/cloud transcription elsewhere — confirm against callers).
    self.recogniser = sr.Recognizer()
    self.log("Initialized")
def initialize_robot():
    """Wake up (stiff joints) and disable autonomous life (autonomous human-like motions).

    Wakes the robot, disables ALAutonomousLife, then wakes it again because
    disabling autonomous life can change the motion state.
    """
    proxy_motion = naoqi.ALProxy("ALMotion", IP_ROBOT, PORT_ROBOT)
    proxy_motion.wakeUp()
    proxy_autonomous_life = naoqi.ALProxy("ALAutonomousLife", IP_ROBOT, PORT_ROBOT)
    proxy_autonomous_life.setState("disabled")
    # Reuse the existing motion proxy; the original re-created an identical
    # ALMotion proxy here (copy-paste redundancy).
    proxy_motion.wakeUp()
def main(ip, id):
    """Grab one frame from an ALVideoDevice subscription and dump it as JSON.

    The 320x240, 3-channel frame is written to temp_img/current_frame.json
    under the "data" key as a nested list, so it can be retrieved in Python 3.

    :param ip: IP address of the robot
    :param id: ALVideoDevice subscription handle
    """
    video = naoqi.ALProxy("ALVideoDevice", ip, 9559)
    w = 320
    h = 240
    result = video.getImageRemote(id)
    # result[6] is the raw pixel buffer. Decode it in one vectorized step
    # instead of the original per-pixel ord()/itemset() loop (~230k calls).
    image = np.frombuffer(bytearray(result[6]), dtype=np.uint8).reshape((h, w, 3))
    '''Image is written into a json file so that it can be retrieved in python3'''
    data = {}
    # Mode 'w' already truncates; the explicit truncate() call was redundant.
    with open(os.path.join(DIR, "temp_img", "current_frame.json"), 'w') as file:
        data["data"] = image.tolist()
        json.dump(data, file)
    return
def __init__(self):
    """Lazily initialise shared state exactly once.

    Choregraphe box singleton idiom: if ``self.mutex`` already exists, a
    previous call already initialised this object, so the AttributeError
    probe fails silently and nothing is re-created.
    """
    try:
        self.mutex
    except:
        self.audioProxy = naoqi.ALProxy("ALAudioPlayer")
        self.mutex = mutex.mutex()
        # Sound cache — populated elsewhere; verify key/value types at callers.
        self.aSoundBank = {}
def events_and_callbacks_qi_framework(): """Example of getting callbacks for events""" # ALMemory acts as the hub for the distribution of event notifications. # Source: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/naoqi-apis/naoqi-core/almemory # Example: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/other-tutorials/python-sdk-tutorials/python-sdk-examples/vision/face # Create a broker # TODO(TK): why? naoqi.ALBroker("pythonBroker", IP_ME, PORT_ME, IP_ROBOT, PORT_ROBOT) proxy_memory = naoqi.ALProxy("ALMemory", IP_ROBOT, PORT_ROBOT) # Register callback: def mycallback(key, value): print("qi callback: key={}, value={}".format(key, value)) sess = proxy_memory.session() mem = sess.service("ALMemory") sub = mem.subscriber("FaceDetected") sub.signal.connect(functools.partial(mycallback, "FaceDetected")) # Raise an event: proxy_memory.raiseEvent("FaceDetected", str(datetime.datetime.now())) proxy_memory.raiseEvent("AnotherEvent", str(datetime.datetime.now())) time.sleep(0.1) # give it some time to process
def play(self, df_gestures):
    """Play a timed sequence of joint rotations on the Nao robot.

    ``df_gestures`` is a DataFrame whose index holds timestamps
    (timedeltas from the start of playback) and whose columns are Nao
    joint names; each row is commanded at its timestamp. If a row's
    deadline has already passed, a warning is written to the error
    stream and the row is executed immediately.
    """
    motion = naoqi.ALProxy("ALMotion", self._robot_ip, self._robot_port)
    motion.wakeUp()
    # Small lead-in so the very first row is not already late.
    start = time.time() + 0.1
    for timestamp, row in df_gestures.iterrows():
        # Sync timing: sleep until this row's deadline, or warn if late.
        remaining = timestamp.total_seconds() - (time.time() - start)
        if remaining > 0:
            time.sleep(remaining)
        else:
            self._stream_err.write(
                "WARNING! Robot fell behind achieving gestures\n")
        # Execute on robot: one setAngles call per joint column.
        for joint in row.index:
            motion.setAngles(joint, row[joint], 1.0)
def __init__(self, IP, PORT, SPEED):
    """Connect to the robot via a qi session and cache sensor metadata.

    :param IP: robot IP address
    :param PORT: NAOqi port
    :param SPEED: motion speed setting, stored for later use
    """
    self.session = qi.Session()
    self.session.connect("tcp://" + IP + ":" + str(PORT))
    self.motionService = self.session.service("ALMotion")
    # NOTE(review): `nao` is presumably an import alias for naoqi — confirm.
    self.memP = nao.ALProxy("ALMemory", IP, PORT)
    self.speed = SPEED
    self.bodyNames = self.motionService.getBodyNames("Body")
    # Short sensor labels; parallel (index-for-index) with self.pathNames below.
    self.sensorNames = [
        'LFsrFL', 'LFsrFR', 'LFsrBL', 'LFsrBR', 'RFsrFL', 'RFsrFR', 'RFsrBL',
        'RFsrBR', 'GyrX', 'GyrY', 'AccX', 'AccY', 'AccZ', 'TorsoAngleX',
        'TorsoAngleY'
    ]
    # ALMemory keys for the sensors above: 8 foot FSRs, gyro X/Y,
    # accelerometer X/Y/Z, and torso inertial angles X/Y.
    self.pathNames = [
        "Device/SubDeviceList/LFoot/FSR/FrontLeft/Sensor/Value",
        "Device/SubDeviceList/LFoot/FSR/FrontRight/Sensor/Value",
        "Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value",
        "Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value",
        "Device/SubDeviceList/RFoot/FSR/FrontLeft/Sensor/Value",
        "Device/SubDeviceList/RFoot/FSR/FrontRight/Sensor/Value",
        "Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value",
        "Device/SubDeviceList/RFoot/FSR/RearRight/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/GyrX/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/GyrY/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/AccX/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/AccY/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/AccZ/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/AngleX/Sensor/Value",
        "Device/SubDeviceList/InertialSensor/AngleY/Sensor/Value"
    ]
def subscribe(box):
    """Subscribe each decorated method of ``box`` to its ALMemory event.

    Every attribute of ``box`` that is callable and carries a
    ``_listenedEventName`` attribute is registered with ALMemory as the
    handler method for that event.
    """
    #box.log("[jdb] subscribing for box: " + repr(box))
    memory = naoqi.ALProxy("ALMemory")
    for attr_name in dir(box):
        handler = getattr(box, attr_name)
        if not callable(handler):
            continue
        if not hasattr(handler, "_listenedEventName"):
            continue
        memory.subscribeToEvent(handler._listenedEventName,
                                box.getName(), "", attr_name)
def stop(self):
    """Stop receiving audio: unsubscribe from ALAudioDevice and close the output file."""
    print("INF: SoundReceiver: stopping...")
    device = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559)
    device.unsubscribe(self.getName())
    self.pause = False
    print("INF: SoundReceiver: stopped!")
    # Close the capture file only if one was opened.
    if self.outfile != None:
        self.outfile.close()
def posture():
    """Read and command posture: list postures, then sit and stand."""
    posture_proxy = naoqi.ALProxy("ALRobotPosture", IP_ROBOT, PORT_ROBOT)
    print("posture list={}".format(posture_proxy.getPostureList()))
    print("current posture={}".format(posture_proxy.getPosture()))
    # Run both transitions at 75% of maximum speed.
    posture_proxy.goToPosture("Sit", 0.75)
    posture_proxy.goToPosture("Stand", 0.75)
def make_proxy(proxy_type):
    """Create a NAOqi proxy to a module running on the local robot.

    :param proxy_type: NAOqi module name, e.g. "ALMotion"
    :return: the proxy on success; exits the process with status 1 on failure
    """
    try:
        return naoqi.ALProxy(proxy_type, "127.0.0.1", 9559)
    except Exception as e:
        # `except Exception, e` is Python-2-only; `as` works on 2.6+ and 3.
        # The original print("a", b, ...) printed a tuple repr under Python 2;
        # format a single string instead.
        print("Error creating %s proxy: %s" % (proxy_type, str(e)))
        exit(1)
def __init__(self):
    """Lazily initialise shared state exactly once.

    Choregraphe box singleton idiom: if ``self.mutex`` already exists, a
    previous call already initialised this object, so the AttributeError
    probe fails silently and nothing is re-created.
    """
    try:
        self.mutex
    except:
        self.fm = naoqi.ALProxy("ALFrameManager")
        self.mutex = mutex.mutex()
        # Bookkeeping dicts — presumably loaded vs. currently-running
        # behaviors; verify against the methods that fill them.
        self.aLoaded = {}
        self.aRunning = {}
def unsubscribe(box):
    """Unsubscribe each decorated method of ``box`` from its ALMemory event.

    Mirrors the subscription helper: every callable attribute carrying a
    ``_listenedEventName`` is unregistered, and failures are logged on the
    box rather than raised (best-effort teardown).

    NOTE(review): this calls unsubscribeToMicroEvent while the matching
    subscribe helper uses subscribeToEvent — confirm whether events or
    micro-events are intended; a mismatched pair can leave subscriptions
    dangling.
    """
    memory = naoqi.ALProxy("ALMemory")
    for funcName in dir(box):
        func = getattr(box, funcName)
        if callable(func) and hasattr(func, "_listenedEventName"):
            try:
                memory.unsubscribeToMicroEvent(func._listenedEventName , box.getName())
            except Exception as e:
                box.log("Exception in unsubscribe: " + str(e))
def start( self ):
    """Subscribe this module to ALAudioDevice (front channel, non-interleaved).

    After subscribing, NAOqi delivers audio buffers to this module.
    NOTE(review): self.pause is set True immediately after starting —
    confirm whether processing is meant to begin paused.
    """
    audio = naoqi.ALProxy( "ALAudioDevice", self.strNaoIp, 9559 )
    nNbrChannelFlag = 3 # ALL_Channels: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2; AL::FRONTCHANNEL: 3 or AL::REARCHANNEL: 4.
    nDeinterleave = 0
    nSampleRate = SAMPLE_RATE
    audio.setClientPreferences( self.getName(), nSampleRate, nNbrChannelFlag, nDeinterleave ) # setting same as default generate a bug !?!
    audio.subscribe( self.getName() )
    self.pause = True
    print( "INF: SoundReceiver: started!" )
def __init__(self, strModuleName, strNaoIp, strNaoPort): naoqi.ALModule.__init__(self, strModuleName ) self.BIND_PYTHON( self.getName(),"callback" ) self.strNaoIp = strNaoIp self.strNaoPort = strNaoPort try: self.sd = naoqi.ALProxy("ALSoundDetection", strNaoIp, strNaoPort) except Exception,e: print "Could not create proxy to ALSoundDetection",e
def __init__(self, strModuleName, strNaoIp, strNaoPort): naoqi.ALModule.__init__(self, strModuleName) self.BIND_PYTHON(self.getName(), "pictureChanged") self.strNaoIp = strNaoIp self.strNaoPort = strNaoPort try: self.memory = naoqi.ALProxy("ALMemory", strNaoIp, strNaoPort) except RuntimeError, e: print "Error when creating ALMemory proxy:" + str(e)
def ConnectProxy(proxy_name, IP, PORT): theProxy=None try: theProxy = naoqi.ALProxy(proxy_name, IP, PORT) sleep(0.01) except RuntimeError as e: print "Error when creating ", proxy_name ," proxy:" print str(e) return theProxy
def __init__(self, ip="nao.local", port=9559):
    """Connect to NAOqi and enable collision protection on the arms.

    :param ip: robot hostname or IP address
    :param port: NAOqi port
    :raises Exception: re-raises (after logging) any proxy-creation failure
    """
    self.proxies = {}
    try:
        # Same creation order as before: memory, motion, posture.
        for key, module in (("memory", "ALMemory"),
                            ("motion", "ALMotion"),
                            ("posture", "ALRobotPosture")):
            self.proxies[key] = naoqi.ALProxy(module, ip, port)
    except Exception as e:
        robotlog.error("Error when creating one of the NAOqi proxy:" + str(e))
        raise e
    # Guard the arms against self-collisions; log whether it took effect.
    if self.proxies['motion'].setCollisionProtectionEnabled("Arms", True):
        robotlog.info("Collision avoidance enabled on the arms.")
    else:
        robotlog.warning(
            "Unable to enable collision avoidance on the arms!")
def case_message_battery(self):
    """Reply with the current battery charge, or 255 on any failure.

    Best-effort: if creating the ALBattery proxy or reading the charge
    fails, a single 255 byte is sent instead so the client always gets a
    response.
    """
    try:
        bat = naoqi.ALProxy("ALBattery", self.robotIP, self.robotPort)
        self.buffer.clearbuffer()
        # getBatteryCharge() returns the charge percentage as one byte.
        self.buffer.writebyte(bat.getBatteryCharge())
        self.sendmessage()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the deliberate 255 fallback is preserved.
        self.buffer.clearbuffer()
        self.buffer.writebyte(255)
        self.sendmessage()
def stop(self):
    """Stop audio capture: unsubscribe, tear down PyAudio, close the output file."""
    print("INF: SoundReceiver: stopping...")
    device = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, 9559)
    device.unsubscribe(self.getName())
    # Tear down the local PyAudio stream and its owning instance.
    self.stream.stop_stream()
    self.stream.close()
    self.p.terminate()
    print("INF: SoundReceiver: stopped!")
    # Close the capture file only if one was opened.
    if self.outfile != None:
        self.outfile.close()
def _get_appname(box):
    """Gets the name of the app to which the box belongs.

    Mostly a helper function for the rest. Resolves the box's behavior
    path through ALFrameManager and returns the portion after the apps
    folder, with leading path separators stripped.
    """
    frame_manager = naoqi.ALProxy("ALFrameManager")
    path = os.path.normpath(frame_manager.getBehaviorPath(box.behaviorId))
    assert _APPS_FOLDER_FRAGMENT in path
    # Split once on the apps-folder marker; keep only what follows it.
    _, tail = path.split(_APPS_FOLDER_FRAGMENT, 1)
    return tail.lstrip("\\/")
def main(args):
    """Demo driver for Pepper's ALTabletService: show content, then reload it.

    :param args: parsed CLI arguments; only ``args.ip`` is used
                 (NAOqi port 9559 assumed)
    """
    #print args
    service = naoqi.ALProxy("ALTabletService", args.ip, 9559)
    # Leftover wifi-setup experiments, kept for reference:
    #service.enableWifi()
    #service.configureWifi("wpa2","LAPTOP_NET5","Pepper123")
    #con = service.connectWifi("LAPTOP_NET5")
    #if (con):
    #    print "connected"
    #service.loadUrl("http://google.com")
    show(service)
    # NOTE(review): presumably a module-level helper named reload(), not the
    # Python builtin — confirm; the builtin would fail on a proxy object.
    reload(service)
def pause(self):
    """Stop listening: unsubscribe this module from ALAudioDevice.

    Beyond logging, this is a no-op when the module was never started.
    """
    print("INF: SpeechRecognitionModule.pause: stopping")
    # Guard clause: nothing to do unless we previously subscribed.
    if not self.isStarted:
        print("INF: SpeechRecognitionModule.stop: not running")
        return
    self.isStarted = False
    device = naoqi.ALProxy("ALAudioDevice", self.strNaoIp, NAO_PORT)
    device.unsubscribe(self.getName())
    print("INF: SpeechRecognitionModule: stopped!")
def __init__(self, nao_ip="127.0.0.1", nao_port=9559, with_ros=True, immediate=False):
    """Initialise the Nao robot wrapper and its ALMotion proxy.

    :param nao_ip: robot IP address (default is local/simulator)
    :param nao_port: NAOqi port
    :param with_ros: if True, advertise ROS support to the base class
    :param immediate: forwarded to the base class — TODO confirm semantics
    """
    super(Nao, self).__init__(actions=["robots.naoqi.actions"],
                              supports=ROS if with_ros else 0,
                              dummy=False,
                              immediate=immediate)
    self.motion = alnaoqi.ALProxy("ALMotion", nao_ip, nao_port)
def __init__(self, ip):
    """Construct speech synthesis class.

    Creates a text-to-speech proxy and parses the raw script entries once,
    keyed by token.

    Arguments:
    -ip: IP address of NAO robot (string)
    """
    self._tts = naoqi.ALProxy("ALTextToSpeech", ip, SpeechSynthesis.NAO_PORT)
    # Python 2 iteritems(); each raw entry is wrapped in a ScriptItem.
    self._script = {
        token: ScriptControl.ScriptItem(scriptRaw)
        for token, scriptRaw in Script.data.iteritems()
    }
def face_characteristics():
    """Continuously poll ALMemory and print perceived people's face data.

    Enables detailed people/face analysis on the robot, then loops forever
    (once per second) printing each visible person's age properties,
    parsed expression properties, and — when available — gaze direction.

    NOTE(review): `visible_persons` is declared global but never assigned
    here — confirm it is used elsewhere.
    """
    global visible_persons
    # Wrap proxy/broker construction with retry logic.
    retry_proxy = retry_wrapper(naoqi.ALProxy)
    retry_broker = retry_wrapper(naoqi.ALBroker)
    proxy_face_detection = retry_proxy("ALFaceDetection", IP_ROBOT, PORT_ROBOT)
    proxy_people_perception = retry_proxy("ALPeoplePerception", IP_ROBOT, PORT_ROBOT)
    proxy_gaze_analysis = retry_proxy("ALGazeAnalysis", IP_ROBOT, PORT_ROBOT)
    # Disable fast mode and enable face analysis to get detailed properties.
    proxy_people_perception.setFastModeEnabled(False)
    proxy_gaze_analysis.setFaceAnalysisEnabled(True)
    print "isTrackingEnabled={}".format(
        proxy_face_detection.isTrackingEnabled())
    print "isRecognitionEnabled={}".format(
        proxy_face_detection.isRecognitionEnabled())
    print "isFaceDetectionEnabled={}".format(
        proxy_people_perception.isFaceDetectionEnabled())
    print "isFastModeEnabled={}".format(
        proxy_people_perception.isFastModeEnabled())
    print "isFaceAnalysisEnabled={}".format(
        proxy_gaze_analysis.isFaceAnalysisEnabled())
    retry_broker("pythonBroker", IP_ME, PORT_ME, IP_ROBOT, PORT_ROBOT)
    proxy_memory = naoqi.ALProxy("ALMemory", IP_ROBOT, PORT_ROBOT)
    while True:
        people_list = proxy_memory.getData("PeoplePerception/PeopleList")
        # Keys actually present right now; gaze data may lag behind detection.
        candidate_keys = proxy_memory.getDataList("PeoplePerception/Person")
        for person in people_list:
            print str(
                proxy_memory.getData(
                    "PeoplePerception/Person/{}/AgeProperties".format(person)))
            print str(
                parse_face_expressions(
                    proxy_memory.getData(
                        "PeoplePerception/Person/{}/ExpressionProperties".
                        format(person))))
            # print str(proxy_memory.getData("PeoplePerception/Person/{}/SmileProperties".format(person)))
            gaze_direction_key = "PeoplePerception/Person/{}/GazeDirection".format(
                person)
            # FIXME(TK): gaze detection not working
            if gaze_direction_key in candidate_keys:
                print "looking at robot? {}".format(
                    str(proxy_memory.getData(gaze_direction_key)))
            else:
                print "Unknown gaze"
        time.sleep(1)
def __init__(self, strModuleName, strNaoIp):
    """Initialise the speech-recognition module and declare its ALMemory event.

    Registers the module with NAOqi, declares the "SpeechRecognition" event
    other modules can subscribe to, and initialises all recording,
    auto-detection, calibration, RMS and buffering state.

    :param strModuleName: name under which this module is registered with NAOqi
    :param strNaoIp: robot IP address
    """
    try:
        naoqi.ALModule.__init__(self, strModuleName)
        # is these 2 line necessary? what do they do?
        # just copied them from the examples...
        self.BIND_PYTHON(self.getName(), "callback")
        self.strNaoIp = strNaoIp

        # declare event to ALMemory so other modules can subscribe
        self.memory = naoqi.ALProxy("ALMemory")
        self.memory.declareEvent("SpeechRecognition")

        # flag to indicate if subscribed to audio events
        self.isStarted = False

        # flag to indicate if we are currently recording audio
        self.isRecording = False
        self.startRecordingTimestamp = 0
        self.recordingDuration = RECORDING_DURATION

        # flag to indicate if auto speech detection is enabled
        self.isAutoDetectionEnabled = False
        self.autoDetectionThreshold = 10  # TODO: find a default value that works fine so we don't need to calibrate every time

        # flag to indicate if we are calibrating
        self.isCalibrating = False
        self.startCalibrationTimestamp = 0

        # RMS calculation variables
        self.framesCount = 0
        self.rmsSum = 0  # used to sum up rms results and calculate average
        self.lastTimeRMSPeak = 0

        # audio buffer
        self.buffer = []
        self.preBuffer = []
        self.preBufferLength = 0  # length in samples (len(self.preBuffer) just counts entries)

        # init parameters
        self.language = DEFAULT_LANGUAGE
        self.idleReleaseTime = IDLE_RELEASE_TIME
        self.holdTime = HOLD_TIME
        self.lookaheadBufferSize = LOOKAHEAD_DURATION * SAMPLE_RATE

        # counter for wav file output
        self.fileCounter = 0
    except BaseException as err:
        # `except BaseException, err` is Python-2-only syntax; `as` works on
        # 2.6+ and matches the rest of the file.
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit — confirm that is intended for a load-time guard.
        print("ERR: SpeechRecognitionModule: loading error: %s" % str(err))