Example #1
def mainBlind(robotIP,
              disable_state=disable_state,
              make_photos=make_photos,
              n_chunks=n_chunks,
              speed=0,
              extra_lag=extra_lag):
    proxy_list = []
    walk_dist = 3
    proxy_names, port = [
        'ALMotion', 'MovementGraph', 'ALRobotPosture', 'ALVideoDevice',
        'ALAutonomousLife'
    ], 9559
    sess = qi.Session()
    sess.connect("tcp://" + robotIP + ":" + str(port))
    USE_ONE = False
    if USE_ONE:
        video_service = sess.service('ALVideoDevice')
    else:
        sess.service(proxy_names[-1]).setState('disabled')
        for proxy_name in (proxy_names):
            print('adding ' + str(proxy_name))
            proxy_list.append(sess.service(proxy_name))
        motionProxy, customProxy, postureProxy, video_service, AutonomousLifeProxy = proxy_list
        AutonomousLifeProxy.setState("disabled")
        postureProxy.goToPosture("StandInit", 0.6)
    camera_id = 0
    resolution = 2
    colorSpace = 11
    fps = 5
    video_client = video_service.subscribeCamera('python_client', camera_id,
                                                 resolution, colorSpace, fps)
    img_pack = video_service.getImageRemote(video_client)
    assert img_pack is not None
    #assert type(video_client) is not str
    print("starting")
    t = time.time()
    DEFAULT_SPEED = 0.07
    walk_dist = 3
    walk_time = walk_dist / DEFAULT_SPEED
    n_chunks = 11
    extra_lag = 1.5  #.5
    time_chunk = walk_time / (n_chunks + 0.0)
    #time_chunk = walk_time/10
    customProxy.StartMove()
    motionProxy.moveTo(0, 0, -13 * np.pi / 180)
    for i in range(n_chunks + 1):
        print('chunk ' + str(i))
        # Check at the middle chunk; the photo is taken at the start of the chunk, not the end.
        if i == ((n_chunks + 1) // 2):
            motionProxy.setAngles(["HeadPitch", "HeadYaw"], [0, 0], 0.3)
            img_pack = video_service.getImageRemote(video_client)
            if img_pack is None:
                turn_flag = -1
            else:
                img = np.array(
                    Image.frombytes("RGB", (img_pack[0], img_pack[1]),
                                    bytes(img_pack[6])))
                image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                cv2.imwrite('PHOTO_' + str(i) + '.jpg', image)
                turn_flag = ShouldTurn(image, allowed_gamma=0)
            motionProxy.moveTo(0, 0, turn_flag * 15 * np.pi / 180)
        customProxy.GoForvard(walk_dist)  #walk walk_dist meters forward
        time.sleep(time_chunk)
        customProxy.StopMove()
        time.sleep(extra_lag)

    postureProxy.goToPosture("StandInit", 0.6)
    print('going back')
    time.sleep(2)
    time_chunk = time_chunk / 2
    postureProxy.goToPosture("StandInit", 0.6)
    customProxy.StartMove()
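
The excerpt above subscribes to ALVideoDevice but never unsubscribes in the portion shown. Below is a minimal capture helper with cleanup, assuming the same camera parameters used above (camera 0, resolution 2, colorSpace 11, 5 fps); the helper name is not part of the original code.

import numpy as np
from PIL import Image

def grab_frame(video_service, client_name='python_client'):
    # Hypothetical helper: grab one RGB frame and always release the subscriber.
    handle = video_service.subscribeCamera(client_name, 0, 2, 11, 5)
    try:
        img_pack = video_service.getImageRemote(handle)
        if img_pack is None:
            return None
        width, height, raw = img_pack[0], img_pack[1], bytes(img_pack[6])
        return np.array(Image.frombytes("RGB", (width, height), raw))
    finally:
        video_service.unsubscribe(handle)
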
Example #2
    def __init__(self, params):
        rospy.init_node("restaurant")
        atexit.register(self.__del__)
        self.ip = params["ip"]
        self.port = params["port"]
        self.session = qi.Session()
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)
        self.Leds = self.session.service("ALLeds")
        self.Memory = self.session.service("ALMemory")
        self.Motion = self.session.service("ALMotion")
        self.VideoDev = self.session.service("ALVideoDevice")
        self.AudioPla = self.session.service("ALAudioPlayer")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.AudioRec = self.session.service("ALAudioRecorder")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.TabletSer = self.session.service("ALTabletService")
        self.AutonomousLife = self.session.service("ALAutonomousLife")

        try:
            self.AudioRec.stopMicrophonesRecording()
        except BaseException:
            print(
                "\033[0;33m\t[Kamerider W] : You don't need stop record\033[0m"
            )
        # Audio recording function
        self.thread_recording = Thread(target=self.record_audio, args=(None, ))
        self.thread_recording.daemon = True
        self.audio_terminate = False
        # ROS subscribers and publishers
        self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                   MoveBaseAction)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                               GoalID,
                                               queue_size=1)
        self.nav_as.wait_for_server()
        # YOLO-related detectors
        self.waving_detector = waving_detection.object_detection()
        self.detector = dlib.get_frontal_face_detector()
        self.get_image_switch = True
        # Clear the costmap
        self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                                Empty)
        self.map_clear_srv()
        # Declare some variables
        self.object = [
            "coke cole", "wine", "beer", "photo chips", "green tea", "water"
        ]
        self.current_drink_name = []
        self.scan_msg = []
        self.if_save_switch = True
        self.scan_msg_time = 0
        self.angle = -.2
        self.f = open('./laser.txt', 'w')
        self.if_need_record = False
        self.head_fix = True
        self.bar_location = "none"
        self.point_dataset = []
        # Set language to English
        self.TextToSpe.setLanguage("English")
        #    LED group
        self.led_name = [
            "Face/Led/Blue/Right/0Deg/Actuator/Value",
            "Face/Led/Blue/Right/45Deg/Actuator/Value",
            "Face/Led/Blue/Right/90Deg/Actuator/Value",
            "Face/Led/Blue/Right/135Deg/Actuator/Value",
            "Face/Led/Blue/Right/180Deg/Actuator/Value",
            "Face/Led/Blue/Right/225Deg/Actuator/Value",
            "Face/Led/Blue/Right/270Deg/Actuator/Value",
            "Face/Led/Blue/Right/315Deg/Actuator/Value",
            "Face/Led/Blue/Left/0Deg/Actuator/Value",
            "Face/Led/Blue/Left/45Deg/Actuator/Value",
            "Face/Led/Blue/Left/90Deg/Actuator/Value",
            "Face/Led/Blue/Left/135Deg/Actuator/Value",
            "Face/Led/Blue/Left/180Deg/Actuator/Value",
            "Face/Led/Blue/Left/225Deg/Actuator/Value",
            "Face/Led/Blue/Left/270Deg/Actuator/Value",
            "Face/Led/Blue/Left/315Deg/Actuator/Value"
        ]
        self.Leds.createGroup("MyGroup", self.led_name)
        # Disable basic_awareness
        if self.BasicAwa.isEnabled():
            self.BasicAwa.setEnabled(False)
        if self.BasicAwa.isRunning():
            self.BasicAwa.pauseAwareness()
        # Initialize the tablet
        self.TabletSer.cleanWebview()
        print(
            '\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
        # Path where the recorded audio is saved
        self.audio_path = '/home/nao/audio/record.wav'
        self.recog_result = "None"
        # Beep volume
        self.beep_volume = 70
        # Set the initial head position; keep the head fixed while walking
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., -0.25], .05)
        # Set the speaking speed
        self.TextToSpe.setParameter("speed", 80.0)
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        self.RobotPos.goToPosture("Stand", .5)
        # Initialize recording
        self.record_delay = 2
        self.speech_hints = []
        self.enable_speech_recog = True
        self.SoundDet.setParameter("Sensitivity", .8)
        self.SoundDet.subscribe('sd')
        self.SoundDet_s = self.Memory.subscriber("SoundDetected")
        self.SoundDet_s.signal.connect(self.callback_sound_det)
        # Call member functions
        self.start_head_fix()
        self.set_volume(.7)
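
Example #2 connects self.callback_sound_det to ALMemory's "SoundDetected" event, but the handler itself is outside this excerpt. A hypothetical sketch of such a handler, reusing the members declared above:

    def callback_sound_det(self, msg):
        # Hypothetical handler (not part of the original excerpt): start the
        # recording thread the first time a sound is detected, if recording
        # was requested and the thread is not running yet.
        if self.if_need_record and not self.thread_recording.is_alive():
            self.thread_recording.start()
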
Example #3
    def __init__(self, params):
        atexit.register(self.__del__)

        self.ip = params["ip"]
        self.port = params["port"]

        self.session = qi.Session()
        self.switch = True
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("connection Error!!")
            sys.exit(1)

        # naoqi services needed
        self.tts = self.session.service("ALTextToSpeech")
        self.tb = self.session.service("ALRobotPosture")
        self.motion = self.session.service("ALMotion")
        self.memory = self.session.service("ALMemory")
        self.FaceDet = self.session.service("ALFaceDetection")
        self.BasicAwareness = self.session.service("ALBasicAwareness")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.FaceCharcater = self.session.service("ALFaceCharacteristics")
        self.PeoPer = self.session.service("ALPeoplePerception")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.SoundLoc = self.session.service("ALSoundLocalization")
        self.TabletSer = self.session.service("ALTabletService")

        # Parameters to set
        self.tts.setParameter("speed", 75.0)
        self.tts.setLanguage("English")
        self.PeoPer.resetPopulation()
        self.motion.setStiffnesses("Head", 1.0)
        self.conv_count = self.memory.subscriber("stop_loc")
        self.conv_count.signal.connect(self.callback_stop_loc)

        # Main sequence
        # Disable the autonomous awareness mode
        print "---Disabling the AutonomousLife Mode---"
        self.BasicAwareness.setEnabled(False)
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        print "---AutonomousLife Mode Disabled!---"
        # Say we want to play a riddle game

        self.motion.wakeUp()
        self.tb.goToPosture("StandInit", 0.5)
        self.say("I want to play a riddle game,please stand behind me")
        self.tb.goToPosture("StandInit", 0.5)
        self.motion.moveInit()
        turn_count = 10
        while turn_count >= 1:
            self.say(str(turn_count))
            time.sleep(0.75)
            turn_count -= 1
        # Turn around
        self.motion.setStiffnesses("Head", 1.0)
        self.motion.moveTo(0, 0, 3.14, _async=True)
        time.sleep(4)

        # Start gender detection and answer the questions of the person standing behind
        self.take_pic = PhotoCapture(self.session)
        self.take_pic.takepic()
        self.count_num_of_human()
        self.dialog = dialog_answer.Dialog(self.session,
                                           '/home/nao/top/dialog.top')

        # Sound source localization
        self.tts.say(
            "We are going to play the Blind man’s bluff game, please stand around me"
        )
        self.motion.setAngles("HeadPitch", -0.4, 0.1)
        self.count = 5
        while self.count >= 1:
            time.sleep(0.75)
            self.tts.say(str(self.count))
            self.count -= 1
        self.sound_localization()
        self.blind_game = dialog_answer.Dialog(self.session,
                                               '/home/nao/top/dialog.top')
Example #4
def session(url):
    """ Connected session to a NAOqi """
    ses = qi.Session()
    ses.connect(url)
    return ses
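
A short usage sketch for the helper above; the address and the service call are illustrative assumptions, not part of the original snippet.

ses = session("tcp://192.168.1.101:9559")  # address is an assumption
ses.service("ALTextToSpeech").say("Connected")
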
Example #5
    def __init__(self, ip):

        self.ipAddress = ip
        self.session = qi.Session()
        self.connectToRobot()
Example #6
 def __init__(self, params):
     atexit.register(self.__del__)
     # pepper connection
     self.ip = params["ip"]
     self.port = params["port"]
     self.session = qi.Session()
     rospy.init_node("find_my_mate")
     try:
         self.session.connect("tcp://" + self.ip + ":" + str(self.port))
     except RuntimeError:
         print("[Kamerider E] : connection Error!!")
         sys.exit(1)
     self.car_pose = MoveBaseGoal()
     # naoqi API
     self.Leds = self.session.service("ALLeds")
     self.Memory = self.session.service("ALMemory")
     self.Dialog = self.session.service("ALDialog")
     self.Motion = self.session.service("ALMotion")
     self.AudioDev = self.session.service("ALAudioDevice")
     self.AudioPla = self.session.service("ALAudioPlayer")
     self.PhotoCap = self.session.service("ALPhotoCapture")
     self.VideoDev = self.session.service("ALVideoDevice")
     self.RobotPos = self.session.service("ALRobotPosture")
     self.TextToSpe = self.session.service("ALTextToSpeech")
     self.AudioRec = self.session.service("ALAudioRecorder")
     self.BasicAwa = self.session.service("ALBasicAwareness")
     self.TabletSer = self.session.service("ALTabletService")
     self.SoundDet = self.session.service("ALSoundDetection")
     self.AutonomousLife = self.session.service("ALAutonomousLife")
     # stop recording
     try:
         self.AudioRec.stopMicrophonesRecording()
     except BaseException:
         print("\033[0;33m\t[Kamerider W]ALFaceCharacteristics : You don't need stop record\033[0m")
     # Audio recording function
     self.thread_recording = Thread(target=self.record_audio, args=(None,))
     self.thread_recording.daemon = True
     self.audio_terminate = False
     # ROS subscribers and publishers
     self.nav_as = actionlib.SimpleActionClient("/move_base", MoveBaseAction)
     self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
     self.goal_cancel_pub = rospy.Publisher('/move_base/cancel', GoalID, queue_size=1)
     self.nav_as.wait_for_server()
     # Clear the costmap
     self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
     self.map_clear_srv()
     self.Motion.setTangentialSecurityDistance(.05)
     self.Motion.setOrthogonalSecurityDistance(.1)
     #    LED group
     self.led_name = ["Face/Led/Green/Right/0Deg/Actuator/Value", "Face/Led/Green/Right/45Deg/Actuator/Value",
                      "Face/Led/Green/Right/90Deg/Actuator/Value", "Face/Led/Green/Right/135Deg/Actuator/Value",
                      "Face/Led/Green/Right/180Deg/Actuator/Value", "Face/Led/Green/Right/225Deg/Actuator/Value",
                      "Face/Led/Green/Right/270Deg/Actuator/Value", "Face/Led/Green/Right/315Deg/Actuator/Value",
                      "Face/Led/Green/Left/0Deg/Actuator/Value", "Face/Led/Green/Left/45Deg/Actuator/Value",
                      "Face/Led/Green/Left/90Deg/Actuator/Value", "Face/Led/Green/Left/135Deg/Actuator/Value",
                      "Face/Led/Green/Left/180Deg/Actuator/Value", "Face/Led/Green/Left/225Deg/Actuator/Value",
                      "Face/Led/Green/Left/270Deg/Actuator/Value", "Face/Led/Green/Left/315Deg/Actuator/Value"]
     self.Leds.createGroup("MyGroup", self.led_name)
     # Declare some variables
     # Number of times a face has been approached
     self.temp_reach_time = 0
     self.if_door_open = False
     # Subscriber for the amcl_pose topic
     self.amcl_pose_sb = None
     self.position_result = []
     self.get_image_switch = True
     self.to_find_person_name = []
     self.current_drink_name = []
     # Store the age and gender of the current speaker
     self.gender = "none"
     self.age = "none"
     # Store the category of each speech recognition result
     self.type = "none"
     # Store the result of each speech recognition
     self.audio_recog_result = "none"
     # Store the current person's skin color, clothing color and type
     self.lower_color = "none"
     self.lower_wear = "none"
     self.upper_color = "none"
     self.upper_wear = "none"
     self.skin_color  = "none"
     self.predefined_position = {
             "trash bin":[4.34827899933,.21289503574,0.0,0.0,0.932419364911,0.361378095543],
             "tv":[6.94088697433,2.1938290596,0.0,0.0,0.387483707115,0.921876551779],
             "couch":[6.29112815857,-0.861555457115,0.0,0.0,-0.27312959294,0.961977247891],
             "arm chair":[7.84281349182,0.143020510674,0.0,0.0,-0.401161409027,0.916007382016]
         }
     # Store what is next to the current person
     self.position = "none"
     self.positive_list = ["yes", "of course", "we do"]
     self.negative_list = ["no", "sorry", "we don't", "regret"]
     self.name_list = ["Max","Alex","Charlie","Elizabeth","Francis","James","Jennifer","John","Linda","Michael","Mary","Robert","Patricia","Robin","Skyler","William"]
     self.angle = -.1
     self.person_num = 1
     self.head_fix = True
     self.if_need_record = False
     self.point_dataset = self.load_waypoint("waypoints_314.txt")
     # Face detection
     self.detector = dlib.get_frontal_face_detector()
     # Object detection
     # self.object_detection = object_detection()
     # Disable basic_awareness
     if self.BasicAwa.isEnabled():
         self.BasicAwa.setEnabled(False)
     if self.BasicAwa.isRunning():
         self.BasicAwa.pauseAwareness()
     # Disable AutonomousLife mode
     if self.AutonomousLife.getState() != "disabled":
         self.AutonomousLife.setState("disabled")
     # Initialize the tablet
     self.TabletSer.hideImage()
     print ('\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
     # Set the dialog language
     self.Dialog.setLanguage("English")
     # Path where the recorded audio is saved
     self.audio_path = '/home/nao/audio/record.wav'
     self.recog_result = "00"
     self.RobotPos.goToPosture("StandInit", .2)
     # Beep volume
     self.beep_volume = 70
     # Center of the person with the largest face
     self.center = 0
     # Set the speaking speed
     self.TextToSpe.setParameter("speed", 75.0)
     # Initialize recording
     self.record_delay = 2.5
     self.speech_hints = []
     self.enable_speech_recog = True
     # 1 is the most sensitive
     self.SoundDet.setParameter("Sensitivity", .4)
     self.SoundDet.subscribe('sd')
     self.SoundDet_s = self.Memory.subscriber("SoundDetected")
     self.SoundDet_s.signal.connect(self.callback_sound_det)
     # Call member functions
     self.start_head_fix()
     self.set_volume(.7)
     self.show_image("instructions.png")
     self.keyboard_control()
Example #7
		# Get the Accelerometers Values
		AccX = memory_service.getData("Device/SubDeviceList/InertialSensor/AccX/Sensor/Value")
		AccY = memory_service.getData("Device/SubDeviceList/InertialSensor/AccY/Sensor/Value")
		AccZ = memory_service.getData("Device/SubDeviceList/InertialSensor/AccZ/Sensor/Value")

		x = np.array([AccX, AccY, -AccZ])
		
		return x/np.linalg.norm(x)
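
memory_service in the fragment above is an ALMemory proxy; under the qi framework it would typically come from a connected session. A sketch, reusing the address from the test block below:

session = qi.Session()
session.connect("tcp://192.168.43.184:9559")
memory_service = session.service("ALMemory")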


#################################################
# Small test
if False:
	IP_ADRESS 	= "192.168.43.184"
	PORT		= 9559
	v5			= False
	
	if v5:
		motionProxy = ALProxy("ALMotion", IP_ADRESS, PORT)
		memoryProxy = ALProxy("ALMemory", IP_ADRESS, PORT)
		
		prox = [motionProxy,memoryProxy]
		
	else:
		prox = qi.Session()
		prox.connect("tcp://" + IP_ADRESS + ":" + str(PORT))
		
		
	b = Body(prox, v5)
	b.relax()
Example #8
def get_session(opts):
    session = qi.Session()
    session.connect("tcp://" + opts.ip + ":" + str(opts.port))
    return session
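
get_session only needs an object exposing ip and port attributes; a minimal sketch of driving it from the command line (argument names and the ALSystem call are assumptions, not from the original snippet):

import argparse
import qi

parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=9559)
opts = parser.parse_args()
session = get_session(opts)
print(session.service("ALSystem").systemVersion())
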
Example #9
#        # Hide the web view
#        tabletService.hideWebview()
#    except Exception, e:
#        print "Error was: ", e
        
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print "Interrupted by user, stopping tablette"
        tabletService.hideWebview()
        #stop
        sys.exit(0)



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="134.214.50.49",
                        help="Robot IP address. On robot or Local Naoqi: use '134.214.50.49'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")

    args = parser.parse_args()
    robot_session = qi.Session()
    try:
        robot_session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"+"Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    main(robot_session)
Example #10
    def __init__(self,params):

        # Enter __del__ when the program exits
        atexit.register(self.__del__)

        # Initialize pepper's ip and port
        self.ip = params["ip"]
        self.port = params["port"]
        self.session = qi.Session()

        # Try to connect to pepper
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)

        # naoqi APIs needed
        self.VideoDev = self.session.service("ALVideoDevice")
        self.FaceCha = self.session.service("ALFaceCharacteristics")
        self.FaceDet = self.session.service("ALFaceDetection")
        self.Memory = self.session.service("ALMemory")
        self.Dialog = self.session.service("ALDialog")
        self.AnimatedSpe = self.session.service("ALAnimatedSpeech")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.TabletSer = self.session.service("ALTabletService")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.Motion = self.session.service("ALMotion")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.Tracker = self.session.service("ALTracker")
        self.PeoplePer = self.session.service("ALPeoplePerception")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.SoundLoc = self.session.service("ALSoundLocalization")
        self.WavingDet = self.session.service("ALWavingDetection")
        self.MovementDet = self.session.service("ALMovementDetection")

        '''
        # Callback functions for the topics
        start_following_sub = self.Memory.subscribe("follow_switch")
        start_following_sub.signal.connect(self.callback_start_following)
        start_navigation_sub = self.Memory.subscribe("navigation_switch")
        start_navigation_sub.signal.connect(self.callback_start_navigation)
        face_detection_sub = self.Memory.subscribe("FaceDetected")
        face_detection_sub.signal.connect(self.callback_face_detection)
        '''

        # Disable basic_awareness
        self.BasicAwa.setEnabled(False)

        # Set the tracker mode to head tracking
        # -------------------------- possibly unnecessary ---------------------------
        self.Tracker.setMode("Head")

        # Initialize the tablet
        self.TabletSer.cleanWebview()

        # Subscribe to the cameras
        self.rgb_top = self.VideoDev.subscribeCamera('rgb_t', 0, 2, 11, 40)
        self.depth = self.VideoDev.subscribeCamera('dep', 2, 1, 17, 20)

        # Set the dialog language
        self.Dialog.setLanguage("English")
        '''
        # Load the topic stored on pepper
        self.Topic_path = '/home/nao/top/competetion_enu.top'
        # Encode as utf-8
        self.Topic_path = self.Topic_path.decode('utf-8')
        # ======================== test whether removing the utf-8 line works ========================
        self.Topic_name = self.Dialog.loadTopic(self.Topic_path.encode('utf-8'))
        '''
        # Initialize the head position
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., -0.25], .05)
        self.TextToSpe.setLanguage("English")

        # Set the speaking speed
        self.TextToSpe.setParameter("speed", 70.0)

        print self.AutonomousLife.getState()
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        self.RobotPos.goToPosture("Stand", .5)


        # Announce the riddle game
        self.annouce()

        # Wait a moment
        time.sleep(1)

        # Turn around to locate the crowd
        # self.turn_to_find()


        # if self.AutonomousLife.getState() != "solitary":
        #     self.AutonomousLife.setState("solitary")
        #time.sleep(10)
        # Gender detection
        # self.face_detection_sub = self.Memory.subscriber("FaceDetected")
        # self.face_detection_sub.signal.connect(self.on_FaceDetected)
        # while self.is_Face_detected == False:
        #     print("no person detected")
        #     time.sleep(3)
        # # Start gender detection
        # self.take_pic = PhotoCapture(self.session)
        # self.take_pic.takepic()
        # cmd = 'sshpass -p kurakura326 scp nao@' + str(self.ip) + ":/home/nao/picture/image_0.jpg ./person_image.jpg"
        # os.system(cmd)
        # self.num_man, self.num_woman = gender_predict.gender("./person_image.jpg")
        # self.TextToSpe.say("There are " + str(self.num_woman + self.num_man) + "people")
        # self.TextToSpe.say("the number of man is " + str(self.num_man))
        #
        # self.TextToSpe.say("the number of woman is " + str(self.num_woman))
        #
        # self.dialog = dialog_answer.Dialog(self.session,'/home/nao/top/dialog.top')

        #self.gender_detection()
        #self.Memory.removeData("WavingDetection/Waving")
        # Invite the operator

        # Sound source localization
        self.sound_localization()



        # while 1:
        #     a = self.Memory.getData("WavingDetection/Waving")
        #     b = self.Memory.getData("WavingDetection/PersonWaving")
        #     c = self.Memory.getData("WavingDetection/PersonWavingCenter")
        #     print a
        #     print b
        #     print c
        #     time.sleep(1)

        # Waving detection
        # while 1:
        #     self.waving_detection()
        #     time.sleep(1)

        # while 1:
        #     self.movement_detection()

        time.sleep(100)
    def run(self):

        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--ip",
            type=str,
            default=ip,
            help="Robot IP address. On robot or Local Naoqi: use ip.")
        parser.add_argument("--port",
                            type=int,
                            default=9559,
                            help="Naoqi port number")
        args = parser.parse_args()
        session = qi.Session()
        try:
            session.connect("tcp://" + args.ip + ":" + str(args.port))
        except RuntimeError:
            print(
                "Can't connect to Naoqi at ip \"" + args.ip + "\" on port " +
                str(args.port) + ".\n"
                "Please check your script arguments. Run with -h option for help."
            )
            sys.exit(1)

        motion_service = session.service("ALMotion")
        name1 = "Body"
        name2 = "LAnkleRoll"
        name3 = "RAnkleRoll"
        frame = motion.FRAME_ROBOT
        useSensors = True
        useSensorValues = True
        #------------
        while True:
            self.lock.acquire()
            if self.data.full():
                for j in range(25):
                    val = self.data.get()
                    if val == 'quit':
                        #postureProxy.goToPosture("StandInit", 0.2)
                        #dcm.setAlias(["UpHalfBodyStiffness","ClearAll","time-mixed",[[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]],[[-0.1, dcm.getTime(0)]]]])
                        #print "%s: %s is consuming. %s in the queue is consumed!\n" %(time.ctime(), self.getName(), val)
                        motionProxy.rest()
                        break
                    order[j] = val
                self.lock.release()
            else:
                self.lock.release()
                time.sleep(0.1)
                continue
            dcm.setAlias([
                "UpHalfBody", "ClearAll", "time-mixed",
                [[[float(order[0]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[1]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[2]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[3]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[4]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[5]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[6]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[7]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[8]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[9]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[10]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[11]) * almath.TO_RAD,
                   dcm.getTime(750)]], [[float(order[16]),
                                         dcm.getTime(750)]],
                 [[float(order[17]), dcm.getTime(750)]]]
            ])
            dcm.setAlias([
                "DownHalfBody", "ClearAll", "time-mixed",
                [[[float(order[12]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[13]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[14]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[15]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[18]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[19]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[20]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[21]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[22]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[22]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[23]) * almath.TO_RAD,
                   dcm.getTime(750)]],
                 [[float(order[24]) * almath.TO_RAD,
                   dcm.getTime(750)]]]
            ])

            f0 = file("H:\\angleList.txt", "a+")
            aaa = motion_service.getAngles("RKneePitch", useSensors)
            bbb = motion_service.getAngles("RHipRoll", useSensors)
            ccc = motion_service.getAngles("LShoulderPitch", useSensors)
            ddd = motion_service.getAngles("RShoulderPitch", useSensors)
            b = [str(aaa), ",", str(bbb), ",", str(ccc), ",", str(ddd), "\n"]
            f0.writelines(b)
            f0.close()

            f4 = file("H:\\angleOrder.txt", "a+")
            c = [
                str(float(order[15]) * almath.TO_RAD), ",",
                str(float(order[14]) * almath.TO_RAD), ",",
                str(float(order[3]) * almath.TO_RAD), ",",
                str(float(order[8]) * almath.TO_RAD), "\n"
            ]
            f4.writelines(c)
            f4.close()

            pos = motion_service.getCOM(name1, frame, useSensors)
            f1 = file("H:\\CoM.txt", "a+")
            a = [str(pos[0]), ",", str(pos[1]), ",", str(pos[2]), "\n"]
            f1.writelines(a)
            f1.close()

            pos1 = motion_service.getPosition(name2, frame, useSensorValues)
            f2 = file("H:\\LAnkleRoll.txt", "a+")
            f2.writelines(str(pos1))
            f2.writelines("\n")
            f2.close()

            pos2 = motion_service.getPosition(name3, frame, useSensorValues)
            f3 = file("H:\\RAnkleRoll.txt", "a+")
            f3.writelines(str(pos2))
            f3.writelines("\n")
            f3.close()

        postureProxy.goToPosture("StandInit", 0.2)
        motionProxy.rest()
        print "%s: %s finished!" % (time.ctime(), self.getName())
Example #12
    parser.add_argument("--port", type=int, default=9559, help="Naoqi port number") 

    args = parser.parse_args()

    return args

if __name__ == "__main__":

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    if args.model == ' ':
        raise IOError(('Error: Model not found.\n'))

    print "Initializing frcnn..."
    imageDetector = network.frcnnImageDetections(args)

    print "Initializing handtracking..."
    detection_graph, trackingSess = detector_utils.load_inference_graph()

    print "Starting naoqi session..."
    NAOQIsess = qi.Session()
    try:
        NAOQIsess.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)

    main(NAOQIsess, args.ip, args.port, imageDetector, detection_graph, trackingSess)
Example #13
    def __init__(self, logger, ip="192.168.1.101", port=9559):
        try:
            self.logger = logger
            self.logger.log("Initiating Robot")
            self.ip = ip
            self.port = port

            self.session = qi.Session()
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))

            #initialize robot abilities
            try:
                self._audio = audio_module.Audio(self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._say = speak_module.Say(self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._motion = motion_module.Motion(self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._posture = posture_module.Posture(self.session,
                                                       self.logger)
            except Exception as e:
                print(e)
            try:
                self._autolife = autonomouslife_module.AutonomousLife(
                    self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._leds = leds_module.Leds(self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._navigation = navigation_module.Navigation(
                    self.session, self.logger)
            except Exception as e:
                print(e)
            try:
                self._animation = animation_module.Animation(
                    self.session, self.logger)
            except Exception as e:
                print(e)
            # The following abilities do not work properly (or at all) with the virtual robot, so they are not initiated to avoid exceptions
            # "localhost" indicates the virtual robot
            if self.ip != "localhost":
                try:
                    self._camera = camera_module.Camera(
                        self.session, self.logger)
                except Exception as e:
                    print(e)
                try:
                    self._listen = listen_module.Listen(
                        self.session, self.logger)
                except Exception as e:
                    print(e)
                try:
                    self._tablet = tablet_module.Tablet(
                        self.session, self.logger)
                except Exception as e:
                    print(e)
                try:
                    self._recharge = recharge_module.Recharge(
                        self.session, self.logger)
                except Exception as e:
                    print(e)

            else:
                self.logger.log(
                    "Not initiating certain features since virtual robot is used"
                )

            self.logger.log("Robot initiated")

        except Exception as e:
            self.logger.error_log("Robot initiation failed: " + str(e))
            raise e
Example #14
    def __init__(self, params):
        # Initialize the ROS node
        rospy.init_node("pepper_test")
        # Callback function on program exit
        atexit.register(self.__del__)

        self.ip = params['ip']
        self.port = params['port']
        self.session = qi.Session()

        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider W] : connection Error!!")
            sys.exit(1)

        # Get the required services
        self.VideoDev = self.session.service("ALVideoDevice")
        self.FaceCha = self.session.service("ALFaceCharacteristics")
        self.Memory = self.session.service("ALMemory")
        self.Dialog = self.session.service("ALDialog")
        self.AnimatedSpe = self.session.service("ALAnimatedSpeech")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.TabletSer = self.session.service("ALTabletService")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.Motion = self.session.service("ALMotion")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.Tracker = self.session.service("ALTracker")

        # Disable basic_awareness
        self.BasicAwa.setEnabled(False)

        # Set the tracker mode to head tracking
        self.Tracker.setMode("Head")

        # Initialize the tablet
        self.TabletSer.cleanWebview()
        # Initialize the waypoints
        self.PepperPosition = PoseStamped()
        # Position of the recommended product
        self.Point_A = MoveBaseGoal()
        self.Point_A.target_pose.header.frame_id = '/map'
        self.Point_A.target_pose.header.stamp = self.PepperPosition.header.stamp
        self.Point_A.target_pose.header.seq = self.PepperPosition.header.seq
        self.Point_A.target_pose.pose.position.x = 1.57589215035
        self.Point_A.target_pose.pose.position.y = -4.95822132707
        self.Point_A.target_pose.pose.position.z = .0
        self.Point_A.target_pose.pose.orientation.x = .0
        self.Point_A.target_pose.pose.orientation.y = .0
        self.Point_A.target_pose.pose.orientation.z = -0.996909852153
        self.Point_A.target_pose.pose.orientation.w = 0.0785541003461
        # Starting position
        self.Point_B = MoveBaseGoal()
        self.Point_B.target_pose.header.frame_id = '/map'
        self.Point_B.target_pose.header.stamp = self.PepperPosition.header.stamp
        self.Point_B.target_pose.header.seq = self.PepperPosition.header.seq
        self.Point_B.target_pose.pose.position.x = 3.48176515207
        self.Point_B.target_pose.pose.position.y = -4.7906732889
        self.Point_B.target_pose.pose.position.z = .0
        self.Point_B.target_pose.pose.orientation.x = .0
        self.Point_B.target_pose.pose.orientation.y = .0
        self.Point_B.target_pose.pose.orientation.z = -0.99958660438
        self.Point_B.target_pose.pose.orientation.w = 0.0287510059817

        # Set the output volume
        # Cameras
        video_subs_list = ['rgb_t_0', 'rgb_b_0', 'dep_0']

        for sub_name in video_subs_list:
            print sub_name, self.VideoDev.unsubscribe(sub_name)
        self.rgb_top = self.VideoDev.subscribeCamera('rgb_t', 0, 2, 11, 40)
        self.depth = self.VideoDev.subscribeCamera('dep', 2, 1, 17, 20)

        # Language
        self.Dialog.setLanguage("English")

        # Load the topic
        self.Topic_path = '/home/nao/top/WRS_pepper_enu.top'
        self.Topic_path = self.Topic_path.decode('utf-8')

        # Topic callback functions
        self.go_to_pointA_sub = self.Memory.subscriber("go_to_pointA")
        self.go_to_pointA_sub.signal.connect(self.callback_pointA)
        self.Bar_code_sub = self.Memory.subscriber("Bar_code")
        self.Bar_code_sub.signal.connect(self.callback_Bar_code)
        self.change_img_language_sub = self.Memory.subscriber("Info_Chinese")
        self.change_img_language_sub.signal.connect(self.callback_img_lan_CHN)
        self.record_item_sub = self.Memory.subscriber("record")
        self.record_item_sub.signal.connect(self.callback_record_item)
        self.pay_bill_sub = self.Memory.subscriber("pay_bill")
        self.pay_bill_sub.signal.connect(self.callback_pay_bill)
        self.finish_sub = self.Memory.subscriber("finish")
        self.finish_sub.signal.connect(self.callback_finish)
        self.recommend_sub = self.Memory.subscriber("recommend_start")
        self.recommend_sub.signal.connect(self.callback_recommend)
        self.hide_image_sub = self.Memory.subscriber("hide_image")
        self.hide_image_sub.signal.connect(self.callback_hide_image)
        self.half_price_sub = self.Memory.subscriber("half_price")
        self.half_price_sub.signal.connect(self.callback_half_price)
        self.face_start_sub = self.Memory.subscriber("start_face")
        self.face_start_sub.signal.connect(self.start_face_recog)
        self.japan_sub = self.Memory.subscriber("japan")
        self.japan_sub.signal.connect(self.callback_japan)
        # Autonomous speaking mode
        self.configure = {"bodyLanguageMode": "contextual"}

        # ROS action client and publishers
        self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                   MoveBaseAction)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                               GoalID,
                                               queue_size=1)
        self.nav_as.wait_for_server()

        self.transform = TransformListener()
        self.transformer = Transformer(True, rospy.Duration(10))
        self.goal_pub = rospy.Publisher('/move_base/current_goal',
                                        PoseStamped,
                                        queue_size=1)
        # Clear the costmap
        self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                                Empty)
        self.map_clear_srv()

        # Connect to the database
        self.db = MySQLdb.connect("localhost", "root", "'", "WRS_PEPPER")

        # Prices corresponding to the product barcodes
        self.price = {
            "jam": 8,
            "tea": 3,
            "bread": 5,
            "juice": 6,
            "potato chips": 7,
            "beef": 10,
            "instant noodles": 5
        }
        self.price_sum = 0

        # Variable values
        self.filename = "/home/jiashi/Desktop/1538483080.png"

        # Current order number
        self.order_id = 6
        self.month = 10

        # Flags for whether the dialog is stopped
        self.if_stop_dia = False
        self.if_start_dia = False
        self.Pause_dia = False

        # Initialize the head position
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., -0.25], .05)

        # JavaScript to execute
        self.java_script = ""

        # Set the speaking speed
        self.TextToSpe.setParameter("speed", 80.0)

        # Angle of pepper's head
        self.angle = -.33

        # First item
        self.first_item = 1
        self.state = True
        self.start_head = True

        # Call member functions
        self.set_volume(1)
        self.cal_user_similarity()
        self.close_auto_life()
        self.Topic_name = self.Dialog.loadTopic(
            self.Topic_path.encode('utf-8'))
        self.Dialog.activateTopic(self.Topic_name)
        self.show_image("welcome.jpg")
        self.start_head_fix()
        self.start_dialog()
        self.activate_keyboard_control()
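
The ALMemory events subscribed above are mapped to navigation and dialog actions, but the callbacks themselves are not part of this excerpt. A hypothetical sketch of the "go_to_pointA" handler, reusing the nav_as, map_clear_srv and Point_A members defined in the constructor:

    def callback_pointA(self, msg):
        # Hypothetical handler (not in the original excerpt): clear the costmap,
        # announce the move, then send the predefined goal to move_base.
        self.map_clear_srv()
        self.TextToSpe.say("I will take you there")
        self.nav_as.send_goal(self.Point_A)
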
Example #15
    def __init__(self, params):
        # Initialize the ROS node
        rospy.init_node("help_me_carry")
        # Enter __del__ when the program exits
        atexit.register(self.__del__)
        # Initialize pepper's ip and port
        self.ip = params["ip"]
        self.port = params["port"]
        self.session = qi.Session()
        # Try to connect to pepper
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)
        #    naoqi APIs needed
        self.Leds = self.session.service("ALLeds")
        self.Memory = self.session.service("ALMemory")
        self.Dialog = self.session.service("ALDialog")
        self.Motion = self.session.service("ALMotion")
        self.Tracker = self.session.service("ALTracker")
        self.BehaviorMan = self.session.service("ALBehaviorManager")
        self.VideoDev = self.session.service("ALVideoDevice")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.AudioPla = self.session.service("ALAudioPlayer")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.AudioRec = self.session.service("ALAudioRecorder")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.TabletSer = self.session.service("ALTabletService")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.AnimatedSpe = self.session.service("ALAnimatedSpeech")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        # Reduce the safety distances
        self.Motion.setTangentialSecurityDistance(.05)
        self.Motion.setOrthogonalSecurityDistance(.1)

        # Stop recording
        try:
            self.AudioRec.stopMicrophonesRecording()
        except BaseException:
            print(
                "\033[0;33m\t[Kamerider W] : You don't need stop record\033[0m"
            )
        # Audio recording function
        self.thread_recording = Thread(target=self.record_audio, args=(None, ))
        self.thread_recording.daemon = True
        self.audio_terminate = False
        self.if_stop_follow = False
        self.if_need_head = True
        self.if_need_hand = True
        # ROS subscribers and publishers
        self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                   MoveBaseAction)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.init_pose_pub = rospy.Publisher('/initialpose',
                                             PoseWithCovarianceStamped,
                                             queue_size=1)
        self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                               GoalID,
                                               queue_size=1)
        self.nav_as.wait_for_server()
        # Clear the costmap
        self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                                Empty)
        self.map_clear_srv()
        # Declare some variables
        self.angle = -0.4
        self.if_need_record = False
        self.point_dataset = self.load_waypoint("waypoints_help.txt")
        #    LED group
        self.led_name = [
            "Face/Led/Blue/Right/0Deg/Actuator/Value",
            "Face/Led/Blue/Right/45Deg/Actuator/Value",
            "Face/Led/Blue/Right/90Deg/Actuator/Value",
            "Face/Led/Blue/Right/135Deg/Actuator/Value",
            "Face/Led/Blue/Right/180Deg/Actuator/Value",
            "Face/Led/Blue/Right/225Deg/Actuator/Value",
            "Face/Led/Blue/Right/270Deg/Actuator/Value",
            "Face/Led/Blue/Right/315Deg/Actuator/Value",
            "Face/Led/Blue/Left/0Deg/Actuator/Value",
            "Face/Led/Blue/Left/45Deg/Actuator/Value",
            "Face/Led/Blue/Left/90Deg/Actuator/Value",
            "Face/Led/Blue/Left/135Deg/Actuator/Value",
            "Face/Led/Blue/Left/180Deg/Actuator/Value",
            "Face/Led/Blue/Left/225Deg/Actuator/Value",
            "Face/Led/Blue/Left/270Deg/Actuator/Value",
            "Face/Led/Blue/Left/315Deg/Actuator/Value"
        ]
        self.Leds.createGroup("MyGroup", self.led_name)
        # Disable basic_awareness
        if self.BasicAwa.isEnabled():
            self.BasicAwa.setEnabled(False)
        if self.BasicAwa.isRunning():
            self.BasicAwa.pauseAwareness()
        # Initialize the tablet
        self.TabletSer.cleanWebview()
        print(
            '\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
        # Initialize keywords
        self.stop = ["stop", "here is the car"]
        # Current timestamp (used as the camera subscriber name; each name can only be used 6 times)
        ticks = time.time()
        # 0 is the top camera; the last parameter is the fps
        self.VideoDev.subscribeCamera(str(ticks), 0, 2, 11, 40)
        # Set the dialog language
        self.TextToSpe.setLanguage("English")
        # Name of the tracking behavior
        self.track_behavior_name = "peopletrack-84f37e/behavior_1"
        # Path where the recorded audio is saved
        self.audio_path = '/home/nao/audio/record.wav'
        self.recog_result = "None"
        # Beep volume
        self.beep_volume = 70
        self.head_fix = True
        # Set the initial head position; keep the head fixed while walking
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., self.angle], .05)
        # Set the speaking speed
        self.TextToSpe.setParameter("speed", 80.0)
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        self.RobotPos.goToPosture("Stand", .5)
        # follow me function
        self.pepper_follow_me = pepper_follow.follow_me(self.session)
        # Initialize recording
        self.record_delay = 2
        self.speech_hints = []
        self.enable_speech_recog = True
        self.SoundDet.setParameter("Sensitivity", .4)
        self.SoundDet.subscribe('sd')
        self.SoundDet_s = self.Memory.subscriber("SoundDetected")
        self.SoundDet_s.signal.connect(self.callback_sound_det)
        # Call member functions
        self.set_volume(.7)
        self.show_image("instructions.png")
        self.keyboard_control()
Example #16
 def __init__(self, params):
     atexit.register(self.__del__)
     # pepper connection
     self.ip = params["ip"]
     self.port = params["port"]
     self.session = qi.Session()
     rospy.init_node("find_my_mate")
     try:
         self.session.connect("tcp://" + self.ip + ":" + str(self.port))
     except RuntimeError:
         print("[Kamerider E] : connection Error!!")
         sys.exit(1)
     self.car_pose = MoveBaseGoal()
     # naoqi API
     self.Leds = self.session.service("ALLeds")
     self.Memory = self.session.service("ALMemory")
     self.Dialog = self.session.service("ALDialog")
     self.Motion = self.session.service("ALMotion")
     self.AudioDev = self.session.service("ALAudioDevice")
     self.AudioPla = self.session.service("ALAudioPlayer")
     self.PhotoCap = self.session.service("ALPhotoCapture")
     self.VideoDev = self.session.service("ALVideoDevice")
     self.RobotPos = self.session.service("ALRobotPosture")
     self.TextToSpe = self.session.service("ALTextToSpeech")
     self.AudioRec = self.session.service("ALAudioRecorder")
     self.BasicAwa = self.session.service("ALBasicAwareness")
     self.TabletSer = self.session.service("ALTabletService")
     self.SoundDet = self.session.service("ALSoundDetection")
     self.AutonomousLife = self.session.service("ALAutonomousLife")
     # stop recording
     try:
         self.AudioRec.stopMicrophonesRecording()
     except BaseException:
         print("\033[0;33m\t[Kamerider W]ALFaceCharacteristics : You don't need stop record\033[0m")
     # Audio recording function
     self.thread_recording = Thread(target=self.record_audio, args=(None,))
     self.thread_recording.daemon = True
     self.audio_terminate = False
     # ROS subscribers and publishers
     self.nav_as = actionlib.SimpleActionClient("/move_base", MoveBaseAction)
     self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
     self.goal_cancel_pub = rospy.Publisher('/move_base/cancel', GoalID, queue_size=1)
     self.nav_as.wait_for_server()
     self.if_door_open = False
     # Clear the costmap
     self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
     self.map_clear_srv()
     self.Motion.setTangentialSecurityDistance(.05)
     self.Motion.setOrthogonalSecurityDistance(.1)
     #    LED group
     self.led_name = ["Face/Led/Green/Right/0Deg/Actuator/Value", "Face/Led/Green/Right/45Deg/Actuator/Value",
                      "Face/Led/Green/Right/90Deg/Actuator/Value", "Face/Led/Green/Right/135Deg/Actuator/Value",
                      "Face/Led/Green/Right/180Deg/Actuator/Value", "Face/Led/Green/Right/225Deg/Actuator/Value",
                      "Face/Led/Green/Right/270Deg/Actuator/Value", "Face/Led/Green/Right/315Deg/Actuator/Value",
                      "Face/Led/Green/Left/0Deg/Actuator/Value", "Face/Led/Green/Left/45Deg/Actuator/Value",
                      "Face/Led/Green/Left/90Deg/Actuator/Value", "Face/Led/Green/Left/135Deg/Actuator/Value",
                      "Face/Led/Green/Left/180Deg/Actuator/Value", "Face/Led/Green/Left/225Deg/Actuator/Value",
                      "Face/Led/Green/Left/270Deg/Actuator/Value", "Face/Led/Green/Left/315Deg/Actuator/Value"]
     self.Leds.createGroup("MyGroup", self.led_name)
     # Declare some variables
     # Subscriber for the amcl_pose topic
     self.amcl_pose_sb = None
     self.position_result = []
     self.get_image_switch = True
     self.to_find_person_name = []
     self.current_drink_name = []
     # Store the age and gender of the current speaker
     self.gender = "none"
     self.age = "none"
     # Store the category of each speech recognition result
     self.type = "none"
     # Store the result of each speech recognition
     self.audio_recog_result = "none"
     # Maximum number of approach attempts
     self.temp_reach_time = 0
     # Store the current person's skin color, clothing color and type
     self.lower_color = "none"
     self.lower_wear = "none"
     self.upper_color = "none"
     self.upper_wear = "none"
     self.skin_color  = "none"
     self.predefined_position = {
             # "bed":[0.752529621124,4.56971073151,0.0,0.0,0.875692237431,0.48286965664],
             # "shelf":[3.48698234558,5.21366643906,0.0,0.0,0.46922085657,0.88308085007],
             # "side table":[3.49552464485,4.00150775909,0.0,0.0,0.00539639848746,0.999985439336],
             # "sink":[4.11269855499,3.84168052673,0.0,0.0,0.86228371236,0.506425512192],
             # "dish washer":[4.36022758484,4.83986616135,0.0,0.0,-0.999998542378,0.00170740794843],
             # "fridge":[4.38763856888,5.69660758972,0.0,0.0,0.82926155938,0.558860685801],
             "trash bin":[4.34827899933,.21289503574,0.0,0.0,0.932419364911,0.361378095543],
             "tv":[6.94088697433,2.1938290596,0.0,0.0,0.387483707115,0.921876551779],
             "couch":[6.29112815857,-0.861555457115,0.0,0.0,-0.27312959294,0.961977247891],
             "arm chair":[7.84281349182,0.143020510674,0.0,0.0,-0.401161409027,0.916007382016]
             # "coat hanger":[1.34018230438,1.20021378994,0.0,0.0,0.905398107208,0.424563620042],
             # "desk":[2.42883086205,-0.940541505814,0.0,0.0,-0.704562317248,0.709642121857],
             # "kitchen table":[6.16149091721,3.65961480141,0.0,0.0,0.379552154941,0.92517034198],
             # "shoe rack":[0.840799331665,-0.702893853188,0.0,0.0,-0.95921175774,0.282688527912]
         }
     # Store what is next to the current person
     self.position = "none"
     self.positive_list = ["yes", "of course", "we do"]
     self.negative_list = ["no", "sorry", "we don't", "regret"]
     self.name_list = ["Amelia","Angel","Charlie","Ava","Hunter","Jack","Charlotte","Max",
                       "Noah","Oliver","Mia", "Parker","Sam","Thomas","William", "check", "Olivia", "even"]
     self.angle = -.1
     self.person_num = 1
     self.head_fix = True
     self.if_need_record = False
     self.point_dataset = self.load_waypoint("waypoints_help.txt")
     # Human detection
     self.human_detector = human_detection.human_detector()
     # Object detection
     # self.object_detection = object_detection()
     # Disable basic_awareness
     if self.BasicAwa.isEnabled():
         self.BasicAwa.setEnabled(False)
     if self.BasicAwa.isRunning():
         self.BasicAwa.pauseAwareness()
     # Disable AutonomousLife mode
     if self.AutonomousLife.getState() != "disabled":
         self.AutonomousLife.setState("disabled")
     # Initialize the tablet
     self.TabletSer.hideImage()
     print ('\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
     # Set the dialog language
     self.Dialog.setLanguage("English")
     # Path where the recorded audio is saved
     self.audio_path = '/home/nao/audio/record.wav'
     self.recog_result = "00"
     self.RobotPos.goToPosture("StandInit", .2)
     # Beep volume
     self.beep_volume = 70
     # Set the speaking speed
     self.TextToSpe.setParameter("speed", 75.0)
     # Initialize recording
     self.record_delay = 2.5
     self.speech_hints = []
     self.enable_speech_recog = True
     # 1 is the most sensitive
     self.SoundDet.setParameter("Sensitivity", .3)
     self.SoundDet.subscribe('sd')
     self.SoundDet_s = self.Memory.subscriber("SoundDetected")
     self.SoundDet_s.signal.connect(self.callback_sound_det)
     # Call member functions
     self.start_head_fix()
     self.set_volume(.7)
     self.show_image("instructions.png")
     self.keyboard_control()
Example #17
    def __init__(self, params):
        # Initialize the ROS node
        rospy.init_node("gpsr_node")
        # Callback function on program exit
        atexit.register(self.__del__)
        self.ip = params['ip']
        self.port = params['port']
        self.session = qi.Session()
        # Connect to pepper
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except Exception as e:
            print e
            # Print the error in red
            print("\033[0;30m\t[Kamerider E] : connection Error!!\033[0m")
            sys.exit(1)
        self.if_head_fix = True
        # Required naoqi services
        self.Leds = self.session.service("ALLeds")
        self.Dialog = self.session.service("ALDialog")
        self.Memory = self.session.service("ALMemory")
        self.Motion = self.session.service("ALMotion")
        self.AudioPla = self.session.service("ALAudioPlayer")
        self.VideoDev = self.session.service("ALVideoDevice")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.AudioRec = self.session.service("ALAudioRecorder")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.TabletSer = self.session.service("ALTabletService")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.recog_result = "00"
        # Stop any ongoing recording
        try:
            self.AudioRec.stopMicrophonesRecording()
        except BaseException:
            print(
                "\033[0;33m\t[Kamerider W] : You don't need stop record\033[0m"
            )
        # LED group
        self.led_name = [
            "Face/Led/Blue/Right/0Deg/Actuator/Value",
            "Face/Led/Blue/Right/45Deg/Actuator/Value",
            "Face/Led/Blue/Right/90Deg/Actuator/Value",
            "Face/Led/Blue/Right/135Deg/Actuator/Value",
            "Face/Led/Blue/Right/180Deg/Actuator/Value",
            "Face/Led/Blue/Right/225Deg/Actuator/Value",
            "Face/Led/Blue/Right/270Deg/Actuator/Value",
            "Face/Led/Blue/Right/315Deg/Actuator/Value",
            "Face/Led/Blue/Left/0Deg/Actuator/Value",
            "Face/Led/Blue/Left/45Deg/Actuator/Value",
            "Face/Led/Blue/Left/90Deg/Actuator/Value",
            "Face/Led/Blue/Left/135Deg/Actuator/Value",
            "Face/Led/Blue/Left/180Deg/Actuator/Value",
            "Face/Led/Blue/Left/225Deg/Actuator/Value",
            "Face/Led/Blue/Left/270Deg/Actuator/Value",
            "Face/Led/Blue/Left/315Deg/Actuator/Value"
        ]
        self.Leds.createGroup("MyGroup", self.led_name)
        self.if_need_record = False

        # Audio-recording thread
        self.thread_recording = Thread(target=self.record_audio, args=(None, ))
        self.thread_recording.daemon = True
        self.audio_terminate = False
        # ROS subscribers and publishers
        # Declare some variables
        self.point_init = MoveBaseGoal()
        self.point_dataset = self.load_waypoint("waypoints_gpsr.txt")

        rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped,
                         self.amcl_callback)

        # Disable basic_awareness
        if self.BasicAwa.isEnabled():
            self.BasicAwa.setEnabled(False)
        if self.BasicAwa.isRunning():
            self.BasicAwa.pauseAwareness()
        # Initialize the tablet
        self.TabletSer.cleanWebview()
        self.TabletSer.hideImage()
        # Initialize recording
        self.record_delay = 2
        self.speech_hints = []
        self.enable_speech_recog = True
        self.SoundDet.setParameter("Sensitivity", .2)
        self.SoundDet.subscribe('sd')
        self.SoundDet_s = self.Memory.subscriber("SoundDetected")
        self.SoundDet_s.signal.connect(self.callback_sound_det)
        # Initialize keywords
        self.action = ["go to", "look for", "search", "Navigate"]
        # self.answer = ["answer" ]
        self.place = [
            "bathroom", "kitchen", "party room", "dining room", "living room",
            "bedroom"
        ]
        self.person_name = [
            "James", "Alex", "Ryan", "John", "Eric", "Adam", "Carter", "Jack",
            "David", "Tyler", "Lily", "Mary", "Anna", "Zoe", "Sara", "Sofia",
            "Faith", "Julia", "Paige", "Jessica", "jams", "job", "join",
            "games", "Brian", "fight on", "out her", "barry", "finally",
            "lady", "lilly", "marymy", "Sarah", "Sawyer", "Sophia", "Soviet",
            "face", "page", "check", "cheque", "addicts"
        ]
        self.object_name = [
            "grapejuice", "cola", "biscuit", "apple", "soap", "beer", "icetea",
            "chips", "bread", "tray", "plate", "basket", "milk tea", "cup",
            "pear", "orange", "banana"
        ]
        self.current_action = "none"
        self.current_place = "none"
        self.current_item = "none"
        self.current_person_name = "none"
        self.current_content = "none"
        # Subscribe to the camera
        # Current timestamp used as the subscriber name (each name can only be used 6 times)
        ticks = time.time()
        # 0 is the top camera; the last argument is the fps
        self.VideoDev.subscribeCamera(str(ticks), 0, 2, 11, 40)
        # Set the dialog language
        self.Dialog.setLanguage("English")
        # Load the dialog topic file stored on Pepper
        self.Topic_path = '/home/nao/top/competetion_enu.top'
        # Decode to unicode (re-encoded as utf-8 when the topic is loaded below)
        self.Topic_path = self.Topic_path.decode('utf-8')
        self.Topic_name = self.Dialog.loadTopic(
            self.Topic_path.encode('utf-8'))
        # Path where the recorded audio is saved
        self.audio_path = '/home/nao/audio/record.wav'
        # ROS subscribers and publishers
        self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                   MoveBaseAction)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                               GoalID,
                                               queue_size=1)
        self.nav_as.wait_for_server()
        # Clear the costmap
        self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                                Empty)
        self.map_clear_srv()
        # self.Motion.setTangentialSecurityDistance(.05)
        # self.Motion.setOrthogonalSecurityDistance(.1)
        # Beep volume
        self.beep_volume = 70
        self.head_fix = True
        self.question_num = 0
        self.angle = -.3
        # Set the initial head position; keep the head fixed while walking
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., -0.25], .05)
        # Set speaking speed
        self.TextToSpe.setParameter("speed", 80.0)
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        self.RobotPos.goToPosture("Stand", .5)
        # Call member functions
        self.start_head_fix()
        self.get_init_pose()
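    # load_waypoint and the navigation helpers are not shown in this snippet. The
    # sketch below illustrates how one of the six-value waypoints loaded above
    # could be packed into a MoveBaseGoal and sent through the action client
    # created in this constructor; the helper name send_waypoint and the
    # [x, y, qx, qy, qz, qw] waypoint layout are assumptions.
    def send_waypoint(self, point):
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = point[0]
        goal.target_pose.pose.position.y = point[1]
        goal.target_pose.pose.orientation.x = point[2]
        goal.target_pose.pose.orientation.y = point[3]
        goal.target_pose.pose.orientation.z = point[4]
        goal.target_pose.pose.orientation.w = point[5]
        # Clear stale costmap obstacles, then hand the goal to move_base
        self.map_clear_srv()
        self.nav_as.send_goal(goal)
        self.nav_as.wait_for_result()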
Ejemplo n.º 18
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pip",
        type=str,
        default=os.environ['PEPPER_IP'],
        help="Robot IP address.  On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--pport",
                        type=int,
                        default=9559,
                        help="Naoqi port number")

    args = parser.parse_args()
    pip = args.pip
    pport = args.pport

    #Start working session
    session = qi.Session()
    try:
        session.connect("tcp://" + pip + ":" + str(pport))
    except RuntimeError:
        print(
            "Can't connect to Naoqi at ip \"" + pip + "\" on port " +
            str(pport) + ".\n"
            "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)

    #Starting services
    motion_service = session.service("ALMotion")

    #Example of how to get robot position in world.
    useSensorValues = False
    result = motion_service.getRobotPosition(useSensorValues)
    print "Initial Robot Position", result

    #This sequence of commands follow a square of 1m size
    #Move 1m in its X direction
    x = 1.0
    y = 0.0
    theta = 0.0
    motion_service.moveTo(x, y, theta)  #blocking function

    #Turn 90deg to the left
    x = 0.0
    y = 0.0
    theta = math.pi / 2
    motion_service.moveTo(x, y, theta)  #blocking function

    #Move 1m in its X direction
    x = 1.0
    y = 0.0
    theta = 0.0
    motion_service.moveTo(x, y, theta)  #blocking function

    #Turn 90deg to the left
    x = 0.0
    y = 0.0
    theta = math.pi / 2
    motion_service.moveTo(x, y, theta)  #blocking function

    #Move at 0.2m/s in the X direction during 5 seconds (=1m)
    x = 0.2
    y = 0.0
    theta = 0.0
    motion_service.move(
        x, y, theta)  #non-blocking function, Pepper will move forever!!

    time.sleep(5)
    motion_service.stopMove()  #stopping previous move

    #Turn 90deg to the left
    x = 0.0
    y = 0.0
    theta = math.pi / 2
    motion_service.moveTo(x, y, theta)  #blocking function

    #Move 1m in its X direction
    x = 1.0
    y = 0.0
    theta = 0.0
    motion_service.moveTo(x, y, theta)  #blocking function

    #Turn 90deg to the left
    x = 0.0
    y = 0.0
    theta = math.pi / 2
    motion_service.moveTo(x, y, theta)  #blocking function

    #time = 5.0
    #motion_service.moveTo(x, y, theta, time) #blocking function

    result = motion_service.getRobotPosition(useSensorValues)
    print "Final Robot Position", result
Ejemplo n.º 19
0
def main():
    """
    This example uses ALDialog methods.
    It's a short dialog session with one topic.
    """
    session = qi.Session()
    session.connect(robot_ip)
    #Using ALServices
    tts = session.service("ALTextToSpeech")
    motion_service = session.service("ALMotion")
    tracker_service = session.service("ALTracker")
    life_service = session.service("ALAutonomousLife")

    #First deactivate life
    life_service.setAutonomousAbilityEnabled("All", 0)

    # Check that the program on the robot is running
    tts.say("Pepeur Go")

    # Then, wake up.
    motion_service.wakeUp()

    # Add target to track.
    targetName = "Sound"
    [distance, confidence] = [0.6, 0.5]
    tracker_service.registerTarget(targetName, [distance, confidence])

    # set mode
    mode = "Move"
    tracker_service.setMode(mode)

    # Then, start tracker.
    tracker_service.track(targetName)

    print "ALTracker successfully started, now show your face to robot!"
    print "Use Ctrl+c to stop this script."

    # Check for the launch word
    speech = session.service("ALSpeechRecognition")
    speech.setVocabulary(["check", "tchèque", "topla"], 1)  # commas added: the original concatenated the three words into one string
    # NOTE: the recognized word is normally read from ALMemory's "WordRecognized"
    # key after subscribing to the engine; the call below follows the original
    # snippet and is unlikely to work as written.
    a = speech.WordRecognized("WordRecognized")
    print a
    if a == 1:
        check(tts, motion_service)
        a = 0

    try:
        while True:
            time.sleep(1)

    except KeyboardInterrupt:
        print
        print "Interrupted by user"
        print "Stopping..."

    # Stop tracker.
    tracker_service.stopTracker()
    tracker_service.unregisterAllTargets()

    #motion_service.rest()

    print "ALTracker stopped."
Ejemplo n.º 20
0
    def __init__(self, params):
        atexit.register(self.__del__)

        self.ip = params["ip"]
        self.port = params["port"]

        self.session = qi.Session()

        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)

        # Required naoqi services
        self.VideoDev = self.session.service("ALVideoDevice")
        self.FaceCha = self.session.service("ALFaceCharacteristics")
        self.FaceDet = self.session.service("ALFaceDetection")
        self.Memory = self.session.service("ALMemory")
        self.Dialog = self.session.service("ALDialog")
        self.AnimatedSpe = self.session.service("ALAnimatedSpeech")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.TabletSer = self.session.service("ALTabletService")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.Motion = self.session.service("ALMotion")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.Tracker = self.session.service("ALTracker")
        self.PeoplePer = self.session.service("ALPeoplePerception")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.SoundLoc = self.session.service("ALSoundLocalization")
        self.WavingDet = self.session.service("ALWavingDetection")
        self.MovementDet = self.session.service("ALMovementDetection")
        self.SpeakingMov = self.session.service("ALSpeakingMovement")

        # # Parameters that could be set
        # self.TextToSpe.setParameter("speed", 50.0)
        # self.TextToSpe.setLanguage("Chinese")
        # self.PeoplePer.resetPopulation()

        # Runtime section
        # Disable autonomous awareness
        print "---Disabling the AutonomousLife Mode---"
        self.BasicAwa.setEnabled(False)
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        print "---AutonomousLife Mode Disabled!---"

        # self.AnimatedSpe.say("I am saying the word testword !")

        # ttw = {"hello": ["hey", "yo", "testword"],
        #        "everything": ["everybody"] }
        # self.SpeakingMov.addTagsToWords(ttw)

        self.TextToSpe.setLanguage("English")
        self.TextToSpe.setParameter("pitchShift", 1.1)
        self.TextToSpe.setParameter("speed", 50)

        # self.Dialog.setLanguage("English")
        time.sleep(0)
        # Start the introduction
        print self.TextToSpe.getAvailableVoices()
        # self.TextToSpe.setParameter("pitchShift", 0.9)
        self.TextToSpe.setParameter("speed", 100)
        print self.TextToSpe.getParameter("defaultVoiceSpeed")
        self.AnimatedSpe.say("hello,I'm pepper,i'm the happiest boy here!")
Ejemplo n.º 21
0
 def __init__(self, params):
     atexit.register(self.__del__)
     # pepper connection
     self.ip = params["ip"]
     self.port = params["port"]
     self.session = qi.Session()
     rospy.init_node("where_is_this")
     try:
         self.session.connect("tcp://" + self.ip + ":" + str(self.port))
     except RuntimeError:
         print("[Kamerider E] : connection Error!!")
         sys.exit(1)
     self.car_pose = MoveBaseGoal()
     # naoqi API
     self.Leds = self.session.service("ALLeds")
     self.Memory = self.session.service("ALMemory")
     self.Dialog = self.session.service("ALDialog")
     self.Motion = self.session.service("ALMotion")
     self.AudioDev = self.session.service("ALAudioDevice")
     self.AudioPla = self.session.service("ALAudioPlayer")
     self.PhotoCap = self.session.service("ALPhotoCapture")
     self.VideoDev = self.session.service("ALVideoDevice")
     self.RobotPos = self.session.service("ALRobotPosture")
     self.TextToSpe = self.session.service("ALTextToSpeech")
     self.AudioRec = self.session.service("ALAudioRecorder")
     self.BasicAwa = self.session.service("ALBasicAwareness")
     self.TabletSer = self.session.service("ALTabletService")
     self.SoundDet = self.session.service("ALSoundDetection")
     self.AutonomousLife = self.session.service("ALAutonomousLife")
     # stop recording
     try:
         self.AudioRec.stopMicrophonesRecording()
     except BaseException:
         print(
             "\033[0;33m\t[Kamerider W]ALFaceCharacteristics : You don't need stop record\033[0m"
         )
     # Audio-recording thread
     self.thread_recording = Thread(target=self.record_audio, args=(None, ))
     self.thread_recording.daemon = True
     self.audio_terminate = False
     # ROS subscribers and publishers
     self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                MoveBaseAction)
     self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
     self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                            GoalID,
                                            queue_size=1)
     self.nav_as.wait_for_server()
     # Clear the costmap
     self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                             Empty)
     self.map_clear_srv()
     self.Motion.setTangentialSecurityDistance(.05)
     self.Motion.setOrthogonalSecurityDistance(.1)
     # LED group
     self.led_name = [
         "Face/Led/Green/Right/0Deg/Actuator/Value",
         "Face/Led/Green/Right/45Deg/Actuator/Value",
         "Face/Led/Green/Right/90Deg/Actuator/Value",
         "Face/Led/Green/Right/135Deg/Actuator/Value",
         "Face/Led/Green/Right/180Deg/Actuator/Value",
         "Face/Led/Green/Right/225Deg/Actuator/Value",
         "Face/Led/Green/Right/270Deg/Actuator/Value",
         "Face/Led/Green/Right/315Deg/Actuator/Value",
         "Face/Led/Green/Left/0Deg/Actuator/Value",
         "Face/Led/Green/Left/45Deg/Actuator/Value",
         "Face/Led/Green/Left/90Deg/Actuator/Value",
         "Face/Led/Green/Left/135Deg/Actuator/Value",
         "Face/Led/Green/Left/180Deg/Actuator/Value",
         "Face/Led/Green/Left/225Deg/Actuator/Value",
         "Face/Led/Green/Left/270Deg/Actuator/Value",
         "Face/Led/Green/Left/315Deg/Actuator/Value"
     ]
     self.Leds.createGroup("MyGroup", self.led_name)
     # Declare some variables
     self.place = ["TV", "sofa", "table", "bed", "tea table"]
     self.place_description = {
         "TV":
         "you need to turn round first then go ahead. and the TV is on your right",
         "sofa":
         "you need go out first and then turn right. Sofa is in front of you",
         "table":
         "you need to turn round first then go ahead. and the table is on your left",
         "bed":
         "you need go out first and then turn right. Bed is in front of you",
         "tea table":
         "you need to turn right first then go ahead. and the tea table is on your left"
     }
     # Subscriber for the amcl_pose topic
     self.amcl_pose_sb = None
     self.position_result = []
     self.get_image_switch = True
     self.to_find_person_name = []
     self.current_drink_name = []
     # Category of each speech-recognition result
     self.type = "none"
     # Result of each speech recognition
     self.audio_recog_result = "none"
     self.predefined_position = {
         'table': [0.8722, -0.3192, 0, 0, 0.950170776273, 0.311729844444],
         "sofa1": [2.629, -0.37998, 0, 0, 0.435683814323, 0.900099779989],
         "tea table":
         [2.8597, -1.0975, 0, 0, 0.435683814323, 0.900099779989],
         "sofa2": [3.82847, -1.03030, 0, 0, 0.435683814323, 0.900099779989],
         "chair": [4.66809, -1.6237, 0, 0, 0.435683814323, 0.900099779989],
         "bed": [3.14724, -3.78763, 0, 0, -0.709890396828, 0.704312164094],
         "TV": [1.34616, -2.70090, 0, 0, -0.896545546577, 0.4429515582]
     }
     # What is next to the current person
     self.old_drink = "none"
     self.angle = -.1
     self.person_num = 1
     # Number of laser measurements
     self.laser_num = 0
     self.head_fix = True
     # Whether the door is open
     self.if_door_open = False
     self.if_need_record = False
     self.point_dataset = self.load_waypoint("waypoints_help.txt")
     # Face detection
     self.detector = dlib.get_frontal_face_detector()
     # Disable basic_awareness
     if self.BasicAwa.isEnabled():
         self.BasicAwa.setEnabled(False)
     if self.BasicAwa.isRunning():
         self.BasicAwa.pauseAwareness()
     # Disable AutonomousLife mode
     if self.AutonomousLife.getState() != "disabled":
         self.AutonomousLife.setState("disabled")
     # Initialize the tablet
     self.TabletSer.hideImage()
     print('\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
     # Set the dialog language
     self.Dialog.setLanguage("English")
     # Path where the recorded audio is saved
     self.audio_path = '/home/nao/audio/record.wav'
     self.recog_result = "00"
     self.RobotPos.goToPosture("StandInit", .2)
     # Beep volume
     self.beep_volume = 70
     # Set speaking speed
     self.TextToSpe.setParameter("speed", 75.0)
     # Initialize recording
     self.record_delay = 2.5
     self.speech_hints = []
     self.enable_speech_recog = True
     # 1 is the most sensitive
     self.SoundDet.setParameter("Sensitivity", .4)
     self.SoundDet.subscribe('sd')
     self.SoundDet_s = self.Memory.subscriber("SoundDetected")
     self.SoundDet_s.signal.connect(self.callback_sound_det)
     # Call member functions
     self.start_head_fix()
     self.set_volume(.7)
     self.show_image("instructions.png")
     self.keyboard_control()
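 # The record_audio method targeted by thread_recording above is not included in
 # this snippet; the sketch below shows a typical minimal implementation on top
 # of ALAudioRecorder (the fixed delay and the front-microphone channel mask
 # (0, 0, 1, 0) are assumptions).
 def record_audio(self, hints):
     # Record the front microphone to the wav path set in __init__, wait, stop
     self.AudioRec.startMicrophonesRecording(self.audio_path, "wav", 16000, (0, 0, 1, 0))
     time.sleep(self.record_delay)
     self.AudioRec.stopMicrophonesRecording()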
Ejemplo n.º 22
0
    def __init__(self, params):
        #NaoQI
        self.ip = params['ip']
        self.port = params['port']
        self.session = qi.Session()

        self.match_images = {}

        ld = os.listdir('./object_images/')

        for fn in ld:
            if fn.split('.')[-1] != 'png' or len(fn.split('_')) != 3: continue
            on = fn.split('.')[0]
            img = cv2.imread('./object_images/' + fn)
            self.match_images[on] = img

        print 'matching objects loaded : ', self.match_images.keys()

        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("Connection Error!")
            sys.exit(1)
        self.video = self.session.service("ALVideoDevice")
        video_subs_list = [
            'detector_rgb_t_0', 'detector_pc_0', 'detector_dep_0'
        ]
        print self.video.getSubscribers()
        for sub_name in video_subs_list:
            print sub_name, self.video.unsubscribe(sub_name)

        self.rgb_top = self.video.subscribeCamera(
            'detector_rgb_t', 0, 2, 11,
            20)  #name, idx, resolution, colorspace, fps
        self.pc = self.video.subscribeCamera('detector_pc', 2, 1, 19, 20)
        self.depth = self.video.subscribeCamera('detector_dep', 2, 1, 17, 20)

        self.point_cloud = np.zeros((240, 320, 3))
        offset_x = -120
        offset_y = -160
        self.x_temp = -(np.tile(np.arange(240).reshape(240, 1),
                                (1, 320)).reshape(240, 320, 1) + offset_x)
        self.y_temp = -(np.tile(np.arange(320).reshape(1, 320),
                                (240, 1)).reshape(240, 320, 1) + offset_y)

        #self.thread_cloud = Thread(target=self.get_point_cloud, args=(None,))
        #self.thread_cloud.daemon = True
        #self.thread_cloud.start()
        time.sleep(1)
        print self.video.getSubscribers()

        #Darknet
        self.gg = tensorflow.Graph()
        with self.gg.as_default() as g:
            self.tfnet = TFNet(self.options)
        self.classes = open('cfg/coco.names', 'r').readlines()

        #ROS
        self.cvbridge = CvBridge()

        self.transform = TransformListener()
        self.transformer = Transformer(True, rospy.Duration(10.0))

        self.RGB_TOPIC = params['rgb_topic']
        self.OBJ_TOPIC = params['obj_topic']

        self.rgb_sub = rospy.Subscriber(self.RGB_TOPIC,
                                        Image,
                                        self.callback_image,
                                        queue_size=1)
        self.obj_pub = rospy.Publisher(self.OBJ_TOPIC,
                                       objs_array,
                                       queue_size=1)

        self.show = params['od_show']
        self.object_id = 0
        self.object_id_max = 999999
        self.msg_idx = 0
        self.msg_idx_max = 9999999
        if self.show:
            cv2.startWindowThread()
            cv2.namedWindow('objs')
        self.tttt = time.time()
        time.sleep(1)
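    # Reading frames from these subscriptions is not shown above; the sketch
    # below pulls one RGB frame from the 'detector_rgb_t' handle with the
    # standard ALVideoDevice call (the method name get_rgb_frame is an
    # assumption).
    def get_rgb_frame(self):
        # getImageRemote returns [width, height, layers, colorspace, ..., raw bytes at index 6]
        frame = self.video.getImageRemote(self.rgb_top)
        if frame is None:
            return None
        width, height = frame[0], frame[1]
        return np.frombuffer(bytearray(frame[6]), dtype=np.uint8).reshape(height, width, 3)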
Ejemplo n.º 23
0
def main():
    """
    I should put some doc here
    """
    #New session
    robot_session = qi.Session()
    robot_session.connect(robot_ip)

    #Using ALServices
    tts = robot_session.service("ALTextToSpeech")
    motion_service = robot_session.service("ALMotion")
    tracker_service = robot_session.service("ALTracker")
    life_service = robot_session.service("ALAutonomousLife")

    #First deactivate life
    life_service.setAutonomousAbilityEnabled("All", 0)

    # Check that the program on the robot is running
    tts.say("Dihé")

    # Then, wake up.
    motion_service.wakeUp()

    # Add target to track.
    targetName = "Sound"
    [distance, confidence] = [0.6, 0.5]
    tracker_service.registerTarget(targetName, [distance, confidence])

    # set mode
    mode = "Move"
    tracker_service.setMode(mode)

    # Then, start tracker.
    tracker_service.track(targetName)

    # Example showing how to set the stiffness to 1.0.
    # Beware, doing this could be dangerous, it is safer to use the
    #   stiffnessInterpolation method which takes a duration parameter.
    #    names  = 'Move'
    #    # If only one parameter is received, this is applied to all joints
    #    stiffnesses  = 1.0
    #    motion_service.setStiffnesses(names, stiffnesses)

    print "ALTracker successfully started, now show your face to robot!"
    print "Use Ctrl+c to stop this script."

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print
        print "Interrupted by user"
        print "Stopping..."

    # Stop tracker.
    tracker_service.stopTracker()
    tracker_service.unregisterAllTargets()

    #motion_service.rest()

    print "ALTracker stopped."
Ejemplo n.º 24
0
    def init(self):

        print "init qi session...",
        self.session = qi.Session()
        self.session.connect("tcp://" + self.PEPPER_IP + ":9559")
        print "done"

        print "disabling autonomous abilities..."
        self.autonomousLifeProxy = self.session.service("ALAutonomousLife")
        # self.autonomousLifeProxy.setAutonomousAbilityEnabled("BasicAwareness", False)
        # self.autonomousLifeProxy.setAutonomousAbilityEnabled("BackgroundMovement", False)
        # self.autonomousLifeProxy.setAutonomousAbilityEnabled("SpeakingMovement", False)
        self.autonomousLifeProxy.setAutonomousAbilityEnabled(
            "ListeningMovement", False)
        # self.setTrackingEnabled
        print "basic ok"

        self.memoryProxy = ALProxy("ALMemory")
        print "memory ok"

        self.tts = ALProxy("ALTextToSpeech")
        print "tts ok"
        # tts is a proxy (to the ALTextToSpeech module); say is the method
        self.tts.say("Hola")

        self.cameraProxy = self.session.service("ALVideoDevice")
        self.cameraClient = self.cameraProxy.subscribe("python_GVM",
                                                       self.resolution,
                                                       self.colorSpace,
                                                       self.fps)
        print "internal camera setup ok"

        # Get the service ALFaceDetection.
        self.faceService = self.session.service("ALFaceDetection")
        # Enable or disable tracking.

        self.faceService.setTrackingEnabled(self.trackingEnabled)
        # Just to make sure correct option is set.
        print "Is tracking now enabled on the robot?", self.faceService.isTrackingEnabled(
        )

        self.toggleCamera(0)
        self.startCameraService()
        print "internal camera ok"

        self.tabletProxy = self.session.service("ALTabletService")
        print "tablet ok"

        self.tabletProxy.showWebview()

        # function called when the signal onJSEvent is triggered
        # by the javascript function ALTabletBinding.raiseEvent(name)
        # Interrupt handler invoked when the JS event fires
        def callbackJS(event):

            emotionsRecord = self.emotionsArray
            print "emotionsRecord is " + str(emotionsRecord)
            #work with it
            # https://stackoverflow.com/questions/6252280/find-the-most-frequent-number-in-a-numpy-vector/6252400
            if len(emotionsRecord) > 0:
                # https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
                emotionsRepetitions = [0] * 7
                # print "emotionsRepetitions before" + str(emotionsRepetitions)
                # https://stackoverflow.com/questions/35751306/python-how-to-pad-numpy-array-with-zeros
                # indices go from 0 to 6 (7 is excluded) --> https://docs.python.org/2/tutorial/introduction.html
                emotionsRepetitions[:len(np.bincount(emotionsRecord))] = np.bincount(emotionsRecord)
                print "emotionsRepetitions content: " + str(emotionsRepetitions)
                emotionsMostFrequent = np.argmax(emotionsRepetitions)
                print "the most frequent emotion is " + self.classes[emotionsMostFrequent] + "\n"
                # split the seven emotion classes into positive and negative groups
                positiveEmotions = emotionsRepetitions[3] + emotionsRepetitions[5] + emotionsRepetitions[6]
                negativeEmotions = emotionsRepetitions[0] + emotionsRepetitions[1] + emotionsRepetitions[2] + emotionsRepetitions[4]
            else:
                positiveEmotions = 0
                negativeEmotions = 0

            print "received an event", event
            toks = event.split(":")

            # this makes us leave the spin loop and shut down the robot
            if toks[0] == "exit" and toks[1] == "true":
                self.state = -1

            elif toks[0] == "readText":
                text = toks[1]
                self.tts.say(text)

            elif toks[0] == "finishedPage":

                currentPage = toks[1]

                if len(toks) == 2:
                    # selection based on the detected emotions:
                    # if they are mostly positive we take one page; otherwise the other
                    # self.emotionsArray.clear()
                    print "positive emotions = " + str(positiveEmotions)
                    print "negative emotions = " + str(negativeEmotions)

                    if negativeEmotions > positiveEmotions:
                        selection = "B"
                    else:
                        selection = "A"

                    # del self.emotionsArray [:]

                elif len(toks) == 3:
                    selection = toks[2]

                # toks[1] is the page we are leaving and nextPage the option taken;
                # together they form the name of the next page to visit

                script = "window.location.href = 'http://172.18.33.108/" + currentPage + selection + ".html'"
                self.tabletProxy.executeJS(script)
                sleep(5)

            # self.emotionsArray.clear()
            # list.clear() only exists from Python 3.3
            # https://stackoverflow.com/questions/39944586/clear-for-list-not-working-python/39944686
            del self.emotionsArray[:]

        self.tabletProxy.onJSEvent.connect(callbackJS)

        # Function that loads the JS program hosted on the server (simple-game) and runs it on Pepper's tablet
        script = "window.location.href = 'http://172.18.33.108/'"
        self.tabletProxy.executeJS(script)
        sleep(5)

        self.recognizeEmotions()
        print "emotion recognition ok"

        print "all robot services are ok"
Ejemplo n.º 25
0
# -*- coding: utf-8 -*-
import time
import sys
import keyboard
import qi
IP = "192.168.43.100"
Port = 9559
fileLoc = "/home/nao/Spill_non.top"
session = qi.Session("tcp://" + IP + ":" + str(Port))
tts = session.service("ALTextToSpeech")
tts.setLanguage("Norwegian")
#asr = ALProxy("ALSpeechRecognition", IP, Port)
#asr.setLanguage("Norwegian")
faceProxy = session.service("ALFaceDetection")
memoryProxy = session.service("ALMemory")
period = 500
faceProxy.subscribe("Test_Face", period, 0.0)
memValue = "FaceDetected"
ALDialog = session.service("ALDialog")
ALDialog.setLanguage("Norwegian")
asdf = ""


class _GetChWindows:
    def __init__(self):
        import msvcrt

    def __call__(self):
        import msvcrt
        if msvcrt.kbhit():
            while msvcrt.kbhit():
                ch = msvcrt.getch()  # assumed completion: drain buffered keypresses (the original snippet is cut off here)
            return ch
Ejemplo n.º 26
0
    def __init__(
        self,
        settings={
            'name': "NAO",
            'ip': '192.168.0.100',
            'port': 9559,
            'UseSpanish': True,
            'MotivationTime': 300000000,
            'HeartRate_Lim': 140,
            'Cerv_Lim': 0,
            'Thor_Lim': 0
        }):

        self.settings = settings
        self.ip = self.settings['ip']
        self.port = self.settings['port']
        self.useSpanish = self.settings['UseSpanish']
        self.patient = None

        self.session = qi.Session()
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))

        except RuntimeError:
            logging.debug(
                "Can't connect to Naoqi at ip \"" + self.ip + "\" on port " +
                str(self.port) + ".\n"
                "Please check your script arguments. Run with -h option for help."
            )
            sys.exit(1)

        self.robotName = self.settings['name']
        self.HR_lim = self.settings['HeartRate_Lim']
        self.Cer_lim = self.settings['Cerv_Lim']
        self.Thor_lim = self.settings['Thor_Lim']
        self.dialogs = dialogs.Dialogs()

        self.go_on = True
        self.soun_ON = None

        # Updating the services of the robot

        #Load memory services to recognize events
        self.memory = self.session.service("ALMemory")

        #Load AlTextToSpeech
        self.tts = self.session.service("ALTextToSpeech")
        self.setLanguage('Spanish')
        # Load animated speech service
        self.animatedSpeechProxy = self.session.service("ALAnimatedSpeech")
        # Load motion service
        self.motion = self.session.service("ALMotion")
        #Load posture service
        self.posture = self.session.service("ALRobotPosture")
        #Load tracker service
        self.tracker = self.session.service("ALTracker")

        # Create the sound tracker of the robot
        self.soundTracker = soundTracker.Sound_Detector()

        # Load behaviors services
        self.behavior_mng_service = self.session.service("ALBehaviorManager")

        #names = self.behavior_mng_service.getInstalledBehaviors()
        #print "Behaviors on the robot:"
        #print names
        #Call launchDialogs function
        self.flag_Sound = True

        self.launchDialogs()
Ejemplo n.º 27
0
from PIL import Image
from tqdm import tqdm
import qi
import numpy as np
import cv2 as cv
import os
os.chdir('/home/robocup/many_chessboards')  #
IP = '192.168.1.15'
PORT = 9559
resolution = 2  # VGA
CameraIndex = 1  # bottom camera
colorSpace = 11
ses = qi.Session()
ses.connect(IP)
YAW, PITCH = 0, 0
video = ses.service('ALVideoDevice')
motionProxy = ses.service('ALMotion')
postureProxy = ses.service('ALRobotPosture')
videoClient = video.subscribeCamera("python_client", CameraIndex, resolution,
                                    colorSpace, 5)

GENERATE_IMAGES = True
if GENERATE_IMAGES:

    IP = '192.168.1.5'
    PORT = 9559
    resolution = 2  # VGA
    colorSpace = 11
    X, Y = 50, 50
    for YAW in tqdm(np.arange(-0.5, 0.5, 0.05)):
        for PITCH in np.arange(-0.6, 0.5, 0.03):
            # Assumed loop body -- the original snippet is cut off here:
            # point the head at each yaw/pitch, grab a frame and save it.
            motionProxy.setAngles(["HeadYaw", "HeadPitch"], [YAW, PITCH], 0.2)
            frame = video.getImageRemote(videoClient)
            if frame is not None:
                img = np.frombuffer(bytearray(frame[6]), dtype=np.uint8).reshape(frame[1], frame[0], 3)
                cv.imwrite('yaw_%.2f_pitch_%.2f.png' % (YAW, PITCH), img)
Ejemplo n.º 28
0
    def __init__(self, params):
        atexit.register(self.__del__)
        # pepper connection
        self.ip = params["ip"]
        self.port = params["port"]
        self.session = qi.Session()
        rospy.init_node("cocktail_party")

        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)
        self.car_pose = MoveBaseGoal()
        # naoqi API
        self.Leds = self.session.service("ALLeds")
        self.Memory = self.session.service("ALMemory")
        self.Dialog = self.session.service("ALDialog")
        self.Motion = self.session.service("ALMotion")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.AudioPla = self.session.service("ALAudioPlayer")
        self.PhotoCap = self.session.service("ALPhotoCapture")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.AudioRec = self.session.service("ALAudioRecorder")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.TabletSer = self.session.service("ALTabletService")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        # stop recording
        try:
            self.AudioRec.stopMicrophonesRecording()
        except BaseException:
            print(
                "\033[0;33m\t[Kamerider W]ALFaceCharacteristics : You don't need stop record\033[0m"
            )
        # Audio-recording thread
        self.thread_recording = Thread(target=self.record_audio, args=(None, ))
        self.thread_recording.daemon = True
        self.audio_terminate = False
        # ROS subscribers and publishers
        self.nav_as = actionlib.SimpleActionClient("/move_base",
                                                   MoveBaseAction)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.goal_cancel_pub = rospy.Publisher('/move_base/cancel',
                                               GoalID,
                                               queue_size=1)
        self.nav_as.wait_for_server()
        # Clear the costmap
        self.map_clear_srv = rospy.ServiceProxy('/move_base/clear_costmaps',
                                                Empty)
        self.map_clear_srv()
        self.Motion.setTangentialSecurityDistance(.05)
        self.Motion.setOrthogonalSecurityDistance(.1)
        # amcl localization
        # rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.amcl_callback)
        # LED group
        self.led_name = [
            "Face/Led/Green/Right/0Deg/Actuator/Value",
            "Face/Led/Green/Right/45Deg/Actuator/Value",
            "Face/Led/Green/Right/90Deg/Actuator/Value",
            "Face/Led/Green/Right/135Deg/Actuator/Value",
            "Face/Led/Green/Right/180Deg/Actuator/Value",
            "Face/Led/Green/Right/225Deg/Actuator/Value",
            "Face/Led/Green/Right/270Deg/Actuator/Value",
            "Face/Led/Green/Right/315Deg/Actuator/Value",
            "Face/Led/Green/Left/0Deg/Actuator/Value",
            "Face/Led/Green/Left/45Deg/Actuator/Value",
            "Face/Led/Green/Left/90Deg/Actuator/Value",
            "Face/Led/Green/Left/135Deg/Actuator/Value",
            "Face/Led/Green/Left/180Deg/Actuator/Value",
            "Face/Led/Green/Left/225Deg/Actuator/Value",
            "Face/Led/Green/Left/270Deg/Actuator/Value",
            "Face/Led/Green/Left/315Deg/Actuator/Value"
        ]
        self.Leds.createGroup("MyGroup", self.led_name)
        # Declare some variables
        self.get_image_switch = True
        self.current_person_name = "none"
        self.current_drink_name = []
        self.gender = "none"
        self.old_drink = "none"
        self.positive_list = ["yes", "of course", "we do"]
        self.negative_list = ["no", "sorry", "we don't", "regret"]
        self.name_list = [
            "Alex", "Charlie", "Elizabeth", "Francis", "James", "Jennifer",
            "John", "Linda", "Michael", "Mary", "Robert", "Patricia", "Robin",
            "Skyler", "William"
        ]
        self.drink_list = [
            "ice tea", "beer", "coke", "milk", "orange juice", "toothpaste",
            "cookie", "shampoo", "chips", "green tea"
        ]
        self.stop_list = ["stop", "go back"]
        self.angle = -.1
        self.head_fix = True
        self.if_need_record = False
        self.point_dataset = self.load_waypoint("waypoints_help.txt")
        # Disable basic_awareness
        if self.BasicAwa.isEnabled():
            self.BasicAwa.setEnabled(False)
        if self.BasicAwa.isRunning():
            self.BasicAwa.pauseAwareness()
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        # Initialize the tablet
        self.TabletSer.hideImage()
        print(
            '\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
        # Set the dialog language
        self.Dialog.setLanguage("English")
        # Path where the recorded audio is saved
        self.audio_path = '/home/nao/audio/record.wav'
        self.recog_result = "00"
        self.RobotPos.goToPosture("StandInit", .2)
        print "getPostureList", self.RobotPos.getPostureList()
        print "getPosture", self.RobotPos.getPosture()
        print "getPostureFamily", self.RobotPos.getPostureFamily()
        print "getPostureFamilyList", self.RobotPos.getPostureFamilyList()
        # Beep volume
        self.beep_volume = 70
        # Set speaking speed
        self.TextToSpe.setParameter("speed", 75.0)
        # Initialize recording
        self.record_delay = 2.5
        self.speech_hints = []
        self.enable_speech_recog = True
        # 1 is the most sensitive
        self.SoundDet.setParameter("Sensitivity", .4)
        self.SoundDet.subscribe('sd')
        self.SoundDet_s = self.Memory.subscriber("SoundDetected")
        self.SoundDet_s.signal.connect(self.callback_sound_det)
        # Call member functions
        self.start_head_fix()
        self.set_volume(.7)
        self.keyboard_control()
Ejemplo n.º 29
0
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ip",
        type=str,
        default="134.214.50.49",
        help=
        "Robot's IP address. If on a robot or a local Naoqi - use '127.0.0.1' (this is the default value)."
    )
    parser.add_argument(
        "--port",
        type=int,
        default=9559,
        help="port number, the default value is OK in most cases")
    parser.add_argument(
        "--topic-path",
        type=str,
        #default='/fs03/share/users/thomas.lesbros/home/Documents/PM/Projet_Majeure/Robotcpe/Dialogue/test1.top',
        default='/home/nao/projects/WAN/Dialogue/test2.top',
        help="absolute path of the dialog topic file (on the robot)")

    args = parser.parse_args()
    session = qi.Session()
    try:
        session.connect("tcp://{}:{}".format(args.ip, args.port))
    except RuntimeError:
        print(
            "\nCan't connect to Naoqi at IP {} (port {}).\nPlease check your script's arguments."
            " Run with -h option for help.\n".format(args.ip, args.port))
        sys.exit(1)
    main(session, args.topic_path)
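# main itself is not part of this snippet; below is a minimal sketch of what it
# typically does with the topic path, using the standard ALDialog
# load/activate/subscribe calls (the subscriber name "dialog_demo" and the
# prompt are placeholders).
def main(session, topic_path):
    dialog = session.service("ALDialog")
    dialog.setLanguage("English")
    topic_name = dialog.loadTopic(topic_path.encode('utf-8'))
    dialog.activateTopic(topic_name)
    dialog.subscribe("dialog_demo")
    try:
        raw_input("Dialog running, press Enter to stop...\n")
    finally:
        # Always unwind the topic so the next run can load it again
        dialog.unsubscribe("dialog_demo")
        dialog.deactivateTopic(topic_name)
        dialog.unloadTopic(topic_name)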
Ejemplo n.º 30
0
    def __init__(self, params):
        atexit.register(self.__del__)
        # pepper connection
        self.ip = params["ip"]
        self.port = params["port"]
        self.session = qi.Session()
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
        except RuntimeError:
            print("[Kamerider E] : connection Error!!")
            sys.exit(1)

        # naoqi API
        self.Memory = self.session.service("ALMemory")
        self.Motion = self.session.service("ALMotion")
        self.AudioDev = self.session.service("ALAudioDevice")
        self.TabletSer = self.session.service("ALTabletService")
        self.AudioRec = self.session.service("ALAudioRecorder")
        self.BasicAwa = self.session.service("ALBasicAwareness")
        self.AutonomousLife = self.session.service("ALAutonomousLife")
        self.RobotPos = self.session.service("ALRobotPosture")
        self.Dialog = self.session.service("ALDialog")
        self.TextToSpe = self.session.service("ALTextToSpeech")
        self.SoundDet = self.session.service("ALSoundDetection")
        self.PhotoCap = self.session.service("ALPhotoCapture")
        # stop recording
        try:
            self.AudioRec.stopMicrophonesRecording()
        except BaseException:
            print("\033[0;32;40m\t[Kamerider W]ALFaceCharacteristics : You don't need stop record\033[0m")
        # Audio-recording thread
        self.thread_recording = Thread(target=self.record_audio, args=(None,))
        self.thread_recording.daemon = True
        self.audio_terminate = False
        # Declare some variables
        self.num_man = 0
        self.num_woman = 0
        self.angle = -.2
        self.if_need_record = False
        # Disable basic_awareness
        if self.BasicAwa.isEnabled():
            self.BasicAwa.setEnabled(False)
        if self.BasicAwa.isRunning():
            self.BasicAwa.pauseAwareness()
        # Initialize the tablet
        self.TabletSer.cleanWebview()
        print ('\033[0;32m [Kamerider I] Tablet initialize successfully \033[0m')
        # Set the dialog language
        self.Dialog.setLanguage("English")
        # Path where the recorded audio is saved
        self.audio_path = '/home/nao/audio/record.wav'
        self.recog_result = "None"
        # Beep volume
        self.beep_volume = 70
        # Set the initial head position; keep the head fixed while walking
        self.Motion.setStiffnesses("Head", 1.0)
        self.Motion.setAngles("Head", [0., -0.25], .05)
        # Set speaking speed
        self.TextToSpe.setParameter("speed", 80.0)
        # Disable AutonomousLife mode
        if self.AutonomousLife.getState() != "disabled":
            self.AutonomousLife.setState("disabled")
        self.RobotPos.goToPosture("Stand", .5)
        # Initialize recording
        self.record_delay = 2
        self.speech_hints = []
        self.enable_speech_recog = True
        self.SoundDet.setParameter("Sensitivity", .8)
        self.SoundDet.subscribe('sd')
        self.SoundDet_s = self.Memory.subscriber("SoundDetected")
        self.SoundDet_s.signal.connect(self.callback_sound_det)
        # Call member functions
        self.start_head_fix()
        self.set_volume(.7)
        self.keyboard_control()
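    # set_volume is not shown in this snippet; a minimal sketch assuming it simply
    # maps its 0-1 argument onto ALAudioDevice's 0-100 output volume scale.
    def set_volume(self, volume):
        self.AudioDev.setOutputVolume(int(volume * 100))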