Example #1
    def __init__(self):
        rospy.init_node('ava_voice_cmd')
        rospy.on_shutdown(self.cleanup)
        self.msg = Twist()
        self.social_cues = robot_api.Social_Cues()
        self.social_cues.express_neutral()
        self.lights = robot_api.Lights()
        self.sound_src = kuri_api.SoundSource('AvaVoiceCommand')
        self.sound_src_music = kuri_api.SoundSource('AvaVoiceCommand-music')
        self._base = robot_api.Base()
        self._head = robot_api.Head()
        # set head to neutral
        self._head.pan_and_tilt(0, -0.1)
        self._face_behavior = FaceBehavior()
        self._nav_waypoint = NavWaypoint()
        self._sleeping = False

        # Publish to cmd_vel (an empty Twist message stops the base); subscribe to the speech recognizer output
        self._pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        rospy.Subscriber('recognizer/asr_output', String, self.speechCb)

        # PROMPTING THREAD
        #self._kill_prompt_thread = False
        #threading.Thread(target=self._prompt_thread).start()

        self._log_set_state(REST)
        self._sound_dir = os.path.expanduser(
            '~') + "/catkin_ws/src/cse481wi19/ava_custom_audio"

        rospy.spin()
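The cleanup and speechCb handlers registered above are not shown in this example. A minimal sketch of what they might look like, assuming (per the comment above) that an empty Twist on cmd_vel stops the base; the 'stop' keyword check is hypothetical:

    def cleanup(self):
        # On shutdown, publish an empty Twist so the base stops moving.
        self._pub.publish(Twist())

    def speechCb(self, msg):
        # Hypothetical handler: stop the base when the recognizer hears "stop".
        if 'stop' in msg.data.lower():
            self._pub.publish(Twist())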
Example #2
    def __init__(self):
        rospy.init_node('face_detect_demo')
        self.wait_for_time()
        self.lights = robot_api.Lights()
        rospy.sleep(0.5)
        self.head = robot_api.FullBodyLookAt(
            tf_listener=tf.TransformListener())
        self.body = robot_api.Base()
        self.social_cues = robot_api.Social_Cues(move_head=False,
                                                 use_sounds=False)
        self.publisher = rospy.Publisher('face_pose_marker',
                                         Marker,
                                         queue_size=10)
        self.command_sub = rospy.Subscriber('move_command',
                                            Bool,
                                            queue_size=1,
                                            callback=self.command_callback)
        self.face_already_seen = False
        self.move_command = False
        self.num_faces = 0
        vision = robot_api.Vision()
        vision.activate("face_detector", config={"fps": 1})
        rospy.sleep(0.5)
        vision.wait_until_ready(timeout=10)
        vision.face_change.connect(self.face_callback)
        if SOCIAL_CUES:
            self.social_cues.express_neutral()
        rospy.spin()
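command_callback is wired to the move_command topic above but never shown; given the self.move_command flag, it is presumably a simple setter. A minimal sketch under that assumption:

    def command_callback(self, msg):
        # Latch the most recent move command (std_msgs/Bool carries a single .data field).
        self.move_command = msg.data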
Example #3
    def __init__(self):
        self.face_pub = rospy.Publisher('vision/most_confident_face_pos',
                                        PointStamped,
                                        queue_size=10)
        self.num_faces_pub = rospy.Publisher('vision/face_count',
                                             Int8,
                                             queue_size=10)
        self.lights = robot_api.Lights()
        self.head = robot_api.Head()
        self.lock = RLock()
        self.base = robot_api.Base()
        self.facesDemo = FaceInteractionDemo()
        rospy.sleep(0.5)
Example #4
    def __init__(self,
                 feedback_cb=None,
                 done_cb=None,
                 use_sounds=True,
                 move_head=True):
        self.head = robot_api.Head()
        self.lights = robot_api.Lights()
        self.use_sounds = use_sounds
        if self.use_sounds:
            self.sound_src = kuri_api.SoundSource('Ava')
        self.move_head = move_head
        self._feedback_cb = feedback_cb
        self._done_cb = done_cb

        self._sound_dir = os.path.expanduser(
            '~') + "/catkin_ws/src/cse481wi19/ava_custom_audio"
Example #5
    def __init__(self):

        # Initialize node
        rospy.init_node("luci_control")
        rospy.on_shutdown(self.shutdown)

        # Create a publisher for the grammar data and raw audio
        self.pub_grammar = rospy.Publisher("grammar_data",
                                           String,
                                           queue_size=10)
        self.pub_audio = rospy.Publisher("raw_audio", String, queue_size=10)

        # Subscribe to grammar data output
        rospy.Subscriber("grammar_data", String, self.parse_results)

        # Set language model and dictionary
        self.class_lm = 'corpus/luci.lm'
        self.dict = 'corpus/luci.dic'

        # Used in process_audio (from asr_test.py)
        self.in_speech_bf = False

        # Acoustic model directory (copied from /usr/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k)
        self.hmm = 'corpus/hmm'

        # Initialize the robot API
        self.lights = robot_api.Lights()
        self.expressions = robot_api.Expressions()
        self.sound_source = robot_api.SoundSource('Luci')

        # Initialize the ALSA audio PCM capture (16000 Hz, 16-bit encoding, 1 channel)
        self.inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
        self.inp.setchannels(1)
        self.inp.setrate(16000)
        self.inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        self.inp.setperiodsize(160)

        # All params satisfied; start the recognizer in its own thread
        t1 = Thread(target=self.start_recognizer)
        t1.start()

        # Spin up new thread to listen for raw audio input
        t2 = Thread(target=self._listen)
        t2.start()

        rospy.spin()
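_listen is started in its own thread above but not shown. A plausible sketch, assuming the PCM bytes read from ALSA are republished as a String on raw_audio (alsaaudio's PCM.read() returns a (length, data) tuple):

    def _listen(self):
        # Continuously read PCM frames and republish them on raw_audio.
        while not rospy.is_shutdown():
            length, data = self.inp.read()  # non-blocking; length is 0 when no frames are ready
            if length > 0:
                self.pub_audio.publish(String(data))
            rospy.sleep(0.01)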
Example #6
def main():
    rospy.init_node('gripper_demo')
    wait_for_time()
    argv = rospy.myargv()
    if len(argv) < 2:
        print_usage()
        return
    command = argv[1]

    lights = robot_api.Lights()

    if command == 'off':
        rospy.logerr('Not implemented.')
    elif command == 'on':
        rospy.logerr('Not implemented.')
    else:
        print_usage()
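Examples #6, #8, and #10 call wait_for_time() and print_usage() without defining them. Minimal sketches consistent with how they are used (the usage text and script name are guesses):

def wait_for_time():
    # Block until time is nonzero, i.e. /clock is being published in simulation.
    while rospy.Time().now().to_sec() == 0:
        pass


def print_usage():
    # Hypothetical usage text; the commands handled above are 'on' and 'off'.
    print('Usage: lights_demo.py on|off')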
Example #7
    def __init__(self):
        self.face_pub = rospy.Publisher('vision/most_confident_face_pos',
                                        PointStamped,
                                        queue_size=10)
        self.num_faces_pub = rospy.Publisher('vision/face_count',
                                             Int8,
                                             queue_size=10)
        self.lights = robot_api.Lights()
        self.head = robot_api.Head()
        self.lock = RLock()
        self.base = robot_api.Base()
        self.expressions = robot_api.Expressions()
        self.follow = False  # Indicates if Luci should follow someone
        self.face_exists = False  # Indicates if Luci confidently detects a face
        self.face_detection_counter = 0
        # Sleep briefly to give the other objects enough time to initialize
        rospy.sleep(0.5)
Example #8
def main():
    rospy.init_node('gripper_demo')
    wait_for_time()
    argv = rospy.myargv()
    if len(argv) < 2:
        print_usage()
        return
    command = argv[1]

    lights = robot_api.Lights()

    if command == 'off':
        lights.off()

    elif command == 'on':
        lights.put_pixels(lights.ALL_ON)

    else:
        print_usage()
Example #9
    def __init__(self):

        rospy.on_shutdown(self.shutdown)

        # Create a publisher for the grammar data and raw audio
        self.pub_grammar = rospy.Publisher("grammar_data", String, queue_size=10)
        self.pub_audio = rospy.Publisher("raw_audio", String, queue_size=10)

        self.alert_pub = rospy.Publisher('patient_monitor/alerts', Alert, queue_size=10)
        self.alert_client(1)
        #self.alert_client(2)
        #self.alert_client(3)
        # Subscribe to grammar data output
        rospy.Subscriber("grammar_data", String, self.parse_results)

        # Set language model and dictionary
        self.class_lm = 'corpus/final.lm'
        self.dict = 'corpus/final.dic'

        # Used in process_audio (from asr_test.py)
        self.in_speech_bf = False

        # Acoustic model directory (copied from /usr/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k)
        self.hmm = 'corpus/hmm'

        # Initialize the robot API
        self.lights = robot_api.Lights()
        self.expressions = robot_api.Expressions()
        self.sound_source = robot_api.SoundSource('Luci')

        # Initialize alsa audio PCM: (Using 16000Hz, 16 Bit Encoding, 1 Channel)
        self.inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
        self.inp.setchannels(1)
        self.inp.setrate(16000)
        self.inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        self.inp.setperiodsize(160)

        start_thread = Thread(target=self._start)
        start_thread.start()
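Unlike Example #5, which starts two threads explicitly, this version launches a single _start thread; it presumably wraps the same recognizer and listener loops. A sketch under that assumption:

    def _start(self):
        # Run the recognizer in the background and listen for raw audio here.
        Thread(target=self.start_recognizer).start()
        self._listen()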
Example #10
def main():
    rospy.init_node('gripper_demo')
    wait_for_time()
    argv = rospy.myargv()
    if len(argv) < 2:
        print_usage()
        return
    command = argv[1]
    lights = robot_api.Lights()

    if command == 'off':
        lights.off()
    elif command == 'on':
        lights.all_leds(lights.BLUE)
    elif command == 'color':
        count = 0
        color = [lights.RED, lights.BLUE, lights.GREEN]
        while not rospy.is_shutdown():
            num = count % 3
            lights.all_leds(color[num])
            count += 1
            rospy.sleep(0.5)  # throttle the loop so each color change is visible
    else:
        print_usage()
Example #11
    def __init__(self):
        self.lights = robot_api.Lights()
        self._listener = tf.TransformListener()
        self.head = robot_api.FullBodyLookAt(tf_listener=self._listener)
        self.body = robot_api.Base()
        self.nav_client = NavWaypoint()
        self.publisher = rospy.Publisher('face_pose_marker',
                                         Marker,
                                         queue_size=10)
        self.num_faces = 0
        self.count = 0
        self.face_point = None
        # self.talk_position = None
        self.look_at = True
        self.wait_for_time()
        vision = robot_api.Vision()
        vision.activate("face_detector", config={"fps": 1})
        rospy.sleep(0.5)
        vision.wait_until_ready(timeout=10)
        self.face_sub = rospy.Subscriber('vision/results',
                                         FrameResults,
                                         self.face_callback,
                                         queue_size=1)
Example #12
    def __init__(self):
        self.face_pub = rospy.Publisher('vision/most_confident_face_pos', PointStamped, queue_size=10)
        self.num_faces_pub = rospy.Publisher('vision/face_count', Int8, queue_size=10)
        self.lights = robot_api.Lights()
        self.head = robot_api.Head()
        self.lock = RLock()
        self.base = robot_api.Base()
        self.lastFacePos = None
        self.vision = robot_api.Vision()

        # Activate the Vision API's face detector
        self.vision.activate("face_detector", config={"fps": 3})
        rospy.sleep(0.5)
        self.vision.wait_until_ready(timeout=10)
        self.vision.req_mods([["activate", "face_detector", {"fps": 3}, {"skip_ratio": 3}]], [])

        # Register the vision and navigation callbacks
        rospy.Subscriber('vision/results', FrameResults, self.callback)
        rospy.Subscriber('vision/most_confident_face_pos', PointStamped, self.onFacePos)
        rospy.Subscriber('come_here_kuri', Int8, self.navigateTo)

        rospy.sleep(0.5)

        rospy.spin()
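onFacePos is subscribed above but not shown; given the lastFacePos field, it is likely just caching the latest detection. A minimal sketch:

    def onFacePos(self, msg):
        # Cache the most recent face position (a PointStamped) for navigateTo to use.
        self.lastFacePos = msg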
Example #13
    def __init__(self):
        self._head = robot_api.Head(None)
        self._lights = robot_api.Lights()
Example #14
    def all_leds(cls, color):
        # Turn all of the chest LEDs to `color` (taken from the comments above)
        chest_leds = robot_api.Lights()
        chest_leds.put_pixels([color] * Lights.NUM_LEDS)
        rospy.loginfo(cls)
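Example #10 invokes this method as lights.all_leds(lights.BLUE), so it presumably lives on the Lights class itself (the cls parameter suggests a @classmethod). Note that it constructs a fresh robot_api.Lights() on every call, re-creating the underlying publisher each time; caching one instance would be cheaper. A usage sketch, reusing the color constants seen in Example #10:

lights = robot_api.Lights()
lights.all_leds(lights.GREEN)  # RED, BLUE, and GREEN appear as Lights constants in Example #10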