Example #1
 def callback(self, msg):
     if msg.min_distance < 2.0:
         soundhandle = SoundClient()
         rospy.sleep(rospy.Duration(1))
         soundhandle.playWave(self.sound)
         rospy.sleep(rospy.Duration(2))
         soundhandle.stopAll()
Example #2
def callback(data):
	if str(data) == "data: Bark":
		soundhandle = SoundClient()
		rospy.sleep(1)
		soundhandle.play(1)
		pubshoot = rospy.Publisher('bark', String)
		pubshoot.publish(String("Fire"))
Example #3
def main():
    rospy.init_node(NAME)
    # Init Globals with their type
    global status_list
    status_list = []
    global cant_reach_list
    cant_reach_list = []

    global actionGoalPublisher

    # Speech stuff
    global soundClient
    soundClient = SoundClient()

    init_point2pont()
    actionStatus = rospy.Subscriber('move_base/status', GoalStatusArray, status_callback)
    actionGoalPublisher = rospy.Publisher('move_base/goal', MoveBaseActionGoal, queue_size=10)
    frontierSub = rospy.Subscriber('grid_frontier', GridCells, frontier_callback)
    rospy.wait_for_message('grid_frontier', GridCells)

    rospy.sleep(1)
    driveTo.goalID = getHighestStatus()

    soundClient.say("Starting Driving")
    global closestGoal
    while not rospy.is_shutdown():
        rospy.loginfo("Waiting for a little while")
        rospy.sleep(2) #Allow the goal to be calculated
        rospy.loginfo("Getting the closest goal")
        closesGoalCopy = getClosestGoal(2)
        rospy.loginfo("Getting the current location")
        driveTo(closesGoalCopy[0], closesGoalCopy[1], closesGoalCopy[2])

    rospy.spin()
Example #4
class Voice:
    def __init__(self, sound_db):
        self._sound_client = SoundClient()
        rospy.loginfo('Will wait for a second for the sound_play node.')
        rospy.sleep(1)
        self._sound_db = sound_db
        self.play_sound('sound10')

    def play_sound(self, sound_name):
        '''Plays the requested sound.

        Args:
            sound_name (str): Unique name of the sound in the sound database.
        '''
        sound_filename = self._sound_db.get(sound_name)
        self._sound_client.playWave(sound_filename)

        # TODO: Make sure this returns when it is done playing the sound.
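        # (One possible way to address this, assuming a sound_play release that
        # supports it: construct the client as SoundClient(blocking=True) in
        # __init__, so that playWave() only returns after playback finishes.)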

    def say(self, text):
        '''Send a TTS (text to speech) command.
        Args:
            text (str): The speech to say.
        '''
        self._sound_client.say(text)
Example #5
class TalkBack:
    def __init__(self, script_path):
        rospy.init_node('talkback')

        rospy.on_shutdown(self.cleanup)
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        self.wavepath = rospy.get_param("~wavepath", script_path + "/../sounds")
        self.soundhandle = SoundClient()
        rospy.sleep(1)
        self.soundhandle.stopAll()
        self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")
        rospy.sleep(1)
        self.soundhandle.say("Ready", self.voice)
        rospy.loginfo("Say one of the navigation commands...")
        rospy.Subscriber('/recognizer/output', String, self.talkback)
        
    def talkback(self, msg):
        palabra = msg.data
        rospy.loginfo(palabra)
        if palabra == "cancel":
            rospy.loginfo("The number is one")
        self.soundhandle.say(msg.data, self.voice)

    def cleanup(self):
        self.soundhandle.stopAll()
        rospy.loginfo("Shutting down talkback node...")
Example #6
class VoiceGenerator:
    def __init__(self):
        rospy.on_shutdown(self.cleanup)

        #self.voice = rospy.get_param("~voice", "voice_kal_diphone")
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        self.wavepath = rospy.get_param("~wavepath", "")
        self.topic = rospy.get_param("~voice_topic", "")
       
        # Create the sound client object
        self.soundhandle = SoundClient()
        
        rospy.sleep(1)
        self.soundhandle.stopAll()
       
        # Announce that we are ready for input
        rospy.sleep(1)
        self.soundhandle.say("Voice Module Ready", self.voice)

        rospy.loginfo("Message ...")

        # Subscribe to the recognizer output
        rospy.Subscriber(self.topic, String, self.voicegenerator)
    
    def switch_asr_onoff_client(self, x):
        rospy.wait_for_service('switch_asr_on_off')
        try:
            switch_asr_on_off = rospy.ServiceProxy('switch_asr_on_off', asr_status)
            resp1 = switch_asr_on_off(x)
            
        except rospy.ServiceException, e:
            print "Service call failed: %s"%e
Example #7
class Talker:
	def __init__(self):
		#self.talkPublisher = rospy.Publisher("/robotsound", SoundRequest, queue_size=1)
		self.soundClient = SoundClient()
		rospy.sleep(1)

	def say(self, text):
		self.soundClient.say(text, 'voice_kal_diphone')
Example #8
class SoundIntermediary():
    def __init__(self):
        self.sound_client = SoundClient()
        self.voice = rospy.get_param("~voice", "kal_diphone")
        rospy.Subscriber("wt_speech", String, self.speak)

    def speak(self, msg):
        self.sound_client.say(msg.data, self.voice)
Example #9
    def execute(self, userdata):

        soundhandle = SoundClient()
        #let ROS get started...
        rospy.sleep(0.5)

        soundhandle.stopAll()

        return succeeded
Example #10
class TtsServer(object):
	feedback = TtsFeedback()
	result = TtsResult()

	def __init__(self, name):
		self._action_name = name
		self._as = actionlib.SimpleActionServer(self._action_name, TtsAction, self.execute_cb, auto_start = False)
		self._as.start()
		self.client = SoundClient()
		rospy.sleep(1)
		self.client.stopAll()
		rospy.loginfo("Started ActionServer")

	def execute_cb(self, goal):
		self.feedback.event_type = 1
		self.feedback.timestamp = rospy.get_rostime()
		self._as.publish_feedback(self.feedback)

		goalString = goal.rawtext.text

		self.result.text = goalString
		self.result.msg = "This string can be used to display an eventual error or warning message during voice synthesis"
		
		rospy.sleep(goal.wait_before_speaking)
		
		# self.client.say(goalString)
		words = goalString.split()
		self.sayWords(words)

		self._as.set_succeeded(self.result)
	
		self.feedback.event_type = 2
		self.feedback.timestamp = rospy.get_rostime()
		self._as.publish_feedback(self.feedback)

	def sayWords(self, words):
		i = 0
		self.feedback.event_type = 32

		for word in words:
			self.client.say(word)
			self.feedback.text_said = word

			if(i<len(words)-1):
				self.feedback.next_word = words[i+1]
			else:
				self.feedback.next_word = "Reached the end of the sentence"
				self.feedback.event_type = 128

			self.feedback.timestamp = rospy.get_rostime()
			self._as.publish_feedback(self.feedback)

			i += 1
			rospy.sleep(0.7)

		del words[:]
Example #11
File: speak.py  Project: hcrlab/push_pull
class FestivalTTS:
  def __init__(self):
    rospy.init_node('festival_tts')
    self._client = SoundClient()
    self.voice = 'voice_cmu_us_slt_arctic_hts'
    rospy.Subscriber('festival_tts', String, self._response_callback)
    rospy.spin()

  def _response_callback(self, data):
    self._client.say(data.data, self.voice)
Example #12
def play_nonblocking():
    """
    Play the same sounds with manual pauses between them.
    """
    rospy.loginfo('Example: Playing sounds in *non-blocking* mode.')
    # NOTE: you must sleep at the beginning to let the SoundClient publisher
    # establish a connection to the soundplay_node.
    soundhandle = SoundClient(blocking=False)
    rospy.sleep(1)

    # In the non-blocking version you need to sleep between calls.
    rospy.loginfo('Playing say-beep at full volume.')
    soundhandle.playWave('say-beep.wav')
    rospy.sleep(1)

    rospy.loginfo('Playing say-beep at volume 0.3.')
    soundhandle.playWave('say-beep.wav', volume=0.3)
    rospy.sleep(1)

    rospy.loginfo('Playing sound for NEEDS_PLUGGING.')
    soundhandle.play(SoundRequest.NEEDS_PLUGGING)
    rospy.sleep(1)

    rospy.loginfo('Speaking some long string.')
    soundhandle.say('It was the best of times, it was the worst of times.')
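For comparison, a minimal sketch of a blocking counterpart: with SoundClient(blocking=True),
each playWave()/say() call returns only after the sound has finished playing, so the manual
rospy.sleep() pauses between calls are not needed. The 'say-beep.wav' name is reused from the
snippet above and is assumed to resolve on the machine running soundplay_node.

import rospy
from sound_play.libsoundplay import SoundClient


def play_blocking():
    """
    Play the same sounds, letting each call block until playback finishes.
    """
    rospy.loginfo('Example: Playing sounds in *blocking* mode.')
    soundhandle = SoundClient(blocking=True)

    rospy.loginfo('Playing say-beep at full volume.')
    soundhandle.playWave('say-beep.wav')

    rospy.loginfo('Playing say-beep at volume 0.3.')
    soundhandle.playWave('say-beep.wav', volume=0.3)

    rospy.loginfo('Speaking some long string.')
    soundhandle.say('It was the best of times, it was the worst of times.')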
Example #13
class Robbie_Chat():

    def __init__(self):
        rospy.init_node('robbie_chat_node', anonymous=True)
        self.kern = aiml.Kernel()
        self.kern.setBotPredicate("name","robbie")
        self.kern.setBotPredicate("location","Australia")
        self.kern.setBotPredicate("botmaster","Petrus")
        self.kern.setBotPredicate("gender","male")
        self.brainLoaded = False
        self.forceReload = False
        # Set the default TTS voice to use
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        self.robot = rospy.get_param("~robot", "robbie")
        # Create the sound client object
        self.soundhandle = SoundClient()
        
        # Wait a moment to let the client connect to the
        # sound_play server
        rospy.sleep(1)
        
        # Make sure any lingering sound_play processes are stopped.
        self.soundhandle.stopAll()

        while not self.brainLoaded:
            if self.forceReload or (len(sys.argv) >= 2 and sys.argv[1] == "reload"):
                # Use the Kernel's bootstrap() method to initialize the Kernel. The
                # optional learnFiles argument is a file (or list of files) to load.
                # The optional commands argument is a command (or list of commands)
                # to run after the files are loaded.
                #self.kern.setBotPredicate(name. robbie)
                self.kern.bootstrap(learnFiles="std-startup.xml", commands="load aiml b")
                self.brainLoaded = True
                # Now that we've loaded the brain, save it to speed things up for
                # next time.
                self.kern.saveBrain("standard.brn")
            else:
                # Attempt to load the brain file.  If it fails, fall back on the reload
                # method.
                try:
                    # The optional brainFile argument specifies a brain file to load.
                    self.kern.bootstrap(brainFile="standard.brn")
                    self.brainLoaded = True
                except:
                    self.forceReload = True


        
        rospy.Subscriber('/speech_text', String, self.speak_text)



    def speak_text(self, text):
        self.soundhandle.say(self.kern.respond(text.data), self.voice)
Example #14
 def __init__(self):
    if debug:
       print "Initializing Sound Client"
    SoundClient.__init__(self)
    # wait for subscribers
    timeout = 10
    while timeout > 0:
       if self.pub.get_num_connections() > 0:
          timeout = 0
       else:
          rospy.sleep(1)
          timeout -= 1  # count down so the wait gives up after ~10 seconds
Example #15
def main():
    rospy.init_node('eys_move', anonymous = True)
    soundhandle = SoundClient()
    close_eye_publisher = rospy.Publisher("close_eye", Int32, queue_size=10)
    rospy.sleep(1)
    soundhandle.stopAll()

    close_eye_publisher.publish(Int32(data=2))

    wave_path = os.path.dirname(os.path.abspath(__file__)) + "/../sounds/camera.wav"
    soundhandle.playWave(wave_path)
    sleep(2)
Example #16
class ChatbotSpeaker:
  def __init__(self):
    rospy.init_node('chatbot_speaker')
    self._client = SoundClient()
    rospy.Subscriber('chatbot_responses', String, self._response_callback)
    rospy.spin()

  def _response_callback(self, data):
    query = urllib.quote(data.data)
    os.system(tts_cmd.format(query))
    os.system(sox_cmd)
    self._client.playWave('/tmp/speech.wav')
Example #17
class SaySomething(object):
    def __init__(self, speech):
        self._speech = speech
        self._client = SoundClient()

    def start_goal(self):
        self._client.say(self._speech, 'voice_us1_mbrola')

    def start(self):
        pass

    def stop(self):
        pass
Example #18
def wel(filename):
    #absolute path for this
    print filename
    fileloc = str( str(os.path.dirname(__file__)) +'/'+ filename)
    f = open(fileloc, 'r')
    #rospy.init_node('say', anonymous = True)
    soundhandle = SoundClient()
    rospy.sleep(1)
    s = f.read()
    print s
    soundhandle.say(s,'voice_kal_diphone')
    #may be loop with readline so there is a pause at every line.
    rospy.sleep(1)
Example #19
    def __init__(self):
        State.__init__(self, outcomes=['detect','not_detect'])

        rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.my_listener)
        self.n_markers = 0
        self.table_flag = False
        self.execute_flag = False

        self.sum_x = 0
        self.counter = 0
        self.mean_distance = 0

        soundhandle = SoundClient()
        self.detect_tableorder = soundhandle.waveSound('/home/mscv/ros/hydro/catkin_ws/src/rbx2/rbx2_tasks/sound/smb_1-up.wav')
Example #20
    def execute(self, userdata):

        soundhandle = SoundClient()
        rospy.sleep(1)

        #voice = 'voice_kal_diphone'
        print 'Saying: %s' % userdata.text_to_say
        print 'Voice: %s' % self._voice_name
        rospy.loginfo("SAYING ==>%s" % userdata.text_to_say)

        soundhandle.say(userdata.text_to_say, self._voice_name)
        rospy.sleep(1)

        return succeeded
Example #21
class voice_play:

    def __init__(self):
        self.voice = rospy.get_param("~voice", "voice_cmu_us_clb_arctic_clunits")
        self.sound_files_dir = rospy.get_param("~sound_dir", "")

        self.sound_handle = SoundClient()

        rospy.sleep(1)
        rospy.loginfo("Ready to speak...")

        rospy.Subscriber('/speech', String, self.talkback)
        rospy.Subscriber('/sounds', String, self.play_sound)
        # self.sound_handle.say('Greetings, I am Judith.', self.voice)

    def talkback(self, msg):
        rospy.loginfo(msg.data)
        self.sound_handle.stopAll()
        self.sound_handle.say(msg.data, self.voice)
        rospy.sleep(2)

    def play_sound(self, msg):
        rospy.loginfo(msg.data)
        self.sound_handle.stopAll()
        self.sound_handle.playWave(self.sound_files_dir + msg.data)
        rospy.sleep(2)
Example #22
File: talkback.py  Project: Chengxi9/rbx1
class TalkBack:
    def __init__(self, script_path):
        rospy.init_node('talkback')

        rospy.on_shutdown(self.cleanup)
        
        # Set the default TTS voice to use
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        
        # Set the wave file path if used
        self.wavepath = rospy.get_param("~wavepath", script_path + "/../sounds")
        
        # Create the sound client object
        self.soundhandle = SoundClient()
        
        # Wait a moment to let the client connect to the
        # sound_play server
        rospy.sleep(1)
        
        # Make sure any lingering sound_play processes are stopped.
        self.soundhandle.stopAll()
        
        # Announce that we are ready for input
        self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")
        rospy.sleep(1)
        self.soundhandle.say("Ready", self.voice)
        
        rospy.loginfo("Say one of the navigation commands...")

        # Subscribe to the recognizer output and set the callback function
        rospy.Subscriber('/recognizer/output', String, self.talkback)
        
    def talkback(self, msg):
        # Print the recognized words on the screen
        rospy.loginfo(msg.data)
        
        # Speak the recognized words in the selected voice
        self.soundhandle.say(msg.data, self.voice)
        
        # Uncomment to play one of the built-in sounds
        #rospy.sleep(2)
        #self.soundhandle.play(5)
        
        # Uncomment to play a wave file
        #rospy.sleep(2)
        #self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

    def cleanup(self):
        self.soundhandle.stopAll()
        rospy.loginfo("Shutting down talkback node...")
Example #23
class SpeakNode():
    
    
    #sound_sig = Signal(SoundRequest)

    def __init__(self):
        self._sound_client = SoundClient()
        rospy.Subscriber('robotsound', SoundRequest, self.sound_cb)
        

    def speak(self, sentence):
        self._sound_client.say(sentence)

    def sound_cb(self, sound_request):
        pass
Example #24
    def execute(self, userdata):

        sound_path = Mp3ToWavConverter(userdata.sound_file_path)
        frequency = BpmToFreq(userdata.bpm)
        soundhandle = SoundClient()

        rospy.sleep(1)

        r = rospy.Rate(frequency)
        rospy.loginfo("Playing the sound in this path ==> " + sound_path)
        while not rospy.is_shutdown():
            soundhandle.playWave(sound_path)
            r.sleep()

        return succeeded
Example #25
    def __init__(self):
        rospy.on_shutdown(self.cleanup)

        self.voice = rospy.get_param("~voice", "voice_kal_diphone")#voice_don_diphone, voice_kal_diphone
        self.wavepath = rospy.get_param("~wavepath", "")
        self.command_to_phrase = COMMAND_TO_PHRASE  # this is defined in wheelchairint/keywords_tocommand.py
        # Create the sound client object
        self.soundhandle = SoundClient()
        # Announce that we are ready for input
        rospy.sleep(2)
        self.soundhandle.stopAll()
        rospy.sleep(1)
        self.soundhandle.playWave(self.wavepath + "/R2D2.wav")
        rospy.sleep(2)
        self.soundhandle.say("I am ready", self.voice)
        rospy.sleep(1.5)
        self.soundhandle.say("Give me an order", self.voice)
        rospy.sleep(2)
        rospy.loginfo("Say one of the commands...")

        # Subscribe to the recognizer output
        rospy.Subscriber('recognizer/output', String, self.rec_out_callback)
        r = rospy.Rate(0.5)
        while not rospy.is_shutdown():
#            rospy.loginfo("wheelchair talk is running correctly.")
            r.sleep()
Example #26
    def __init__(self, use_tts, synchronizer, phrases=None, voice="Kimberly"):
        """ Constructor

        Initializes the speech state with the desired parameters; either with
        or without online TTS, and with or without a pre-generated phrase
        file.

        Parameters
        -----------
        use_tts : bool
            If true, allow online TTS
        synchronizer : Synchronizer
            Synchronizer object to allow sync of speech and behaviors.  Should
            be the same object as is passed to behavior ControllerState objects
            with ``setup_sync``
        voice : str, optional
            Which Amazon Polly voice to use. Defaults to Kimberly

        phrases : str, optional
            Path to the phrase file to use. If None, require online TTS.
        """
        smach.State.__init__(self,outcomes=["preempted","no_audio","done"],
                             input_keys=["text","wav_file"])
        self._tts = use_tts
        if use_tts:
            self._talker = CoRDialTTS(voice)
        self._sound_client = SoundClient()
        self._sync = synchronizer
Example #27
    def __init__(self):
        rospy.on_shutdown(self.cleanup)
        self.speed = 0.1
        self.buildmap = False
        self.follower = False
        self.navigation = False
        self.msg = Twist()


        # Create the sound client object
        self.soundhandle = SoundClient()
       
        rospy.sleep(1)
        self.soundhandle.stopAll()
       
        # Announce that we are ready for input
        rospy.sleep(1)
        self.soundhandle.say('Hi, my name is Petrois')
        rospy.sleep(3)
        self.soundhandle.say("Say one of the navigation commands")

        # publish to cmd_vel, subscribe to speech output
        self.pub_ = rospy.Publisher("cmd_vel", Twist, queue_size=2)
        rospy.Subscriber('recognizer/output', String, self.speechCb)

        r = rospy.Rate(10.0)
        while not rospy.is_shutdown():
            self.pub_.publish(self.msg)
            r.sleep()
Example #28
File: talkback.py  Project: Chengxi9/rbx1
    def __init__(self, script_path):
        rospy.init_node('talkback')

        rospy.on_shutdown(self.cleanup)
        
        # Set the default TTS voice to use
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        
        # Set the wave file path if used
        self.wavepath = rospy.get_param("~wavepath", script_path + "/../sounds")
        
        # Create the sound client object
        self.soundhandle = SoundClient()
        
        # Wait a moment to let the client connect to the
        # sound_play server
        rospy.sleep(1)
        
        # Make sure any lingering sound_play processes are stopped.
        self.soundhandle.stopAll()
        
        # Announce that we are ready for input
        self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")
        rospy.sleep(1)
        self.soundhandle.say("Ready", self.voice)
        
        rospy.loginfo("Say one of the navigation commands...")

        # Subscribe to the recognizer output and set the callback function
        rospy.Subscriber('/recognizer/output', String, self.talkback)
Example #29
File: node.py  Project: arnthorm/Fable
  def __init__(self):
    rospy.init_node('fable_connection', anonymous=True, disable_signals=True)
    self.soundhandle = SoundClient()
    self.manager = None
    self.motor_last_sent = {}
    try:
      self.manager = ROSEmbeddedManager(
        self.setup,
        SerialTransport(
          port=rospy.get_param('~port', get_usb()),
          baud=rospy.get_param('~baudrate', '57600')
        )
      )

      rospy.Subscriber('/posture_detected', PostureDetected, self.posture_detected_cb)

      r = rospy.Rate(5)
      while not rospy.is_shutdown():
        try:
          self.manager.act()
          r.sleep()
        except KeyboardInterrupt:
          self.manager.stop()
          break
    except:
      if self.manager is not None:
        self.manager.stop()
      raise
Example #30
    def __init__(self, polygon_file, bigram_file):
        super(LetterboardInterface, self).__init__(polygon_file)

        self.sound_client = SoundClient()

        self.letter_boxes = [
            'abc',
            'def',
            'ghi',
            'jkl',
            'mno',
            'pqr',
            'stuv',
            'wxyz'
        ]

        self.choice_boxes = []
        for i in xrange(1,7):
            self.choice_boxes.append(self.polygons['choice%s' % i])

        self.predictor = Predictor(bigram_file, dict(zip(self.letter_boxes, self.letter_boxes)))
        for box in self.letter_boxes:
            self.register_callback(box, self.letter_cb)

        for box in self.choice_boxes:
            self.register_callback(box.id, self.choice_cb)
            box.name = ''

        self.register_callback('sent', self.sent_cb)

        self.register_callback('delword'  , self.delword)
        self.register_callback('delletter', self.delletter)

        self.polygons['sent'].name = ''
Example #31
class MainLoop:
    def __init__(self):
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        self.soundhandle = SoundClient()
        rospy.sleep(1)
        self.soundhandle.stopAll()
        rospy.sleep(1)
        self.soundhandle.say("Ready", self.voice)
        rospy.sleep(1)

        # Create sound client
        self.words = SoundClient()
        # Subscribe to the /recognizer/output topic to receive voice commands
        rospy.Subscriber('/recognizer/output', String, MicInputEventCallback)
        # Subscribe to the /mobile_base/events/digital_input topic to receive DIO
        rospy.Subscriber('/mobile_base/events/digital_input',
                         DigitalInputEvent, DigitalInputEventCallback)
        rospy.sleep(1)

    def talk(self, words):
        self.soundhandle.say(words, self.voice)
        rospy.sleep(1)
Example #32
class TalkBack:
    def __init__(self, script_path):
        rospy.init_node('talkback')

        rospy.on_shutdown(self.cleanup)

        # Create the sound client object
        self.soundhandle = SoundClient()

        # Wait a moment to let the client connect to the
        # sound_play server
        rospy.sleep(1)

        # Make sure any lingering sound_play processes are stopped.
        self.soundhandle.stopAll()

        # Announce that we are ready for input
        #self.soundhandle.playWave('say-beep.wav')
        #rospy.sleep(2)
        #self.soundhandle.say('Ready')

        rospy.loginfo("Say one of the navigation commands...")

        # Subscribe to the recognizer output and set the callback function
        rospy.Subscriber('/lm_data', String, self.talkback)

    def talkback(self, msg):
        # Print the recognized words on the screen
        rospy.loginfo(msg.data)

        # Speak the recognized words in the selected voice
        if msg.data == "WHAT IS YOUR NAME":
            self.soundhandle.say("MY NAME IS JESUS", volume=0.01)
        rospy.sleep(5)

    def cleanup(self):
        self.soundhandle.stopAll()
        rospy.loginfo("Shutting down talkback node...")
Example #33
def get_occupancy(
    msg
):  #This function gets the occupancy data and pumps it to closure function to check if the map is complete
    global occdata
    global cnt

    # create numpy array
    msgdata = numpy.array(msg.data)
    # compute histogram to identify percent of bins with -1
    occ_counts = numpy.histogram(msgdata, occ_bins)
    # calculate total number of bins
    total_bins = msg.info.width * msg.info.height
    # log the info
    # rospy.loginfo('Unmapped: %i Unoccupied: %i Occupied: %i Total: %i', occ_counts[0][0], occ_counts[0][1], occ_counts[0][2], total_bins)

    # make msgdata go from 0 instead of -1, reshape into 2D
    oc2 = msgdata + 1
    # reshape to 2D array using column order
    occdata = numpy.uint8(
        oc2.reshape(msg.info.height, msg.info.width, order='F'))

    contourCheck = 1
    if contourCheck:
        if closure(occdata):
            # map is complete, so save current time into file
            with open("maptime.txt", "w") as f:
                f.write("done")
            contourCheck = 0
            # play a sound
            soundhandle = SoundClient()
            rospy.sleep(1)
            soundhandle.stopAll()
            soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
            rospy.sleep(2)
            # save the map
            cv2.imwrite('mazemap.png', occdata)
            cnt = 2
            print("Map finished. Check your map at 'mazemap.png'.")
Example #34
class MainLoop:
    def __init__(self):
        self.voice = rospy.get_param("~voice", "voice_don_diphone")
        self.soundhandle = SoundClient()
        rospy.sleep(1)
        self.soundhandle.stopAll()
        rospy.sleep(1)

        self.soundhandle.say("Ready", self.voice)
        rospy.sleep(1)

        # Create sound client
        self.words = SoundClient()
        # Subscribe to the /recognizer/output topic to receive voice commands.
        rospy.Subscriber('/recognizer/output', String, MicInputEventCallback)
        # Subscribe to the /mobile_base/events/digital_input topic to receive DIO
        rospy.Subscriber('/mobile_base/events/digital_input',
                         DigitalInputEvent, DigitalInputEventCallback)
        rospy.sleep(1)

        #    rospy.on_shutdown(self.cleanup)
        self.max_speed = 0
        self.max_angular_speed = 0
        self.speed = 0
        self.angular_speed = 0
        self.linear_increment = 0
        self.angular_increment = 0
        self.rate = rospy.get_param("~rate", 5)
        r = rospy.Rate(self.rate)
        self.paused = False

        self.msg = Twist()
        self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist)

    def talk(self, words):
        self.soundhandle.say(words, self.voice)
        rospy.sleep(1)
Example #35
class LittleBot:
    def __init__(self, script_path):
        rospy.init_node('littlebot')

        rospy.on_shutdown(self.cleanup)

        self.wavepath = rospy.get_param("~wavepath",
                                        script_path + "/../sounds")

        self.soundhandle = SoundClient(blocking=True)

        rospy.sleep(1)

        self.soundhandle.stopAll()

        self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

        rospy.loginfo("Ready, waiting for commands...")
        self.soundhandle.say('Hello. Nice to meet you.')

        rospy.Subscriber('/lm_data', String, self.talkback)

    def talkback(self, msg):

        rospy.loginfo(msg.data)

        if msg.data.find('WHO-ARE-YOU') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say("I am LittleBot. I will be your friend.")

        elif msg.data.find('WHERE-ARE-YOU-FROM') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say(
                "I heard you ask about my hometown. I am from Tianjin.")

        elif msg.data.find('WHAT-CAN-YOU-DO') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say(
                "I am a baby now, so I can just do little things. But I will learn."
            )

        elif msg.data.find('DO-YOU-KNOW-ME') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say("Of course. I am cleverer than you.")

        elif msg.data.find('DO-YOU-LIKE-DOING-HOMEWORK') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say("Well, I do not have homework.")

        elif msg.data.find('GOODBYE') > -1:
            self.soundhandle.playWave(self.wavepath + "/R2D2a.wav")

            self.soundhandle.say("goodbye")

    def cleanup(self):
        self.soundhandle.stopAll()
        rospy.loginfo("Shutting down littlebot node...")
Example #36
import numpy as np

import move_base_msgs.msg

from sensor_msgs.msg import PointCloud2
import std_msgs.msg
import acciobot_main.msg

from geometry_msgs.msg import Pose, PoseStamped

from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient

soundhandle = SoundClient()
rospy.sleep(1)

voice = 'voice_kal_diphone'

clear_pub = rospy.Publisher('accio_clear_collisions',
                            std_msgs.msg.Bool,
                            queue_size=5)
coll_list_pub = rospy.Publisher('accio_collisions',
                                acciobot_main.msg.CollisionList,
                                queue_size=5)

HARDCODED_LOL_HEIGHTS = {1: 0.21, 2: 0.18, 3: 0.12, 4: 0.26}

HARDCODED_LOL_WIDTH = {1: 0.16, 2: 0.09, 3: 0.07, 4: 0.20}
Example #37
    def __init__(self):
        rospy.init_node("speech_demo")
        
        # Set the shutdown function
        rospy.on_shutdown(self.shutdown)
        
        self.rate = rospy.get_param('~rate', 10)
                
        self.tick = 1.0 / self.rate
        
        # How close does a person have to be to pay attention to them?
        self.max_target_distance = rospy.get_param('~max_target_distance', 2.0)
        
        # Set the default TTS voice to use
        self.tts_voice = rospy.get_param("~tts_voice", "voice_don_diphone")
        
        # How long in seconds before we consider a detection lost?
        self.lost_detection_timeout = rospy.get_param('~lost_detection_timeout', 5.0)
        
        # Get the path of the file listing valid locations
        nav_config_file = rospy.get_param('~nav_config_file', 'config/3dv/locations.yaml')
        
        # Get the Pocketsphinx vocabulary so we can filter recognition results
        allowed_phrases = rospy.get_param('~allowed_phrases', 'config/3dv_demo/3dv_commands.txt')
                
        # Load the location data
        with open(nav_config_file, 'r') as config:
            self.locations = load(config)
        
        # Read in the file
        with open(allowed_phrases, 'r') as phrases:
            self.allowed_phrases = [line.strip() for line in phrases if line is not None]
        
        # Have we said the greeting to a new person?
        self.greeting_finished = False
        
        # Is a person visible?
        self.target_visible = False
        
        # Are we waiting for the next detection?
        self.waiting = False
        
        # Are we listening on the speech recognition topic?
        self.listening = False
        
        # Set a timer to determine how long a target is no longer visible
        self.target_lost_time = 0.0
        
        # Create the sound client object
        self.soundhandle = SoundClient(blocking=True)
        
        # Wait a moment to let the client connect to the sound_play server
        rospy.sleep(2)
        
        # Publish the requested location so the executive node can use it
        self.speech_pub = rospy.Publisher('/speech_command', String, queue_size=1)

        # Connect to the goto_location service
        #rospy.wait_for_service('/goto_location', 60)
 
        #self.goto_service = rospy.ServiceProxy('/goto_location', GotoLocation)

        # Subscribe to the speech recognition /recognizer/output topic to receive voice commands
        rospy.Subscriber("/recognizer/output", String, self.speech_recognition, queue_size=1)
        
        # Subscribe to the target topic for tracking people
        #rospy.Subscriber('target_topic', DetectionArray, self.detect_person, queue_size=1)
        
        # Wait for the speech recognition services to be ready
        rospy.wait_for_service('/recognizer/start', 15)
        rospy.wait_for_service('/recognizer/stop', 15)
        
        # Connect to the start/stop services for speech recognition
        self.stop_speech_recognition = rospy.ServiceProxy('/recognizer/stop', Empty)
        self.start_speech_recognition = rospy.ServiceProxy('/recognizer/start', Empty)
        
        rospy.loginfo("JR demo up and running.")
        
        # Announce that we are ready for input
        self.jr_says("Ready", self.tts_voice, start_listening=True)
        #self.jr_says("Say, hello jack rabbit, to get my attention", self.tts_voice, start_listening=True)
        
        #self.start_speech_recognition()
        
        while not rospy.is_shutdown():
#             # If we have lost the target, start a timer
#             if not self.target_visible:
#                 self.target_lost_time += self.tick
#                 
#                 if self.target_lost_time > self.lost_detection_timeout and not self.waiting:
#                     rospy.loginfo("No person in sight.")
#                     self.target_visible = False
#                     self.greeting_finished = False
#                     self.waiting = True
#             else:
#                 if self.waiting:
#                     rospy.loginfo("Person detected.")
#                     self.waiting = False
#                     self.target_lost_time = 0.0

            rospy.sleep(self.tick)
Example #38

# Author: Blaise Gassend

import sys

if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv) > 3 or sys.argv[1] == '--help':
        print 'Usage: %s sound_to_play.(ogg|wav) [volume]' % sys.argv[0]
        print
        print 'Plays an .OGG or .WAV file. The path to the file should be absolute, and be valid on the computer on which sound_play is running.\n The (optional) second parameter sets the volume for the sound as a value between 0 and 1.0, where 0 is mute.'
        exit(1)

    # Import after printing usage for speed.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient

    rospy.init_node('play', anonymous=True)
    soundhandle = SoundClient()

    rospy.sleep(1)
    print 'Playing "%s".' % sys.argv[1]

    volume = float(sys.argv[2]) if len(sys.argv) == 3 else 1.0

    soundhandle.playWave(sys.argv[1], volume)
    rospy.sleep(1)
Example #39
    rospy.spin()


if __name__ == '__main__':

    #load_chair_info first
    global chair_info
    with open('chair_info.pkl', 'rb') as f:
        chair_info = pickle.load(f)
    print(len(chair_info))

    global tflistener
    tflistener = TransformListener()
    global map_
    map_ = read_pgm("mapname.pgm", byteorder='<')
    global map_h
    global map_w
    map_h = map_.shape[0]
    map_w = map_.shape[1]

    rospy.init_node('nav')

    print("say hello")
    global soundhandle
    soundhandle = SoundClient()
    rospy.sleep(1)
    soundhandle.say('welcome')

    listener()
    # movebase_client()
Example #40
class Whereisthis_Control:
    def __init__(self):

        msg = whereisthis()
        self.now_state = msg.GetPoint
        self.next_state = msg.DoorOpening
        self.Return_time = 0
        self.need_help = False
        self.control_sub = rospy.Subscriber("/whereisthis_control",
                                            whereisthis, self.controlCallback)
        self.control_pub = rospy.Publisher("/whereisthis_control",
                                           whereisthis,
                                           queue_size=1)

        self.sh = SoundClient(blocking=True)
        self.voice = rospy.get_param("~voice", "voice_kal_diphone")

        msg.NowTask = msg.GetPoint
        msg.NextTask = msg.DoorOpening
        msg.NeedHelp = False
        msg.FinishState = False
        rospy.sleep(0.5)

        self.sh.say('I am ready to start!', self.voice)

        # for i in range(5):
        self.control_pub.publish(msg)
        print("Start task Whereisthis...")
        print("----------GetPoint-----------")

    def controlCallback(self, msg):

        if msg.NeedHelp == True:
            print("Need help while no help way available.")
            # TODO: publish a call-for-help message (no such node is available yet)

        elif msg.FinishState == True:
            #self.sh.say('hello!', self.voice)
            n_msg = whereisthis()  #new msg
            # TODO: publish the new message
            n_msg.NeedHelp = False
            n_msg.FinishState = False

            if msg.NowTask == n_msg.GetPoint and self.now_state == n_msg.GetPoint:
                self.sh.say('I got the control command.', self.voice)
                n_msg.NowTask = n_msg.DoorOpening
                n_msg.NextTask = n_msg.GotoPoint
                # self.sh.say('i am ready to enter the arena now', self.voice)
                n_msg.FinishState = True
                self.control_pub.publish(n_msg)
                print("-------DoorOpening-------")
                # for i in range(5):
                #     self.control_pub.publish(n_msg)
                self.now_state = n_msg.NowTask
                self.next_state = n_msg.NextTask

            elif msg.NowTask == n_msg.DoorOpening and self.now_state == n_msg.DoorOpening:
                n_msg.NowTask = n_msg.GotoPoint
                n_msg.NextTask = n_msg.GoalDescription
                self.sh.say(
                    'thank you. now i will go to the information point.',
                    self.voice)
                print("-------GotoPoint-------")
                for i in range(5):
                    self.control_pub.publish(n_msg)
                self.now_state = n_msg.NowTask
                self.next_state = n_msg.NextTask

            elif (msg.NowTask == n_msg.GotoPoint and self.now_state
                  == n_msg.GotoPoint) or (msg.NowTask == n_msg.BacktoPoint and
                                          self.now_state == n_msg.BacktoPoint):
                # self.sh.say(str(msg), self.voice)
                if msg.NowTask == n_msg.BacktoPoint and self.now_state == n_msg.BacktoPoint:
                    if self.Return_time >= 3:  # task finish
                        self.sh.say('I have finished the task.', self.voice)
                    else:
                        self.sh.say('I am ready to serve another guest',
                                    self.voice)
                        self.Return_time += 1
                n_msg.NowTask = n_msg.GoalDescription
                n_msg.NextTask = n_msg.Guiding
                self.sh.say(
                    'hi dear guest, please stand in front of me, thank you.',
                    self.voice)
                print("-------GoalDescription-------")
                thread.start_new_thread(self.launch_image_core, ())
                rospy.sleep(
                    2
                )  # wait for image function to be ready & guest to stand in front of camera
                #for i in range(5):
                self.sh.say(
                    'i am ready to recognize you,and i will take a photo for you.',
                    self.voice)
                self.control_pub.publish(n_msg)
                self.now_state = n_msg.NowTask
                self.next_state = n_msg.NextTask

            elif msg.NowTask == n_msg.GoalDescription and self.now_state == n_msg.GoalDescription:
                n_msg.NowTask = n_msg.Guiding
                n_msg.NextTask = n_msg.BacktoPoint
                self.sh.say('now i will guide you there. please follow me.',
                            self.voice)
                print("-------Guiding-------")
                for i in range(5):
                    self.control_pub.publish(n_msg)
                self.now_state = n_msg.NowTask
                self.next_state = n_msg.NextTask

            elif msg.NowTask == n_msg.Guiding and self.now_state == n_msg.Guiding:
                n_msg.NowTask = n_msg.BacktoPoint
                n_msg.NextTask = n_msg.GoalDescription
                self.sh.say('now i will go back to the information point',
                            self.voice)
                print("-------BacktoPoint-------")
                for i in range(5):
                    self.control_pub.publish(n_msg)
                self.now_state = n_msg.NowTask
                self.next_state = n_msg.NextTask

    def launch_image_core(self):
        os.system('rosrun whereisthis_image image_whereisthis.py')
Example #41
class DemoController():
    def __init__(self, corpGen):
        rospy.init_node('demo_controller')

        #Dataset
        self.corpGen = corpGen
        self.names = [name.upper() for name in corpGen.listNames()]
        self.categories = self.corpGen.generate_category_dict()
        self.drinks = [drink.upper() for drink in self.categories['drinks']]

        #Knowledge
        self.drink_dict = {}
        self.current_name = ''

        #States
        self.ask_name = True
        self.wait_name = False
        self.ask_drink = False
        self.wait_drink = False
        self.ask_again = False

        #Speech synthesis bool
        self.talking = False

        self.timer = rospy.get_rostime().secs
        self.timeout = 10

        #Voice
        self.soundhandle = SoundClient()
        rospy.sleep(1)
        self.voice = 'voice_don_diphone'
        self.volume = 1.0
        self.parse_speech('')
        self.loop()

    def loop(self):
        rospy.Subscriber('/grammar_data', String, self.parse_speech)
        rospy.spin()

    def parse_speech(self, speech_data):

        if speech_data:
            if not self.talking:
                words = speech_data.data.strip()
            else:
                #Mute it
                words = ''

        if self.ask_name:
            self.talking = True
            rospy.loginfo("WHAT IS YOUR NAME")
            resp = 'WHAT IS YOUR NAME?'
            self.say(resp)

            #Set states
            self.ask_name = False
            self.wait_name = True

            #Set timer
            self.timer = rospy.get_rostime().secs
            rospy.sleep(0.5)

        elif self.wait_name:
            self.talking = False
            rospy.loginfo("WAITING FOR NAME")
            for name in self.names:
                if name in words:
                    self.talking = True

                    rospy.loginfo(name)
                    self.drink_dict[name] = ''
                    self.current_name = name

                    self.wait_name = False
                    self.ask_drink = True
                    self.timer = rospy.get_rostime().secs

                    resp = 'NICE TO MEET YOU {}'.format(name)
                    self.say(resp)
                    rospy.loginfo("GOT NAME")

            if rospy.get_rostime().secs - self.timer > self.timeout:

                self.ask_again = True
                self.ask_name = True
                self.wait_name = False

        elif self.ask_drink:
            self.talking = True
            rospy.loginfo("WAITING FOR DRINK")
            resp = 'WHAT IS YOUR FAVOURITE DRINK?'
            self.say(resp)

            #Set states
            self.ask_drink = False
            self.wait_drink = True

            #Set timer
            self.timer = rospy.get_rostime().secs
            rospy.sleep(0.5)

        elif self.wait_drink:
            self.talking = False
            for drink in self.drinks:
                if drink in words:
                    self.talking = True

                    rospy.loginfo(drink)
                    self.drink_dict[self.current_name] = drink

                    self.wait_drink = False
                    self.timer = rospy.get_rostime().secs

                    resp = 'YOUR NAME IS {} AND YOUR FAVOURITE DRINK IS {}'.format(
                        self.current_name, drink)
                    self.say(resp)
                    rospy.loginfo("GOT DRINK")

            if rospy.get_rostime().secs - self.timer > self.timeout:

                self.ask_again = True
                self.ask_drink = True
                self.wait_drink = False

        if self.ask_again:
            self.talking = True
            resp = 'I AM SORRY I DID NOT CATCH THAT'
            rospy.loginfo("DID NOT CATCH")
            self.say(resp)
            self.ask_again = False

    def say(self, speech):
        self.soundhandle.say(speech)
Example #42
class RobotIO:
    def __init__(self,
                 table_oidxs,
                 starting_table,
                 image_path,
                 voice="voice_cmu_us_slt_cg"):
        print "RobotIO: __init__ with " + str(table_oidxs) + ", " + str(
            starting_table) + ", " + voice
        self.table_oidxs = table_oidxs  # dictionary from table ids to lists of objects or None if there are None
        self.table = starting_table  # 1, 2, or 3. missing tables should have None as their table_oidxs
        self.image_path = image_path
        self.voice = voice
        self.last_say = None
        self.arm_pos = -1

        # initialize a sound client instance for TTS
        print "RobotIO: initializing SoundClient..."
        self.sound_client = SoundClient(blocking=True)
        rospy.sleep(1)
        self.sound_client.stopAll()
        print "RobotIO: ... done"

        rospy.wait_for_service('tabletop_object_detection_service')
        self.tabletop_object_detection_service = rospy.ServiceProxy(
            'tabletop_object_detection_service',
            TabletopPerception,
            persistent=True)
        self.pointCloud2_plane = None
        self.cloud_plane_coef = None
        self.pointCloud2_objects = None

    # Listen for speech from user.
    def get_from_user(self):
        print "RobotIO: get_from_user called"
        self.listening_mode_toggle_client()
        uin = ''
        while len(uin) == 0:
            uin = self.sound_transcript_client()
            uin = process_raw_utterance(uin)
        self.listening_mode_toggle_client()

        if uin == "frozen fish":
            print "RobotIO: got shutdown keyphrase from user"
            raise rospy.ROSInterruptException

        print "RobotIO: get_from_user returning '" + uin + "'"
        return uin

    # Get an object touch or hear 'all' or 'none'
    # This call opens a sub-dialog in which the user can command the robot to face a different table,
    # watch for a touch, or answer that 'all' or 'none' of the objects fit the description
    # oidxs - not used in this implementation; oidxs drawn from table oidxs and current table face value
    # returns - an oidx in those provided or 'None'
    def get_oidx_from_user(self, oidxs):
        print "RobotIO: get_oidx_from_user called"

        self.point(-1)  # retract the arm, if it's out
        idx = -1
        while idx == -1:
            u = self.get_from_user()
            u = process_raw_utterance(u)
            ws = u.split()

            # The user asked the robot to face a different table.
            if "face" in ws:
                for ns, n in [("one", 1), ("1", 1), ("two", 2), ("2", 2)]:
                    if ns in ws:
                        if self.table != n:
                            self.face_table(n)
                        else:
                            self.say_to_user("I am already facing table " +
                                             str(n))
            elif "turn" in ws:
                if "left" in ws and self.table == 2:
                    self.face_table(1)
                elif "right" in ws and self.table == 1:
                    self.face_table(2)

            # The user told the robot to watch for a touch.
            elif "watch" in ws or "look" in ws or "this" in ws:
                idx = self.get_touch()

            # The user said "all" or "none"
            elif "all" in ws or "none" in ws:
                idx = None

            # The command couldn't be shallow parsed.
            else:
                self.say_to_user("Sorry, I didn't catch that.")
        self.say_to_user("I see.")
        if idx is not None:
            oidx = self.table_oidxs[self.table][idx]
        else:
            oidx = None

        print "RobotIO: get_oidx_from_user returning " + str(oidx)
        return oidx

    # use built-in ROS sound client to do TTS
    def say_to_user(self, s):
        print "RobotIO: say_to_user called with '" + s + "'"

        # Replace 'shake your head' lines for robot interface.
        shake_str = "shake your head"
        if shake_str in s:
            sidx = s.find(shake_str)
            if "you could use the word" in s:  # pos example
                new_str = "say 'none of them'"
            else:  # neg example
                new_str = "say 'all of them'"
            s = s[:sidx] + new_str + s[sidx + len(shake_str):]

        # Remove extra information in parens that was used during MTurk for robot interface.
        sidx = s.find("(")
        eidx = s.find(")") + 1
        while sidx > -1:
            s = s[:sidx] + s[eidx:]
            sidx = s.find("(")
            eidx = s.find(")") + 1

        if self.last_say is None:
            self.last_say = s
        else:
            self.last_say += " " + s

        self.sound_client.say(str(s), voice=self.voice)
        print "say_to_user: " + s

    # Speak an utterance containing role tags, filling in the referents.
    def say_to_user_with_referents(self, u, rvs):
        print "RobotIO: say_to_user_with_referents called with '" + u + "', " + str(
            rvs)

        # Replace recipients; we here hard-code the patients from the ispy setting, but, in general,
        # this should be a more interesting procedure.
        if 'recipient' in rvs and rvs['recipient'] is not None:
            p = None
            if rvs['recipient'] == 'b':
                p = 'robert'
            elif rvs['recipient'] == 'd':
                p = 'david'
            elif rvs['recipient'] == 'h':
                p = 'heidi'
            elif rvs['recipient'] == 'p':
                p = 'peggy'
            elif rvs['recipient'] == 'm':
                p = 'mallory'
            elif rvs['recipient'] == 'n':
                p = 'nancy'
            elif rvs['recipient'] == 'r':
                p = 'richard'
            elif rvs['recipient'] == 's':
                p = 'sybil'
            elif rvs['recipient'] == 'w':
                p = 'walter'
            sidx = u.find("<r>")
            eidx = u.find("</r>") + 4
            while sidx > -1:
                u = u[:sidx] + p + u[eidx:]
                sidx = u.find("<r>")
                eidx = u.find("</r>") + 4

        # Replace sources and goals. Here we assume all sources and goals are room numbers with possible
        # letter attachments a and b (because this is what happens in the ispy setting). To ease pronunciation
        # for the model, we space-delimit the numbers and letters.
        for r in ['source', 'goal']:
            if r in rvs and rvs[r] is not None:
                r0 = r[0]
                sidx = u.find("<" + r0 + ">")
                eidx = u.find("</" + r0 + ">") + 4
                while sidx > -1:
                    u = u[:sidx] + ' '.join(rvs[r]) + u[eidx:]
                    sidx = u.find("<" + r0 + ">")
                    eidx = u.find("</" + r0 + ">") + 4

        # Handle patient, which involves first turning to face the right table and pointing (blocking) before
        # releasing to speak.
        if 'patient' in rvs and rvs['patient'] is not None:
            sidx = u.find("<p>")
            eidx = u.find("</p>")

            if sidx > -1:  # if the robot actually said anything about the patient, point
                oidx = int(rvs['patient'].split('_')[1])  # e.g. 'oidx_1' -> 1
                ttid = None
                for tid in self.table_oidxs:
                    if self.table_oidxs[
                            tid] is not None and oidx in self.table_oidxs[tid]:
                        ttid = tid
                if ttid is not None:
                    self.face_table(ttid)
                    self.point(self.table_oidxs[ttid].index(oidx))
                else:
                    raise IndexError("oidx " + str(oidx) +
                                     " not found on tables")

            # Strip <p> </p> from utterance, but speak words between.
            while sidx > -1:
                u = u[:sidx] + u[sidx + 3:eidx] + u[eidx + 4:]
                sidx = u.find("<p>")
                eidx = u.find("</p>")

        # Speak the utterance with all roles instantiated, and possible pointing initiated.
        self.say_to_user(u)

    # Take in an action as a role-value-slot dictionary and produce robot behavior.
    # TODO: this needs to next be tied to actual robot performing behavior
    def perform_action(self, rvs):
        print "RobotIO: perform_action called with " + str(rvs)
        cmd = None
        if rvs['action'] == 'walk':
            a_str = "I will navigate to <g>here</g>."
        elif rvs['action'] == 'bring':
            a_str = "I will grab the object and deliver it to <r>this person</r>."
            cmd = "eog " + os.path.join(self.image_path,
                                        rvs['patient'] + ".jpg")
        elif rvs['action'] == 'move':
            a_str = "I will relocate the object from <s>here</s> to <g>there</g>."
            cmd = "eog " + os.path.join(self.image_path,
                                        rvs['patient'] + ".jpg")
        else:
            raise ValueError("unrecognized action type to perform '" +
                             rvs['action'] + "'")

        # Speak, retract arm, show image of target object (if relevant)
        self.say_to_user_with_referents(a_str, rvs)
        self.point(-1)  # retract arm
        if cmd is not None:
            os.system(cmd)

        # Turn to face 'table 3' (empty space) to facilitate navigation
        self.face_table(3, verbose=False)
        # TODO: execute the action on the physical robot platform

    # Support functions:

    # get touches by detecting human touches on top of objects
    def get_touch(self):
        print "RobotIO support: get_touch called"
        if self.pointCloud2_plane is None:
            self.say_to_user(
                "I am getting the objects on the table into focus.")
            self.pointCloud2_plane, self.cloud_plane_coef, self.pointCloud2_objects = self.obtain_table_objects(
            )
            self.say_to_user("Okay, I see them.")
        self.watching_mode_toggle_client()
        idx = self.detect_touch_client()
        self.watching_mode_toggle_client()
        print "RobotIO support: get_touch returning " + str(idx)
        return int(idx)

    # point using the robot arm
    def point(self, idx):
        print "RobotIO support: point called with " + str(idx)
        if self.arm_pos != idx:
            if self.pointCloud2_plane is None and idx != -1:
                self.say_to_user(
                    "I am getting the objects on the table into focus.")
                self.pointCloud2_plane, self.cloud_plane_coef, self.pointCloud2_objects = self.obtain_table_objects(
                )
                self.say_to_user("Okay, I see them.")
            self.touch_client(idx)
        self.arm_pos = idx

    # Rotate the chassis and establish new objects in line of sight.
    def face_table(self, tid, verbose=True):
        print "RobotIO support: face_table called with " + str(tid)
        self.point(-1)  # retract the arm, if it's out
        if tid != self.table:
            if verbose:
                self.say_to_user("I am turning to face table " + str(tid) +
                                 ".")
            s = self.face_table_client(tid)
            self.table = tid
            self.pointCloud2_plane = None
            self.cloud_plane_coef = None
            self.pointCloud2_objects = None
            print "RobotIO support: face_table returning " + str(s)
        else:
            s = True
        return s

    # get the point cloud objects on the table for pointing / recognizing touches
    def obtain_table_objects(self):
        print "RobotIO support: obtain_table_objects called"
        plane = plane_coef = cloud_objects = None
        focus = False
        while not focus:
            tries = 5
            while tries > 0:
                plane, plane_coef, cloud_objects = self.get_pointCloud2_objects(
                )
                if len(cloud_objects) == len(self.table_oidxs[self.table]):
                    focus = True
                    break
                tries -= 1
                rospy.sleep(1)
            if tries == 0 and not focus:
                self.say_to_user(
                    "I am having trouble focusing on the objects.")
                rospy.sleep(10)
        print "RobotIO support: obtain_table_objects returning plane/coef/objects"
        return plane, plane_coef, cloud_objects

    # get PointCloud2 objects from service
    def get_pointCloud2_objects(self):
        print "RobotIO support: get_pointCloud2_objects called"

        # query to get the blobs on the table
        req = TabletopPerceptionRequest()
        req.apply_x_box_filter = True  # limit field of view to table in front of robot
        req.x_min = -0.25
        req.x_max = 0.8
        try:
            res = self.tabletop_object_detection_service(req)

            if len(res.cloud_clusters) == 0:
                return [], [], []

            # re-index clusters so order matches left-to-right indexing expected
            ordered_cloud_clusters = self.reorder_client("x", True)

            print(
                "RobotIO support: get_pointCloud2_objects returning res with "
                + str(len(ordered_cloud_clusters)) + " clusters")
            return res.cloud_plane, res.cloud_plane_coef, ordered_cloud_clusters
        except rospy.ServiceException, e:
            sys.exit("Service call failed: %s " % e)
Example #43
0
class Master:
    def loadWorld(self, filename):
        archivo = open(filename)
        datos = archivo.read().splitlines()
        y, x = datos[0].split(' ')
        x = int(x)
        y = int(y)
        maze = [[None for i in range(x)] for j in range(y)]
        for i in range(x * y):
            celda = datos[i + 1].split(' ')
            maze[int(celda[0])][int(celda[1])] = celda[2:]
        nStart = int(datos[x * y + 2])
        starts = []
        for i in range(nStart):
            aux = datos[x * y + i + 3].split(' ')
            starts.append([int(aux[0]), int(aux[1]), aux[2]])
        nGoals = int(datos[x * y + 2 + nStart + 2])
        goals = []
        for i in range(nGoals):
            aux = datos[x * y + 5 + nStart + i].split(' ')
            goals.append([int(aux[0]), int(aux[1]), aux[2]])
        maxDepth = int(datos[-1])
        return x, y, maze, starts, goals, maxDepth
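    # The world file parsed above is assumed to look roughly like this (layout
    # inferred from the indexing in loadWorld; the numbers are illustrative):
    #
    #   <rows> <cols>
    #   <row> <col> <up> <left> <down> <right>   (one line per cell, rows*cols lines)
    #   <ignored line>
    #   <number of start states>
    #   <row> <col> <direction>                  (one line per start state)
    #   <ignored line>
    #   <number of goal states>
    #   <row> <col> <direction>                  (one line per goal state)
    #   ...
    #   <max search depth>                       (last line of the file)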

    def possible(self, state, maze):
        ans = []
        y, x, direction = state[0]
        moves = state[1]
        if (y >= 0 and y < self.Y and x >= 0 and x < self.X):
            if direction == 'u':
                ans.append([[y, x, 'r'], moves + ['Right']])
                ans.append([[y, x, 'l'], moves + ['Left']])
                if maze[y][x][0] == '0' and (y < self.Y - 1):
                    ans.append([[y + 1, x, direction], moves + ['Go']])
            elif direction == 'l':
                ans.append([[y, x, 'u'], moves + ['Right']])
                ans.append([[y, x, 'd'], moves + ['Left']])
                if maze[y][x][1] == '0' and (x > 0):
                    ans.append([[y, x - 1, direction], moves + ['Go']])
            elif direction == 'd':
                ans.append([[y, x, 'l'], moves + ['Right']])
                ans.append([[y, x, 'r'], moves + ['Left']])
                if maze[y][x][2] == '0' and (y > 0):
                    ans.append([[y - 1, x, direction], moves + ['Go']])
            else:
                ans.append([[y, x, 'd'], moves + ['Right']])
                ans.append([[y, x, 'u'], moves + ['Left']])
                if maze[y][x][3] == '0' and (x < self.X - 1):
                    ans.append([[y, x + 1, direction], moves + ['Go']])
        return ans

    def findPath(self, maze, start, finish, depth, set_print):
        #print(start,finish,'ROBOT QL')
        actual = [[], []]
        next = []
        for initial in start:
            next.append([initial, []])
        visited = []
        while actual[0] not in finish:
            #print(next,'AGG TMRE')
            if len(next) == 0:
                print('NO WAY')
                return []
            else:
                actual = next.pop(0)
                if actual[0] not in visited:
                    visited.append(actual[0])
                    pos = self.possible(actual, maze)
                    for neighbour in pos:
                        if neighbour[0] not in visited:
                            next.append(neighbour)
                    #if set_print:
                    #	print(visited)
        return actual[1]

    def escucha(self, msg):
        #print(msg.data)
        if msg.data == 'done':
            self.done = True
        elif len(msg.data) <= 2:
            self.what = int(msg.data)
        else:
            self.walls = msg.data.split('#')

    def collector(self, msg):
        if msg.data == 'True':
            self.collected = True
            print('done collecting')

    def recolector(self, msg):
        self.mensajes.append(msg.data)
        self.num_acc += 1
        print(self.mensajes, self.num_acc)

    def reconocedoor(self, msg):
        if msg.data == '1' and self.aux == 0:
            self.aparicion = self.num_acc
            self.aux += 1
            self.flagPuerta = True
        if len(msg.data) > 1 and self.aux == 1:
            try:
                actions = msg.data.split('#')
                self.puerta = self.manyStates(self.actual, actions)
                print('door found')
            except:
                print(msg.data)

    def instaWall(self, msj):
        auxdistance = map(float, msj.data.split(':'))
        self.wallCent = auxdistance[1] < 0.6

    def __init__(self):
        dimX, dimY, self.maze, initial, objective, depth = self.loadWorld(
            'laberintos/c7.txt')
        self.X = dimX
        self.Y = dimY
        #		print("Celdas")
        #		for celdas in self.maze:
        #			print(celdas)
        self.wallCent = False
        self.start = initial
        self.objective = objective
        self.depth = depth
        self.camino = []
        self.walls = []
        self.done = False
        self.current = []
        self.idMsg = 0
        self.emptyMaze = [[['0', '0', '0', '0'] for i in range(dimX)]
                          for j in range(dimY)]
        self.what = -1
        self.collected = False
        self.mensajes = []
        self.num_acc = 0
        self.aparicion = None
        self.aux = 0
        self.actual = None
        self.puerta = None
        self.check_grid = None
        self.flagPuerta = False

        rospy.init_node('brain', anonymous=True)
        self.go = rospy.Publisher('todo', String)
        self.loc = rospy.Publisher('find', String)
        self.buscador = rospy.Publisher('puertas', String)
        rospy.Subscriber('done', String, self.escucha)
        rospy.Subscriber('colectabuzz', String, self.collector)
        rospy.Subscriber('sapo', String, self.recolector)
        rospy.Subscriber('doorFinder', String, self.reconocedoor)
        rospy.Subscriber('obstaculo', String, self.instaWall)
        self.chatter = SoundClient()

        self.init_current_localization()

    def init_current_localization(self):
        ## First, collect every state the robot could possibly be in.
        for y in range(len(self.maze)):
            for x in range(len(self.maze[y])):
                self.current.append([[y, x, 'u']] +
                                    [shiftByn(self.maze[y][x], 0)])
                self.current.append([[y, x, 'l']] +
                                    [shiftByn(self.maze[y][x], 1)])
                self.current.append([[y, x, 'd']] +
                                    [shiftByn(self.maze[y][x], 2)])
                self.current.append([[y, x, 'r']] +
                                    [shiftByn(self.maze[y][x], 3)])

    # Reset the visited cells and set which are reachable
    # pos 0 -> Reachable
    # pos 1 -> Visited
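    # e.g. check_grid[2][3] == [True, False] would mean the cell at row 2,
    # column 3 is reachable from the start but has not been visited yet
    # (indices shown here are only an illustration).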
    def reset_reachable_grid(self):
        self.check_grid = [[[True, False] for i in range(self.X)]
                           for j in range(self.Y)]
        if self.start[0]:
            self.check_grid[self.start[0][0]][self.start[0][1]][1] = True
            for j in range(self.Y):
                for i in range(self.X):
                    possible_route = self.findPath(self.maze, self.start,
                                                   self.makeGoals(i, j),
                                                   self.depth, False)
                    if len(possible_route) == 0 and [j, i] != self.start[0]:
                        self.check_grid[j][i][0] = False

    def reset_check_grid(self):
        if self.start[0]:
            for j in range(self.Y):
                for i in range(self.X):
                    if self.check_grid[j][i][0]:
                        self.check_grid[j][i][1] = False

    def makeGoals(self, x, y):
        ans = []
        ans.append([y, x, 'u'])
        ans.append([y, x, 'l'])
        ans.append([y, x, 'd'])
        ans.append([y, x, 'r'])
        return ans

    def makePath(self, maze):
        self.camino = self.findPath(maze, self.start, self.objective,
                                    self.depth, False)
        ans = ""
        for paso in self.camino:
            ans += paso + "#"
        return ans[:-1]

    def localize(self):
        ## Spin the robot in place so it can see the walls around it.
        primerIntento = True
        while len(self.current) > 1 and (not rospy.is_shutdown()):
            con = 0
            while len(self.walls) < 4 and (not rospy.is_shutdown()):
                self.loc.publish('Left#Left#Left#Left')
            self.loc.publish('')
            self.done = False
            print(self.walls)
            ## Discard the candidate states whose walls do not match.
            remove = []
            currentBackup = [] + self.current
            for choice in range(len(self.current)):
                if self.current[choice][1] != self.walls:
                    remove.append(choice)
            while len(remove) > 0:
                self.current.pop(remove.pop(-1))
            ## Make the robot perform an action and repeat the process.
            ## Try more than once to be sure there really is no solution.
            if len(self.current) < 1:
                if primerIntento:
                    print 'Primer intento'
                    primerIntento = False
                    self.current = currentBackup
                    self.walls = []
                    continue
                else:
                    print 'Segundo intento'
                    break
            else:
                primerIntento = True
            msg = ''
            for pared in self.walls:
                if pared == '1':
                    msg += 'Left#'
                else:
                    break
            msg += 'Go'
            print('Parece que estoy en:')
            for estado in self.current:
                print(estado)
            if len(self.current) == 1:
                return [self.current[0][0]]
            self.current = self.actualizarEstados(msg)
            print('Parece que estare en:')
            for estado in self.current:
                print(estado)
            self.walls = []
            print(msg, self.done)
            while not self.done and not rospy.is_shutdown():
                self.go.publish(msg)
            self.done = False
            print(len(self.current), con)
            self.go.publish('')
        return [self.current[0][0]]

    def actualizarEstados(self, instruccion):
        # Advance every candidate state according to the executed instruction.
        # Hypotheses that would step outside the maze are dropped instead of
        # raising an index error (this previously crashed, e.g. at (4, 0)).
        new = []
        for estado in self.current:
            aux = estado
            for message in instruccion.split('#'):
                if message == 'Go':
                    if aux[0][2] == 'u':
                        newY, newX, shift = aux[0][0] + 1, aux[0][1], 0
                    elif aux[0][2] == 'l':
                        newY, newX, shift = aux[0][0], aux[0][1] - 1, 1
                    elif aux[0][2] == 'd':
                        newY, newX, shift = aux[0][0] - 1, aux[0][1], 2
                    else:  # 'r'
                        newY, newX, shift = aux[0][0], aux[0][1] + 1, 3
                    if 0 <= newY < self.Y and 0 <= newX < self.X:
                        new.append([[newY, newX, aux[0][2]]] +
                                   [shiftByn(self.maze[newY][newX], shift)])
                else:
                    if aux[0][2] == 'u':
                        aux[0][2] = 'l'
                    elif aux[0][2] == 'l':
                        aux[0][2] = 'd'
                    elif aux[0][2] == 'd':
                        aux[0][2] = 'r'
                    elif aux[0][2] == 'r':
                        aux[0][2] = 'u'
        return new

    def checkImages(self):
        print("Buscando imagenes en " + str(self.start[0]))
        self.walls = []
        while len(self.walls) < 4 and (not rospy.is_shutdown()):
            self.loc.publish('Left#Left#Left#Left')
        self.loc.publish('')
        self.done = False

    def newState(self, state, action, inverted=False):
        where = state[2]
        y = state[0]
        x = state[1]
        if action == 'Right':
            if where == 'u':
                where = 'r'
            elif where == 'r':
                where = 'd'
            elif where == 'd':
                where = 'l'
            elif where == 'l':
                where = 'u'
        elif action == 'Left':
            if where == 'u':
                where = 'l'
            elif where == 'l':
                where = 'd'
            elif where == 'd':
                where = 'r'
            elif where == 'r':
                where = 'u'
        elif action == 'Go':
            if inverted is False:
                if where == 'u':
                    y = y + 1
                elif where == 'r':
                    x = x + 1
                elif where == 'd':
                    y = y - 1
                elif where == 'l':
                    x = x - 1
            elif inverted is True:
                if where == 'u':
                    y = y - 1
                elif where == 'r':
                    x = x - 1
                elif where == 'd':
                    y = y + 1
                elif where == 'l':
                    x = x + 1
        return [y, x, where]
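    # Example: newState([0, 0, 'u'], 'Go') -> [1, 0, 'u'], while
    # newState([0, 0, 'u'], 'Go', inverted=True) -> [-1, 0, 'u'] (the reverse move).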

    def manyStates(self, state, actions, inverted=False):
        if inverted is False:
            for action in actions:
                state = self.newState(state, action)
        elif inverted is True:
            for i in range(len(actions)):
                state = self.newState(state, actions[-1 - i], True)
        return state

    def markManyStates(self, state, actions, inverted=False):
        if inverted is False:
            for action in actions:
                state = self.newState(state, action)
        elif inverted is True:
            #print(actions, 'truth has come')
            for i in range(len(actions)):
                state = self.newState(state, actions[-1 - i], True)
                self.check_grid[state[0]][state[1]][1] = True  # check_grid is indexed [y][x]

    def modifyMap(self, maze, state):
        y = state[0]
        x = state[1]
        where = state[2]
        if where == 'u':
            maze[y][x][0] = 1
        elif where == 'l':
            maze[y][x][1] = 1
        elif where == 'd':
            maze[y][x][2] = 1
        elif where == 'r':
            maze[y][x][3] = 1
        return maze

    def explore(self):
        initial = self.start
        done = False
        while not done and not rospy.is_shutdown():
            print(self.start[0])
            mens = self.makePath(self.emptyMaze)
            if len(mens) == 0:
                break
            while not rospy.is_shutdown() and self.what < 0:
                self.go.publish(mens)
            actions = mens.split('#')
            self.go.publish('')
            if (len(actions) == self.what):
                done = True
                break
            hechos = []
            for i in range(self.what):
                hechos.append(actions.pop(0))
            aux = self.manyStates(self.start[0], hechos)
            self.start = [aux]
            self.emptyMaze = self.modifyMap(self.emptyMaze, self.start[0])
            self.what = -1

    def undo_actions(self, for_undo_list):
        actions = []
        for do in for_undo_list:
            if do != 'Go':
                do = do.split('#')
                for i in do:
                    if i == 'Left':
                        i = i.replace('Left', 'Right')
                    elif i == 'Right':
                        i = i.replace('Right', 'Left')
                    actions.append(i)
            elif do == 'Go':
                actions.append(do)
            else:
                print(do)
        return actions
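    # Example: undo_actions(['Left#Go', 'Go']) -> ['Right', 'Go', 'Go']
    # (turns are mirrored left/right; the caller replays the result as inverted moves).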

    def go_objective(self, objective):
        initial = self.start
        self.done = False
        aux = self.objective
        #print(initial, self.puerta, self.objective, 'lkdd')
        self.objective = objective
        print("Estoy en " + str(self.start) + ", ire a " + str(self.objective))
        mens = self.makePath(self.maze)
        while not self.done and not rospy.is_shutdown():
            self.go.publish(mens)
            #print(self.done)
        self.go.publish('')
        self.start = self.objective
        self.objective = aux
        self.done = False

    def modo_busqueda(self):
        self.reset_reachable_grid()
        self.set_check_state()
        print("Estados actualizados, comenzando busqueda")
        while not self.collected and not rospy.is_shutdown():
            for j in range(self.Y):
                for i in range(self.X):
                    # Check the cells that are reachable and have not been visited yet
                    if self.check_grid[j][i][0] and not self.check_grid[j][i][
                            1] and not self.collected:
                        #self.go_objective(self.makeGoals(i,j))
                        self.goTo(self.makeGoals(i, j))
                        while not self.done and not rospy.is_shutdown():
                            pass
                        self.checkImages()
            # If we get here without finding the targets, reset the visited cells
            self.reset_check_grid()

    def set_check_state(self):
        undo_actions = self.undo_actions(self.mensajes)
        self.markManyStates(self.start[0], undo_actions, True)

    def findDoor(self):
        if self.aparicion is None:
            print('no encontre la puerta')
            return
        if self.aparicion == 0:
            undo = self.mensajes
        else:
            undo = self.mensajes[self.aparicion:]
        actions = []
        for do in undo:
            if do != 'Go':
                do = do.split('#')
                for i in do:
                    if i == 'Left':
                        i = i.replace('Left', 'Right')
                    elif i == 'Right':
                        i = i.replace('Right', 'Left')
                    actions.append(i)
            elif do == 'Go':
                actions.append(do)
            else:
                print(do)
        self.puerta = self.manyStates(self.start[0], actions, True)
        print(self.puerta, 'puerta')

    def goDoor(self):
        initial = self.start
        self.goTo([self.puerta])

    def goTo(self, objective):
        save = self.objective
        print('Estoy en ' + str(self.start[0]) + ', ire a ' + str(objective))
        self.objective = objective
        self.done = False
        mens = self.makePath(self.maze)
        print(mens)
        if len(mens) != 0:
            self.what = -1
            while not rospy.is_shutdown() and self.what < 0:
                self.go.publish(mens)
            actions = mens.split('#')
            self.go.publish('')
            print(mens, self.what)
            hechos = []
            for i in range(self.what):
                print(actions)
                hechos.append(actions.pop(0))
            print('Hice las siguientes acciones: ', hechos)
            aux = self.manyStates(self.start[0], hechos)
            print('Quede en la posicion ', aux)
            print('')
            self.start = [aux]
            while len(actions) == self.what and not rospy.is_shutdown():
                pass
            if (len(actions) == self.what):
                done = True
            self.what = -1
        self.objective = save

    def knockDoor(self):
        self.buscador.publish('Clear')
        self.chatter.say('Alakazam')
        while self.wallCent:
            rospy.sleep(1)
        self.chatter.say('Going through')
        while not rospy.is_shutdown() and not self.done:
            self.go.publish('Go')
        self.go.publish('')
        print(self.start)
        self.start = [self.manyStates(self.start[0], ['Go'])]
        print(self.start)
        rospy.sleep(1)
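
# --- Standalone sketch (not part of the original code) -----------------------
# Illustrates the wall encoding used by Master.possible/findPath: every cell is
# a list of four flags [up, left, down, right], where '0' means the robot may
# leave the cell in that direction.  The 2x2 maze below is hypothetical.
def _demo_find_path():
    # A throwaway subclass so Master.__init__ (which needs a ROS graph,
    # publishers and a world file) can be skipped for this illustration.
    class _MapOnly(Master):
        def __init__(self, x, y):
            self.X = x
            self.Y = y

    maze = [[['0', '1', '1', '0'], ['1', '0', '1', '1']],
            [['1', '1', '0', '0'], ['1', '0', '1', '1']]]
    m = _MapOnly(2, 2)
    start = [[0, 0, 'u']]                 # one hypothesis: cell (0, 0), facing 'u'
    goals = [[1, 1, d] for d in ('u', 'l', 'd', 'r')]
    print(m.findPath(maze, start, goals, None, False))  # expected: ['Go', 'Right', 'Go']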
Example #45
0
class spr:
	def __init__(self):

		rospy.on_shutdown(self.cleanup)
		self.voice = rospy.get_param("~voice", "voice_cmu_us_rms_cg_clunits")
		self.question_start_signal = rospy.get_param("~question_start_signal", "")



		self.if_say=0
		#for crowd question
		self.appl=['children','adults','elders']
		self.gppl=['females','males','women','men','boys','girls']
		self.people=self.appl+self.gppl
		self.posprs=['standing','sitting','lying']
		self.posppl=['standing','sitting','lying','standing or sitting','standing or lying','sitting or lying']
		self.gesture=['waving','rising left arm','rising right arm','pointing left','pointing right']
		self.gprsng=['male or female','man or woman','boy or girl']
		self.gprsn=['female','male','woman','man','boy','girl']
		self.w=0



		#for object question
		#read object.xml
		self.adja=['heaviest','smallest','biggest','lightest']
		self.adjr=['heavier','smaller','bigger','lighter']
		self.size_order = (
			'mixed-nuts', 'food', 'fork', 'cutlery', 'spoon', 'cutlery',
			'knife', 'cutlery','canned-fish', 'food', 'cup', 'container', 
			'orange-juice', 'drink', 'pringles', 'snack', 'cereal', 'food', 
			'apple-juice', 'drink','milk-tea', 'drink','jelly', 'snack', 
			'milk-biscuit', 'snack', 'root-beer', 'drink', 'potato-chip', 'snack',
			'instant-noodle', 'food', 'green-tea', 'drink','disk','container',
			'cereal-bowl','container','tray','container','shopping-bag','container')
		self.weight_order1 = (
			'cup', 'container', 'cereal-bowl', 'container',
			'disk', 'container', 'tray', 'container',
			'mixed-nuts', 'food',
			'potato-chip', 'snack', 'shopping-bag', 'container',
			'cereal', 'food', 'instant-noodle', 'food',
			'milk-biscuit', 'snack', 'pringles', 'snack',
			'fork', 'cutlery', 'spoon', 'cutlery',
			'knife', 'cutlery', 'canned-fish', 'food',
			'apple-juice', 'drink', 'orange-juice', 'drink',
			'milk-tea', 'drink', 'root-beer', 'drink',
			'jelly', 'snack', 'green-tea', 'drink')


		self.weight_order = (
			'cup', 'the top of the shelf', 'cereal-bowl', 'the top of the shelf',
			'disk', 'the top of the shelf', 'tray', 'the top of the shelf',
			'mixed-nuts', 'kitchen-table',
			'potato-chip', 'coffee-table', 'shopping-bag', 'the top of the shelf',
			'cereal', 'kitchen-table', 'instant-noodle', 'kitchen-table',
			'milk-biscuit', 'coffee-table', 'pringles', 'coffee-table',
			'fork', 'the top of the shelf', 'spoon', 'the top of the shelf',
			'knife', 'the top of the shelf', 'canned-fish', 'kitchen-table',
			'apple-juice', 'bar-table', 'orange-juice', 'bar-table',
			'milk-tea', 'bar-table', 'root-beer', 'bar-table',
			'jelly', 'coffee-table', 'green-tea', 'bar-table',
			'cutlery', 'the top of the shelf', 'container', 'the top of the shelf',
			'food', 'kitchen-table', 'snack', 'bar-table', 'drink', 'bar-table')
		self.category = ('container', '5', 'cutlery', '3', 'drink', '5', 'food', '4', 'snack', '4')
		

		self.object_colour = (
			'cup', 'green red and orange', 'cereal-bowl', 'red',
			'mixed-nuts', 'white',
			'disk', 'blue', 'tray', 'purple',
			'potato-chip', 'yellow', 'shopping-bag', 'red and white',
			'cereal', 'red and blue', 'instant-noodle', 'yellow',
			'milk-biscuit', 'blue and red', 'pringles', 'green and red',
			'fork', 'silver', 'spoon', 'silver',
			'knife', 'silver', 'canned-fish', 'red and white',
			'apple-juice', 'red', 'orange-juice', 'white and green',
			'milk-tea', 'blue and black', 'root-beer', 'brown',
			'jelly', 'red and pink', 'green-tea', 'green')
		









		self.location=('small shelf','living-room','sofa','living-room','coffee-table','living-room',
					   'arm-chair-a','living-room','arm-chair-b','living-room','kitchen-rack','kitchen','kitchen-table','kitchen',
					   'kitchen shelf','kitchen','kitchen-counter','kitchen','fridge','kitchen',
					   'chair','dining-room','dining-table-a','dining-room','little table','dining-room',
					   'right planks','balcony','balcony-shelf','balcony','entrance-shelf','entrance',
					   'bar-table','dining-room','dining-table-b','dining-room','shelf','dining-room')
		self.doornum=('living-room','2','kitchen','1','dining-room','1','hallway','1')
		self.thingnum=('2','arm-chair','living-room','6','chair','dining-room','2','dining-table','dining-room',
				'1','kitchen-rack','kitchen','1','kitchen-table','kitchen',
				'1','small shelf','living-room','1','sofa','living-room','1','coffee-table','living-room',
				'1','little table','dining-room','1','bar-table','dining-room','1','shelf','dining-room')


		# Create the sound client object
		self.soundhandle = SoundClient() 
		rospy.sleep(1)
		self.riddle_turn = rospy.Publisher('turn_signal', String, queue_size=15)
		self.soundhandle.stopAll()
		self.soundhandle.say('hello I want to play riddles',self.voice)
		rospy.sleep(3)
		self.soundhandle.say('I will turn back after ten seconds',self.voice)
		rospy.sleep(3)
		self.soundhandle.say('ten',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('nine',self.voice)
		self.riddle_turn.publish("turn_robot")#publish msg to nav
		rospy.sleep(1)
		self.soundhandle.say('eight',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('seven',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('six',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('five',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('four',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('three',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('two',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('one',self.voice)
		rospy.sleep(1)
		self.soundhandle.say('here I come',self.voice)
		rospy.sleep(1)
		rospy.Subscriber('human_num', String, self.h_num)
		rospy.Subscriber('female_num', String, self.f_num)
		rospy.Subscriber('male_num', String, self.m_num)
		rospy.Subscriber('taking_photo_signal', String, self.remind_people)
		if self.if_say==0:
			rospy.Subscriber('recognizer_output', String, self.talk_back)

	def h_num(self,msg):
		msg.data=msg.data.lower()
		self.soundhandle.say('I have already taken the photo',self.voice)
		rospy.sleep(3)
		self.soundhandle.say('please wait for a moment',self.voice)
		rospy.sleep(3)
		self.crowd_num=msg.data
		print "human number is " + msg.data
		self.soundhandle.say('human number is  '+msg.data,self.voice)
		rospy.sleep(4)

	def f_num(self,msg):
		msg.data=msg.data.lower()
		print "female number is " + msg.data
		self.female_num=msg.data
		self.soundhandle.say('female number is  '+msg.data,self.voice)
		rospy.sleep(4)

	def m_num(self,msg):
		msg.data=msg.data.lower()
		print "male number is " + msg.data
		self.male_num=msg.data
		self.soundhandle.say('male number is ' +msg.data,self.voice)
		rospy.sleep(4)
		self.soundhandle.say('who wants to play riddles with me',self.voice)
		rospy.sleep(3.5)
		self.soundhandle.say('please stand in front of me and wait for five seconds',self.voice)
		rospy.sleep(8.5)
		self.soundhandle.say('please ask me after you hear',self.voice)
		rospy.sleep(2.5)
		self.soundhandle.playWave(self.question_start_signal+"/question_start_signal.wav")
		rospy.sleep(1.3)
		self.soundhandle.say('I am ready',self.voice)
		rospy.sleep(1.3)
		self.soundhandle.playWave(self.question_start_signal+"/question_start_signal.wav")
		rospy.sleep(1.3)
		self.w=1

	def answer_How_many_people_are_in_the_crowd(self,msg):
		msg.data=msg.data.lower()
		self.soundhandle.say('the answer is there are '+msg.data+' in the crowd',self.voice)
		rospy.sleep(3.5)
		self.soundhandle.say('OK I am ready for next question', self.voice)
		rospy.sleep(2)
		self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
		rospy.sleep(1.2)

	def remind_people(self, msg):
		msg.data = msg.data.lower()
		if msg.data=='start':
			self.soundhandle.say('I am going to take a photo',self.voice)
			rospy.sleep(3)
			self.soundhandle.say('please look at me and smile',self.voice)
			rospy.sleep(3)
			self.soundhandle.say('three',self.voice)
			rospy.sleep(1)
			self.soundhandle.say('two',self.voice)
			rospy.sleep(1)
			self.soundhandle.say('one',self.voice)
			rospy.sleep(1)

	def talk_back(self, msg):
		msg.data = msg.data.lower()
		print msg.data
		if self.w==1 :
			self.sentence_as_array=msg.data.split('-')
			self.sentence=msg.data.replace('-',' ')
			print self.sentence
			#object
			if msg.data.find('which-city-are-we-in')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard which city are we in ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Nagoya', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Nagoya')
			elif msg.data.find('what-is-the-name-of-your-team')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what is the name of your team', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is our team name is kamerider ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('our team name is kamerider')
			elif msg.data.find('how-many-teams-participate-in-robocup-at-home-this-year')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard how many teams participate in robocup at home this year', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is thirty one', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('31')
			elif msg.data.find('who-won-the-popular-vote-in-the-us-election')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who won the popular vote in the us election', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Hillary Clinton ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Hillary Clinton')
			elif msg.data.find('what-is-the-highest-mountain-in-japan')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what is the highest mountain in japan', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Mount Fuji', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Mount Fuji')
			elif msg.data.find('platforms')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard name the two robocup at home standard platforms', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Pepper and HSR', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Pepper and HSR')
			elif msg.data.find('what-does-dspl-stand-for')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what does dspl stand for', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Domestic Standard Platform League', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Domestic Standard Platform League')
			elif msg.data.find('what-does-sspl-stand-for')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what does sspl stand for', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Social Standard Platform League', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Social Standard Platform League')
			elif msg.data.find('who-did-alphabet-sell-boston-dynamics-to')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who did alphabet sell boston dynamics to', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is SoftBank', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('SoftBank')
			elif msg.data.find('nagoya-has-one-of-the-largest-train-stations-in-the-world-how-large-is-it')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard nagoya has one of the largest train stations in the world how large is it', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is over 410000 square metres ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('over 410000 square metres')
			elif msg.data.find('whats-your-teams-home-city')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard whats your teams home city', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is tianjin', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('tianjin')
			elif msg.data.find('who-created-star-wars')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who created star wars', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is George Lucas ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('George Lucas')
			elif msg.data.find('who-lives-in-a-pineapple-under-the-sea')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who lives in a pineapple under the sea', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Sponge Bob Squarepants ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('Sponge Bob Squarepants')
			elif msg.data.find('what-did-grace-hopper-invent')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what did grace hopper invent', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is she invented the first compiler ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(4)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
				rospy.sleep(4)
				print('the inventor of the first compiler')
			elif msg.data.find('which-country-is-the-host-of-robocup-ap')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard which-country-is-the-host-of-robocup-ap', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is thailand', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")

			elif msg.data.find('what-is-the-name-of-your-team')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what is the name of your team', self.voice)
				rospy.sleep(4)
				 
				self.soundhandle.say('the answer is our team name is kamerider ', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('wonder')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				 
				self.soundhandle.say('I heard who-is-the-lead-actress-of-wonder-woman', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is gal gadot', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('which-operating-system-are-you-using')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				 	
				self.soundhandle.say('I heard which-operating-system-are-you-using', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is linux', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('who-is-the-richest-man-in-the-world')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who-is-the-richest-man-in-the-world', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is jeff bezos', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('name-one-of-the-famous-thai-food')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard name-one-of-the-famous-thai-food', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is tom yum goong', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('where-are-you-from')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard where-are-you-from', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('I come from China', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('current')>-1 or msg.data.find('president')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard who-is-current-United-state-president', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is donald trump', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('stations')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard how-many-stations-are-there-in-BTS-skytrains', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is thirty-five', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('next-world')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard where-will-next-world-robocup-be-held', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Montreal Canada ', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('singer')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard Who-is-the-singer-of-the-song-grenade', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is Bruno mars', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('which-robot-platfrorm-is-used-for-educaational=league')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard which-robot-platform-is-used-for-educational-league', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is turtle bot', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('tell-me-the-name-of-this-venue')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard tell-me-the-name-of-this-venue', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is bangkok international trade and exhibition center ', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('iphone')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard how-much-does-iphone-x-cost-in usa', self.voice)
				rospy.sleep(5)
				self.soundhandle.say('the answer is 999 us dollars ', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('what-is-the-batman-super-power')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what-is-the-batman-super-power', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is rich ', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('how-many-team-in-at-home-league')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard how-many-team-in-at-home-league', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is 7 teams ', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('what-is-the-biggest-airport-in-thailand')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what-is-the-biggest-airport-in-thailand', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is suvarnabhumi airport', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('what-is-the-tallest-building-in-the-world')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what-is-the-tallest-building-in-the-world', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is burj khalifa', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('what-is-a-name-of-our-galaxy')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what-is-the-name-of-our-galaxy', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is milky way', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('what-is-the-symbolic-animal-of-thailand')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard what-is-the-symbolic-animal-of-thailand', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is elephant', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			elif msg.data.find('how-many-time-robocup-held-in-thailand')>-1:
				os.system("~/catkin_ws/src/speech/kill_pocketsphinx.sh")
				self.soundhandle.say('I heard how-many-time-robocup-held-in-thailand', self.voice)
				rospy.sleep(4)
				self.soundhandle.say('the answer is one time', self.voice)
				rospy.sleep(3)
				self.soundhandle.say('OK I am ready for next question', self.voice)
				rospy.sleep(2.5)
				os.system("~/catkin_ws/src/speech/run_pocketsphinx.sh")
				self.soundhandle.playWave(self.question_start_signal + "/question_start_signal.wav")
			else:
				print 2	

		
		
	def cleanup(self):
		rospy.loginfo("Shutting down spr node...")
示例#46
0
class Follower:
    def __init__(self):

        self.bridge = cv_bridge.CvBridge()
        #cv2.namedWindow("window", 1)
        self.cmd_vel_pub = rospy.Publisher('mobile_base/commands/velocity',
                                           Twist,
                                           queue_size=1)
        self.image_sub = None
        self.colorDetector_sub = None
        self.taskQueue = []
        self.currColString = ''
        self.isMoving = False
        self.twist = Twist()
        self.counter = 0
        self.returnBack = 0
        self.turnBack = False
        self.colorArr1 = []
        self.colorArr2 = []
        self.currColor = 2
        self.colorArr = []
        self.startMotion = False
        self.cnt = 0
        self.ptr = 0
        self.rotCoeff = -1
        self.soundhandle = SoundClient()
        rospy.Subscriber("chatter", String, self.callback)
        self.init_Move()

    def init_Move(self):
        #self.movementController()
        self.listenRequests()

    def unregisterSub(self):
        self.cnt = self.cnt + 1
        print("In unregisterSub COUNT=" + str(self.cnt))
        print("Color=" + str(self.currColor) + " turnBack=" +
              str(self.turnBack))
        self.image_sub.unregister()
        if self.turnBack == True:
            self.turnBackMotion()

    def turnBackMotion(self):
        self.cnt = self.cnt + 1
        print("In turnBackMotion COUNT=" + str(self.cnt))
        print("Move back")
        print("Waiting...")

        rospy.sleep(1)
        self.soundhandle.say(
            'Please pick order for table number ' + self.currColString +
            ' I will wait here for thirty seconds', 'voice_kal_diphone')
        rospy.sleep(1)
        rospy.sleep(8)
        self.move("west", 180)
        self.moveAlongLine(2)

    def callback(self, data):
        self.taskQueue.append(str(data))
        self.colorArr.append(int(str(data).split(":")[2]))
        print("Queue Status:" + str(self.taskQueue) + "\n")

    def unregisterColorSub(self):
        self.colorDetector_sub.unregister()
        self.moveAlongLine(1)

    def listenRequests(self):
        while 1:
            while self.ptr != len(self.colorArr):
                if len(self.colorArr) == 0:
                    continue
                self.currColor = self.colorArr[self.ptr]
                self.detectColoredLine()
                while self.cnt != 4:
                    pass
                self.cnt = 0
                self.ptr = self.ptr + 1

    def registerSub(self):
        self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image,
                                          self.image_callback)

    #all micro tasks
    def moveAlongLine(self, line_no):
        #0, 100, 100), cv::Scalar(10, 255, 255) #red
        #[45, 100, 100], [75, 255,255] #green
        #[110,50,50]), [130,255,255] # blue
        line_no = self.currColor

        if line_no == 1:
            self.colorArr1 = [45, 100, 100]
            self.colorArr2 = [75, 255, 255]

        if line_no == 2:
            self.colorArr1 = [0, 100, 100]
            self.colorArr2 = [10, 255, 255]

        if line_no == 3:
            self.colorArr1 = [110, 50, 50]
            self.colorArr2 = [130, 255, 255]
        self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image,
                                          self.image_callback)

    def detectColoredLine(self):
        if self.currColor == 1:
            self.currColString = "one"
        elif self.currColor == 2:
            self.currColString = "two"
        else:
            self.currColString = "three"

        rospy.sleep(1)
        self.soundhandle.say(
            'Please place order for table number ' + self.currColString +
            ' I will wait here for thirty seconds', 'voice_kal_diphone')
        rospy.sleep(1)
        rospy.sleep(5)
        self.cnt = self.cnt + 1
        print("In detectColoredLine COUNT=" + str(self.cnt))
        line_no = self.currColor
        """if self.ptr!=0:
			if self.colorArr[self.ptr-1]==line_no:
				c=1
				while c!=20:
					self.rotatecont()
					c=c+1


				rospy.sleep(1)
				print("MOVE FOR SAME COLOR")"""

        if line_no == 1:
            self.colorArr1 = [45, 100, 100]
            self.colorArr2 = [75, 255, 255]
            self.rotCoeff = -1

        if line_no == 2:
            self.colorArr1 = [0, 100, 100]
            self.colorArr2 = [10, 255, 255]
            if self.ptr == 0:
                self.rotCoeff = -1
            elif self.colorArr[self.ptr - 1] == 1:
                self.rotCoeff = 1
            else:
                self.rotCoeff = -1

        if line_no == 3:

            self.colorArr1 = [110, 50, 50]
            self.colorArr2 = [130, 255, 255]
            self.rotCoeff = 1
            if self.ptr == 0:
                self.rotCoeff = -1

        self.colorDetector_sub = rospy.Subscriber('camera/rgb/image_raw',
                                                  Image,
                                                  self.detect_color_callback)

    def movementController(self):
        #self.moveToGoal(0.0282609660838,0.0342560425716)
        #self.move("east",90)
        """for color in self.colorArr:
			print("COLOR="+str(color))
			self.currColor=color
			self.detectColoredLine()"""

        for color in self.colorArr:

            self.currColor = color
            self.detectColoredLine()
            while self.cnt != 4:
                pass
            self.cnt = 0
            self.ptr = self.ptr + 1

    def move(self, direction, angle):
        print("Angle=" + str(angle))
        vel_info = Twist()
        vel_info.linear.y = 0
        vel_info.linear.z = 0
        vel_info.angular.x = 0
        vel_info.angular.y = 0
        vel_info.angular.z = 0
        vel_info.linear.x = 0
        if direction == "east" or direction == "west":
            angular_speed = 30 * 2 * 3.1416 / 360
            relative_angle = angle * 2 * 3.1416 / 360
            current_angle = 0
            time_prev = rospy.Time.now().to_sec()
            self.cmd_vel_pub.publish(vel_info)
            if direction == "east":
                vel_info.angular.z = -1 * angular_speed
            else:
                vel_info.angular.z = angular_speed
            self.cmd_vel_pub.publish(vel_info)
            while (current_angle < relative_angle):
                self.cmd_vel_pub.publish(vel_info)
                time_now = rospy.Time.now().to_sec()
                current_angle = angular_speed * (time_now - time_prev)
            vel_info.linear.x = 0
            vel_info.angular.z = 0
            self.cmd_vel_pub.publish(vel_info)
            #rospy.sleep(1)
        if direction == "north" or direction == "south":
            if direction == "north":
                vel_info.linear.x = 0.3
            else:
                vel_info.linear.x = -0.3
            self.cmd_vel_pub.publish(vel_info)

    def rotatecont(self):

        vel_info = Twist()
        vel_info.linear.y = 0
        vel_info.linear.z = 0
        vel_info.angular.x = 0
        vel_info.angular.y = 0
        vel_info.angular.z = 0
        vel_info.linear.x = 0

        angular_speed = 30 * 2 * 3.1416 / 360
        relative_angle = 2 * 2 * 3.1416 / 360
        current_angle = 0
        time_prev = rospy.Time.now().to_sec()
        self.cmd_vel_pub.publish(vel_info)

        vel_info.angular.z = self.rotCoeff * angular_speed

        self.cmd_vel_pub.publish(vel_info)
        while (current_angle < relative_angle):
            self.cmd_vel_pub.publish(vel_info)
            time_now = rospy.Time.now().to_sec()
            current_angle = angular_speed * (time_now - time_prev)

        self.cmd_vel_pub.publish(vel_info)
        #rospy.sleep(1)

    def image_callback(self, msg):
        #self.move("west")
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        #0, 100, 100), cv::Scalar(10, 255, 255) #red
        #[45, 100, 100], [75, 255,255] #green
        #[110,50,50]), [130,255,255] # blue
        lower_color = numpy.array(self.colorArr1)  # lower HSV bound for the current line colour
        upper_color = numpy.array(self.colorArr2)  # upper HSV bound
        mask = cv2.inRange(hsv, lower_color, upper_color)

        h, w, d = image.shape
        search_top = 3 * h / 4
        search_bot = 3 * h / 4 + 20
        mask[0:search_top, 0:w] = 0
        mask[search_bot:h, 0:w] = 0

        M = cv2.moments(mask)
        if M['m00'] > 0:
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.circle(image, (cx, cy), 20, (12, 12, 12), -1)
            #The proportional controller is implemented in the following lines,
            #which are responsible for linearly scaling the error to drive the control output.
            err = cx - w / 2
            self.twist.linear.x = 0.5
            self.twist.angular.z = -float(err) / 100
            self.cmd_vel_pub.publish(self.twist)
            #print('Linear X='+str(self.twist.angular.z))
            self.isMoving = True
        else:
            self.isMoving = False

        #print("Moving Status="+str(self.isMoving))

        if self.isMoving == False:
            vel = Twist()
            if self.twist.linear.x != 0 and self.twist.angular.z != 0:
                self.twist.linear.x = 1
                self.twist.linear.z = 0
                self.cmd_vel_pub.publish(self.twist)
                print("Linear Movement")
                self.counter = self.counter + 1

                if self.counter == 18:
                    self.counter = 0
                self.twist = Twist()
                self.cmd_vel_pub.publish(self.twist)
                if self.turnBack == False:
                    self.turnBack = True
                else:
                    self.turnBack = False
                self.unregisterSub()

        cv2.imshow("window" + str(self.currColor), image)
        cv2.waitKey(3)

    def moveToGoal(self, xGoal, yGoal):

        #define a client to send goal requests to the move_base server through a SimpleActionClient
        ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)

        #wait for the action server to come up
        while (not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
            rospy.loginfo("Waiting for the move_base action server to come up")
        goal = MoveBaseGoal()
        #set up the frame parameters
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.Time.now()

        # moving towards the goal

        goal.target_pose.pose.position = Point(xGoal, yGoal, 0)
        goal.target_pose.pose.orientation.x = 0.0
        goal.target_pose.pose.orientation.y = 0.0
        goal.target_pose.pose.orientation.z = 0.0
        goal.target_pose.pose.orientation.w = 1.0

        rospy.loginfo("Sending goal location ...")
        ac.send_goal(goal)

        ac.wait_for_result(rospy.Duration(60))

        if (ac.get_state() == GoalStatus.SUCCEEDED):
            rospy.loginfo("You have reached the destination")
            return True

        else:
            rospy.loginfo("The robot failed to reach the destination")
            return False

    def detect_color_callback(self, msg):
        self.rotatecont()
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hsv_yellow = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        lower_color = numpy.array(self.colorArr1)
        upper_color = numpy.array(self.colorArr2)
        lower_yellow = numpy.array([0, 0, 0])
        upper_yellow = numpy.array([50, 50, 100])
        mask = cv2.inRange(hsv, lower_color, upper_color)
        mask_yellow = cv2.inRange(hsv_yellow, lower_yellow, upper_yellow)
        h, w, d = image.shape
        search_top = 3 * h / 4
        search_bot = 3 * h / 4 + 20
        mask[0:search_top, 0:w] = 0
        mask[search_bot:h, 0:w] = 0
        mask_yellow[0:search_top, 0:w] = 0
        mask_yellow[search_bot:h, 0:w] = 0
        M = cv2.moments(mask)
        M_yellow = cv2.moments(mask_yellow)
        if M['m00'] > 0:
            print("M[00]=" + str(M['m00']))
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            err = cx - w / 2
            print("COLOR DETECTED" + str(self.currColor))

            self.unregisterColorSub()
        cv2.imshow("Detecting colored line", image)
        cv2.waitKey(3)
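moveAlongLine and detectColoredLine above hard-code the same three HSV ranges. A small sketch of a shared lookup table, assuming the class's line numbering (1 = green, 2 = red, 3 = blue):

# Shared HSV bounds for the coloured lines used by Follower above.
import numpy

HSV_BOUNDS = {
    1: (numpy.array([45, 100, 100]), numpy.array([75, 255, 255])),   # green
    2: (numpy.array([0, 100, 100]),  numpy.array([10, 255, 255])),   # red
    3: (numpy.array([110, 50, 50]),  numpy.array([130, 255, 255])),  # blue
}

def bounds_for_line(line_no):
    """Return (lower, upper) HSV bounds for a table/line number."""
    return HSV_BOUNDS[line_no]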
示例#47
0
class ASRControl(object):
    """Simple voice control interface for ROS command

    Attributes:
        model: model path
        lexicon: pronunciation dictionary
        kwlist: keyword list file
        pub: where to send commands (default: 'mobile_base/commands/velocity')

    """
    def __init__(self, model, lexicon, kwlist, pub_string):
        # initialize ROS
        self.msg_string = String()
        self.msg_gripper = outputMsg.SModel_robot_output()
        self.gripper_state = 0
        rospy.on_shutdown(self.shutdown)

        # you may need to change publisher destination depending on what you run
        self.pub_string = rospy.Publisher(pub_string, String, queue_size=10)
        self.pub_gripper = rospy.Publisher('SModelRobotOutput',
                                           outputMsg.SModel_robot_output)
        rospy.sleep(2)

        # Activate gripper
        self.msg_gripper = self.genCommand("r", self.msg_gripper)
        self.pub_gripper.publish(self.msg_gripper)
        rospy.sleep(1)

        print "Activating Gripper\n"
        self.msg_gripper = self.genCommand("a", self.msg_gripper)
        self.pub_gripper.publish(self.msg_gripper)
        rospy.sleep(1)

        # initialize pocketsphinx
        config = Decoder.default_config()
        config.set_string('-hmm', model)
        config.set_string('-dict', lexicon)
        config.set_string('-kws', kwlist)

        stream = pyaudio.PyAudio().open(format=pyaudio.paInt16,
                                        channels=1,
                                        rate=16000,
                                        input=True,
                                        frames_per_buffer=1024)
        stream.start_stream()

        self.decoder = Decoder(config)
        self.decoder.start_utt()

        while not rospy.is_shutdown():
            buf = stream.read(1024)
            if buf:
                self.decoder.process_raw(buf, False, False)
            else:
                break
            self.parse_asr_result()

    def parse_asr_result(self):
        """
        publish commands to message based on ASR hypothesis
        """
        if self.decoder.hyp() != None:
            print([(seg.word, seg.prob, seg.start_frame, seg.end_frame)
                   for seg in self.decoder.seg()])
            print("Detected keyphrase, restarting search")
            # 'seg' leaks out of the list comprehension above (Python 2
            # behaviour), so here it refers to the last recognised segment.
            seg.word = seg.word.lower()
            self.decoder.end_utt()
            self.decoder.start_utt()
            # Sending recognized word as string
            if seg.word.find("open") > -1:
                self.msg_string.data = 'open'
                self.msg_gripper = self.genCommand("o", self.msg_gripper)
                self.pub_gripper.publish(self.msg_gripper)
                self.gripper_state = 0

            if seg.word.find("close") > -1:
                self.msg_string.data = 'close'
                self.msg_gripper = self.genCommand("c", self.msg_gripper)
                self.pub_gripper.publish(self.msg_gripper)
                self.gripper_state = 1

            if seg.word.find("robot") > -1:
                self.msg_string.data = 'robot'
                self.soundhandle = SoundClient()
                rospy.sleep(1)
                self.soundhandle.say('Yes, mother.')

            if seg.word.find("recording") > -1:
                self.msg_string.data = 'recording'
                resp = self.send_record_command('record')
                print resp
                if 'starting recorder' in resp:
                    self.soundhandle = SoundClient()
                    rospy.sleep(1)
                    self.soundhandle.say('Started recording.')
                else:
                    self.soundhandle = SoundClient()
                    rospy.sleep(1)
                    self.soundhandle.say('Recording failed.')

            if seg.word.find("stop") > -1:
                if self.gripper_state == 0:
                    self.msg_string.data = 'stop'
                    resp = self.send_record_command('stop')
                    self.soundhandle = SoundClient()
                    rospy.sleep(1)
                    self.soundhandle.say('Stopped recording.')

            self.pub_string.publish(self.msg_string)

    def genCommand(self, char, command):
        """Update the command according to the character entered by the user."""

        if char == 'a':
            command = outputMsg.SModel_robot_output()
            command.rACT = 1
            command.rGTO = 1
            command.rSPA = 255
            command.rFRA = 150

        if char == 'r':
            command = outputMsg.SModel_robot_output()
            command.rACT = 0

        if char == 'c':
            command.rPRA = 255

        if char == 'o':
            command.rPRA = 0

        return command

    def send_record_command(self, string_cmd):
        rospy.wait_for_service('/record/cmd')
        try:
            record_ros_srv = rospy.ServiceProxy('/record/cmd', String_cmd)
            resp = record_ros_srv(string_cmd)
            return resp.res
        except rospy.ServiceException, e:
            print "ROS Record - Service call failed: %s" % e
示例#48
0
        print 'Says a string. For a string on the command line, you must use quotes as'
        print 'appropriate. For a string on standard input, the command will wait for'
        print 'EOF before saying anything.'
        exit(-1)

    # Import after printing usage for speed.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient

    if len(sys.argv) == 1:
        print 'Awaiting something to say on standard input.'

    # Ordered this way to minimize wait time.
    rospy.init_node('say', anonymous=True)
    soundhandle = SoundClient()
    rospy.sleep(1)
    #soundhandle.say('Take me to your leader.','')
    voice = 'voice_kal_diphone'
    volume = 100.0

    if len(sys.argv) == 1:
        s = sys.stdin.read()
    else:
        s = sys.argv[1]

        if len(sys.argv) > 2:
            voice = sys.argv[2]
        if len(sys.argv) > 3:
            volume = float(sys.argv[3])
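The fragment above stops after argument parsing; presumably the script finishes by handing the parsed text to sound_play, roughly as follows (a guess at the missing tail, not the verbatim original):

    # Presumed ending: send the text, voice and volume to sound_play.
    soundhandle.say(s, voice, volume)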
示例#49
0
class TalkBack:

	def __init__(self):
		self.month = ['january','february','march','april','may','june','july','august','september','october','november','december']
		self.week = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
		self.names = {'name':['alex','angel','eve','jamie','jane','liza','melissa','tracy','robin','sophia','alex','angel','edward','homer','jamie','john','kevin','kurt']}
		rospy.on_shutdown(self.cleanup)
		#self.voice = rospy.get_param("~voice", "voice_don_diphone")
		self.voice = rospy.get_param("~voice", "voice_cmu_us_clb_arctic_clunits")
		self.wavepath = rospy.get_param("~wavepath", "")

		self.pub = rospy.Publisher('mission_complete', String, queue_size=20)
		# Create the sound client object
		self.soundhandle = SoundClient()
		rospy.sleep(1)
		self.soundhandle.stopAll()
		rospy.sleep(1)
		self.finished = 0
		self.start = ''
		self.mstype_out1 = 5
		self.mission_out1 = ''
		self.i = 0
		self.h = 0
		rospy.loginfo("To deal with the mission...")
		self.package = "voice"
		self.executable = "talker"
		self.node = roslaunch.core.Node(self.package, self.executable)
		self.launch = roslaunch.scriptapi.ROSLaunch()
		self.launch.start()
#		self.process = launch.launch(node)
#		print self.process.is_alive()
		# self.open_object_name = rospy.Publisher('open_object_name', String, queue_size=10)
		# rospy.init_node('command', anonymous=True)
		# Subscribe to the recognizer output
		# rospy.Subscriber('speech_recognition_start', String, self.start)
		self.gpsr_name = rospy.Publisher('gpsr_name', String, queue_size=10)
		self.gpsr_missiontype1 = rospy.Publisher('gpsr_mstype_out', Int8, queue_size=10)
		self.gpsr_mission1 = rospy.Publisher('gpsr_mission_out', String, queue_size=10)
		rospy.Subscriber('nav_output', String, self.control)
		rospy.Subscriber('gpsr_mstype', Int8, self.mstype_out)
		rospy.Subscriber('gpsr_mission', String, self.mission_out)
		rospy.Subscriber('gpsr_mstype_out', Int8, self.mstype_out)
		rospy.Subscriber('gpsr_mission_out', String, self.mission_out)
	def mstype_out(self, msg):
		self.mstype_out1 = msg.data
#		print self.mstype_out1
		self.gpsr_missiontype1.publish(self.mstype_out1)
	def mission_out(self, msg):
		msg.data = msg.data.lower()
		self.mission_out1 = msg.data
		if self.i == 0:
			self.gpsr_mission1.publish(self.mission_out1)
	def control(self, msg):
		msg.data = msg.data.lower()
		if msg.data.find('arrive') > -1:
			rospy.Subscriber('gpsr_mstype_out', Int8, self.dispatch)
#		self.process.stop()
#		print self.i
		if self.i == 0:
			os.system("rosnode kill /talkback_gpsr")#开始执行任务之后,关闭talkback_gpsr.py,即不再跟人对话
			self.i = 1
	def dispatch(self, msg):
		if self.h == 0:
			if(msg.data == 0):
				rospy.Subscriber('gpsr_mission_out', String, self.inform)
			elif(msg.data == 1):
				self.i = 1
				self.soundhandle.say("dear operator",self.voice)
				rospy.sleep(2)
				self.soundhandle.say("i am ready for your question", self.voice)
				rospy.sleep(3)
				print 555
				#rospy.Subscriber('gpsr_mission', String, self.ans_a_question)
				rospy.Subscriber('recognizer_output', String, self.answer_a_question)
			elif(msg.data == 2):
				rospy.Subscriber('gpsr_mission_out', String, self.ask_a_question)
			self.h = 1
#		self.gpsr_mstype_out = 5
			
		# answer informational questions
	def inform(self, msg):
		msg.data = msg.data.lower()
		if msg.data.find('time') > -1:
			curtime = time.strftime("%H:%M %p", time.localtime())
			self.soundhandle.say("it is "+ curtime, self.voice)
			rospy.sleep(5)

		if msg.data.find('day') > -1:
			timetuple = time.localtime()
			self.soundhandle.say("today is "+ self.week[timetuple[6]], self.voice)
			rospy.sleep(5)
				
		if msg.data.find('date') > -1:
			timetuple = time.localtime()
			_year = time.strftime("%Y", timetuple)
			_month = self.month[timetuple[1]]	
			_day = time.strftime("%d", timetuple)
			self.soundhandle.say("today is "+ _month + " the " + _day + "th " + _year, self.voice)
			rospy.sleep(10)
		
		if msg.data.find('name') > -1:
			self.soundhandle.say("my name is kamerider", self.voice)
			rospy.sleep(5)

		if msg.data.find('team-name') > -1:
			self.soundhandle.say("my team's name is kamerider", self.voice)
			rospy.sleep(5)
		
		os.system("rosnode kill /talkback")
		
	def answer_a_question(self, msg):
		msg.data = msg.data.lower()
		if msg.data.find('kamerider') > -1:
			if msg.data.find('german-count') > -1 or msg.data.find('invented the zeppelin') > -1 or msg.data.find('invented the the zeppelin') > -1 or msg.data.find('invented zeppelin') > -1 :
				self.soundhandle.say('Count von Zeppelin invented the zeppelin', self.voice)
				rospy.sleep(4)
			elif msg.data.find('first-president') > -1 or msg.data.find('the-usa') > -1 : 
				self.soundhandle.say('george washington was the first president of america', self.voice)
				rospy.sleep(4)
			elif msg.data.find('titanic built') > -1 or msg.data.find('titanic-built') > -1 or msg.data.find('in which city') > -1 :
				self.soundhandle.say('titanic was built in belfast', self.voice)
				rospy.sleep(4)
			elif msg.data.find('queen-victoria') > -1 or msg.data.find('how-many-children') > -1 :
				self.soundhandle.say('she had nine children', self.voice)
				rospy.sleep(4)
			elif msg.data.find('french-king') > -1 or msg.data.find('the-sun-king') > -1 :
				self.soundhandle.say('louis fourteen was called the sun king', self.voice)
				rospy.sleep(4)
			elif msg.data.find('in england') > -1 or msg.data.find('in the england') > -1 or msg.data.find('northern-frontier') > -1 or msg.data.find('roman-empire') > -1 : 
				self.soundhandle.say('hadrians wall was the northern frontier of the roman empire in england', self.voice)
				rospy.sleep(4)
			elif msg.data.find('former-name') > -1 or msg.data.find('new-york') > -1 : 
				self.soundhandle.say('the former name of new york was new amsterdam', self.voice)
				rospy.sleep(4)
			elif msg.data.find('another-name') > -1 or msg.data.find('study of fosslis') > -1 or msg.data.find('study the fosslis') > -1 or msg.data.find('study-of-the-fosslis') > -1 : 
				self.soundhandle.say('another name for the study of fossils is paleontology', self.voice)
				rospy.sleep(4)
			elif msg.data.find('dragonflies') > -1 or msg.data.find('prefer-to-eat') > -1 : 
				self.soundhandle.say('dragonflies prefer to eat mosquitoes', self.voice)
				rospy.sleep(4)	
			elif msg.data.find('cannot-fly') > -1 or msg.data.find('but can jump') > -1 : 
				self.soundhandle.say('they are fleas', self.voice)
				rospy.sleep(4)
			elif msg.data.find('european-bison') > -1 : 
				self.soundhandle.say('the name of the european bison is wisent', self.voice)
				rospy.sleep(4)
			elif msg.data.find('called a fish') > -1 or msg.data.find('called fish') > -1 or msg.data.find('called the fish') > -1 or msg.data.find('snake-like') > -1 : 
				self.soundhandle.say('the eel is the fish with a snake-like body', self.voice)
				rospy.sleep(4)
			elif msg.data.find('which-plant') > -1 or msg.data.find('canadian-flag') > -1 : 
				self.soundhandle.say('the canadian flag contains a maple leaf', self.voice)
				rospy.sleep(4)
			elif msg.data.find('house-number') > -1 or msg.data.find('simpsons') > -1 : 
				self.soundhandle.say('the house number of the simpsons is seven hundred and forty-two', self.voice)
				rospy.sleep(4)
			elif msg.data.find('in computing') > -1 or msg.data.find('ram short') > -1 or msg.data.find('ram the short') > -1 or msg.data.find('short-for') > -1 : 
				self.soundhandle.say('ram is short for random access memory', self.voice)
				rospy.sleep(4)
			elif msg.data.find('which-hemisphere') > -1 or msg.data.find('the-most-dinosaur') > -1 or msg.data.find('dinosaur-skeletons') > -1 : 
				self.soundhandle.say('most dinosaur skeletons have been found in the northern hemisphere', self.voice)
				rospy.sleep(4)
			elif msg.data.find('color-is-cobalt') > -1 or msg.data.find('cobalt') > -1 : 
				self.soundhandle.say('the color of cobalt is blue', self.voice)
				rospy.sleep(4)
			elif msg.data.find('which device') > -1 or msg.data.find('use to look at') > -1 or msg.data.find('at-the-stars') > -1 or msg.data.find('at-stars') > -1 : 
				self.soundhandle.say('we use a telescope to look at the stars', self.voice)
				rospy.sleep(4)
			elif msg.data.find('light-intensity') > -1 : 
				self.soundhandle.say('the candela indicates the light intensity', self.voice)
				rospy.sleep(4)
			elif msg.data.find('first-american') > -1 : 
				self.soundhandle.say('alan shepard was the first american in space', self.voice)
				rospy.sleep(4)
			elif msg.data.find('steam-engine') > -1 : 
				self.soundhandle.say('james watt was the inventor of the steam engine', self.voice)
				rospy.sleep(4)
			elif msg.data.find('henry-mill') > -1 : 
				self.soundhandle.say('the typewriter was invented by henry mill', self.voice)
				rospy.sleep(4)
			elif msg.data.find('lightest') > -1 or msg.data.find('existing-metal') > -1 : 
				self.soundhandle.say('aluminium is the lightest existing metal', self.voice)
				rospy.sleep(4)
			elif msg.data.find('primary-colors') > -1 : 
				self.soundhandle.say('they are blue yellow and red', self.voice)
				rospy.sleep(4)
			elif msg.data.find('planet') > -1 or msg.data.find('nearest-the-sun') > -1 : 
				self.soundhandle.say('the planet nearest the sun is mercury', self.voice)
				rospy.sleep(4)
			elif msg.data.find('great-wall') > -1 : 
				self.soundhandle.say('it is six thousand two hundred fifty-nine kilometers', self.voice)
				rospy.sleep(4)
			elif msg.data.find('largest-number') > -1 or msg.data.find('five-digits') > -1 : 
				self.soundhandle.say('the largest number of five digits is ninety-nine thousand nine hundred and ninety-nine', self.voice)
				rospy.sleep(4)
			elif msg.data.find('new-zealand') > -1 or msg.data.find('on-the-flag') > -1 or msg.data.find('on flag') > -1 or msg.data.find('on the the flag') > -1 : 
				self.soundhandle.say('there are four stars on the flag of new zealand', self.voice)
				rospy.sleep(4)
			else :
				self.soundhandle.say('Dear operator', self.voice)
				rospy.sleep(1.5)
				self.soundhandle.say('Please repeat your question louder and slower', self.voice)
				rospy.sleep(3)
			msg.data = ''
		os.system("rosnode kill /talkback")

	def ask_a_question(self, msg):
		msg.data = msg.data.lower()
		if msg.data.find('name') > -1:
			self.soundhandle.say("what is your name", self.voice) 
			rospy.sleep(5)
			rospy.Subscriber('recognizer_output', String, self.lis_to_ans)
		
	
	def get_keyword(self, data):
		# Return the first known name found in the recognizer output,
		# or 'None' if no keyword list contains a match.
		for (temp, keywords) in self.names.iteritems():
			for word in keywords:
				if data.find(word) > -1:
					return word
		return 'None'

	def lis_to_ans(self, msg):
		msg.data = msg.data.lower()
		keyword = self.get_keyword(msg.data)
		if keyword != 'None':
			self.soundhandle.say("Hello " + keyword, self.voice)
			rospy.sleep(10)
			print 1
			self.temp_pub = rospy.Publisher('information', String, queue_size=20)
			keyword = self.get_keyword(msg.data)
			self.temp_pub.publish(keyword)
			keyword = 'None'
			os.system("rosnode kill /talkback")	
			os.system("rosnode kill /soundplay_node")	
			os.system("rosnode kill /talker")
			os.system("rosnode kill /server2topic")		
	def cleanup(self):
		rospy.loginfo("Shutting down talkback node...")
示例#50
0
    p1 = (1, 2, 40, 'Bernstein')
    p2 = (1, 2, 40, 'Bowers')
    p3 = (1, 2, 40, 'Sprague')
    p4 = (1, 2, 40, 'Aboutabl')
    p5 = (1, 2, 40, 'Fox')
    p6 = (1, 2, 40, 'Simmons')

    # Points are added to an array
    points = []
    points.append(p1)
    points.append(p2)
    points.append(p3)
    points.append(p4)
    points.append(p5)
    points.append(p6)

    nav_node = NavNode()
    soundhandle = SoundClient()

    for x in points:
        current = x
        # The current Professor Tuple is accessed
        nav_node.goto_point(current[0], current[1], current[2])
        message = "Happy holidays Professor " + current[3]
        # The script waits for the robot to arrive or fail
        nav_node.ac.wait_for_result()
        time.sleep(10)
        s1 = soundhandle.voiceSound(message)
        s1.play()
""" Loop needs a wait delay, and it needs a sound output using the speech stuff"""
示例#51
0
    def __init__(self):
        # initiliaze
        rospy.init_node('ttsconvert', anonymous=False)

        #rospy.loginfo('Example: Playing sounds in *blocking* mode.')
        soundhandle = SoundClient(blocking=True)

        #rospy.loginfo('Good Morning.')
        #soundhandle.playWave('/home/turtlebot/wavFiles/start.wav')

        # What to do you ctrl + c
        rospy.on_shutdown(self.shutdown)

        # Import the graphml to be read later
        G = nx.read_graphml("2nd_demo.graphml", unicode)
        #G = nx.read_graphml("/home/raeesa/networkx/examples/2nd_demo.graphml",unicode)
        #G = nx.read_graphml("/home/raeesa/Desktop/write_graphml.graphml",unicode)
        root = 'n1'
        current = root

        # Clear words.log
        f = open("words.log", "r+")
        f.seek(0)
        f.truncate()

        # Clear clean.log
        with open("clean.log", "w"):
            pass

# Constantly read from words.log for new lines
        while True:
            line = f.readline()

            # If the user said something
            if line:
                # The lines with dialogue all begin with a numerical value
                if line[0][:1] in '0123456789':
                    # remove 9 numbers, colon, and space from the beginning, and any whitespace from the end of the line
                    speech = line[11:].rstrip()
                    print speech

                    count = 0
                    # Search through all edges connected to the current node
                    for e in G.edges(current, data=True):

                        # If what was spoken matches the 'spoken' attribute of the edge
                        if str(speech) == str(e[2].values()[0]):
                            # Switch the current node to be the node the edge goes to
                            current = e[1]

                            # find '*' symbol in the output string and replace it with what the user said (stored in speech)

                            # Get the dialogue stored in the new node and save it in 'output'
                            output = G.node[current]['dialogue']

                            tts = gTTS(text=output, lang='en')
                            tts.save('/home/raeesa/Desktop/line.wav')

                            subprocess.Popen([
                                'gnome-terminal', '-x', 'bash', '-c',
                                'pacmd set-source-mute 2 1'
                            ])
                            #subprocess.Popen(['gnome-terminal', '-x', 'bash', '-c','amixer set Capture nocap'])
                            #rospy.loginfo('playing output')
                            soundhandle.playWave(
                                '/home/raeesa/Desktop/line.wav', blocking=True)
                            #soundhandle.say(output)
                            subprocess.Popen([
                                'gnome-terminal', '-x', 'bash', '-c',
                                'pacmd set-source-mute 2 0'
                            ])

                            # Add 'output' to the top of clean.log
                            with open("clean.log", "r+") as g:
                                # Read everything from clean.log into 'content'
                                content = g.read()
                                # Go to the top of the file
                                g.seek(0, 0)
                                # Write 'output' with 'content' appended to the end back to clean.log
                                g.write(output.rstrip('\r\n') + '\n' + content)
                                g.close()

                    # If there are no outgoing edges from the current node, go back to the root
                            if G.out_degree(current) == 0:
                                current = root
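The while loop above walks a dialogue graph in which each edge stores the phrase that triggers it ('spoken') and each node stores the robot's reply ('dialogue'). A tiny self-contained sketch of that structure, with invented node names and phrases, using the same old-style G.node accessor as the snippet:

# Minimal dialogue graph in the shape the loop above expects.
# Node names and phrases here are made up for illustration.
import networkx as nx

G = nx.DiGraph()
G.add_node('n1', dialogue='hello how can I help')
G.add_node('n2', dialogue='the demo starts at noon')
G.add_edge('n1', 'n2', spoken='when is the demo')

current = 'n1'
speech = 'when is the demo'
for u, v, attrs in G.edges(current, data=True):
    if speech == attrs.get('spoken'):
        current = v
        print(G.node[current]['dialogue'])  # reply stored on the new node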
示例#52
0
class navsp:

	def __init__(self):

		rospy.on_shutdown(self.cleanup)
		self.voice = rospy.get_param("~voice", "voice_cmu_us_clb_arctic_clunits")
		self.wavepath = rospy.get_param("~wavepath", "")
		self.soundhandle=SoundClient()
		rospy.sleep(1)
		self.soundhandle.stopAll()
		rospy.sleep(15)
		self.soundhandle.say("ready",self.voice)

		rospy.sleep(2)
		self.pub = rospy.Publisher('ifFollowme', String, queue_size=15)
		self.loc_pub = rospy.Publisher('voice2bring', String, queue_size=15)
		self.srt_guide = rospy.Publisher('voice2guide', String, queue_size=15)

		rospy.Subscriber('follow2voice',String,self.follow_adj)
		rospy.Subscriber('found_person',String,self.askhelp)
		rospy.Subscriber('nav2speech',String,self.reachdst)
		rospy.Subscriber('nav2image',String,self.start_find_person)
		rospy.Subscriber('img2voice',String,self.just_say)

		self.if_followme=0
		self.if_stop=0
		self.if_locpub=0
		self.soundhandle.say("please say jack before each question",self.voice)
		rospy.sleep(4)
		self.soundhandle.say("say stop following me when you arrive",self.voice)
		rospy.sleep(4)
		self.soundhandle.say("I am waiting for your command",self.voice)
		rospy.sleep(3.5)

		rospy.Subscriber('recognizer_output',String,self.follow)
	
	def just_say(self,msg):
		msg.data=msg.data.lower()
		self.soundhandle.say( msg.data,self.voice)
		rospy.sleep(4)
	def start_find_person(self,msg):
		msg.data=msg.data.lower()
		if msg.data.find("release") > -1:
			self.soundhandle.say(" i have arrived at the living room",self.voice)
			rospy.sleep(15)
			#self.soundhandle.say("please help me deliver the bag  ",self.voice)
			#rospy.sleep(10)
			self.soundhandle.say(" i begin to find person ",self.voice)
			rospy.sleep(4)
	def askhelp(self,msg):
		msg.data=msg.data.lower()
		if msg.data.find("found_person") > -1:
			#self.soundhandle.say(" new operator ",self.voice)
			#rospy.sleep(2)
			#self.soundhandle.say(" i have reached the person ",self.voice)
			rospy.sleep(4)
			
			#self.soundhandle.say("please help me carry the groceries into the house",self.voice)
			#rospy.sleep(7)
			self.soundhandle.say(" now please follow me to the car",self.voice)
			rospy.sleep(5)
			self.soundhandle.say(" I am ready to start guiding ",self.voice)
			rospy.sleep(4)
			self.srt_guide.publish("instruction_over")

	def reachdst(self,msg):
		msg.data=msg.data.lower()
		if msg.data.find('open_door') > -1:
			self.soundhandle.say("the door is closed ",self.voice)
			rospy.sleep(4)
			self.soundhandle.say(" please help me ",self.voice)
			rospy.sleep(4)
		if msg.data.find('arrived') > -1:
			self.soundhandle.say(" I have reached the car location ",self.voice)
			rospy.sleep(4)

	def follow(self,msg):
		msg.data=msg.data.lower()
		print msg.data
		if msg.data.find('jack') > -1:
			rospy.sleep(0.4)
			if msg.data.find('follow-me') > -1 and self.if_followme ==0 :
				self.soundhandle.say('okay i will follow you',self.voice)
				rospy.sleep(3)
				self.pub.publish('follow_start')
				#rospy.Subscriber('follow2voice',String,self.follow_adj)
				self.if_followme=1
				msg.data=' '

		if self.if_followme ==1 and self.if_stop==0:
			if msg.data.find('jack') > -1 :
				rospy.sleep(0.4)
				if msg.data.find('stop-following-me') > -1 or msg.data.find('stop-following') > -1 or msg.data.find('stop') > -1 or msg.data.find('follow-me') > -1:
					self.soundhandle.say('okay i will stop and remember this car location',self.voice)
					rospy.sleep(5)
					self.pub.publish('follow_stop')
					self.if_stop=1
					#rospy.sleep(3)
					msg.data=' '

		if self.if_stop==1 and self.if_locpub==0:
			msg.data=msg.data.lower()
			if msg.data.find('jack') > -1 :
				rospy.sleep(0.4)
				if (msg.data.find('the-living-room') > -1 or msg.data.find('living-room') > -1 or msg.data.find('to-living-room') > -1 or msg.data.find('to-the-living-room') > -1 or msg.data.find('room') > -1) and self.if_locpub==0:
					#self.soundhandle.say(' please put the bag on the top of me ',self.voice)
					#rospy.sleep(10)
					self.loc_pub.publish('living_room')
					self.soundhandle.say('i will take it to the living room',self.voice)
					rospy.sleep(3.5)
					msg.data=' '
					print "living room"
					self.if_locpub=1

	def follow_adj(self,msg):
		msg.data=msg.data.lower()
		print msg.data

		if msg.data.find('nobody') > -1 : 
			self.soundhandle.say('I cannot see anybody. Please stand in front of me',self.voice)
			rospy.sleep(5)

		if msg.data.find('far') > -1 : 
			self.soundhandle.say('I have lost you and will start a new detection. Please stand still in front of me',self.voice)
			rospy.sleep(5)
		if msg.data.find('go_ahead') > -1 : 
			self.soundhandle.say('You are too far from me. Please go ahead',self.voice)
			rospy.sleep(5)
		if msg.data.find('go_left') > -1 : 
			self.soundhandle.say('please go left',self.voice)
			rospy.sleep(5)
		if msg.data.find('go_right') > -1 : 
			self.soundhandle.say('please go right',self.voice)
			rospy.sleep(5)
		if msg.data.find('passing') > -1 : 
			self.soundhandle.say('Someone is passing between us. I am waiting',self.voice)
			rospy.sleep(5)
		if msg.data.find('find') > -1 : 
			self.soundhandle.say('I will follow behind you again',self.voice)
			rospy.sleep(5)


	def cleanup(self):
		rospy.loginfo("shuting down navsp node ....")
示例#53
0
class SpeechModule():
    def __init__(self):
        rospy.init_node("speech_demo")
        
        # Set the shutdown function
        rospy.on_shutdown(self.shutdown)
        
        self.rate = rospy.get_param('~rate', 10)
                
        self.tick = 1.0 / self.rate
        
        # How close does a person have to be to pay attention to them?
        self.max_target_distance = rospy.get_param('~max_target_distance', 2.0)
        
        # Set the default TTS voice to use
        self.tts_voice = rospy.get_param("~tts_voice", "voice_don_diphone")
        
        # How long in seconds before we consider a detection lost?
        self.lost_detection_timeout = rospy.get_param('~lost_detection_timeout', 5.0)
        
        # Get the path of the file listing valid locations
        nav_config_file = rospy.get_param('~nav_config_file', 'config/3dv/locations.yaml')
        
        # Get the Pocketsphinx vocabulary so we can filter recognition results
        allowed_phrases = rospy.get_param('~allowed_phrases', 'config/3dv_demo/3dv_commands.txt')
                
        # Load the location data
        with open(nav_config_file, 'r') as config:
            self.locations = load(config)
        
        # Read in the file
        with open(allowed_phrases, 'r') as phrases:
            self.allowed_phrases = [line.strip() for line in phrases if line is not None]
        
        # Have we said the greeting to a new person?
        self.greeting_finished = False
        
        # Is a person visible?
        self.target_visible = False
        
        # Are we waiting for the next detection?
        self.waiting = False
        
        # Are we listening on the speech recognition topic?
        self.listening = False
        
        # Set a timer to determine how long a target is no longer visible
        self.target_lost_time = 0.0
        
        # Create the sound client object
        self.soundhandle = SoundClient(blocking=True)
        
        # Wait a moment to let the client connect to the sound_play server
        rospy.sleep(2)
        
        # Publish the requested location so the executive node can use it
        self.speech_pub = rospy.Publisher('/speech_command', String, queue_size=1)

        # Connect to the goto_location service
        #rospy.wait_for_service('/goto_location', 60)
 
        #self.goto_service = rospy.ServiceProxy('/goto_location', GotoLocation)

        # Subscribe to the speech recognition /recognizer/output topic to receive voice commands
        rospy.Subscriber("/recognizer/output", String, self.speech_recognition, queue_size=1)
        
        # Subscribe to the target topic for tracking people
        #rospy.Subscriber('target_topic', DetectionArray, self.detect_person, queue_size=1)
        
        # Wait for the speech recognition services to be ready
        rospy.wait_for_service('/recognizer/start', 15)
        rospy.wait_for_service('/recognizer/stop', 15)
        
        # Connect to the start/stop services for speech recognition
        self.stop_speech_recognition = rospy.ServiceProxy('/recognizer/stop', Empty)
        self.start_speech_recognition = rospy.ServiceProxy('/recognizer/start', Empty)
        
        rospy.loginfo("JR demo up and running.")
        
        # Announce that we are ready for input
        self.jr_says("Ready", self.tts_voice, start_listening=True)
        #self.jr_says("Say, hello jack rabbit, to get my attention", self.tts_voice, start_listening=True)
        
        #self.start_speech_recognition()
        
        while not rospy.is_shutdown():
#             # If we have lost the target, start a timer
#             if not self.target_visible:
#                 self.target_lost_time += self.tick
#                 
#                 if self.target_lost_time > self.lost_detection_timeout and not self.waiting:
#                     rospy.loginfo("No person in sight.")
#                     self.target_visible = False
#                     self.greeting_finished = False
#                     self.waiting = True
#             else:
#                 if self.waiting:
#                     rospy.loginfo("Person detected.")
#                     self.waiting = False
#                     self.target_lost_time = 0.0

            rospy.sleep(self.tick)
            
    def jr_says(self, text, voice, start_listening=False, pause=2):
        self.listening = False
        #self.stop_speech_recognition()
        self.soundhandle.say(text, voice, blocking=True)
        if start_listening:
            rospy.sleep(pause)
            self.listening = True
            #self.start_speech_recognition()
            
    def detect_person(self, msg):
        min_distance = 10000
        target_head = None
        self.target_visible = False
                
        # Pick the closest detection
        for head in msg.detections:
            pos = head.pose.pose.position
            distance = sqrt(pos.x * pos.x + pos.y * pos.y + pos.z * pos.z)
            if distance < min_distance and distance < self.max_target_distance:
                min_distance = distance
                target_head = head
        
        if target_head is not None:
            # Set detection flag
            self.target_visible = True

    def greet_person(self):
        if not self.greeting_finished:
            self.jr_says("Hello there.  My name is Jack Rab bot.", self.tts_voice)
            self.jr_says("Thanks for coming to the conference.", self.tts_voice)
            self.jr_says("Say, hello jack rabbit, to get my attention", self.tts_voice)

            #self.jr_says("I like what you are wearing.", self.tts_voice)
            #self.jr_says("Where would you like to go?", self.tts_voice)
            self.greeting_finished = True
                
    def speech_recognition(self, msg):
        # Look for a wake up phrase
        if msg.data in ['hey jr', 'hi jr', 'hello jr', 'hey jackrabbot', 'hi jackrabbot', 'hey jackrabbit', 'hi jackrabbit', 'hello jackrabbot', 'hello jackrabbit']:
            self.jr_says("Hello there. How can I help?", self.tts_voice, start_listening=True)
            return
        
        if not self.listening:
            return
        
        found_location = False
        goal_location = None
        
        for phrase in self.allowed_phrases:
            if msg.data.find(phrase) > -1:
                found_location = True
                goal_location = phrase
                break
        
        if not msg.data in self.allowed_phrases and not found_location:
            return
        
        if found_location:
            phrase = goal_location
        else:
            phrase = msg.data
            
        location = None
        command = None
            
        if phrase in ['posters', 'poster session', 'poster sessions', 'the poster', 'the posters', 'poster area']:
            location = "posters"
        elif phrase in ['keynotes', 'keynote session', 'keynote sessions', 'keynote talk', 'keynote talks', 'the keynote', 'the keynotes']:
            location = "keynotes"
        elif phrase in ['demos', 'the demo', 'the demos', 'demonstration', 'demonstrations', 'the demonstration', 'the demonstrations']:
            location = "demos"
        elif phrase in ['tutorials', 'the tutorial', 'the tutorials', 'tutorial session']:
            location = "tutorials"
        elif phrase in ['exhibits', 'the exhibit', 'the exhibits', 'exhibit area']:
            location = "exhibits"
        elif phrase in ['washroom', 'washrooms', 'restroom', 'restrooms', 'bathroom', 'bathrooms', 'mens washroom', 'mens restroom', 'mens bathroom', 'womens washroom', 'womens restroom', 'womens bathroom']:
            location = "restrooms"
        elif phrase in ['food', 'the food', 'food area', 'food truck', 'food trucks', 'something to eat', 'cafeteria', 'the cafeteria']:
            location = "food"
        elif phrase in ['entrance', 'the entrance', 'foyer', 'the foyer']:
            location = "entrance"
        elif phrase in ['exit', 'the exit']:
            location = "exit"
        elif phrase in ['wait', 'wait a minute', 'hold on', 'stop', 'abort']:
            command = "wait"
        elif phrase in ['lead', 'lead me', 'lead me there', 'take me', 'take me there']:
            command = "lead"
        elif phrase in ['go', 'keep going', 'continue']:
            command = "go"
        elif phrase in ['follow', 'follow me', 'follow me there']:
            command = "follow"
        else:
            self.jr_says("I'm sorry. I did not understand that. Please say again?", self.tts_voice, pause=2, start_listening=True)
            return
        
        #self.begin_phrases = ['Great choice.', 'No problem.', 'Sure thing.', 'My pleasure.']
        self.begin_phrases = ['Great.']
        self.end_phrases = ['Right this way.', 'Please follow me.', 'Come this way.']
        
        #random.shuffle(self.begin_phrases)
        #random.shuffle(self.end_phrases)
        
        command_to_phrase = {'wait':'OK. I will wait.',
                             'lead':'OK. I will lead.',
                             'go':'OK. I am going now.',
                             'follow':'OK. Please follow me.'}
       
        location_to_phrase = {'posters':'poster sessions',
                              'keynotes':'keynote talks',
                              'demos':'demonstrations', 
                              'exhibits':'exhibits',
                              'tutorials':'tutorials',
                              'restrooms':'restrooms',
                              'mens restroom':'mens restroom',
                              'womens restroom':'womens restroom',
                              'food':'food trucks',
                              'entrance':'entrance',
                              'exit':'exit'}
        
        # Initialize the speech request
        speech_request = String()
        
        if location is not None:
            location_phrase = location_to_phrase[location]
            speech_request.data = location
            rospy.loginfo("Speech location: " + str(location))
            begin = self.begin_phrases[0]
            end = self.end_phrases[0]
            response = begin + ' I know how to get to the ' + location_to_phrase[location] + '. '
        elif command is not None:
            speech_request.data = command
            rospy.loginfo("Speech command: " + str(command))

            response =  command_to_phrase[command] 
        
        self.jr_says(response, self.tts_voice)
        
        # Publish the request
        self.speech_pub.publish(speech_request)
        
        # Create a goto request for the navigation server
        #request = GotoLocationRequest()
        #request.location.name = location
 
        #response = self.goto_service(request)
        
        rospy.loginfo("Speech command: " + str(location))
        #rospy.loginfo(response)
        
        #if response.success:
        #   self.jr_says("We have arrived.", self.tts_voice)
        
            
    def shutdown(self):
        rospy.sleep(1)
        os._exit(0)
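The location branch of speech_recognition above maps many spoken variants onto one canonical location name. A sketch of the same mapping as a lookup table, built from a subset of the variants listed above:

# Reverse lookup table: canonical location -> spoken variants, inverted
# into a phrase -> location dict.
LOCATION_VARIANTS = {
    'posters':   ['posters', 'poster session', 'the posters', 'poster area'],
    'keynotes':  ['keynotes', 'keynote talk', 'the keynote'],
    'demos':     ['demos', 'the demo', 'demonstrations'],
    'restrooms': ['washroom', 'restroom', 'bathroom'],
    'food':      ['food', 'food trucks', 'cafeteria'],
    'exit':      ['exit', 'the exit'],
}

PHRASE_TO_LOCATION = {phrase: loc
                      for loc, variants in LOCATION_VARIANTS.items()
                      for phrase in variants}

def lookup_location(phrase):
    """Return the canonical location for a spoken phrase, or None."""
    return PHRASE_TO_LOCATION.get(phrase)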
示例#54
0
class SpeechState(smach.State):
    """ Speech player state

    Takes in text and/or a wave file.  Given a wave file, plays the file.
    If no wave file is provided and text-to-speech is on, fetch the audio from
    Amazon Polly and play the audio.  If no wave file is provided and text-to-
    speech is off, return "no_audio"

    **Input keys**
        * text : str
            Can be None; the text version of the robot's speech.  Used to print
            to the screen.
        * wav_file : str
            Can be None if TTS is on. The path to the audio file.

    **Outcomes**
        * done
            Finished fetching behaviors
        * no_audio
            Couldn't find the wave file, or no wave file provided and TTS
            turned off
        * preempted
            State was preempted before audio finished playing.  If preempted,
            will try to stop the audio.

    """
    def __init__(self, use_tts, synchronizer, phrases=None, voice="Kimberly"):
        """ Constructor

        Initializes the speech state with the desired parameters; either with
        or without online TTS, and with or without a pre-generated phrase
        file.

        Parameters
        -----------
        use_tts : bool
            If true, allow online TTS
        synchronizer : Synchronizer
            Synchronizer object to allow sync of speech and behaviors.  Should
            be the same object as is passed to behavior ControllerState objects
            with ``setup_sync``
        voice : str, optional
            Which Amazon Polly voice to use. Defaults to Kimberly

        phrases : str, optional
            Path to the phrase file to use. If None, require online TTS.
        """
        smach.State.__init__(self,
                             outcomes=["preempted", "no_audio", "done"],
                             input_keys=["text", "wav_file"])
        self._tts = use_tts
        if use_tts:
            self._talker = CoRDialTTS(voice)
        self._sound_client = SoundClient()
        self._sync = synchronizer

    def execute(self, userdata):
        rospy.loginfo("Saying: {}".format(userdata.text))
        if userdata.wav_file == None:
            if userdata.text != None and len(userdata.text) != 0:
                speech_duration = self._talker.say(userdata.text, wait=False)
                rospy.sleep(0.5)
                self._sync.start()
            else:
                return "no_audio"
        else:
            wav_file = userdata.wav_file
            with contextlib.closing(wave.open(wav_file, 'r')) as f:
                frames = f.getnframes()
                rate = f.getframerate()
                speech_duration = frames / float(rate)
            self._sound_client.playWave(wav_file)
            self._sync.start()

        while rospy.Time.now() - self._sync.time < rospy.Duration(
                speech_duration):
            if self.preempt_requested():
                if self._tts:
                    self._talker.shutup()
                else:
                    self._sound_client.stopAll()
                self.service_preempt()
                return "preempted"
        return "done"
示例#55
0
#!/usr/bin/env python


import rospy, os, sys
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
import re
from gtts import gTTS
from pygame import mixer
from std_msgs.msg import String
import tempfile
rospy.init_node('aiml_soundplay_client', anonymous = True)

soundhandle = SoundClient()
rospy.sleep(1)
soundhandle.stopAll()
print 'Starting TTS'

def speak(sentence):
	with tempfile.NamedTemporaryFile(delete=True) as fp:
		tts=gTTS(text=sentence,lang='zh-tw')
		tts.save("{}.mp3".format(fp.name))
		mixer.init()
		mixer.music.load('{}.mp3'.format(fp.name))
		mixer.music.play(1)
def speak_english(sentence):
	with tempfile.NamedTemporaryFile(delete=True) as fp:
		tts=gTTS(text=sentence,lang='en-us')
		tts.save("{}.mp3".format(fp.name))
		mixer.init()
		mixer.music.load('{}.mp3'.format(fp.name))
		mixer.music.play(1)
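mixer.music.play returns immediately, so a caller that needs to block until the clip finishes can poll the mixer. A sketch using pygame's standard get_busy call, assuming the mixer was already initialised by speak() above:

# Optional: block until pygame has finished playing the current clip.
import time
from pygame import mixer

def wait_until_done(poll=0.1):
    while mixer.music.get_busy():
        time.sleep(poll)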
示例#56
0
class Nodo:
    def espera(self, seg):
        for i in range(int(self.rate * seg)):
            self.r.sleep()

    def parar(self):
        stop = Twist()
        self.cmd_vel.publish(stop)

    def modulo(self, X, Y):
        return math.sqrt(X**2 + Y**2)

    def odometryCb(self, msg):
        if self.partir:
            self.inicioX = msg.pose.pose.position.x
            self.inicioY = msg.pose.pose.position.y
            #self.thetaI = (msg.pose.pose.orientation.z + 1) * 180
            self.partir = False
        self.posx = msg.pose.pose.position.x
        self.posy = msg.pose.pose.position.y
        if (msg.pose.pose.orientation.z > 0):
            self.theta = math.acos(
                msg.pose.pose.orientation.w) / (math.pi / 2) * 180
        else:
            self.theta = 360 - math.acos(
                msg.pose.pose.orientation.w) / (math.pi / 2) * 180
        #self.theta = (msg.pose.pose.orientation.z + 1)*180

    def obstaculo(self, msj):
        self.distance = list(map(float, msj.data.split(':')))
        self.left = self.distance[0] < 0.6 or self.distance[0] > 90
        self.center = self.distance[1] < 0.6 or self.distance[1] > 90
        self.right = self.distance[2] < 0.6 or self.distance[2] > 90

    def amigo(self, msj):
        self.objetivo = msj.data.split(':')
        if len(self.objetivo) < 3:
            self.objetivo.append(0)
        self.objetivo = list(map(float, self.objetivo))

        # Check whether the target is still centered
        if self.objetivo[0] < 280:
            self.pierdeObjetivo = True
            self.sentidoObjetivo = 1
        elif self.objetivo[0] > 360:
            self.pierdeObjetivo = True
            self.sentidoObjetivo = -1
        else:
            self.pierdeObjetivo = False

        # Check whether the target has been reached
        if self.objetivo[2] < 0.5:
            self.alcanceObjetivo = True
        else:
            self.alcanceObjetivo = False

    def enderezame(self, data):
        self.distsPared = data.data.split(';')
        self.distsPared = list(map(float, self.distsPared))
        if max(abs(self.distsPared[0] - self.distsPared[1]),
               abs(self.distsPared[1] -
                   self.distsPared[2])) < 0.03 and self.distsPared[0] != 0:
            self.enderezado = True
        else:
            self.enderezado = False
        aux = self.distsPared[2] - self.distsPared[0]
        if aux != 0:
            self.sentidoEnderezado = (aux) / abs(aux)

    def __init__(self):
        # Useful state variables are defined here
        self.posx = 0
        self.posy = 0
        self.inicioX = 0
        self.inicioY = 0
        self.theta = 0
        self.cl = 0.03
        self.partir = False
        self.rate = 20
        self.right = False
        self.center = False
        self.left = False
        self.distance = [0, 0, 0]
        self.objetivo = [0, 0, 0]
        self.pierdeObjetivo = False
        self.sentidoObjetivo = 1
        self.alcanceObjetivo = False
        self.distsPared = [0, 0, 0]
        self.enderezado = False
        self.sentidoEnderezado = 1

        # Initialize the node and set up subscribers/publishers
        rospy.init_node('roboto', anonymous=True)  #make node
        rospy.Subscriber('odom', Odometry, self.odometryCb)
        rospy.Subscriber('obstaculo', String, self.obstaculo)
        rospy.Subscriber('amigoFiel', String, self.amigo)
        rospy.Subscriber('enderezador3', String, self.enderezame)
        self.cmd_vel = rospy.Publisher('/cmd_vel_mux/input/navi', Twist,
                                       queue_size=10)
        self.r = rospy.Rate(20)
        # keep the loop at 20 Hz
        self.chatter = SoundClient()

    def avanza(self, metros, vel):
        self.inicio = 0
        self.partir = True
        cont = 0.1
        move_cmd = Twist()
        move_cmd.angular.z = -0.02
        move_cmd.linear.x = cont * vel  #m/s
        vel_max = vel
        recorrido = 0
        while (not rospy.is_shutdown()) and (abs(recorrido) < metros *
                                             (1 - self.cl)):
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
            recorrido = self.modulo(self.posx - self.inicioX,
                                    self.posy - self.inicioY)
            if min(self.distance) > 0.6:
                cont = min(cont + 0.1, 1)
            else:
                cont = max(cont - 0.1, 0.1)
            move_cmd.linear.x = vel_max * cont
            print(vel_max * cont)
            if (min(self.distance) < 0.5 or max(self.distance) > 90):
                break
        self.parar()
        self.espera(0.7)

    def avanzaAmigo(self, metros, vel):
        self.inicio = 0
        self.partir = True
        cont = 0.1
        move_cmd = Twist()
        move_cmd.angular.z = -0.02
        move_cmd.linear.x = cont * vel  #m/s
        vel_max = vel
        recorrido = 0
        while (not rospy.is_shutdown()) and (abs(recorrido) < metros *
                                             (1 - self.cl)):
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
            recorrido = self.modulo(self.posx - self.inicioX,
                                    self.posy - self.inicioY)
            if self.objetivo[2] > 0.8:
                cont = min(cont + 0.1, 1)
            else:
                cont = max(cont - 0.1, 0.1)
            move_cmd.linear.x = vel_max * cont
            print(vel_max * cont)
            if (self.alcanceObjetivo):
                break
            elif (min(self.distance) < 0.5 or max(self.distance) > 90):
                break
        self.parar()
        self.espera(0.7)

    def gira(self, grados, vel):
        objetivo = grados - 5
        #zobj = objetivo/180 - 1
        move_cmd = Twist()
        move_cmd.angular.z = vel
        error = 2
        self.partir = True
        thetaReal = True
        print('about to turn')
        while (not rospy.is_shutdown()) and (
            (abs(self.theta - objetivo) > error) or thetaReal):
            if not self.partir and thetaReal:
                thetaReal = False
                objetivo += self.theta
                if (objetivo > 360):
                    objetivo -= 360
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
            if (vel > 0 and not self.right) or (vel < 0 and not self.left):
                break
        self.parar()
        self.espera(0.7)

    def gira2(self, grados, vel):
        objetivo = grados - 5
        #zobj = objetivo/180 - 1
        move_cmd = Twist()
        move_cmd.angular.z = vel
        error = 2
        self.partir = True
        thetaReal = True
        print('about to turn')
        while (not rospy.is_shutdown()) and (
            (abs(self.theta - objetivo) > error) or thetaReal):
            if not self.partir and thetaReal:
                thetaReal = False
                objetivo += self.theta
                if (objetivo > 360):
                    objetivo -= 360
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
        self.parar()
        self.espera(0.7)

    def giraAmigo(self, grados, vel):
        objetivo = grados - 5
        #zobj = objetivo/180 - 1
        move_cmd = Twist()
        move_cmd.angular.z = vel
        error = 2
        self.partir = True
        thetaReal = True
        while (not rospy.is_shutdown()) and (
            (abs(self.theta - objetivo) > error) or thetaReal):
            if not self.partir and thetaReal:
                thetaReal = False
                objetivo += self.theta
                if (objetivo > 360):
                    objetivo -= 360
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
            if not self.pierdeObjetivo:
                break
        self.parar()
        self.espera(0.7)

    def arco(self, radio, grados, angular):
        # smooth floor: 1.51, rough floor: 1.93
        objetivo = grados
        move_cmd = Twist()
        move_cmd.angular.z = angular
        move_cmd.linear.x = abs(angular) * radio / 1.85
        error = 5
        self.partir = True
        thetaReal = True
        while (not rospy.is_shutdown()) and (
            (abs(self.theta - objetivo) > error) or thetaReal):
            if not self.partir and thetaReal:
                thetaReal = False
                objetivo = grados + self.theta
                if (objetivo > 360):
                    objetivo -= 360
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
        self.parar()
        self.espera(0.7)

    def ncuadrados(self, n, lado, vel):
        for i in range(n):
            self.cuadrado(lado, vel)

    def cuadrado(self, lado, vel):
        for i in range(4):
            self.avanza(lado, vel)
            self.gira(90, 1.5)

    def hacerDCC(self, radio, velL, velA):
        # Draw the D
        self.avanza(radio * 2, velL)
        self.gira(90, velA + 0.5)
        self.arco(radio, 180, velA)
        # Draw the C
        #self.gira(180, velA)
        self.avanza(2.2 * radio, -velL)
        #self.gira(180, velA)
        self.espera(1)
        self.arco(radio, 180, velA)
        # Draw the other C
        self.avanza(2.2 * radio, velL)
        self.espera(1)
        self.gira(180, velA + 0.5)
        self.espera(1)
        self.arco(radio, 180, -velA)

    def pasea(self):
        while (not rospy.is_shutdown()):
            if not (self.left or self.right):
                self.avanza(10, 0.4)
            elif self.left:
                self.gira(10000, -1)
            else:
                self.gira(10000, 1)

    def persigueAmigo(self):
        while (not rospy.is_shutdown()):
            if self.pierdeObjetivo:
                self.giraAmigo(10000, self.sentidoObjetivo)
            elif not self.center:
                self.avanzaAmigo(10, 0.3)
            else:
                print("No puedo llegar")

    def enderezar(self, vel):
        move_cmd = Twist()
        move_cmd.angular.z = vel * self.sentidoEnderezado
        print(self.sentidoEnderezado)
        while (not rospy.is_shutdown()):
            self.cmd_vel.publish(move_cmd)
            self.r.sleep()
            if (self.enderezado):
                break
        self.parar()
        self.espera(0.7)
        #roboto.chatter.say('Objective located ready for annihilation')

    def avanzaPasillo(self, vel):
        while (not rospy.is_shutdown()):
            if self.sentidoEnderezado > 0 and self.left:
                self.gira(10000, -1)
            elif self.sentidoEnderezado < 0 and self.right:
                self.gira(10000, 1)
            elif not self.center:
                self.avanza(10, vel)
            else:
                print("estamos rodeados")

            if self.center:
                self.enderezar(1)
                self.chatter.say('I have arrived')
                break

    def identificaPared(self):
        if self.center and (self.left or self.right):
            self.enderezar(1)
            self.chatter.say('Objective found')
        else:
            self.chatter.say('Objective lost')
        rospy.sleep(1)

    def buscaPared(self):
        for i in range(4):
            self.identificaPared()
            self.gira2(90, 1)
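A possible way to drive this class from a main guard, assuming the odometry and sensor topics used in __init__ are being published (pasea() is just one of the behaviors above):

if __name__ == '__main__':
    try:
        robot = Nodo()
        # Wander forward and turn away from obstacles, as implemented above.
        robot.pasea()
    except rospy.ROSInterruptException:
        pass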
示例#57
0
def play(filename):
    """plays the wav or ogg file using sound_play"""
    SoundClient(blocking=True).playWave(filename)
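Since the SoundClient is created with blocking=True, playWave returns only after playback completes, so the helper can be called sequentially. A minimal invocation might look like this (the node name and wav path are placeholders; a sound_play node must be running):

import rospy

rospy.init_node('play_wav_example')
# Placeholder path; point it at any wav or ogg file the sound_play node can read.
play('/tmp/example.wav')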
示例#58
0
import os, sys, subprocess, signal

import rospy
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from sound_play.libsoundplay import SoundClient

#########################################################################################################
# GLOBAL VARIABLES
#########################################################################################################

# Change these variables to enable different functionality
funny_flag = True
interact_flag = False

path = '/home/turtlebot/alpha_ws_new/src/tourbot/tourbot/files/'
battery_level = 0.0
current_pose = 0
sound_client = SoundClient()
rospy.sleep(1)

# Define quaternions for four directions
quats = {
    'right': Quaternion(0, 0, 0, 1),
    'bottom': Quaternion(0, 0, -0.707, 0.707),
    'left': Quaternion(0, 0, 1, 0),
    'top': Quaternion(0, 0, 0.707, 0.707)
}

# Define checkpoints on the map
check_points = [{
    'title': 'emergency_exit',
    'point': (2.08, -4.23),
    'ori': 'top',
示例#59
0
class RobotSpeech(object):
    def __init__(self):
        """
			初始化函数,初始化录音以及语音识别api需要用到的各项参数
		"""
        # Initialize the ROS node
        rospy.init_node("robot_speech")

        # sound_play parameters
        self.voice = rospy.get_param("~voice", "voice_kal_diphone")
        self.speaker = SoundClient(blocking=True)
        rospy.sleep(1)
        self.speaker.stopAll()
        rospy.sleep(1)

        # Recording parameters
        self.start_record = False
        self.stop_record = False
        self.count = 0
        self.chunk = 256
        self.format = pyaudio.paInt16
        self.channels = 1
        self.rate = 11025
        self.record_second = 11

        # API parameters
        self.URL = "http://api.xfyun.cn/v1/service/v1/iat"
        self.APPID = "15944331"
        self.API_KEY = "bpNdxEhagyalZVtC6fddFeGZ"
        self.SECRET_KEY = '1QRtmDnXAA9TUQuGH8zHL5K1OW2GnpDu'
        # Note: a leading '/' in the second argument would make os.path.join
        # discard the first path component.
        self.audio_folder = os.path.join(os.path.dirname(CURRENT_FOLDER_PATH),
                                         "sounds/gpsr_record/")
        self.ASR_URL = 'http://vop.baidu.com/server_api'
        self.DEV_PID = 1737
        self.CUID = '123456PYTHON'
        self.FORMAT = 'wav'
        self.RATE = 16000

    def setup_recorder(self):
        self.recorder = pyaudio.PyAudio()

    def speaker_say(self, sentence):
        self.speaker.say(sentence, self.voice)

    def keyword_callback(self, msg):
        if msg.data.lower().strip() == "jack":
            self.get_audio()

        if msg.data.lower().strip() == "ok" or msg.data.lower().strip(
        ) == "okay":
            self.stop_record = True

    def control_callback(self, msg):
        if msg.action == "speak":
            if msg.attributes.speech.sentence:
                # Speak target sentence
                self.speaker_say(msg.attributes.speech.sentence)
        if msg.action == "recognition" and msg.target == "speech":
            # speech recognition start
            self.speaker_say("Please say jack to wake me up")

    def get_audio(self):
        self.setup_recorder()
        play_signal_sound()
        file_name = self.audio_folder + "_" + str(self.count) + ".wav"
        print("[INFO] Start to record input audio and save to file: %s" %
              (file_name))
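The snippet is cut off before any subscriptions are created, but the callbacks suggest wiring along these lines (a sketch; the /kws_data topic name is borrowed from the similar node in the next example, and the control topic is omitted because its message type is not shown):

import rospy
from std_msgs.msg import String

speech = RobotSpeech()  # __init__ above already calls rospy.init_node
rospy.Subscriber('/kws_data', String, speech.keyword_callback)
rospy.spin()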
示例#60
0
import wave
import pyaudio
import rospy
from std_msgs.msg import String, Int8
from sound_play.libsoundplay import SoundClient
# play_signal_sound() is assumed to be provided elsewhere in the original package.


class get_audio():
    def __init__(self):
        """
            初始化函数,初始化录音时的各项参数,以及ros话题和参数
            构想是使用pocketsphinx进行一些短的关键词的识别,主要是唤醒词jack以及终止词ok
            然后当pocketshpinx识别到唤醒词和终止词并通过话题输出之后,此脚本接受到话题的输出之后判断开始录音还是终止录音
        """
        # Record params
        rospy.on_shutdown(self.cleanup)
        self.CHUNK = 256
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 11025
        self.RECORD_SECONDS = 7
        # Ros params
        self.start_record = False
        self.stop_record  = False
        self.project_name = '/home/keaixin/catkin_ws/src/kamerider_speech/sounds/gpsr_record/gpsr'
        self.count = 0
        self.sub_pocketsphinx_topic_name = None
        self.pub_record_end_topic_name   = None
        self.pub_record_index_topic_name = None
        self.wavePath = 'question_start_signal.wav'
        
        #self.voice = None
        
        self.get_params()

    def setup_recorder(self):
        self.recorder = pyaudio.PyAudio()

    def get_params(self):
        self.sub_pocketsphinx_topic_name  = rospy.get_param("sub_pocketsphinx_topic_name", "/kws_data")
        self.pub_record_end_topic_name    = rospy.get_param("pub_record_end_topic_name",   "/audio_record")
        self.pub_record_index_topic_name  = rospy.get_param("pub_record_index_topic_name", "/audio_index")
        #self.voice = rospy.get_param("/home/kameridervoice", "voice_kal_diphone")

        rospy.Subscriber(self.sub_pocketsphinx_topic_name, String, self.pocketCallback)
        self.pub_record = rospy.Publisher(self.pub_record_end_topic_name, String, queue_size=1)
        self.pub_index  = rospy.Publisher(self.pub_record_index_topic_name, Int8, queue_size=1)

        
        self.soundhandle = SoundClient(blocking=True)
        rospy.sleep(1)
        self.soundhandle.stopAll()
        rospy.sleep(1)
        

    def pocketCallback(self, msg):
        if msg.data.lower().strip() == 'jack':
            self.start_record = True
            self.stop_record  = False
            self.get_audio()

        if msg.data.lower().strip() == 'ok' or msg.data.lower().strip() == 'okay':
            self.stop_record = True

    def get_audio(self):
        if self.start_record:
            self.setup_recorder()
            self.soundhandle.stopAll()
            print("PLAY SOUND")
            play_signal_sound()
            #self.soundhandle.playWave(self.wavePath)
            print("[INFO] SUCCESSFULLY PLAY THE SOUND")
            file_name = self.project_name + '_' + str(self.count) + '.wav'
            print ("[INFO] Start to record input audio and save to file: %s"%(file_name))
            stream = self.recorder.open(
                format=self.FORMAT,
                channels=self.CHANNELS,
                rate=self.RATE,
                input=True,
                frames_per_buffer=self.CHUNK
            )
            frames = []
            for i in range(int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
                if self.stop_record:
                    print ("[INFO] Stop recording")
                    break
                data = stream.read(self.CHUNK)
                frames.append(data)
            print ("[INFO] Recording finised now save %d.wav file"%(self.count))

            stream.stop_stream()
            stream.close()
            self.recorder.terminate()

            wf = wave.open(file_name, 'wb')
            wf.setnchannels(self.CHANNELS)
            wf.setsampwidth(self.recorder.get_sample_size(self.FORMAT))
            wf.setframerate(self.RATE)
            wf.writeframes(b''.join(frames))
            wf.close()

            self.pub_index.publish(self.count)
            self.start_record = False
            self.count += 1

    def cleanup(self):
        self.soundhandle.stopAll()
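A minimal main guard for this node might look like the following sketch (the node name is a placeholder; recording itself is driven entirely by the pocketsphinx keyword topic):

if __name__ == '__main__':
    rospy.init_node('gpsr_audio_recorder')
    recorder = get_audio()
    rospy.spin()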