Beispiel #1
0
class NavSpeak:
    """Announce move_base navigation events with builtin sounds and speech."""

    def __init__(self):
        self.move_base_goal_sub = rospy.Subscriber(
            "/move_base/goal",
            MoveBaseActionGoal,
            self.move_base_goal_callback,
            queue_size=1)
        self.move_base_result_sub = rospy.Subscriber(
            "/move_base/result",
            MoveBaseActionResult,
            self.move_base_result_callback,
            queue_size=1)
        self.sound = SoundClient()

    def move_base_goal_callback(self, msg):
        # Acknowledge a newly received goal with builtin sound #2.
        self.sound.play(2)

    def move_base_result_callback(self, msg):
        text = "{}: {}".format(goal_status(msg.status.status), msg.status.text)
        rospy.loginfo(text)
        # Map terminal status codes to builtin sound ids; 4 is the fallback.
        cues = {
            GoalStatus.SUCCEEDED: 1,
            GoalStatus.PREEMPTED: 2,
            GoalStatus.ABORTED: 3,
        }
        self.sound.play(cues.get(msg.status.status, 4))
        time.sleep(1)  # let the cue finish before speaking
        self.sound.say(text)
Beispiel #2
0
    def callback(self, signs):
        """Play builtin beep #2 whenever a message arrives."""
        handle = SoundClient()
        handle.play(2)
        rospy.sleep(1)  # give the sound time to play before returning
Beispiel #3
0
def play_nonblocking():
    """
    Play the same sounds with manual pauses between them.
    """
    rospy.loginfo('Example: Playing sounds in *non-blocking* mode.')
    client = SoundClient(blocking=False)
    # Sleep right away so the SoundClient publisher can establish its
    # connection to the soundplay_node before the first request.
    rospy.sleep(1)

    # Without blocking, a manual pause is needed after each call.
    rospy.loginfo('Playing say-beep at full volume.')
    client.playWave('say-beep.wav')
    rospy.sleep(1)

    rospy.loginfo('Playing say-beep at volume 0.3.')
    client.playWave('say-beep.wav', volume=0.3)
    rospy.sleep(1)

    rospy.loginfo('Playing sound for NEEDS_PLUGGING.')
    client.play(SoundRequest.NEEDS_PLUGGING)
    rospy.sleep(1)

    rospy.loginfo('Speaking some long string.')
    client.say('It was the best of times, it was the worst of times.')
def play_nonblocking():
    """
    Play the same sounds with manual pauses between them.
    """
    rospy.loginfo('Example: Playing sounds in *non-blocking* mode.')
    soundhandle = SoundClient(blocking=False)
    # The initial sleep lets the SoundClient publisher connect to the
    # soundplay_node before any request is sent.
    rospy.sleep(1)

    # Non-blocking calls return immediately, so pause after each one.
    steps = (
        ('Playing say-beep at full volume.',
         lambda: soundhandle.playWave('say-beep.wav')),
        ('Playing say-beep at volume 0.3.',
         lambda: soundhandle.playWave('say-beep.wav', volume=0.3)),
        ('Playing sound for NEEDS_PLUGGING.',
         lambda: soundhandle.play(SoundRequest.NEEDS_PLUGGING)),
    )
    for message, trigger in steps:
        rospy.loginfo(message)
        trigger()
        rospy.sleep(1)

    rospy.loginfo('Speaking some long string.')
    soundhandle.say('It was the best of times, it was the worst of times.')
Beispiel #5
0
def callback(data):
    """On a "Bark" message, play builtin sound #1 and publish "Fire"."""
    if str(data) != "data: Bark":
        return
    handle = SoundClient()
    rospy.sleep(1)  # allow the sound client publisher to connect
    handle.play(1)
    # NOTE(review): the publisher is created inside the callback on every
    # invocation; its connection may not yet be up when publish() is called.
    pubshoot = rospy.Publisher('bark', String)
    pubshoot.publish(String("Fire"))
Beispiel #6
0
def mover():
    """Main driving loop: wander, avoid obstacles, and save the map when done.

    Subscribes to odometry, laser-scan and occupancy-grid topics, keeps moving
    toward the most open direction (via pick_direction), and re-plans whenever
    an obstacle comes within stop_distance.  Once closure() reports the SLAM
    map is complete, the elapsed time is written to maptime.txt, a sound is
    played and the map image is saved.
    """
    global laser_range

    rospy.init_node('mover', anonymous=True)

    # subscribe to odometry data
    rospy.Subscriber('odom', Odometry, get_odom_dir)
    # subscribe to LaserScan data
    rospy.Subscriber('scan', LaserScan, get_laserscan)
    # subscribe to map occupancy data
    rospy.Subscriber('map', OccupancyGrid, get_occupancy)

    rospy.on_shutdown(stopbot)

    rate = rospy.Rate(5)  # 5 Hz

    # save start time so elapsed mapping time can be reported
    start_time = time.time()
    # flag: becomes 1 once the completion time has been written
    timeWritten = 0

    # find direction with the largest distance from the Lidar,
    # rotate to that direction, and start moving
    pick_direction()

    while not rospy.is_shutdown():
        if laser_range.size != 0:
            # check distances in front of TurtleBot and find values less
            # than stop_distance
            lri = (laser_range[front_angles] < float(stop_distance)).nonzero()
            rospy.loginfo('Distances: %s', str(lri))
        else:
            # BUGFIX: the original did `lri[0] = []` on a name that was never
            # bound, raising NameError when no scan had been received yet.
            lri = ([],)

        # if the list is not empty, an obstacle is too close: re-plan
        if (len(lri[0]) > 0):
            rospy.loginfo(['Stop!'])
            # find direction with the largest distance from the Lidar
            # rotate to that direction
            # start moving
            pick_direction()

        # check if SLAM map is complete (only until the time is written once)
        # BUGFIX: the original tested `if timeWritten:`, which is 0 until set,
        # so the completion branch could never execute.
        if not timeWritten:
            if closure(occdata):
                # map is complete, so save current time into file
                with open("maptime.txt", "w") as f:
                    f.write("Elapsed Time: " + str(time.time() - start_time))
                timeWritten = 1
                # play a sound
                soundhandle = SoundClient()
                rospy.sleep(1)
                soundhandle.stopAll()
                soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
                rospy.sleep(2)
                # save the map
                cv2.imwrite('mazemap.png', occdata)

        rate.sleep()
Beispiel #7
0
class NavSpeak:
    """Announce move_base results, speaking Japanese when configured."""

    def __init__(self):
        self.move_base_goal_sub = rospy.Subscriber(
            "/move_base/goal", MoveBaseActionGoal,
            self.move_base_goal_callback, queue_size=1)
        self.move_base_result_sub = rospy.Subscriber(
            "/move_base/result", MoveBaseActionResult,
            self.move_base_result_callback, queue_size=1)
        self.sound = SoundClient()
        self.lang = "japanese"  # speak japanese by default
        if rospy.has_param("/nav_speak/lang"):
            self.lang = rospy.get_param("/nav_speak/lang")
        self.pub = rospy.Publisher('/robotsound_jp/goal', SoundRequestActionGoal, queue_size=1)

    def move_base_goal_callback(self, msg):
        # Acknowledge a newly received goal with builtin sound #2.
        self.sound.play(2)

    def move_base_result_callback(self, msg):
        text = "{}: {}".format(goal_status(msg.status.status), msg.status.text)
        rospy.loginfo(text)
        # Pre-build the Japanese TTS action goal (timestamped now, before the
        # cue sound plays), mirroring the original request field values.
        sound_goal = None
        if self.lang == "japanese":
            sound_goal = SoundRequestActionGoal()
            sound_goal.goal_id.stamp = rospy.Time.now()
            sound_goal.goal.sound_request.sound = -3
            sound_goal.goal.sound_request.command = 1
            sound_goal.goal.sound_request.volume = 1.0
            sound_goal.goal.sound_request.arg2 = "jp"
        # Map each terminal status to (builtin cue id, Japanese phrase).
        # Unknown statuses get cue 4 and are always spoken in English.
        outcomes = {
            GoalStatus.SUCCEEDED: (1, "到着しました"),
            GoalStatus.PREEMPTED: (2, "別のゴールがセットされました"),
            GoalStatus.ABORTED: (3, "中断しました"),
        }
        cue, jp_text = outcomes.get(msg.status.status, (4, None))
        self.sound.play(cue)
        time.sleep(1)
        if sound_goal is not None and jp_text is not None:
            sound_goal.goal.sound_request.arg = jp_text
            self.pub.publish(sound_goal)
        else:
            self.sound.say(text)
Beispiel #8
0
def play_blocking():
    """
    Play various sounds, blocking until each is completed before going to the
    next.
    """
    rospy.loginfo('Example: Playing sounds in *blocking* mode.')
    client = SoundClient(blocking=True)

    # Each call below returns only after the sound has finished, so no
    # explicit sleeps are needed between them.
    rospy.loginfo('Playing say-beep at full volume.')
    client.playWave('say-beep.wav')

    rospy.loginfo('Playing say-beep at volume 0.3.')
    client.playWave('say-beep.wav', volume=0.3)

    rospy.loginfo('Playing sound for NEEDS_PLUGGING.')
    client.play(SoundRequest.NEEDS_PLUGGING)

    rospy.loginfo('Speaking some long string.')
    client.say('It was the best of times, it was the worst of times.')
def play_blocking():
    """
    Play various sounds, blocking until each is completed before going to the
    next.
    """
    rospy.loginfo('Example: Playing sounds in *blocking* mode.')
    soundhandle = SoundClient(blocking=True)

    # Blocking client: each action completes before the loop advances.
    for label, action in (
        ('Playing say-beep at full volume.',
         lambda: soundhandle.playWave('say-beep.wav')),
        ('Playing say-beep at volume 0.3.',
         lambda: soundhandle.playWave('say-beep.wav', volume=0.3)),
        ('Playing sound for NEEDS_PLUGGING.',
         lambda: soundhandle.play(SoundRequest.NEEDS_PLUGGING)),
        ('Speaking some long string.',
         lambda: soundhandle.say('It was the best of times, it was the worst of times.')),
    ):
        rospy.loginfo(label)
        action()
Beispiel #10
0
class Talker:
    """Thin convenience wrapper around SoundClient for speech and sounds."""

    def __init__(self):
        self.soundhandle = SoundClient()
        rospy.sleep(1)  # wait for the sound client publisher to connect

        self.voice = 'voice_kal_diphone'
        self.volume = 1.0

    def say(self, txt):
        """Speak *txt* with the configured voice and volume."""
        print('Saying: %s' % txt)
        self.soundhandle.say(txt, self.voice, self.volume)
        rospy.sleep(1)  # leave time for the utterance to start

    def play(self, num):
        """Play builtin sound *num* at the configured volume."""
        print('Playing sound nr.: %s' % num)
        self.soundhandle.play(num, self.volume)
        rospy.sleep(1)  # leave time for the sound to start
Beispiel #11
0
def play_explicit():
    """Demonstrate per-call ``blocking`` overrides on SoundClient methods."""
    rospy.loginfo('Example: SoundClient play methods can take in an explicit'
                  ' blocking parameter')
    client = SoundClient()  # blocking = False by default
    rospy.sleep(0.5)  # Ensure publisher connection is successful.

    beep = client.waveSound("say-beep.wav", volume=0.5)
    # Same sound twice: first explicitly blocking, then with the client's
    # default (non-blocking) behaviour.
    beep.play(blocking=True)
    beep.play()
    rospy.sleep(0.5)  # Let sound complete.

    # A builtin sound, played blocking via the per-call override.
    client.play(SoundRequest.NEEDS_UNPLUGGING, blocking=True)

    # A client whose default *is* blocking, with a per-call opt-out.
    client = SoundClient(blocking=True)
    client.say('Say-ing stuff while block-ing')
    client.say('Say-ing stuff without block-ing', blocking=False)
    rospy.sleep(1)
Beispiel #12
0
class NavSpeak:
    """Give audio feedback for move_base goal and result events."""

    def __init__(self):
        self.move_base_goal_sub = rospy.Subscriber(
            "/move_base/goal", MoveBaseActionGoal,
            self.move_base_goal_callback, queue_size=1)
        self.move_base_result_sub = rospy.Subscriber(
            "/move_base/result", MoveBaseActionResult,
            self.move_base_result_callback, queue_size=1)
        self.sound = SoundClient()

    def move_base_goal_callback(self, msg):
        # Acknowledge a newly received goal with builtin sound #2.
        self.sound.play(2)

    def move_base_result_callback(self, msg):
        text = "{}: {}".format(goal_status(msg.status.status), msg.status.text)
        rospy.loginfo(text)
        status = msg.status.status
        # Pick the builtin cue for this terminal status (4 = fallback).
        if status == GoalStatus.SUCCEEDED:
            cue = 1
        elif status == GoalStatus.PREEMPTED:
            cue = 2
        elif status == GoalStatus.ABORTED:
            cue = 3
        else:
            cue = 4
        self.sound.play(cue)
        time.sleep(1)  # let the cue finish before speaking
        self.sound.say(text)
def play_explicit():
    """Show that blocking can be chosen per call, per sound, or per client."""
    rospy.loginfo('Example: SoundClient play methods can take in an explicit'
                  ' blocking parameter')
    # This client defaults to non-blocking.
    handle = SoundClient()
    rospy.sleep(0.5)  # wait for the publisher connection

    wave = handle.waveSound("say-beep.wav", volume=0.5)
    wave.play(blocking=True)   # explicit override: block until done
    wave.play()                # client default: returns immediately
    rospy.sleep(0.5)           # let the async sound finish

    # Builtin sound, blocking via the per-call parameter.
    handle.play(SoundRequest.NEEDS_UNPLUGGING, blocking=True)

    # Second client blocks by default; the last call opts out per-call.
    handle = SoundClient(blocking=True)
    handle.say('Say-ing stuff while block-ing')
    handle.say('Say-ing stuff without block-ing', blocking=False)
    rospy.sleep(1)
Beispiel #14
0
class RunStopServer(object):
    """Dead-man-switch style server for wouse run-stops.

    While DEAD_MAN_CONFIGURATION is active, a timer periodically checks that
    the wouse is still reporting activity and beeps when it goes silent.
    """

    def __init__(self):
        """Provide dead-man-switch like server for handling wouse run-stops."""
        rospy.Service("wouse_run_stop", WouseRunStop, self.service_cb)
        self.run_stop = RunStop()
        if DEAD_MAN_CONFIGURATION:
            self.sound_client = SoundClient()
            self.timeout = rospy.Duration(rospy.get_param('wouse_timeout', 1.5))
            self.tone_period = rospy.Duration(10)  # min gap between warnings
            self.last_active_time = rospy.Time.now()
            self.last_sound = rospy.Time.now()
            rospy.Timer(self.timeout, self.check_receiving)

    def check_receiving(self, event):
        """After timeout, check to ensure that activity is seen from wouse."""
        silence = rospy.Time.now() - self.last_active_time
        if silence < self.timeout:
            # Still receiving: nothing to do.
            return
        # Warn (and beep) at most once per tone_period.
        tone_due = (rospy.Time.now() - self.last_sound) > self.tone_period
        if silence > self.timeout and tone_due:
            rospy.logwarn("RunStopServer has not heard from wouse recently.")
            self.sound_client.play(3)
            self.last_sound = rospy.Time.now()

    def service_cb(self, req):
        """Handle service requests to start/stop run-stop.  Used to reset."""
        self.last_active_time = req.time
        if req.stop:
            return self.run_stop.stop()
        if req.start:
            return self.run_stop.start()
        return True  # only update timestamp
Beispiel #15
0
def get_occupancy(msg):
    """Store the latest occupancy grid and check whether mapping is done.

    Converts the OccupancyGrid *msg* into a 2-D uint8 array kept in the
    global ``occdata``; when ``closure`` reports the map is closed, writes a
    marker file, plays a notification sound and saves the map image.
    """
    global occdata
    global cnt

    # create numpy array from the flat occupancy data
    msgdata = numpy.array(msg.data)
    # compute histogram to identify percent of bins with -1 (unmapped)
    occ_counts = numpy.histogram(msgdata, occ_bins)
    # calculate total number of bins
    total_bins = msg.info.width * msg.info.height
    # rospy.loginfo('Unmapped: %i Unoccupied: %i Occupied: %i Total: %i', occ_counts[0][0], occ_counts[0][1], occ_counts[0][2], total_bins)

    # shift values so the range starts at 0 instead of -1, then reshape
    # to a 2-D array using column (Fortran) order
    oc2 = msgdata + 1
    occdata = numpy.uint8(
        oc2.reshape(msg.info.height, msg.info.width, order='F'))

    # NOTE(review): contourCheck is a local, so it is re-initialized to 1 on
    # every callback and the completion branch re-runs for each message once
    # the map is closed; if run-once behaviour is wanted this needs to be a
    # module-level flag.
    contourCheck = 1
    if contourCheck:
        if closure(occdata):
            # map is complete, so record that into file
            with open("maptime.txt", "w") as f:
                f.write("done")
            contourCheck = 0
            # play a sound to signal completion
            soundhandle = SoundClient()
            rospy.sleep(1)
            soundhandle.stopAll()
            soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
            rospy.sleep(2)
            # save the map
            cv2.imwrite('mazemap.png', occdata)
            cnt = 2
            # BUGFIX: message said 'mapzemap.png' but the file written above
            # is 'mazemap.png'.
            print("Map finished, Check your map at 'mazemap.png'.")
Beispiel #16
0
class CarSound(object):
    """
    car sound class
    """
    def __init__(self):
        self.entity = car_sound_pb2.SoundRequest()

        self.soundhandle = SoundClient()
        self.voice = 'voice_kal_diphone'
        self.lasttime = rospy.get_time()

    def callback_sound(self, data):
        """
        new sound request
        """
        print "New Sound Msg"
        self.entity.ParseFromString(data.data)
        print self.entity
        if self.entity.mode == car_sound_pb2.SoundRequest.SAY:
            self.soundhandle.say(self.entity.words, self.voice)
        elif self.entity.mode == car_sound_pb2.SoundRequest.BEEP:
            self.soundhandle.play(SoundRequest.NEEDS_PLUGGING)
        self.lasttime = rospy.get_time()
class CarSound(object):
    """
    car sound class
    """

    def __init__(self):
        self.entity = car_sound_pb2.SoundRequest()

        self.soundhandle = SoundClient()
        self.voice = 'voice_kal_diphone'
        self.lasttime = rospy.get_time()

    def callback_sound(self, data):
        """
        new sound request
        """
        print "New Sound Msg"
        self.entity.ParseFromString(data.data)
        print self.entity
        if self.entity.mode == car_sound_pb2.SoundRequest.SAY:
            self.soundhandle.say(self.entity.words, self.voice)
        elif self.entity.mode == car_sound_pb2.SoundRequest.BEEP:
            self.soundhandle.play(SoundRequest.NEEDS_PLUGGING)
        self.lasttime = rospy.get_time()
Beispiel #18
0
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient

if __name__ == '__main__':
    rospy.init_node('battery_guard')
    soundclient = SoundClient()

    last_complain = rospy.Time()
    rate = rospy.Rate(.1)  # poll every 10 seconds
    while not rospy.is_shutdown():
        # Read charge level and charging state from sysfs.
        with open('/sys/class/power_supply/BAT0/capacity') as f:
            capacity = int(f.readline().rstrip())
        with open('/sys/class/power_supply/BAT0/status') as f:
            status = f.readline().rstrip()
        # Only complain while discharging, and at most once a minute.
        if status != "Charging" and rospy.Time.now(
        ) - last_complain > rospy.Duration(60):
            if capacity < 10:
                rospy.logerr(
                    "No battery power remaining. Connect the robot laptop to power immediately."
                )
                soundclient.play(SoundRequest.NEEDS_PLUGGING_BADLY)
                last_complain = rospy.Time.now()
            elif capacity < 15:
                rospy.logwarn(
                    "Only little battery power remaining. Please connect the robot laptop to power."
                )
                soundclient.play(SoundRequest.NEEDS_PLUGGING)
                last_complain = rospy.Time.now()
        rate.sleep()
Beispiel #19
0
    #soundhandle.playWave('dummy')

    # print 'say'
    # soundhandle.say('Hello world!')
    # sleep(3)

    rospy.loginfo('wave')
    soundhandle.playWave('say-beep.wav')
    sleep(2)

    rospy.loginfo('quiet wave')
    soundhandle.playWave('say-beep.wav', 0.3)
    sleep(2)

    rospy.loginfo('plugging')
    soundhandle.play(SoundRequest.NEEDS_PLUGGING)
    sleep(2)

    rospy.loginfo('quiet plugging')
    soundhandle.play(SoundRequest.NEEDS_PLUGGING, 0.3)
    sleep(2)

    rospy.loginfo('unplugging')
    soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
    sleep(2)

    rospy.loginfo('plugging badly')
    soundhandle.play(SoundRequest.NEEDS_PLUGGING_BADLY)
    sleep(2)

    rospy.loginfo('unplugging badly')
Beispiel #20
0
# Author: Blaise Gassend

import sys

if __name__ == '__main__':
    if len(sys.argv) != 2 or sys.argv[1] == '--help':
        print 'Usage: %s <sound_id>'%sys.argv[0]
        print
        print 'Plays one of the built-in sounds based on its integer ID. Look at the sound_play/SoundRequest message definition for IDs.'
        exit(1)

    # Import here so that usage is fast.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient
    
    rospy.init_node('play', anonymous = True)
    
    soundhandle = SoundClient()
    
    rospy.sleep(1)
    
    num = int(sys.argv[1])

    print 'Playing sound %i.'%num

    soundhandle.play(num)

    rospy.sleep(1)
Beispiel #21
0
import sys


import rospy
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient

if __name__ == "__main__":
    rospy.init_node('beep_node', anonymous=True)

    # The sound client needs a moment after construction for its publisher
    # to connect before the first request is sent.
    soundhandle = SoundClient()
    rospy.sleep(1)

    # Builtin sound id 2 at full volume.
    soundhandle.play(2, 1.0)
    rospy.sleep(1)
Beispiel #22
0
class anomaly_detector:
    """HMM-based online anomaly detector for a manipulation task.

    Fits a multi-dimensional HMM to recorded training data at startup, then
    monitors the live multimodal feature stream and publishes an
    'InterruptAction' message (and plays a sound) when the likelihood check
    flags an anomaly.
    """

    def __init__(self, task_name, feature_list, save_data_path, training_data_pkl, verbose=False, \
                 online_raw_viz=False):
        """Set up the ROS node, communications, and train the detector.

        task_name: ROS node name; also used in the enable-service name.
        feature_list: names of the features to extract from raw data.
        save_data_path: directory holding training data and the HMM pickle.
        training_data_pkl: training-data pickle filename inside save_data_path.
        verbose: NOTE(review) accepted but never used in this class.
        online_raw_viz: if True, plot incoming features with matplotlib.
        """
        rospy.init_node(task_name)
        rospy.loginfo('Initializing anomaly detector')

        self.task_name         = task_name
        self.feature_list      = feature_list
        self.save_data_path    = save_data_path
        self.training_data_pkl = os.path.join(save_data_path, training_data_pkl)
        self.online_raw_viz    = online_raw_viz
        self.count = 0

        # Detection is off until the enable service turns it on.
        self.enable_detector = False
        self.soundHandle = SoundClient()

        # visualization
        if self.online_raw_viz: 
            ## plt.ion()
            self.fig = plt.figure(1)
            ## pylab.hold(False)
            self.plot_data = {}
            self.plot_len = 22000
        
        self.initParams()
        self.initComms()
        self.initDetector()
        self.reset()

    def initParams(self):
        '''
        Load feature list
        '''
        # radius of the receptive field used for spatially-local features
        self.rf_radius = rospy.get_param('hrl_manipulation_task/receptive_field_radius')
        self.nState    = 15  # number of HMM states (passed to learning_hmm_multi_n)
        self.cov_mult  = 1.0  # covariance multiplier used when fitting the HMM
        self.threshold = -200.0  # log-likelihood threshold for anomaly_check
    
    def initComms(self):
        '''
        Subscribe raw data
        '''
        # Publisher
        self.action_interruption_pub = rospy.Publisher('InterruptAction', String)
        
        # Subscriber
        rospy.Subscriber('/hrl_manipulation_task/raw_data', MultiModality, self.rawDataCallback)
        
        # Service
        self.detection_service = rospy.Service('anomaly_detector_enable/'+self.task_name, Bool_None, \
                                               self.enablerCallback)

    def initDetector(self):
        # Load the recorded training data and fit the HMM once at startup;
        # exits the process if data is missing or fitting fails.
        if os.path.isfile(self.training_data_pkl) is not True: 
            print "There is no saved data"
            sys.exit()
        
        data_dict = ut.load_pickle(self.training_data_pkl)
        self.trainingData, self.param_dict = extractLocalFeature(data_dict['trainData'], self.feature_list, \
                                                            self.rf_radius)

        # training hmm
        self.nEmissionDim = len(self.trainingData)
        detection_param_pkl = os.path.join(self.save_data_path, 'hmm_'+self.task_name+'.pkl')        
        self.ml = hmm.learning_hmm_multi_n(self.nState, self.nEmissionDim, check_method='progress', verbose=False)
        
        ret = self.ml.fit(self.trainingData, cov_mult=[self.cov_mult]*self.nEmissionDim**2, ml_pkl=detection_param_pkl, use_pkl=True)

        if ret == 'Failure': 
            print "-------------------------"
            print "HMM returned failure!!   "
            print "-------------------------"
            sys.exit()

    
    def enablerCallback(self, msg):
        # Service handler: enable detection, or disable it and reset state.
        if msg.data is True: 
            self.enable_detector = True
        else:
            # Reset detector
            self.enable_detector = False
            self.reset()            
        
        return Bool_NoneResponse()

        
    def rawDataCallback(self, msg):
        # Cache the latest multimodal sample on self; these attributes are
        # read later by extractLocalFeature().
        self.audio_feature     = msg.audio_feature
        self.audio_power       = msg.audio_power
        self.audio_azimuth     = msg.audio_azimuth
        self.audio_head_joints = msg.audio_head_joints
        self.audio_cmd         = msg.audio_cmd

        self.kinematics_ee_pos      = msg.kinematics_ee_pos
        self.kinematics_ee_quat     = msg.kinematics_ee_quat
        self.kinematics_jnt_pos     = msg.kinematics_jnt_pos
        self.kinematics_jnt_vel     = msg.kinematics_jnt_vel
        self.kinematics_jnt_eff     = msg.kinematics_jnt_eff
        self.kinematics_target_pos  = msg.kinematics_target_pos
        self.kinematics_target_quat = msg.kinematics_target_quat

        self.ft_force  = msg.ft_force
        self.ft_torque = msg.ft_torque

        self.vision_pos  = msg.vision_pos
        self.vision_quat = msg.vision_quat

        self.pps_skin_left  = msg.pps_skin_left
        self.pps_skin_right = msg.pps_skin_right
        

    def extractLocalFeature(self):
        """Build one scaled feature vector from the cached raw sample."""
        dataSample = []

        # Unimoda feature - Audio --------------------------------------------        
        if 'unimodal_audioPower' in self.feature_list:

            ang_max, ang_min = getAngularSpatialRF(self.kinematics_ee_pos, self.rf_radius)
            audio_power_min  = self.param_dict['unimodal_audioPower_power_min']          

            # Use the measured power only when the sound source is inside the
            # receptive field; otherwise fall back to the training minimum.
            if self.audio_azimuth > ang_min and self.audio_azimuth < ang_max:
                unimodal_audioPower = self.audio_power
            else:
                unimodal_audioPower = audio_power_min
            
            dataSample.append(unimodal_audioPower)

        # Unimodal feature - Kinematics --------------------------------------
        if 'unimodal_kinVel' in self.feature_list:
            print "not implemented"

        # Unimodal feature - Force -------------------------------------------
        if 'unimodal_ftForce' in self.feature_list:
            ftForce_pca = self.param_dict['unimodal_ftForce_pca']            
            unimodal_ftForce = ftForce_pca.transform(self.ft_force)

            if len(np.array(unimodal_ftForce).flatten()) > 1:
                dataSample += list(np.squeeze(unimodal_ftForce))
            else: 
                dataSample.append( np.squeeze(unimodal_ftForce) )


        # Crossmodal feature - relative dist --------------------------
        # NOTE(review): bare `feature_list` here (and below) — elsewhere this
        # class uses `self.feature_list`; this likely relies on a module-level
        # global or is a bug. Confirm against the module top level.
        if 'crossmodal_targetRelativeDist' in feature_list:

            crossmodal_targetRelativeDist = np.linalg.norm(np.array(self.kinematics_target_pos) - \
                                                           np.array(self.kinematics_ee_pos))

            dataSample.append( crossmodal_targetRelativeDist )

        # Crossmodal feature - relative angle --------------------------
        # NOTE(review): bare `feature_list` again — see note above.
        if 'crossmodal_targetRelativeAng' in feature_list:                
            
            diff_ang = qt.quat_angle(self.kinematics_ee_quat, self.kinematics_target_quat)
            crossmodal_targetRelativeAng = abs(diff_ang)

            dataSample.append( crossmodal_targetRelativeAng )

        # Scaling ------------------------------------------------------------
        # Min-max normalize with the ranges recorded at training time.
        scaled_features = (np.array(dataSample) - np.array(self.param_dict['feature_min']) )\
          /( np.array(self.param_dict['feature_max']) - np.array(self.param_dict['feature_min']) ) 

        return scaled_features
            
            
    def reset(self):
        '''
        Reset parameters
        '''
        self.dataList = []
        

    def run(self):
        '''
        Run detector
        '''            
        rospy.loginfo("Start to run anomaly detection: "+self.task_name)
        self.count = 0
        ## self.enable_detector = True
        
        rate = rospy.Rate(20) # 25Hz, nominally.
        while not rospy.is_shutdown():

            # NOTE(review): this `continue` skips rate.sleep() below, so the
            # loop busy-spins at full CPU while the detector is disabled.
            if self.enable_detector is False: continue
            self.count += 1

            # extract feature
            self.dataList.append( self.extractLocalFeature() ) 

            # Wait until enough samples have accumulated before checking.
            if len(np.shape(self.dataList)) == 1: continue
            if np.shape(self.dataList)[0] < 10: continue

            # visualization
            if self.online_raw_viz: 
                for i in xrange(len(self.dataList[-1])):
                    self.plot_data.setdefault(i, [])
                    self.plot_data[i].append(self.dataList[-1][i])
                    
                if self.count % 50 == 0:

                    for i in xrange(len(self.dataList[-1])):
                        ax = self.fig.add_subplot( len(self.plot_data.keys())*100+10+(i+1) )
                        ax.plot(self.plot_data[i], 'r')
                        
                if self.count > 280: 
                    # NOTE(review): `feature_list` is the bare name here too,
                    # not self.feature_list — see extractLocalFeature().
                    for i, feature_name in enumerate(feature_list):
                        ax = self.fig.add_subplot( len(self.plot_data.keys())*100+10+(i+1) )
                        ax.plot(np.array(self.trainingData[i]).T, 'b')                        
                    plt.show()
            
            # Run anomaly checker
            anomaly, error = self.ml.anomaly_check(np.array(self.dataList).T, self.threshold)
            print "anomaly check : ", anomaly, " " , error, " dat shape: ", np.array(self.dataList).T.shape

            # anomaly decision
            ## if np.isnan(error): continue #print "anomaly check returned nan"
            if anomaly or np.isnan(error) or np.isinf(error): 
                print "anoooooooooooooooooooooomaly"
                # Interrupt the running action, beep, and disarm until the
                # enable service is called again.
                self.action_interruption_pub.publish(self.task_name+'_anomaly')
                self.soundHandle.play(2)
                self.enable_detector = False
                self.reset()                           
                
            
            rate.sleep()
Beispiel #23
0
# Author: Blaise Gassend

import sys

if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv) > 3 or sys.argv[1] == '--help':
        print 'Usage: %s <sound_id> [volume]'%sys.argv[0]
        print
        print 'Plays one of the built-in sounds based on its integer ID. Look at the sound_play/SoundRequest message definition for IDs.\n The (optional) volume parameter sets the volume for the sound as a value between 0 and 1.0, where 0 is mute.'
        exit(1)

    # Import here so that usage is fast.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient
    
    rospy.init_node('play', anonymous = True)
    
    soundhandle = SoundClient()
    rospy.sleep(1)
    
    num = int(sys.argv[1])
    volume = float(sys.argv[2]) if len(sys.argv) == 3 else 1.0

    print 'Playing sound %i.'%num

    soundhandle.play(num, volume)

    rospy.sleep(1)
import sys

if __name__ == '__main__':
    import rospy
    # rospy.myargv() strips ROS remapping arguments from sys.argv.
    argv = rospy.myargv()
    if not 2 <= len(argv) <= 3 or argv[1] == '--help':
        print('Usage: %s <sound_id> [volume]' % argv[0])
        print()
        print(
            'Plays one of the built-in sounds based on its integer ID. Look at the sound_play/SoundRequest message definition for IDs.\n The (optional) volume parameter sets the volume for the sound as a value between 0 and 1.0, where 0 is mute.'
        )
        exit(1)

    # Import here so that usage is fast.
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient

    rospy.init_node('play', anonymous=True)

    soundhandle = SoundClient()
    rospy.sleep(1)  # let the sound client publisher connect

    num = int(argv[1])
    volume = 1.0 if len(argv) < 3 else float(argv[2])

    rospy.loginfo('Playing sound %i.' % num)
    soundhandle.play(num, volume)
    rospy.sleep(1)  # give the request time to be dispatched
class onlineAnomalyDetection(Thread):
    """Background thread performing HMM-based online anomaly detection.

    Fuses four feature streams (force magnitude, mic-object distance,
    gripper-spoon angle, audio RMS) during robot-assisted scooping or
    feeding.  When the HMM flags an anomaly it publishes an interrupt
    message, plays a warning sound and saves a diagnostic plot.
    """
    # Audio capture constants (capture itself lives in tool_audio_slim).
    MAX_INT = 32768.0
    CHUNK = 1024  # frame per buffer
    RATE = 48000  # sampling rate
    UNIT_SAMPLE_TIME = 1.0 / float(RATE)
    CHANNEL = 2  # number of channels
    FORMAT = pyaudio.paInt16

    def __init__(self,
                 subject='s1',
                 task='s',
                 targetFrame=None,
                 tfListener=None,
                 isScooping=True,
                 audioTool=None):
        """Wire up ROS pub/sub, sensors and the trained HMM.

        subject, task: used only to name the saved data file.
        targetFrame: TF frame the gripper pose is transformed into
            (presumably the camera/torso frame -- confirm with caller).
        tfListener, audioTool: optional shared instances; created here
            when not supplied.
        isScooping: selects scooping vs. feeding HMM parameters.
        """
        super(onlineAnomalyDetection, self).__init__()
        self.daemon = True
        self.cancelled = False
        self.isRunning = False

        # Predefined settings
        self.downSampleSize = 100  #200
        self.cov_mult = 5.0
        self.isScooping = isScooping
        self.subject = subject
        self.task = task
        # Per-task HMM hyperparameters (states, trim ratio, detection
        # offset, per-modality scaling, threshold pickle).
        if self.isScooping:
            self.nState = 10
            self.cutting_ratio = [0.0, 0.9]  #[0.0, 0.7]
            self.anomaly_offset = -15.0
            self.scale = [1.0, 1.0, 1.0, 0.7]  #10
            self.ml_thres_pkl = 'ml_scooping_thres.pkl'
        else:
            self.nState = 15
            self.cutting_ratio = [0.0, 0.7]
            self.anomaly_offset = -20
            self.scale = [1.0, 1.0, 0.7, 1.0]  #10
            self.ml_thres_pkl = 'ml_feeding_thres.pkl'

        print 'is scooping:', self.isScooping

        self.publisher = rospy.Publisher('visualization_marker', Marker)
        self.interruptPublisher = rospy.Publisher('InterruptAction', String)
        self.targetFrame = targetFrame

        # Data logging
        self.updateNumber = 0
        self.lastUpdateNumber = 0
        self.init_time = rospy.get_time()

        if tfListener is None:
            self.transformer = tf.TransformListener()
        else:
            self.transformer = tfListener

        # Gripper
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.mic = None
        self.grips = []
        # Spoon
        self.spoon = None

        # FT sensor
        self.force = None
        self.torque = None

        # Audio
        if audioTool is None:
            self.audioTool = tool_audio_slim()
            self.audioTool.start()
        else:
            self.audioTool = audioTool

        # Kinematics
        self.jointAngles = None
        self.jointVelocities = None

        self.soundHandle = SoundClient()

        # NOTE(review): hard-coded absolute path -- only valid on the
        # original development machine.
        saveDataPath = '/home/dpark/git/hrl-assistive/hrl_multimodal_anomaly_detection/src/hrl_multimodal_anomaly_detection/hmm/batchDataFiles/%s_%d_%d_%d_%d.pkl'
        # Setup HMM to perform online anomaly detection
        self.hmm, self.minVals, self.maxVals, self.minThresholds \
        = onlineHMM.iteration(downSampleSize=self.downSampleSize,
                              scale=self.scale, nState=self.nState,
                              cov_mult=self.cov_mult, anomaly_offset=self.anomaly_offset, verbose=False,
                              isScooping=self.isScooping, use_pkl=False,
                              train_cutting_ratio=self.cutting_ratio,
                              findThresholds=True, ml_pkl=self.ml_thres_pkl,
                              savedDataFile=saveDataPath % (('scooping' if self.isScooping else 'feeding'),
                                            self.downSampleSize, self.scale[0], self.nState, int(self.cov_mult)))

        print 'Threshold:', self.minThresholds

        # Scaled and raw feature histories for the current run.
        self.forces = []
        self.distances = []
        self.angles = []
        self.audios = []
        self.forcesRaw = []
        self.distancesRaw = []
        self.anglesRaw = []
        self.audiosRaw = []
        self.times = []
        self.likelihoods = []
        self.anomalyOccured = False

        # self.avgAngle = onlineHMM.trainData[2]

        self.forceSub = rospy.Subscriber('/netft_data', WrenchStamped,
                                         self.forceCallback)
        print 'Connected to FT sensor'

        self.objectCenter = None
        # Bowl center for scooping, mouth pose for feeding.
        self.objectCenterSub = rospy.Subscriber(
            '/ar_track_alvar/bowl_cen_pose'
            if isScooping else '/ar_track_alvar/mouth_pose', PoseStamped,
            self.objectCenterCallback)
        print 'Connected to center of object publisher'

        # Resolve the left-arm joint name ordering from the MPC config.
        groups = rospy.get_param('/haptic_mpc/groups')
        for group in groups:
            if group['name'] == 'left_arm_joints':
                self.joint_names_list = group['joints']

        self.jstate_lock = threading.RLock()
        self.jointSub = rospy.Subscriber("/joint_states", JointState,
                                         self.jointStatesCallback)
        print 'Connected to robot kinematics'

    def reset(self):
        """Clear all logged data and restart the detection clock."""
        self.isRunning = True
        self.forces = []
        self.distances = []
        self.angles = []
        self.audios = []
        self.forcesRaw = []
        self.distancesRaw = []
        self.anglesRaw = []
        self.audiosRaw = []
        self.times = []
        self.anomalyOccured = False
        self.updateNumber = 0
        self.lastUpdateNumber = 0
        self.init_time = rospy.get_time()
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.mic = None
        self.grips = []
        self.spoon = None
        self.force = None
        self.torque = None
        self.jointAngles = None
        self.jointVelocities = None
        self.objectCenter = None
        self.likelihoods = []
        self.audioTool.reset(self.init_time)

    def run(self):
        """Overloaded Thread.run, runs the update
        method once per every xx milliseconds."""
        # rate = rospy.Rate(1000) # 25Hz, nominally.
        while not self.cancelled:
            # Only step when new FT data arrived and the object is tracked.
            if self.isRunning and self.updateNumber > self.lastUpdateNumber and self.objectCenter is not None:
                self.lastUpdateNumber = self.updateNumber
                if not self.processData(): continue

                # Wait for a short warm-up (>15 samples) before checking.
                if not self.anomalyOccured and len(self.forces) > 15:
                    # Perform anomaly detection
                    (anomaly, error) = self.hmm.anomaly_check(
                        self.forces, self.distances, self.angles, self.audios,
                        self.minThresholds * 1.5)  #temp
                    print 'Anomaly error:', error
                    if anomaly:
                        if self.isScooping:
                            self.interruptPublisher.publish('Interrupt')
                        else:
                            self.interruptPublisher.publish('InterruptHead')
                        self.anomalyOccured = True
                        self.soundHandle.play(2)
                        print 'AHH!! There is an anomaly at time stamp', rospy.get_time(
                        ) - self.init_time, (anomaly, error)

                        # Plot live data against the first 13 training
                        # trials for each modality (4 stacked subplots).
                        fig = plt.figure()
                        for i, modality in enumerate(
                            [[self.forces] + onlineHMM.trainData[0][:13],
                             [self.distances] + onlineHMM.trainData[1][:13],
                             [self.angles] + onlineHMM.trainData[2][:13],
                             [self.audios] + onlineHMM.trainData[3][:13]]):
                            ax = plt.subplot(int('41' + str(i + 1)))
                            for index, (modal, times) in enumerate(
                                    zip(modality, [self.times] +
                                        onlineHMM.trainTimeList[:3])):
                                ax.plot(times, modal, label='%d' % index)
                            ax.legend()
                        fig.savefig(
                            '/home/dpark/git/hrl-assistive/hrl_multimodal_anomaly_detection/src/hrl_multimodal_anomaly_detection/fooboohooyou.pdf'
                        )
                        print "saved pdf file"
                        rospy.sleep(2.0)
                        # plt.show()
            # rate.sleep()
        print 'Online anomaly thread cancelled'

    def cancel(self, cancelAudio=True):
        """Stop the detection loop, optionally stop audio, and save data."""
        self.isRunning = False
        if cancelAudio:
            self.audioTool.cancel()
        self.saveData()
        rospy.sleep(1.0)

    def saveData(self):
        """Pickle the logged run data into onlineDataRecordings/."""
        # TODO Save data (Check with daehyung if any more data should be added)
        data = dict()
        data['forces'] = self.forces
        data['distances'] = self.distances
        data['angles'] = self.angles
        data['audios'] = self.audios
        data['forcesRaw'] = self.forcesRaw
        data['distancesRaw'] = self.distancesRaw
        data['anglesRaw'] = self.anglesRaw
        data['audioRaw'] = self.audiosRaw
        data['times'] = self.times
        data['anomalyOccured'] = self.anomalyOccured
        data['minThreshold'] = self.minThresholds
        data['likelihoods'] = self.likelihoods
        data['jointAngles'] = self.jointAngles
        data['jointVelocities'] = self.jointVelocities

        directory = os.path.join(os.path.dirname(__file__),
                                 'onlineDataRecordings/')
        if not os.path.exists(directory):
            os.makedirs(directory)
        fileName = os.path.join(
            directory, self.subject + '_' + self.task + '_' +
            time.strftime('%m-%d-%Y_%H-%M-%S.pkl'))
        with open(fileName, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
        print 'Online data saved to file.'

    def processData(self):
        """Compute one scaled feature sample from the latest sensor data.

        Returns False (sample skipped) when no audio is available yet.
        """
        # Find nearest time stamp from training data
        # timeStamp = rospy.get_time() - self.init_time
        # index = np.abs(self.times - timeStamp).argmin()

        self.transposeGripper()

        # Use magnitude of forces
        force = np.linalg.norm(self.force)

        # Determine distance between mic and center of object
        distance = np.linalg.norm(self.mic - self.objectCenter)
        # Find angle between gripper-object vector and gripper-spoon vector
        micSpoonVector = self.spoon - self.mic
        micObjectVector = self.objectCenter - self.mic
        angle = np.arccos(
            np.dot(micSpoonVector, micObjectVector) /
            (np.linalg.norm(micSpoonVector) * np.linalg.norm(micObjectVector)))

        # Process either visual or audio data depending on which we're using
        if len(self.audioTool.audio_data_raw) > 0:
            audio = self.audioTool.audio_data_raw[-1]
        else:
            return False
        if audio is None:
            print 'Audio is None'
            return False
        audio = get_rms(audio)
        # print 'Audio:', audio

        self.forcesRaw.append(force)
        self.distancesRaw.append(distance)
        self.anglesRaw.append(angle)
        self.audiosRaw.append(audio)

        # Scale data
        force = self.scaling(force,
                             minVal=self.minVals[0],
                             maxVal=self.maxVals[0],
                             scale=self.scale[0])
        distance = self.scaling(distance,
                                minVal=self.minVals[1],
                                maxVal=self.maxVals[1],
                                scale=self.scale[1])
        angle = self.scaling(angle,
                             minVal=self.minVals[2],
                             maxVal=self.maxVals[2],
                             scale=self.scale[2])
        audio = self.scaling(audio,
                             minVal=self.minVals[3],
                             maxVal=self.maxVals[3],
                             scale=self.scale[3])

        # Find nearest time stamp from training data
        timeStamp = rospy.get_time() - self.init_time
        index = np.abs(np.array(onlineHMM.trainTimeList[0]) -
                       timeStamp).argmin()

        self.forces.append(force)
        self.distances.append(distance)
        self.angles.append(angle)
        #self.angles.append(onlineHMM.trainData[2][0][index])
        self.audios.append(audio)
        self.times.append(rospy.get_time() - self.init_time)
        if len(self.forces) > 1:
            self.likelihoods.append(
                self.hmm.likelihoods(self.forces, self.distances, self.angles,
                                     self.audios))

        return True

    @staticmethod
    def scaling(x, minVal, maxVal, scale=1.0):
        """Min-max normalize x into [0, scale] given training bounds."""
        return (x - minVal) / (maxVal - minVal) * scale

    def forceCallback(self, msg):
        """Store the latest FT wrench and bump the update counter."""
        self.force = np.array(
            [msg.wrench.force.x, msg.wrench.force.y, msg.wrench.force.z])
        self.torque = np.array(
            [msg.wrench.torque.x, msg.wrench.torque.y, msg.wrench.torque.z])
        self.updateNumber += 1

    def objectCenterCallback(self, msg):
        """Store the tracked object (bowl/mouth) position."""
        self.objectCenter = np.array(
            [msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])

    def jointStatesCallback(self, data):
        """Extract left-arm joint angles/velocities in configured order."""
        joint_angles = []
        ## joint_efforts = []
        joint_vel = []
        jt_idx_list = [0] * len(self.joint_names_list)
        for i, jt_nm in enumerate(self.joint_names_list):
            jt_idx_list[i] = data.name.index(jt_nm)

        for i, idx in enumerate(jt_idx_list):
            if data.name[idx] != self.joint_names_list[i]:
                raise RuntimeError('joint angle name does not match.')
            joint_angles.append(data.position[idx])
            ## joint_efforts.append(data.effort[idx])
            joint_vel.append(data.velocity[idx])

        with self.jstate_lock:
            self.jointAngles = joint_angles
            ## self.joint_efforts = joint_efforts
            self.jointVelocities = joint_vel

    def transposeGripper(self):
        """Update self.mic and self.spoon positions in the target frame."""
        # Transpose gripper position to camera frame
        self.transformer.waitForTransform(self.targetFrame,
                                          '/l_gripper_tool_frame',
                                          rospy.Time(0), rospy.Duration(5))
        try:
            self.lGripperPosition, self.lGripperRotation = self.transformer.lookupTransform(
                self.targetFrame, '/l_gripper_tool_frame', rospy.Time(0))
            transMatrix = np.dot(
                tf.transformations.translation_matrix(self.lGripperPosition),
                tf.transformations.quaternion_matrix(self.lGripperRotation))
        except tf.ExtrapolationException:
            print 'Transpose of gripper failed!'
            return

        # Use a buffer of gripper positions
        if len(self.grips) >= 2:
            lGripperTransposeMatrix = self.grips[-2]
        else:
            lGripperTransposeMatrix = transMatrix
        self.grips.append(transMatrix)

        # Determine location of mic
        # Fixed offsets of the mic (and spoon below) in the gripper frame.
        mic = [0.12, -0.02, 0]
        # print 'Mic before', mic
        self.mic = np.dot(lGripperTransposeMatrix,
                          np.array([mic[0], mic[1], mic[2], 1.0]))[:3]
        # print 'Mic after', self.mic
        # Determine location of spoon
        spoon3D = [0.22, -0.050, 0]
        self.spoon = np.dot(
            lGripperTransposeMatrix,
            np.array([spoon3D[0], spoon3D[1], spoon3D[2], 1.0]))[:3]

    def find_input_device(self):
        """Return the index of a likely audio input device, or None.

        NOTE(review): relies on self.p (presumably a pyaudio.PyAudio
        instance), which is never assigned in this class -- confirm
        before calling.
        """
        device_index = None
        for i in range(self.p.get_device_count()):
            devinfo = self.p.get_device_info_by_index(i)
            print('Device %d: %s' % (i, devinfo['name']))

            for keyword in ['mic', 'input', 'icicle']:
                if keyword in devinfo['name'].lower():
                    print('Found an input: device %d - %s' %
                          (i, devinfo['name']))
                    device_index = i
                    return device_index

        if device_index is None:
            print('No preferred input found; using default input device.')

        return device_index

    def publishPoints(self,
                      name,
                      points,
                      size=0.01,
                      r=0.0,
                      g=0.0,
                      b=0.0,
                      a=1.0):
        """Publish a POINTS Marker (RViz) for the given 3D points."""
        marker = Marker()
        marker.header.frame_id = '/torso_lift_link'
        marker.ns = name
        marker.type = marker.POINTS
        marker.action = marker.ADD
        marker.scale.x = size
        marker.scale.y = size
        marker.color.a = a
        marker.color.r = r
        marker.color.g = g
        marker.color.b = b
        for point in points:
            p = Point()
            # print point
            p.x, p.y, p.z = point
            marker.points.append(p)
        self.publisher.publish(marker)
Beispiel #26
0
class NavSpeak(object):
    """Announce move_base events, speaking Japanese via the robotsound_jp
    action server or English via SoundClient.say."""

    def __init__(self):
        # True while the robotsound_jp node reports active goals.
        self.is_speaking = False
        self.move_base_goal_sub = rospy.Subscriber(
            "/move_base/goal",
            MoveBaseActionGoal,
            self.move_base_goal_callback,
            queue_size=1)
        self.move_base_result_sub = rospy.Subscriber(
            "/move_base/result",
            MoveBaseActionResult,
            self.move_base_result_callback,
            queue_size=1)
        self.robotsound_jp_status_sub = rospy.Subscriber(
            "/robotsound_jp/status",
            GoalStatusArray,
            self.robotsound_jp_status_callback,
            queue_size=1)
        self.sound = SoundClient(blocking=True)
        self.lang = rospy.get_param("~lang", "japanese")
        self.volume = rospy.get_param("~volume", 1.0)
        self.client = actionlib.SimpleActionClient('robotsound_jp',
                                                   SoundRequestAction)
        self.client.wait_for_server()

    def move_base_goal_callback(self, msg):
        """Beep when a new navigation goal arrives."""
        self.sound.play(2, volume=self.volume)

    def move_base_result_callback(self, msg):
        """Play a status jingle, then announce the navigation result."""
        # Wait until no other node is speaking.
        while self.is_speaking:
            time.sleep(1)
        status = msg.status.status
        text = "{}: {}".format(goal_status(status), msg.status.text)
        rospy.loginfo(text)

        # Pre-build the Japanese TTS goal when applicable.
        jp_goal = None
        if self.lang == "japanese":  # speak japanese
            jp_goal = SoundRequestGoal()
            req = jp_goal.sound_request
            req.sound = -3
            req.command = 1
            req.volume = self.volume
            req.arg2 = "jp"

        # status -> (built-in jingle id, Japanese phrase)
        announcements = {
            GoalStatus.SUCCEEDED: (1, "到着しました"),
            GoalStatus.PREEMPTED: (2, "別のゴールがセットされました"),
            GoalStatus.ABORTED: (3, "中断しました"),
        }

        if status in announcements:
            jingle, phrase = announcements[status]
            self.sound.play(jingle, volume=self.volume)
            time.sleep(1)
            if jp_goal is not None:
                jp_goal.sound_request.arg = phrase
                self.client.send_goal(jp_goal)
            else:
                self.sound.say(text, volume=self.volume)
        else:
            # Unknown status: always announce in English.
            self.sound.play(4, volume=self.volume)
            time.sleep(1)
            self.sound.say(text, volume=self.volume)

    def robotsound_jp_status_callback(self, msg):
        """Track whether the Japanese sound node is currently busy."""
        self.is_speaking = bool(msg.status_list)
Beispiel #27
0
class NavSpeak(object):
    """Announce move_base results; Japanese phrases are published on
    /robotsound_jp/goal, English falls back to SoundClient.say."""

    def __init__(self):
        self.move_base_goal_sub = rospy.Subscriber(
            "/move_base/goal",
            MoveBaseActionGoal,
            self.move_base_goal_callback,
            queue_size=1)
        self.move_base_result_sub = rospy.Subscriber(
            "/move_base/result",
            MoveBaseActionResult,
            self.move_base_result_callback,
            queue_size=1)
        self.sound = SoundClient()
        self.lang = "japanese"  # speak japanese by default
        if rospy.has_param("/nav_speak/lang"):
            self.lang = rospy.get_param("/nav_speak/lang")
        self.pub = rospy.Publisher('/robotsound_jp/goal',
                                   SoundRequestActionGoal,
                                   queue_size=1)

    def move_base_goal_callback(self, msg):
        """Beep when a new navigation goal arrives."""
        self.sound.play(2)

    def move_base_result_callback(self, msg):
        """Play a status jingle, then announce the navigation result."""
        status = msg.status.status
        text = "{}: {}".format(goal_status(status), msg.status.text)
        rospy.loginfo(text)

        # Pre-build (and timestamp) the Japanese TTS goal when applicable.
        sound_goal = None
        if self.lang == "japanese":  # speak japanese
            sound_goal = SoundRequestActionGoal()
            sound_goal.goal_id.stamp = rospy.Time.now()
            req = sound_goal.goal.sound_request
            req.sound = -3
            req.command = 1
            req.volume = 1.0
            req.arg2 = "jp"

        # status -> (built-in jingle id, Japanese phrase)
        announcements = {
            GoalStatus.SUCCEEDED: (1, "到着しました"),
            GoalStatus.PREEMPTED: (2, "別のゴールがセットされました"),
            GoalStatus.ABORTED: (3, "中断しました"),
        }

        if status in announcements:
            jingle, phrase = announcements[status]
            self.sound.play(jingle)
            time.sleep(1)
            if sound_goal is not None:
                sound_goal.goal.sound_request.arg = phrase
                self.pub.publish(sound_goal)
            else:
                self.sound.say(text)
        else:
            # Unknown status: always announce in English.
            self.sound.play(4)
            time.sleep(1)
            self.sound.say(text)
Beispiel #28
0
class voice_cmd_vel:
    """Voice-controlled teleop node.

    Listens to speech-recognizer output on recognizer/output and maps
    commands to cmd_vel motion, follower / mapping subprocesses and
    move_base navigation goals.  Named locations are loaded from a text
    file of "name: x y" lines.
    """

    def __init__(self):
        rospy.on_shutdown(self.cleanup)
        self.speed = 0.1
        self.buildmap = False
        self.follower = False
        self.navigation = False
        self.msg = Twist()

        # How long in seconds should the robot pause at each location?
        self.rest_time = rospy.get_param("~rest_time", 10)
        # .txt file with name and x, y coordinates for location
        self.map_locations = rospy.get_param("~map_locations")
        # odometry topic name
        self.odometry_topic = rospy.get_param("~odometry_topic", "odom")
        # cmd_vel topic name
        self.cmd_vel_topic = rospy.get_param("~cmd_vel_topic", "cmd_vel")

        # Parse "name: x y" lines into Pose objects (identity orientation).
        # Fix: use a context manager so the file handle is always closed.
        self.locations = dict()
        with open(self.map_locations) as fh:
            for line in fh:
                name = line.rstrip().split(":")
                temp = str(line.rstrip().rsplit(":", 1)[1])
                coordinates = temp.split()
                self.locations[name[0]] = Pose(
                    Point(float(coordinates[0]), float(coordinates[1]), 0.000),
                    Quaternion(*tf.transformations.quaternion_from_euler(0, 0, 0)))

        # Create the sound client object
        self.soundhandle = SoundClient()

        rospy.sleep(1)
        self.soundhandle.stopAll()

        # Subscribe to the move_base action server
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)

        rospy.loginfo("Waiting for move_base action server...")

        # Wait 60 seconds for the action server to become available
        self.move_base.wait_for_server(rospy.Duration(60))

        rospy.loginfo("Connected to move base server")

        # Announce that we are ready for input
        rospy.sleep(1)
        self.soundhandle.say('Hi, my name is Petrois')
        rospy.sleep(2)
        self.soundhandle.say("Say one of the navigation commands")

        # publish to cmd_vel, subscribe to speech output
        self.pub = rospy.Publisher(self.cmd_vel_topic, Twist, queue_size=2)
        rospy.Subscriber('recognizer/output', String, self.speechCb)

        # Keep republishing the current velocity command at 10 Hz.
        r = rospy.Rate(10.0)
        while not rospy.is_shutdown():
            self.pub.publish(self.msg)
            r.sleep()

    def speechCb(self, msg):
        """Dispatch one recognized utterance to the matching command."""
        rospy.loginfo(msg.data)

        # Speed selection
        if msg.data.find("fast") > -1:
            if self.speed != 0.3:
                self.soundhandle.say('Speeding up')
                self.set_speed(0.3)
            else:
                self.soundhandle.say('Already at full speed')

        if msg.data.find("half") > -1:
            if self.speed != 0.2:
                self.soundhandle.say('Going at half speed')
                self.set_speed(0.2)
            else:
                self.soundhandle.say('Already at half speed')

        if msg.data.find("slow") > -1:
            if self.speed != 0.1:
                self.soundhandle.say('Slowing down')
                self.set_speed(0.1)
            else:
                self.soundhandle.say('Already at slow speed')

        # Basic motion
        if msg.data.find("forward") > -1:
            self.forward()
        elif msg.data.find("right") > -1:
            self.right()
        elif msg.data.find("left") > -1:
            self.left()
        elif msg.data.find("back") > -1:
            self.backward()
        elif msg.data.find("stop") > -1 or msg.data.find("halt") > -1:
            self.stop()

################################# follower commands

        if msg.data.find("follow me") > -1:
            self.run_follower(True)
        elif msg.data.find("stop follower") > -1:
            self.run_follower(False)

################################ map commands

        if msg.data.find("build map") > -1:
            self.build_map(True)

        elif msg.data.find("save map") > -1:
            self.save_map()

        elif msg.data.find("stop map") > -1:
            self.build_map(False)

################################ navigation commands

        if msg.data.find("kk") > -1:  # OJO: placeholder keyword, change
            if self.navigation == False:
                self.soundhandle.say('Starting navigation stack')
                rospy.sleep(2)
                self.soundhandle.say('Visualizing costmaps')
                self.msg = Twist()
                self.proc5 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'navigation_p3at.launch'])
                self.proc6 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'rviz-navigation.launch'])
                self.navigation = True
            else:
                self.soundhandle.say('Already in navigation mode')

        # NOTE(review): this condition is identical to the branch above, so
        # it is unreachable until the placeholder keyword is changed.
        elif msg.data.find("kk") > -1:  # OJO: placeholder keyword, change
            if self.navigation == True:
                self.msg = Twist()
                print('proc5 = %s' % self.proc5.pid)
                self.proc5.terminate()
                kill(self.proc5.pid)
                self.proc5.kill()
                print('proc6 = %s' % self.proc6.pid)
                self.proc6.terminate()
                kill(self.proc6.pid)
                self.proc6.kill()
                self.navigation = False
                self.soundhandle.say('Navigation stopped')
            else:
                self.soundhandle.say('I am not in navigation mode')

        elif msg.data.find("navigate to") > -1:
            if len(msg.data.split()) > 2:
                if msg.data.rsplit("navigate to ", 1)[1] in self.locations.keys():
                    self.send_goal(msg.data.rsplit("navigate to ", 1)[1])

        self.pub.publish(self.msg)

    def send_goal(self, location_name):
        """Send a move_base goal for a named location and announce it."""
        location = self.locations.get(location_name)
        # Set up goal location
        self.goal = MoveBaseGoal()
        self.goal.target_pose.pose = location
        self.goal.target_pose.header.frame_id = 'map'
        self.goal.target_pose.header.stamp = rospy.Time.now()
        rospy.loginfo(self.goal)

        # Let the user know where the robot is going next
        rospy.loginfo("Going to: " + str(location_name))
        self.soundhandle.say("Going to " + str(location_name))

        # Start the robot toward the next location
        self.move_base.send_goal(self.goal)

    def run_follower(self, on):
        """Start or stop the person-follower launch file."""
        if on and self.follower == False:
            self.msg = Twist()
            subprocess.Popen(['roslaunch', 'pioneer_utils', 'simple-follower.launch'])
            self.soundhandle.say('Okay. Show me the way')
            self.follower = True
        elif on and self.follower:
            self.soundhandle.say('Already in follower mode')
        elif on == False and self.follower:
            self.msg = Twist()
            subprocess.Popen(['rosnode', 'kill', 'turtlebot_follower'])
            self.follower = False
            self.soundhandle.say('Follower mode disabled')

    def forward(self):
        """Drive straight ahead at the current speed."""
        self.soundhandle.play(1)
        self.msg.linear.x = self.speed
        self.msg.angular.z = 0

    def backward(self):
        """Drive straight back at the current speed."""
        self.soundhandle.play(1)
        self.msg.linear.x = -self.speed
        self.msg.angular.z = 0

    def left(self):
        """Turn in place to the left."""
        self.soundhandle.play(1)
        self.msg.linear.x = 0
        self.msg.angular.z = self.speed*2

    def right(self):
        """Turn in place to the right."""
        self.soundhandle.play(1)
        self.msg.linear.x = 0
        self.msg.angular.z = -self.speed*2

    def stop(self):
        """Cancel navigation/follower and zero the velocity command."""
        self.move_base.cancel_goal()
        self.run_follower(False)
        self.msg = Twist()
        self.soundhandle.play(1)

    def set_speed(self, vel):
        """Rescale the current motion to vel, preserving direction."""
        if self.msg.linear.x > 0:
            self.msg.linear.x = vel
        elif self.msg.linear.x < 0:
            self.msg.linear.x = -vel
        if self.msg.angular.z > 0:
            self.msg.angular.z = vel
        elif self.msg.angular.z < 0:
            self.msg.angular.z = -vel
        # Fix: update the stored speed unconditionally.  It was previously
        # indented under the `angular.z < 0` branch, so self.speed stayed
        # stale unless the robot happened to be turning right.
        self.speed = vel

    def build_map(self, on):
        """Start or stop SLAM gmapping (swapping out amcl/map_server)."""
        if on and self.buildmap == False:
            self.stop()
            self.soundhandle.say('Building map with slam gmapping')
            subprocess.Popen(['rosnode', 'kill', 'amcl'])
            subprocess.Popen(['rosnode', 'kill', 'map_server'])
            subprocess.Popen(['roslaunch', 'p2os_launch', 'gmapping.launch'])
            self.buildmap = True
        elif on and self.buildmap:
            self.soundhandle.say('Already building a map')
        elif on == False and self.buildmap:
            self.stop()
            subprocess.Popen(['rosnode', 'kill', 'slam_gmapping'])
            self.buildmap = False
            subprocess.Popen(['roslaunch', 'pioneer_utils', 'floor_zero-map.launch'])
            subprocess.Popen(['roslaunch', 'p2os_launch', 'amcl.launch'])
            self.soundhandle.say('Building map stopped')
        else:
            self.soundhandle.say('I am not building any map')

    def save_map(self):
        """Save the map being built via map_saver (to ~/.ros)."""
        if self.buildmap == True:
            self.msg = Twist()
            subprocess.Popen(['rosrun', 'map_server', 'map_saver', '-f', 'new_map'])
            rospy.sleep(4)
            print('map saved at ~/.ros directory as new_map.pgm new_map.yaml')
            self.soundhandle.say('Map saved successfully')
        else:
            self.soundhandle.say('I am not building any map so there is no map to save')

    def cleanup(self):
        # stop the robot!
        self.stop()
        # Fix: the original published the undefined name `twist`, raising
        # NameError on shutdown.  Publish the zeroed Twist that stop()
        # installed in self.msg instead.
        self.pub.publish(self.msg)
class onlineAnomalyDetection(Thread):
    """Background thread performing online anomaly detection for robot-assisted
    scooping/feeding.

    Streams four modalities -- force magnitude, mic-to-object distance,
    gripper/spoon angle and audio RMS -- scales them against training
    min/max values and feeds them to an HMM (built by ``onlineHMM.iteration``).
    When the HMM flags an anomaly, a message is published on
    'InterruptAction' and a warning sound is played.

    NOTE(review): Python 2 code (print statements); requires ROS, pyaudio,
    tf and the project-local onlineHMM / tool_audio_slim modules.
    """
    # Audio-capture constants (pyaudio); MAX_INT is the int16 full-scale value.
    MAX_INT = 32768.0
    CHUNK   = 1024 # frame per buffer
    RATE    = 48000 # sampling rate
    UNIT_SAMPLE_TIME = 1.0 / float(RATE)
    CHANNEL = 2 # number of channels
    FORMAT  = pyaudio.paInt16

    def __init__(self, subject='s1', task='s', targetFrame=None, tfListener=None, isScooping=True, audioTool=None):
        """Set up subscribers, audio capture and train/load the HMM.

        subject     -- subject identifier used in the saved-data filename.
        task        -- task identifier used in the saved-data filename.
        targetFrame -- tf frame the gripper pose is transformed into.
        tfListener  -- optional shared tf listener (one is created if None).
        isScooping  -- True selects the scooping HMM parameters, False feeding.
        audioTool   -- optional shared audio capture tool (created if None).
        """
        super(onlineAnomalyDetection, self).__init__()
        # Daemon thread so it will not block interpreter shutdown.
        self.daemon = True
        self.cancelled = False
        self.isRunning = False

        # Predefined settings
        self.downSampleSize = 100 #200
        self.cov_mult       = 5.0
        self.isScooping     = isScooping
        self.subject        = subject
        self.task           = task
        # HMM hyperparameters differ per task; scale has one entry per
        # modality in the order [force, distance, angle, audio].
        if self.isScooping:
            self.nState         = 10
            self.cutting_ratio  = [0.0, 0.9] #[0.0, 0.7]
            self.anomaly_offset = -15.0
            self.scale          = [1.0,1.0,1.0,0.7]  #10
            self.ml_thres_pkl='ml_scooping_thres.pkl'
        else:
            self.nState         = 15
            self.cutting_ratio  = [0.0, 0.7]
            self.anomaly_offset = -20
            self.scale          = [1.0,1.0,0.7,1.0]  #10
            self.ml_thres_pkl='ml_feeding_thres.pkl'

        print 'is scooping:', self.isScooping

        # NOTE(review): Publisher() without queue_size -- deprecated since
        # ROS Hydro but kept byte-identical here.
        self.publisher = rospy.Publisher('visualization_marker', Marker)
        self.interruptPublisher = rospy.Publisher('InterruptAction', String)
        self.targetFrame = targetFrame

        # Data logging
        # updateNumber is bumped by the force callback; run() only processes
        # a sample when it has advanced past lastUpdateNumber.
        self.updateNumber = 0
        self.lastUpdateNumber = 0
        self.init_time = rospy.get_time()

        if tfListener is None:
            self.transformer = tf.TransformListener()
        else:
            self.transformer = tfListener

        # Gripper
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.mic = None
        # Buffer of recent gripper transform matrices (see transposeGripper).
        self.grips = []
        # Spoon
        self.spoon = None

        # FT sensor
        self.force = None
        self.torque = None

        # Audio
        if audioTool is None:
            self.audioTool = tool_audio_slim()
            self.audioTool.start()
        else:
            self.audioTool = audioTool

        # Kinematics
        self.jointAngles = None
        self.jointVelocities = None

        self.soundHandle = SoundClient()

        # Template for the pre-recorded batch data file used to train the HMM.
        saveDataPath = '/home/dpark/git/hrl-assistive/hrl_multimodal_anomaly_detection/src/hrl_multimodal_anomaly_detection/hmm/batchDataFiles/%s_%d_%d_%d_%d.pkl'
        # Setup HMM to perform online anomaly detection
        self.hmm, self.minVals, self.maxVals, self.minThresholds \
        = onlineHMM.iteration(downSampleSize=self.downSampleSize,
                              scale=self.scale, nState=self.nState,
                              cov_mult=self.cov_mult, anomaly_offset=self.anomaly_offset, verbose=False,
                              isScooping=self.isScooping, use_pkl=False,
                              train_cutting_ratio=self.cutting_ratio,
                              findThresholds=True, ml_pkl=self.ml_thres_pkl,
                              savedDataFile=saveDataPath % (('scooping' if self.isScooping else 'feeding'),
                                            self.downSampleSize, self.scale[0], self.nState, int(self.cov_mult)))

        print 'Threshold:', self.minThresholds

        # Scaled feature histories fed to the HMM, plus raw copies for logging.
        self.forces = []
        self.distances = []
        self.angles = []
        self.audios = []
        self.forcesRaw = []
        self.distancesRaw = []
        self.anglesRaw = []
        self.audiosRaw = []
        self.times = []
        self.likelihoods = []
        self.anomalyOccured = False

        # self.avgAngle = onlineHMM.trainData[2]

        self.forceSub = rospy.Subscriber('/netft_data', WrenchStamped, self.forceCallback)
        print 'Connected to FT sensor'

        # Target is either the bowl center (scooping) or the mouth (feeding).
        self.objectCenter = None
        self.objectCenterSub = rospy.Subscriber('/ar_track_alvar/bowl_cen_pose' if isScooping else '/ar_track_alvar/mouth_pose', PoseStamped, self.objectCenterCallback)
        print 'Connected to center of object publisher'

        # Resolve the left-arm joint names from the haptic MPC configuration.
        groups = rospy.get_param('/haptic_mpc/groups' )
        for group in groups:
            if group['name'] == 'left_arm_joints':
                self.joint_names_list = group['joints']

        self.jstate_lock = threading.RLock()
        self.jointSub = rospy.Subscriber("/joint_states", JointState, self.jointStatesCallback)
        print 'Connected to robot kinematics'

    def reset(self):
        """Clear all buffered data and restart detection for a new trial."""
        self.isRunning = True
        self.forces = []
        self.distances = []
        self.angles = []
        self.audios = []
        self.forcesRaw = []
        self.distancesRaw = []
        self.anglesRaw = []
        self.audiosRaw = []
        self.times = []
        self.anomalyOccured = False
        self.updateNumber = 0
        self.lastUpdateNumber = 0
        self.init_time = rospy.get_time()
        self.lGripperPosition = None
        self.lGripperRotation = None
        self.mic = None
        self.grips = []
        self.spoon = None
        self.force = None
        self.torque = None
        self.jointAngles = None
        self.jointVelocities = None
        self.objectCenter = None
        self.likelihoods = []
        self.audioTool.reset(self.init_time)

    def run(self):
        """Overloaded Thread.run, runs the update
        method once per every xx milliseconds."""
        # NOTE(review): with the Rate commented out this is a busy-wait loop
        # that spins at full CPU speed between samples -- confirm intentional.
        # rate = rospy.Rate(1000) # 25Hz, nominally.
        while not self.cancelled:
            # Only process when a new force sample arrived and we have a target.
            if self.isRunning and self.updateNumber > self.lastUpdateNumber and self.objectCenter is not None:
                self.lastUpdateNumber = self.updateNumber
                if not self.processData(): continue
                
                # Wait for a minimum history (15 samples) before checking.
                if not self.anomalyOccured and len(self.forces) > 15:
                    # Perform anomaly detection
                    (anomaly, error) = self.hmm.anomaly_check(self.forces, self.distances, self.angles, self.audios, self.minThresholds*1.5) #temp
                    print 'Anomaly error:', error
                    if anomaly:
                        # Notify the task controller which action to interrupt.
                        if self.isScooping:
                            self.interruptPublisher.publish('Interrupt')
                        else:
                            self.interruptPublisher.publish('InterruptHead')
                        self.anomalyOccured = True
                        self.soundHandle.play(2)
                        print 'AHH!! There is an anomaly at time stamp', rospy.get_time() - self.init_time, (anomaly, error)

                        # Plot the observed signals against training data
                        # (one subplot per modality) and save as a PDF.
                        fig = plt.figure()
                        for i, modality in enumerate([[self.forces] + onlineHMM.trainData[0][:13], [self.distances] + onlineHMM.trainData[1][:13], [self.angles] + onlineHMM.trainData[2][:13], [self.audios] + onlineHMM.trainData[3][:13]]):
                            ax = plt.subplot(int('41' + str(i+1)))
                            for index, (modal, times) in enumerate(zip(modality, [self.times] + onlineHMM.trainTimeList[:3])):
                                ax.plot(times, modal, label='%d' % index)
                            ax.legend()
                        fig.savefig('/home/dpark/git/hrl-assistive/hrl_multimodal_anomaly_detection/src/hrl_multimodal_anomaly_detection/fooboohooyou.pdf')
                        print "saved pdf file"
                        rospy.sleep(2.0)
                        # plt.show()
            # rate.sleep()
        print 'Online anomaly thread cancelled'

    def cancel(self, cancelAudio=True):
        """Stop detection, optionally stop audio capture, and save the data."""
        self.isRunning = False
        if cancelAudio:
            self.audioTool.cancel()
        self.saveData()
        rospy.sleep(1.0)

    def saveData(self):
        """Pickle all recorded (raw and scaled) trial data to a timestamped file."""
        # TODO Save data (Check with daehyung if any more data should be added)
        data = dict()
        data['forces'] = self.forces
        data['distances'] = self.distances
        data['angles'] = self.angles
        data['audios'] = self.audios
        data['forcesRaw'] = self.forcesRaw
        data['distancesRaw'] = self.distancesRaw
        data['anglesRaw'] = self.anglesRaw
        data['audioRaw'] = self.audiosRaw
        data['times'] = self.times
        data['anomalyOccured'] = self.anomalyOccured
        data['minThreshold'] = self.minThresholds
        data['likelihoods'] = self.likelihoods
        data['jointAngles'] = self.jointAngles
        data['jointVelocities'] = self.jointVelocities

        # Recordings go in onlineDataRecordings/ next to this source file.
        directory = os.path.join(os.path.dirname(__file__), 'onlineDataRecordings/')
        if not os.path.exists(directory):
            os.makedirs(directory)
        fileName = os.path.join(directory, self.subject + '_' + self.task + '_' + time.strftime('%m-%d-%Y_%H-%M-%S.pkl'))
        with open(fileName, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
        print 'Online data saved to file.'

    def processData(self):
        """Compute one scaled feature sample from the latest sensor readings.

        Appends to the raw and scaled histories; returns False when no audio
        sample is available yet (the caller skips this update), True otherwise.
        """
        # Find nearest time stamp from training data
        # timeStamp = rospy.get_time() - self.init_time
        # index = np.abs(self.times - timeStamp).argmin()

        self.transposeGripper()

        # Use magnitude of forces
        force = np.linalg.norm(self.force)

        # Determine distance between mic and center of object
        distance = np.linalg.norm(self.mic - self.objectCenter)
        # Find angle between gripper-object vector and gripper-spoon vector
        micSpoonVector = self.spoon - self.mic
        micObjectVector = self.objectCenter - self.mic
        angle = np.arccos(np.dot(micSpoonVector, micObjectVector) / (np.linalg.norm(micSpoonVector) * np.linalg.norm(micObjectVector)))

        # Process either visual or audio data depending on which we're using
        if len(self.audioTool.audio_data_raw) > 0:
            audio = self.audioTool.audio_data_raw[-1]
        else:
            return False
        if audio is None:
            print 'Audio is None'
            return False
        audio = get_rms(audio)
        # print 'Audio:', audio

        self.forcesRaw.append(force)
        self.distancesRaw.append(distance)
        self.anglesRaw.append(angle)
        self.audiosRaw.append(audio)

        # Scale data
        # Min-max normalize each modality with its training range and scale.
        force = self.scaling(force, minVal=self.minVals[0], maxVal=self.maxVals[0], scale=self.scale[0])
        distance = self.scaling(distance, minVal=self.minVals[1], maxVal=self.maxVals[1], scale=self.scale[1])
        angle = self.scaling(angle, minVal=self.minVals[2], maxVal=self.maxVals[2], scale=self.scale[2])
        audio = self.scaling(audio, minVal=self.minVals[3], maxVal=self.maxVals[3], scale=self.scale[3])

        # Find nearest time stamp from training data
        timeStamp = rospy.get_time() - self.init_time
        index = np.abs(np.array(onlineHMM.trainTimeList[0]) - timeStamp).argmin()

        self.forces.append(force)
        self.distances.append(distance)
        self.angles.append(angle)
        #self.angles.append(onlineHMM.trainData[2][0][index])
        self.audios.append(audio)
        self.times.append(rospy.get_time() - self.init_time)
        if len(self.forces) > 1:
            self.likelihoods.append(self.hmm.likelihoods(self.forces, self.distances, self.angles, self.audios))

        return True
            
    @staticmethod
    def scaling(x, minVal, maxVal, scale=1.0):
        """Min-max normalize x into [0, scale] given the training range."""
        return (x - minVal) / (maxVal - minVal) * scale

    def forceCallback(self, msg):
        """Store the latest force/torque vectors and signal a new sample."""
        self.force = np.array([msg.wrench.force.x, msg.wrench.force.y, msg.wrench.force.z])
        self.torque = np.array([msg.wrench.torque.x, msg.wrench.torque.y, msg.wrench.torque.z])
        # Incrementing updateNumber lets run() know new data is available.
        self.updateNumber += 1

    def objectCenterCallback(self, msg):
        """Store the latest object (bowl/mouth) center position."""
        self.objectCenter = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])

    def jointStatesCallback(self, data):
        """Extract left-arm joint angles/velocities from a JointState message."""
        joint_angles = []
        ## joint_efforts = []
        joint_vel = []
        # Map each configured joint name to its index in the message.
        jt_idx_list = [0]*len(self.joint_names_list)
        for i, jt_nm in enumerate(self.joint_names_list):
            jt_idx_list[i] = data.name.index(jt_nm)

        for i, idx in enumerate(jt_idx_list):
            if data.name[idx] != self.joint_names_list[i]:
                raise RuntimeError('joint angle name does not match.')
            joint_angles.append(data.position[idx])
            ## joint_efforts.append(data.effort[idx])
            joint_vel.append(data.velocity[idx])

        # Update shared state under the lock (callback runs in a ROS thread).
        with self.jstate_lock:
            self.jointAngles  = joint_angles
            ## self.joint_efforts = joint_efforts
            self.jointVelocities = joint_vel

    def transposeGripper(self):
        """Update self.mic and self.spoon positions in the target frame.

        Looks up the gripper transform, then uses a (two-entry-old) buffered
        transform to place the mic and spoon offsets -- presumably to
        compensate sensor latency; TODO confirm.
        """
        # Transpose gripper position to camera frame
        self.transformer.waitForTransform(self.targetFrame, '/l_gripper_tool_frame', rospy.Time(0), rospy.Duration(5))
        try :
            self.lGripperPosition, self.lGripperRotation = self.transformer.lookupTransform(self.targetFrame, '/l_gripper_tool_frame', rospy.Time(0))
            transMatrix = np.dot(tf.transformations.translation_matrix(self.lGripperPosition), tf.transformations.quaternion_matrix(self.lGripperRotation))
        except tf.ExtrapolationException:
            print 'Transpose of gripper failed!'
            return

        # Use a buffer of gripper positions
        if len(self.grips) >= 2:
            lGripperTransposeMatrix = self.grips[-2]
        else:
            lGripperTransposeMatrix = transMatrix
        self.grips.append(transMatrix)

        # Determine location of mic
        # Fixed offset of the mic in the gripper frame (meters).
        mic = [0.12, -0.02, 0]
        # print 'Mic before', mic
        self.mic = np.dot(lGripperTransposeMatrix, np.array([mic[0], mic[1], mic[2], 1.0]))[:3]
        # print 'Mic after', self.mic
        # Determine location of spoon
        spoon3D = [0.22, -0.050, 0]
        self.spoon = np.dot(lGripperTransposeMatrix, np.array([spoon3D[0], spoon3D[1], spoon3D[2], 1.0]))[:3]

    def find_input_device(self):
        """Return the pyaudio index of a preferred input device, or None.

        NOTE(review): reads ``self.p`` (a pyaudio.PyAudio instance), which is
        never assigned in this class -- confirm it is set elsewhere or that
        this method is unused.
        """
        device_index = None
        for i in range(self.p.get_device_count()):
            devinfo = self.p.get_device_info_by_index(i)
            print('Device %d: %s'%(i, devinfo['name']))

            for keyword in ['mic', 'input', 'icicle']:
                if keyword in devinfo['name'].lower():
                    print('Found an input: device %d - %s'%(i, devinfo['name']))
                    device_index = i
                    return device_index

        if device_index is None:
            print('No preferred input found; using default input device.')

        return device_index

    def publishPoints(self, name, points, size=0.01, r=0.0, g=0.0, b=0.0, a=1.0):
        """Publish a POINTS visualization Marker in the torso_lift_link frame.

        name   -- marker namespace.
        points -- iterable of (x, y, z) tuples.
        size   -- point size in meters; r/g/b/a -- marker color.
        """
        marker = Marker()
        marker.header.frame_id = '/torso_lift_link'
        marker.ns = name
        marker.type = marker.POINTS
        marker.action = marker.ADD
        marker.scale.x = size
        marker.scale.y = size
        marker.color.a = a
        marker.color.r = r
        marker.color.g = g
        marker.color.b = b
        for point in points:
            p = Point()
            # print point
            p.x, p.y, p.z = point
            marker.points.append(p)
        self.publisher.publish(marker)
Beispiel #30
0
def main():
    """Autonomous mapping loop: navigate to a target, detect map closure,
    then return to a stored shooting location.

    NOTE(review): this function reads several module globals defined
    elsewhere in the file (target, odata, cmd_rotate, quats, cur_pose,
    plus closure(), distance(), rotatebot(), stopbot(), GoToPose).
    """
    global target
    global odata
    global cmd_rotate
    global quats
    global cur_pose

    rospy.init_node('nav_test', anonymous=False, disable_signals=True)

    tfBuffer = tf2_ros.Buffer()
    tfListener = tf2_ros.TransformListener(tfBuffer)
    rospy.sleep(1)
    rospy.Subscriber('map', OccupancyGrid, get_occupancy, tfBuffer)
    # Allow the first map message to arrive before navigating.
    rospy.sleep(3.0)

    # save start time
    start_time = time.time()
    # initialize variable to write elapsed time to file
    # contourCheck acts as a state flag: 1 = still mapping, 5 = map complete.
    contourCheck = 1

    navigator = GoToPose()
    position = {'x': target[0], 'y': target[1]}
    quaternion = {'r1': 0.000, 'r2': 0.000, 'r3': 0.000, 'r4': 1.000}

    rospy.loginfo("Go to (%s, %s) pose", position['x'], position['y'])
    navigator.goto(position, quaternion)

    rospy.on_shutdown(stopbot)
    rate = rospy.Rate(10)

    shutdown = False

    while not shutdown:
        # Inner loop runs until a rotation command arrives on cmd_rotate.
        while not cmd_rotate:

            if contourCheck == 1:
                if closure(odata):
                    # map is complete, so save current time into file
                    with open("maptime.txt", "w") as f:
                        f.write("Elapsed Time: " +
                                str(time.time() - start_time))
                    contourCheck = 5
                    # play a sound
                    soundhandle = SoundClient()
                    rospy.sleep(1)
                    soundhandle.stopAll()
                    soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
                    rospy.sleep(2)
                    # save the map
                    # NOTE(review): writes `occdata` although the global
                    # declared above is `odata` -- likely a bug; confirm
                    # which name the map callback populates.
                    cv2.imwrite('mazemap.png', occdata)
                    pub = rospy.Publisher('mapdone', String)
                    pub.publish('Done!')
                    rospy.sleep(1)
                    toggle = False

            # NOTE(review): stored_quat/stored_pose are assigned only at the
            # bottom of the outer loop, so on the first pass these reads
            # raise UnboundLocalError; also `not stored_pose` followed by
            # stored_pose[0] indexing looks inverted -- verify intent.
            if contourCheck == 5 and not stored_quat and not stored_pose:
                position = {'x': stored_pose[0], 'y': stored_pose[1]}
                quaternion = {
                    'r1': stored_quat[0],
                    'r2': stored_quat[1],
                    'r3': stored_quat[2],
                    'r4': stored_quat[3]
                }

                # Keep re-issuing the goal until within 3 units of the pose.
                while distance(cur_pose, stored_pose) > 3:
                    rospy.loginfo("Go to (%s, %s) pose", position['x'],
                                  position['y'])
                    navigator.goto(position, quaternion)

                rospy.loginfo('Shooting Loc Reached')
                pub2 = rospy.Publisher('alldone', String, queue_size=10)
                pub2.publish('Done')
                time.sleep(1)
                shutdown = True

            # NOTE(review): `toggle` is only bound inside the closure branch
            # above -- this read can raise UnboundLocalError before the map
            # completes; confirm.
            if toggle or stored_quat or stored_pose:
                position = {'x': target[0], 'y': target[1]}
                quaternion = {
                    'r1': 0.000,
                    'r2': 0.000,
                    'r3': 0.000,
                    'r4': 1.000
                }

                rospy.loginfo("Go to (%s, %s) pose", position['x'],
                              position['y'])
                navigator.goto(position, quaternion)
                rate.sleep()

        # Execute the requested rotation, then remember the current pose
        # as the stored shooting location.
        rotatebot(cmd_rotate)
        cmd_rotate = ''
        stored_pose = cur_pose
        stored_quat = quats
# Author: Blaise Gassend

import sys

if __name__ == '__main__':
    if len(sys.argv) != 2 or sys.argv[1] == '--help':
        print 'Usage: %s <sound_id>' % sys.argv[0]
        print
        print 'Plays one of the built-in sounds based on its integer ID. Look at the sound_play/SoundRequest message definition for IDs.'
        exit(1)

    # Import here so that usage is fast.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient

    rospy.init_node('play', anonymous=True)

    soundhandle = SoundClient()

    rospy.sleep(1)

    num = int(sys.argv[1])

    print 'Playing sound %i.' % num

    soundhandle.play(num)

    rospy.sleep(1)
class voice_cmd_vel:
    """Voice-controlled teleoperation node.

    Listens to recognized speech on 'recognizer/output' and translates
    commands into cmd_vel Twists, follower mode, gmapping map building and
    navigation-stack control (launched/killed via subprocess).

    NOTE(review): Python 2 code; requires ROS, pocketsphinx-style
    recognizer output, and a module-level kill() helper defined elsewhere.
    """

    def __init__(self):
        """Set up sound output, publishers/subscribers, then spin.

        NOTE(review): the publishing loop at the end never returns, so the
        constructor blocks until rospy shutdown -- confirm intentional.
        """
        rospy.on_shutdown(self.cleanup)
        # Current command magnitude; one of 0.1 (slow), 0.2 (half), 0.3 (fast).
        self.speed = 0.1
        # Mode flags for map building, person following and navigation.
        self.buildmap = False
        self.follower = False
        self.navigation = False
        # Current velocity command, republished at 10 Hz below.
        self.msg = Twist()


        # Create the sound client object
        self.soundhandle = SoundClient()
       
        rospy.sleep(1)
        self.soundhandle.stopAll()
       
        # Announce that we are ready for input
        rospy.sleep(1)
        self.soundhandle.say('Hi, my name is Petrois')
        rospy.sleep(3)
        self.soundhandle.say("Say one of the navigation commands")

        # publish to cmd_vel, subscribe to speech output
        self.pub_ = rospy.Publisher("cmd_vel", Twist, queue_size=2)
        rospy.Subscriber('recognizer/output', String, self.speechCb)

        # Continuously republish the current command at 10 Hz.
        r = rospy.Rate(10.0)
        while not rospy.is_shutdown():
            self.pub_.publish(self.msg)
            r.sleep()
        
    def speechCb(self, msg):
        """Handle one recognized utterance: speed, motion, follower,
        mapping and navigation commands (simple substring matching)."""
        rospy.loginfo(msg.data)

        # --- speed commands: rescale the current Twist, keeping direction ---
        if msg.data.find("fast") > -1:
            if self.speed != 0.3:
                self.soundhandle.say('Speeding up')
                if self.msg.linear.x > 0:
                    self.msg.linear.x = 0.3
                elif self.msg.linear.x < 0:
                    self.msg.linear.x = -0.3
                if self.msg.angular.z >0:
                    self.msg.angular.z = 0.3
                elif self.msg.angular.z < 0:
                    self.msg.angular.z = -0.3
                self.speed = 0.3
            else:
                self.soundhandle.say('Already at full speed')

        if msg.data.find("half") > -1:
            if self.speed != 0.2:
                self.soundhandle.say('Going at half speed')
                if self.msg.linear.x > 0:
                    self.msg.linear.x = 0.2
                elif self.msg.linear.x < 0:
                    self.msg.linear.x = -0.2
                if self.msg.angular.z >0:
                    self.msg.angular.z = 0.2
                elif self.msg.angular.z < 0:
                    self.msg.angular.z = -0.2
                self.speed = 0.2
            else:
                self.soundhandle.say('Already at half speed')

        if msg.data.find("slow") > -1:
            if self.speed != 0.1:
                self.soundhandle.say('Slowing down')
                if self.msg.linear.x > 0:
                    self.msg.linear.x = 0.1
                elif self.msg.linear.x < 0:
                    self.msg.linear.x = -0.1
                if self.msg.angular.z >0:
                    self.msg.angular.z = 0.1
                elif self.msg.angular.z < 0:
                    self.msg.angular.z = -0.1
                self.speed = 0.1
            else:
                self.soundhandle.say('Already at slow speed')

        # --- motion commands (sound 1 confirms each recognized command) ---
        if msg.data.find("forward") > -1:
            self.soundhandle.play(1)    
            self.msg.linear.x = self.speed
            self.msg.angular.z = 0
        elif msg.data.find("left") > -1:
            self.soundhandle.play(1)
            # While driving, nudge the turn rate; when stopped, spin in place.
            if self.msg.linear.x != 0:
                if self.msg.angular.z < self.speed:
                    self.msg.angular.z += 0.05
            else:        
                self.msg.angular.z = self.speed*2
        elif msg.data.find("right") > -1:
            self.soundhandle.play(1)    
            if self.msg.linear.x != 0:
                if self.msg.angular.z > -self.speed:
                    self.msg.angular.z -= 0.05
            else:        
                self.msg.angular.z = -self.speed*2
        elif msg.data.find("back") > -1:
            self.soundhandle.play(1)
            self.msg.linear.x = -self.speed
            self.msg.angular.z = 0
        elif msg.data.find("stop") > -1 or msg.data.find("halt") > -1:
            self.soundhandle.play(1)
            # All-zero Twist halts the robot.
            self.msg = Twist()

################################# follower commands
        
        if msg.data.find("follow me") > -1:
            if self.follower == False:
                self.msg = Twist()
                self.proc1 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'simple-follower.launch'])
                self.soundhandle.say('Okay. Show me the way')
                self.follower = True
            else:
                self.soundhandle.say('Already in follower mode')
		
        elif msg.data.find("stop follower") > -1:
            if self.follower == True:
                self.msg = Twist()
                print 'proc1 = ', self.proc1.pid
                # terminate + kill() + kill to make sure roslaunch and its
                # children are gone; kill() is a module-level helper.
                self.proc1.terminate()
                kill(self.proc1.pid)
                self.proc1.kill()
                self.follower = False
                self.soundhandle.say('Follower mode disabled')
            else:
                self.soundhandle.say('Hey, I wasnt following you')

################################ map commands

        if msg.data.find("build map") > -1:
            if self.buildmap == False:
                self.soundhandle.say('Building map with slam gmapping')
                rospy.sleep(2)
                self.soundhandle.say('Visualizing map')
                self.msg = Twist()
                self.proc2 = subprocess.Popen(['roslaunch', 'p2os_launch', 'gmapping.launch'])
                self.proc3 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'rviz-gmapping.launch'])
                self.buildmap = True
            else:
                self.soundhandle.say('Already building a map')


        elif msg.data.find("save map") > -1:
            if self.buildmap == True:
                self.msg = Twist()
                self.proc4 = subprocess.Popen(['rosrun', 'map_server', 'map_saver', '-f', 'new_map'])
                # Give map_saver time to write the files before announcing.
                rospy.sleep(6)
                print 'map saved at ~/.ros directory as new_map.pgm new_map.yaml'
                self.soundhandle.say('Map saved successfully')
            else:
                self.soundhandle.say('I am not building any map so there is no map to save')
		
        elif msg.data.find("stop map") > -1:
            if self.buildmap == True:
               self.msg = Twist() 
               print 'proc2 = ', self.proc2.pid
               self.proc2.terminate()
               kill(self.proc2.pid)
               self.proc2.kill()
               print 'proc3 = ', self.proc3.pid
               self.proc3.terminate()
               kill(self.proc3.pid)
               self.proc3.kill()
               # NOTE(review): proc4 only exists after a "save map" command;
               # "stop map" without a prior save raises AttributeError here.
               print 'proc4 = ', self.proc4.pid
               self.proc4.terminate()
               kill(self.proc4.pid)
               self.proc4.kill()
               self.buildmap = False
               self.soundhandle.say('Building map stopped')
            else:
               self.soundhandle.say('I am not building any map')

################################ navigation commands

        if msg.data.find("navigate") > -1:
            if self.navigation == False:
                self.soundhandle.say('Starting navigation stack')
                rospy.sleep(2)
                self.soundhandle.say('Visualizing costmaps')
                self.msg = Twist()
                self.proc5 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'navigation_p3at.launch'])
                self.proc6 = subprocess.Popen(['roslaunch', 'pioneer_utils', 'rviz-navigation.launch'])
                self.navigation = True
            else:
                self.soundhandle.say('Already in navigation mode')


        elif msg.data.find("stop navigation") > -1:
            if self.navigation == True:
                self.msg = Twist()
                print 'proc5 = ', self.proc5.pid
                self.proc5.terminate()
                kill(self.proc5.pid)
                self.proc5.kill()
                print 'proc6 = ', self.proc6.pid
                self.proc6.terminate()
                kill(self.proc6.pid)
                self.proc6.kill()
                self.navigation = False
                self.soundhandle.say('Navigation stopped')
            else:
                self.soundhandle.say('I am not in navigation mode')
        
        # Publish immediately so the command takes effect without waiting
        # for the 10 Hz loop in __init__.
        self.pub_.publish(self.msg)

    def cleanup(self):
        """Shutdown hook: publish a zero Twist so the robot stops."""
        # stop the robot!
        twist = Twist()
        self.pub_.publish(twist)
# Module-level SoundClient handle; placeholder 0 until the __main__ block
# replaces it with a real SoundClient instance.
soundhandle = 0

def sleep(t):
	"""Sleep for *t* seconds via rospy, ignoring shutdown interruptions.

	Fix: the original bare ``except:`` swallowed every exception,
	including KeyboardInterrupt and SystemExit. rospy.sleep raises
	ROSInterruptException (and its subclass ROSTimeMovedBackwardsException)
	when the node shuts down or sim time jumps; only those are silenced now.
	"""
	try:
		rospy.sleep(t)
	except rospy.ROSInterruptException:
		# Node is shutting down mid-sleep: wake up quietly.
		pass

def voice_play_callback(data):
	"""Speak the text received on the 'robot_voice' topic.

	data -- std_msgs/String message; its .data field is the text to speak.
	"""
	global soundhandle
	# Build a one-shot voice (TTS) sound from the message text and play it.
	s = soundhandle.voiceSound(data.data)
	s.play()
	# Pause before returning -- presumably to let playback finish before
	# the next message is handled; TODO confirm.
	sleep(2)
	
	

if __name__ == '__main__':
	rospy.init_node('robot_voice_node', anonymous=True)
	rospy.Subscriber("robot_voice", String, voice_play_callback)
	# NOTE(review): `global` at module level is a no-op; the assignment
	# below rebinds the module-level soundhandle regardless.
	global soundhandle
	soundhandle = SoundClient()
	# Let the SoundClient publisher connect to soundplay_node first.
	rospy.sleep(1)
	soundhandle.stopAll()
	soundhandle.play(SoundRequest.NEEDS_PLUGGING)
	sleep(1)
	global soundhandle
	# Greet the user via TTS, then hand control to the ROS event loop.
	s = soundhandle.voiceSound('Ass slam aly kum sir. How can I be of assistance')
	s.play()
	sleep(3)
	rospy.spin()
Beispiel #34
0
    print
    #print 'Try to play wave files that do not exist.'
    #soundhandle.playWave('17')
    #soundhandle.playWave('dummy')
        
    #print 'say'
    #soundhandle.say('Hello world!')
    #sleep(3)
    #    
    print 'wave'
    soundhandle.playWave('say-beep.wav')
    sleep(2)
        
    print 'plugging'
    soundhandle.play(SoundRequest.NEEDS_PLUGGING)
    sleep(2)

    print 'unplugging'
    soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
    sleep(2)

    print 'plugging badly'
    soundhandle.play(SoundRequest.NEEDS_PLUGGING_BADLY)
    sleep(2)

    print 'unplugging badly'
    soundhandle.play(SoundRequest.NEEDS_UNPLUGGING_BADLY)
    sleep(2)

    s1 = soundhandle.builtinSound(SoundRequest.NEEDS_UNPLUGGING_BADLY)
Beispiel #35
0
            rospy.loginfo(['Stop!'])
            # find direction with the largest distance from the Lidar
            # rotate to that direction
            # start moving
            pick_direction()

        # check if SLAM map is complete
        if contourCheck :
            if closure(occdata) :
                # map is complete, so save current time into file
                with open("maptime.txt", "w") as f:
                    f.write("Elapsed Time: " + str(time.time() - start_time))
                contourCheck = 0
                # play a sound
                soundhandle = SoundClient()
                rospy.sleep(1)
                soundhandle.stopAll()
                soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
                rospy.sleep(2)
                # save the map
                cv2.imwrite('mazemap.png',occdata)

        rate.sleep()


if __name__ == '__main__':
    try:
        mover()
    except rospy.ROSInterruptException:
        # Ctrl-C / node shutdown while mover() runs is expected; exit quietly.
        pass