Example no. 1
    def turn_angle(self, angle, velocity=1.0):
        """Turns the robot a given number of degrees in radians

        You can easily convert degress into radians with the radians() function:

            robot.turn_angle(radians(45))  # Turn 45 degrees

        You can also give an angular velocity to turn at, in radians per second:

            robot.turn_angle(radians(-45), radians(45))  # Turn back over a second
        """
        self.__exit_if_movement_disabled()
        # No bounds checking because we trust people. Not like William.
        r = rospy.Rate(1)
        while not self.__have_odom and not rospy.is_shutdown():
            self.say("Waiting for odometry")
            r.sleep()

        msg = Twist()
        if angle >= 0:
            msg.angular.z = np.abs(velocity)
        else:
            msg.angular.z = -np.abs(velocity)
        angle0 = self.__cumulative_angle
        r = rospy.Rate(100)
        while not rospy.is_shutdown():
            a_diff = self.__cumulative_angle - angle0
            if (angle > 0 and a_diff >= angle) or (angle < 0 and a_diff <= angle):
                break

            self.__cmd_vel_pub.publish(msg)
            r.sleep()
        msg.angular.z = 0.0
        self.__cmd_vel_pub.publish(msg)
Example no. 2
 def wait_for_press(self,topic, value=None):
     rospy.logdebug("Tablet manager waiting on topic: " + str(topic) + " for value: " + str(value))
     topic = topic.strip('/')
     if value is None:
         self.subs[topic]["pressed"] = False
         while not self.subs[topic]["pressed"]:
             if rospy.is_shutdown():
                 return
             rospy.sleep(0.1)
         ret = self.subs[topic]["last_press"]
     else:
         #don't return right away if the value we are waiting for was the
         #last value pressed before we started waiting
         self.subs[topic]["pressed"] = False
         while not self.subs[topic]["pressed"]:
             if rospy.is_shutdown():
                 return
             rospy.sleep(0.1)
         #now we can just wait until the last_press value is what we want
         while self.subs[topic]["last_press"] != value:
             if rospy.is_shutdown():
                 return
             rospy.sleep(0.1)
         ret = self.subs[topic]["last_press"]
     return ret
Example no. 3
 def run(self):
     while not rospy.is_shutdown():
         # Drain the queue, keeping only the most recent message
         while not rospy.is_shutdown():
             m = self.queue.get()
             if self.queue.empty():
                 break
         self.function(m)
Example no. 4
def joystick():
  global xOut
  global wOut
  global right_state
  msgs = []
  with open(device,'r') as pipe:
    for i in range(144): #there's ~144 bytes (18 packets of data) on initialization. dropping them
       pipe.read(1)
    while not rospy.is_shutdown():
      try:
        for char in pipe.read(1):
          msgs.append(ord(char))
          if len(msgs) == 8:
            if msgs[6] == 2 and msgs[7] == 0:
              wOut = twosCompInverse(msgs[5])
            elif msgs[6] == 2 and msgs[7] == 1:
              xOut = twosCompInverse(msgs[5])
            elif msgs[7] == 4:
              right_state = (msgs[4] == 1)
            msgs = []
      except IOError:
        print "Joystick Error, attempting reconnect"
        reconnected = False
        while not rospy.is_shutdown() and not reconnected:
          try:
            pipe = open("/dev/input/js0",'r')
            print "Reconnect success"
            reconnected = True
          except IOError:
            print "Reconnect failed"
            time.sleep(1)
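Example no. 5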
 def execute(self,userdata):
     self.angle = userdata.angle_in
     print self.angle
     move_cmd = Twist()
     while not rospy.is_shutdown():
         move_cmd = Twist()
         self.odom_angle = self.get_odom_angle()
         last_angle = self.odom_angle
         turn_angle = 0
         angular_speed = self.speed
         while abs(float(turn_angle)) < abs(float(self.angle)):
             if rospy.is_shutdown():
                 return "succeeded"
             if self.angle < 0:
                 move_cmd.angular.z = -angular_speed
             else:
                 move_cmd.angular.z = angular_speed
             self.cmd_vel.publish(move_cmd)
             self.r.sleep()
             self.odom_angle = self.get_odom_angle()
             delta_angle = self.normalize_angle(self.odom_angle - last_angle)
             turn_angle += delta_angle
             last_angle = self.odom_angle
         rospy.sleep(0.5)
         if move_cmd.angular.z != 0.0:
             self.cmd_vel.publish(move_cmd)
         else:
             self.cmd_vel.publish(Twist())
             break
         return "succeeded"
 def run(self):
     while not rospy.is_shutdown():
         self.grasp_joint_traj = None
         self.return_grasp_joint_traj = None
         
         print('Waiting for grasp trajectories')
         while not rospy.is_shutdown() and (self.grasp_joint_traj is None or self.return_grasp_joint_traj is None):
             self.sim.update()
             rospy.sleep(1)
         
         print('Grasping!')
         grasp_joint_traj = self.grasp_joint_traj
         return_grasp_joint_traj = self.return_grasp_joint_traj
 
         print('Opening gripper')        
         self.arm.open_gripper()
         rospy.sleep(1)
         print('Going to grasp')
         self.arm.execute_joint_trajectory(grasp_joint_traj, speed=0.06)
         print('Closing gripper')
         self.arm.close_gripper()
         rospy.sleep(1.5)
         print('Returning from grasp')
         self.arm.execute_joint_trajectory(return_grasp_joint_traj, speed=0.06)
         print('Opening gripper')
         self.arm.open_gripper()
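Example no. 7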
    def playback(self):
        """
        Loops playback of recorded joint position waypoints until program is
        exited
        """
        rospy.sleep(1.0)

        rospy.loginfo("Waypoint Playback Started")
        print("  Press Ctrl-C to stop...")

        # Set joint position speed ratio for execution
        self._limb.set_joint_position_speed(self._speed)

        # Loop until program is exited
        loop = 0
        while not rospy.is_shutdown():
            loop += 1
            print("Waypoint playback loop #%d " % (loop,))
            for waypoint in self._waypoints:
                if rospy.is_shutdown():
                    break
                self._limb.move_to_joint_positions(waypoint, timeout=20.0,
                                                   threshold=self._accuracy)
            # Sleep for a few seconds between playback loops
            rospy.sleep(3.0)

        # Set joint position speed back to default
        self._limb.set_joint_position_speed(0.3)
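Example no. 8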
    def execute(self):
        check = 0
        if self._running:
            rospy.logerr("Kobuki TestSuite: already executing a motion, ignoring the request")
            return
        self._stop = False
        self._running = True
        rate = rospy.Rate(10)
        self._current_speed = 0.0
        current_distance_sq = 0.0
        self._current_angle = 0.0
        state = 0
        distance_sq = self.distance*self.distance
        self._starting_pose = self._current_pose
        while not self._stop and not rospy.is_shutdown():
            current_distance_sq = (self._current_pose.position.x - self._starting_pose.position.x)*(self._current_pose.position.x - self._starting_pose.position.x) + \
                                  (self._current_pose.position.y - self._starting_pose.position.y)*(self._current_pose.position.y - self._starting_pose.position.y)
            #current_distance_sq += 0.01 # uncomment this and comment above for debugging
            print("Distance %s"%math.sqrt(current_distance_sq))
            print("cur x %s"%self._current_pose.position.x+"y %s"%self._current_pose.position.y)
            rate.sleep()
        if not rospy.is_shutdown():
            cmd = Twist()
            cmd.linear.x = 0.0
            cmd.angular.z = 0.0
            self.cmd_vel_publisher.publish(cmd)
        self._running = False
Example no. 9
def main():
    global stembotConn

    rospy.init_node('stembot_driver')

    print('Connecting to STEMbot...')
    while not rospy.is_shutdown() and stembotConn is None:
        try:
            stembotConn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            stembotConn.settimeout(1)
            stembotConn.connect(('192.168.1.112', 30000))
        except Exception as ex:
            print(ex)
            stembotConn = None
            sleep(1)
    print('Connected!')

    rospy.Subscriber('/joy', Joy, joyCB, queue_size=1)

    while not rospy.is_shutdown():
        readable, writable, exceptional = select.select([stembotConn], [], [], 0)
        
        for a in readable:
            if a == stembotConn: # If stembot activity
                data = stembotConn.recv(1024)
                print_voltage(data.encode())
        sleep(.01)

    stembotConn.close()
Example no. 10
    def get_configuration(self, timeout=None):
        """
        Return the latest received server configuration (wait to receive
        one if none have been received)

        @param timeout: time to wait before giving up
        @type  timeout: float
        @return: dictionary mapping parameter names to values or None if unable to retrieve config.
        @rtype: {str: value}
        """
        if timeout is None or timeout == 0.0:
            if self.get_configuration(timeout=1.0) is None:
                print >> sys.stderr, 'Waiting for configuration...'
                
                with self._cv:
                    while self.config is None:
                        if rospy.is_shutdown():
                            return None
                        self._cv.wait()
        else:
            start_time = time.time()
            with self._cv:
                while self.config is None:
                    if rospy.is_shutdown():
                        return None
                    secs_left = timeout - (time.time() - start_time)
                    if secs_left <= 0.0:
                        break
                    self._cv.wait(secs_left)

        return self.config
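Example no. 11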
def run_navigation():
	global goal
	waitForGoal = 1

	print "Waiting for goal"
	while waitForGoal and not rospy.is_shutdown():
		try:
			goalPose = copy.deepcopy(goal)
			waitForGoal = 0
			print "Goal location found"
		except NameError:
			goalPose = None
			rospy.sleep(0.1)

	# Run for first goal
	compute_path()

	recalc = 0
	# loop to continue running
	while not rospy.is_shutdown():
		if goalPose.pose.position.x != goal.pose.position.x or goalPose.pose.position.y != goal.pose.position.y:
			goalPose = copy.deepcopy(goal)
			recalc = 1
		if recalc:
			# run again
			compute_path()
			recalc = 0
Example no. 12
    def execute(self, userdata):
        global lang
        rospy.loginfo('Executing: State '+self.state)
        self.next_state=""
        pids=run_all_process(self.launchers)

        #Subscribe to topics
        #Listeners
        self.subscribe=rospy.Subscriber("/listen/"+lang+"_default", Listened, self.listen_callback)

        speak_this(language["QBO WEB INTERFACE IS ACTIVE"])

        while self.next_state=="" and not rospy.is_shutdown():
            time.sleep(0.2)
            #rospy.loginfo("Waiting sentence")

        if not rospy.is_shutdown():
            changeLang = rospy.ServiceProxy("/qbo_talk/festival_language", Text2Speach)
            changeLang("cmu_us_awb_arctic_clunits")
            speak_this(language["STOPPING QBO WEB INTERFACE"])
            self.subscribe.unregister()
            rospy.loginfo("NextState: "+self.next_state)

        kill_all_process(pids)

        time.sleep(5.0)
        return self.next_state
Example no. 13
    def get_parameter_descriptions(self, timeout=None):
        """
        UNSTABLE. Return a description of the parameters for the server.
        Do not use this method as the type that is returned may change.
        
        @param timeout: time to wait before giving up
        @type  timeout: float
        """
        if timeout is None or timeout == 0.0:
            with self._cv:
                while self.param_description is None:
                    if rospy.is_shutdown():
                        return None
                    self._cv.wait()
        else:
            start_time = time.time()
            with self._cv:
                while self.param_description is None:
                    if rospy.is_shutdown():
                        return None
                    secs_left = timeout - (time.time() - start_time)
                    if secs_left <= 0.0:
                        break
                    self._cv.wait(secs_left)

        return self.param_description
Example no. 14
    def __init__(self):
        local_uri   = roslib.rosenv.get_master_uri()
        foreign_uri = rospy.get_param('~foreign_master', '')
        if not foreign_uri:
            raise Exception('foreign_master URI not specified')

        local_pubs   = rospy.get_param('~local_pubs',   [])
        foreign_pubs = rospy.get_param('~foreign_pubs', [])

        self._local_services   = rospy.get_param('~local_services',   [])
        self._foreign_services = rospy.get_param('~foreign_services', [])

        foreign_master = rosgraph.masterapi.Master(rospy.get_name(), master_uri=foreign_uri)
        r = rospy.Rate(1)
        while not _is_master_up(foreign_master) and not rospy.is_shutdown():
            rospy.logdebug('Waiting for foreign master to come up...')
            r.sleep()

        self._local_manager   = None
        self._foreign_manager = None
        if not rospy.is_shutdown():
            self._local_manager   = _RemoteManager(local_uri,   self._local_publisher_update)
            self._foreign_manager = _RemoteManager(foreign_uri, self._foreign_publisher_update)
            for t in local_pubs:
                self._local_manager.subscribe(t)
            for t in foreign_pubs:
                self._foreign_manager.subscribe(t)
Example no. 15
    def execute(self, userdata):
        global lang
        rospy.loginfo('Executing: State '+self.state)
        self.next_state=""
        
        speak_this(language["CHAT MODE IS ACTIVE"])
        time.sleep(4)
        pids=run_all_process(self.launchers)

        #Subscribe to topics
        #Listeners
        self.subscribe=rospy.Subscriber("/listen/"+lang+"_default", Listened, self.listen_callback)

        while self.next_state=="" and not rospy.is_shutdown():
            time.sleep(0.2)
            #rospy.loginfo("Waiting sentence")

        if not rospy.is_shutdown():
            speak_this(language["EXITING CHAT MODE"])
            self.subscribe.unregister()
            rospy.loginfo("NextState: "+self.next_state)
 
        kill_all_process(pids)
        time.sleep(5.0)
        return self.next_state
Example no. 16
    def search_for_target(self):
        # First pan one way with the head down a bit
        self.servo_speed[self.head_pan_joint](self.default_joint_speed)
        self.servo_speed[self.head_tilt_joint](self.default_joint_speed)   
            
        self.servo_position[self.head_pan_joint].publish(self.max_pan)
        self.servo_position[self.head_tilt_joint].publish(0.1)
        
        current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]      
 
        while not self.target_visible and not rospy.is_shutdown() and current_pan < 0.9 * self.max_pan:
            self.servo_position[self.head_pan_joint].publish(self.max_pan)
            rospy.sleep(1)
            current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
        
        # Now pan the other way
        self.servo_position[self.head_pan_joint].publish(self.min_pan)
        self.servo_position[self.head_tilt_joint].publish(0.1)
        
        current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)]
             
        while not self.target_visible and not rospy.is_shutdown() and current_pan > 0.9 * self.min_pan:
            self.servo_position[self.head_pan_joint].publish(self.min_pan)
            rospy.sleep(1)
            current_pan = self.joint_state.position[self.joint_state.name.index(self.head_pan_joint)] 
Example no. 17
    def main(self):
        """
        The main function for the turtlebot to explore frontiers.
        """
        print "Starting Main"
        while not self.map.doneSetup() and not (rospy.is_shutdown()):
            rospy.sleep(0.1)
        print "System is prepared to start running"

        try:

            self._notDoneExploring = True
            self.startupSpin()
            print "This map is difficult, let me think..."
            rospy.sleep(5)
            print "Ok, I think I'm ready to try and drive somewhere."
            self._failedList = []
            while self._notDoneExploring and not (rospy.is_shutdown()):
                self.findFrontier()
                self.driveTo(self.frontierX,self.frontierY,None)

                # # Wait until the robot is either: A) done moving or B) it cannot reach the location it wants to go to
                while self._moving and (not self._moveError) and not(rospy.is_shutdown()):
                    rospy.sleep(0.1)

                # # If the robot quit the loop because there's a move error:
                if self._moveError:
                    try:                                # # Try to recover
                        self._recover()
                    except TurtlebotException, e:        # # here for detecting that you finished
                        print e
                        self._notDoneExploring = False

            print "Frontiers Explored,"
    def get_locations(self):
        self._gripper.open()
        good_input = False
        while not good_input:
            filename2 = self._path + 'arm.config'
            self._read_file(filename2)
            self.read_file = raw_input("Would you like to use the previously "
                                       "found cut ribbon path locations (y/n)?")
            if self.read_file != 'y' and self.read_file != 'n':
                print "You must answer 'y' or 'n'"
            elif self.read_file == 'y':
                filename = self._path + self._side + '_poses.config'
                self._read_file(filename)
                good_input = True
            else:

                print ("Move %s arm into a safe location- press Circle button to confirm"% (self._side,))
                while(len(self.safe_jp) == 0 and not rospy.is_shutdown()):
                    rospy.sleep(0.1)
                print ("Cool - Got it!")

                print ("Move Gripper into location directly above ribbon - press Circle button to confirm")
                while(len(self.ribbon_location) == 0 and
                      not rospy.is_shutdown()):
                    rospy.sleep(0.1)
                print ("Cool - Got it!")

                print ("Move Gripper into cut position - press Circle button to confirm "
                       "button to record")
                while(len(self.cut_location) == 0 and not rospy.is_shutdown()):
                    rospy.sleep(0.1)
                print ("Cool - Got it!\n")
                filename = self._path + self._side + '_poses.config'
                self._save_file(filename)
                good_input = True
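Example no. 19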
    def on_goal(self, gh):
        goal = gh.get_goal()
        goal.position = self.map_position_to_calibration(goal)
        result = self.send_gripper_command(goal)
        if not result:
            self.__log.err('Could not send position command.')
            self.finish_failed_goal(gh, 'Communication Timed Out With Gripper')
            return

        self.interrupted = False
        if goal.wait:
            self.last_goal_wait = True

            # Check goal direction
            status = self.wait_for_next_status()
            if goal.position < status.gPO:
                # Gripper moving toward the open position
                is_in_motion = lambda status: status.gOBJ == 0
            else:
                # Gripper moving toward the closed position
                is_in_motion = lambda status: status.gOBJ == 0 or status.gOBJ == 2

            # Wait for motion
            timer = RobotiqCommandTimeout(seconds=3.0)
            timer.start()
            while not rospy.is_shutdown() and not self.interrupted:
                status = self.wait_for_next_status()
                if is_in_motion(status) or abs(status.gPO - goal.position) < 3:
                    break
                if timer.expired():
                    self.__log.err('Timeout on motion start w/ status: "{}"'.format(self.grip_status_to_str(status)))
                    self.finish_failed_goal(gh, 'Timeout waiting for motion to start')
                    return

            if rospy.is_shutdown():
                self.finish_failed_goal(gh, 'ROS shutdown')
                return

            # Wait until we reach the desired position
            timer = RobotiqCommandTimeout(seconds=3.0)
            timer.start()
            while not rospy.is_shutdown() and not self.interrupted:
                status = self.wait_for_next_status()
                if status.gOBJ != 0 or abs(status.gPO - goal.position) < 3:
                    break
                if timer.expired():
                    self.__log.err('Timeout on position goal w/ status: "{}"'.format(self.grip_status_to_str(status)))
                    self.finish_failed_goal(gh, 'Timeout waiting to reach desired position')
                    return

            status = self.wait_for_next_status()
            if self.interrupted or rospy.is_shutdown():
                self.abort_goal(gh, self.get_fault_text(int(status.gFLT)))
            else:
                result = self.generate_grip_result(GripperResult.SUCCESS)
                self.finish_goal(gh, result, 'Succeeded')
        else:
            result = GripperResult()
            result.result_code = result.SUCCESS
            self.finish_goal(gh, result)
Example no. 20
    def execute(self):
        self._dynamic_reconfigure_client.update_configuration({'enabled': 'True'})
        found_markers = self._initialise_rotation()
        if not found_markers:
            result = self._rotate.execute()
            if not result or self._stop_requested:
                self._post_execute(False)
                return
        rospy.loginfo("AR Pair Approach : found both ar pair markers.")
        if self._parameters['search_only']:
            rospy.loginfo("AR Pair Approach : aborting initialisation and approach as requested.")
            return
        rospy.loginfo("AR Pair Approach : setting an initial pose from the global ar pair reference.")
        self._publishers['initial_pose_trigger'].publish(std_msgs.Empty())
        rospy.loginfo("AR Pair Approach : enabling the approach controller")

        #base_target_frame = self._target_frame + '_relative_' + self._parameters['base_postfix']
        base_target_frame = self._target_frame + '_' + self._parameters['base_postfix']
        self._publishers['enable_approach_controller'].publish(base_target_frame)
        while not rospy.is_shutdown() and not self._stop_requested:
            if self._controller_finished:
                self._controller_finished = False
                break
            rospy.sleep(0.1)
        rospy.loginfo("AR Pair Approach : disabling the approach controller")
        self._publishers['disable_approach_controller'].publish(std_msgs.Empty())
        if rospy.is_shutdown() or self._stop_requested:
            self._post_execute(False)
        else:
            self._post_execute(True)
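Example no. 21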
 def run(self):
     try:
         while not rospy.is_shutdown():
             self.arm.move_to_controlled(self.init)
             if len(self.promp.need_demonstrations()) > 0 or self.promp.num_primitives == 0:
                 needs_demo = 0 if self.promp.num_primitives == 0 else self.promp.need_demonstrations().keys()[0]
                 self.say("Record a demo for Pro MP {}".format(needs_demo))
                 if self.promp.num_demos == 0:
                     self.say("Say stop to finish")
                 self.record_motion()
             else:
                 self.say('Do you want to record a motion or set a new goal?')
                 choice = self.read_user_input(['record', 'goal'])
                 if choice == 'record':
                     self.record_motion()
                 elif choice == 'goal':
                     self.set_goal()
             if not rospy.is_shutdown():
                 self.say('There are {} primitive{} and {} demonstration{}'.format(self.promp.num_primitives,
                                                                       's' if self.promp.num_primitives > 1 else '',
                                                                       self.promp.num_demos,
                                                                       's' if self.promp.num_demos > 1 else ''))
     finally:
         self.promp.plot_demos()
         self.promp.close()
         print("Your dataset has ID {} at {}".format(self.promp.id, self.promp.dataset_path))
Example no. 22
    def run(self):
        rospy.sleep(2)
        self._client = actionlib.SimpleActionClient('/dock_drive_action', AutoDockingAction)
        rospy.loginfo("waiting for auto_docking server")
        self._client.wait_for_server()
        rospy.loginfo("auto_docking server found")
        goal = AutoDockingGoal()
        rospy.loginfo("Sending auto_docking goal and waiting for result")
        self._client.send_goal(goal)

        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            #if self.preempt_requested():
            #    self.move_base_client.cancel_goal()
            #    self.service_preempt()
            #    return 'preempted'
            state = self._client.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("auto_docking succeeded")
                break
            elif state not in [GoalStatus.PENDING, GoalStatus.ACTIVE]:
                rospy.loginfo("state was:" + str(state))
                return 'failed'
            r.sleep()

        if not rospy.is_shutdown():
            rospy.loginfo("Finished docking procedure")
            rospy.sleep(1)

            # Take the opportunity to localize the robot
            rospy.loginfo("Setting initial pose to known dock location")
            self.initial_pose_pub.publish( self._create_dock_pose())
            rospy.sleep(1)
            return 'succeeded'
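Example no. 23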
 def do_qualification(self):
     """
     Executes a qualification run.
     """
     for waypoint in self.path:
         if rospy.is_shutdown(): return
         # Cancel any current goals
         self.move_base_client.cancel_all_goals()
         # Push the goal to the actionlib server
         destination = MoveBaseGoal()
         destination.target_pose.header.frame_id = "odom"
         destination.target_pose.header.stamp = rospy.Time.now()
         # Set the target location
         destination.target_pose.pose.position.x = waypoint[0]
         destination.target_pose.pose.position.y = waypoint[1]
         # Set the heading
         quat = qfe(0, 0, waypoint[2])
         destination.target_pose.pose.orientation.x = quat[0]
         destination.target_pose.pose.orientation.y = quat[1]
         destination.target_pose.pose.orientation.z = quat[2]
         destination.target_pose.pose.orientation.w = quat[3]
         # Send the desired destination to the actionlib server
         rospy.loginfo("Sending waypoint (%f, %f)@%f" % tuple(waypoint))
         self.move_base_client.send_goal(destination)
         # Wait for the goal to finish
         duration = rospy.Duration(1.0)
         for x in range(60):
             self.move_base_client.wait_for_result(duration)
             if rospy.is_shutdown(): return
Example no. 24
def main():
    rospy.init_node('intro_controller')
    dm = DragonbotManager()
    tm = TabletManager()

    rospy.loginfo("loading dialogue")
    with open(roslib.packages.get_pkg_dir("expeditions_year1")+ "/yaml/intro_dialogue.yaml", 'r') as f:
        s = f.read()

    dialogue_name = "intro"
    dialogue = yaml.load(s)
    
    dg = DialogueManager(tm, dialogue_name, dialogue[dialogue_name])
    
    dm.eye_close()
    tm.change("sleep")
    while not rospy.is_shutdown() and tm.last_press("/dragon_GUI/sleep") != 0:
        dm.say("intro-05-snore_sleep", wait = False)
        rospy.sleep(6.0)
    dm.express("wakeup")
    dm.eye_open()
    dg.play_dialogue("intro_dialogue")
    dm.eye_close()
    while not rospy.is_shutdown():
        dm.say("intro-05-snore_sleep", wait = False)
        rospy.sleep(6.0)
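Example no. 25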
    def setup_path_following(self):
        """
        Sets up the node for following the planned path.

        Will wait until the initial robot pose is set and until
        the move_base actionlib service is available.
        """
        # Create the actionlib service
        self.move_base_client = SimpleActionClient('move_base', MoveBaseAction)
        connected_to_move_base = False
        dur = rospy.Duration(1.0)
        # Wait for the robot position
        while self.robot_pose is None:
            # Check to make sure ROS is ok still
            if rospy.is_shutdown(): return
            # Print message about the waiting
            msg = "Qualification: waiting on initial robot pose."
            rospy.loginfo(msg)
            rospy.Rate(1.0).sleep()
        # Now we should plan a path
        self.plan_path()
        rospy.loginfo("Qualification: path planning complete.")
        # Now wait for move_base
        while not connected_to_move_base:
            # Wait for the server for a while
            connected_to_move_base = self.move_base_client.wait_for_server(dur)
            # Check to make sure ROS is ok still
            if rospy.is_shutdown(): return
            # Update the user on the status of this process
            msg = "Qualification: waiting on move_base."
            rospy.loginfo(msg)
        # Now we are ready to start feeding move_base waypoints
        return
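Example no. 26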
def kinect_caller():
	global bagvid, spinner, grasp_trial, grasp_test_bool, grasp_num, vid_start_lock, bag_file_location
	
	while True:
		# Pause until the main thread has loaded the video paramters
		vid_start_lock.acquire()
		if rospy.is_shutdown():
			return
		
		if not os.path.exists(bag_file_location +'Grasp' + str(grasp_num)):
			os.makedirs(bag_file_location +'Grasp' + str(grasp_num))

		bag_name = bag_file_location + 'Grasp' + str(grasp_num) +'/' + 'Grasp_' + str(grasp_num) +'_trial_'+ str(grasp_trial) + '_vid' +'.bag'	
		bagvid = rosbag.Bag(bag_name, 'w')
		image_sub = rospy.Subscriber("/camera/rgb/image_color/compressed", CompressedImage, kinect_image_cb, queue_size=1)

		period = rospy.Duration(0.25)
		while not rospy.is_shutdown() and spinner == True:
			rospy.sleep(period)

		# Reset the spinner for the next trial/grasp
		vid_start_lock.release()
		image_sub.unregister()
		del image_sub
		bagvid.close()
Example no. 27
    def __init__(self, record_rate=5, timeout=90):
        if MoveBaseClient.global_tf is None:
            MoveBaseClient.global_tf = tf.TransformListener()
        self.tf = MoveBaseClient.global_tf
        self.ac = SimpleActionClient('move_base', MoveBaseAction)
        self.record_rate = record_rate
        self.base_frame = '/map'
        self.target_frame = '/base_footprint'
        self.timeout = rospy.Duration( timeout )

        self.recording = False
        self.other_data = []

        self.subscriptions = []
        self.subscribers = []

        printed = False
        limit = rospy.Time.now() + rospy.Duration(30)
        while not self.ac.wait_for_server(rospy.Duration(5.0)) and not rospy.is_shutdown() and rospy.Time.now() < limit:
            printed = True
            rospy.loginfo('Waiting for move_base server')
            
        if rospy.Time.now() >= limit:
            self.ac = None
            return

        if rospy.is_shutdown():
            self.ac = None
            return

        if printed:
            rospy.loginfo('Got move_base server')
Example no. 28
 def __init__(self, namespace='', read_compensated=True, timeout=3.0):
   """
   FTSensor constructor. It subscribes to the following topics:
     - C{ft_sensor/diagnostics},
     - C{ft_sensor/raw}, and
     - C{endpoint_state}.
   It reports the raw and compensated (if available) FT sensor values.
   @type namespace: string
   @param namespace: Override ROS namespace manually. Useful when accessing multiple FT sensors from the same node.
   @type timeout: double
   @param timeout: Time to wait for each of the FT sensor's topics to start publishing.
   """
   ns = solve_namespace(namespace)
   # Set-up subscribers
   self.rate = read_parameter('%sft_sensor/ft_sensor_controller/publish_rate' % ns, 250.0)
   self.raw_queue = collections.deque(maxlen=self.queue_len)
   rospy.Subscriber('%sft_sensor/diagnostics' % ns, DiagnosticArray, self.cb_diagnostics)
   rospy.Subscriber('%sft_sensor/raw' % ns, WrenchStamped, self.cb_raw)
   rospy.Subscriber('%sendpoint_state' % ns, EndpointState, self.cb_compensated)
   initime = rospy.get_time()
   while not rospy.is_shutdown() and not self.is_raw_alive():
     rospy.sleep(0.1)
     if (rospy.get_time() - initime) > timeout:
       rospy.logwarn('FTSensor: Cannot read raw wrench')
       return
   if read_compensated:
     initime = rospy.get_time()
     while not rospy.is_shutdown() and not self.is_compensated_alive():
       rospy.sleep(0.1)
       if (rospy.get_time() - initime) > timeout:
         rospy.logwarn('FTSensor: Cannot read compensated wrench')
         return
   rospy.loginfo('FTSensor successfully initialized')
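Example no. 29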
    def run(self):
        print '=============    Key Position Saver    =============='
        print 'This tool is used to create key positions for steering.'
        print 'Every time you enter an angle and confirm with [Enter]\n' \
              'a new key position is created and saved in a yaml file.'
        print 'To exit the tool, type "exit" or "quit".\n'

        exiting = False
        while not exiting:
            cmd = raw_input("Enter a new angle: ")
            if cmd in ['exit', 'quit'] or rospy.is_shutdown():
                exiting = True
            else:
                try:
                    angle = float(cmd)
                except ValueError as e:
                    rospy.logerr('Invalid command: ' + cmd)
                else:
                    self.add_key_position(angle, self.get_current_position())
        if not rospy.is_shutdown():
            file_name = raw_input('Enter a name for the yaml-config: ')
            if file_name != '':
                if '.yaml' not in file_name:
                    file_name += '.yaml'
            else:
                file_name = 'key_pos.yaml'
            self.save_key_positions_to_disc(file_name)
Example no. 30
def handle_arduino(db_server, serial_port, development=False):
    server = Server(db_server)
    if not development:
        update(server, serial_port)
    start_reading(serial_port)

    if development:
        while True:
            if rospy.is_shutdown():
                kill_children()
                sys.exit(0)
            time.sleep(5)

    # Whenever the firmware module configuration changes, reflash the arduino
    last_seq = get_database_changes(
        db_server, DbName.FIRMWARE_MODULE
    )['last_seq']
    while True:
        if rospy.is_shutdown():
            break
        time.sleep(5)
        changes = get_database_changes(
            db_server, DbName.FIRMWARE_MODULE, last_seq
        )
        last_seq = changes['last_seq']
        if len(changes['results']):
            rospy.loginfo("Firmware module configuration changed; Restarting")
            serial_node.terminate()
            serial_node.wait()
            update(server, serial_port)
            start_reading(serial_port)
    kill_children()
Example no. 31
def controlLoop():

    global JOINT_STATE
    global MULTI_TRAJ_CMD
    global TRAJ_CMD
    global Q_S
    global Q_S_DOT

    # init node
    rospy.init_node('project2_part2', anonymous=True)

    # set up publishers and subscribers
    jointSub = rospy.Subscriber("/robot/joint_states",
                                JointState,
                                joint_callback,
                                queue_size=1)
    trajSub = rospy.Subscriber("/multi_trajectory_command",
                               TrajectoryMultiCommand,
                               traj_callback,
                               queue_size=1)
    jointCmdPub = rospy.Publisher("/robot/limb/right/joint_command",
                                  JointCommand,
                                  queue_size=100)
    truePub = rospy.Publisher("/robot/limb/right/joint_command",
                              JointCommand,
                              queue_size=100)

    # wait for messages
    while JOINT_STATE is None or MULTI_TRAJ_CMD is None:
        rospy.sleep(0.01)

    # set robot to first location in trajectory command:
    # verify the arm is in the right location?
    pos_f = MULTI_TRAJ_CMD[0].q_final
    pubTruePos(pos_f, truePub)

    # loop through the list of trajectory commands and publish each section
    for i in range(len(MULTI_TRAJ_CMD) - 1):

        print('performing trajectory i: %s' % i)
        TRAJ_CMD = MULTI_TRAJ_CMD[i]

        # set rates and get timing
        rate_val = 1000
        rate = rospy.Rate(rate_val)
        start = rospy.get_time()
        lin_dur = float(TRAJ_CMD.t_k.data.secs) + float(
            TRAJ_CMD.t_k.data.nsecs) / np.power(10, 9)
        quad_dur = float(TRAJ_CMD.t_k_prime.data.secs) + float(
            TRAJ_CMD.t_k.data.nsecs) / np.power(10, 9)

        # while not shutdown
        next_traj = 0
        while not rospy.is_shutdown() and not next_traj:

            # get timing
            now = rospy.get_time()
            delta_t = now - start

            # compute linear trajectory:
            if delta_t < lin_dur:
                q_k1 = MULTI_TRAJ_CMD[i].q_final
                q_k2 = MULTI_TRAJ_CMD[i + 1].q_final
                t1 = float(MULTI_TRAJ_CMD[i].t_k.data.secs) + float(
                    MULTI_TRAJ_CMD[i].t_k.data.nsecs) / np.power(10, 9)

                q_f_dot = computeLinear(q_k1, q_k2, t1)
                pubVels(q_f_dot, jointCmdPub)
                rospy.loginfo('q_f_dot_linear: %s', q_f_dot)

            # compute the quadratic transition trajectory:
            elif delta_t < lin_dur + quad_dur:
                q_f_dot_q = computeQuad(i, delta_t)
                pubVels(q_f_dot_q, jointCmdPub)
                rospy.loginfo('q_f_dot_transition: %s', q_f_dot_q)

            # if past quad transition time, exit loop to get next trajectory
            else:
                next_traj = 1

            # sleep
            rate.sleep()
Example no. 32
import rospy
from geometry_msgs.msg import Twist, Vector3
from sensor_msgs.msg import LaserScan
import numpy as np

ahead = 1.0
def distance_scan(data):
    global ahead
    readings = np.array(data.ranges).round(decimals=2)
    ahead = readings[0]

v = 0.2  # Linear velocity
forward = Twist(Vector3(v,0,0), Vector3(0,0,0))
backward = Twist(Vector3(-v,0,0), Vector3(0,0,0))
stop = Twist(Vector3(0,0,0), Vector3(0,0,0))

if __name__ == "__main__":
    rospy.init_node("roda_exemplo")
    pub = rospy.Publisher("cmd_vel", Twist, queue_size=3)
    scan = rospy.Subscriber("/scan", LaserScan, distance_scan)

    try:
        while not rospy.is_shutdown():
            if ahead >= 1.02:
                pub.publish(forward)
                rospy.sleep(2.0)
            elif ahead <= 1.00:
                pub.publish(backward)
                rospy.sleep(2.0)

    except rospy.ROSInterruptException:
        print("Ocorreu uma exceção com o rospy")
Example no. 33
    def run(self):
        """Run user code
        """
        while not rospy.is_shutdown():
            time.sleep(0.1)
            # Try to get data
            try:
                rospy.loginfo("Battery voltage: %f"%self.sub.get_data()['mavros']['battery']['voltage'])
                rospy.loginfo("Static pressure: %f"%self.sub.get_data()['mavros']['imu']['static_pressure']['fluid_pressure'])
                rospy.loginfo("Diff pressure: %f"%self.sub.get_data()['mavros']['imu']['diff_pressure']['fluid_pressure'])
                #rospy.loginfo(self.sub.get_field('fluid_pressure')['mavros']['imu']['static_pressure'])
                #rospy.loginfo(self.sub.get_field('fluid_pressure')['mavros']['imu']['diff_pressure'])
                #rospy.loginfo(self.sub.get_data()['mavros']['rc']['in']['channels'])
                #rospy.loginfo(self.sub.get_data()['mavros']['rc']['out']['channels'])
            except Exception as error:
                print('Get data error:', error)

            try:
                # Get joystick data
                joy = self.sub.get_data()['joy']['axes']
                but = self.sub.get_data()['joy']['buttons']
                
                # Converting button presses to roll and pitch setting modification
                self.curr_pitch_setting = self.enforce_limit(self.curr_pitch_setting - joy[7]*0.1)
                self.curr_roll_setting = self.enforce_limit(self.curr_roll_setting - joy[6]*0.1)
                if (but[4] == 1):
                    self.curr_pitch_setting = 0.0
                if (but[5] == 1):
                    self.curr_roll_setting = 0.0

                # Camera tilt setting (one button moves it up, the other down)
                camsetpt = but[4]-but[5]

                forces = [0.0, 0.0, 0.0]
                torques = [0.0, 0.0, 0.0]

                forces[0] = joy[4]  #Surge
                forces[1] = -joy[3]  #Sway
                forces[2] = joy[1]  #Heave
                torques[0] = self.curr_roll_setting  #Roll
                torques[1] = self.curr_pitch_setting  #Pitch
                torques[2] = -joy[0]  #Yaw

                # Call to a specialised function
                self.apply_control(forces, torques, camsetpt)

            except Exception as error:
                print('joy error:', error)

            try:
                # Get pwm output and send it to Gazebo model
                rc = self.sub.get_data()['mavros']['rc']['out']['channels']
                joint = JointState()
                joint.name = ["thr{}".format(u + 1) for u in range(5)]
                joint.position = [self.pwm_to_thrust(pwm) for pwm in rc]

                self.pub.set_data('/BlueRov2/body_command', joint)
            except Exception as error:
                print('rc error:', error)

            try:
                if not self.cam.frame_available():
                    continue

                # Show video output
                frame = self.cam.frame()
                # Added code SK
                height, width, depth = frame.shape
                newframe = cv2.resize(frame,(int(width*0.65),int(height*0.65)))
                cv2.imshow('frame', newframe)
                #cv2.imshow('frame', frame)
                cv2.waitKey(1)
            except Exception as error:
                print('imshow error:', error)
Example no. 34
 def spin(self):
     rate = rospy.Rate(100)
     while not rospy.is_shutdown():
         self.choose_angle()
         rate.sleep()
Example no. 35
	def driveX(self, distance, speed):
		'''
		Drive in x direction a specified distance based on odometry information
		distance [m]: the distance to travel in the x direction (>0: forward, <0: backwards)
		speed [m/s]: the speed with which to travel; must be positive
		'''

		forward = (distance >= 0)
		
		# record the starting transform from the odom to the base_link frame
		# Note that here the 'from' frame precedes 'to' frame which is opposite to how they are
		# ordered in tf.TransformBroadcaster's sendTransform function.
		# startTranslation is a tuple holding the x,y,z components of the translation vector
		# startRotation is a tuple holding the four components of the quaternion
		(startTranslation, startRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
		
		done = False

		velocityCommand = Twist()
		if forward:
			velocityCommand.linear.x = speed # going forward m/s
		else:
			velocityCommand.linear.x = -speed # going backwards m/s
			
		velocityCommand.angular.z = 0.0 # no angular velocity

		while not rospy.is_shutdown():
			try:
				(currentTranslation, currentRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
				

				print "Current Tr",currentTranslation
				print "CUrrent Rot",currentRotation

				dx = currentTranslation[0] - startTranslation[0]

				dy = currentTranslation[1] - startTranslation[1]
				

				print "Dx",dx
				print "Dy",dy
				distanceMoved = math.sqrt(dx * dx + dy * dy)


				print "Distance moved",distanceMoved

				if (forward):
					arrived = distanceMoved >= distance
				else:
					arrived = distanceMoved >= -distance
				
				if (arrived):
					done = True
					break
				else:
					# send the drive command
					print("sending vel command" + str(velocityCommand))
					self._VelocityCommandPublisher.publish(velocityCommand)
				
			except (tf.LookupException, tf.ConnectivityException):
				continue

			rospy.sleep(0.1)

		#stop
		velocityCommand.linear.x = 0.0
		velocityCommand.angular.z = 0.0
		self._VelocityCommandPublisher.publish(velocityCommand)
		
		return done
Example no. 36
	def turn(self, angle, angularSpeed):
		'''
		Turn the robot based on odometry information
		angle [rad]: the angle to turn (positive angles mean counter-clockwise rotation)
		angularSpeed [rad/s]: the speed with which to turn; must be positive
		'''

		ccw = (angle >= 0) # counter clockwise rotation
		
		# record the starting transform from the odom to the base frame
		# Note that here the 'from' frame precedes 'to' frame which is opposite to how they are
		# ordered in tf.TransformBroadcaster's sendTransform function.
		(startTranslation, startRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
		startAngle = 2 * math.atan2(startRotation[2], startRotation[3])

		print "start angle: " + str(startAngle)
		previousAngle = startAngle
		angleOffset = 0.0
		
		done = False

		velocityCommand = Twist()
		velocityCommand.linear.x = 0.0 # no linear velocity
		if ccw:
			velocityCommand.angular.z = angularSpeed
		else:
			velocityCommand.angular.z = -angularSpeed

		while not rospy.is_shutdown():
			try:
				(currentTranslation, currentRotation) = self._TransformListener.lookupTransform("/odom", "/base_link", rospy.Time(0))
				currentAngle = 2 * math.atan2(currentRotation[2], currentRotation[3])
				print "currentAngle: " + str(currentAngle)
				
				# we need to handle roll over of the angle
				if (currentAngle * previousAngle < 0 and math.fabs(currentAngle - previousAngle) > math.pi / 2):
					if (currentAngle > previousAngle):
						print "subtracting"
						angleOffset = angleOffset - 2 * math.pi
					else:
						print "adding"
						angleOffset = angleOffset + 2 * math.pi
				
				angleTurned = currentAngle + angleOffset - startAngle
				previousAngle = currentAngle
				
				print "angleTurned: " + str(angleTurned)
				if (ccw):
					arrived = (angleTurned >= angle)
				else:
					arrived = (angleTurned <= angle)
				
				print arrived

				if (arrived):
					done = True
					break
				else:
					# send the drive command
					print("sending vel command" + str(velocityCommand))
					self._VelocityCommandPublisher.publish(velocityCommand)
				
			except (tf.LookupException, tf.ConnectivityException):
				continue

			time.sleep(0.1)

		#stop
		velocityCommand.linear.x = 0.0
		velocityCommand.angular.z = 0.0
		self._VelocityCommandPublisher.publish(velocityCommand)
		
		return done
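Example no. 37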
    def run(self):
        rate = rospy.Rate(30)
        while not rospy.is_shutdown():
            if self._f_system_down:
                # TODO: test this
                self.save_vja_data()
                # rospy.loginfo('robot_attention_tracker = ' + str(self.robot_attention_tracker))
                # rospy.loginfo('vja_rc_tracker = ' + str(self.vja_rc_tracker))
                break

            if self._system_state is None:
                rate.sleep()
                continue

            # calculate and print out vja
            # if (self._system_state >= SystemState.SYSTEM_READY) and (self._system_state <= SystemState.ROBOT_SLEEP):
            if True:
                with self.lock_robot_attention_target, self.lock_user_attention_targets:
                    # TODO: during animation, the robot's attention should be a special case? IN-MOTION?
                    # color coding: http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python

                    # if (self.robot_attention_target == 'parent') and (self.parent_attention_target == 'robot'):
                    #     rospy.loginfo('\x1b[6;30;47m Having Mutual Gaze between [ROBOT and PARENT] \x1b[0m')
                    # if (self.robot_attention_target == 'child') and (self.user_attention_target == 'robot'):
                    #     rospy.loginfo('\x1b[6;37;45m Having Mutual Gaze between [ROBOT and CHILD] \x1b[0m')
                    # if (self.parent_attention_target == 'child') and (self.user_attention_target == 'parent'):
                    #     rospy.loginfo('\x1b[6;30;46m Having Mutual Gaze between [PARENT and CHILD] \x1b[0m')

                    # if (self.robot_attention_target == 'screen') and (self.user_attention_target == 'screen'):
                    #     rospy.loginfo('\x1b[6;30;42m Having Visual Joint Attention on [screen] between [ROBOT and CHILD] \x1b[0m')
                    # elif (self.robot_attention_target == 'parent') and (self.user_attention_target == 'parent'):
                    #     rospy.loginfo('\x1b[6;30;42m Having Visual Joint Attention on [parent] between [ROBOT and CHILD] \x1b[0m')

                    # if (self.parent_attention_target == 'screen') and (self.user_attention_target == 'screen'):
                    #     rospy.loginfo('\x1b[6;30;43m Having Visual Joint Attention on [screen] between [PARENT and CHILD] \x1b[0m')
                    # elif (self.parent_attention_target == 'robot') and (self.user_attention_target == 'robot'):
                    #     rospy.loginfo('\x1b[6;30;43m Having Visual Joint Attention on [robot] between [PARENT and CHILD] \x1b[0m')

                    # if self.user_attention_target == 'outside':
                    #     rospy.loginfo('\x1b[6;37;41m [CHILD] is not paying attention \x1b[0m')


                    # update the robot_attention_tracker
                    if self.robot_attention_target == 'screen':
                        self.robot_attention_tracker[self._system_state-1][self._index_robot_attention_tracker_screen] += 1
                    elif self.robot_attention_target == 'child':
                        self.robot_attention_tracker[self._system_state-1][self._index_robot_attention_tracker_child] += 1
                    elif self.robot_attention_target == 'parent':
                        self.robot_attention_tracker[self._system_state-1][self._index_robot_attention_tracker_parent] += 1

                    # update the vja_rc_tracker
                    if (self.robot_attention_target == 'screen') and (self.user_attention_target == 'screen'):
                        self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_screen] += 1
                    elif (self.robot_attention_target == 'parent') and (self.user_attention_target == 'parent'):
                        self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_parent] += 1
                    elif (self.robot_attention_target == 'child') and (self.user_attention_target == 'robot'):
                        # indication of mutual gaze between child and robot
                        self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_robot] += 1

                    # calculate current vja_rc_screen, vja_rc_parent, and mg_rc (in percentage)
                    if self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_screen] == 0:
                        self.vja_rc_screen = 0
                    else:
                        self.vja_rc_screen = float(self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_screen]) / \
                                             self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_screen]

                    if self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_parent] == 0:
                        self.vja_rc_parent = 0
                    else:
                        self.vja_rc_parent = float(self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_parent]) / \
                                             self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_parent]

                    if self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_robot] == 0:
                        self.mg_rc = 0
                    else:
                        self.mg_rc = float(self.vja_rc_tracker[self._system_state-1][self._index_vja_rc_tracker_robot]) / \
                                     self.robot_attention_tracker[self._system_state-1][self._index_vja_rc_tracker_robot]

                    # if (self._system_state >= SystemState.SYSTEM_READY):
                        # rospy.loginfo('\033[94m vja_rc_screen = %f, vja_rc_parent = %f, mg_rc = %f \033[0m', self.vja_rc_screen, self.vja_rc_parent, self.mg_rc)
                        # rospy.loginfo('robot_attention_tracker = ' + str(self.robot_attention_tracker))
                        # rospy.loginfo('vja_rc_tracker = ' + str(self.vja_rc_tracker))

            rate.sleep()
Example no. 38
def secure_log(func, s):
    if rospy.is_shutdown():
        print s
    else:
        func(s)
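Example no. 39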
    def __init__(self):
        self.camera_topic = rospy.get_param("/object_detection_node/camera_topic")
        self.obj_detect_results_topic = rospy.get_param("/object_detection_node/obj_detect_results_topic")
        self.obj_detect_viz_topic = rospy.get_param("/object_detection_node/obj_detect_viz_topic")

        rospy.init_node('object_detection')
        self.camera_sub = rospy.Subscriber(self.camera_topic,
            Image, self.image_update_callback, queue_size=1)
        self.current_frame = None
        self.bridge = CvBridge()

        rospy.loginfo("Object Detection Initializing")
        rospy.loginfo("Tiny Yolo V3")

        self.model_path = rospy.get_param("/object_detection_node/model_path")
        self.classes_path = rospy.get_param("/object_detection_node/classes_path")
        self.anchors_path = rospy.get_param("/object_detection_node/anchors_path")
        self.iou_threshold = rospy.get_param("/object_detection_node/iou_threshold")
        self.score_threshold = rospy.get_param("/object_detection_node/score_threshold")
        self.input_size = (416, 416)

        self.detector = ObjectDetector(model_path=self.model_path,
                                       classes_path=self.classes_path,
                                       anchors_path=self.anchors_path,
                                       score_threshold=self.score_threshold,
                                       iou_threshold=self.iou_threshold,
                                       size=self.input_size)

        labels_visualizer = utils.LabelsVisualizer(self.detector.class_names)
        detection_image_pub = rospy.Publisher(self.obj_detect_viz_topic + "/compressed",
            CompressedImage, queue_size=1)
        detection_results_pub = rospy.Publisher(self.obj_detect_results_topic,
            DetectionResults, queue_size=1)

        rate = rospy.Rate(15)

        while not rospy.is_shutdown():
            time = rospy.get_time()
            if self.current_frame is not None:
                print("new image at time %.2f" % time)
                out_boxes, out_scores, out_classes = \
                    self.detector.detect_object(self.current_frame)
                inference_end_time = rospy.get_time()
                print("*** inference time: %.3f" % (inference_end_time - time))

                if detection_image_pub.get_num_connections() > 0:
                    image = labels_visualizer.draw_bboxes(self.current_frame,
                        out_boxes, out_scores, out_classes)
                    print("***   drawing time: %.3f" % (rospy.get_time() - inference_end_time))

                    # img_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")
                    msg = utils.cv2_to_compressed_imgmsg(image)
                    detection_image_pub.publish(msg)

                msg = self.convert_results_to_message(out_boxes, out_scores, out_classes)
                msg.is_green = self.check_traffic_segmentation(out_boxes, out_classes)
                if detection_results_pub.get_num_connections() > 0:
                    detection_results_pub.publish(msg)
            else:
                print("waiting for image at time %.2f" % time)
            rate.sleep()
Esempio n. 40
0
def update_location():
    global DATA_LOCK, CURRENT_ODOM, MOVE_FLAG, CURRENT_TWIST, CURRENT_POSE2D, CURRENT_IMU, CURRENT_POWER
    rate = rospy.Rate(50)
    odom_pub = rospy.Publisher("/xqserial_server/Odom", Odometry, queue_size=0)
    pose_pub = rospy.Publisher("/xqserial_server/Pose2D", Pose2D, queue_size=0)
    twist_pub = rospy.Publisher("/xqserial_server/Twist", Twist, queue_size=0)
    power_pub = rospy.Publisher("/xqserial_server/Power",
                                Float64,
                                queue_size=0)
    imu_pub = rospy.Publisher("/xqserial_server/IMU", Imu, queue_size=0)
    status_flag_pub = rospy.Publisher("/xqserial_server/StatusFlag",
                                      Int32,
                                      queue_size=0)
    odom_tf = tf.TransformBroadcaster()
    while not rospy.is_shutdown():
        with DATA_LOCK:
            if MOVE_FLAG.data:
                # update the current pose
                if CURRENT_TWIST.linear.x > 10:
                    rospy.logerr("Invalid current twist")
                CURRENT_POSE2D.x += CURRENT_TWIST.linear.x * 0.02 * math.cos(
                    CURRENT_POSE2D.theta * math.pi / 180)
                CURRENT_POSE2D.y += CURRENT_TWIST.linear.x * 0.02 * math.sin(
                    CURRENT_POSE2D.theta * math.pi / 180)
                CURRENT_POSE2D.theta += CURRENT_TWIST.angular.z * 0.02 * 180 / math.pi
                if CURRENT_POSE2D.x > 1000 or CURRENT_POSE2D.y > 1000:
                    rospy.logerr("Invalid current pose")
                if CURRENT_POSE2D.theta > 360:
                    CURRENT_POSE2D.theta -= 360
                if CURRENT_POSE2D.theta < 0:
                    CURRENT_POSE2D.theta += 360
                pose_pub.publish(CURRENT_POSE2D)
                # update odom
                CURRENT_ODOM.header.stamp = rospy.Time.now()
                CURRENT_ODOM.header.frame_id = "odom"
                CURRENT_ODOM.pose.pose.position.x = CURRENT_POSE2D.x
                CURRENT_ODOM.pose.pose.position.y = CURRENT_POSE2D.y
                CURRENT_ODOM.pose.pose.position.z = 0
                q_angle = quaternion_from_euler(0,
                                                0,
                                                CURRENT_POSE2D.theta *
                                                math.pi / 180,
                                                axes='sxyz')
                q = Quaternion(*q_angle)
                CURRENT_ODOM.pose.pose.orientation = q
                CURRENT_ODOM.twist.twist = CURRENT_TWIST
                odom_pub.publish(CURRENT_ODOM)
            else:
                pose_pub.publish(CURRENT_POSE2D)
                CURRENT_ODOM.header.stamp = rospy.Time.now()
                CURRENT_ODOM.header.frame_id = "odom"
                odom_pub.publish(CURRENT_ODOM)
        # update IMU data
        CURRENT_IMU.header.frame_id = "imu"
        CURRENT_IMU.header.stamp = rospy.Time.now()
        q_angle = quaternion_from_euler(0,
                                        0,
                                        CURRENT_POSE2D.theta * math.pi / 180,
                                        axes='sxyz')
        q = Quaternion(*q_angle)
        CURRENT_IMU.orientation = q
        CURRENT_IMU.angular_velocity.x = 0
        CURRENT_IMU.angular_velocity.y = 0
        CURRENT_IMU.angular_velocity.z = CURRENT_TWIST.angular.z
        CURRENT_IMU.linear_acceleration.x = 0
        CURRENT_IMU.linear_acceleration.y = 0
        CURRENT_IMU.linear_acceleration.z = 9.8
        imu_pub.publish(CURRENT_IMU)
        # update battery voltage: drop 0.01 V per second to test the auto-recharge routine
        current_power = None
        with DATA_LOCK:
            current_power = CURRENT_POWER
            current_power.data -= 0.01 / 50
            if current_power.data < 10:
                current_power.data = 10
        power_pub.publish(current_power)
        # update statusFlag
        current_status = Int32()
        current_status.data = 0
        status_flag_pub.publish(current_status)
        # publish Twist
        twist_pub.publish(CURRENT_TWIST)
        # publish the base_footprint -> odom tf
        odom_tf.sendTransform((CURRENT_POSE2D.x, CURRENT_POSE2D.y, 0),
                              tf.transformations.quaternion_from_euler(
                                  0, 0, CURRENT_POSE2D.theta * math.pi / 180),
                              rospy.Time.now(), "base_footprint", "odom")
        rate.sleep()
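The loop above dead-reckons the 2D pose by integrating the commanded twist at 50 Hz (dt = 0.02 s), keeping theta in degrees. A minimal standalone sketch of the same update, assuming theta is kept in radians instead so the repeated degree conversions disappear (hypothetical helper, not part of the node):

    import math

    def integrate_pose(x, y, theta, v, omega, dt=0.02):
        # unicycle model: advance along the current heading, then rotate
        x += v * dt * math.cos(theta)
        y += v * dt * math.sin(theta)
        theta = (theta + omega * dt) % (2.0 * math.pi)
        return x, y, theta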
Esempio n. 41
0
def main():
    """RSDK Inverse Kinematics Pick and Place Example

    A Pick and Place example using the Rethink Inverse Kinematics
    Service which returns the joint angles for a requested Cartesian Pose.
    This ROS Service client is used to request both pick and place
    poses in the /base frame of the robot.

    Note: This is a highly scripted and tuned demo. The object location
    is "known" and movement is done completely open loop. It is expected
    behavior that Baxter will eventually mis-pick or drop the block. You
    can improve on this demo by adding perception and feedback to close
    the loop.
    """
    rospy.init_node("moveit_check")
    # Load Gazebo Models via Spawning Services
    # Note that the models reference is the /world frame
    # and the IK operates with respect to the /base frame
    load_gazebo_models()
    # Remove models from the scene on shutdown
    rospy.on_shutdown(delete_gazebo_models)
    # box_pose = geometry_msgs.msg.PoseStamped()
    # box_pose.header.frame_id = "left_arm"
    # box_pose.pose.orientation.w = 1.0
    # box_name = "box"
    # scene.add_box(box_name, box_pose, size=(0.5, 0.5, 0.5))
    # Wait for the All Clear from emulator startup
    rospy.wait_for_message("/robot/sim/started", Empty)

    limb = 'left'
    hover_distance = 0.15 # meters
    # Starting Joint angles for left arm
    starting_joint_angles = {'left_w0': 0.6699952259595108,
                             'left_w1': 1.030009435085784,
                             'left_w2': -0.4999997247485215,
                             'left_e0': -1.189968899785275,
                             'left_e1': 1.9400238130755056,
                             'left_s0': -0.08000397926829805,
                             'left_s1': -0.9999781166910306}

    pnp = PickAndPlace(limb, hover_distance)
    # An orientation for gripper fingers to be overhead and parallel to the obj
    overhead_orientation = Quaternion(
                             x=-0.0249590815779,
                             y=0.999649402929,
                             z=0.00737916180073,
                             w=0.00486450832011)
    block_poses = list()
    # The Pose of the block in its initial location.
    # You may wish to replace these poses with estimates
    # from a perception node.
    block_poses.append(Pose(
        position=Point(x=0.7, y=0.15, z=-0.129),
        orientation=overhead_orientation))
    # Feel free to add additional desired poses for the object.
    # Each additional pose will get its own pick and place.
    block_poses.append(Pose(
        position=Point(x=0.75, y=0.0, z=-0.129),
        orientation=overhead_orientation))
    # Move to the desired starting angles
    pnp.move_to_start(starting_joint_angles)
    idx = 0
    while not rospy.is_shutdown():
        print("\nPicking...")
        pnp.pick(block_poses[idx])
        print("\nPlacing...")
        idx = (idx+1) % len(block_poses)
        pnp.place(block_poses[idx])
    return 0
Esempio n. 42
0
def main():
    ab = airsim_bridge()
    rate = rospy.Rate(4)
    while not rospy.is_shutdown():
        ab.run()
        rate.sleep()
Esempio n. 43
0
def state_estimation():
    # initialize node
    rospy.init_node('state_estimation', anonymous=True)

    # topic subscriptions / publications
    rospy.Subscriber('imu/data', Imu, imu_callback)
    rospy.Subscriber('encoder', Encoder, enc_callback)
    rospy.Subscriber('ecu', ECU, ecu_callback)
    state_pub = rospy.Publisher('state_estimate', Vector3, queue_size=10)

    # get vehicle dimension parameters
    L_a = rospy.get_param("L_a")       # distance from CoG to front axle
    L_b = rospy.get_param("L_b")       # distance from CoG to rear axle
    m   = rospy.get_param("m")         # mass of vehicle
    I_z = rospy.get_param("I_z")       # moment of inertia about z-axis
    vhMdl   = (L_a, L_b, m, I_z)

    # get encoder parameters
    dt_vx   = rospy.get_param("state_estimation/dt_v_enc")     # time interval to compute v_x

    # get tire model
    B   = rospy.get_param("tire_model/B")
    C   = rospy.get_param("tire_model/C")
    mu  = rospy.get_param("tire_model/mu")
    TrMdl = ([B,C,mu],[B,C,mu])

    # get external force model
    a0  = rospy.get_param("air_drag_coeff")
    Ff  = rospy.get_param("friction")

    # get EKF observer properties
    q_std   = rospy.get_param("state_estimation/q_std")             # std of process noise
    r_std   = rospy.get_param("state_estimation/r_std")             # std of measurement noise
    v_x_min     = rospy.get_param("state_estimation/v_x_min")  # minimum velocity before using EKF

    # set node rate
    loop_rate   = 50
    dt          = 1.0 / loop_rate
    rate        = rospy.Rate(loop_rate)
    t0          = time.time()

    # estimation variables for Luenberger observer
    z_EKF       = array([1.0, 0.0, 0.0])

    # estimation variables for EKF
    P           = eye(3)                # initial dynamics covariance matrix
    Q           = (q_std**2)*eye(3)     # process noise covariance matrix
    R           = (r_std**2)*eye(2)     # measurement noise covariance matrix

    while not rospy.is_shutdown():

        # publish state estimate
        (v_x, v_y, r) = z_EKF           # note: r = EKF estimate of the yaw rate

        # publish information
        state_pub.publish( Vector3(v_x, v_y, r) )

        # apply EKF
        if v_x_enc > v_x_min:
            # get measurement
            y = array([v_x_enc, w_z])

            # define input
            u       = array([ d_f, FxR ])

            # build extra arguments for non-linear function
            F_ext = array([ a0, Ff ]) 
            args = (u, vhMdl, TrMdl, F_ext, dt) 

            # apply EKF and get each state estimate
            (z_EKF,P) = ekf(f_3s, z_EKF, P, h_3s, y, Q, R, args )

        else:
            z_EKF[0] = float(v_x_enc)
            z_EKF[2] = float(w_z)
        
        # wait
        rate.sleep()
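For reference, the call ekf(f_3s, z_EKF, P, h_3s, y, Q, R, args) above performs one predict/update cycle of an extended Kalman filter. A hedged sketch of such a step, assuming the caller supplies the process model f, the measurement model h, and their Jacobians F and H (the real ekf presumably also threads the extra args through f_3s/h_3s):

    from numpy import dot, eye
    from numpy.linalg import inv

    def ekf_step(z, P, y, Q, R, f, h, F, H):
        # predict: propagate state and covariance through the process model
        z_pred = f(z)
        P_pred = dot(F, dot(P, F.T)) + Q
        # update: correct with measurement y via the Kalman gain
        S = dot(H, dot(P_pred, H.T)) + R
        K = dot(P_pred, dot(H.T, inv(S)))
        z_new = z_pred + dot(K, y - h(z_pred))
        P_new = dot(eye(len(z)) - dot(K, H), P_pred)
        return z_new, P_new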
Esempio n. 44
0
def repeat_move():
    """ Indefinitely move robot according to movement file """
    while not rospy.is_shutdown():
        basic_mover.move()
Esempio n. 45
0
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
if __name__ == '__main__':
    rospy.init_node('practice_node')
    rospy.loginfo('Node starting ...')
    rate = rospy.Rate(2)
    pub = rospy.Publisher('/random_topic1', String, queue_size=10)
    msg = String()
    while not rospy.is_shutdown():
        msg.data = 'Hello World 1 !'
        pub.publish(msg)
        rate.sleep()
    rospy.loginfo('Exiting node ...')
Esempio n. 46
0
def main():
    global depthData, isDepthReady, currentNode, targetNode, directionNumber, m
    rospy.init_node('depth_example', anonymous=True)
    rospy.Subscriber("/camera/depth/image", Image, depthCallback, queue_size=10)
    rospy.Subscriber("/camera/rgb/image_color", Image, updateColorImage)
    rospy.Subscriber('/odom', Odometry, odomCallback)
    
    initMap()
    odomReset.publish(Empty())
    goHome = False
    # Remember to record odom somewhere: if minor adjustments are needed (like slightly turning left), we should make them before the robot starts off in the wrong direction.
    
    while not isDepthReady and not rospy.is_shutdown():
        rospy.sleep(0.1)

    currentNode = raw_input('Where am I? ')

    if(currentNode != 'start'):
        directionNumber = int(raw_input('Current direction number? '))


    while not rospy.is_shutdown() and not goHome:
        targetNode = raw_input('Where to? ')
        newDirectionNumber = int(raw_input('Direction number? '))

        if(currentNode == 'start'):
            runCommand("F", 0.5, 24)
            runCommand("R", 0.5, 90)
            currentNode = 'start2'

        if(targetNode == 'start'):
            goHome = True
            targetNode = 'start2'

        path = m.find_path(currentNode, targetNode, newDirectionNumber)
        path.pop(0)
        print path
        print m.get_info(currentNode, path[0])

        if(newDirectionNumber != directionNumber):
            runCommand("R", 0.5, 180)

        directionNumber = newDirectionNumber
 
        for node in path:
            runCommand("F", 0.5, m.get_info(currentNode, node)['distance'])

            if "R" in node or "L" in node:
                turnInfo = node.split('-')
                turnDir = turnInfo[directionNumber]
                runCommand(turnDir, 0.5, float(turnInfo[2]))

            currentNode = node

        if(goHome):
            if directionNumber == 0:
                runCommand("R", 0.5, 90)
            else:
                runCommand("L", 0.5, 90)

            runCommand("F", 0.5, 24)
Esempio n. 47
0
def listener():
    rospy.init_node('record_data', anonymous=True, disable_signals=True)

    global BAX_COLUMNS
    global WATCH_COLUMNS
    global NANOS_TO_MILLIS
    global bax_file_name
    global bax_row
    global watch_rows
    global command
    global sequence_counter

    resetNode()

    rospy.loginfo("Commands :\ns to stop\nr to remove the last file\nn to start new sequence\nc TWICE to shutdown the node\n")

    rate = rospy.Rate(120) # rate
    watch_sub = rospy.Subscriber('/imu_giver', ImuPackage, watchCallback)

    rospy.loginfo("START RECORDING SEQUENCE " + str(sequence_counter))

    getkey_thread = Thread(target = getKey)
    getkey_thread.start()

    left_arm = Limb('left')
    left_gripper = Gripper('left')

    while not rospy.is_shutdown():
        rate.sleep()

        t = round(rospy.get_rostime().to_nsec()*NANOS_TO_MILLIS)

        l_ang = list(left_arm.joint_angles().values())
        l_vel = list(left_arm.joint_velocities().values())
        l_eff = list(left_arm.joint_efforts().values())
        l_grip_pos = str(left_gripper.position())

        bax_row = l_ang + l_vel + l_eff
        bax_row.append(l_grip_pos)
        bax_row.append(str(t))
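        # bax_row layout: joint angles, velocities and efforts (7 values each
        # for a Baxter arm), then the gripper position and a millisecond timestamp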

        with open(bax_file_name + str(sequence_counter), 'a') as writeFile:
            writer = csv.writer(writeFile)
            writer.writerow(bax_row)


        if (watch_rows[1]==1):
            with open(watch_file_name + str(sequence_counter), 'a') as writeFile:
                writer = csv.writer(writeFile)
                writer.writerows(watch_rows[0])
            watch_rows[1]=0

        # s to stop
        # r to remove the last file
        # n to start new sequence
        # c TWICE to shutdown the node
        shutdown = False
        if (command == 's') :
            watch_sub.unregister()
            rospy.loginfo("FINISH RECORDING SEQUENCE " + str(sequence_counter))
            rospy.loginfo("NODE STOPPED!")
            while True :
                rospy.Rate(2).sleep()

                if (command == 'r') :
                    os.remove(bax_file_name + str(sequence_counter))
                    os.remove(watch_file_name + str(sequence_counter))
                    sequence_counter = sequence_counter - 1
                    rospy.loginfo("FILE REMOVED!")
                    command = ''

                if (command == 'n') :
                    rospy.loginfo("RESET NODE!")
                    sequence_counter = sequence_counter + 1
                    resetNode()
                    watch_sub = rospy.Subscriber('/imu_giver', ImuPackage, watchCallback)
                    rospy.loginfo("START RECORDING SEQUENCE " + str(sequence_counter))
                    break

                if (command == 'c') :
                    rospy.loginfo("Enter 'c' to shutdown... ")
                    shutdown = True
                    break

        if (shutdown) :
            rospy.signal_shutdown("reason...")

    getkey_thread.join()
Esempio n. 48
0
    def __init__(self):
        rospy.init_node('ping_plotter')
        rospy.Subscriber('/hydrophones/ping', Ping, self.signal_plotter)

        rate = rospy.Rate(5)

        self.x = []
        self.a = []
        self.b = []
        self.c = []
        self.d = []
        self.trigger = False

        plt.ion()
        fig, ax = plt.subplots(1, 1)

        while not rospy.is_shutdown():
            #print self.trigger
            if self.trigger:
                #freq = np.fft.fft(self.x, self.a)
                #freq = np.abs(freq)
                #print freq                

                Fs = 300000
                Ts = 1.0/Fs        

                legends = [None]*4
                wave = [None]*len(self.a)

                if len(self.a) != 0:
                    for i in range(len(self.a)):
                        wave[i] = self.a[i]        

                y = self.a
                #print wave
                n = len(y)

                t = np.arange(0,n*Ts,Ts)

                y = wave[n/2:n]
                n = len(y)

                k = np.arange(n)
                T = float(n)/float(Fs)            
                frq = k/T # two sides frequency range

                frq = frq[range(n/2)] # one side frequency range
                Y = np.fft.fft(y)/n # fft computing and normalization
                Y = Y[range(n/2)]/2**16

                '''for i in range(5,n/2):
                    
                    if abs(Y[i]) > 80:
                        print "ping detected"
                        left_line = np.arange(0,300,300)'''

                '''ax[0].cla()
                ax[0].set_title("Four Hydrophone Channels Full Scale")
                ax[0].plot(t,self.a)
                ax[0].plot(t,self.b)
                ax[0].plot(t,self.c)
                ax[0].plot(t,self.d)
                ax[0].set_ylim(0,65536)
                ax[0].set_xlabel('Time')
                ax[0].set_ylabel('Amplitude')'''

                ax.cla()
                #ax.set_title("Four Hydrophone Channels Autosized")
                ax.plot(t,self.a, linewidth=2.0, label='Hydrophone 0')
                ax.plot(t,self.b, linewidth=2.0, label='Hydrophone 1')
                ax.plot(t,self.c, linewidth=2.0, label='Hydrophone 2')
                ax.plot(t,self.d, linewidth=2.0, label='Hydrophone 3')
                #ax[1].set_xlabel('Time')
                #ax[1].set_ylabel('Amplitude')


                ax.legend(loc="upper left", fontsize=25)
                ax.set_title("Actual Received Signals", weight = 'bold', size = 37, x = 0.5, y = 1.02, horizontalalignment='center')
                ax.set_xlabel('Time (seconds)', size = 25, weight = 'bold', x = 0.5, y = 0)
                ax.set_ylabel('Amplitude', size = 25, weight = 'bold', x = 0, y = 0.5)
                ax.set_xlim(0,0.0008)
                ax.tick_params(axis='both', which='major', labelsize=25, pad=20)
                ax.tick_params(axis='both', which='minor', labelsize=25, pad=20)
                ax.xaxis.labelpad = 20
                ax.yaxis.labelpad = 20




                #ax[2].axvline(30000)
                '''ax[2].cla()
                ax[2].set_title("FFT On Channel One")
                ax[2].plot(frq,abs(Y),'r') # plotting the spectrum
                ax[2].set_xlim(5000,50000)
                #ax[2].set_ylim(0,300)
                ax[2].set_xlabel('Freq (Hz)')
                ax[2].set_ylabel('|Y(freq)|')'''

                #plot_url = py.plot_mpl(fig, filename='mpl-basic-fft')
                plt.pause(0.05)
                self.trigger = False

                #802.3af 48V

                '''plt.gcf().clear()
                plt.plot(self.x, self.a)
                #plt.plot(self.x, self.b)
                #plt.plot(self.x, self.c)
                #plt.plot(self.x, self.d)

                plt.xlabel('time (uS)')
                plt.ylabel('ADC')
                #plt.ylim(30500,35000)
                plt.title('X')
                plt.grid(True)
                plt.pause(0.05)
                self.trigger = False'''

            rate.sleep()

        plt.close('all')
Esempio n. 49
0
def main():
    r = rospy.Rate(60)

    rospy.on_shutdown(hook_shutdown)

    # Object-tracking thresholds
    # in the normalized coordinate system (px, px)
    THRESH_X = 0.05
    THRESH_Y = 0.05

    # Initial neck angles, in degrees
    INITIAL_YAW_ANGLE = 0
    INITIAL_PITCH_ANGLE = 0

    # Neck angle limits, in degrees
    MAX_YAW_ANGLE   = 120
    MIN_YAW_ANGLE   = -120
    MAX_PITCH_ANGLE = 50
    MIN_PITCH_ANGLE = -70

    # Neck control gains:
    # larger values move the neck further per update
    OPERATION_GAIN_X = 5.0
    OPERATION_GAIN_Y = 5.0

    # Step angle (degrees) used when returning to the initial pose
    RESET_OPERATION_ANGLE = 3

    # Get the current neck angles.
    # Reading them here lets the head return slowly to the initial pose.
    while not neck.state_received():
        rospy.sleep(0.1)
    yaw_angle = neck.get_current_yaw()
    pitch_angle = neck.get_current_pitch()

    look_object = False
    detection_timestamp = rospy.Time.now()

    while not rospy.is_shutdown():
        # Get the normalized object position
        object_position = object_tracker.get_object_position()

        if object_tracker.object_detected():
            detection_timestamp = rospy.Time.now()
            look_object = True
        else:
            lost_time = rospy.Time.now() - detection_timestamp
            # If the object has been lost for a while, return to the initial angles
            if lost_time.to_sec() > 1.0:
                look_object = False

        if look_object:
            # Move the neck so the object is centered in the image
            if math.fabs(object_position.x) > THRESH_X:
                yaw_angle += -object_position.x * OPERATION_GAIN_X

            if math.fabs(object_position.y) > THRESH_Y:
                pitch_angle += object_position.y * OPERATION_GAIN_Y

            # Clamp the neck angles to their limits
            if yaw_angle > MAX_YAW_ANGLE:
                yaw_angle = MAX_YAW_ANGLE
            if yaw_angle < MIN_YAW_ANGLE:
                yaw_angle = MIN_YAW_ANGLE

            if pitch_angle > MAX_PITCH_ANGLE:
                pitch_angle = MAX_PITCH_ANGLE
            if pitch_angle < MIN_PITCH_ANGLE:
                pitch_angle = MIN_PITCH_ANGLE

        else:
            # Slowly return to the initial angles
            diff_yaw_angle = yaw_angle - INITIAL_YAW_ANGLE
            if math.fabs(diff_yaw_angle) > RESET_OPERATION_ANGLE:
                yaw_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_yaw_angle)
            else:
                yaw_angle = INITIAL_YAW_ANGLE

            diff_pitch_angle = pitch_angle - INITIAL_PITCH_ANGLE
            if math.fabs(diff_pitch_angle) > RESET_OPERATION_ANGLE:
                pitch_angle -= math.copysign(RESET_OPERATION_ANGLE, diff_pitch_angle)
            else:
                pitch_angle = INITIAL_PITCH_ANGLE

        neck.set_angle(math.radians(yaw_angle), math.radians(pitch_angle))

        r.sleep()
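The tracking law above is a simple proportional controller: each cycle the yaw and pitch move by gain times the normalized image error, and the result is clamped to the joint limits. The clamping could equally be written with a small helper (hypothetical, not in the original):

    def clamp(value, lo, hi):
        return max(lo, min(hi, value))

    yaw_angle = clamp(yaw_angle, MIN_YAW_ANGLE, MAX_YAW_ANGLE)
    pitch_angle = clamp(pitch_angle, MIN_PITCH_ANGLE, MAX_PITCH_ANGLE)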
Esempio n. 50
0
    def start(self):
        """
        Loop through subscribed img msgs, undistort and
        republish to ud_img_msg
        """

        # self.left_img = False
        while not rospy.is_shutdown():
            if (self.left_img) and (self.left_img.header.seq != -1):
                # print(self.left_img.header)
                self.left_img.header.seq = -1
                self.bridge = CvBridge()
                # (self.left_img, desired_encoding='passthrough')
                original_left_img = self.bridge.imgmsg_to_cv2(
                    self.left_img, "rgb8")
                original_right_img = self.bridge.imgmsg_to_cv2(
                    self.right_img, "rgb8")

                # original_left_img = original_left_img.astype('uint8')
                # original_right_img = original_right_img.astype('uint8')


                resized_left_img = cv2.resize(original_left_img, dsize=(
                    300, 300), interpolation=cv2.INTER_CUBIC)
                resized_right_img = cv2.resize(original_right_img, dsize=(
                    300, 300), interpolation=cv2.INTER_CUBIC)

        # try:
            # Set up an OpenCV window to visualize the results
            WINDOW_TITLE = 'Realsense'
            cv2.namedWindow(WINDOW_TITLE, cv2.WINDOW_NORMAL)

            # Configure the OpenCV stereo algorithm. See
            # https://docs.opencv.org/3.4/d2/d85/classcv_1_1StereoSGBM.html for a
            # description of the parameters
            window_size = 5
            min_disp = 0
            # must be divisible by 16
            num_disp = 112 - min_disp
            max_disp = min_disp + num_disp
            stereo = cv2.StereoSGBM_create(minDisparity=min_disp,
                                           numDisparities=num_disp,
                                           blockSize=16,
                                           P1=8*3*window_size**2,
                                           P2=32*3*window_size**2,
                                           disp12MaxDiff=1,
                                           uniquenessRatio=10,
                                           speckleWindowSize=100,
                                           speckleRange=32)

            # Retrieve the stream and intrinsic properties for both cameras
            # profiles = pipe.get_active_profile()
            # streams = {"left"  : profiles.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
            #            "right" : profiles.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()}

            # streams: ROS version (images)

            # intrinsics = {"left"  : streams["left"].get_intrinsics(),
            #               "right" : streams["right"].get_intrinsics()}

            # Print information about both cameras
            # print("Left camera:",  intrinsics["left"])
            # print("Right camera:", intrinsics["right"])

            K_left = rospy.get_param("/camera_matrix/data")
            K_left = np.reshape(K_left, [3,3])
            print("K_left reshape: ",K_left)
            D_left = rospy.get_param("/distortion_coefficients/data")
            D_left = np.reshape(D_left, [1,4])
            print("D",D_left)
            K_right = rospy.get_param("/camera_matrix/data")
            K_right = np.reshape(K_right, [3,3])

            D_right = rospy.get_param("/distortion_coefficients/data")
            D_right = np.reshape(D_right, [1,4])
           
            # Translate the intrinsics from librealsense into OpenCV
            # K_left  = camera_matrix(intrinsics["left"])
            # D_left  = fisheye_distortion(intrinsics["left"])
            # K_right = camera_matrix(intrinsics["right"])
            # D_right = fisheye_distortion(intrinsics["right"])
            # (width, height) = (intrinsics["left"].width, intrinsics["left"].height)

            # Get the relative extrinsics between the left and right camera
            #(R, T) = get_extrinsics(streams["left"], streams["right"])

            (R, T) = self.get_extrinsics(self.left_img, self.right_img)

            # We need to determine what focal length our undistorted images should have
            # in order to set up the camera matrices for initUndistortRectifyMap.  We
            # could use stereoRectify, but here we show how to derive these projection
            # matrices from the calibration and a desired height and field of view

            # We calculate the undistorted focal length:
            #
            #         h
            # -----------------
            #  \      |      /
            #    \    | f  /
            #     \   |   /
            #      \ fov /
            #        \|/
            stereo_fov_rad = 90 * (pi/180)  # 90 degree desired fov
            stereo_height_px = 300          # 300x300 pixel stereo output
            stereo_focal_px = stereo_height_px/2 / tan(stereo_fov_rad/2)
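            # e.g. with the values above: fov = 90 deg gives tan(fov/2) = 1,
            # so stereo_focal_px = 300/2 / 1 = 150 px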

            # We set the left rotation to identity and the right rotation
            # the rotation between the cameras
            R_left = np.eye(3)
            R_right = np.eye(3) #+==========  R

            # The stereo algorithm needs max_disp extra pixels in order to produce valid
            # disparity on the desired output region. This changes the width, but the
            # center of projection should be on the center of the cropped image
            stereo_width_px = stereo_height_px + max_disp
            stereo_size = (stereo_width_px, stereo_height_px)
            stereo_cx = (stereo_height_px - 1)/2 + max_disp
            stereo_cy = (stereo_height_px - 1)/2

            # Construct the left and right projection matrices, the only difference is
            # that the right projection matrix should have a shift along the x axis of
            # baseline*focal_length
            P_left = np.array([[stereo_focal_px, 0, stereo_cx, 0],
                               [0, stereo_focal_px, stereo_cy, 0],
                               [0,               0,         1, 0]])
            P_right = P_left.copy()
            # print("-- T[0][0]: ", T[0][0])
            # print("sf_px: ", stereo_focal_px)

            P_right[0][3] = T[0][0]*stereo_focal_px

            # Construct Q for use with cv2.reprojectImageTo3D. Subtract max_disp from x
            # since we will crop the disparity later
            Q = np.array([[1, 0,       0, -(stereo_cx - max_disp)],
                          [0, 1,       0, -stereo_cy],
                          [0, 0,       0, stereo_focal_px],
                          [0, 0, -1/T[0][0], 0]])

            # Create an undistortion map for the left and right camera which applies the
            # rectification and undoes the camera distortion. This only has to be done
            # once
            m1type = cv2.CV_32FC1
            (lm1, lm2) = cv2.fisheye.initUndistortRectifyMap(
                K_left, D_left, R_left, P_left, stereo_size, m1type)
            (rm1, rm2) = cv2.fisheye.initUndistortRectifyMap(
                K_right, D_right, R_right, P_right, stereo_size, m1type)
            undistort_rectify = {"left": (lm1, lm2),
                                 "right": (rm1, rm2)}

            mode = "stack"
            while True:
                # Check if the camera has acquired any frames
                # frame_mutex.acquire()
                frame_data = {"left": None,
                  "right": None,
                  "timestamp_ms": None
                  }
                # valid = frame_data["timestamp_ms"] is not None
                valid = True
                # frame_mutex.release()

                # If frames are ready to process
                if valid:
                    # Hold the mutex only long enough to copy the stereo frames
                    # frame_mutex.acquire()
                    # frame_copy = {"left": frame_data["left"].copy(),
                    #               "right": frame_data["right"].copy()}
                    # frame_mutex.release()

                    # Undistort and crop the center of the frames
                    center_undistorted = {"left": cv2.remap(src=original_left_img,
                                                            map1=undistort_rectify["left"][0],
                                                            map2=undistort_rectify["left"][1],
                                                            interpolation=cv2.INTER_LINEAR),
                                          "right": cv2.remap(src=original_right_img,
                                                             map1=undistort_rectify["right"][0],
                                                             map2=undistort_rectify["right"][1],
                                                             interpolation=cv2.INTER_LINEAR)}

                    # compute the disparity on the center of the frames and convert it to a pixel disparity (divide by DISP_SCALE=16)
                    disparity = stereo.compute(
                        center_undistorted["left"], center_undistorted["right"]).astype(np.float32) / 16.0

                    # re-crop just the valid part of the disparity
                    disparity = disparity[:, max_disp:]

                    # convert disparity to 0-255 and color it
                    disp_vis = 255*(disparity - min_disp) / num_disp
                    disp_color = cv2.applyColorMap(
                        cv2.convertScaleAbs(disp_vis, 1), cv2.COLORMAP_JET)
                    color_image_l = center_undistorted["left"]
                    print("**CU: ",center_undistorted["left"])
                    # cv2.cvtColor(
                    #     center_undistorted["left"][:, max_disp:], cv2.COLOR_GRAY2RGB)
                    color_image_r = center_undistorted["right"]
                    # cv2.cvtColor(
                    #     center_undistorted["right"][:, max_disp:], cv2.COLOR_GRAY2RGB)
                    # (self.left_img, desired_encoding='passthrough')
                    image_to_publish_l = self.bridge.cv2_to_imgmsg(
                        color_image_l, "bgr8")
                    image_to_publish_r = self.bridge.cv2_to_imgmsg(
                        color_image_r, "bgr8")
                    self.left_img_pub.publish(image_to_publish_l)
                    self.right_img_pub.publish(image_to_publish_r)

                    if mode == "stack":
                        cv2.imshow(WINDOW_TITLE, np.hstack(
                            (color_image_l, disp_color)))
                    if mode == "overlay":
                        ind = disparity >= min_disp
                        color_image_l[ind, 0] = disp_color[ind, 0]
                        color_image_l[ind, 1] = disp_color[ind, 1]
                        color_image_l[ind, 2] = disp_color[ind, 2]
                        cv2.imshow(WINDOW_TITLE, color_image_l)
                key = cv2.waitKey(1)
                if key == ord('s'):
                    mode = "stack"
                if key == ord('o'):
                    mode = "overlay"
                if key == ord('q') or cv2.getWindowProperty(WINDOW_TITLE, cv2.WND_PROP_VISIBLE) < 1:
                    break
Esempio n. 51
0
Description: Run simple ROS node with encoder test GUI
"""

import sys
sys.path.insert(1, "/home/pdsherman/projects/pendulum/catkin_ws/src/pendulum/py_libs")

import rospy
import time

import tkinter as tk # 3rd party GUI library

from EncoderTestGui import *


# ----------------------------- #
# --      START OF SCRIPT    -- #
# ----------------------------- #
if __name__ == "__main__":
    try:
        rospy.init_node("encoder_test_node", anonymous=False)
        gui = EncoderTestGui()

        while not gui.quit_requested() and not rospy.is_shutdown():
            gui.cycle()
            time.sleep(0.01)

    except tk._tkinter.TclError, e:
        print("Tk error: " + str(e))
    except rospy.ROSInterruptException:
        pass
def getKey():
    global command
    while not rospy.is_shutdown():
        command = getch()
Esempio n. 53
0
    def trajectory_controller(self):
        previous_pos = None
        count = 0
        previous_index = 0
        path = self.pos[:, 0:3].T
        path_x = path[0, :]
        path_y = path[1, :]
        path_z = path[2, :]
        while not rospy.is_shutdown():
            try:
                (trans, rot) = self.tf_listener.lookupTransform(self.odom_frame_name, self.palm_link_name, rospy.Time(0))
                break
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue
        current_pos = np.array([trans[0], trans[1], trans[2]])
        previous_pos = current_pos[:]
        distance = np.linalg.norm((np.array(path[:, path.shape[1] - 1]) - current_pos))
        followed_trajectory = []

        old_pos_index = 0
        while distance > self.goal_tolerance and not rospy.is_shutdown():
            try:
                (trans, rot) = self.tf_listener.lookupTransform(self.odom_frame_name, self.palm_link_name, rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                continue
            current_pos = np.array([trans[0], trans[1], trans[2]])
            distance = np.linalg.norm((np.array(path[:, path.shape[1] - 1]) - current_pos))
            dist = []
            for i in range(path.shape[1]):
                dist.append(np.linalg.norm((path[:, i] - current_pos)))
            index =  np.argmin(dist)

            if old_pos_index != index:
                followed_trajectory.append(current_pos)
                old_pos_index = index

            if index < previous_index:
                index = previous_index
            else:
                previous_index = index

            # Delete this block later
            if index > path.shape[1] - 1:
                break

            if index == path.shape[1] - 1:
                ind = index
            else:
                ind = index + 1

            vel_x = self.feedforward_gain * (path_x[ind] - path_x[index]) + self.feedback_gain * (path_x[ind] - current_pos[0])
            vel_y = self.feedforward_gain * (path_y[ind] - path_y[index]) + self.feedback_gain * (path_y[ind] - current_pos[1])
            vel_z = self.feedforward_gain * (path_z[ind] - path_z[index]) + self.feedback_gain * (path_z[ind] - current_pos[2])
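            # The commanded velocity is a feedforward term along the current
            # path segment plus a feedback term pulling the palm toward the
            # next waypoint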

            # limiting speed
            norm_ = np.linalg.norm(np.array([vel_x, vel_y, vel_z]))
            if norm_ > 0.05:
                vel_x = vel_x * 0.05 / norm_
                vel_y = vel_y * 0.05 / norm_
                vel_z = vel_z * 0.05 / norm_

            vel_x_arm = vel_x
            vel_y_arm = vel_y
            vel_z_arm = vel_z

            vel_x_base = 0.0
            vel_y_base = 0.0
            vel_z_base = 0.0
            ratio = 1.0

            if self.min_sigma_value is not None and self.min_sigma_value < self.sigma_threshold_upper and self.deploy_wbc:
                ratio = (self.min_sigma_value - self.sigma_threshold_lower) / (self.sigma_threshold_upper - self.sigma_threshold_lower)

                vel_x_arm = vel_x * (ratio)
                vel_y_arm = vel_y * (ratio)
                vel_x_base = vel_x * (1 - ratio)
                vel_y_base = vel_y * (1 - ratio)

                # Publish the base velocity inside this if condition
                vector_ = Vector3Stamped()
                vector_.header.seq = count
                vector_.header.frame_id = self.odom_frame_name
                vector_.vector.x = vel_x_base
                vector_.vector.y = vel_y_base
                vector_.vector.z = vel_z_base

                vector_ = self.tf_listener.transformVector3(self.base_link_frame_name, vector_)

                message_base = Twist()
                message_base.linear.x = vector_.vector.x
                message_base.linear.y = vector_.vector.y
                message_base.linear.z = vector_.vector.z
                self.vel_publisher_base.publish(message_base)

            message_arm = TwistStamped()
            message_arm.header.seq = count
            message_arm.header.frame_id = self.odom_frame_name
            message_arm.twist.linear.x = vel_x_arm
            message_arm.twist.linear.y = vel_y_arm
            message_arm.twist.linear.z = vel_z_arm
            self.vel_publisher_arm.publish(message_arm)
            count += 1

        # stop arm and base motion after converging
        message_base = Twist()
        message_base.linear.x = 0.0
        message_base.linear.y = 0.0
        message_base.linear.z = 0.0

        message_arm = TwistStamped()
        message_arm.header.seq = count
        message_arm.header.frame_id = self.odom_frame_name
        message_arm.twist.linear.x = 0
        message_arm.twist.linear.y = 0
        message_arm.twist.linear.z = 0

        self.vel_publisher_arm.publish(message_arm)
        if self.deploy_wbc:
            self.vel_publisher_base.publish(message_base)
Esempio n. 54
0
    # Create timer event and publish.
    period = float(rospy.get_param("~period"))

    # Create lock for msgs.
    lock = Lock()

    # Setup services and publish mission.
    rospy.Service("get_active_mission", Trigger, get_active_mission)
    rospy.Service("get_mission_by_id", GetMissionByID, get_mission_by_id)

    # Get mission to begin publishing. This is the first mission published.
    mission_id = rospy.get_param("~id")
    msgs = None

    retry_rate = rospy.Rate(1)
    while msgs is None and not rospy.is_shutdown():
        # If the id is negative, it is the default and does not exist.
        if mission_id >= 0:
            req = GetMissionByID()
            req.id = mission_id
            get_mission_by_id(req)
        else:
            get_active_mission(Trigger())

        retry_rate.sleep()

    # Publish message on timer.
    timer = rospy.Timer(rospy.Duration(period), publish_mission)

    rospy.spin()
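publish_mission itself is not shown here; rospy.Timer calls it with a rospy.timer.TimerEvent argument. A hedged sketch of what it presumably does, with mission_pub as a hypothetical publisher name:

    def publish_mission(event):
        # msgs is filled in by get_active_mission / get_mission_by_id
        with lock:
            if msgs is not None:
                mission_pub.publish(msgs)  # mission_pub: hypothetical publisher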
Esempio n. 55
0
def main():

    global new_image

    rs_img = rs_process()
    rospy.init_node('hand_tracking', anonymous=True)
    rospy.loginfo("Hand Detection Start!")

    #Marker Publisher Initialize
    pub = rospy.Publisher('/hand_marker', Marker, queue_size=1)
    hand_mark = MarkerGenerator()
    hand_mark.type = Marker.SPHERE_LIST
    hand_mark.scale = [.07] * 3
    hand_mark.frame_id = "/camera_color_optical_frame"
    hand_mark.id = 0
    hand_mark.lifetime = 10000

    #hand detect args
    parser = argparse.ArgumentParser()
    parser.add_argument('-sth',
                        '--scorethreshold',
                        dest='score_thresh',
                        type=float,
                        default=0.5,
                        help='Score threshold for displaying bounding boxes')
    parser.add_argument('-fps',
                        '--fps',
                        dest='fps',
                        type=int,
                        default=1,
                        help='Show FPS on detection/display visualization')
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=640,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument(
        '-ds',
        '--display',
        dest='display',
        type=int,
        default=0,
        help='Display the detected images using OpenCV. This reduces FPS')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()
    num_hands_detect = 2

    im_width, im_height = (args.width, args.height)

    #time for fps calculation
    start_time = datetime.datetime.now()
    num_frames = 0

    #skin filter color
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    #######################################
    #Define the frame to transform
    #######################################
    target_frame = "/camera_color_optical_frame"  # FROM
    reference_frame = "/base_link"  # TO

    #####################################
    # Define the numpy array to record the sequence of hand locations
    ######################################
    # hand_pos = np.empty((1,3))

    is_transform_target = False

    if (is_transform_target):
        listener = tf.TransformListener()
        listener.waitForTransform(reference_frame, target_frame, rospy.Time(0),
                                  rospy.Duration(4.0))
        hand_mark.frame_id = reference_frame
    else:
        hand_mark.frame_id = target_frame

    hand_pose_list = []
    while not rospy.is_shutdown():
        #get rgb,depth frames for synchronized frames
        if not new_image:
            continue

        im_rgb = rs_image_rgb
        # im_rgb = cv2.cvtColor(rs_image_rgb, cv2.COLOR_BGR2RGB)
        im_depth = rs_image_depth
        new_image = False
        #add check

        # depth_map = np.array(rs_image_depth, dtype=np.uint8)
        depth_map = cv2.applyColorMap(
            cv2.convertScaleAbs(rs_image_depth, alpha=0.03), cv2.COLORMAP_JET)
        # cv2.imshow("Depth Image", depth_map)
        cv2.imshow("rs_image_rgb", rs_image_rgb)

        try:
            image_np = im_rgb
        except:
            print("Error converting to RGB")

        # actual hand detection
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)
        # draw bounding boxes
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        if (scores[0] > args.score_thresh):
            (left, right, top,
             bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                        boxes[0][0] * im_height, boxes[0][2] * im_height)
            p1 = (int(left), int(top))
            p2 = (int(right), int(bottom))
            # print p1,p2,int(left),int(top),int(right),int(bottom)
            image_hand = image_np[int(top):int(bottom), int(left):int(right)]
            # cv2.namedWindow("hand", cv2.WINDOW_NORMAL)
            cv2.imshow('hand', cv2.cvtColor(image_hand, cv2.COLOR_RGB2BGR))

            align_hand = im_rgb[int(top):int(bottom), int(left):int(right)]
            align_depth = depth_map[int(top):int(bottom), int(left):int(right)]
            align_hand_detect = np.hstack((align_hand, align_depth))
            # cv2.namedWindow('align hand', cv2.WINDOW_AUTOSIZE)
            # cv2.imshow('align hand', align_hand_detect)

            # cv2.imshow('align_hand_bgr', align_hand)
            align_hand = cv2.cvtColor(align_hand, cv2.COLOR_BGR2RGB)
            #skin filtering
            converted = cv2.cvtColor(align_hand, cv2.COLOR_BGR2HSV)
            skinMask = cv2.inRange(converted, lower, upper)
            # cv2.imshow("skinMask", skinMask)

            # apply a series of erosions and dilations to the mask
            # using an elliptical kernel
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            # skinMask = cv2.erode(skinMask, kernel, iterations = 2)
            # skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            skinMask = cv2.erode(skinMask, kernel, iterations=3)
            skinMask = cv2.dilate(skinMask, kernel, iterations=3)

            # blur the mask to help remove noise, then apply the
            # mask to the frame
            skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
            skin = cv2.bitwise_and(align_hand, align_hand, mask=skinMask)
            # show the skin in the image along with the mask
            # cv2.imshow("images", np.hstack([align_hand, skin]))
            #end skin

            depth_pixel = [(int(left) + int(right)) / 2,
                           (int(top) + int(bottom)) / 2]
            # depth_point = [0.0,0.0,0.0]
            depth_point = rs.rs2_deproject_pixel_to_point(
                depth_intrin, depth_pixel,
                im_depth[depth_pixel[1], depth_pixel[0]] * depth_scale)
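            # rs2_deproject_pixel_to_point maps the bounding-box center pixel
            # and its depth (scaled to meters by depth_scale) to a 3D point in
            # the camera frame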
            print depth_point
            hand_mark.counter = 0
            t = rospy.get_time()
            hand_mark.color = [0, 1, 0, 1]

            # hand_mark.id = hand_mark.id + 1
            # if (hand_mark.id > 100000) :
            #     hand_mark.id = 0
            # ## hand in /camera_color_optical_frame
            # print ('id ',hand_mark.id)
            m0 = hand_mark.marker(points=[(depth_point[0], depth_point[1],
                                           depth_point[2])])

            hand_point_x = depth_point[0]
            hand_point_y = depth_point[1]
            hand_point_z = depth_point[2]

            if (is_transform_target):
                #########################################################################
                ##convert /camera_color_optical_frame => /world
                #########################################################################

                #transform position from target_frame to reference frame
                target_ref_camera = PointStamped()
                target_ref_camera.header.frame_id = target_frame
                target_ref_camera.header.stamp = rospy.Time(0)
                target_ref_camera.point = m0.points[0]

                p = listener.transformPoint(reference_frame, target_ref_camera)
                # p=listener.transformPoint(reference_frame,hand_mark)

                m = hand_mark.marker(points=[(p.point.x, p.point.y,
                                              p.point.z)])

                #substitute data for the variable
                hand_point_x = p.point.x
                hand_point_y = p.point.y
                hand_point_z = p.point.z

                # pub.publish(m)

                #offset z-axiz
                # hand_mark.id = 1
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m)
            else:
                # print('published!')
                ####append the data

                if 0.15 <= hand_point_z <= 0.75 and -0.4 <= hand_point_x <= 0.4:
                    print("recorded hand point")
                    hand_pose = [
                        hand_point_x, hand_point_y, hand_point_z, 0.0, 0.0,
                        0.0, 1.0
                    ]
                    print hand_pose
                    hand_pose_list.append(hand_pose)

                pub.publish(m0)

                #substitute data for the variable
                hand_point_x = depth_point[0]
                hand_point_y = depth_point[1] - 0.08
                hand_point_z = depth_point[2]

                #### offset z-axiz
                # hand_mark.id = 1
                m1 = hand_mark.marker(points=[(depth_point[0],
                                               depth_point[1] - 0.08,
                                               depth_point[2])])
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m1)

        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time

        #display window
        if (args.display > 0):
            # Display FPS on frame
            if (args.fps > 0):
                detector_utils.draw_fps_on_image("FPS : " + str(float(fps)),
                                                 image_np)

            cv2.imshow('Single Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))

        if cv2.waitKey(10) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

    print('save hand_pub.npy')
    # np.save('./hand_pub.npy',hand_pos)
    np.save("hand_pub", hand_pose_list)
Esempio n. 56
0
def talker(): 
    # Actually launch a node called "set_desired_wheel_speeds_by_path_specs"
    rospy.init_node('set_desired_wheel_speeds_by_path_specs', anonymous=False)

    # Create the publisher. Name the topic "/wheel_speeds_desired", with message type ME439WheelSpeeds
    pub_speeds = rospy.Publisher('/wheel_speeds_desired', ME439WheelSpeeds, queue_size=10)
    # Declare the message that will go on that topic.
    # Here we use one of the message types we imported, calling it as a function to construct it.
    # We could also fill in its data fields right away.
    msg_out = ME439WheelSpeeds()
    msg_out.v_left = 0
    msg_out.v_right = 0
    
    # set up a rate basis to keep it on schedule.
    r = rospy.Rate(100) # N Hz
    try: 
        # start a loop 
        t_start = rospy.get_rostime()
        
        while not rospy.is_shutdown():
            stage = np.argwhere(stage_settings_array[:, 0] >= (rospy.get_rostime() - t_start).to_sec())[0]
            print stage
            msg_out.v_left = stage_settings_array[stage,1]
            msg_out.v_right = stage_settings_array[stage,2]
            # Actually publish the message
            pub_speeds.publish(msg_out)
            # Log the info (optional)
#            rospy.loginfo(pub_speeds)    
            
            r.sleep()
        
#        # Here step through the settings. 
#        for stage in range(0,len(stage_settings_array)):  # len gets the length of the array (here the number of rows)
#            # Set the desired speeds
#            dur = stage_settings_array[stage,0]
#            msg_out.v_left = stage_settings_array[stage,1]
#            msg_out.v_right = stage_settings_array[stage,2]
#            # Actually publish the message
#            pub_speeds.publish(msg_out)
#            # Log the info (optional)
#            rospy.loginfo(pub_speeds)       
#            
#            # Sleep for just long enough to reach the next command. 
#            sleep_dur = dur - (rospy.get_rostime()-t_start).to_sec()
#            sleep_dur = max(sleep_dur, 0.)  # In case there was a setting with zero duration, we will get a small negative time. Don't use negative time. 
#            rospy.sleep(sleep_dur)
        
    except Exception:
        traceback.print_exc()
        # When done or Errored, Zero the speeds
        msg_out.v_left = 0
        msg_out.v_right = 0
        pub_speeds.publish(msg_out)
        rospy.loginfo(pub_speeds)    
        pass
        
        
    # When done or Errored, Zero the speeds
    msg_out.v_left = 0
    msg_out.v_right = 0
    pub_speeds.publish(msg_out)
    rospy.loginfo(pub_speeds)    
Esempio n. 57
0
    def __init__(self):

        self.arm_front_right_rotSpeed = 0
        self.arm_front_left_rotSpeed = 0
        self.arm_rear_right_rotSpeed = 0
        self.arm_rear_left_rotSpeed = 0
        #self.arm_front_setPoint = -2.35
        #self.arm_rear_setPoint = -0.78
        self.arm_front_right_setPoint = -0.2  #-2.35
        self.arm_front_left_setPoint = 0.2  #2.35
        self.arm_rear_right_setPoint = 0  #3#-np.pi/2
        self.arm_rear_left_setPoint = 0  #-3#np.pi/2
        self.arm_front_right_position = 0
        self.arm_front_left_position = 0
        self.arm_rear_right_position = 0
        self.arm_rear_left_position = 0
        self.omega_right = 0
        self.omega_left = 0
        self.sentido_giro_frente = "ccw"
        self.sentido_giro_atras = "ccw"
        self.arms_speed = 0
        self.state = 0
        self.count_correct_angle = 0
        # sends a message to the user
        rospy.loginfo('Control_arm_speed node started')

        # registering to publisher
        self.pub_traction = rospy.Publisher('/rosi/command_traction_speed',
                                            RosiMovementArray,
                                            queue_size=1)
        self.pub_Arm_sp = rospy.Publisher('/arm_sp',
                                          ArmsSetPoint,
                                          queue_size=1)

        # registering to subscribers
        self.sub_Arm_position = rospy.Subscriber('/rosi/arms_joints_position',
                                                 RosiMovementArray,
                                                 self.callback_Arm_position)

        # defining the eternal loop frequency
        node_sleep_rate = rospy.Rate(10)

        # main loop (until further notice)
        while not rospy.is_shutdown():
            if self.state == 0:
                rospy.loginfo('State 0: setting the tracks')
                while True:
                    arm_sp_msg = ArmsSetPoint()
                    arm_sp_msg.front_dir = 0
                    arm_sp_msg.rear_dir = 1
                    arm_sp_msg.front_arms_sp = 3.5
                    arm_sp_msg.rear_arms_sp = 0.15
                    self.pub_Arm_sp.publish(arm_sp_msg)
                    if (abs(self.arm_front_right_position) > 3.0) \
                    and (abs(self.arm_rear_right_setPoint - self.arm_rear_right_position) < 1e-1):
                        break
                self.state += 1

            elif self.state == 1:
                rospy.loginfo('State 1: Moving forward')
                self.state = 1  # stay in this state

            node_sleep_rate.sleep()
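A note on Esempio n. 57: the inner while True loop in state 0 republishes the set-point as fast as the interpreter allows, with no sleep between iterations. A rate-limited variant is gentler on the topic; a minimal sketch, assuming the same publisher and position attributes as the class above (the helper name is hypothetical):

    def publish_until_reached(self, arm_sp_msg, tolerance=1e-1, hz=10):
        # Hypothetical helper: republish the set-point at a fixed rate until
        # the arms report they have reached it (same test as in state 0).
        rate = rospy.Rate(hz)
        while not rospy.is_shutdown():
            self.pub_Arm_sp.publish(arm_sp_msg)
            if (abs(self.arm_front_right_position) > 3.0
                    and abs(self.arm_rear_right_setPoint
                            - self.arm_rear_right_position) < tolerance):
                break
            rate.sleep()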
Esempio n. 58
0
    while count > count2:
        msg3.servoAngle = p.read_angle()
        p.move_to_encoder(1000)
        count = p.read_encoder()
        servo_pub.publish(msg3)

        #rospy.loginfo(math.degrees(p.read_angle()))

        # for i in range(4, 2040):
        #     p.move_to_encoder(i)
        #     msg3.servoAngle = p.read_angle()
        #     pub.publish(msg3)

        # t.secs = 10
        #pub2.publish(msg3)
        #rospy.loginfo(msg3)

    rate.sleep()

    rospy.spin()

if __name__ == '__main__':
    main()
    if rospy.is_shutdown():
        print("Home------------------------------------------------------------------")
        
Esempio n. 59
0
def draw_continuous():
    rate = rospy.Rate(2.0)
    while not rospy.is_shutdown():
        draw_circles(0.)
        rate.sleep()
Esempio n. 60
0
def talker():
    pubGoal = rospy.Publisher('arcontroller/goal', Goal, queue_size=10)
    pubWaypoints = rospy.Publisher('arcontroller/waypoints',
                                   Waypoints,
                                   queue_size=10)

    goal = Goal()
    wp_msg = Waypoints()
    rospy.loginfo(wp_msg)
    pubLand = rospy.Publisher('ardrone/land', Empty, queue_size=10)
    pubTakeoff = rospy.Publisher('ardrone/takeoff', Empty, queue_size=1)
    pubCmd = rospy.Publisher('cmd_vel', Twist, queue_size=1)
    pubReset = rospy.Publisher('ardrone/reset', Empty, queue_size=1)

    # The with-block closes the file; no explicit close() is needed.
    with open(
            '/home/duho46/catkin_ws/src/ardrone_controller/src/squarePath.txt'
    ) as f:
        paths = f.read().splitlines()
    path_list = [path.split(',') for path in paths]
    wp_square = [[float(x) for x in y] for y in path_list]

    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        choice = raw_input(
            'Goal: (1) center, (2) front, (3) back, (4) to (-1.5,-1.5), (5) to (-2,-2), (6) yaw waypoints, (7) zero-yaw waypoints, (8) to square, (9) square, (l) land, (t) take off, (r) reset \n'
        )
        if choice == '1':
            goal.t = -1
            goal.x = goals[0][0]
            goal.y = goals[0][1]
            goal.z = goals[0][2]
            goal.yaw = goals[0][3]
            rospy.loginfo(goal)
            pubGoal.publish(goal)
        elif choice == '2':
            goal.t = -1
            goal.x = goals[1][0]
            goal.y = goals[1][1]
            goal.z = goals[1][2]
            goal.yaw = goals[1][3]
            pubGoal.publish(goal)
        elif choice == '3':
            goal.t = -1
            goal.x = goals[2][0]
            goal.y = goals[2][1]
            goal.z = goals[2][2]
            goal.yaw = goals[2][3]
            pubGoal.publish(goal)
        elif choice == '4':
            #self.waypoint_follower(self.points_forward)
            goal.t = -1
            goal.x = wp_ref[0][1]
            goal.y = wp_ref[0][2]
            goal.z = wp_ref[0][3] * 1.25
            goal.yaw = wp_ref[0][4]
            pubGoal.publish(goal)
        elif choice == '5':
            goal.t = -1
            goal.x = wp_ref_0[0][1]
            goal.y = wp_ref_0[0][2]
            goal.z = wp_ref_0[0][3] * 1.25
            goal.yaw = wp_ref_0[0][4]
            pubGoal.publish(goal)
        elif choice == '6':
            wp_msg = Waypoints()
            #rospy.loginfo(wp_msg)
            #rospy.loginfo(type(wp_msg.waypoints))
            for t in range(len(wp_ref)):
                goal = Goal()
                goal.t = wp_ref[t][0] * 0.75
                goal.x = wp_ref[t][1] * 1.25
                goal.y = wp_ref[t][2] * 1.25
                goal.z = wp_ref[t][3] * 1.25
                goal.yaw = wp_ref[t][4]
                wp_msg.waypoints.append(goal)
                #rospy.loginfo(goal)
            wp_msg.len = len(wp_ref)
            #rospy.loginfo(wp_msg)
            pubWaypoints.publish(wp_msg)
        elif choice == '7':
            wp_msg = Waypoints()
            #rospy.loginfo(wp_msg)
            #rospy.loginfo(type(wp_msg.waypoints))
            for t in range(len(wp_ref_0)):
                goal = Goal()
                goal.t = wp_ref_0[t][0] * 0.9
                goal.x = wp_ref_0[t][1] * 1.0
                goal.y = wp_ref_0[t][2] * 1.0
                goal.z = wp_ref_0[t][3] * 1.25
                goal.yaw = wp_ref_0[t][4]
                wp_msg.waypoints.append(goal)
                #rospy.loginfo(goal)
            wp_msg.len = len(wp_ref_0)
            #rospy.loginfo(wp_msg)
            pubWaypoints.publish(wp_msg)
        elif choice == '8':
            goal = Goal()
            goal.t = -1
            goal.x = wp_square[0][1] * 1.0
            goal.y = wp_square[0][2] * 1.0
            goal.z = wp_square[0][3] * 1.25
            goal.yaw = wp_square[0][4]
            pubGoal.publish(goal)
        elif choice == '9':
            wp_msg = Waypoints()
            for wp in wp_square:
                goal = Goal()
                goal.t = wp[0]
                goal.x = wp[1] * 1.0
                goal.y = wp[2] * 1.0
                goal.z = wp[3] * 1.25
                goal.yaw = wp[4]
                wp_msg.waypoints.append(goal)
            wp_msg.len = len(wp_square)
            pubWaypoints.publish(wp_msg)
        elif choice == 'l':  # Landing
            msg = Twist()
            pubCmd.publish(msg)
            pubLand.publish()
        elif choice == 't':
            pubTakeoff.publish()
        elif choice == 'r':
            msg = Empty()
            pubReset.publish(msg)
        else:
            pass
        rospy.sleep(0.01)
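The file handling at the top of talker() is the one piece worth factoring out: each line of squarePath.txt is a comma-separated row of t,x,y,z,yaw floats. A minimal sketch of that parsing as a standalone helper (the function name and the blank-line filtering are assumptions):

def load_waypoints(path):
    # Parse "t,x,y,z,yaw" rows into lists of floats, skipping blank lines.
    with open(path) as f:
        lines = f.read().splitlines()
    return [[float(x) for x in line.split(',')]
            for line in lines if line.strip()]

Called as wp_square = load_waypoints('/home/duho46/catkin_ws/src/ardrone_controller/src/squarePath.txt'), it reproduces the list the example builds inline.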