Example #1
import math

import rospy
import tf
from std_msgs.msg import String

Transforms = open('Transforms.txt', 'w')

if __name__ == '__main__':
    rospy.init_node('listener')

    listener = tf.TransformListener()

    rate = rospy.Rate(20)

    while not rospy.is_shutdown():
        try:
            (T1, Q1) = listener.lookupTransform('/Shoulder', '/map',
                                                rospy.Time(0))
            (T2, Q2) = listener.lookupTransform('/Elbow', '/map',
                                                rospy.Time(0))
            (T3, Q3) = listener.lookupTransform('/Hand', '/map', rospy.Time(0))
            print(T1, Q1)
            print(T2, Q2)
            print(T3, Q3)
            Transforms.write("Shoulder: \tTransform: " + str(T1[0]) + " " +
                             str(T1[1]) + " " + str(T1[2]) + "\n")
            Transforms.write("\t\tQuat: " + str(Q1[0]) + " " + str(Q1[1]) +
                             " " + str(Q1[2]) + str(Q1[3]) + "\n")
            Transforms.write("Elbow: \t\tTransform: " + str(T2[0]) + " " +
                             str(T2[1]) + " " + str(T2[2]) + "\n")
            Transforms.write("\t\tQuat: " + str(Q2[0]) + " " + str(Q2[1]) +
                             " " + str(Q2[2]) + str(Q2[3]) + "\n")
            Transforms.write("Hand: \t\tTransform: " + str(T3[0]) + " " +
Example #2
File: drone.py Project: mabuc13/RMUASD
    def __init__(self, drone_id=1):
        # All variables relating to the status and information of the drone

        self.id = drone_id
        self.up_time = 0
        self.gps_timestamp = 0

        self.fsm_state = State.GROUNDED
        self.new_mission = False
        self.upload_done = False
        self.upload_failed = False
        self.cmd_try_again = False
        self.speed_ack = False
        self.loiter = False
        self.active = False
        self.wait = False
        self.hold = False
        self.upload_retries = 0

        self.mission_id = 0
        self.pending_mission_gps = []
        self.active_waypoint_gps = GPS()
        self.active_mission_gps = []
        self.active_mission_ml = mavlink_lora_mission_list()
        self.active_mission_len = 0
        self.active_mission_idx = 0
        self.dist_to_goal = -1

        self.holding_waypoint = GPS()

        self.gcs_status = DroneInfo.Land

        # from heartbeat status
        self.main_mode = ""
        self.sub_mode = ""
        self.autopilot = ""
        self.type = ""
        self.state = ""

        # from mav mode
        self.armed = False
        self.manual_input = False
        self.hil_simulation = False
        self.stabilized_mode = False
        self.guided_mode = False
        self.auto_mode = False
        self.test_mode = False
        self.custom_mode = False

        # from statustext
        self.statustext = deque([], maxlen=BUFFER_SIZE)
        self.severity = deque([], maxlen=BUFFER_SIZE)

        # from vfr hud
        self.ground_speed = 0
        self.climb_rate = 0
        self.throttle = 0

        # from home position
        self.home_position = GPS()

        # from drone attitude
        self.roll = 0
        self.pitch = 0
        self.yaw = 0

        # from drone status
        self.battery_voltage = 0
        self.battery_SOC = 0
        self.cpu_load = 0
        self.msg_sent_gcs = 0
        self.msg_received_gcs = 0
        self.msg_dropped_gcs = 0
        self.msg_dropped_uas = 0
        self.last_heard = rospy.Time().now()

        # from drone pos
        self.latitude = 0
        self.longitude = 0
        self.absolute_alt = 0
        self.relative_alt = 0
        self.heading = 0

        self.clear_mission_pub = rospy.Publisher(
            "/mavlink_interface/mission/mavlink_clear_all",
            Empty,
            queue_size=0)
        self.upload_mission_pub = rospy.Publisher("/telemetry/new_mission",
                                                  mavlink_lora_mission_list,
                                                  queue_size=0)
        self.set_current_mission_pub = rospy.Publisher(
            "/telemetry/mission_set_current", Int16, queue_size=10)

        # TODO add drone id to services
        self.arm = rospy.ServiceProxy("/telemetry/arm_drone", Trigger)
        self.disarm = rospy.ServiceProxy("/telemetry/disarm_drone", Trigger)
        self.takeoff = rospy.ServiceProxy("/telemetry/takeoff_drone",
                                          TakeoffDrone)
        self.land = rospy.ServiceProxy("/telemetry/land_drone", LandDrone)
        self.set_mode = rospy.ServiceProxy("/telemetry/set_mode", SetMode)
        self.set_home = rospy.ServiceProxy("/telemetry/set_home", SetHome)
        self.change_speed = rospy.ServiceProxy("/telemetry/change_speed",
                                               ChangeSpeed)
        self.return_to_home = rospy.ServiceProxy("/telemetry/return_home",
                                                 Trigger)
        self.reposition = rospy.ServiceProxy("/telemetry/goto_waypoint",
                                             GotoWaypoint)
        self.upload_mission = rospy.ServiceProxy("/telemetry/upload_mission",
                                                 UploadMission)

        # class that handles the missions manually while an upload is in progress
        self.manual_mission = manual_mission.ManualMission(
            target_sys=self.id,
            target_comp=0,
            reposition_handle=self.reposition)
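
A minimal hedged usage sketch for the Trigger-based service proxies wired up above; only the service name comes from the snippet, and Trigger is assumed to be std_srvs/Trigger:

import rospy
from std_srvs.srv import Trigger

rospy.init_node('drone_client')  # hypothetical node name
rospy.wait_for_service('/telemetry/arm_drone')
arm_drone = rospy.ServiceProxy('/telemetry/arm_drone', Trigger)
response = arm_drone()  # a Trigger request carries no arguments
if not response.success:
    rospy.logwarn('arm_drone failed: %s', response.message)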
Example #3
    def initial_pos_pub(self):

        # arucoCodeBroadcaster = TransformBroadcaster()
        # arucoCodeBroadcaster.sendTransform(
        #     (6.56082, 6.63258, 0.0),
        #     (-0.0635783, -0.704243, -0.704243, -0.0635782),
        #     rospy.Time.now(),
        #     "aruco_code",
        #     "map"
        # )

        # these two lines stop the while loop
        # when you use CTRL+C to exit the program
        signal.signal(signal.SIGINT, quit)
        signal.signal(signal.SIGTERM, quit)

        # listener.waitForTransform("/map", "/camera_init", rospy.Time(), rospy.Duration(4.0))
        # while not map_camera_trans:
        #     try:
        #         # rospy.sleep(1)
        #         # rospy.loginfo(map_camera_trans)
        #         time_now = rospy.Time.now()
        #         listener.waitForTransform("/map", "/camera_init", time_now,  rospy.Duration(4.0))
        #         # listener.waitForTransform("/map", "/camera_init", rospy.Time(0),  rospy.Duration(1.0))
        #         (map_camera_trans, map_camera_rot) = listener.lookupTransform('/map', '/camera_init', time_now)
        #         # (map_camera_trans, map_camera_rot) = listener.lookupTransform('/map', '/camera_init', rospy.Time(0))
        #     except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        #         # map_camera_trans = [0, 0, 0]
        #         # map_camera_rot = [0, 0, 0, 0]
        #         # traceback.print_exc()
        #         rospy.loginfo("stop!!!-----------")
        #         continue
        map_camera_trans = []
        listener = tf.TransformListener()
        rospy.sleep(0.1)
        # count = 0
        while not map_camera_trans:
            try:
                # rospy.loginfo(map_camera_trans)
                # time_now = rospy.Time.now()
                # listener.waitForTransform("/map", "/camera_init", time_now,  rospy.Duration(5.0))
                # listener.waitForTransform("/map", "/camera_init", rospy.Time(0),  rospy.Duration(4.0))
                # (map_camera_trans, map_camera_rot) = listener.lookupTransform('/map', '/camera_init', time_now)
                (map_camera_trans, map_camera_rot) = listener.lookupTransform(
                    '/map', '/camera_init', rospy.Time(0))
                rospy.loginfo("********Got Trans & Rot********")
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                # except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                # map_camera_trans = [0, 0, 0]
                # map_camera_rot = [0, 0, 0, 0]
                # traceback.print_exc()
                rospy.loginfo("********NO CODE --OR-- NO TF ********")
                rospy.sleep(0.1)
                # rospy.loginfo("stop!!!-----------")
                continue

        self.count += 1
        self.trans_total[0] += map_camera_trans[0] / self.NUM_USED_FOR_AVRG
        self.trans_total[1] += map_camera_trans[1] / self.NUM_USED_FOR_AVRG
        self.rot_total[0] += map_camera_rot[0] / self.NUM_USED_FOR_AVRG
        self.rot_total[1] += map_camera_rot[1] / self.NUM_USED_FOR_AVRG
        self.rot_total[2] += map_camera_rot[2] / self.NUM_USED_FOR_AVRG
        self.rot_total[3] += map_camera_rot[3] / self.NUM_USED_FOR_AVRG
        # rospy.loginfo(map_camera_trans[0])
        # rospy.loginfo(self.trans_total[0])

        if self.count >= self.NUM_USED_FOR_AVRG:
            rospy.loginfo(self.count)
            self.trans_total_seq[0] += self.trans_total[0] / self.MAX_SEQ_NEED
            self.trans_total_seq[1] += self.trans_total[1] / self.MAX_SEQ_NEED
            self.rot_total_seq[0] += self.rot_total[0] / self.MAX_SEQ_NEED
            self.rot_total_seq[1] += self.rot_total[1] / self.MAX_SEQ_NEED
            self.rot_total_seq[2] += self.rot_total[2] / self.MAX_SEQ_NEED
            self.rot_total_seq[3] += self.rot_total[3] / self.MAX_SEQ_NEED
            self.trans_total = [0, 0, 0]
            self.rot_total = [0, 0, 0, 0]
            self.seq = self.seq + 1
            self.count = 0

        if self.seq >= self.MAX_SEQ_NEED:

            # ---------------------------------------------------------------------
            # The following is going to update the /map->/odom TF frame transform
            # https://robot-ros.com/robot/35127.html

            # map_odom_tf = tf.transformations.concatenate_matrices(tf.transformations.translation_matrix(
            #     self.trans_total_seq), tf.transformations.quaternion_matrix(self.rot_total_seq))
            # inversed_map_odom_tf = tf.transformations.inverse_matrix(map_odom_tf)
            #
            # inversed_trans_map_odom = tf.transformations.translation_from_matrix(inversed_map_odom_tf)
            # inversed_rot_map_odom = tf.transformations.quaternion_from_matrix(inversed_map_odom_tf)

            # self.tf_map_to_odom_br_.sendTransform(
            #     self.trans_total_seq,
            #     self.rot_total_seq,
            #     # inversed_trans_map_odom,
            #     # inversed_rot_map_odom,
            #     rospy.Time.now(),
            #     "map",
            #     "odom"
            # )
            # rospy.loginfo("Published /map->/odom TF frame transform!")
            # rospy.sleep(5)
            # ---------------------------------------------------------------------

            start_pos = PoseWithCovarianceStamped()
            # start_pos = initpose_aruco
            # filling header with relevant information
            start_pos.header.frame_id = "map"
            start_pos.header.seq = self.seq

            # start_pos.header.stamp = rospy.Time.now()
            # filling payload with relevant information gathered from subscribing
            # to initialpose topic published by RVIZ via rostopic echo initialpose
            start_pos.pose.pose.position.x = self.trans_total_seq[0]
            # rospy.loginfo(self.trans_total[0])
            start_pos.pose.pose.position.y = self.trans_total_seq[1]
            start_pos.pose.pose.position.z = 0.0
            start_pos.pose.pose.orientation.x = self.rot_total_seq[0]
            start_pos.pose.pose.orientation.y = self.rot_total_seq[1]
            start_pos.pose.pose.orientation.z = self.rot_total_seq[2]
            start_pos.pose.pose.orientation.w = self.rot_total_seq[3]
            # start_pos.pose.pose.position.x=map_camera_trans[0]
            # start_pos.pose.pose.position.y=map_camera_trans[1]
            # start_pos.pose.pose.position.z=0.0
            # start_pos.pose.pose.orientation.x=map_camera_rot[0]
            # start_pos.pose.pose.orientation.y=map_camera_rot[1]
            # start_pos.pose.pose.orientation.z=map_camera_rot[2]
            # start_pos.pose.pose.orientation.w=map_camera_rot[3]
            # start_pos.pose.pose.position.x = 5.0
            # start_pos.pose.pose.position.y = 10.0
            # start_pos.pose.pose.position.z = 0.0
            # start_pos.pose.pose.orientation.x = 0.0
            # start_pos.pose.pose.orientation.y = 0.0
            # start_pos.pose.pose.orientation.z = -0.694837665627
            # start_pos.pose.pose.orientation.w = 0.719166613815
            # start_pos.pose.covariance[0] = 0.01
            # start_pos.pose.covariance[7] = 0.01
            # start_pos.pose.covariance[1:7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            # start_pos.pose.covariance[8:34] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            #                                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            # start_pos.pose.covariance[35] = 0.001

            # rospy.loginfo(start_pos)
            self.initpose_pub.publish(start_pos)
            rospy.sleep(1)
            self.initpose_pub.publish(start_pos)
            # self.trans_total=[0,0,0]
            # self.rot_total=[0,0,0,0]
            self.IF_GOT_INIT = True
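
One caveat with the averaging above: a component-wise average of quaternions is generally not a unit quaternion. Below is a small sketch of a normalization helper (not part of the original code) that could be applied to rot_total_seq before filling the orientation fields:

import numpy as np

def normalized_quaternion(q):
    # Component-wise quaternion averages drift off the unit sphere;
    # rescale to unit length so consumers see a valid rotation.
    q = np.asarray(q, dtype=float)
    norm = np.linalg.norm(q)
    return q / norm if norm > 0 else np.array([0.0, 0.0, 0.0, 1.0])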
Example #4
    def read(self, timer_event):
        timeout = rospy.Duration(0.9)
        try:
            self.listen.waitForTransform("base_footprint", "camera_rgb_optical_frame", rospy.Time.now(), timeout)
            self.trans, self.rot = self.listen.lookupTransform("base_footprint", "camera_rgb_optical_frame", rospy.Time(0))
            rospy.loginfo('Obtained transformation base_footprint --> camera_rgb_optical_frame')
            # Note: it is necessary to use Time(0) here, as this tells the listener to get the latest
            self.timer.shutdown()  # Done so stop timer -- can now use self.trans and self.rot
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            pass  # transform not available yet; retry on the next timer tick
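
For context, here is a hedged sketch of how a timer-driven read() like the one above is typically wired up; the class name and the one-second period are assumptions, while the frame names and the self.listen/self.timer attributes mirror the snippet:

import rospy
import tf

class FrameReader(object):
    def __init__(self):
        self.listen = tf.TransformListener()
        self.trans = None
        self.rot = None
        # fires read() once per second until read() shuts the timer down
        self.timer = rospy.Timer(rospy.Duration(1.0), self.read)

    def read(self, timer_event):
        try:
            self.trans, self.rot = self.listen.lookupTransform(
                "base_footprint", "camera_rgb_optical_frame", rospy.Time(0))
            self.timer.shutdown()  # got the transform, stop polling
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            pass  # not available yet; retry on the next tick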
Example #5
    def _pose_sending_func(self):
        '''
        Continually sends the desired pose to the controller.
        '''
        r = rospy.Rate(100)
        while not rospy.is_shutdown():
            try:
                r.sleep()
            except rospy.ROSInterruptException:
                break

            # get the desired pose as a transform matrix
            with self._state_lock:
                ps_des = copy.deepcopy(self._desired_pose_stamped)
                delta_dist = self._delta_dist

            if ps_des is None:
                # no desired pose to publish
                continue

            # convert desired pose to transform matrix from goal frame to CONTROLLER_FRAME
            self._transform_listener.waitForTransform(CONTROLLER_FRAME,
                                                      ps_des.header.frame_id,
                                                      rospy.Time(0),
                                                      rospy.Duration(4.0))
            #trans, rot = self._transform_listener.lookupTransform(CONTROLLER_FRAME, ps_des.header.frame_id, rospy.Time(0))
            #torso_H_wrist_des = geom.trans_rot_to_hmat(trans, rot)

            ps_des.header.stamp = rospy.Time(0)
            ps_des = self._transform_listener.transformPose(
                CONTROLLER_FRAME, ps_des)
            torso_H_wrist_des = conversions.pose_to_mat(ps_des.pose)

            if self.br is not None:
                p = ps_des.pose.position
                q = ps_des.pose.orientation
                self.br.sendTransform((p.x, p.y, p.z), (q.x, q.y, q.z, q.w),
                                      rospy.Time.now(), '/cci_goal',
                                      CONTROLLER_FRAME)

            # get the current pose as a transform matrix
            goal_link = self._hand_description.hand_frame

            # really shouldn't need to do waitForTransform here, since rospy.Time(0) means "use the most
            # recent available transform". but without this, tf sometimes raises an ExtrapolationException. -jdb
            self._transform_listener.waitForTransform(CONTROLLER_FRAME,
                                                      goal_link, rospy.Time(0),
                                                      rospy.Duration(4.0))
            trans, rot = self._transform_listener.lookupTransform(
                CONTROLLER_FRAME, goal_link, rospy.Time(0))
            torso_H_wrist = geom.trans_rot_to_hmat(trans, rot)

            # compute difference between actual and desired wrist pose
            wrist_H_wrist_des = np.dot(linalg.inv(torso_H_wrist),
                                       torso_H_wrist_des)
            trans_err, rot_err = geom.hmat_to_trans_rot(wrist_H_wrist_des)

            # scale the relative transform such that the translation is equal
            # to delta_dist. we're assuming that the scaled angular error will
            # be reasonable
            scale_factor = delta_dist / linalg.norm(trans_err)
            if scale_factor > 1.0:
                scale_factor = 1.0  # don't overshoot
            scaled_trans_err = scale_factor * trans_err
            angle_err, direc, point = transformations.rotation_from_matrix(
                wrist_H_wrist_des)
            scaled_angle_err = scale_factor * angle_err
            wrist_H_wrist_control = np.dot(
                transformations.translation_matrix(scaled_trans_err),
                transformations.rotation_matrix(scaled_angle_err, direc,
                                                point))

            # find incrementally moved pose to send to the controller
            torso_H_wrist_control = np.dot(torso_H_wrist,
                                           wrist_H_wrist_control)
            desired_pose = conversions.mat_to_pose(torso_H_wrist_control)
            desired_ps = PoseStamped()
            desired_ps.header.stamp = rospy.Time(0)
            desired_ps.header.frame_id = CONTROLLER_FRAME
            desired_ps.pose = desired_pose

            if self.br is not None:
                p = desired_ps.pose.position
                q = desired_ps.pose.orientation
                self.br.sendTransform((p.x, p.y, p.z), (q.x, q.y, q.z, q.w),
                                      rospy.Time.now(), '/cci_imm_goal',
                                      CONTROLLER_FRAME)

            # send incremental pose to controller
            self._desired_pose_pub.publish(desired_ps)
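
geom.trans_rot_to_hmat and conversions.pose_to_mat above are project-local helpers. For readers without those modules, here is a hedged equivalent of the first, built only from tf.transformations:

from tf import transformations

def trans_rot_to_hmat(trans, rot):
    # Build a 4x4 homogeneous matrix from a translation [x, y, z]
    # and a quaternion [x, y, z, w].
    hmat = transformations.quaternion_matrix(rot)
    hmat[:3, 3] = trans
    return hmat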
Example #6
q_mode = False

# Used for printing and saving
tf_line = '<node pkg="tf" type="static_transform_publisher" name="{child}_tf" args="{p[0]} {p[1]} {p[2]}  {q[0]} {q[1]} {q[2]} {q[3]}  /{parent} /{child} {prd}" />\n'
prd = 100  # This will get replaced later if it needs to be

args.tf_child = args.tf_child[1:] if args.tf_child[0] == '/' else args.tf_child
args.tf_parent = args.tf_parent[1:] if args.tf_parent[0] == '/' else args.tf_parent

listener = tf.TransformListener()

p_original = (0, 0, 0)
rpy_original = (0, 0, 0)
try:
    listener.waitForTransform(args.tf_parent, args.tf_child, rospy.Time(0),
                              rospy.Duration(1))
    (trans, rot) = listener.lookupTransform(args.tf_parent, args.tf_child,
                                            rospy.Time(0))
    euler = trns.euler_from_quaternion(rot)
    p_original = trans
    rpy_original = euler
except tf.Exception:
    print "TF not found, setting everything to zero"


def set_bars(p, rpy):
    cv2.setTrackbarPos("x", "tf", int(toCvLin(p[0])))
    cv2.setTrackbarPos("y", "tf", int(toCvLin(p[1])))
    cv2.setTrackbarPos("z", "tf", int(toCvLin(p[2])))
    cv2.setTrackbarPos("roll", "tf", int(toCvAng(rpy[0])))
Example #7
    def callback(self):
        print('points received')
        height = self.grid_map.info.height
        width = self.grid_map.info.width
        data = -1*np.ones(width*height)

        level3_objects = []
        num_objs = np.unique(self.id_flags).shape[0]
        print(np.unique(self.id_flags))
        for i in range(1, num_objs):
            print(self.ar_flags.shape[0], self.id_flags.shape[0], self.map_points.shape[0])
            assert self.ar_flags.shape[0] == self.id_flags.shape[0] == self.map_points.shape[0]
            ar_flags = self.ar_flags[self.id_flags == i]
            if ar_flags[ar_flags == 3].shape[0] > ar_flags.shape[0]*0.5:
                level3_objects.append(i)
        print(level3_objects)
        if len(level3_objects) == 0:
            print('no object in level 3')

        try:
            (trans_r, rot_r) = self.tflistener.lookupTransform('map', 'base_footprint', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return

        try:
            (trans_h, rot_h) = self.tflistener.lookupTransform('map', 'human_frame', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return
        trans_r = np.array(trans_r)
        quat = self.grid_map.info.origin.orientation
        if quat.w == 0:
            quat.w = 1
        r = R.from_quat([quat.x, quat.y, quat.z, quat.w])
        trans_r = r.apply(trans_r)
        origin_x = self.grid_map.info.origin.position.x
        origin_y = self.grid_map.info.origin.position.y
        src_x = int((trans_r[0] - origin_x)/self.grid_map.info.resolution)
        src_y = int((trans_r[1] - origin_y)/self.grid_map.info.resolution)

        level3_flags = np.zeros(len(level3_objects))
        grid_map = self.grid_map.data
        grid_map = np.array(grid_map).reshape((height, width))
        print(grid_map.shape)
        indx_h = int((trans_h[0] - origin_x)/self.grid_map.info.resolution)
        indy_h = int((trans_h[1] - origin_y)/self.grid_map.info.resolution)
        for i in range(-1, 2, 1):
            for j in range(-1, 2, 1):
                grid_map[indx_h + i, indy_h + j] = 1

        objs_path = dict()
        objs_path['res'] = self.grid_map.info.resolution
        objs_path['origin'] = self.grid_map.info.origin
        while len(level3_objects) > 0:
            print(level3_objects)
            path = self.check_nearest_obj(level3_objects, (src_x, src_y), r, grid_map) #[obj_idx, path, dst, i]
            if path is None:
                print('cost is too high')
                continue
            if path[1] is None or len(path[1]) == 0:
                print('No path found', path[3])
                obj_idx = path[0]
                level3_flags[obj_idx] = 1
                del level3_objects[obj_idx]
                continue

            obj_idx = path[0]
            print('obj_id', path[3], obj_idx)
            level3_flags[obj_idx] = 1
            del level3_objects[obj_idx]
            #send vel cmd go to target

            data = -1*np.ones(width*height)
            objs_path[path[3]] = []
            objs_path[path[3]].append(path[1])
            for stop in path[1]:
                stop = np.array(stop)
                src = np.array([src_x, src_y])
                dirt = stop - src
                # self.pub_vel_command(dirt, rot_r)
                src = stop
                data[width*stop[1] + stop[0]] = 100

            # #find nearest point in level 1 - 2
            cost = 1000000
            min_path = None
            map_points = self.map_points[self.ar_flags == 1]
            msg = self.xyzrgb_array_to_pointcloud2(map_points, np.ones(map_points.shape), stamp=rospy.get_rostime(), frame_id='test_teapot', seq=None)
            self.pub_test_temp.publish(msg)
            self.br.sendTransform((0, 0, 0),
                                tf.transformations.quaternion_from_euler(0, 0, 0),
                                rospy.Time.now(),
                                'test_teapot',
                                'map')

            # moving_stops, idxs = self.find_neareat_point_inlevel1(path[1][-1], map_points, r, np.array([origin_x, origin_y]))
            # grid_map[grid_map == -1] = 0
            # grid_map[grid_map > 0] = 1
            # objs_path[path[3]].append(moving_stops)

            indx_h = int((trans_h[0] - origin_x)/self.grid_map.info.resolution)
            indy_h = int((trans_h[1] - origin_y)/self.grid_map.info.resolution)
            moving_stops = [(indx_h, indy_h)]
            for pid, moving_stop in enumerate(moving_stops):        
                # msg = self.grid_map
                # msg.data = tuple(data)
                # msg.info.width = self.grid_map.info.width
                # msg.info.height = self.grid_map.info.height
                # msg.info.origin = self.grid_map.info.origin
                # self.grid_pub.publish(msg)
                # self.br.sendTransform((0, 0, 0),
                #                     [0, 0, 0, 1],
                #                     rospy.Time.now(),
                #                     "ar_grid_map",
                #                     'map')      
                planner = AstarPlanner(grid=grid_map)
                planned_path, cost = planner.search(path[1][-1], (int(moving_stop[0]), int(moving_stop[1])))
                print(moving_stop, len(planned_path), cost)
                min_path = planned_path
            objs_path[path[3]].append(min_path)

            #loop send vel cmd and update ar location
            for stop in min_path:
                stop = np.array(stop)
                src = path[1][-1]
                dirt = stop - src
                # self.pub_vel_command(dirt, rot_r)
                src = stop
                data[width*stop[1] + stop[0]] = 100
                # self.pub_pose(stop, self.frame_ids[path[3]])
            
            msg = self.grid_map
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = 'ar_grid_map'
            msg.data = tuple(data)
            msg.info.width = self.grid_map.info.width
            msg.info.height = self.grid_map.info.height
            msg.info.origin = self.grid_map.info.origin
            self.grid_pub.publish(msg)
            self.br.sendTransform((0, 0, 0),
                                [0, 0, 0, 1],
                                rospy.Time.now(),
                                "ar_grid_map",
                                'map')

            self.begin_check = 0
            #update occupancy map
            # assert self.map_points.shape[0] == self.id_flags.shape[0]
            # points = self.map_points[self.id_flags == 2]
            # if self.frame_ids[obj_id[1]] == 'teapot_points':
            # msg = self.xyzrgb_array_to_pointcloud2(points, np.ones(points.shape), stamp=rospy.get_rostime(), frame_id='test_teapot', seq=None)
            # self.pub_test_temp.publish(msg)
            # self.br.sendTransform((0, 0, 0),
            #                     tf.transformations.quaternion_from_euler(0, 0, 0),
            #                     rospy.Time.now(),
            #                     'test_teapot',
            #                     'map')
            # print(grid_map.shape)
            src = min_path[0]
            dst = min_path[-1]
            src_x = dst[0]
            src_y = dst[1]
            diff = [dst[0] - src[0], dst[1] - src[1]]
            print(self.id_flags)
            print(self.id_flags[path[3]-1])
            for point in self.map_points[int(self.id_flags[path[3]-1]):int(self.id_flags[path[3]])]:
                indx = int((point[0] - origin_x)/self.grid_map.info.resolution)
                indy = int((point[1] - origin_y)/self.grid_map.info.resolution)
                grid_map[indx, indy] = 0
                grid_map[indx + diff[0], indy + diff[1]] = 1
            grid_map[grid_map > 0] = 100

            msg = self.grid_map
            msg.header.stamp = rospy.Time.now()
            msg.header.frame_id = 'new_grid_map'
            msg.data = tuple(grid_map.reshape(-1))
            msg.info.width = self.grid_map.info.width
            msg.info.height = self.grid_map.info.height
            msg.info.origin = self.grid_map.info.origin
            self.new_grid_pub.publish(msg)
            self.br.sendTransform((0, 0, 0),
                                [0, 0, 0, 1],
                                rospy.Time.now(),
                                "new_grid_map",
                                'map')
            print('sent')
        if self.save:
            print('writing....')
            with open("/home/shuwen/pathv2.p", 'wb') as f:
                pickle.dump(objs_path, f)
            print("writing saved")
Example #8
    def map_ack_data_callback(self, data):
        """ This is the main callback. Data is recieved, processed and sent
            to pose and velocity controllers """

        # Compute desired positions and velocities
        desired = [0 for x in range(12)]
        for i in range(6):
            if (data.axes[i] < 0):
                desired[i] = abs(
                    data.axes[i]) * self.min_pos[i] + self.base_pose[i]
            else:
                desired[i] = data.axes[i] * self.max_pos[i] + self.base_pose[i]
            if i > 2:
                # Normalize angles
                desired[i] = cola2_lib.normalizeAngle(desired[i])

        for i in range(6, 12):
            if (data.axes[i] < 0):
                desired[i] = abs(data.axes[i]) * self.min_vel[i - 6]
            else:
                desired[i] = data.axes[i] * self.max_vel[i - 6]

        # Check if pose controller is enabled
        for b in range(6):
            if data.buttons[b] == 1:
                self.pose_controlled_axis[b] = True
                if self.actualize_base_pose:
                    self.base_pose[b] = self.last_pose[b]
                rospy.loginfo("%s: axis %s now is pose", self.name, str(b))

        # Check if velocity controller is enabled
        for b in range(6, 12):
            if data.buttons[b] == 1:
                self.pose_controlled_axis[b - 6] = False
                rospy.loginfo("%s: axis %s now is velocity", self.name,
                              str(b - 6))

        if self.nav_init:
            # Positions
            world_waypoint_req = WorldWaypointReq()
            world_waypoint_req.goal.priority = GoalDescriptor.PRIORITY_TELEOPERATION
            world_waypoint_req.goal.requester = self.name + '_pose'
            world_waypoint_req.position.north = desired[0]
            world_waypoint_req.position.east = desired[1]
            world_waypoint_req.position.depth = desired[2]
            world_waypoint_req.orientation.roll = desired[3]
            world_waypoint_req.orientation.pitch = desired[4]
            world_waypoint_req.orientation.yaw = desired[5]
            world_waypoint_req.disable_axis.x = not self.pose_controlled_axis[0]
            world_waypoint_req.disable_axis.y = not self.pose_controlled_axis[1]
            world_waypoint_req.disable_axis.z = not self.pose_controlled_axis[2]
            world_waypoint_req.disable_axis.roll = not self.pose_controlled_axis[3]
            world_waypoint_req.disable_axis.pitch = not self.pose_controlled_axis[4]
            world_waypoint_req.disable_axis.yaw = not self.pose_controlled_axis[5]
            world_waypoint_req.header.stamp = rospy.Time().now()
            # if not world_waypoint_req.disable_axis.pitch:
            #    rospy.logfatal("%s: PITCH IS NOT DISABLED!", self.name)
            #    world_waypoint_req.disable_axis.pitch = True

            if (world_waypoint_req.disable_axis.x
                    and world_waypoint_req.disable_axis.y
                    and world_waypoint_req.disable_axis.z
                    and world_waypoint_req.disable_axis.roll
                    and world_waypoint_req.disable_axis.pitch
                    and world_waypoint_req.disable_axis.yaw):
                world_waypoint_req.goal.priority = GoalDescriptor.PRIORITY_TELEOPERATION_LOW

            self.pub_world_waypoint_req.publish(world_waypoint_req)

            # Velocities
            body_velocity_req = BodyVelocityReq()
            body_velocity_req.goal.priority = GoalDescriptor.PRIORITY_TELEOPERATION
            body_velocity_req.goal.requester = self.name + '_vel'
            body_velocity_req.twist.linear.x = desired[6]
            body_velocity_req.twist.linear.y = desired[7]
            body_velocity_req.twist.linear.z = desired[8]
            body_velocity_req.twist.angular.x = desired[9]
            body_velocity_req.twist.angular.y = desired[10]
            body_velocity_req.twist.angular.z = desired[11]
            body_velocity_req.disable_axis.x = self.pose_controlled_axis[0]
            body_velocity_req.disable_axis.y = self.pose_controlled_axis[1]
            body_velocity_req.disable_axis.z = self.pose_controlled_axis[2]
            body_velocity_req.disable_axis.roll = self.pose_controlled_axis[3]
            body_velocity_req.disable_axis.pitch = self.pose_controlled_axis[4]
            body_velocity_req.disable_axis.yaw = self.pose_controlled_axis[5]

            # Disable a DoF when its commanded velocity is below the dead-zone threshold
            if abs(body_velocity_req.twist.linear.x) < 0.025:
                body_velocity_req.disable_axis.x = True

            if abs(body_velocity_req.twist.linear.y) < 0.025:
                body_velocity_req.disable_axis.y = True

            if abs(body_velocity_req.twist.linear.z) < 0.025:
                body_velocity_req.disable_axis.z = True

            if abs(body_velocity_req.twist.angular.x) < 0.01:
                body_velocity_req.disable_axis.roll = True

            if abs(body_velocity_req.twist.angular.y) < 0.01:
                body_velocity_req.disable_axis.pitch = True

            if abs(body_velocity_req.twist.angular.z) < 0.01:
                body_velocity_req.disable_axis.yaw = True

            # If all DoF are disabled set priority to LOW
            if (body_velocity_req.disable_axis.x
                    and body_velocity_req.disable_axis.y
                    and body_velocity_req.disable_axis.z
                    and body_velocity_req.disable_axis.roll
                    and body_velocity_req.disable_axis.pitch
                    and body_velocity_req.disable_axis.yaw):
                body_velocity_req.goal.priority = GoalDescriptor.PRIORITY_TELEOPERATION_LOW

            # Publish message
            body_velocity_req.header.stamp = rospy.Time().now()
            self.pub_body_velocity_req.publish(body_velocity_req)
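
cola2_lib.normalizeAngle above is a project-local helper; here is a hedged stand-in with the usual wrap-to-[-pi, pi) behavior, for readers without that library:

import math

def normalize_angle(angle):
    # Wrap an angle in radians into the interval [-pi, pi).
    return (angle + math.pi) % (2.0 * math.pi) - math.pi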
Example #9
import time

import numpy as np
import rospy
import tf
from PIL import Image
# EnemyPos is this project's custom message type; its package import is not shown in the original

if __name__ == '__main__':
    rospy.init_node('gettf')
    rate = rospy.Rate(50)
    static_map = np.array(Image.open("icra.pgm"))  # renamed from `map`, which shadows the builtin
    enemy_pub = rospy.Publisher('enemy_pos', EnemyPos, queue_size=1)
    enemy_pos = EnemyPos()
    tflistener = tf.TransformListener()
    trans01 = [6.0, 3.0, 0]
    quat01 = [0.0, 0.0, 0.0, 1.0]  # identity quaternion; an all-zero quaternion is not a valid rotation
    T01 = np.mat(tflistener.fromTranslationRotation(trans01, quat01))
    print(T01)
    while not rospy.is_shutdown():
        try:
            trans0s, quat0s = tflistener.lookupTransform("/robot_0/odom", "/robot_0/base_footprint",rospy.Time(0))
            trans1e, quat1e = tflistener.lookupTransform("/robot_1/odom", "/robot_1/base_footprint", rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue

        T0s = np.mat(tflistener.fromTranslationRotation(trans0s, quat0s))
        T1e = np.mat(tflistener.fromTranslationRotation(trans1e, quat1e))
        Ts0 = T0s.I
        Tse = Ts0 * T01 * T1e

        T0e = T01 * T1e
        x0s = int(T0s[0, 3] / 0.05) + 20
        y0s = int(T0s[1, 3] / 0.05) + 20
        x0e = int(T0e[0, 3] / 0.05) + 20
        y0e = int(T0e[1, 3] / 0.05) + 20
        A = np.mat([[x0s, 1], [x0e, 1]])
Example #10
def niryo_demo():
    #Approach vector and offset distance to compensate for gripper length
    approach = [0, 0, -1]
    grasp_offset = -0.12

    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('moveit_niryo_robot')

    n = NiryoOne()
    n.calibrate_auto()
    n.change_tool(TOOL_GRIPPER_2_ID)
    global marker_pose
    robot = None
    tries = 1
    while robot is None and tries < 5:
        try:
            robot = moveit_commander.RobotCommander()
        except:
            tries += 1  # count failed attempts so the retry loop can give up
            print("Could not connect to move_it action server")
    #scene = moveit_commander.PlanningSceneInterface()

    group_name = "arm"
    group = moveit_commander.MoveGroupCommander(group_name)
    group.set_planning_time(10)
    n.activate_learning_mode(False)

    listener = tf.TransformListener()
    #Let the buffer fill so we can get the frame list
    time.sleep(3)
    frame_list = listener.getFrameStrings()

    #	listener.waitForTransform("camera_color_optical_frame", "ground_link", rospy.Time(), rospy.Duration(2.0))
    #	(trans, quat) = listener.lookupTransform("camera_color_optical_frame", "ground_link", rospy.Time(0))
    #	camera_static_transform = subprocess.Popen(
    #				["rosrun", "tf", "static_transform_publisher", str(trans[0]), str(trans[1]), str(trans[2]), str(quat[0]), str(quat[1]), str(quat[2]), str(quat[3]), "100"])

    #	print("[INFO] Camera-robot link established, you can remove the ARUCO marker now.")
    #	rospy.sleep(5)
    #One object only
    #	marker_frame = [e for e in frame_list if e[:7] == "object_"][0]
    while True:
        get_object_marker_position()
        n.open_gripper(TOOL_GRIPPER_2_ID, 200)
        print("[GRIPPER 2 OPENED]")
        print("Transforming point from frame: " + marker_pose.header.frame_id)
        listener.waitForTransform("base_link", marker_pose.header.frame_id,
                                  rospy.Time(), rospy.Duration(2.0))
        #	(trans, quat) = listener.lookupTransform("base_link", marker_frame, rospy.Time(0))
        marker_point = listener.transformPoint(
            "base_link", marker_pose)  # the marker position in arm base frame
        marker_pose = geometry_msgs.msg.PointStamped()
        #print "Translation [x, y, z]    = " + str(trans)
        #print "Orientation [x, y, z, w] = " + str(quat)

        point = marker_point.point
        pick_target = geometry_msgs.msg.Pose()
        pick_target.position.x = point.x + grasp_offset * approach[0]
        pick_target.position.y = point.y + grasp_offset * approach[1]
        pick_target.position.z = point.z + grasp_offset * approach[2]
        pick_target.orientation.x = 0.0
        pick_target.orientation.y = 1.0 / math.sqrt(2)
        pick_target.orientation.z = 0.0
        pick_target.orientation.w = 1.0 / math.sqrt(2)
        group.set_pose_target(pick_target)

        pprint(pick_target)

        plan = group.plan()
        group.go(wait=True)

        n.close_gripper(TOOL_GRIPPER_2_ID, 200)
        print("[GRIPPER 2 CLOSED]")

        place_target = geometry_msgs.msg.Pose()
        place_target.position.x = pick_target.position.x
        place_target.position.y = pick_target.position.y
        place_target.position.z = pick_target.position.z + 0.1  #Lift the cube 10cm
        place_target.orientation.x = 0.0
        place_target.orientation.y = 1.0 / math.sqrt(2)
        place_target.orientation.z = 0.0
        place_target.orientation.w = 1.0 / math.sqrt(2)

        group.set_pose_target(place_target)
        plan = group.plan()
        group.go(wait=True)

        #Place cube in the -y position of starting position
        place_target = geometry_msgs.msg.Pose()
        place_target.position.x = pick_target.position.x
        place_target.position.y = -pick_target.position.y
        place_target.position.z = pick_target.position.z
        place_target.orientation.x = 0.0
        place_target.orientation.y = 1.0 / math.sqrt(2)
        place_target.orientation.z = 0.0
        place_target.orientation.w = 1.0 / math.sqrt(2)

        group.set_pose_target(place_target)
        plan = group.plan()
        group.go(wait=True)

        n.open_gripper(TOOL_GRIPPER_2_ID, 200)
        print("[GRIPPER 2 OPENED]")

        #Move to resting position
        resting_joints = [0, 0.64, -1.39, 0, 0, 0]
        n.move_joints(resting_joints)

        n.close_gripper(TOOL_GRIPPER_2_ID, 200)
        print("[GRIPPER 2 CLOSED]")

    n.activate_learning_mode(True)
    moveit_commander.roscpp_shutdown()
Example #11
#!/usr/bin/env python
import roslib
import rospy
import math
import tf
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    rospy.init_node('pose_listener')

    listener = tf.TransformListener()
    rate = rospy.Rate(10)
    cnt = 0
    while not rospy.is_shutdown():
        try:
            (trans,
             quaternion) = listener.lookupTransform('world', 'wrist',
                                                    rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            continue
        #pose = np.array(pose)
        rot = tf.transformations.euler_from_quaternion(quaternion)
        #print("position {}".format(trans))
        #print("rotation {}".format(np.degrees(rot)))
        cnt += 1
        plt.scatter(cnt, np.degrees(rot[2]), c='#2ca02c')
        plt.scatter(cnt, np.degrees(rot[0]), c='#17becf')
        plt.pause(0.05)

        rate.sleep()
Example #12
    def update(self, action_label, is_done):
        '''
        Compute endpoint positions and update data. Should happen at some
        fixed frequency like 10 hz.

        Parameters:
        -----------
        action_label: name of the high-level action being executed
        is_done: whether this is the final frame of the current action
        '''

        switched = False
        if not self.action == action_label:
            if self.action is not None:
                switched = True
            self.prev_action = self.action
            self.action = action_label
            self.prev_objects.append(self.object)
            self.object = None
        if switched or is_done:
            self.prev_last_goal = self.last_goal
            self.last_goal = len(self.data["label"])
            len_label = len(self.data["label"])

            # Count one more if this is the last frame -- since our goal could
            # not be the beginning of a new action
            if is_done:
                len_label += 1
                extra = 1
            else:
                extra = 0

            rospy.loginfo(
                "Starting new action: " + str(action_label) +
                ", prev was from " + str(self.prev_last_goal) +
                # ' ' + (str(self.data["label"][self.prev_last_goal]) if self.prev_last_goal else "") +
                " to " + str(self.last_goal)
                # ' ' + (str(self.data["label"][self.last_goal]) if self.last_goal else "") +
            )
            self.data["goal_idx"] += (self.last_goal - self.prev_last_goal +
                                      extra) * [self.last_goal]

            len_idx = len(self.data["goal_idx"])
            if not len_idx == len_label:
                rospy.logerr("lens = " + str(len_idx) + ", " + str(len_label))
                raise RuntimeError("incorrectly set goal idx")

        # action text to check will be the string contents after the colon
        label_to_check = action_label.split(':')[-1]

        should_log_this_timestep = (self.object is not None or label_to_check
                                    in self.action_labels_to_always_log)
        if not should_log_this_timestep:
            # here we check if a smartmove object is defined to determine
            # if we should be logging at this time.
            if self.verbose:
                rospy.logwarn(
                    "passing -- has not yet started executing motion")
            return True

        if self.verbose:
            rospy.loginfo("Logging: " + str(self.action) + ", obj = " +
                          str(self.object) + ", prev = " +
                          str(self.prev_objects))

        local_time = rospy.Time.now()
        # this will get the latest available time
        latest_available_time_lookup = rospy.Time(0)

        ##### BEGIN MUTEX
        with self.mutex:
            # get the time for this data sample
            if self.rgb_time is not None:
                t = self.rgb_time
            else:
                t = local_time

            self.t = t
            # make sure we keep the right rgb and depth
            img_jpeg = self.rgb_img
            depth_png = self.depth_img

        have_data = False
        # how many times have we tried to get the transforms
        attempts = 0
        max_attempts = 10
        # the number of attempts that should
        # use the backup timestamps
        backup_timestamp_attempts = 4
        while not have_data:
            try:
                c_pose = self.tf_buffer.lookup_transform(
                    self.base_link, self.camera_frame, t)
                ee_pose = self.tf_buffer.lookup_transform(
                    self.base_link, self.ee_frame, t)
                if self.object:
                    lookup_object = False
                    # Check for the detected object at the current time.
                    try:
                        obj_pose = self.tf_buffer.lookup_transform(
                            self.base_link, self.object, t)
                        lookup_object = True
                    except (tf2.ExtrapolationException,
                            tf2.ConnectivityException) as e:
                        pass
                    if not lookup_object:
                        # If we can't get the current time for the object,
                        # get the latest available. This particular case will be common,
                        # because the object detection script can only run when the
                        # arm is out of the way.
                        obj_pose = self.tf_buffer.lookup_transform(
                            self.base_link, self.object,
                            latest_available_time_lookup)

                rgb_optical_pose = self.tf_buffer.lookup_transform(
                    self.base_link, self.camera_rgb_optical_frame, t)
                depth_optical_pose = self.tf_buffer.lookup_transform(
                    self.base_link, self.camera_depth_optical_frame, t)
                all_tf2_frames_as_string = self.tf_buffer.all_frames_as_string(
                )
                self.tf2_dict = {}
                transform_strings = all_tf2_frames_as_string.split('\n')
                # get all of the other tf2 transforms
                # using the latest available frame as a fallback
                # if the current timestep frame isn't available
                for transform_string in transform_strings:
                    transform_tokens = transform_string.split(' ')
                    if len(transform_tokens) > 1:
                        k = transform_tokens[1]
                        try:

                            lookup_object = False
                            # Check for the detected object at the current time.
                            try:
                                k_pose = self.tf_buffer.lookup_transform(
                                    self.base_link, k, t)
                                lookup_object = True
                            except (tf2.ExtrapolationException,
                                    tf2.ConnectivityException) as e:
                                pass
                            if not lookup_object:
                                # If we can't get the current time for the object,
                                # get the latest available. This particular case will be common,
                                # because the object detection script can only run when the
                                # arm is out of the way.
                                k_pose = self.tf_buffer.lookup_transform(
                                    self.base_link, k,
                                    latest_available_time_lookup)

                            k_xyz_qxqyqzqw = [
                                k_pose.transform.translation.x,
                                k_pose.transform.translation.y,
                                k_pose.transform.translation.z,
                                k_pose.transform.rotation.x,
                                k_pose.transform.rotation.y,
                                k_pose.transform.rotation.z,
                                k_pose.transform.rotation.w
                            ]
                            self.tf2_dict[k] = k_xyz_qxqyqzqw
                        except (tf2.ExtrapolationException,
                                tf2.ConnectivityException) as e:
                            pass

                # don't load the yaml because it can take up to 0.2 seconds
                all_tf2_frames_as_yaml = self.tf_buffer.all_frames_as_yaml()
                self.tf2_json = json.dumps(self.tf2_dict)

                have_data = True
            except (tf2.LookupException, tf2.ExtrapolationException,
                    tf2.ConnectivityException) as e:
                rospy.logwarn_throttle(
                    10.0, 'Collector transform lookup Failed: %s to %s, %s, %s'
                    ' at image time: %s and local time: %s '
                    '\nNote: This message may print >1000x less often than the problem occurs.'
                    % (self.base_link, self.camera_frame, self.ee_frame,
                       str(self.object), str(t),
                       str(latest_available_time_lookup)))

                have_data = False
                attempts += 1
                # rospy.sleep(0.0)
                if attempts > max_attempts - backup_timestamp_attempts:
                    rospy.logwarn_throttle(
                        10.0,
                        'Collector failed to use the rgb image rosmsg timestamp, '
                        'trying latest available time as backup. '
                        'Note: This message may print >1000x less often than the problem occurs.'
                    )
                    # try the backup timestamp even though it will be less accurate
                    t = latest_available_time_lookup
                if attempts > max_attempts:
                    # Could not look up one of the transforms -- either could
                    # not look up camera, endpoint, or object.
                    raise e

        if t == latest_available_time_lookup:
            # Use either the latest available timestamp or
            # the local timestamp as backup,
            # even though it will be less accurate
            if self.rgb_time is not None:
                t = self.rgb_time
            else:
                t = local_time
        c_xyz_quat = pose_to_vec_quat_list(c_pose)
        rgb_optical_xyz_quat = pose_to_vec_quat_list(rgb_optical_pose)
        depth_optical_xyz_quat = pose_to_vec_quat_list(depth_optical_pose)
        ee_xyz_quat = pose_to_vec_quat_list(ee_pose)
        if self.object:
            obj_xyz_quat = pose_to_vec_quat_list(obj_pose)

        self.current_ee_pose = pm.fromTf(pose_to_vec_quat_pair(ee_pose))

        self.data["nsecs"].append(np.copy(self.t.nsecs))  # time
        self.data["secs"].append(np.copy(self.t.secs))  # time
        self.data["pose"].append(
            np.copy(ee_xyz_quat))  # end effector pose (6 DOF)
        self.data["camera"].append(np.copy(c_xyz_quat))  # camera pose (6 DOF)

        if self.object:
            self.data["object_pose"].append(np.copy(obj_xyz_quat))
        elif 'move_to_home' in label_to_check:
            self.data["object_pose"].append(self.home_xyz_quat)
            # TODO(ahundt) should object pose be all 0 or NaN when there is no object?
            # self.data["object_pose"].append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        else:
            raise ValueError("Attempted to log unsupported "
                             "object pose data for action_label " +
                             str(action_label))
        self.data["camera_rgb_optical_frame_pose"].append(rgb_optical_xyz_quat)
        self.data["camera_depth_optical_frame_pose"].append(
            depth_optical_xyz_quat)
        #plt.figure()
        #plt.imshow(self.rgb_img)
        #plt.show()
        # print("jpg size={}, png size={}".format(sys.getsizeof(img_jpeg), sys.getsizeof(depth_png)))
        self.data["image"].append(img_jpeg)  # encoded as JPEG
        self.data["depth_image"].append(depth_png)
        self.data["gripper"].append(self.gripper_msg.gPO / 255.)

        # TODO(cpaxton): verify
        if not self.task.validLabel(action_label):
            raise RuntimeError("action not recognized: " + str(action_label))

        action = self.task.index(action_label)
        self.data["label"].append(action)  # integer code for high-level action

        # Take Mutex ---
        with self.mutex:
            self.data["q"].append(np.copy(self.q))  # joint position
            self.data["dq"].append(np.copy(self.dq))  # joint velocuity
            self.data["info"].append(np.copy(
                self.info))  # string description of current step
            self.data["rgb_info_D"].append(self.rgb_info.D)
            self.data["rgb_info_K"].append(self.rgb_info.K)
            self.data["rgb_info_R"].append(self.rgb_info.R)
            self.data["rgb_info_P"].append(self.rgb_info.P)
            self.data["rgb_info_distortion_model"].append(
                self.rgb_info.distortion_model)
            self.data["depth_info_D"].append(self.depth_info.D)
            self.data["depth_info_K"].append(self.depth_info.K)
            self.data["depth_info_R"].append(self.depth_info.R)
            self.data["depth_info_P"].append(self.depth_info.P)
            self.data["depth_distortion_model"].append(
                self.depth_info.distortion_model)
            if self.object:
                self.data["object"].append(np.copy(self.object))
            else:
                self.data["object"].append('none')

        self.data["all_tf2_frames_as_yaml"].append(all_tf2_frames_as_yaml)
        self.data[
            "all_tf2_frames_from_base_link_vec_quat_xyzxyzw_json"].append(
                self.tf2_json)

        return True
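
The try-the-sample-time-then-fall-back-to-latest pattern appears twice inside update() above; here is a hedged helper capturing it, assuming tf2 is tf2_ros (whose exception types the snippet already uses) and tf_buffer is a tf2_ros.Buffer:

import rospy
import tf2_ros as tf2

def lookup_with_fallback(tf_buffer, target_frame, source_frame, stamp):
    # Try the sample timestamp first; if tf cannot serve it, fall back
    # to the latest available transform (rospy.Time(0)).
    try:
        return tf_buffer.lookup_transform(target_frame, source_frame, stamp)
    except (tf2.ExtrapolationException, tf2.ConnectivityException):
        return tf_buffer.lookup_transform(target_frame, source_frame,
                                          rospy.Time(0))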
Example #13
def opendoor(req):
    # main(whole_body, gripper, wrist_wrench)
    frame = req.handle_pose.header.frame_id
    handle_pos = req.handle_pose.pose

    # handle_pos = geometry_msgs.msg.PoseStamped()
    res = OpendoorResponse()
    cli = actionlib.SimpleActionClient(
        '/hsrb/omni_base_controller/follow_joint_trajectory',
        control_msgs.msg.FollowJointTrajectoryAction)
    vel_pub = rospy.Publisher('/hsrb/command_velocity',
                              geometry_msgs.msg.Twist,
                              queue_size=10)
    armPub = rospy.Publisher('/hsrb/arm_trajectory_controller/command',
                             JointTrajectory,
                             queue_size=1)

    robot = hsrb_interface.Robot()
    whole_body = robot.get('whole_body')
    gripper = robot.get('gripper')
    wrist_wrench = robot.get('wrist_wrench')
    base = robot.get('omni_base')
    start_theta = base.pose[2]
    whole_body.move_to_joint_positions({'head_tilt_joint': -0.18})
    # with hsrb_interface.Robot() as robot:
    # whole_body = robot.get('whole_body')
    # gripper = robot.get('gripper')
    # wrist_wrench = robot.get('wrist_wrench')

    try:
        # Open the gripper
        #whole_body.move_to_neutral()

        whole_body.move_to_joint_positions({'head_tilt_joint': -0.18})
        grasp_point_client()
        global recog_pos
        global is_found
        print(recog_pos.pose.position)

        print("Opening the gripper")
        #whole_body.move_to_neutral()
        #whole_body.move_to_joint_positions({'head_tilt_joint': -0.18})
        rospy.sleep(2.5)
        switch = ImpedanceControlSwitch()
        gripper.command(1.0)
        # Approach to the door

        listener = tf.TransformListener()
        listener.waitForTransform(_ORIGIN_TF, _BASE_TF, rospy.Time(),
                                  rospy.Duration(4.0))
        # translation,rot = listener.lookupTransform(_BASE_TF, _ORIGIN_TF, rospy.Time())
        recog_pos.header.frame_id = _ORIGIN_TF
        recog_pos2 = listener.transformPose(_BASE_TF, recog_pos)

        print("Approach to the door")
        grab_pose = geometry.multiply_tuples(
            geometry.pose(x=recog_pos2.pose.position.x - HANDLE_TO_HAND_POS_X,
                          y=recog_pos2.pose.position.y,
                          z=recog_pos2.pose.position.z,
                          ej=math.pi / 2), geometry.pose(ek=math.pi / 2))
        #grab_pose = geometry.multiply_tuples(geometry.pose(x=recog_pos2.pose.position.x-HANDLE_TO_HAND_POS_X/2,
        #	 y=recog_pos2.pose.position.y-HANDLE_TO_HAND_POS_X/2,
        #	 z=recog_pos2.pose.position.z,
        #ej=0,
        #	 ek=math.pi/2), geometry.pose(ei=math.pi/2))
        # grab_pose = geometry.multiply_tuples(geometry.pose(x=recog_pos.pose.position.x-translation[0]-HANDLE_TO_HAND_POS_X,
        # 			y=recog_pos.pose.position.y-translation[1],
        # 			z=recog_pos.pose.position.z-translation[2],
        # 			ej=math.pi/2),
        # 			geometry.pose(ek=0.0))
        # 			# geometry.pose(ek=math.pi/2))
        rospy.sleep(300)

        whole_body.move_end_effector_pose(grab_pose, _BASE_TF)
        rospy.sleep(1)
        whole_body.move_end_effector_by_line((0, 0, 1), 0.063)

        # Close the gripper
        wrist_wrench.reset()
        switch.activate("grasping")
        gripper.grasp(-0.01)
        rospy.sleep(1.0)
        switch.inactivate()
        # rospy.sleep(100)
        # Rotate the handle
        whole_body.impedance_config = 'grasping'
        traj = JointTrajectory()

        whole_body.move_end_effector_by_line((0, 1, 0), -0.120)
        rospy.sleep(1.5)
        try:
            whole_body.move_end_effector_by_line((0, 0, 1), 0.2)
            rospy.sleep(1)
        except Exception:
            print("hi!")

        rospy.sleep(1)
        try:
            whole_body.move_end_effector_by_line((0, 1, 0), 0.06)
            rospy.sleep(1)
        except Exception:
            print("hi")

        gripper.command(1.0)

        rospy.sleep(2)
        tw = geometry_msgs.msg.Twist()
        tw.linear.x = 0
        tw.linear.y = 0.3
        tw.linear.z = 0
        vel_pub.publish(tw)

        rospy.sleep(2)
        tw1 = geometry_msgs.msg.Twist()
        tw1.linear.x = 0.5
        tw1.linear.y = 0.1
        tw1.linear.z = 0
        vel_pub.publish(tw1)

        rospy.sleep(2)

        # traj = JointTrajectory()

        # # This controller requires that all joints have values
        # traj.joint_names = ["arm_lift_joint", "arm_flex_joint",
        #                     "arm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
        # p = JointTrajectoryPoint()
        # current_positions = [latest_positions[name] for name in traj.joint_names]
        # current_positions[0] = latest_positions["arm_lift_joint"]-0.0675
        # current_positions[1] = latest_positions["arm_flex_joint"]-0.02
        # current_positions[2] = latest_positions["arm_roll_joint"]
        # current_positions[3] = latest_positions["wrist_flex_joint"]
        # current_positions[4] = latest_positions["wrist_roll_joint"]-0.65
        # p.positions = current_positions
        # p.velocities = [0, 0, 0, 0, 0]
        # p.time_from_start = rospy.Time(3)
        # traj.points = [p]

        #armPub.publish(traj)
        #rospy.sleep(3.0)
        # print("finishing rotating handle")
        # ## Move by End-effector line
        # whole_body.impedance_config = 'compliance_hard'
        # whole_body.move_end_effector_by_line((0.0,0.0,1), 0.45)

        print("pull the door")
        print("test 1")
        base.go_rel(0.0, 0.6, 0.0, 300)
        rospy.sleep(3)
        base.go_rel(0.1, 0.0, 0.0, 300)
        rospy.sleep(2)
        base.go_rel(0.1, 0.0, 0.0, 300)
        rospy.sleep(2)
        base.go_rel(0.1, 0.0, 0.0, 300)
        rospy.sleep(2)
        base.go_rel(0.3, 0.0, 0.0, 300)
        rospy.sleep(2)
        whole_body.move_to_neutral()
        rospy.sleep(4)
        res.success = True

    except Exception as e:
        rospy.logerr(e)
        print "Failed to open door"
        res.success = False
    return res
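opendoor() is written as a ROS service handler: it consumes a request and returns an OpendoorResponse. Somewhere a service server has to bind it to a service name; a minimal sketch, where the service name 'open_door' and the srv package door_task.srv are assumptions, not taken from the code above:

# hypothetical wiring; the real service name and srv package are not shown above
import rospy
from door_task.srv import Opendoor

if __name__ == '__main__':
    rospy.init_node('open_door_server')
    rospy.Service('open_door', Opendoor, opendoor)
    rospy.spin()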
Example #14
    def loop(self):
        """ the main loop of the robot. At each iteration, depending on its
        mode (i.e. the finite state machine's state), if takes appropriate
        actions. This function shouldn't return anything """

        try:
            (translation,rotation) = self.trans_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0))
            self.x = translation[0]
            self.y = translation[1]
            euler = tf.transformations.euler_from_quaternion(rotation)
            self.theta = euler[2]
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            pass

        # logs the current mode
        if self.last_mode_printed != self.mode:
            print('-------------Current Mode---------------')
            rospy.loginfo("Current Mode: %s", self.mode)
            print('----------------------------------------')
            self.last_mode_printed = self.mode

        # checks which mode it is in and acts accordingly
        if self.mode == Mode.IDLE:
            # send zero velocity
            self.stay_idle()
            if not self.exploring:
                print('YAY WE ARE DONE!')

        elif self.mode == Mode.POSE:
            # moving towards a desired pose
            if self.close_to(self.x_g,self.y_g,self.theta_g,POS_EPS):
                self.mode = Mode.IDLE
            else:
                self.go_to_pose()

        elif self.mode == Mode.STOP:
            # at a stop sign
            if self.has_stopped():
                self.init_crossing()
            else:
                self.stay_idle()

        elif self.mode == Mode.CROSS:
            # crossing an intersection
            if self.has_crossed():
                self.mode = Mode.NAV
            else:
                self.nav_to_pose()

        elif self.mode == Mode.NAV:

            if self.close_to(self.x_g,self.y_g,self.theta_g,POS_EPS):
                # waypoint reached

                if self.close_to(self.firestation_x,self.firestation_y,self.firestation_theta,3*POS_EPS):
                    # at firestation
                    if (self.exploring):
                        self.wait_for_instr()
                    else:
                        if np.any(self.to_rescue):
                            self.init_rescue()
                        else:
                            self.mode = Mode.IDLE

                else:
                    # non-firestation waypoint reached
                    if (self.exploring):
                        pass
                    else:
                        self.init_rescue()

            else:
                # waypoint not yet reached
                self.nav_to_pose()

        elif self.mode == Mode.WAIT_FOR_INSTR:
            pass

        elif self.mode == Mode.RESCUE:
            # save the animal!
            if self.has_rescued():
                self.update_waypoint()
            else:
                pass

        else:
            raise Exception('This mode is not supported: %s'
                % str(self.mode))
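loop() executes a single FSM step, so the class needs a driver that calls it until shutdown. A minimal sketch of such a run() method; the 10 Hz control rate is an assumption:

    def run(self):
        rate = rospy.Rate(10)  # assumed control rate
        while not rospy.is_shutdown():
            self.loop()   # one FSM step per tick
            rate.sleep()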
Example #15
 def get_height(self):
     return self.tf.lookupTransform(self.odom_frame, self.base_link_frame,
                                    rospy.Time(0))[0][2]
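lookupTransform() returns a (translation, rotation) tuple, so the [0][2] above selects the z component of the translation. The same lookup with explicit unpacking, purely for readability:

 def get_height(self):
     # equivalent to the one-liner above, with the tuple unpacked
     (trans, rot) = self.tf.lookupTransform(self.odom_frame,
                                            self.base_link_frame,
                                            rospy.Time(0))
     return trans[2]  # z component of the translation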
Example #16
def getImageMsg(data, spot_wrapper, inverse_target_frame):
    """Takes the image, camera, and TF data and populates the necessary ROS messages

    Args:
        data: Image proto
        spot_wrapper: A SpotWrapper object
        inverse_target_frame: A frame name whose transform is inverted relative to its parent frame.
    Returns:
        (tuple):
            * Image: message of the image captured
            * CameraInfo: message to define the state and config of the camera that took the image
            * TFMessage: with the transforms necessary to locate the image frames
    """
    tf_msg = TFMessage()
    for frame_name in data.shot.transforms_snapshot.child_to_parent_edge_map:
        if data.shot.transforms_snapshot.child_to_parent_edge_map.get(
                frame_name).parent_frame_name:
            try:
                transform = data.shot.transforms_snapshot.child_to_parent_edge_map.get(
                    frame_name)
                new_tf = TransformStamped()
                local_time = spot_wrapper.robotToLocalTime(
                    data.shot.acquisition_time)
                new_tf.header.stamp = rospy.Time(local_time.seconds,
                                                 local_time.nanos)
                parent = transform.parent_frame_name
                child = frame_name
                if inverse_target_frame == frame_name:
                    geo_tform_inversed = SE3Pose.from_obj(
                        transform.parent_tform_child).inverse()
                    new_tf.header.frame_id = frame_name
                    new_tf.child_frame_id = transform.parent_frame_name
                    new_tf.transform.translation.x = geo_tform_inversed.position.x
                    new_tf.transform.translation.y = geo_tform_inversed.position.y
                    new_tf.transform.translation.z = geo_tform_inversed.position.z
                    new_tf.transform.rotation.x = geo_tform_inversed.rotation.x
                    new_tf.transform.rotation.y = geo_tform_inversed.rotation.y
                    new_tf.transform.rotation.z = geo_tform_inversed.rotation.z
                    new_tf.transform.rotation.w = geo_tform_inversed.rotation.w
                else:
                    new_tf.header.frame_id = transform.parent_frame_name
                    new_tf.child_frame_id = frame_name
                    new_tf.transform.translation.x = transform.parent_tform_child.position.x
                    new_tf.transform.translation.y = transform.parent_tform_child.position.y
                    new_tf.transform.translation.z = transform.parent_tform_child.position.z
                    new_tf.transform.rotation.x = transform.parent_tform_child.rotation.x
                    new_tf.transform.rotation.y = transform.parent_tform_child.rotation.y
                    new_tf.transform.rotation.z = transform.parent_tform_child.rotation.z
                    new_tf.transform.rotation.w = transform.parent_tform_child.rotation.w
                tf_msg.transforms.append(new_tf)
            except Exception as e:
                print('Error: {}'.format(e))

    image_msg = Image()
    local_time = spot_wrapper.robotToLocalTime(data.shot.acquisition_time)
    image_msg.header.stamp = rospy.Time(local_time.seconds, local_time.nanos)
    image_msg.header.frame_id = data.shot.frame_name_image_sensor
    image_msg.height = data.shot.image.rows
    image_msg.width = data.shot.image.cols

    # Color/greyscale formats.
    # JPEG format
    if data.shot.image.format == image_pb2.Image.FORMAT_JPEG:
        image_msg.encoding = "rgb8"
        image_msg.is_bigendian = True
        image_msg.step = 3 * data.shot.image.cols
        image_msg.data = data.shot.image.data

    # Uncompressed.  Requires pixel_format.
    if data.shot.image.format == image_pb2.Image.FORMAT_RAW:
        # One byte per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_GREYSCALE_U8:
            image_msg.encoding = "mono8"
            image_msg.is_bigendian = True
            image_msg.step = data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Three bytes per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGB_U8:
            image_msg.encoding = "rgb8"
            image_msg.is_bigendian = True
            image_msg.step = 3 * data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Four bytes per pixel.
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_RGBA_U8:
            image_msg.encoding = "rgba8"
            image_msg.is_bigendian = True
            image_msg.step = 4 * data.shot.image.cols
            image_msg.data = data.shot.image.data

        # Little-endian uint16 z-distance from camera (mm).
        if data.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_DEPTH_U16:
            image_msg.encoding = "16UC1"
            image_msg.is_bigendian = False
            image_msg.step = 2 * data.shot.image.cols
            image_msg.data = data.shot.image.data

    camera_info_msg = DefaultCameraInfo()
    local_time = spot_wrapper.robotToLocalTime(data.shot.acquisition_time)
    camera_info_msg.header.stamp = rospy.Time(local_time.seconds,
                                              local_time.nanos)
    camera_info_msg.header.frame_id = data.shot.frame_name_image_sensor
    camera_info_msg.height = data.shot.image.rows
    camera_info_msg.width = data.shot.image.cols

    camera_info_msg.K[0] = data.source.pinhole.intrinsics.focal_length.x
    camera_info_msg.K[2] = data.source.pinhole.intrinsics.principal_point.x
    camera_info_msg.K[4] = data.source.pinhole.intrinsics.focal_length.y
    camera_info_msg.K[5] = data.source.pinhole.intrinsics.principal_point.y

    camera_info_msg.P[0] = data.source.pinhole.intrinsics.focal_length.x
    camera_info_msg.P[2] = data.source.pinhole.intrinsics.principal_point.x
    camera_info_msg.P[5] = data.source.pinhole.intrinsics.focal_length.y
    camera_info_msg.P[6] = data.source.pinhole.intrinsics.principal_point.y

    return image_msg, camera_info_msg, tf_msg
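A typical call site publishes the three returned messages on an image topic, a camera-info topic, and /tf. A minimal usage sketch; the publisher names image_pub, camera_info_pub, and tf_pub are assumptions:

# hypothetical call site; the publisher names are assumptions
image_msg, camera_info_msg, tf_msg = getImageMsg(data, spot_wrapper,
                                                 inverse_target_frame)
image_pub.publish(image_msg)              # sensor_msgs/Image
camera_info_pub.publish(camera_info_msg)  # sensor_msgs/CameraInfo
if tf_msg.transforms:
    tf_pub.publish(tf_msg)                # tf2_msgs/TFMessage on /tf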
Example #17
 def publish(self, waypoints):
     lane = Lane()
     lane.header.frame_id = '/world'
     lane.header.stamp = rospy.Time(0)
     lane.waypoints = waypoints
     self.pub.publish(lane)
Example #18
import rospy
import tf2_ros
import geometry_msgs.msg
import turtlesim.srv

if __name__ == '__main__':
    rospy.init_node('current_pose_from_tf')

    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)

    current_pub = rospy.Publisher("/current_pose",
                                  geometry_msgs.msg.PoseStamped,
                                  queue_size=1)

    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        try:
            trans = tfBuffer.lookup_transform("map", "base_link", rospy.Time())
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rate.sleep()
            continue

        cmd = geometry_msgs.msg.PoseStamped()
        cmd.header.stamp = rospy.Time.now()
        cmd.header.frame_id = "map"
        cmd.pose.position.x = trans.transform.translation.x
        cmd.pose.position.y = trans.transform.translation.y
        cmd.pose.position.z = trans.transform.translation.z
        cmd.pose.orientation.w = trans.transform.rotation.w
        cmd.pose.orientation.x = trans.transform.rotation.x
        cmd.pose.orientation.y = trans.transform.rotation.y
        cmd.pose.orientation.z = trans.transform.rotation.z
        current_pub.publish(cmd)
        rate.sleep()
Example #19
 def check_nearest_obj(self, obj_list, src, r, grid_map):
     min_dist = 10000000
     min_path = None
     for obj_idx, i in enumerate(obj_list):
         try:
             (trans_ar, rot_ar) = self.tflistener.lookupTransform('map', self.frame_ids[i], rospy.Time(0))
         except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
             return [obj_idx, None, None, i]
         trans_ar = np.array(trans_ar)
         trans_ar = r.apply(trans_ar)
         indx = int((trans_ar[0] - self.grid_map.info.origin.position.x)/self.grid_map.info.resolution)
         indy = int((trans_ar[1] - self.grid_map.info.origin.position.y)/self.grid_map.info.resolution)
         shifts = list(product(np.arange(-2, 2, 1), np.arange(-2, 2, 1)))
         for shift_x, shift_y in shifts:
             dst = (indx + shift_x, indy + shift_y)
             # grid_map[grid_map == 0] = 1
             grid_map[grid_map == -1] = 0
             grid_map[grid_map > 0] = 1
             planner = AstarPlanner(grid=grid_map)
             path, cost = planner.search(src, dst)
             print(cost)
             if cost < min_dist and len(path) > 0:
                 min_dist = cost
                 min_path = [obj_idx, path, dst, i]
     return min_path
Example #20
if __name__ == "__main__":
    try:
        rospy.init_node("imu_node", anonymous=True)

        # retrieves module corresponding to sensor module and class in rosparam
        # load imu configuration into workspace
        imu_cfg = rospy.get_param(rospy.get_namespace(), 0)
        try:
            assert imu_cfg != 0
        except AssertionError:
            raise ValueError(
                "Imu configuration could not be found in spacecraft cfg file!")

        # find correct sensor whose name ends with "imu"
        imu_cfg = [
            value for key, value in imu_cfg.iteritems() if key.endswith("imu")
        ][0]

        sensor_module = importlib.import_module("rospace_lib.sensor." +
                                                imu_cfg["module"])
        sensor_class = getattr(sensor_module, imu_cfg["class"])
        rate_gyro = sensor_class()

        import_sensor_lib = importlib.import_module("matplotlib.text")

        last_callback_time = rospy.Time(0, 0)
        subs = rospy.Subscriber("pose", PoseVelocityStamped, callback_IMU_pose)
        publish_IMU_message()
    except rospy.ROSInterruptException:
        pass
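The node expects its private namespace to contain one sensor entry whose key ends with "imu" and which names the Python module and class to instantiate. A hypothetical value of the kind rospy.get_param() would have to return; every key and name below is made up for illustration:

# hypothetical result of rospy.get_param(rospy.get_namespace(), 0)
imu_cfg = {
    "chaser_imu": {                      # key must end with "imu"
        "module": "imu_models",          # imported as rospace_lib.sensor.imu_models
        "class": "ThreeAxisGyroscope",   # looked up on the module via getattr
    }
}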
Example #21
    def process_trajectory(self, traj):
        num_points = len(traj.points)

        # make sure the joints in the goal match the joints of the controller
        if set(self.joint_names) != set(traj.joint_names):
            res = FollowJointTrajectoryResult().INVALID_JOINTS
            msg = 'Incoming trajectory joints do not match the joints of the controller'
            rospy.logerr(msg)
            rospy.logerr(' self.joint_names={}'.format(set(self.joint_names)))
            rospy.logerr(' traj.joint_names={}'.format(set(traj.joint_names)))
            self.action_server.set_aborted(result=res, text=msg)
            return

        # make sure trajectory is not empty
        if num_points == 0:
            msg = 'Incoming trajectory is empty'
            rospy.logerr(msg)
            self.action_server.set_aborted(text=msg)
            return

        # correlate the joints we're commanding to the joints in the message
        # map from an index of joint in the controller to an index in the trajectory
        lookup = [traj.joint_names.index(joint) for joint in self.joint_names]
        durations = [0.0] * num_points

        # find out the duration of each segment in the trajectory
        durations[0] = traj.points[0].time_from_start.to_sec()

        for i in range(1, num_points):
            durations[i] = (traj.points[i].time_from_start -
                            traj.points[i - 1].time_from_start).to_sec()

        if not traj.points[0].positions:
            res = FollowJointTrajectoryResult().INVALID_GOAL
            msg = 'First point of trajectory has no positions'
            rospy.logerr(msg)
            self.action_server.set_aborted(result=res, text=msg)
            return

        trajectory = []
        time = rospy.Time.now() + rospy.Duration(0.01)

        for i in range(num_points):
            seg = Segment(self.num_joints)

            if traj.header.stamp == rospy.Time(0.0):
                seg.start_time = (time + traj.points[i].time_from_start
                                  ).to_sec() - durations[i]
            else:
                seg.start_time = (
                    traj.header.stamp +
                    traj.points[i].time_from_start).to_sec() - durations[i]

            seg.duration = durations[i]

            # Checks that the incoming segment has the right number of elements.
            if traj.points[i].velocities and len(
                    traj.points[i].velocities) != self.num_joints:
                res = FollowJointTrajectoryResult().INVALID_GOAL
                msg = 'Command point %d has %d elements for the velocities' % (
                    i, len(traj.points[i].velocities))
                rospy.logerr(msg)
                self.action_server.set_aborted(result=res, text=msg)
                return

            if len(traj.points[i].positions) != self.num_joints:
                res = FollowJointTrajectoryResult().INVALID_GOAL
                msg = 'Command point %d has %d elements for the positions' % (
                    i, len(traj.points[i].positions))
                rospy.logerr(msg)
                self.action_server.set_aborted(result=res, text=msg)
                return

            for j in range(self.num_joints):
                if traj.points[i].velocities:
                    seg.velocities[j] = traj.points[i].velocities[lookup[j]]
                if traj.points[i].positions:
                    seg.positions[j] = traj.points[i].positions[lookup[j]]

            trajectory.append(seg)

        rospy.loginfo('Trajectory start requested at %.3lf, waiting...',
                      traj.header.stamp.to_sec())
        rate = rospy.Rate(self.update_rate)

        while traj.header.stamp > time:
            time = rospy.Time.now()
            rate.sleep()

        end_time = traj.header.stamp + rospy.Duration(sum(durations))
        seg_end_times = [
            rospy.Time.from_sec(trajectory[seg].start_time + durations[seg])
            for seg in range(len(trajectory))
        ]

        rospy.loginfo(
            'Trajectory start time is %.3lf, end time is %.3lf, total duration is %.3lf',
            time.to_sec(), end_time.to_sec(), sum(durations))

        self.trajectory = trajectory
        traj_start_time = rospy.Time.now()

        for seg in range(len(trajectory)):
            rospy.logdebug(
                'current segment is %d time left %f cur time %f' %
                (seg, durations[seg] -
                 (time.to_sec() - trajectory[seg].start_time), time.to_sec()))
            rospy.logdebug('goal positions are: %s' %
                           str(trajectory[seg].positions))

            # first point in trajectories calculated by OMPL is current position with duration of 0 seconds, skip it
            if durations[seg] == 0:
                rospy.logdebug(
                    'skipping segment %d with duration of 0 seconds' % seg)
                continue

            multi_packet = {}

            for port, joints in self.port_to_joints.items():
                vals = []

                for joint in joints:
                    j = self.joint_names.index(joint)

                    start_position = self.joint_states[joint].current_pos
                    if seg != 0:
                        start_position = trajectory[seg - 1].positions[j]

                    desired_position = trajectory[seg].positions[j]
                    desired_velocity = max(
                        self.min_velocity,
                        abs(desired_position - start_position) /
                        durations[seg])

                    self.msg.desired.positions[j] = desired_position
                    self.msg.desired.velocities[j] = desired_velocity

                    # probably need a more elegant way of figuring out if we are dealing with a dual controller
                    if hasattr(self.joint_to_controller[joint], "master_id"):
                        master_id = self.joint_to_controller[joint].master_id
                        slave_id = self.joint_to_controller[joint].slave_id
                        master_pos, slave_pos = self.joint_to_controller[
                            joint].pos_rad_to_raw(desired_position)
                        spd = self.joint_to_controller[joint].spd_rad_to_raw(
                            desired_velocity)
                        vals.append((master_id, master_pos, spd))
                        vals.append((slave_id, slave_pos, spd))
                    else:
                        motor_id = self.joint_to_controller[joint].motor_id
                        pos = self.joint_to_controller[joint].pos_rad_to_raw(
                            desired_position)
                        spd = self.joint_to_controller[joint].spd_rad_to_raw(
                            desired_velocity)
                        vals.append((motor_id, pos, spd))

                multi_packet[port] = vals

            for port, vals in multi_packet.items():
                self.port_to_io[port].set_multi_position_and_speed(vals)

            while time < seg_end_times[seg]:
                # check if new trajectory was received, if so abort current trajectory execution
                # by setting the goal to the current position
                if self.action_server.is_preempt_requested():
                    msg = 'New trajectory received. Aborting old trajectory.'
                    multi_packet = {}

                    for port, joints in self.port_to_joints.items():
                        vals = []

                        for joint in joints:
                            cur_pos = self.joint_states[joint].current_pos

                            motor_id = self.joint_to_controller[joint].motor_id
                            pos = self.joint_to_controller[
                                joint].pos_rad_to_raw(cur_pos)

                            vals.append((motor_id, pos))

                        multi_packet[port] = vals

                    for port, vals in multi_packet.items():
                        self.port_to_io[port].set_multi_position(vals)

                    self.action_server.set_preempted(text=msg)
                    rospy.logwarn(msg)
                    return

                rate.sleep()
                time = rospy.Time.now()

            # Verifies trajectory constraints
            for j, joint in enumerate(self.joint_names):
                if self.trajectory_constraints[
                        j] > 0 and self.msg.error.positions[
                            j] > self.trajectory_constraints[j]:
                    res = FollowJointTrajectoryResult().PATH_TOLERANCE_VIOLATED
                    msg = 'Unsatisfied position constraint for %s, trajectory point %d, %f is larger than %f' % \
                           (joint, seg, self.msg.error.positions[j], self.trajectory_constraints[j])
                    rospy.logwarn(msg)
                    self.action_server.set_aborted(result=res, text=msg)
                    return

        # let motors roll for specified amount of time
        rospy.sleep(self.goal_time_constraint)

        for i, j in enumerate(self.joint_names):
            rospy.logdebug(
                'desired pos was %f, actual pos is %f, error is %f' %
                (trajectory[-1].positions[i], self.joint_states[j].current_pos,
                 self.joint_states[j].current_pos -
                 trajectory[-1].positions[i]))

        # Checks that we have ended inside the goal constraints
        for (joint, pos_error, pos_constraint) in zip(self.joint_names,
                                                      self.msg.error.positions,
                                                      self.goal_constraints):
            if pos_constraint > 0 and abs(pos_error) > pos_constraint:
                res = FollowJointTrajectoryResult().GOAL_TOLERANCE_VIOLATED
                msg = 'Aborting because %s joint wound up outside the goal constraints, %f is larger than %f' % \
                      (joint, pos_error, pos_constraint)
                rospy.logwarn(msg)
                self.action_server.set_aborted(result=res, text=msg)
                break
        else:
            res = FollowJointTrajectoryResult().SUCCESSFUL
            msg = 'Trajectory execution successfully completed'
            rospy.loginfo(msg)
            self.action_server.set_succeeded(result=res, text=msg)
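process_trajectory() relies on a Segment container that is not part of this excerpt; it only ever touches start_time, duration, positions, and velocities. A minimal reconstruction of what such a class could look like (a sketch inferred from the usage above, not the controller's actual definition):

class Segment(object):
    """Plain container for one trajectory segment (reconstructed sketch)."""

    def __init__(self, num_joints):
        self.start_time = 0.0                 # absolute start time in seconds
        self.duration = 0.0                   # segment duration in seconds
        self.positions = [0.0] * num_joints   # goal position per joint
        self.velocities = [0.0] * num_joints  # goal velocity per joint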
Example #22
def wait_for_time():
    """Wait for simulated time to begin.                          
    """
    while rospy.Time().now().to_sec() == 0:
        pass
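The busy-wait matters when use_sim_time is set: rospy.Time.now() stays at zero until the first /clock message arrives. A typical call site, right after node initialization:

rospy.init_node('demo')
wait_for_time()           # block until /clock starts publishing
start = rospy.Time.now()  # non-zero once simulated time is running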
Example #23
    def __init__(self):
        # Give the node a name
        rospy.init_node('calibrate_linear', anonymous=False)

        # Set rospy to execute a shutdown function when terminating the script
        rospy.on_shutdown(self.shutdown)

        # How fast will we check the odometry values?
        self.rate = 10
        r = rospy.Rate(self.rate)

        # Set the distance to travel
        self.test_distance = 1.0  # meters
        self.speed = 1.0  # meters per second
        self.tolerance = 0.01  # meters
        self.odom_linear_scale_correction = 1.0
        self.start_test = True

        # Publisher to control the robot's speed
        self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)

        # The base frame is base_footprint for the TurtleBot but base_link for Pi Robot
        self.base_frame = rospy.get_param('~base_frame', '/base_link')

        # The odom frame is usually just /odom
        self.odom_frame = rospy.get_param('~odom_frame', '/odom')

        # Initialize the tf listener
        self.tf_listener = tf.TransformListener()

        # Give tf some time to fill its buffer
        rospy.sleep(2)

        # Make sure we see the odom and base frames
        self.tf_listener.waitForTransform(self.odom_frame, self.base_frame,
                                          rospy.Time(), rospy.Duration(60.0))

        rospy.loginfo("Bring up rqt_reconfigure to control the test.")

        self.position = Point()

        # Get the starting position from the tf transform between the odom and base frames
        self.position = self.get_position()

        x_start = self.position.x
        y_start = self.position.y

        move_cmd = Twist()

        while not rospy.is_shutdown():
            # Stop the robot by default
            move_cmd = Twist()

            if self.start_test:
                # Get the current position from the tf transform between the odom and base frames
                self.position = self.get_position()

                # Compute the Euclidean distance from the target point
                distance = sqrt(
                    pow((self.position.x - x_start), 2) +
                    pow((self.position.y - y_start), 2))

                # Correct the estimated distance by the correction factor
                distance *= self.odom_linear_scale_correction
                # How close are we?
                error = distance - self.test_distance

                # Are we close enough?
                if not self.start_test or abs(error) < self.tolerance:
                    self.start_test = False
                    params = False
                    rospy.loginfo(params)
                else:
                    # If not, move in the appropriate direction
                    move_cmd.linear.x = copysign(self.speed, -1 * error)
            else:
                self.position = self.get_position()
                x_start = self.position.x
                y_start = self.position.y

            self.cmd_vel.publish(move_cmd)
            r.sleep()

        # Stop the robot
        self.cmd_vel.publish(Twist())
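get_position() is referenced above but not included in this excerpt. Given how the tf listener is initialized, it presumably wraps a lookupTransform between the odom and base frames; a minimal reconstruction under that assumption:

    def get_position(self):
        # look up the latest odom -> base transform and return it as a Point
        try:
            (trans, rot) = self.tf_listener.lookupTransform(
                self.odom_frame, self.base_frame, rospy.Time(0))
        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            rospy.loginfo("TF Exception")
            return self.position  # fall back to the last known position
        return Point(*trans)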
Example #24
def callback(msg, tfBuffer):
    global occdata
    global im2arr
    global width
    global height
    global resolution
    # create numpy array
    occdata = np.array([msg.data])
    # compute histogram to identify percent of bins with -1
    occ_counts = np.histogram(occdata, occ_bins)
    # calculate total number of bins
    total_bins = msg.info.width * msg.info.height
    width = msg.info.width
    height = msg.info.height
    # log the info
    rospy.loginfo('Width: %i Height: %i', msg.info.width, msg.info.height)
    rospy.loginfo('Unmapped: %i Unoccupied: %i Occupied: %i Total: %i',
                  occ_counts[0][0], occ_counts[0][1], occ_counts[0][2],
                  total_bins)

    # find transform to convert map coordinates to base_link coordinates
    # lookup_transform(target_frame, source_frame, time)
    trans = tfBuffer.lookup_transform('map', 'base_link', rospy.Time(0))
    cur_pos = trans.transform.translation
    cur_rot = trans.transform.rotation
    rospy.loginfo(['Trans: ' + str(cur_pos)])
    rospy.loginfo(['Rot: ' + str(cur_rot)])

    # get map resolution
    map_res = msg.info.resolution
    resolution = msg.info.resolution
    # get map origin struct has fields of x, y, and z
    map_origin = msg.info.origin.position
    # get map grid positions for x, y position
    grid_x = int(round((cur_pos.x - map_origin.x) / map_res))
    grid_y = int(round((cur_pos.y - map_origin.y) / map_res))
    rospy.loginfo(['Grid Y: ' + str(grid_y) + ' Grid X: ' + str(grid_x)])

    # make occdata go from 0 instead of -1, reshape into 2D
    oc2 = occdata + 1
    # set all values above 1 (i.e. above 0 in the original map data,
    # representing occupied locations) to 2
    oc3 = (oc2 > 1).choose(oc2, 2)
    # reshape to 2D array using column order
    odata = np.uint8(oc3.reshape(msg.info.height, msg.info.width, order='F'))
    # set current robot location to 0
    rospy.loginfo(['len odata is', len(odata)])
    if len(odata) > 1:
        odata[grid_x][grid_y] = 0
    # create image from 2D array using PIL
    img = Image.fromarray(odata.astype(np.uint8))
    # find center of image
    i_centerx = msg.info.width / 2
    i_centery = msg.info.height / 2
    # translate by curr_pos - centerxy to make sure the rotation is performed
    # with the robot at the center
    # using tips from:
    # https://stackabuse.com/affine-image-transformations-in-python-with-numpy-pillow-and-opencv/
    translation_m = np.array([[1, 0, (i_centerx - grid_y)],
                              [0, 1, (i_centery - grid_x)], [0, 0, 1]])
    # Image.transform function requires the matrix to be inverted
    tm_inv = np.linalg.inv(translation_m)
    # translate the image so that the robot is at the center of the image
    img_transformed = img.transform((msg.info.height, msg.info.width),
                                    Image.AFFINE,
                                    data=tm_inv.flatten()[:6],
                                    resample=Image.NEAREST)

    # convert quaternion to Euler angles
    orientation_list = [cur_rot.x, cur_rot.y, cur_rot.z, cur_rot.w]
    (roll, pitch, yaw) = euler_from_quaternion(orientation_list)
    rospy.loginfo(['Yaw: R: ' + str(yaw) + ' D: ' + str(np.degrees(yaw))])

    # rotate by 180 degrees to invert map so that the forward direction is at the top of the image
    rotated = img_transformed.rotate(np.degrees(-yaw) + 180)
    # we should now be able to access the map around the robot by converting
    # back to a numpy array: im2arr = np.array(rotated)
    im2arr = np.array(rotated)
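Because the callback takes the tf2 buffer as a second argument, it must be registered with callback_args. A minimal wiring sketch; the /map topic name is an assumption, and the callback's other globals (occ_bins and friends) are assumed to be defined elsewhere in the script:

# hypothetical wiring for the callback above
import rospy
import tf2_ros
from nav_msgs.msg import OccupancyGrid

rospy.init_node('occupancy_map_listener')
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
rospy.Subscriber('/map', OccupancyGrid, callback, callback_args=tfBuffer)
rospy.spin()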
Example #25
#!/usr/bin/env python
import rospy
from laser_assembler.srv import AssembleScans2
from sensor_msgs.msg import PointCloud2

rospy.init_node("assemble_scans_to_cloud")
rospy.wait_for_service("assemble_scans2")
assemble_scans = rospy.ServiceProxy('assemble_scans2', AssembleScans2)
pub = rospy.Publisher("/point_Cloud", PointCloud2, queue_size=2)
r = rospy.Rate(10)
while not rospy.is_shutdown():
    try:
        resp = assemble_scans(rospy.Time(0, 0), rospy.get_rostime())
        print("Got cloud with %u points" % len(resp.cloud.data))
        pub.publish(resp.cloud)
    except rospy.ServiceException as e:
        print("service call failed: %s" % e)
    r.sleep()
Example #26
    def spin(self):

        # state
        s = self.sensor_state
        odom = Odometry(header=rospy.Header(frame_id=self.odom_frame),
                        child_frame_id=self.base_frame)
        js = JointState(name=[
            "left_wheel_joint", "right_wheel_joint", "front_castor_joint",
            "back_castor_joint"
        ],
                        position=[0, 0, 0, 0],
                        velocity=[0, 0, 0, 0],
                        effort=[0, 0, 0, 0])

        r = rospy.Rate(self.update_rate)
        last_cmd_vel = 0, 0
        last_cmd_vel_time = rospy.get_rostime()
        last_js_time = rospy.Time(0)
        # We set the retry count to 0 initially so that we only become
        # robust against a few sensor read failures after at least one
        # sensor packet has been received. For some strange reason,
        # sensor read failures can occur when switching to full mode
        # on the Roomba.
        sensor_read_retry_count = 0

        while not rospy.is_shutdown():
            last_time = s.header.stamp
            curr_time = rospy.get_rostime()

            # SENSE/COMPUTE STATE
            try:
                self.sense(s)
                transform = self.compute_odom(s, last_time, odom)
                # Future-date the joint states so that we don't have
                # to publish as frequently.
                js.header.stamp = curr_time + rospy.Duration(1)
            except select.error:
                # packet read can get interrupted, restart loop to
                # check for exit conditions
                continue

            except DriverError:
                if sensor_read_retry_count > 0:
                    rospy.logwarn(
                        'Failed to read sensor package. %d retries left.' %
                        sensor_read_retry_count)
                    sensor_read_retry_count -= 1
                    continue
                else:
                    raise
            sensor_read_retry_count = self._SENSOR_READ_RETRY_COUNT

            # Reboot Create if we detect that charging is necessary.
            if s.charging_sources_available > 0 and \
                   s.oi_mode == 1 and \
                   s.charging_state in [0, 5] and \
                   s.charge < 0.93*s.capacity:
                rospy.loginfo("going into soft-reboot and exiting driver")
                self._robot_reboot()
                rospy.loginfo("exiting driver")
                break

            # Reboot Create if we detect that battery is at critical level switch to passive mode.
            if s.charging_sources_available > 0 and \
                   s.oi_mode == 3 and \
                   s.charging_state in [0, 5] and \
                   s.charge < 0.15*s.capacity:
                rospy.loginfo("going into soft-reboot and exiting driver")
                self._robot_reboot()
                rospy.loginfo("exiting driver")
                break

            # PUBLISH STATE
            self.sensor_state_pub.publish(s)
            self.odom_pub.publish(odom)
            if self.publish_tf:
                self.publish_odometry_transform(odom)
            # 1hz, future-dated joint state
            if curr_time > last_js_time + rospy.Duration(1):
                self.joint_states_pub.publish(js)
                last_js_time = curr_time
            self._diagnostics.publish(s, self._gyro)
            if self._gyro:
                self._gyro.publish(s, last_time)

            # ACT
            if self.req_cmd_vel is not None:
                # check for velocity command and set the robot into full mode if not plugged in
                if s.oi_mode != self.operate_mode and s.charging_sources_available != 1:
                    if self.operate_mode == 2:
                        self._robot_run_safe()
                    else:
                        self._robot_run_full()

                # check for bumper contact and limit drive command
                req_cmd_vel = self.check_bumpers(s, self.req_cmd_vel)

                # Set to None so we know it's a new command
                self.req_cmd_vel = None
                # reset time for timeout
                last_cmd_vel_time = last_time

            else:
                #zero commands on timeout
                if last_time - last_cmd_vel_time > self.cmd_vel_timeout:
                    last_cmd_vel = 0, 0
                # double check bumpers
                req_cmd_vel = self.check_bumpers(s, last_cmd_vel)

            # send command
            self.drive_cmd(*req_cmd_vel)
            # record command
            last_cmd_vel = req_cmd_vel

            r.sleep()
Example #27
    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]

    rate = rospy.Rate(50.0)

    limits = 0.015

    while not rospy.is_shutdown():
        try:
            trans = tfBuffer.lookup_transform('world',
                                              'controller_LHR_FF777F05',
                                              rospy.Time())
#            rospy.loginfo(trans)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rospy.loginfo("error")
            rate.sleep()
            continue
        if vrb.trigger_pressed_event == 1:
            pre_position[0] = trans.transform.translation.x
            pre_position[1] = trans.transform.translation.y
            pre_position[2] = trans.transform.translation.z

            pre_pose[0] = trans.transform.rotation.x
            pre_pose[1] = trans.transform.rotation.y
            pre_pose[2] = trans.transform.rotation.z
            pre_pose[3] = trans.transform.rotation.w
Example #28
    def broadcast_frame(self, msg):
        self.num_objects = msg.data
        # print ('we have the frame')

        ################## Place Positions #######################

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.15, -0.2, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "scissors_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.5, -0.2, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "water_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.3, -0.2, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "hammer_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.3, -0.4, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "bowl_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.2, -0.4, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "chip_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.5, -0.4, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "lemon_place_position", "root")

        if self.listen.frameExists("/root") and self.listen.frameExists(
                "/world"):
            # print ('we have the frame')
            t = self.listen.getLatestCommonTime("/root", "/world")
            translation, quaternion = self.listen.lookupTransform(
                "/root", "/world", rospy.Time(0))

            # Set the required rotation and translation w.r.t. the object frame
            requrd_rot = (np.pi / 2, 0, 0)  # in radians
            requrd_trans = (0.5, -0.45, 0.1)
            # calculate an offset frame without reference to the object frame
            pose = self.getOffsetPoses(translation, quaternion, requrd_rot,
                                       requrd_trans)
            trans_1 = tuple(pose[:3])
            quat_1 = tuple(pose[3:])

            self.broadcast.sendTransform(trans_1, quat_1, rospy.Time.now(),
                                         "banana_place_position", "root")
Example #29
    def __init__(self):
        try:
            self.position_publisher = rospy.Publisher('/current_position',
                                                      Point,
                                                      queue_size=10)
            self.heading_publisher = rospy.Publisher('/current_heading',
                                                     Float32,
                                                     queue_size=10)

            self.offset_change_x_subscriber = rospy.Subscriber(
                '/offset_change_x', Float32, self.callback_offset_change_x)
            self.offset_change_y_subscriber = rospy.Subscriber(
                '/offset_change_y', Float32, self.callback_offset_change_y)
            self.offset_change_theta_subscriber = rospy.Subscriber(
                '/offset_change_theta', Float32,
                self.callback_offset_change_theta)

            self.offset_x = 0
            self.offset_y = 0
            self.offset_theta = 0

            self.pnt = Point()
            self.heading = Float32()

            self.isFirst = True

            self.loop_cnt = 0
            self.isGoodToGo = False

            self.tf_listener = tf.TransformListener()
            self.odom_frame = '/odom'

            try:
                self.tf_listener.waitForTransform(self.odom_frame,
                                                  '/base_footprint',
                                                  rospy.Time(),
                                                  rospy.Duration(1.0))
                self.base_frame = '/base_footprint'
            except (tf.Exception, tf.ConnectivityException,
                    tf.LookupException):
                try:
                    self.tf_listener.waitForTransform(self.odom_frame,
                                                      '/base_link',
                                                      rospy.Time(),
                                                      rospy.Duration(1.0))
                    self.base_frame = '/base_link'
                except (tf.Exception, tf.ConnectivityException,
                        tf.LookupException):
                    rospy.loginfo(
                        "Cannot find transform between /odom and /base_link or /base_footprint"
                    )
                    rospy.signal_shutdown("tf Exception")

            while True:
                # wait for 2 s after booting before setting position & heading
                if self.isGoodToGo == False:
                    if self.loop_cnt == 200:
                        self.isGoodToGo = True
                    else:
                        self.loop_cnt = self.loop_cnt + 1
                else:
                    (trans, rot) = self.tf_listener.lookupTransform(
                        self.odom_frame, self.base_frame, rospy.Time(0))
                    self.pnt = Point(*trans)
                    self.heading.data = euler_from_quaternion(rot)[2]

                    if self.isFirst:
                        self.offset_x = self.pnt.x
                        self.offset_y = self.pnt.y
                        #self.offset_theta=self.heading.data-pi/2.0
                        self.offset_theta = self.heading.data
                        self.isFirst = False

                    self.pnt.x = self.pnt.x - self.offset_x
                    self.pnt.y = self.pnt.y - self.offset_y
                    self.heading.data = normalize_rad(self.heading.data -
                                                      self.offset_theta)

                    self.position_publisher.publish(self.pnt)
                    self.heading_publisher.publish(self.heading)

                sleep(0.01)

        except (tf.Exception, tf.ConnectivityException, tf.LookupException):
            rospy.loginfo("TF Exception at wheel_odometry.py")
            sys.exit()

        except Exception as e:
            print(e)
            sys.exit()
Example #30
def main():
    parser = argparse.ArgumentParser(
        description=("Extracts grayscale and event images from a ROS bag and "
                     "saves them as TFRecords for training in TensorFlow."))
    parser.add_argument("--bag",
                        dest="bag",
                        help="Path to ROS bag.",
                        required=True)
    parser.add_argument("--prefix",
                        dest="prefix",
                        help="Output file prefix.",
                        required=True)
    parser.add_argument("--output_folder",
                        dest="output_folder",
                        help="Output folder.",
                        required=True)
    parser.add_argument(
        "--max_aug",
        dest="max_aug",
        help="Maximum number of images to combine for augmentation.",
        type=int,
        default=6)
    parser.add_argument(
        "--n_skip",
        dest="n_skip",
        help="Maximum number of images to combine for augmentation.",
        type=int,
        default=1)
    parser.add_argument("--start_time",
                        dest="start_time",
                        help="Time to start in the bag.",
                        type=float,
                        default=0.0)
    parser.add_argument("--end_time",
                        dest="end_time",
                        help="Time to end in the bag.",
                        type=float,
                        default=-1.0)

    args = parser.parse_args()

    bridge = CvBridge()

    n_msgs = 0
    left_event_image_iter = 0
    right_event_image_iter = 0
    left_image_iter = 0
    right_image_iter = 0
    first_left_image_time = -1
    first_right_image_time = -1

    left_events = []
    right_events = []
    left_images = []
    right_images = []
    left_image_times = []
    right_image_times = []
    left_event_count_images = []
    left_event_time_images = []
    left_event_image_times = []

    right_event_count_images = []
    right_event_time_images = []
    right_event_image_times = []

    cols = 346
    rows = 260
    print("Processing bag")
    bag = Bag(args.bag)

    left_tf_writer = tf.python_io.TFRecordWriter(
        os.path.join(args.output_folder, args.prefix,
                     "left_event_images.tfrecord"))
    right_tf_writer = tf.python_io.TFRecordWriter(
        os.path.join(args.output_folder, args.prefix,
                     "right_event_images.tfrecord"))

    # Get actual time for the start of the bag.
    t_start = bag.get_start_time()
    t_start_ros = rospy.Time(t_start)
    # Set the time at which the bag reading should end.
    if args.end_time == -1.0:
        t_end = bag.get_end_time()
    else:
        t_end = t_start + args.end_time

    eps = 0.1
    for topic, msg, t in bag.read_messages(
            topics=[
                '/davis/left/image_raw', '/davis/right/image_raw',
                '/davis/left/events', '/davis/right/events'
            ],
            start_time=rospy.Time(max(args.start_time, eps) - eps + t_start),
            end_time=rospy.Time(t_end)):
        # Check to make sure we're working with stereo messages.
        if not ('left' in topic or 'right' in topic):
            print(
                'ERROR: topic {} does not contain left or right, is this stereo?'
                'If not, you will need to modify the topic names in the code.'.
                format(topic))
            return

        # Counter for status updates.
        n_msgs += 1
        if n_msgs % 500 == 0:
            print("Processed {} msgs, {} images, time is {}.".format(
                n_msgs, left_event_image_iter,
                t.to_sec() - t_start))

        isLeft = 'left' in topic
        if 'image' in topic:
            width = msg.width
            height = msg.height
            if width != cols or height != rows:
                print(
                    "Image dimensions are not what we expected: set: ({} {}) vs  got:({} {})"
                    .format(cols, rows, width, height))
                return
            time = msg.header.stamp
            if time.to_sec() - t_start < args.start_time:
                continue
            image = np.asarray(bridge.imgmsg_to_cv2(msg, msg.encoding))
            image = np.reshape(image, (height, width))

            if isLeft:
                cv2.imwrite(
                    os.path.join(
                        args.output_folder, args.prefix,
                        "left_image{:05d}.png".format(left_image_iter)), image)
                if left_image_iter > 0:
                    left_image_times.append(time)
                else:
                    first_left_image_time = time
                    left_event_image_times.append(time.to_sec())
                    # filter events we added previously
                    left_events = filter_events(
                        left_events, left_event_image_times[-1] - t_start)
                left_image_iter += 1
            else:
                cv2.imwrite(
                    os.path.join(
                        args.output_folder, args.prefix,
                        "right_image{:05d}.png".format(right_image_iter)),
                    image)
                if right_image_iter > 0:
                    right_image_times.append(time)
                else:
                    first_right_image_time = time
                    right_event_image_times.append(time.to_sec())
                    # filter events we added previously
                    right_events = filter_events(
                        right_events, right_event_image_times[-1] - t_start)

                right_image_iter += 1
        elif 'events' in topic and msg.events:
            # Add events to list.
            for event in msg.events:
                ts = event.ts
                event = [
                    event.x, event.y, (ts - t_start_ros).to_sec(),
                    (float(event.polarity) - 0.5) * 2
                ]
                if isLeft:
                    # add event if it was after the first image or we haven't seen the first image
                    if first_left_image_time == -1 or ts > first_left_image_time:
                        left_events.append(event)
                elif first_right_image_time == -1 or ts > first_right_image_time:
                    right_events.append(event)
            if isLeft:
                if len(left_image_times) >= args.max_aug and\
                   left_events[-1][2] > (left_image_times[args.max_aug-1]-t_start_ros).to_sec():
                    left_event_image_iter = _save_events(
                        left_events, left_image_times, left_event_count_images,
                        left_event_time_images, left_event_image_times, rows,
                        cols, args.max_aug, args.n_skip, left_event_image_iter,
                        args.prefix, 'left', left_tf_writer, t_start_ros)
            else:
                if len(right_image_times) >= args.max_aug and\
                   right_events[-1][2] > (right_image_times[args.max_aug-1]-t_start_ros).to_sec():
                    right_event_image_iter = _save_events(
                        right_events, right_image_times,
                        right_event_count_images, right_event_time_images,
                        right_event_image_times, rows, cols, args.max_aug,
                        args.n_skip, right_event_image_iter, args.prefix,
                        'right', right_tf_writer, t_start_ros)

    left_tf_writer.close()
    right_tf_writer.close()

    image_counter_file = open(
        os.path.join(args.output_folder, args.prefix, "n_images.txt"), 'w')
    image_counter_file.write("{} {}".format(left_event_image_iter,
                                            right_event_image_iter))
    image_counter_file.close()
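filter_events() and _save_events() are defined elsewhere in the script. From the call sites above, filter_events drops every event older than a given relative timestamp (events carry their time at index 2). A minimal reconstruction under that assumption, plus the usual entry point:

def filter_events(events, ts):
    # keep only events at or after the given relative timestamp;
    # reconstructed from the call sites above, the real helper may differ
    return [event for event in events if event[2] >= ts]


if __name__ == "__main__":
    main()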