Code Example #1
File: test_align.py   Project: m4nh/t-less_toolkit
    # Tail of a helper whose head is truncated in this excerpt: append the
    # homogeneous row [0, 0, 0, 1] to complete a 4x4 transformation matrix.
    T = numpy.concatenate([T, numpy.array([0, 0, 0, 1]).reshape(1, 4)], 0)
    return T


images_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_rgb_image_raw_compressed'
dataset_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome.roars'
camera_extrinsics_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_extrinsics.txt'
camera_intrinsics_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/camera_intrisics.txt'
poses_path = '/Users/daniele/Desktop/to_delete/roars_dataset/indust_scene_1_dome/robot_poses.txt'
model_path = '/Users/daniele/Downloads/industrial_part_color.ply'


#######################################
# Model
#######################################
model = inout.load_ply(model_path)
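# Note (inferred from usage, not from load_ply's documentation): the returned
# model behaves like a dict; Example #5 below reads its vertices as model['pts'].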


#######################################
# Dataset data
#######################################
with open(dataset_path) as f:
    json_data = json.load(f)


#######################################
# Intrinsics
#######################################
K_raw = np.loadtxt(camera_intrinsics_path)
# Flat intrinsics vector: positions 2-5 map to fx, fy, cx, cy.
K = np.array([
    [K_raw[2], 0, K_raw[4]],
    [0, K_raw[3], K_raw[5]],
    [0, 0, 1]])
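
The first snippet in this example opens mid-function. A minimal sketch of how
such a helper is commonly written, assuming R is a 3x3 rotation and t a
3-vector (the name and signature are hypothetical; the original head is
truncated in this excerpt):

import numpy

def make_transform(R, t):  # hypothetical signature; original head truncated
    # Stack [R | t] into a 3x4 matrix, then append the homogeneous row
    # [0, 0, 0, 1], exactly as the excerpt's tail does.
    T = numpy.concatenate([numpy.asarray(R), numpy.asarray(t).reshape(3, 1)], 1)
    T = numpy.concatenate([T, numpy.array([0, 0, 0, 1]).reshape(1, 4)], 0)
    return T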
Code Example #2
    # Load info about the test images (including camera parameters etc.)
    scene_info_path = scene_info_path_mask.format(device, scene_id)
    scene_info = inout.load_scene_info(scene_info_path)

    scene_gt_path = scene_gt_path_mask.format(device, scene_id)
    gts = inout.load_scene_gt(scene_gt_path)

    # Load models of objects present in the scene
    scene_obj_ids = set()
    for gt in gts[0]:
        scene_obj_ids.add(gt['obj_id'])
    models = {}
    for scene_obj_id in scene_obj_ids:
        model_path = model_path_mask.format(scene_obj_id)
        models[scene_obj_id] = inout.load_ply(model_path)

    for im_id, im_info in scene_info.items():
        if im_id % im_step != 0:
            continue
        print('scene: ' + str(scene_id) + ', device: ' + device + ', im_id: ' +
              str(im_id))

        # Get intrinsic camera parameters
        K = im_info['cam_K']

        # Visualization #1
        #-----------------------------------------------------------------------
        # Load RGB image
        rgb_path = rgb_path_mask.format(device, scene_id, im_id,
                                        rgb_ext[device])
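
The ground-truth records loaded here are consumed field by field across
Examples #2-#4; the layout below is inferred from that usage, not from the
loader's documentation:

# Per-object ground-truth entry, as accessed in these examples:
for gt in gts[0]:
    obj_id = gt['obj_id']    # integer object identifier
    R = gt['cam_R_m2c']      # model-to-camera rotation
    t = gt['cam_t_m2c']      # model-to-camera translation
    bb = gt['obj_bb']        # 2D bounding box of the object in the image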
Code Example #3
    # Load info about the test images (including camera parameters etc.)
    scene_info_path = scene_info_path_mask.format(device, scene_id)
    scene_info = inout.load_info(scene_info_path)

    scene_gt_path = scene_gt_path_mask.format(device, scene_id)
    scene_gt = inout.load_gt(scene_gt_path)

    # Load models of objects present in the scene
    scene_obj_ids = set()
    for gt in scene_gt[0]:
        scene_obj_ids.add(gt['obj_id'])
    models = {}
    for scene_obj_id in scene_obj_ids:
        model_path = model_path_mask.format(scene_obj_id)
        models[scene_obj_id] = inout.load_ply(model_path)

    for im_id, im_info in scene_info.items():
        if im_id % im_step != 0:
            continue
        print('scene: ' + str(scene_id) + ', device: ' + device + ', im_id: ' + str(im_id))

        # Get intrinsic camera parameters
        K = im_info['cam_K']

        # Visualization #1
        #-----------------------------------------------------------------------
        # Load RGB image
        rgb_path = rgb_path_mask.format(device, scene_id, im_id, rgb_ext[device])
        rgb = scipy.misc.imread(rgb_path)
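
scipy.misc.imread and scipy.misc.imsave were removed in SciPy 1.2, so
Examples #3 and #4 need an older SciPy with Pillow installed. On a current
stack, imageio is a common drop-in (an assumption, not part of the original
project):

import imageio.v2 as imageio  # pip install imageio

rgb = imageio.imread(rgb_path)    # in place of scipy.misc.imread
# imageio.imwrite(out_path, img)  # in place of scipy.misc.imsave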
Code Example #4
def test(ctx):
    config = ctx.obj['config']
    scene_ids = range(1, 21)
    device = config['device']
    model_type = config['model_type']
    im_step = int(config['image_step'])

    data_path = config['dataset_path']
    output_dir = config['output_path']
    # Paths to the elements of the T-LESS dataset
    model_path_mask = os.path.join(data_path, 'models_' + model_type,
                                   'obj_{:02d}.ply')
    scene_info_path_mask = os.path.join(data_path, 'test_{}', '{:02d}',
                                        'info.yml')
    scene_gt_path_mask = os.path.join(data_path, 'test_{}', '{:02d}', 'gt.yml')
    rgb_path_mask = os.path.join(data_path, 'test_{}', '{:02d}', 'rgb',
                                 '{:04d}.{}')
    depth_path_mask = os.path.join(data_path, 'test_{}', '{:02d}', 'depth',
                                   '{:04d}.png')
    rgb_ext = {'primesense': 'png', 'kinect': 'png', 'canon': 'jpg'}
    obj_colors_path = os.path.join('data', 'obj_rgb.txt')
    vis_rgb_path_mask = os.path.join(output_dir, '{:02d}_{}_{}_{:04d}_rgb.png')
    vis_depth_path_mask = os.path.join(output_dir,
                                       '{:02d}_{}_{}_{:04d}_depth_diff.png')

    misc.ensure_dir(output_dir)
    obj_colors = inout.load_colors(obj_colors_path)

    plt.ioff()  # Turn interactive plotting off

    for scene_id in scene_ids:

        # Load info about the test images (including camera parameters etc.)
        scene_info_path = scene_info_path_mask.format(device, scene_id)
        scene_info = inout.load_scene_info(scene_info_path)

        scene_gt_path = scene_gt_path_mask.format(device, scene_id)
        gts = inout.load_scene_gt(scene_gt_path)

        # Load models of objects present in the scene
        scene_obj_ids = set()
        for gt in gts[0]:
            scene_obj_ids.add(gt['obj_id'])
        models = {}
        for scene_obj_id in scene_obj_ids:
            model_path = model_path_mask.format(scene_obj_id)
            models[scene_obj_id] = inout.load_ply(model_path)

        for im_id, im_info in scene_info.items():
            if im_id % im_step != 0:
                continue
            print('scene: ' + str(scene_id) + ', device: ' + device +
                  ', im_id: ' + str(im_id))

            # Get intrinsic camera parameters
            K = im_info['cam_K']

            # Visualization #1
            # -----------------------------------------------------------------------
            # Load RGB image
            rgb_path = rgb_path_mask.format(device, scene_id, im_id,
                                            rgb_ext[device])
            rgb = scipy.misc.imread(rgb_path)

            im_size = (rgb.shape[1], rgb.shape[0])
            vis_rgb = np.zeros(rgb.shape, float)  # np.float was removed in NumPy 1.24; builtin float is equivalent
            for gt in gts[im_id]:
                model = models[gt['obj_id']]
                R = gt['cam_R_m2c']
                t = gt['cam_t_m2c']
                surf_color = obj_colors[gt['obj_id'] - 1]

                ren_rgb = renderer.render(model,
                                          im_size,
                                          K,
                                          R,
                                          t,
                                          surf_color=surf_color,
                                          mode='rgb')

                # Debug preview: shows the rendered object and blocks until
                # a key is pressed.
                import cv2
                cv2.imshow("test", ren_rgb)
                cv2.waitKey()

                # Draw the bounding box of the object
                ren_rgb = misc.draw_rect(ren_rgb, gt['obj_bb'])

                vis_rgb += 0.7 * ren_rgb.astype(float)

            # Save the visualization
            vis_rgb = 0.6 * vis_rgb + 0.4 * rgb
            vis_rgb[vis_rgb > 255] = 255
            vis_rgb_path = vis_rgb_path_mask.format(scene_id, device,
                                                    model_type, im_id)
            scipy.misc.imsave(vis_rgb_path, vis_rgb.astype(np.uint8))

            # Visualization #2
            # -----------------------------------------------------------------------
            if device != 'canon':
                # Load depth image
                depth_path = depth_path_mask.format(device, scene_id, im_id)  # mask hardcodes .png
                depth = scipy.misc.imread(depth_path)  # Unit: 0.1 mm
                depth = depth.astype(float) * 0.1  # Convert to mm

                # Render the objects at the ground truth poses
                im_size = (depth.shape[1], depth.shape[0])
                ren_depth = np.zeros(depth.shape, float)
                for gt in gts[im_id]:
                    model = models[gt['obj_id']]
                    R = gt['cam_R_m2c']
                    t = gt['cam_t_m2c']

                    # Render the current object
                    ren_depth_obj = renderer.render(model,
                                                    im_size,
                                                    K,
                                                    R,
                                                    t,
                                                    mode='depth')

                    # Add to the final depth map only the parts of the surface that
                    # are closer than the surfaces rendered before
                    visible_mask = np.logical_or(ren_depth == 0,
                                                 ren_depth_obj < ren_depth)
                    mask = np.logical_and(ren_depth_obj != 0, visible_mask)
                    ren_depth[mask] = ren_depth_obj[mask].astype(float)

                # Calculate the depth difference at pixels where both depth maps
                # are valid
                valid_mask = (depth > 0) * (ren_depth > 0)
                depth_diff = valid_mask * (depth - ren_depth.astype(float))

                # Save the visualization
                vis_depth_path = vis_depth_path_mask.format(
                    scene_id, device, model_type, im_id)
                plt.matshow(depth_diff)
                plt.title('captured - rendered depth [mm]')
                plt.colorbar()
                plt.savefig(vis_depth_path, bbox_inches='tight', pad_inches=0)
                plt.close()
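
The per-object depth compositing above is a z-buffer merge: each object's
rendering overwrites a pixel only where it is the closest valid surface so
far. A minimal standalone sketch of the same logic (function name
hypothetical):

import numpy as np

def zbuffer_merge(ren_depth, ren_depth_obj):
    # A pixel from the new rendering wins where it is valid (non-zero) and
    # either nothing was drawn there yet or the new surface is closer.
    visible = np.logical_or(ren_depth == 0, ren_depth_obj < ren_depth)
    mask = np.logical_and(ren_depth_obj != 0, visible)
    merged = ren_depth.copy()
    merged[mask] = ren_depth_obj[mask]
    return merged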
Code Example #5
    def __init__(self):
        super(UR5_UI, self).__init__()
        loadUi('UR_Robot_image_collector.ui', self)

        self.setWindowTitle('UR5_UI')

        self.bridge = CvBridge()
        rospy.set_param('calculation', False)  ###

        self.objects = [
            'aloe', 'apple', 'banana', 'board_eraser', 'clamp', 'cube', 'cup',
            'diget', 'diget_sand', 'duckie', 'dumbbell', 'glue', 'gotica',
            'orange', 'padlock', 'screw_driver', 'small_spam', 'tomato_soup',
            'vitamin_water'
        ]
        num_objects = len(self.objects)

        for i in range(num_objects):
            self.Class_Selector.addItem(self.objects[i])
            self.Object_List.addItem(self.objects[i])

        self.save_path = "/media/irobot/Samsung_T5/YOLO/desk_yolo/light_off"

        self.corners3D_list = []
        self.model_list = []

        for i in range(num_objects):
            meshname = "/home/irobot/David/3D_pose_estimation/real-time_detection/models/%s.ply" % self.objects[i]
            mesh = MeshPly(meshname)
            vertices = np.c_[np.array(mesh.vertices),
                             np.ones((len(mesh.vertices), 1))].transpose()
            vertices = vertices * 1000.0
            corners3D = self.get_3D_corners(vertices)
            self.corners3D_list.append(corners3D)

            model = inout.load_ply(meshname)
            model['pts'] = model['pts'] * 1000.0  # scale units, presumably metres to millimetres
            self.model_list.append(model)

        self.internal_calibration = self.get_camera_intrinsic()

        self.capture_trigger = False
        self.cal_working = False

        self.rgb_save = []
        self.rotation_save = []
        self.translation_save = []
        self.depth_save = []
        self.inspection_save = []
        self.proj_2d_gt_save = []
        self.save_count = 7010  ## JS JS JS
        self.tmp_idx = 0

        self.guide_xcen = 320
        self.guide_ycen = 240
        self.guide_width = 50
        self.guide_height = 50

        self.ppx = 314.00128173828125
        self.ppy = 242.40391540527344
        self.fu = 614.14501953125
        self.fv = 614.680419921875

        self.z_cam_to_tool = 0.520

        self.progressing = False

        self.image_size = [640, 480]
        self.trans_x = [0] * num_objects
        self.trans_y = [0] * num_objects
        self.trans_z = [0] * num_objects

        self.orien_r = [0] * num_objects
        self.orien_p = [0] * num_objects
        self.orien_y = [0] * num_objects

        self.orien_r_btn = [0] * num_objects
        self.orien_p_btn = [0] * num_objects
        self.orien_y_btn = [0] * num_objects

        zero_trans = np.zeros((3, 1))
        zero_rotations = np.zeros((3, 3))
        zero_proj_2d = np.zeros((8, 2))

        self.translation_matrix_list = [zero_trans] * num_objects
        self.rotation_matrix_list = [zero_rotations] * num_objects
        self.proj_2d_gt_list = [zero_proj_2d] * num_objects

        # self.group

        rospy.Subscriber("/camera/color/image_raw", Image, self.callback_rgb)
        rospy.Subscriber("/tag_detections", AprilTagDetectionArray,
                         self.callback_april)
        # rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image, self.callback_depth)

        # self.result = Result()
        self.frame = None
        self.frame_result = None

        self.handling_item = None
        self.selected_item = None
        self.activated_items = []
        self.color_items = []

        ############ Button Connection ############
        self.Streaming_Start_Btn.clicked.connect(self.start)
        self.Pose1_Btn.clicked.connect(self.pose1)
        self.Home_Btn.clicked.connect(self.home)
        self.Grasp_Btn.clicked.connect(self.grasp)
        self.Save_Count_Btn.clicked.connect(self.save_count_setup)

        self.Orien_roll_down.clicked.connect(
            lambda: self.change_button('roll_down'))
        self.Orien_roll_up.clicked.connect(
            lambda: self.change_button('roll_up'))
        self.Orien_pitch_down.clicked.connect(
            lambda: self.change_button('pitch_down'))
        self.Orien_pitch_up.clicked.connect(
            lambda: self.change_button('pitch_up'))
        self.Orien_yaw_down.clicked.connect(
            lambda: self.change_button('yaw_down'))
        self.Orien_yaw_up.clicked.connect(lambda: self.change_button('yaw_up'))
        ###########################################

        self.Slider_X.valueChanged.connect(self.change_XYZ)
        self.Slider_Y.valueChanged.connect(self.change_XYZ)
        self.Slider_Z.valueChanged.connect(self.change_XYZ)

        ###########################################

        self.Slider_Roll.valueChanged.connect(self.change_RPY)
        self.Slider_Pitch.valueChanged.connect(self.change_RPY)
        self.Slider_Yaw.valueChanged.connect(self.change_RPY)

        self.roi_x_value = self.ROI_X
        self.roi_x = 0
        self.roi_x_value.valueChanged.connect(self.ROI_x_change)
        self.roi_y_value = self.ROI_Y
        self.roi_y = 0
        self.roi_y_value.valueChanged.connect(self.ROI_y_change)
        self.roi_width_value = self.ROI_Width
        self.roi_width = 0
        self.roi_width_value.valueChanged.connect(self.ROI_width_change)
        self.roi_height_value = self.ROI_Height
        self.roi_height = 0
        self.roi_height_value.valueChanged.connect(self.ROI_height_change)

        ###########################################

        self.Object_List.itemClicked.connect(self.Select_item_objlist)
        self.Object_List_Activated.itemClicked.connect(
            self.Select_item_actlist)
        self.Activate.clicked.connect(self.Activate_item)
        self.Deactivate.clicked.connect(self.Deactivate_item)

        self.Class_Selector.currentIndexChanged.connect(self.select_class)

        # Eight identical marker offsets (a commented-out variant used (0, 0, 0)).
        self.marker_trans = [(0, 0.170, 0)] * 8

        # Yaw-only marker rotations in steps of pi/4, from 0 to 7*pi/4.
        self.marker_rot = [(0, 0, i * (pi / 4)) for i in range(8)]

        # Sliding triples of marker IDs; note that (10, 11, 1) breaks the
        # otherwise consecutive pattern.
        self.marker_list = []
        self.marker_list.append((0, 1, 2))
        self.marker_list.append((1, 2, 3))
        self.marker_list.append((2, 3, 4))
        self.marker_list.append((3, 4, 5))
        self.marker_list.append((4, 5, 6))
        self.marker_list.append((5, 6, 7))
        self.marker_list.append((6, 7, 8))
        self.marker_list.append((7, 8, 9))
        self.marker_list.append((8, 9, 10))
        self.marker_list.append((9, 10, 11))
        self.marker_list.append((10, 11, 1))
        self.marker_list.append((11, 12, 13))
        self.marker_list.append((12, 13, 14))
        self.marker_list.append((13, 14, 15))
        self.marker_list.append((14, 15, 0))
        self.marker_list.append((15, 0, 1))
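
get_3D_corners is not included in this excerpt. A common implementation
returns the eight corners of the model's axis-aligned bounding box in
homogeneous coordinates; a sketch under that assumption (the project's actual
method may differ):

import itertools
import numpy as np

def get_3D_corners(vertices):
    # vertices: 4 x N homogeneous vertex matrix, as built in __init__ above.
    mins = vertices[:3].min(axis=1)
    maxs = vertices[:3].max(axis=1)
    # All eight min/max combinations of x, y, z, plus the homogeneous 1.
    corners = [list(c) + [1.0] for c in itertools.product(*zip(mins, maxs))]
    return np.array(corners).transpose()  # 4 x 8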