Example #1
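# The three generator functions below are excerpted from a larger module, so
# their imports and module-level configuration do not appear on this page. The
# preamble sketched here is an assumption reconstructed from the names used in
# the code: the camera parameters are the values commonly used for LINEMOD-style
# data and the paths are placeholders, so both may differ from the original project.
import os
import random

import cv2
import numpy as np
import scipy.io as sio
from six.moves import cPickle
from tqdm import tqdm

# Project-internal helpers referenced below (mkdir_if_missing, se3 / RT_transform,
# Render_Py, Render_Py_Light) are provided elsewhere in the original code base.

# Assumed module-level configuration (placeholder values):
cur_path = os.path.dirname(os.path.abspath(__file__))
LINEMOD_syn_root = "/path/to/LM6d_data_syn"    # root used by gen_observed()/gen_gt_observed()
LINEMOD_root = "/path/to/LINEMOD_6D"           # root used by gen_real()
observed_pose_dir = os.path.join(LINEMOD_syn_root, "poses")
gt_observed_root_dir = os.path.join(LINEMOD_syn_root, "data", "gt_observed")
syn_poses_path = os.path.join(observed_pose_dir,
                              "LM6d_ds_train_observed_pose_all.pkl")  # opened by gen_gt_observed()
classes = ["__back_ground__", "ape", "benchvise", "camera", "can", "cat",
           "driller", "duck", "eggbox", "glue", "holepuncher", "iron",
           "lamp", "phone"]
K = np.array([[572.4114, 0.0, 325.2611],
              [0.0, 573.57043, 242.04899],
              [0.0, 0.0, 1.0]])                # typical LINEMOD intrinsics
width, height = 640, 480
ZNEAR, ZFAR = 0.25, 6.0
depth_factor = 1000.0                          # depth stored as uint16 millimeters
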
def gen_observed():
    # output path
    observed_root_dir = os.path.join(LINEMOD_syn_root, "data", "observed")
    image_set_dir = os.path.join(LINEMOD_syn_root, "image_set")
    mkdir_if_missing(observed_root_dir)
    mkdir_if_missing(image_set_dir)

    syn_poses_path = os.path.join(observed_pose_dir,
                                  "LM6d_ds_train_observed_pose_all.pkl")
    with open(syn_poses_path, "rb") as f:
        syn_pose_dict = cPickle.load(f)

    for class_idx, class_name in enumerate(classes):
        if class_name == "__back_ground__":
            continue
        # uncomment here to only generate data for ape
        # if class_name not in ['ape']:
        #     continue

        # init render machines
        brightness_ratios = [0.2, 0.25, 0.3, 0.35, 0.4]
        model_dir = os.path.join(LINEMOD_syn_root, "models", class_name)
        render_machine = Render_Py_Light(model_dir, K, width, height, ZNEAR,
                                         ZFAR, brightness_ratios)

        syn_poses = syn_pose_dict[class_name]
        num_poses = syn_poses.shape[0]
        observed_index_list = [
            "{}/{:06d}".format(class_name, i + 1) for i in range(num_poses)
        ]

        observed_set_path = os.path.join(
            image_set_dir,
            "observed/LM6d_data_syn_train_observed_{}.txt".format(class_name))
        mkdir_if_missing(os.path.join(image_set_dir, "observed"))
        f_observed_set = open(observed_set_path, "w")

        for idx, observed_index in enumerate(tqdm(observed_index_list)):
            f_observed_set.write("{}\n".format(observed_index))
            prefix = observed_index.split("/")[1]

            observed_dir = os.path.join(observed_root_dir, class_name)
            mkdir_if_missing(observed_dir)

            observed_color_file = os.path.join(observed_dir,
                                               prefix + "-color.png")
            observed_depth_file = os.path.join(observed_dir,
                                               prefix + "-depth.png")
            observed_pose_file = os.path.join(observed_dir,
                                              prefix + "-pose.txt")

            observed_label_file = os.path.join(observed_dir,
                                               prefix + "-label.png")

            pose_quat = syn_poses[idx, :]
            pose = se3.se3_q2m(pose_quat)

            # cycle through six preset light positions and halve the offsets
            light_positions = [[1, 0, 1], [1, 1, 1], [0, 1, 1],
                               [-1, 1, 1], [-1, 0, 1], [0, 0, 1]]
            light_position = np.array(light_positions[idx % 6]) * 0.5
            # place the light relative to the object translation (y and z negated)
            light_position[0] += pose[0, 3]
            light_position[1] -= pose[1, 3]
            light_position[2] -= pose[2, 3]

            # randomly adjust color and intensity for light_intensity
            colors = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                               [1, 0, 1], [1, 1, 0], [1, 1, 1]])
            intensity = np.random.uniform(0.9, 1.1, size=(3, ))
            colors_randk = random.randint(0, colors.shape[0] - 1)
            light_intensity = colors[colors_randk] * intensity

            # randomly choose a render machine
            rm_randk = random.randint(0, len(brightness_ratios) - 1)
            # get render result
            rgb_gl, depth_gl = render_machine.render(
                se3.mat2quat(pose[:3, :3]),
                pose[:, -1],
                light_position,
                light_intensity,
                brightness_k=rm_randk)
            rgb_gl = rgb_gl.astype("uint8")
            # observed label: binary mask of the rendered object
            label_gl = np.zeros(depth_gl.shape, dtype=np.uint8)
            label_gl[depth_gl != 0] = 1

            cv2.imwrite(observed_color_file, rgb_gl)
            depth_gl = (depth_gl * depth_factor).astype(np.uint16)
            cv2.imwrite(observed_depth_file, depth_gl)

            cv2.imwrite(observed_label_file, label_gl)

            text_file = open(observed_pose_file, "w")
            text_file.write("{}\n".format(class_idx))
            pose_str = "{} {} {} {}\n{} {} {} {}\n{} {} {} {}".format(
                pose[0, 0],
                pose[0, 1],
                pose[0, 2],
                pose[0, 3],
                pose[1, 0],
                pose[1, 1],
                pose[1, 2],
                pose[1, 3],
                pose[2, 0],
                pose[2, 1],
                pose[2, 2],
                pose[2, 3],
            )
            text_file.write(pose_str)
            text_file.close()

        f_observed_set.close()
        print(class_name, " done")

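# gen_observed() leans on two small helpers that are not shown on this page.
# The sketches below are assumptions about their behavior, inferred from how
# they are called: mkdir_if_missing() creates a directory tree on demand, and
# se3.se3_q2m() turns a 7-vector [qw, qx, qy, qz, tx, ty, tz] into a 3x4 [R|t]
# pose matrix. The originals may differ in detail.
def mkdir_if_missing(path):
    """Create `path` (including parents) if it does not exist yet."""
    if not os.path.exists(path):
        os.makedirs(path)


def quat2mat(q):
    """Unit quaternion (w, x, y, z) -> 3x3 rotation matrix."""
    w, x, y, z = q / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])


def se3_q2m(pose_quat):
    """[qw, qx, qy, qz, tx, ty, tz] -> 3x4 pose matrix [R | t]."""
    pose = np.zeros((3, 4))
    pose[:3, :3] = quat2mat(pose_quat[:4])
    pose[:3, 3] = pose_quat[4:]
    return pose

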
def gen_gt_observed():
    with open(syn_poses_path, "rb") as f:
        syn_pose_dict = cPickle.load(f)

    for class_idx, class_name in enumerate(classes):
        if class_name == "__back_ground__":
            continue
        # uncomment here to only generate data for ape
        # if class_name not in ['ape']:
        #     continue

        # init render machines
        # brightness_ratios = [0.2, 0.25, 0.3, 0.35, 0.4]
        model_dir = os.path.join(LINEMOD_syn_root,
                                 "models/{}".format(class_name))
        render_machine = Render_Py(model_dir, K, width, height, ZNEAR, ZFAR)

        # syn_poses_path = os.path.join(syn_poses_dir, 'LM6d_v1_all_rendered_pose_{}.txt'.format(class_name))
        # syn_poses = np.loadtxt(syn_poses_path)
        # print(syn_poses.shape) # nx7
        syn_poses = syn_pose_dict[class_name]
        num_poses = syn_poses.shape[0]
        observed_index_list = [
            "{}/{:06d}".format(class_name, i + 1) for i in range(num_poses)
        ]

        # observed_set_path = os.path.join(
        #     image_set_dir,
        #     'observed/LM_data_syn_train_observed_{}.txt'.format(class_name))
        # mkdir_if_missing(os.path.join(image_set_dir, 'observed'))
        # f_observed_set = open(observed_set_path, 'w')

        for idx, observed_index in enumerate(tqdm(observed_index_list)):
            # f_observed_set.write('{}\n'.format(observed_index))
            # continue # just generate observed set file
            prefix = observed_index.split("/")[1]

            gt_observed_dir = os.path.join(gt_observed_root_dir, class_name)
            mkdir_if_missing(gt_observed_dir)

            gt_observed_color_file = os.path.join(gt_observed_dir,
                                                  prefix + "-color.png")
            gt_observed_depth_file = os.path.join(gt_observed_dir,
                                                  prefix + "-depth.png")
            gt_observed_pose_file = os.path.join(gt_observed_dir,
                                                 prefix + "-pose.txt")

            # observed_label_file = os.path.join(observed_root_dir, video_name, prefix + "-label.png")
            gt_observed_label_file = os.path.join(gt_observed_dir,
                                                  prefix + "-label.png")

            pose_quat = syn_poses[idx, :]
            pose = se3.se3_q2m(pose_quat)

            # cycle through six preset light positions and halve the offsets
            # (note: Render_Py below takes no lighting arguments, so this
            # light_position is effectively unused in gen_gt_observed)
            light_positions = [[1, 0, 1], [1, 1, 1], [0, 1, 1],
                               [-1, 1, 1], [-1, 0, 1], [0, 0, 1]]
            light_position = np.array(light_positions[idx % 6]) * 0.5
            light_position[0] += pose[0, 3]
            light_position[1] -= pose[1, 3]
            light_position[2] -= pose[2, 3]

            # get render result
            rgb_gl, depth_gl = render_machine.render(pose[:3, :3],
                                                     pose[:, 3],
                                                     r_type="mat")
            rgb_gl = rgb_gl.astype("uint8")
            # gt_observed label: binary mask of the rendered object
            label_gl = np.zeros(depth_gl.shape, dtype=np.uint8)
            label_gl[depth_gl != 0] = 1

            cv2.imwrite(gt_observed_color_file, rgb_gl)
            depth_gl = (depth_gl * depth_factor).astype(np.uint16)
            cv2.imwrite(gt_observed_depth_file, depth_gl)

            cv2.imwrite(gt_observed_label_file, label_gl)

            text_file = open(gt_observed_pose_file, "w")
            text_file.write("{}\n".format(class_idx))
            pose_str = "{} {} {} {}\n{} {} {} {}\n{} {} {} {}".format(
                pose[0, 0],
                pose[0, 1],
                pose[0, 2],
                pose[0, 3],
                pose[1, 0],
                pose[1, 1],
                pose[1, 2],
                pose[1, 3],
                pose[2, 0],
                pose[2, 1],
                pose[2, 2],
                pose[2, 3],
            )
            text_file.write(pose_str)
            text_file.close()

        print(class_name, " done")

                                        "{:04d}.mat".format(yu_idx))
            yu_pred = sio.loadmat(yu_pred_file)
            rois = yu_pred['rois']
            if len(rois) != 0:
                labels = rois[:, 0]  # 1: found; -1: not found
                if labels != -1:
                    try:
                        meta_data = sio.loadmat(
                            real_meta_path.format(real_index))
                    except:
                        raise Exception(real_index)
                    proposal_idx = np.where(labels == 1)
                    assert len(proposal_idx) == 1
                    pose_ori_q = yu_pred['poses'][proposal_idx].reshape(7)

                    pose_ori_m = RT_transform.se3_q2m(pose_ori_q)
                    pose_ori_q[:4] = RT_transform.mat2quat(pose_ori_m[:, :3])

                    pose_gt = meta_data['poses']
                    if len(pose_gt.shape) > 2:
                        pose_gt = pose_gt[:, :,
                                          list(meta_data['cls_indexes'][0]).
                                          index(big_class_idx)]
                    print("{}, {:04d}, {}".format(
                        class_name, idx,
                        RT_transform.calc_rt_dist_m(pose_ori_m, pose_gt)))

                    pose_ori_file = os.path.join(
                        rendered_dir,
                        '{}_{}_{}-pose.txt'.format(class_name,
                                                   real_prefix_list[idx], 0))
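# The snippet above compares a PoseCNN-style estimate against the ground-truth
# pose with RT_transform.calc_rt_dist_m. A minimal sketch of such a pose-distance
# metric is given below, assuming it reports the rotation geodesic distance in
# degrees and the Euclidean translation distance; the project's implementation
# may differ in units or conventions.
def calc_rt_dist_m_sketch(pose_est, pose_gt):
    """Return (rotation error in degrees, translation error) between two 3x4 poses."""
    R_est, t_est = pose_est[:3, :3], pose_est[:3, 3]
    R_gt, t_gt = pose_gt[:3, :3], pose_gt[:3, 3]
    # rotation geodesic distance: angle of R_est * R_gt^T
    cos_angle = (np.trace(R_est.dot(R_gt.T)) - 1.0) / 2.0
    r_dist = np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
    t_dist = np.linalg.norm(t_est - t_gt)
    return r_dist, t_dist

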
def gen_real():
    syn_poses_dir = os.path.join(
        cur_path,
        '../data/LINEMOD_6D/LM6d_converted/LM6d_render_v1/syn_poses_single/')

    # output path
    real_root_dir = os.path.join(LINEMOD_root, 'LM6d_data_syn_light', 'data',
                                 'real')
    image_set_dir = os.path.join(LINEMOD_root, 'LM6d_data_syn_light/image_set')
    mkdir_if_missing(real_root_dir)
    mkdir_if_missing(image_set_dir)

    syn_poses_path = os.path.join(syn_poses_dir, 'LM6d_ds_v1_all_syn_pose.pkl')
    with open(syn_poses_path, 'rb') as f:
        syn_pose_dict = cPickle.load(f)

    for class_idx, class_name in enumerate(tqdm(classes)):
        if class_name == '__back_ground__':
            continue
        # note: unlike gen_observed()/gen_gt_observed(), this function only
        # generates data for the 'ape' class
        if class_name not in ['ape']:
            continue

        # init render machines
        brightness_ratios = [0.2, 0.25, 0.3, 0.35, 0.4]
        model_dir = os.path.join(LINEMOD_root, 'models', class_name)
        render_machine = Render_Py_Light(model_dir, K, width, height, ZNEAR,
                                         ZFAR, brightness_ratios)

        # syn_poses_path = os.path.join(syn_poses_dir, 'LM6d_v1_all_rendered_pose_{}.txt'.format(class_name))
        # syn_poses = np.loadtxt(syn_poses_path)
        # print(syn_poses.shape) # nx7
        syn_poses = syn_pose_dict[class_name]
        num_poses = syn_poses.shape[0]
        real_index_list = [
            '{}/{:06d}'.format(class_name, i + 1) for i in range(num_poses)
        ]

        real_set_path = os.path.join(
            image_set_dir,
            'real/LM6d_data_syn_train_real_{}.txt'.format(class_name))
        mkdir_if_missing(os.path.join(image_set_dir, 'real'))
        f_real_set = open(real_set_path, 'w')

        all_pair = []
        for idx, real_index in enumerate(real_index_list):
            f_real_set.write('{}\n'.format(real_index))
            # continue # just generate real set file
            prefix = real_index.split('/')[1]
            video_name = real_index.split('/')[0]

            real_dir = os.path.join(real_root_dir, class_name)
            mkdir_if_missing(real_dir)

            real_color_file = os.path.join(real_dir, prefix + "-color.png")
            real_depth_file = os.path.join(real_dir, prefix + "-depth.png")
            real_pose_file = os.path.join(real_dir, prefix + "-pose.txt")

            # real_label_file = os.path.join(real_root_dir, video_name, prefix + "-label.png")
            real_label_file = os.path.join(real_dir, prefix + "-label.png")

            if idx % 500 == 0:
                print('  ', class_name, idx, '/', len(real_index_list), ' ',
                      real_index)

            pose_quat = syn_poses[idx, :]
            pose = se3.se3_q2m(pose_quat)

            # cycle through six preset light positions and halve the offsets
            light_positions = [[1, 0, 1], [1, 1, 1], [0, 1, 1],
                               [-1, 1, 1], [-1, 0, 1], [0, 0, 1]]
            light_position = np.array(light_positions[idx % 6]) * 0.5
            # place the light relative to the object translation (y and z negated)
            light_position[0] += pose[0, 3]
            light_position[1] -= pose[1, 3]
            light_position[2] -= pose[2, 3]

            # randomly adjust color and intensity for light_intensity
            colors = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                               [1, 0, 1], [1, 1, 0], [1, 1, 1]])
            intensity = np.random.uniform(0.9, 1.1, size=(3, ))
            colors_randk = random.randint(0, colors.shape[0] - 1)
            light_intensity = colors[colors_randk] * intensity
            # print('light intensity: ', light_intensity)

            # randomly choose a render machine
            rm_randk = random.randint(0, len(brightness_ratios) - 1)
            # print('brightness ratio:', brightness_ratios[rm_randk])
            # get render result
            rgb_gl, depth_gl = render_machine.render(
                se3.mat2quat(pose[:3, :3]),
                pose[:, -1],
                light_position,
                light_intensity,
                brightness_k=rm_randk)
            rgb_gl = rgb_gl.astype('uint8')
            # label for the synthetic "real" image: binary mask of the rendered object
            label_gl = np.zeros(depth_gl.shape, dtype=np.uint8)
            label_gl[depth_gl != 0] = 1

            # import matplotlib.pyplot as plt
            # fig = plt.figure()
            # plt.axis('off')
            # fig.add_subplot(1, 3, 1)
            # plt.imshow(rgb_gl[:, :, [2,1,0]])
            #
            # fig.add_subplot(1, 3, 2)
            # plt.imshow(depth_gl)
            #
            # fig.add_subplot(1, 3, 3)
            # plt.imshow(label_gl)
            #
            # fig.suptitle('light position: {}\n light_intensity: {}\n brightness: {}'.format(light_position, light_intensity, brightness_ratios[rm_randk]))
            # plt.show()

            cv2.imwrite(real_color_file, rgb_gl)
            depth_gl = (depth_gl * depth_factor).astype(np.uint16)
            cv2.imwrite(real_depth_file, depth_gl)

            cv2.imwrite(real_label_file, label_gl)

            text_file = open(real_pose_file, 'w')
            text_file.write("{}\n".format(class_idx))
            pose_str = "{} {} {} {}\n{} {} {} {}\n{} {} {} {}" \
                .format(pose[0, 0], pose[0, 1], pose[0, 2], pose[0, 3],
                        pose[1, 0], pose[1, 1], pose[1, 2], pose[1, 3],
                        pose[2, 0], pose[2, 1], pose[2, 2], pose[2, 3])
            text_file.write(pose_str)
            text_file.close()

        f_real_set.close()
        print(class_name, " done")
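

# The generators above all store results in the same on-disk format: an 8-bit
# color PNG, a uint16 depth PNG scaled by depth_factor, a binary label PNG and a
# "-pose.txt" file holding the class index followed by a 3x4 pose matrix. A
# minimal sketch of reading one sample back, assuming that layout and the
# depth_factor defined above:
def load_sample(sample_dir, prefix):
    """Read color, depth (in meters), label mask, class index and 3x4 pose."""
    color = cv2.imread(os.path.join(sample_dir, prefix + "-color.png"),
                       cv2.IMREAD_COLOR)
    depth = cv2.imread(os.path.join(sample_dir, prefix + "-depth.png"),
                       cv2.IMREAD_UNCHANGED).astype(np.float32) / depth_factor
    label = cv2.imread(os.path.join(sample_dir, prefix + "-label.png"),
                       cv2.IMREAD_UNCHANGED)
    with open(os.path.join(sample_dir, prefix + "-pose.txt")) as f:
        lines = f.read().splitlines()
    class_idx = int(lines[0])
    pose = np.array([[float(v) for v in line.split()] for line in lines[1:4]])
    return color, depth, label, class_idx, pose


# Example usage (hypothetical sample):
# color, depth, label, class_idx, pose = load_sample(
#     os.path.join(LINEMOD_syn_root, "data", "observed", "ape"), "000001")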