Example #1
                label_path_for_show[b] = os.path.basename(cur_data_batch[1][b])

                cur_img = cv2.imread(cur_data_batch[0][b])
                cur_label = np.load(cur_data_batch[1][b]).tolist()

                # Lua indexing is 1-based, so shift the z-indices to 0-based
                cur_joints_zidx = (cur_label["joints_zidx"] - 1).copy()

                cur_joints = np.concatenate(
                    [cur_label["joints_2d"], cur_joints_zidx[:, np.newaxis]],
                    axis=1)

                cur_img, cur_joints = preprocessor.preprocess(
                    cur_img,
                    cur_joints,
                    is_training=not is_valid,
                    is_rotate=False)
                # generate the heatmaps and volumes
                batch_images_np[b] = cur_img

                hm_joint_2d = np.round(cur_joints[:, 0:2] /
                                       configs.coords_2d_scale)
                hm_joint_3d = np.concatenate(
                    [hm_joint_2d, cur_joints[:, 2][:, np.newaxis]], axis=1)
                batch_centers_np[b] = hm_joint_3d

            acc_hm = 0
            acc_vol = 0

            if is_valid:
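
The snippet above packs each joint's 2D pixel location together with a 0-based depth-bin index, then rescales the 2D part into heatmap resolution. Below is a minimal standalone sketch of that step; coords_2d_scale and all joint values are chosen here purely for illustration and are not taken from the example.

import numpy as np

# Hypothetical values for illustration only.
coords_2d_scale = 4.0                      # assumed image-to-heatmap stride
joints_2d = np.array([[128.0, 96.0],
                      [140.0, 110.0]])     # assumed 2D joint pixel coords
joints_zidx_lua = np.array([3, 5])         # assumed 1-based depth bin indices

joints_zidx = joints_zidx_lua - 1          # Lua is 1-based, Python is 0-based
joints = np.concatenate([joints_2d, joints_zidx[:, np.newaxis]], axis=1)

# Rescale the 2D part into heatmap space; the z-index is kept as-is.
hm_joint_2d = np.round(joints[:, 0:2] / coords_2d_scale)
hm_joint_3d = np.concatenate([hm_joint_2d, joints[:, 2][:, np.newaxis]], axis=1)
print(hm_joint_3d)                         # (2, 3): heatmap x, y and 0-based z-index
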
Example #2
                    cur_img = cv2.imread(img_list[data_index.val])
                    cur_label = np.load(lbl_list[data_index.val]).tolist()
                    data_index.val += 1

                    ########## Save the data for evaluation ###########
                    source_txt_arr.append(cur_label["source"])
                    center_arr.append(cur_label["center"])
                    scale_arr.append(cur_label["scale"])
                    depth_root_arr.append(cur_label["joints_3d"][0, 2])
                    gt_joints_3d_arr.append(cur_label["joints_3d"].copy())
                    crop_joints_2d_arr.append(cur_label["joints_2d"].copy())
                    cam_matrix_arr.append(cur_label["cam_mat"].copy())
                    ###################################################

                    cur_img, _ = preprocessor.preprocess(cur_img,
                                                         None,
                                                         is_training=False)

                    batch_images_np[b] = cur_img
                    batch_images_flipped_np[b] = preprocessor.flip_img(
                        batch_images_np[b])

                mean_vol_joints, \
                raw_vol_joints  = sess.run(
                        [
                         ordinal_model.mean_joints,
                         ordinal_model.raw_joints
                        ],
                        feed_dict={input_images: np.concatenate([batch_images_np, batch_images_flipped_np], axis=0)})

                print((len(img_path_for_show) * "{}\n").format(
                    *img_path_for_show))  # assumed args: the collected paths

                # assumed: loop header and image read for the scale samples
                for b in range(batch_size):
                    cur_img = cv2.imread(scale_img_list[scale_data_index.val])
                    cur_label = np.load(
                        scale_lbl_list[scale_data_index.val]).tolist()
                    scale_data_index.val += 1

                    ########## Save the data for evaluation ###########
                    gt_joints_3d = cur_label["joints_3d"].copy()
                    gt_depth_arr.append(gt_joints_3d[:, 2] -
                                        gt_joints_3d[0, 2])
                    ###################################################
                    cur_joints = np.concatenate([
                        cur_label["joints_2d"],
                        cur_label["joints_3d"][:, 2][:, np.newaxis]
                    ], axis=1)

                    cur_img, cur_joints = preprocessor.preprocess(
                        cur_img, cur_joints, is_training=False)
                    batch_images_np[b] = cur_img
                    batch_images_flipped_np[b] = preprocessor.flip_img(
                        batch_images_np[b])

                mean_scale_depth,\
                raw_scale_depth = sess.run(
                        [
                         ordinal_model.mean_volumes_z,
                         ordinal_model.raw_volumes_z
                        ],
                        feed_dict={input_images: np.concatenate([batch_images_np, batch_images_flipped_np], axis=0)})

                scale_for_show = []

                scale_depth = mean_scale_depth
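
Example #2 doubles each evaluation batch by stacking the images with their horizontally flipped copies, so a single session run yields predictions for both views. A minimal sketch of that batching, assuming NHWC float images and using np.flip in place of the repository's preprocessor.flip_img:

import numpy as np

# Assumed batch shape; np.flip stands in for preprocessor.flip_img here.
batch_size, h, w = 4, 256, 256
batch_images_np = np.zeros((batch_size, h, w, 3), dtype=np.float32)
batch_images_flipped_np = np.flip(batch_images_np, axis=2)  # mirror along width

# The first half of the stack holds the originals, the second half the flips;
# this 2*batch_size array is what gets fed as `input_images` above.
feed_batch = np.concatenate([batch_images_np, batch_images_flipped_np], axis=0)
print(feed_batch.shape)                    # (8, 256, 256, 3)
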