Example 1
def _dump_vis(model, pred, gt, common,
              video_list, K_list, dist_list, M_list):
    """ Draws the predicted (red) and ground-truth (green) skeletons into every
        camera view and dumps the stitched result to disk. """
    import os
    import cv2
    from utils.plot_util import draw_skel
    from utils.StitchedImage import StitchedImage
    import utils.CamLib as cl
    from tqdm import tqdm

    # iterate frames
    for i, (_, fid) in tqdm(enumerate(common), desc='Dumping Samples', total=len(common)):
        # accumulate the rendered views of all cameras
        merged_list = list()
        # overlay prediction (red) and ground truth (green) in every view
        for K, dist, M, v in zip(K_list, dist_list, M_list, video_list):
            img = read_vid_frame(v, fid)  # helper defined elsewhere: reads frame fid of video v
            uv_p = cl.project(cl.trafo_coords(pred[i], M), K, dist)
            img_p = draw_skel(img.copy(), model, uv_p, color_fixed='r', order='uv')
            uv_gt = cl.project(cl.trafo_coords(gt[i], M), K, dist)
            img_p = draw_skel(img_p, model, uv_gt, color_fixed='g', order='uv')

            merged_list.append(img_p)

        merged = StitchedImage(merged_list)
        p = os.path.join(os.path.dirname(video_list[0]), 'eval_vis_dump/%04d.png' % i)
        my_mkdir(p, is_file=True)  # helper defined elsewhere: creates the parent directory of p
        cv2.imwrite(p, merged.image)
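
Note: read_vid_frame is called above but not defined in this snippet. A minimal sketch of what it might look like with OpenCV, assuming v is a video file path and fid a zero-based frame index (name and signature taken from the call site, the body is an assumption):

import cv2

def read_vid_frame(video_path, fid):
    """ Hypothetical helper: read frame fid from the video at video_path. """
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, fid)  # seek to the requested frame
    success, frame = cap.read()
    cap.release()
    if not success:
        raise IOError('could not read frame %d from %s' % (fid, video_path))
    return frame  # BGR uint8 image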
Example 2
    def postfunc_save_samples(self, trainer):
        """ Adds a visual sample to the summary writer. """
        import numpy as np
        from utils.plot_util import draw_skel

        tmp = list()
        for bid, (img, uv_gt, uv_pred) in enumerate(zip(trainer.fetches_v[data_t.image],
                                                        trainer.fetches_v[data_t.uv_merged],  # ground truth
                                                        trainer.fetches_v[data_t.pred_uv_final])):  # projection

            img_rgb = ((img + 0.5) * 255).round().astype(np.uint8)[:, :, ::-1]  # undo [-0.5, 0.5] normalization, flip channel order
            img_p1 = img_rgb.copy()
            if self.net_config.use_2drefinement_net:
                img_p1 = draw_skel(img_p1, self.net_config.model,
                                   trainer.fetches_v[data_t.pred_uv_refine][-1][bid],
                                   order='uv')  # this is estimated from the single views

            img_p2 = draw_skel(img_rgb.copy(), self.net_config.model, uv_pred, order='uv')
            img_gt = draw_skel(img_rgb.copy(), self.net_config.model, uv_gt, order='uv')
            tmp.append(np.concatenate([img_p1, img_p2, img_gt], 1))

            if len(tmp) == trainer.config.save_sample_num:
                break

        summary_v = trainer.session.run(self.merged_vis_sum, {self.merged_vis: np.stack(tmp)})
        trainer.summary_writer.add_summary(summary_v, trainer.global_step_v)
        trainer.summary_writer.flush()
        print('Saved some samples.')
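
Note: merged_vis and merged_vis_sum are created elsewhere. A minimal sketch of how such an image summary is typically wired up in TensorFlow 1.x (function name, placeholder shape, and summary tag are assumptions):

import tensorflow as tf

def build_sample_summary(max_outputs=8):
    """ Hypothetical setup for the visual-sample summary fed above. """
    merged_vis = tf.placeholder(tf.uint8, shape=[None, None, None, 3])  # stacked sample rows
    merged_vis_sum = tf.summary.image('sample_vis', merged_vis, max_outputs=max_outputs)
    return merged_vis, merged_vis_sum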
Example 3
    model = Model(args.model)
    df = build_dataflow(model, [args.set_name],
                        ['/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat506_200306/run046_cam%d.avi'],
                        ['/misc/lmbraid18/datasets/RatPose/RatTrack_paper_resub_sessions/Rat506_200306/pred_run046__00.json'],
                        is_train=False,
                        threaded=True, single_sample=False)

    start = None
    for idx, dp in enumerate(df.get_data()):
        if idx >= df.size():
            break

        data = df2dict(dp)
        img_rgb = np.round((data[data_t.image] + 0.5) * 255.0).astype(np.uint8)[:, :, :, ::-1]  # undo normalization, flip channel order
        num_cams = img_rgb.shape[0]
        print('is_supervised', data[data_t.is_supervised])

        img_list = list()
        for i in range(num_cams):
            # project the 3D pose into camera i (kept for reference; the labeled
            # 2D points are what actually gets drawn below)
            xyz_cam = cl.trafo_coords(data[data_t.xyz_nobatch][0], data[data_t.M][i])
            uv = cl.project(xyz_cam, data[data_t.K][i])
            I = draw_skel(img_rgb[i], model, data[data_t.uv][i], data[data_t.vis_nobatch][0], order='uv')
            img_list.append(I)
        xyz = data[data_t.xyz_nobatch][0]

        merge = StitchedImage(img_list, target_size=(int(0.8 * args.window_size), args.window_size))

        cv2.imshow('pose labeled', merge.image[:, :, ::-1])
        cv2.waitKey(0 if args.wait else 10)
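
Note: trafo_coords and project from utils.CamLib are used throughout these examples but not shown. A minimal sketch of the standard pinhole geometry they appear to implement, assuming M is a 4x4 world-to-camera transform and K the 3x3 intrinsic matrix (the lens distortion that cl.project also accepts in Example 1 is omitted here):

import numpy as np

def trafo_coords(xyz, M):
    """ Apply a 4x4 rigid transform M to Nx3 points (sketch). """
    xyz_h = np.concatenate([xyz, np.ones((xyz.shape[0], 1))], axis=1)  # homogeneous coords
    return (M @ xyz_h.T).T[:, :3]

def project(xyz_cam, K):
    """ Pinhole projection of Nx3 camera-frame points with intrinsics K (sketch). """
    uv_h = (K @ xyz_cam.T).T
    return uv_h[:, :2] / uv_h[:, 2:]  # perspective divide by depth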

Example 4
    def _evaluate_on_set(self, trainer, dataflow, summary,
                         epe2d_tf, auc2d_tf,
                         epe2d_tf_aux, auc2d_tf_aux,
                         epe3d_tf, auc3d_tf,
                         img_vis, is_eval):
        """ Run evaluation. """
        from utils.eval_util import EvalUtil
        from colored import stylize, fg
        from utils.plot_util import draw_skel, draw_text
        from tqdm import tqdm
        import numpy as np

        df2dict, df = dataflow

        # evaluate on the given dataflow
        eval3d = EvalUtil()
        eval3d_refine = EvalUtil()
        eval2d = EvalUtil()
        eval2d_aux = EvalUtil()
        img_dump = list()
        gen = df.get_data()  # create the generator once so next() advances through the set
        for i in tqdm(range(trainer.config.eval_steps)):
            data = df2dict(next(gen))
            feed = {trainer.inputs[k]: data[k] for k in self.needed_inputs}

            # set up fetches
            fetches = [
                trainer.predictions[data_t.pred_xyz_final],
                trainer.predictions[data_t.pred_uv_final],
            ]

            # in the refinement case additionally fetch the refined predictions;
            # otherwise the auxiliary keypoint predictions are shown instead
            if self.net_config.use_2drefinement_net:
                fetches.append(trainer.predictions[data_t.pred_uv_refine][-1])
                fetches.append(trainer.predictions[data_t.pred_xyz_refine])
                fetches.append(trainer.predictions[data_t.pred_vis3d_refine])

            # forward pass
            fetches_v = trainer.session.run(fetches, feed)
            fetches_v = fetches_v[::-1]  # reverse so pop() yields values in fetch order

            # extract values
            kp3d_pred = fetches_v.pop()
            kp2d_pred = fetches_v.pop()

            if self.net_config.use_2drefinement_net:
                kp2d_pred_alt = fetches_v.pop()
                pred_xyz_refine = fetches_v.pop()
                pred_vis3d_refine = fetches_v.pop()

            img = data[data_t.image]
            xyz_gt_v = data[data_t.xyz_nobatch]
            kp_vis_gt_v = data[data_t.vis_nobatch]
            kp_uv_gt_v = data[data_t.uv_merged]

            eval3d.feed(xyz_gt_v[0], kp_vis_gt_v[0], kp3d_pred[0])

            if self.net_config.use_2drefinement_net:
                eval3d_refine.feed(xyz_gt_v[0], kp_vis_gt_v[0], pred_xyz_refine[0])

            for bid in range(trainer.config.batch_size):
                eval2d.feed(kp_uv_gt_v[bid], kp_vis_gt_v[0], kp2d_pred[bid])
                if self.net_config.use_2drefinement_net:
                    # kp2d_pred_alt is only fetched in the refinement case
                    eval2d_aux.feed(kp_uv_gt_v[bid], kp_vis_gt_v[0], kp2d_pred_alt[bid])

                if len(img_dump) < self.net_config.save_sample_num:
                    # assemble image
                    img_rgb = ((img[bid] + 0.5) * 255).round().astype(np.uint8)[:, :, ::-1]  # undo normalization, flip channel order
                    img_p1 = img_rgb.copy()
                    if self.net_config.use_2drefinement_net:
                        img_p1 = draw_skel(img_p1, self.config.model, kp2d_pred_alt[bid], order='uv')  # this is estimated from the single view
                        img_p1 = draw_text(img_p1, 'ref')

                    img_p2 = draw_skel(img_rgb.copy(), self.config.model, kp2d_pred[bid], order='uv')
                    img_p2 = draw_text(img_p2, 'proj')
                    img_gt = draw_skel(img_rgb.copy(), self.config.model, kp_uv_gt_v[bid], order='uv')
                    img_gt = draw_text(img_gt, 'gt')
                    img_dump.append(np.concatenate([img_p1, img_p2, img_gt], 1))

        # get eval results and write to log
        mean2d, median2d, auc2d, _, _ = eval2d.get_measures(0.0, 100, 100)
        print(stylize('Evaluation 2D results:', fg('cyan')))
        print(stylize('auc=%.3f, mean_kp2d_avg=%.2f, median_kp2d_avg=%.2f' % (auc2d, mean2d, median2d), fg('green')))

        mean2d_aux, median2d_aux, auc2d_aux, _, _ = eval2d_aux.get_measures(0.0, 100, 100)
        if self.net_config.use_2drefinement_net:
            print(stylize('Evaluation 2D results (refinement):', fg('cyan')))
        else:
            print(stylize('Evaluation 2D results (aux):', fg('cyan')))
        print(stylize('auc=%.3f, mean_kp2d_avg=%.2f, median_kp2d_avg=%.2f' % (auc2d_aux, mean2d_aux, median2d_aux), fg('green')))

        if self.net_config.use_2drefinement_net:
            mean3d, median3d, auc3d, _, _ = eval3d_refine.get_measures(0.0, 0.05, 100)
            print(stylize('Evaluation 3D (refine) results:', fg('cyan')))
            print(stylize('auc=%.3f, mean_kp3d_avg=%.2f cm, median_kp3d_avg=%.2f cm' % (
                auc3d, mean3d * 100.0, median3d * 100.0), fg('green')))

        mean3d, median3d, auc3d, _, _ = eval3d.get_measures(0.0, 0.05, 100)
        print(stylize('Evaluation 3D results:', fg('cyan')))
        print(stylize('auc=%.3f, mean_kp3d_avg=%.2f cm, median_kp3d_avg=%.2f cm' % (
            auc3d, mean3d * 100.0, median3d * 100.0), fg('green')))

        if self.config.use_early_stopping and is_eval:
            trainer.es_util.feed(mean3d)

        eval_summary_train_v = trainer.session.run(summary,
                                        {epe2d_tf: np.clip(median2d, 0.0, 200.0), auc2d_tf: auc2d,
                                         epe2d_tf_aux: np.clip(median2d_aux, 0.0, 200.0), auc2d_tf_aux: auc2d_aux,
                                         epe3d_tf: np.clip(median3d, 0.0, 1.0), auc3d_tf: auc3d,
                                         img_vis: np.stack(img_dump)})
        trainer.summary_writer.add_summary(eval_summary_train_v, trainer.global_step_v)
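
Note: EvalUtil is imported from utils.eval_util but not shown. A minimal sketch of the feed/get_measures pattern it appears to follow, accumulating per-keypoint errors and reporting mean/median EPE plus the AUC of a PCK-style threshold curve (all internals here are assumptions):

import numpy as np

class EvalUtil:
    """ Hypothetical sketch: accumulate keypoint errors, report EPE and AUC. """
    def __init__(self):
        self.errors = list()

    def feed(self, kp_gt, vis, kp_pred):
        # Euclidean error of every visible keypoint
        err = np.linalg.norm(np.asarray(kp_gt) - np.asarray(kp_pred), axis=-1)
        self.errors.extend(err[np.asarray(vis) > 0.5].tolist())

    def get_measures(self, thresh_lo, thresh_hi, steps):
        errors = np.array(self.errors)
        thresholds = np.linspace(thresh_lo, thresh_hi, steps)
        # fraction of keypoints whose error stays below each threshold (PCK curve)
        pck = np.array([np.mean(errors <= t) for t in thresholds])
        auc = np.trapz(pck, thresholds) / (thresh_hi - thresh_lo)  # normalized area under PCK
        return errors.mean(), np.median(errors), auc, thresholds, pck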
Example 5
            # draw estimated root position
            if 'xyz' in predictions[idx].keys():
                root_uv = cl.project(
                    cl.trafo_coords(np.array(predictions[idx]['xyz']),
                                    M_list[i]), K_list[i])
                this_img = cv2.circle(this_img,
                                      (int(root_uv[0, 0]), int(root_uv[0, 1])),
                                      radius=5,
                                      color=(0, 255, 255),
                                      thickness=-1)

            # draw keypoints
            if 'kp_xyz' in predictions[idx].keys():
                uv_proj = cl.project(
                    cl.trafo_coords(np.array(predictions[idx]['kp_xyz'][0]),
                                    M_list[i]), K_list[i])
                this_img = draw_skel(this_img,
                                     model,
                                     uv_proj,
                                     order='uv',
                                     linewidth=2,
                                     kp_style=(5, 1))

            # draw frame id
            if args.draw_fid and i == 0:
                this_img = draw_text(this_img, '%03d' % idx)

            img_list.append(this_img)

        merge = StitchedImage(img_list,
                              target_size=(int(0.8 * args.window_size),
                                           args.window_size))

        if args.save:
            writer.feed(merge.image[:, :, ::-1])
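
Note: writer is created outside this fragment. A minimal sketch of a feed-style wrapper around OpenCV's VideoWriter, assuming BGR uint8 frames of constant size (class name, codec, and parameters are assumptions):

import cv2

class VideoWriterWrapper:
    """ Hypothetical sketch: lazily opened cv2.VideoWriter with a feed() API. """
    def __init__(self, path, fps=30.0):
        self.path, self.fps, self.writer = path, fps, None

    def feed(self, frame_bgr):
        # open the writer on the first frame, once the frame size is known
        if self.writer is None:
            h, w = frame_bgr.shape[:2]
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            self.writer = cv2.VideoWriter(self.path, fourcc, self.fps, (w, h))
        self.writer.write(frame_bgr)

    def close(self):
        if self.writer is not None:
            self.writer.release()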