Example #1
def _dump_vis(model, pred, gt, common,
              video_list, K_list, dist_list, M_list):
    from utils.plot_util import draw_skel
    from utils.StitchedImage import StitchedImage
    import utils.CamLib as cl
    from tqdm import tqdm

    # iterate frames
    for i, (_, fid) in tqdm(enumerate(common), desc='Dumping Samples', total=len(common)):
        # Accumulate frames
        merged_list = list()
        # inpaint pred/gt
        for K, dist, M, v in zip(K_list, dist_list, M_list, video_list):
            img = read_vid_frame(v, fid)
            uv_p = cl.project(cl.trafo_coords(pred[i], M), K, dist)
            img_p = draw_skel(img.copy(), model, uv_p, color_fixed='r', order='uv')
            uv_gt = cl.project(cl.trafo_coords(gt[i], M), K, dist)
            img_p = draw_skel(img_p, model, uv_gt, color_fixed='g', order='uv')

            merged_list.append(img_p)

        merged = StitchedImage(merged_list)
        p = os.path.join(os.path.dirname(video_list[0]), 'eval_vis_dump/%04d.png' % i)
        # cv2.imshow('img', merged.image)
        # cv2.waitKey()
        my_mkdir(p, is_file=True)
        cv2.imwrite(p, merged.image)
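
All of the examples on this page call a small `my_mkdir` helper whose definition is not shown. Below is a minimal sketch of what it presumably does, inferred purely from the call sites (`is_file=True` for file paths, `is_file=False` or omitted for directories); the actual implementation in the repository may differ:

import os

def my_mkdir(path, is_file=False):
    """Create the directory needed for `path` if it does not exist yet.

    If `is_file` is True, `path` is treated as a file path and only its
    parent directory is created; otherwise `path` itself is created.
    (Sketch inferred from the usage above, not the original implementation.)
    """
    dir_path = os.path.dirname(path) if is_file else path
    if dir_path and not os.path.exists(dir_path):
        os.makedirs(dir_path)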
Example #2
def export(output_path, db_id, db):
    def _join_check(*args):
        p = os.path.join(*args)
        assert os.path.exists(p), 'File should exist: %s' % p
        return p

    output_path_this = os.path.join(output_path, 'run%03d' % db_id)
    calib_file = _join_check(db['path'], db['calib'])
    anno_file = _join_check(db['path'], db['frame_dir'], db['anno'])
    frame_dir = _join_check(db['path'], db['frame_dir'])

    # copy frames
    print('Dealing with', frame_dir, 'saved to:', output_path_this)
    for cid in db['cam_range']:
        print('cam %d/%d' % (cid, len(db['cam_range'])), end='\r')

        output_path_this_cam = os.path.join(output_path_this, 'cam%d' % cid)
        frame_dir_this_cam = os.path.join(frame_dir, 'cam%d' % cid)
        frames = os.listdir(frame_dir_this_cam)
        frames = [os.path.join(frame_dir_this_cam, f) for f in frames]
        frames = [f for f in frames if _is_image_file(f)]
        sort_nicely(frames)
        assert len(frames) > 0, 'There should be frames.'

        my_mkdir(output_path_this_cam, is_file=False)

        for f in frames:
            shutil.copy2(f, output_path_this_cam)

    my_mkdir(output_path_this, is_file=False)
    shutil.copy2(calib_file,
                 os.path.join(output_path_this, 'M.json'))
    shutil.copy2(anno_file,
                 os.path.join(output_path_this, 'anno.json'))
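
For context, `export` expects each dataset entry `db` to provide the keys read above. A hypothetical entry is shown below; the key names are taken from the code, but every value is invented for illustration:

db_example = {
    'path': '/data/recordings/run000',  # base directory of this recording
    'frame_dir': 'frames',              # subfolder holding the cam%d directories
    'calib': 'M.json',                  # calibration file, relative to 'path'
    'anno': 'anno.json',                # annotation file, relative to 'frame_dir'
    'cam_range': [0, 1, 2, 3],          # camera ids, i.e. cam0 .. cam3
    'db_set': 'train',                  # split name checked in Example #5
}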
Example #3
    def _init_recording(self):
        """Create output folders."""
        take_name = self._take_name + '_' + datetime.datetime.now().strftime(
            "%Y-%m-%d_%H-%M")
        out_path = os.path.join(params_t.out_path, take_name)
        my_mkdir(out_path)
        out_path = out_path + '/run%03d' % self._rid + '_%s.avi'
        return out_path
Example #4

    if parsed.double:
        jobs = [(0, 'front'), (parsed.n_cols * parsed.n_rows, 'back')]

    else:
        jobs = [(0, 'front')]

    for offset, name in jobs:
        # open a new canvas
        c = canvas.canvas()

        # draw the board
        generateAprilBoard(c, parsed.n_cols, parsed.n_rows, name, offset,
                           parsed.tsize, parsed.tagspacing, parsed.tagfamily,
                           parsed.border)

        # write to file
        my_mkdir(output_name_pdf % name, is_file=True)
        c.writePDFfile(output_name_pdf % name)
        print('Created %s.pdf' % (output_name_pdf % name))

    tag_desc = dict()
    tag_desc['family'] = parsed.tagfamily
    tag_desc['border'] = parsed.border
    tag_desc['double'] = parsed.double
    tag_desc['tsize'] = parsed.tsize
    tag_desc['tspace'] = parsed.tsize * parsed.tagspacing  # actual size in meters
    tag_desc['n_x'] = parsed.n_cols
    tag_desc['n_y'] = parsed.n_rows
    tag_desc['offset'] = [0.0, 0.0, 0.0]  # these we don't know because they depend on the manufacturing process
Example #5
    my_mkdir(output_path_this, is_file=False)
    shutil.copy2(calib_file,
                 os.path.join(output_path_this, 'M.json'))
    shutil.copy2(anno_file,
                 os.path.join(output_path_this, 'anno.json'))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Accumulate all labeled data into a single dataset.')
    parser.add_argument('model', type=str, help='Model definition file.')
    parser.add_argument('output_path', type=str, help='Path to where to save the data.')
    parser.add_argument('--set_name', type=str, default='train', help='Set name to export.')
    parser.add_argument('--force', action='store_true', help='Force outputting even when the path already exists.')
    args = parser.parse_args()

    # create output path
    if not args.force:
        assert not os.path.exists(args.output_path), 'Path should not exist yet.'
    my_mkdir(args.output_path, is_file=False)

    m = Model(args.model)

    # output
    db_id = 0
    for db in m.datasets:
        if db['db_set'] != args.set_name:
            continue

        # export
        export(args.output_path, db_id, db)
        db_id += 1
Example #6
def process_labeled(model, cnt, out_path, db, anno, calib_all):
    """ How to process frames when there is annotation data available. """
    this_dataset_index_labeled = list()

    # find camera directories
    cam_base_dir = os.path.join(db['path'], db['frame_dir'])
    cam_names = ['cam%d' % cid for cid in db['cam_range']]
    cam_dirs = [os.path.join(cam_base_dir, x) for x in cam_names]
    print(' > Found %d cam directories in %s' % (len(cam_dirs), cam_base_dir))

    # find frame names
    frame_names = None
    for cd in cam_dirs:
        frames = list_frames(cd)
        print('\t> Found %d frames in %s' % (len(frames), cd))

        # check all cameras have the same amount of frames
        if frame_names is None:
            frame_names = [os.path.basename(x) for x in frames]
        assert len(frame_names) == len(
            frames), 'Number of frames does not match between cameras'

    min_num_kp = int(len(model.keypoints) * 0.8)
    # add to dataset index
    for f in tqdm(frame_names, desc=' > Processing %s' % db['path']):
        if f not in anno.keys():
            # this frame is not labeled
            continue

        # triangulate 2D points to 3D hypothesis
        K_list, _, M_list = calib_to_list(calib_all[-1], db['cam_range'])
        _, _, kp_uv, vis2d = anno_to_mat(anno[f], db['cam_range'],
                                         len(model.keypoints))
        points3d, _, vis3d, points2d_merged, vis_merged = triangulate_robust(
            kp_uv, vis2d, K_list, M_list)

        # sufficient number of 3d points found
        if np.sum(vis3d) < min_num_kp:
            continue

        img_c_list, scale_list, offset_list = preproc_sample(
            os.path.join(db['path'], db['frame_dir']), cam_names, f,
            points2d_merged, vis_merged, calib_all[-1],
            model.preprocessing['crop_oversampling'],
            model.preprocessing['crop_size'])

        for cam, img in zip(cam_names, img_c_list):
            tmp = os.path.join(
                out_path, cam,
                '%08d.jpg' % (cnt + len(this_dataset_index_labeled)))
            my_mkdir(tmp, is_file=True)
            cv2.imwrite(tmp, img)

        voxel_root = points3d[vis3d > 0.5].mean(0)
        this_dataset_index_labeled.append([
            out_path, cam_names,
            '%08d.jpg' % (cnt + len(this_dataset_index_labeled)), points3d,
            vis3d, points2d_merged, vis_merged, voxel_root, scale_list,
            offset_list,
            len(calib_all) - 1
        ])

    return this_dataset_index_labeled
Example #7
    def btn_write(self):
        self.save_label_state()  # save current annotation

        num_kp = len(self.config['keypoints'])
        empty = {
            'kp_xyz': np.zeros((num_kp, 3)),
            'vis3d': np.zeros((num_kp, ))
        }

        # assemble all info we want to write to disk
        output_data = dict()
        for k in self.file_list_sel_full_keys:
            fid = int(k)
            if k in self.label_tasks.keys():
                output_data[fid] = self.label_tasks[k]

                # project into views
                for i, cid in enumerate(self.cam_range):
                    # project into frame
                    xyz = self.label_tasks[k]['kp_xyz']
                    kp_uv = cl.project(cl.trafo_coords(xyz, self.M_list[i]),
                                       self.K_list[i], self.dist_list[i])
                    output_data[fid]['cam%d' % cid] = {
                        'kp_uv': kp_uv,
                        'vis': self.label_tasks[k]['vis3d']
                    }

            else:
                output_data[fid] = empty

        self.pb_start(len(output_data))

        # figure out base path
        i = 0
        while True:
            base_path = os.path.join(os.path.dirname(self.video_list[0]),
                                     self.output_task_dir % i)
            if not os.path.exists(base_path):
                break
            i += 1

        # dump frames
        for fid, _ in output_data.items():
            img_list, K_list, M_list, dist_list = self.precacher.get_data(fid)

            # write image frames
            for cid, img in zip(self.cam_range, img_list):
                output_path = os.path.join(base_path, 'cam%d' % cid,
                                           '%08d.png' % fid)
                my_mkdir(output_path, is_file=True)
                cv2.imwrite(output_path, img)
                # print('Dumped: ', output_path)
            self.pb_update()

        self.pb_finish()

        # dump anno
        anno_out_path = os.path.join(base_path, 'anno.json')
        my_mkdir(anno_out_path, is_file=True)
        json_dump(anno_out_path,
                  {'%08d.png' % k: v
                   for k, v in output_data.items()},
                  verbose=True)
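
The `anno.json` written at the end of Example #7 maps frame file names ('%08d.png') to the labeled data. A hedged sketch of how such a file could be read back, assuming `json_dump` serialized the arrays as plain lists; the `load_anno` helper is illustrative and not part of the source:

import json
import numpy as np

def load_anno(anno_path):
    # Load the per-frame annotation dict written by btn_write above.
    with open(anno_path, 'r') as f:
        anno = json.load(f)
    # Keys are frame names like '00000042.png'; values hold at least
    # 'kp_xyz' (Nx3) and 'vis3d' (N,) plus optional per-camera entries.
    return {name: {'kp_xyz': np.array(v['kp_xyz']),
                   'vis3d': np.array(v['vis3d'])}
            for name, v in anno.items()}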