def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('davis_annotations', type=Path)
    parser.add_argument('output_dir', type=Path)

    args = parser.parse_args()
    args.output_dir.mkdir(exist_ok=True, parents=True)

    output_log_file = log_utils.add_time_to_path(args.output_dir /
                                                 Path(__file__).name)
    log_utils.setup_logging(output_log_file)
    logging.info('Args: %s', pprint.pformat(vars(args)))

    subprocess.call([
        './git-state/save_git_state.sh',
        output_log_file.with_suffix('.git-state')
    ])

    for sequence_dir in tqdm(list(args.davis_annotations.iterdir())):
        if not sequence_dir.is_dir():
            continue
        sequence = sequence_dir.stem
        output_sequence_dir = args.output_dir / sequence
        output_sequence_dir.mkdir(exist_ok=True)
        for image_path in sequence_dir.glob('*.png'):
            image = np.array(Image.open(image_path))
            if image.ndim != 2:
                raise ValueError(
                    'Expected single-channel annotation, got shape %s for %s'
                    % (image.shape, image_path))
            # Binarize: any non-zero label becomes 255 (255, not 256, so the
            # value fits in a uint8 image).
            image = (image != 0) * 255
            scipy.misc.imsave(output_sequence_dir / image_path.name, image)
Example #2
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('numpy_dir', type=Path)
    parser.add_argument('output_dir', type=Path)
    parser.add_argument('--npy-extension', default='.npy')

    args = parser.parse_args()

    args.output_dir.mkdir(exist_ok=True, parents=True)
    output = args.output_dir / 'masks'
    output.mkdir(exist_ok=True, parents=True)

    setup_logging(
        add_time_to_path(args.output_dir / (Path(__file__).name + '.log')))
    logging.info('Args: %s\n', pprint.pformat(vars(args)))

    for path in tqdm(list(args.numpy_dir.rglob('*' + args.npy_extension))):
        segmentation = np.load(path)
        if isinstance(segmentation, np.lib.npyio.NpzFile):
            # Segmentation saved with savez_compressed; ensure there is only
            # one item in the dict and retrieve it.
            keys = list(segmentation.keys())
            assert len(keys) == 1, (
                'Numpy file (%s) contained dict with multiple items, not sure '
                'which one to load.' % path)
            segmentation = segmentation[keys[0]]
        sequence_output = output / path.stem
        sequence_output.mkdir(exist_ok=True, parents=True)
        for frame, frame_segmentation in enumerate(segmentation):
            scipy.misc.imsave(sequence_output / f'{frame:05d}.png',
                              (frame_segmentation != 0) * 255)
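
# The NpzFile branch above handles arrays written with np.savez_compressed.
# A minimal sketch of writing and reading such a file (the filename and key
# are illustrative):
import numpy as np

segmentation = np.zeros((10, 480, 854), dtype=np.uint8)  # (frames, h, w)
np.savez_compressed('example.npz', segmentation=segmentation)

loaded = np.load('example.npz')
assert isinstance(loaded, np.lib.npyio.NpzFile)
segmentation = loaded[list(loaded.keys())[0]]  # retrieve the single item
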
Example #3
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--detections-root', type=Path, required=True)
    parser.add_argument('--fbms-root', type=Path, required=True)
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument('--threshold', type=float, default=0.7)

    args = parser.parse_args()

    fbms_root = args.fbms_root
    detections_root = args.detections_root
    output_dir = args.output_dir

    # assert not output_dir.exists()
    assert detections_root.exists()
    assert fbms_root.exists()

    output_dir.mkdir(exist_ok=True, parents=True)

    setup_logging(
        add_time_to_path(output_dir / (Path(__file__).name + '.log')))
    logging.info('Args: %s\n', pprint.pformat(vars(args)))

    train_split = 'TrainingSet'
    train_fbms = fbms_root / train_split
    if train_fbms.exists():
        train_detections = detections_root / train_split
        train_output = output_dir / train_split
        assert train_detections.exists(), (
            f'No detections found for TrainingSet at {train_detections}')
        create_masks_split(train_fbms, train_detections, train_output,
                           args.threshold)

    test_split = 'TestSet'
    test_fbms = fbms_root / test_split
    if test_fbms.exists():
        test_detections = detections_root / test_split
        test_output = output_dir / test_split
        assert test_detections.exists(), (
            f'No detections found for TestSet at {test_detections}')
        create_masks_split(test_fbms, test_detections, test_output,
                           args.threshold)

    if not (train_fbms.exists() or test_fbms.exists()):
        # Assume that --fbms-root and --detections-root refer to a specific
        # split.
        create_masks_split(fbms_root, detections_root, output_dir,
                           args.threshold)
Example #4
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--masks-dir', type=Path, required=True)
    parser.add_argument('--output-dir',
                        default='{masks_parent}/davis16-evaluation')
    parser.add_argument(
        '--annotations-dir',
        type=Path,
        help='Specify alternative annotations, e.g. DAVIS 2017 fg/bg.')
    parser.add_argument(
        '--sequences-dir',
        type=Path,
        help='Specify alternative sequences dir, e.g. DAVIS 2017.')

    args = parser.parse_args()

    args.output_dir = Path(
        args.output_dir.format(masks_parent=args.masks_dir.parent))
    args.output_dir.mkdir(exist_ok=True)

    log_path = log_utils.add_time_to_path(args.output_dir /
                                          (Path(__file__).name + '.log'))
    log_utils.setup_logging(log_path)

    if args.annotations_dir is not None:
        cfg.PATH.ANNOTATION_DIR = str(args.annotations_dir)
        cfg.N_JOBS = 1  # Config changes don't propagate if this is > 1

    if args.sequences_dir is not None:
        cfg.PATH.SEQUENCES_DIR = str(args.sequences_dir)
        cfg.N_JOBS = 1  # Config changes don't propagate if this is > 1

    db_eval_dict = db_eval(args.masks_dir.name,
                           [x.name for x in args.masks_dir.iterdir()],
                           str(args.masks_dir.parent),
                           metrics=['J', 'F', 'T'])
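    # In the DAVIS 2016 benchmark, 'J' is region similarity (Jaccard index),
    # 'F' is boundary F-measure, and 'T' is temporal stability.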

    output_h5 = args.output_dir / (args.masks_dir.name + '.h5')
    logging.info("Saving results in: %s", output_h5)
    db_save_eval(db_eval_dict, outputdir=args.output_dir)

    davis_data = aggregate_frame_eval(output_h5)
    output = db_eval_view(davis_data)
    logging.info('Results:\n%s\n', output)
Example #5
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--detections-pickle', type=Path, required=True)
    parser.add_argument('--annotations-json', type=Path, required=True)
    parser.add_argument('--davis-root', type=Path, required=True)
    parser.add_argument('--threshold', type=float, default=0.7)

    args = parser.parse_args()

    output_log = log_utils.add_time_to_path(
        args.detections_pickle.parent / (Path(__file__).name + '.log'))
    log_utils.setup_logging(output_log)
    logging.info('Args:\n%s', vars(args))

    groundtruth = COCO(str(args.annotations_json))
    image_ids = sorted(groundtruth.getImgIds())
    # Map <sequence_name>/<frame>.png to list of segmentations, sorted in
    # ascending order of scores.
    results = {}
    with open(args.detections_pickle, 'rb') as f:
        data = pickle.load(f)
        boxes = data['all_boxes']
        masks = data['all_segms']
        num_classes = len(boxes)
        for c in range(num_classes):
            assert len(boxes[c]) == len(image_ids), (
                f'Expected {len(image_ids)} boxes for class {c}, got '
                f'{len(boxes[c])}')
        for i, image_id in enumerate(image_ids):
            scores = []
            segmentations = []
            # Merge all classes into one.
            for c in range(1, num_classes):
                scores.extend(boxes[c][i][:, 4])
                segmentations.extend(masks[c][i])
            segmentation_scores = sorted(
                zip(segmentations, scores), key=lambda x: x[1])
            results[groundtruth.imgs[image_id]['file_name']] = [
                segmentation for segmentation, score in segmentation_scores
                if score > args.threshold
            ]

    sequence_frames = collections.defaultdict(list)
    for x in results.keys():
        x = Path(x)
        sequence_frames[x.parent.name].append(x)
    annotations_dir = args.davis_root / 'Annotations' / '480p'

    metrics = []  # List of (frame name, precision, recall, f-measure) tuples
    for sequence, frames in tqdm(sequence_frames.items()):
        frames = sorted(frames, key=lambda x: int(x.stem))
        davis_sequence = annotations_dir / sequence
        davis_frames = sorted(
            davis_sequence.glob('*.png'), key=lambda x: int(x.stem))
        assert (
            len(davis_frames) == len(frames)
            or len(davis_frames) == (len(frames) + 1)
        ), 'Unexpected number of frames. Expected: %s or %s, saw %s' % (
            len(frames), len(frames) + 1, len(davis_frames))
        for i, frame_path in enumerate(davis_frames):
            frame_name = str(frame_path.relative_to(annotations_dir))
            groundtruth = np.array(Image.open(frame_path))
            # Some frames in DAVIS 16 have an extra channel, but this code
            # should only be used with DAVIS 17.
            assert groundtruth.ndim == 2, (
                'Groundtruth has multiple channels. This may be because you '
                'are passing DAVIS 2016 annotations, which is not supported.')
            unique_objects = get_unique_objects(groundtruth)
            groundtruth_masks = [
                groundtruth == obj_id for obj_id in unique_objects
            ]
            if i == (len(davis_frames) - 1) and frame_name not in results:
                previous_frame_name = '%s/%05d.png' % (sequence, i - 1)
                results[frame_name] = results[previous_frame_name]

            prediction = np.full(groundtruth.shape, fill_value=-1)
            for p, predicted_mask in enumerate(results[frame_name]):
                prediction[mask_util.decode(predicted_mask) != 0] = p
            predicted_masks = [
                (prediction == p) for p in np.unique(prediction)
                if p != -1
            ]

            num_predicted = [m.sum() for m in predicted_masks]
            num_groundtruth = [x.sum() for x in groundtruth_masks]
            f_measures = np.zeros((len(groundtruth_masks),
                                   len(predicted_masks)))
            intersections = {}
            for g, groundtruth_mask in enumerate(groundtruth_masks):
                for p, predicted_mask in enumerate(predicted_masks):
                    intersection = (groundtruth_mask & predicted_mask).sum()
                    intersections[g, p] = intersection
                    precision = intersection / num_predicted[p]
                    recall = intersection / num_groundtruth[g]
                    f_measures[g, p] = compute_f_measure(precision, recall)

            # Tuple of (groundtruth_indices, predicted_indices)
            assignment = scipy.optimize.linear_sum_assignment(-f_measures)
            assignment = zip(assignment[0].tolist(), assignment[1].tolist())

            num_predicted = (prediction != -1).sum()
            num_groundtruth = sum(groundtruth_mask.sum()
                                  for groundtruth_mask in groundtruth_masks)
            num_correct = sum(intersections[(g, p)] for g, p in assignment)

            precision = 100 * num_correct / max(num_predicted, 1e-10)
            recall = 100 * num_correct / num_groundtruth
            f_measure = compute_f_measure(precision, recall)
            metrics.append((frame_name, precision, recall, f_measure))

    logging.info('Average precision: %.2f', np.mean([m[1] for m in metrics]))
    logging.info('Average recall: %.2f', np.mean([m[2] for m in metrics]))
    logging.info('Average f-measure: %.2f', np.mean([m[3] for m in metrics]))
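
# compute_f_measure and get_unique_objects are not defined in this listing. A
# minimal sketch consistent with how they are used above, assuming the
# standard F-measure and a background label of 0:
import numpy as np

def compute_f_measure(precision, recall):
    # Harmonic mean of precision and recall, guarding against 0/0.
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

def get_unique_objects(groundtruth):
    # Object ids present in a label map, excluding the background label 0.
    return [x for x in np.unique(groundtruth) if x != 0]
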
Example #6
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--frames-dir', type=Path, required=True)
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument(
        '--code-dir',
        type=Path,
        default=Path(
            '/home/achald/research/misc/motion-segmentation-methods/moseg-multicut/'
        ))
    parser.add_argument(
        '--sampling',
        default=4,
        type=int,
        help=("From Keuper et al code: 'sampling' specifies the subsampling "
              "parameter. If you specify 8 (a good default value), only every "
              "8th pixel in x and y direction is taken into account. If you "
              "specify 1, the sampling will be dense (be careful, memory "
              "consumption and computation time will be very large in this "
              "setting)."))
    parser.add_argument(
        '--prior',
        default=0.5,
        type=float,
        help=("From Keuper et al code: prior specifies the prior cut "
              "probability. The higher this value is chosen, the more "
              "segments will be generated. For good performance, choose 0.5."))
    parser.add_argument('--extension', default='.png')

    args = parser.parse_args()

    frames = list(args.frames_dir.glob('*' + args.extension))
    if not frames:
        raise ValueError('Found no images with extension %s.' % args.extension)
    args.output_dir.mkdir(exist_ok=True, parents=True)

    log_utils.setup_logging(
        log_utils.add_time_to_path(
            args.output_dir / (Path(__file__).name + '.log')))
    logging.info('Args:\n%s', vars(args))

    from natsort import natsorted, ns
    frames = natsorted(frames, alg=ns.PATH)

    ppm_frames = []
    for frame in frames:
        ppm_frame = args.output_dir / (frame.stem + '.ppm')
        if not ppm_frame.exists():
            Image.open(frame).save(ppm_frame)
        ppm_frames.append(ppm_frame)

    video_name = args.frames_dir.parent.name
    bmf_path = args.output_dir / (video_name + '.bmf')
    with open(bmf_path, 'w') as f:
        f.write('%s %s\n' % (len(frames), 1))
        for ppm_frame in ppm_frames:
            f.write(ppm_frame.name + '\n')

    # Command:
    # ./motionseg_release <bmf_path> 0 <num_frames> <sampling> <prior>
    command = [
        './motionseg_release', bmf_path, 0,
        len(frames), args.sampling, args.prior
    ]
    command = [str(x) for x in command]
    logging.info('Running command:\n%s', ' '.join(command))
    subprocess.check_output(command, cwd=args.code_dir)
    logging.info('Finished!')
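
# For reference, for a 3-frame sequence named 'bear', the generated bear.bmf
# would contain (frame names illustrative):
#
#   3 1
#   00000.ppm
#   00001.ppm
#   00002.ppm
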
Example #7
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--sparse-dir',
                        type=Path,
                        required=True,
                        help='Multicut results directory')
    parser.add_argument('--images-dir',
                        type=Path,
                        required=True,
                        help='Path to original ppm images.')
    parser.add_argument(
        '--ochs-code-dir',
        type=Path,
        default=DEFAULT_CODE_DIR,
        help=('Ochs et al. PAMI 2014 code directory. Should contain '
              '"./dens100gpu".'))
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument(
        '--cuda55-dir',
        help=('Directory containing libcudart.so.5.5. By default, this is '
              'assumed to be the code dir.'),
        default='{code_dir}')
    parser.add_argument('--lambda', dest='lmbda', type=float, default=200.0)
    parser.add_argument('--max-iter', type=int, default=2000)

    args = parser.parse_args()

    if not args.ochs_code_dir.exists():
        raise ValueError('Could not find code directory at %s' %
                         args.ochs_code_dir)

    args.cuda55_dir = Path(args.cuda55_dir.format(code_dir=args.ochs_code_dir))
    if not args.cuda55_dir.exists():
        raise ValueError('Could not find cuda5.5 directory at %s' %
                         args.cuda55_dir)

    dat_path = list(args.sparse_dir.glob('Track*.dat'))
    if len(dat_path) != 1:
        raise ValueError(
            'Expected exactly 1 .dat file in --sparse-dir, found %s' %
            len(dat_path))
    dat_path = dat_path[0]

    args.output_dir.mkdir(parents=True)
    log_path = log_utils.add_time_to_path(args.output_dir /
                                          (Path(__file__).name + '.log'))
    log_utils.setup_logging(log_path)
    logging.info('Args:\n%s', vars(args))

    filestructure_lines = [
        '', 's dataDir /', 's tracksDir /',
        'f lambda %.1f' % args.lmbda,
        'i maxiter %d' % args.max_iter
    ]
    filestructure_path = args.output_dir / 'filestructureDensify.cfg'
    with open(filestructure_path, 'w') as f:
        f.write('\n'.join(filestructure_lines))

    env = os.environ.copy()
    # Prepend the cuda5.5 directory to the library search path, preserving any
    # existing value.
    env['LD_LIBRARY_PATH'] = (str(args.cuda55_dir) + ':' +
                              env.get('LD_LIBRARY_PATH', ''))
    dense_output = args.output_dir
    cmd = [
        './dens100gpu',
        str(filestructure_path),
        str(args.images_dir / 'image.ppm'),
        str(dat_path), '-1',
        str(dense_output)
    ]
    logging.info('Running command:\n%s', ' '.join(cmd))
    # bufsize=0: Print output immediately (don't buffer)
    subprocess.call(cmd, bufsize=0, cwd=args.ochs_code_dir, env=env)
    logging.info('Output results to %s', dense_output)
Example #8
def main():
    tracking_parser = create_tracking_parser()

    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        parents=[tracking_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--images-dir', type=Path, required=True)
    parser.add_argument('--detections-dir', type=Path, required=True)
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument('--save-images', type=parse_bool, default=False)
    parser.add_argument('--save-numpy', type=parse_bool, default=False)
    parser.add_argument('--save-numpy-every-kth-frame',
                        help='Save only every kth frame in numpy output.',
                        type=int,
                        default=1)
    parser.add_argument('--save-video', type=parse_bool, default=True)
    parser.add_argument('--fps', default=30, type=float)
    parser.add_argument('--extensions', nargs='*', default=IMG_EXTENSIONS)
    parser.add_argument('--dataset',
                        default='coco',
                        choices=['coco', 'objectness'])
    parser.add_argument(
        '--filename-format',
        choices=[
            'frame', 'frameN', 'sequence_frame', 'sequence-frame', 'fbms'
        ],
        default='frame',
        help=('Specifies how to get frame number from the filename. '
              '"frame": the filename is the frame number, '
              '"frameN": format "frame<number>", '
              '"sequence_frame": frame number is separated by an underscore, '
              '"sequence-frame": frame number is separated by a dash, '
              '"fbms": assume fbms style frame numbers'))
    parser.add_argument('--quiet', action='store_true')

    tracking_params, remaining_argv = tracking_parser.parse_known_args()
    args = parser.parse_args(remaining_argv)

    tracking_params = vars(tracking_params)

    assert (args.save_images or args.save_video or args.save_numpy), (
        'One of --save-images, --save-video, or --save-numpy must be '
        'specified')

    output_log_file = log_utils.add_time_to_path(args.output_dir /
                                                 'tracker.log')
    output_log_file.parent.mkdir(exist_ok=True, parents=True)
    log_utils.setup_logging(output_log_file)
    if args.quiet:
        logging.root.setLevel(logging.WARN)

    logging.info('Args: %s', pprint.pformat(vars(args)))
    logging.info('Tracking params: %s', pprint.pformat(tracking_params))
    subprocess.call([
        './git-state/save_git_state.sh',
        output_log_file.with_suffix('.git-state')
    ])

    detectron_input = args.detections_dir
    if not detectron_input.is_dir():
        raise ValueError('--detections-dir %s is not a directory!' %
                         args.detections_dir)

    if args.filename_format == 'fbms':
        from utils.fbms.utils import get_framenumber
    elif args.filename_format == 'frameN':

        def get_framenumber(x):
            return int(x.split('frame')[1])
    elif args.filename_format == 'sequence-frame':

        def get_framenumber(x):
            return int(x.split('-')[-1])
    elif args.filename_format == 'sequence_frame':

        def get_framenumber(x):
            return int(x.split('_')[-1])
    elif args.filename_format == 'frame':
        get_framenumber = int
    else:
        raise ValueError('Unknown --filename-format: %s' %
                         args.filename_format)

    args.extensions = [x if x[0] == '.' else '.' + x for x in args.extensions]

    images = glob_ext(args.images_dir, args.extensions, recursive=True)

    image_subdirs = sorted(
        set(x.resolve().parent.relative_to(args.images_dir) for x in images))
    for subdir in tqdm(image_subdirs):
        output_numpy = None
        output_images_dir = None
        output_video = None

        if args.save_numpy:
            if subdir == Path('.'):
                output_numpy = args.output_dir / 'tracked.npz'
            else:
                output_numpy = args.output_dir / subdir.with_suffix('.npz')
        if args.save_images:
            output_images_dir = args.output_dir / subdir / 'images'
            output_images_dir.mkdir(exist_ok=True, parents=True)
        if args.save_video:
            if subdir == Path('.'):
                output_video = args.output_dir / 'tracked.mp4'
            else:
                output_video = args.output_dir / subdir.with_suffix('.mp4')

        if all(x is None or x.exists()
               for x in (output_numpy, output_images_dir, output_video)):
            logging.info('%s already processed, skipping', subdir)
            continue
        detections_dir = args.detections_dir / subdir
        if not detections_dir.exists():
            logging.warning('Skipping sequence %s: detections not found at %s',
                            subdir, detections_dir)
            continue

        detection_results = load_detectron_pickles(
            detections_dir, frame_parser=get_framenumber)
        track_and_visualize(
            detection_results,
            args.images_dir / subdir,
            tracking_params,
            get_framenumber,
            args.extensions,
            vis_dataset=args.dataset,
            output_images_dir=output_images_dir,
            output_video=output_video,
            output_video_fps=args.fps,
            output_numpy=output_numpy,
            output_numpy_every_kth=args.save_numpy_every_kth_frame,
            output_track_file=None)
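
# For reference, hypothetical file stems and the frame numbers the
# --filename-format dispatch above would extract from them:
#
#   'frame':          '00042'      -> 42  (the whole stem is the frame number)
#   'frameN':         'frame42'    -> 42  (text after 'frame')
#   'sequence_frame': 'bear_00042' -> 42  (after the last underscore)
#   'sequence-frame': 'bear-00042' -> 42  (after the last dash)
#   'fbms':           handled by utils.fbms.utils.get_framenumber
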
Example #9
def main():
    tracking_parser = tracker.create_tracking_parser()

    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        parents=[tracking_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--detections-dir',
        type=Path,
        help=('Contains subdirectories for each FBMS sequence, each of which '
              'contains pickle files of detections for each frame.'),
        required=True)
    parser.add_argument(
        '--fbms-split-root',
        type=Path,
        required=True,
        help=('Directory containing subdirectories for each sequence, each of '
              'which contains a ".dat" file of groundtruth. E.g. '
              '<FBMS_ROOT>/TestSet'))
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument('--save-images', action='store_true')
    parser.add_argument('--save-video', action='store_true')
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--extension', default='.jpg')
    parser.add_argument(
        '--vis-dataset',
        default='objectness',
        choices=['coco', 'objectness'],
        help='Dataset to use for mapping label indices to names.')
    parser.add_argument('--filter-sequences', default=[], nargs='*', type=str)
    parser.add_argument('--duplicate-last-frame', action='store_true')

    tracking_params, remaining_argv = tracking_parser.parse_known_args()
    args = parser.parse_args(remaining_argv)

    tracking_params = vars(tracking_params)

    args.output_dir.mkdir(exist_ok=True, parents=True)
    output_log_file = log_utils.add_time_to_path(
        args.output_dir / 'tracker.log')
    log_utils.setup_logging(output_log_file)
    logging.info('Args: %s', pprint.pformat(vars(args)))
    logging.info('Tracking params: %s', pprint.pformat(tracking_params))
    subprocess.call([
        './git-state/save_git_state.sh',
        output_log_file.with_suffix('.git-state')
    ])

    detectron_input = args.detections_dir
    if not detectron_input.is_dir():
        raise ValueError(
            '--detections-dir %s is not a directory!' % args.detections_dir)

    for split in ['TestSet', 'TrainingSet']:
        if (detectron_input / split).exists():
            raise ValueError(
                f"--detectron-dir contains a '{split}' subdirectory; it "
                "should just contain a subdirectory for each sequence.")

    def detections_loader(sequence):
        return tracker.load_detectron_pickles(
            detectron_input / sequence,
            frame_parser=fbms_utils.get_framenumber)

    if not args.filter_sequences:
        args.filter_sequences = None

    track_fbms(
        args.fbms_split_root,
        detections_loader,
        args.output_dir,
        tracking_params,
        args.extension,
        args.save_video,
        args.vis_dataset,
        args.fps,
        save_images=args.save_images,
        filter_sequences=args.filter_sequences,
        duplicate_last_frame=args.duplicate_last_frame)
Example #10
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--eval-binary',
        help='Path to MoSegEvalAllPr binary from FBMS eval code.',
        required=True)
    parser.add_argument(
        '--predictions-dir',
        help=('Predictions directory. Should contain "all_tracks.txt" and '
              '"all_shots.txt".'))
    parser.add_argument('--split',
                        default='all',
                        choices=['10', '50', '200', 'all'])
    parser.add_argument(
        '--object-threshold',
        default=0.75,
        type=float,
        help=('F-measure threshold for whether an object has been extracted. '
              'Default is 0.75 following {Ochs, Peter, Jitendra Malik, and '
              'Thomas Brox. "Segmentation of moving objects by long term '
              'video analysis." IEEE transactions on pattern analysis and '
              'machine intelligence 36.6 (2014): 1187-1200.}'))

    args = parser.parse_args()

    binary = args.eval_binary
    predictions_dir = Path(args.predictions_dir)

    logging_path = log_utils.add_time_to_path(predictions_dir /
                                              (Path(__file__).name + '.log'))
    log_utils.setup_logging(logging_path)
    logging.info('Args:\n%s', vars(args))

    file_logger = logging.getLogger(str(logging_path))

    shots_file = predictions_dir / 'all_shots.txt'
    tracks_file = predictions_dir / 'all_tracks.txt'
    command = [
        str(binary),
        str(shots_file), args.split,
        str(tracks_file),
        str(args.object_threshold)
    ]
    logging.info('Running command:\n%s', (' '.join(command)))

    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        file_logger.info('Output from FBMS evaluation:\n%s',
                         output.decode('utf-8'))
    except subprocess.CalledProcessError as e:
        logging.error('Error found when evaluating:')
        output = e.output.decode('utf-8')
        logging.exception(output)
        if ('Could not find "Average region density" in the file!' in output
                and len(args.predictions_dir) > 250):
            logging.info(
                "\n############\n"
                "### NOTE ###\n"
                "############\n"
                "This may be due to the very long path to --predictions-dir. "
                "Either move your results to have fewer characters in the "
                "path, OR use the modified FBMS evaluation code at "
                "<https://github.com/achalddave/fbms-evaluation>, or apply "
                "the patch at "
                "https://github.com/achalddave/fbms-evaluation/commit/e7df914"
                " to your copy of the evaluation code.")
        elif ('Could not find "Average Precision, Recall, F-measure" '
              'in the file!' in output):
            logging.info(
                "\n############\n"
                "### NOTE ###\n"
                "############\n"
                "This may be due to a very long output Numbers file from "
                "MoSegEvalPR. Try using the modified FBMS evaluation code "
                "at <https://github.com/achalddave/fbms-evaluation>, or "
                "apply the patch at "
                "https://github.com/achalddave/fbms-evaluation/commit/7fdff53f"
                " to your copy of the evaluation code.")
        import sys
        sys.exit(e)

    # Format of the output file from FBMS evaluation code.
    output_file = tracks_file.with_name(
        tracks_file.stem + '_Fgeq{:4.2f}'.format(100 * args.object_threshold) +
        'Numbers.txt')
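    # For the default --object-threshold of 0.75, this resolves to, e.g.,
    # 'all_tracks_Fgeq75.00Numbers.txt'.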
    if output_file.exists():
        logging.info('Final results:\n')
        with open(output_file, 'r') as f:
            logging.info(f.read())
    else:
        logging.error("Couldn't find FBMS evaluation results at path: %s" %
                      output_file)
Example #11
if __name__ == "__main__":
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--masks-dir', type=Path, required=True)
    parser.add_argument('--fbms-dir', required=True, type=Path)
    parser.add_argument(
        '--eval-code-dir',
        type=Path,
        default='/home/achald/research/misc/datasets/fbms/fgbg-eval-pavel/')
    parser.add_argument('--matlab-binary', type=Path, default='matlab')

    args = parser.parse_args()

    output_log_file = log_utils.add_time_to_path(
        args.masks_dir / (Path(__file__).name + '.log'))
    log_utils.setup_logging(output_log_file)

    for split in ['TrainingSet', 'TestSet']:
        try:
            command = [
                str(args.matlab_binary), '-nodesktop', '-nosplash', '-r',
                (f"evaluateAllSeqs('{args.fbms_dir}', '{args.masks_dir}', "
                 f"{{'{split}'}}); quit")
            ]
            logging.info(f'Command:\n{" ".join(command)}')
            output = subprocess.run(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    check=True,
                                    cwd=args.eval_code_dir)
            logging.info('Evaluation output for %s:\n%s', split,
                         output.stdout.decode('utf-8'))
        except subprocess.CalledProcessError as e:
            logging.error('Error evaluating split %s:\n%s', split,
                          e.output.decode('utf-8'))
Example #12
def main():
    tracking_parser = tracker.create_tracking_parser(
        suppress_args=['--score-init-min', '--score-continue-min'])

    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        parents=[tracking_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--images-dir', type=Path, required=True)
    parser.add_argument(
        '--init-detections-dir',
        type=Path,
        help=('Contains pickle files of detections for each frame. These '
              'detections are used to initialize and continue tracks.'),
        required=True)
    parser.add_argument(
        '--continue-detections-dir',
        type=Path,
        help=('Contains pickle files of detections for each frame. These '
              'detections are used only to continue tracks. If not specified, '
              'the continue detections are assumed to be stored in the '
              'pickles in init_detections_dir, under the "appearance_stream" '
              'key for each frame.'))
    parser.add_argument(
        '--remove-continue-overlap',
        type=float,
        default=0.1,
        help=('Remove detections from --continue-detections-dir if they '
              'overlap more than this threshold with a detection from '
              '--init-detections-dir.'))
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument(
        '--score-init-min',
        type=float,
        default=0.9,
        help=('Detection confidence threshold for starting a new track from '
              '--init-detections-dir detections.'))
    parser.add_argument(
        '--score-continue-min',
        type=float,
        default=0.7,
        help=('Detection confidence threshold for continuing a track from '
              '--init-detections-dir or --continue-detections-dir '
              'detections.'))
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--extensions', nargs='*', default=IMG_EXTENSIONS)
    parser.add_argument(
        '--vis-dataset',
        default='objectness',
        choices=['coco', 'objectness'],
        help='Dataset to use for mapping label indices to names.')
    parser.add_argument('--save-images', type=parse_bool, default=False)
    parser.add_argument('--save-merged-detections',
                        type=parse_bool,
                        default=False)
    parser.add_argument('--save-numpy', type=parse_bool, default=False)
    parser.add_argument('--save-numpy-every-kth-frame',
                        help='Save only every kth frame in numpy output.',
                        type=int,
                        default=1)
    parser.add_argument('--save-video', type=parse_bool, default=True)
    parser.add_argument(
        '--filename-format',
        choices=[
            'frame', 'frameN', 'sequence_frame', 'sequence-frame', 'fbms'
        ],
        default='frame',
        help=('Specifies how to get frame number from the filename. '
              '"frame": the filename is the frame number, '
              '"frameN": format <frame><number>, '
              '"sequence_frame": frame number is separated by an underscore, '
              '"sequence-frame": frame number is separated by a dash, '
              '"fbms": assume fbms style frame numbers'))
    parser.add_argument('--recursive',
                        action='store_true',
                        help='Look recursively in --images-dir for images.')
    parser.add_argument('--quiet', action='store_true')

    tracking_params, remaining_argv = tracking_parser.parse_known_args()
    args = parser.parse_args(remaining_argv)

    tracking_params = vars(tracking_params)
    tracking_params['score_init_min'] = args.score_init_min
    tracking_params['score_continue_min'] = args.score_continue_min

    args.output_dir.mkdir(exist_ok=True, parents=True)
    output_log_file = log_utils.add_time_to_path(args.output_dir /
                                                 'tracker.log')
    log_utils.setup_logging(output_log_file)
    if args.quiet:
        logging.root.setLevel(logging.WARN)
    logging.info('Args:\n%s', pprint.pformat(vars(args)))
    logging.info('Tracking params:\n%s', pprint.pformat(tracking_params))

    subprocess.call([
        './git-state/save_git_state.sh',
        output_log_file.with_suffix('.git-state')
    ])

    if args.filename_format == 'fbms':
        get_framenumber = fbms_utils.get_framenumber
    elif args.filename_format == 'frameN':

        def get_framenumber(x):
            return int(x.split('frame')[1])
    elif args.filename_format == 'sequence-frame':

        def get_framenumber(x):
            return int(x.split('-')[-1])
    elif args.filename_format == 'sequence_frame':

        def get_framenumber(x):
            return int(x.split('_')[-1])
    elif args.filename_format == 'frame':
        get_framenumber = int
    else:
        raise ValueError('Unknown --filename-format: %s' %
                         args.filename_format)

    args.extensions = [x if x[0] == '.' else '.' + x for x in args.extensions]
    track_fn = functools.partial(
        two_detector_track,
        get_framenumber=get_framenumber,
        tracking_params=tracking_params,
        remove_continue_overlap=args.remove_continue_overlap,
        extensions=args.extensions,
        output_numpy_every_kth_frame=args.save_numpy_every_kth_frame,
        fps=args.fps)

    if not args.recursive:
        output_merged = None
        if args.save_merged_detections:
            output_merged = args.output_dir / 'merged'
            assert not output_merged.exists()
            output_merged.mkdir()
        output_numpy = None
        if args.save_numpy:
            output_numpy = args.output_dir / 'results.npz'
        output_images_dir = None
        if args.save_images:
            output_images_dir = args.output_dir / 'images'
            output_images_dir.mkdir(exist_ok=True, parents=True)
        output_video = None
        if args.save_video:
            output_video = args.output_dir / 'video.mp4'
        track_fn(images_dir=args.images_dir,
                 init_detections_dir=args.init_detections_dir,
                 continue_detections_dir=args.continue_detections_dir,
                 output_video=output_video,
                 output_merged_dir=output_merged,
                 output_numpy=output_numpy,
                 progress=True)
    else:
        images = glob_ext(args.images_dir, args.extensions, recursive=True)
        image_subdirs = sorted(
            set(x.parent.relative_to(args.images_dir) for x in images))
        for subdir in tqdm(image_subdirs):
            output_merged = None
            output_numpy = None
            output_images_dir = None
            output_video = None
            if args.save_merged_detections:
                output_merged = args.output_dir / subdir / 'merged-detections'
                output_merged.mkdir(exist_ok=True, parents=True)
            if args.save_numpy:
                output_numpy = args.output_dir / subdir.with_suffix('.npz')
            if args.save_images:
                output_images_dir = args.output_dir / subdir / 'images'
                output_images_dir.mkdir(exist_ok=True, parents=True)
            if args.save_video:
                output_video = args.output_dir / subdir.with_suffix('.mp4')

            if all(x is None or x.exists()
                   for x in (output_merged, output_numpy, output_images_dir,
                             output_video)):
                logging.info('%s already processed, skipping', subdir)
                continue
            init_dir = args.init_detections_dir / subdir
            if not init_dir.exists():
                logging.warning(
                    'Skipping sequence %s: detections not found at %s', subdir,
                    init_dir)
                continue
            continue_dir = None
            if args.continue_detections_dir:
                continue_dir = args.continue_detections_dir / subdir
                if not continue_dir.exists():
                    logging.warning(
                        'Skipping sequence %s: detections not found at %s',
                        subdir, continue_dir)
                    continue

            track_fn(images_dir=args.images_dir / subdir,
                     init_detections_dir=args.init_detections_dir / subdir,
                     continue_detections_dir=continue_dir,
                     output_video=output_video,
                     output_images_dir=output_images_dir,
                     output_merged_dir=output_merged,
                     output_numpy=output_numpy,
                     progress=False)
Example #13
def main():
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--detections-root',
        type=Path,
        help=('Contains a subdirectory for each sequence, containing pickle '
              'files of detectron outputs for each frame.'))
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument('--images-dir', type=Path, required=True)
    parser.add_argument('--threshold', type=float, default=0.7)
    parser.add_argument('--recursive', action='store_true')
    parser.add_argument('--extension',
                        default='.png',
                        help='Extension for images in --images-dir')
    parser.add_argument(
        '--duplicate-last-frame',
        action='store_true',
        help=('Whether to duplicate the last frame. This is useful if we '
              'only have predictions for n-1 frames (since flow is only '
              'computed on the first n-1 frames). NOTE: This only works if '
              'the pickle files are of the form "<frame_id>.pickle".'))

    args = parser.parse_args()

    assert args.detections_root.exists()

    args.output_dir.mkdir(exist_ok=True, parents=True)

    setup_logging(
        add_time_to_path(args.output_dir / (Path(__file__).name + '.log')))
    logging.info('Args: %s\n', pprint.pformat(vars(args)))

    if args.recursive:
        all_pickles = args.detections_root.rglob('*.pickle')
        all_predictions = collections.defaultdict(list)
        for x in all_pickles:
            all_predictions[x.parent].append(x)
    else:
        all_predictions = {
            args.detections_root: list(args.detections_root.glob('*.pickle'))
        }
        if not all_predictions[args.detections_root]:
            raise ValueError("Found no .pickle files in --detections-root. "
                             "Did you mean to specify --recursive?")
    all_predictions = {
        k: natsorted(v, alg=ns.PATH)
        for k, v in all_predictions.items()
    }
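    # natsorted with alg=ns.PATH sorts numerically within path components,
    # e.g. ['2.pickle', '10.pickle'] rather than the lexicographic
    # ['10.pickle', '2.pickle'] that plain sorted() would give.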

    # The DAVIS 2016 evaluation code really doesn't like any other files /
    # directories in the input directory, so we put the masks in a subdirectory
    # without the log file.
    masks_output_dir = args.output_dir / 'masks'
    for sequence_dir, sequence_predictions in tqdm(all_predictions.items()):
        relative_dir = sequence_dir.relative_to(args.detections_root)
        create_masks_sequence(sequence_predictions,
                              args.images_dir / relative_dir,
                              masks_output_dir / relative_dir,
                              args.threshold,
                              args.extension,
                              duplicate_last_frame=args.duplicate_last_frame)
Example #14
def main():
    tracking_parser = tracker.create_tracking_parser(
        suppress_args=['--score-init-min', '--score-continue-min'])

    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        parents=[tracking_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--init-detections-dir',
        type=Path,
        help=('Contains subdirectories for each FBMS sequence, each of which '
              'contains pickle files of detections for each frame. These '
              'detections are used to initialize and continue tracks.'),
        required=True)
    parser.add_argument(
        '--continue-detections-dir',
        type=Path,
        help=('Contains subdirectories for each FBMS sequence, each of which '
              'contains pickle files of detections for each frame. These '
              'detections are used only to continue tracks.'),
        required=True)
    parser.add_argument(
        '--remove-continue-overlap',
        type=float,
        default=0.1,
        help=('Remove detections from --continue-detections-dir if they '
              'overlap more than this threshold with a detection from '
              '--init-detections-dir.'))
    parser.add_argument(
        '--fbms-split-root',
        type=Path,
        required=True,
        help=('Directory containing subdirectories for each sequence, each of '
              'which contains a ".dat" file of groundtruth. E.g. '
              '<FBMS_ROOT>/TestSet'))
    parser.add_argument('--output-dir', type=Path, required=True)
    parser.add_argument(
        '--score-init-min',
        type=float,
        default=0.9,
        help=('Detection confidence threshold for starting a new track from '
              '--init-detections-dir detections.'))
    parser.add_argument(
        '--score-continue-min',
        type=float,
        default=0.7,
        help=('Detection confidence threshold for continuing a track from '
              '--init-detections-dir or --continue-detections-dir '
              'detections.'))
    parser.add_argument('--save-video', action='store_true')
    parser.add_argument('--fps', type=int, default=30)
    parser.add_argument('--extension', default='.jpg')
    parser.add_argument(
        '--vis-dataset',
        default='objectness',
        choices=['coco', 'objectness'],
        help='Dataset to use for mapping label indices to names.')
    parser.add_argument('--save-images', action='store_true')
    parser.add_argument('--filter-sequences', default=[], nargs='*', type=str)
    parser.add_argument('--save-merged-detections', action='store_true')

    tracking_params, remaining_argv = tracking_parser.parse_known_args()
    args = parser.parse_args(remaining_argv)

    tracking_params = vars(tracking_params)

    args.output_dir.mkdir(exist_ok=True, parents=True)
    output_log_file = log_utils.add_time_to_path(args.output_dir /
                                                 'tracker.log')
    log_utils.setup_logging(output_log_file)
    logging.info('Args: %s', pprint.pformat(vars(args)))
    logging.info('Tracking params: %s', pprint.pformat(tracking_params))

    subprocess.call([
        './git-state/save_git_state.sh',
        output_log_file.with_suffix('.git-state')
    ])
    if args.save_merged_detections:
        output_merged = args.output_dir / 'merged'
        assert not output_merged.exists()
        output_merged.mkdir()

    # We want to use init_detections with score > s_i to init tracks, and
    # (init_detections or continue_detections with score > s_c) to continue
    # tracks. However, tracker.track only wants one set of detections, so we
    # do some score rescaling and then merge the detections.
    #
    # Let s_i be --score-init-min and s_c be --score-continue-min.
    #
    # Detections that can init tracks:
    #   I1: init_detections     with score > s_i
    #
    # Detections that can continue tracks:
    #   C1: init_detections     with score > s_c
    #   C2: continue_detections with score > s_c
    #
    # Set the score_init_min passed to the tracker to 1.001. Then, we can
    # lift the score of all detections in I1 above that threshold (by adding
    # 1.001 to the score), and leave all other detections' scores as they
    # are. 1.001 is arbitrary; we just need it to be higher than any regular
    # detection score. (A sketch of this merging follows this example.)
    tracking_params['score_init_min'] = 1.001
    tracking_params['score_continue_min'] = args.score_continue_min

    def detections_loader(sequence):
        init_detections = tracker.load_detectron_pickles(
            args.init_detections_dir / sequence, fbms_utils.get_framenumber)
        continue_detections = tracker.load_detectron_pickles(
            args.continue_detections_dir / sequence,
            fbms_utils.get_framenumber)
        merged_detections = merge_detections(
            init_detections,
            continue_detections,
            score_init_min=args.score_init_min,
            score_continue_min=args.score_continue_min,
            remove_continue_overlap=args.remove_continue_overlap,
            new_score_init_min=tracking_params['score_init_min'])
        if args.save_merged_detections:
            output_merged_sequence = output_merged / sequence
            output_merged_sequence.mkdir()
            for file in merged_detections:
                with open(output_merged_sequence / (file + '.pickle'),
                          'wb') as f:
                    pickle.dump(merged_detections[file], f)
        return merged_detections

    if not args.filter_sequences:
        args.filter_sequences = None

    track_fbms(fbms_split_root=args.fbms_split_root,
               detections_loader=detections_loader,
               output_dir=args.output_dir,
               tracking_params=tracking_params,
               frame_extension=args.extension,
               save_video=args.save_video,
               vis_dataset=args.vis_dataset,
               fps=args.fps,
               save_images=args.save_images,
               filter_sequences=args.filter_sequences,
               duplicate_last_frame=False)
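
# merge_detections is not defined in this listing. A simplified sketch of the
# score-rescaling merge described in the comment block above, operating on
# flat lists of (box, score) pairs rather than the detectron pickle layout
# the real code uses; boxes are [x1, y1, x2, y2]:
def iou(a, b):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes.
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / max(union, 1e-10)


def merge_detections_sketch(init, cont, score_init_min, score_continue_min,
                            remove_continue_overlap, new_score_init_min):
    merged = []
    for box, score in init:
        if score > score_init_min:
            # Lift init-worthy detections above the tracker's rescaled init
            # threshold (new_score_init_min, i.e. 1.001 above).
            merged.append((box, score + new_score_init_min))
        elif score > score_continue_min:
            merged.append((box, score))
    init_boxes = [box for box, score in init if score > score_continue_min]
    for box, score in cont:
        if score <= score_continue_min:
            continue
        # Drop continue detections that overlap an init detection too much.
        if any(iou(box, b) > remove_continue_overlap for b in init_boxes):
            continue
        merged.append((box, score))
    return merged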