Example #1
def make_containers(vid_path):
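    # Build two AnnotationContainers backed by the same video folder:
    # one for the 'playtime' dataset and one for the 'tracks' dataset.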

    dssp = DatasetSourceProvider()
    dssp.add_folder(folder_path=vid_path, dataset_name='playtime')
    dssp_tracks = DatasetSourceProvider()
    dssp_tracks.add_folder(folder_path=vid_path, dataset_name='tracks')

    container = AnnotationContainer(dataset_source_provider=dssp,
                                    dataset_version='0.0.1')
    container_tracks = AnnotationContainer(dataset_source_provider=dssp_tracks,
                                           dataset_version='0.0.1')

    return container, container_tracks
Example #2
def detect_on_preview_dir(preview_data, detector):
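    # Run the detector on every 'preview*' image in the folder and collect
    # the detections into an AnnotationContainer under the 'predicted' dataset.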
    dsp = DatasetSourceProvider()
    dsp.add_source(preview_data, 'predicted')
    container = AnnotationContainer(dataset_source_provider=dsp)

    for image_path in Path(preview_data).glob('preview*'):
        image = load_preview_image(str(image_path))
        det = detector.detect_image(image)

        e = AnnotationEntry(image_path.name,
                            ImageSize.from_image(image),
                            'predicted',
                            instances=det)
        container.add_entry(e)

    return container
Example #3
def main(args):
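    # With box blending enabled, detect at a very low threshold and apply the
    # user-supplied threshold only after the boxes have been blended.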
    thresh = .01 if args.box_blending else args.threshold
    get_detector = _get_detector_func(args.model)
    if get_detector == caffe_detector:
        detector = get_detector(args.model,
                                thresh,
                                box_blending=args.box_blending,
                                anchors=args.anchors)
    else:
        detector = get_detector(args.model, thresh, box_blending=args.box_blending)

    with detector:
        if args.container:
            gt = AnnotationContainer.from_file(args.container)
            pred = detector.detect_on_annotation_container(
                gt)  # .with_selected_labels(['person'])

            if args.box_blending and type(detector) == DarknetObjectDetector:
                pred = _blend_boxes(pred)
                pred.filter_all_instances_by_threshold(args.threshold,
                                                       in_place=True)
            pred.as_evaluated_against(gt).summary()

        elif args.images:
            pred = detector.detect_on_image_folder(args.images,
                                                   dataset_name='predicted')
            if args.box_blending and type(detector) == DarknetObjectDetector:
                pred = _blend_boxes(pred)
                pred.filter_all_instances_by_threshold(args.threshold,
                                                       in_place=True)

        elif args.preview:
            pred = detect_on_preview_dir(args.preview, detector)
            if args.box_blending and type(detector) == DarknetObjectDetector:
                pred = _blend_boxes(pred)
                pred.filter_all_instances_by_threshold(args.threshold,
                                                       in_place=True)
    if args.output:
        pred.to_file(args.output)
Example #4
        labels = args.labels

    if args.container == args.out_container:
        if input('Are you sure you want to overwrite container? (y/n): ') != 'y':
            quit()

    print('Selected labels:')
    print(labels)
    print('Remove empty entries from trimmed container:', args.remove_empty)
    print('Clean container of entries with non-valid paths to images:', args.clean)

    if args.out_container is None:
        out = 'trimmed_container_{}.bbox'.format('_'.join(args.labels))
    else:
        out = args.out_container


    container = AnnotationContainer.from_file(args.container)
    container.with_selected_labels(labels, in_place=True)
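    # Optionally drop entries left without instances, and entries whose image
    # path is missing or no longer exists on disk.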
    if args.remove_empty:
        for e in list(container.entries.keys()):
            if len(container.entries[e]) == 0:
                del container.entries[e]
    if args.clean:
        for e in list(container.entries.keys()):
            image_path = container.entries[e].get_image_full_path()
            if image_path is None or not Path(image_path).exists():
                del container.entries[e]

    container.summary()
    container.to_file(out)
Example #5
    for c in args.container:
        assert Path(c).exists(), f'{c} does not exist'

    return args


if __name__ == '__main__':
    args = get_args()
    labels = args.labels
    fps = args.fps

    colors = defaultdict(lambda: np.random.randint(0, 255, 3))
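    # Assign each label a random RGB colour, reused across all containers.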
    output_files = []
    for container_path in tqdm(args.container, desc='containers'):
        name = Path(container_path).stem
        container = AnnotationContainer.from_file(container_path)
        container.label_to_colour = colors
        video_writer = None

        output_file_name = Path(
            container_path).parent / f'{Path(container_path).stem}.avi'
        output_files.append(output_file_name)

        if len(labels) > 0:
            container.with_selected_labels(labels,
                                           prune_empty_entries=False,
                                           in_place=True)
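        # Sort entry keys numerically by filename stem so frames are processed in order.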
        keys = list(
            sorted(container.entries.keys(), key=lambda x: int(Path(x).stem)))

        for k in tqdm(keys, desc='frames'):
Example #6
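            # Fragment of a frame generator: read frames from `vidcap`, convert
            # them from BGR to RGB, and yield (frame_index, image) pairs.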
            success, image = vidcap.read()
            if success:  # and frame < 500:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                yield frame, image
                frame += 1
            else:
                break

    from matplotlib import pyplot as plt

    input_side = 64
    facenet = cv2.dnn.readNetFromCaffe(
        '../demo/face_detector/deploy.prototxt',
        '../demo/face_detector/res10_300x300_ssd_iter_140000.caffemodel')
    orientation_model = get_orientation_model(input_side=input_side)
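    # Annotate every frame's container entry with detected faces and head orientation.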

    container = AnnotationContainer.from_file(
        '/home/martin/Desktop/meeting_cut/spaces/no_crop/task_nr_354.bbox')
    for fnr, frame in tqdm(get_frame_generator(
            '/home/martin/Desktop/meeting_cut/spaces/no_crop/overview.mp4'),
                           total=5483):
        entry = container.entries[str(fnr)]
        add_face_and_head_orientation(entry, frame, facenet, orientation_model)

        # img = entry.overlaid_on_image(frame)
        # plt.imshow(img)
        # plt.show()

    container.to_file(
        '/home/martin/Desktop/meeting_cut/spaces/no_crop/task_nr_354_face_and_orientation.bbox'
    )
Example #7
        '-o',
        '--output',
        type=str,
        help='Output filename. If not set, the user will be ' +
        'prompted for an output filename at runtime.')
    args = parser.parse_args()
    if args.output is None:
        output_file_name = input('Write file to: ')
    else:
        output_file_name = args.output

    cnt = None
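    # Merge the containers one at a time, releasing each one after it has been
    # merged; failed containers are reported and skipped.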
    for to_merge in args.containers:
        print(f'Merging container {to_merge} ...'.ljust(70), end='')
        try:
            new_cont = AnnotationContainer.from_file(to_merge)
            if cnt is None:
                cnt = new_cont
            else:
                cnt.merge(new_cont,
                          merge_instance_iou=args.nms,
                          destructively=True)
            del new_cont
            gc.collect()
        except Exception as e:
            print('\tFailed!')
            print(e)
            continue
        print('\tOK')

    cnt.to_file(output_file_name)
Example #8
import sys
from pathlib import Path

from bbox import AnnotationContainer

ALIAS_NAME = 'contsum'

if __name__ == '__main__':

    if len(sys.argv) == 1:
        print(
            'use with arguments <full path to container> or $PWD <container>')
        quit()

    container = sys.argv[1]
    suffix = container.split('.')[-1]
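    # If the first argument is not a container file itself, treat it as a
    # directory (e.g. $PWD) and expect the container filename as the second argument.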
    if suffix != 'bbox' and suffix != 'json':
        if len(sys.argv) < 3:
            print(
                'if not using 2 arguments, first must be absolute path to container'
            )
            quit()
        container = Path(container) / sys.argv[2]
    AnnotationContainer.from_file(container).summary()
Example #9
                        action='store_true',
                        help='Prune empty entries')
    parser.add_argument('-max',
                        '--max_labels',
                        type=int,
                        default=None,
                        help='Max amount of labels in image')
    parser.add_argument('-min',
                        '--min_labels',
                        type=int,
                        default=None,
                        help='Min amount of labels in image')
    args = parser.parse_args()

    container = args.container
    cont = AnnotationContainer.from_file(container)
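    # Build a new, empty container that reuses the source container's dataset sources.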
    new_cont = AnnotationContainer(
        dataset_source_provider=cont.dataset_source_provider)
    if args.labels and len(args.labels) > 0:
        cont = cont.with_selected_labels(args.labels,
                                         prune_empty_entries=args.prune,
                                         in_place=True)

    entry_keys = list(cont.entries.keys())
    shuffle(entry_keys)

    key = None
    i = 0
    not_found_counter = 0
    found_counter = 0
    successfully_shown = set()
Example #10
                        '--shuffle',
                        action='store_true',
                        help='Shuffle images')
    parser.add_argument('-t',
                        '--threshold',
                        type=float,
                        help='Filter detections with confidence')
    parser.add_argument('-m',
                        '--max',
                        type=int,
                        default=None,
                        help='Show only this amount of images')
    args = parser.parse_args()

    container = args.container
    cont = AnnotationContainer.from_file(container)
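    # Optionally restrict to the requested labels and drop low-confidence detections.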
    if args.labels and len(args.labels) > 0:
        cont = cont.with_selected_labels(args.labels,
                                         prune_empty_entries=args.prune,
                                         in_place=True)
    if args.threshold is not None:
        assert 0 <= args.threshold <= 1, 'Confidence threshold must be in [0, 1]'
        cont = cont.filter_all_instances_by_threshold(args.threshold,
                                                      in_place=True)

    if args.name:
        for n in args.name:
            if n not in cont:
                print(n, 'not in container')
                continue
            print('Num instances:', len(cont[n].instances))
            cont[n].show()