Code example #1
File: predict.py  Project: plarr2020-team1/monoloco
def factory_outputs(args,
                    images_outputs,
                    output_path,
                    pifpaf_outputs,
                    dic_out=None,
                    kk=None):
    """Output json files or images according to the choice"""

    # Save json file
    if 'pifpaf' in args.networks:
        keypoint_sets, scores, pifpaf_out = pifpaf_outputs[:]

        # Visualizer
        keypoint_painter = show.KeypointPainter(show_box=False)
        skeleton_painter = show.KeypointPainter(show_box=False,
                                                color_connections=True,
                                                markersize=1,
                                                linewidth=4)

        if 'json' in args.output_types and keypoint_sets.size > 0:
            with open(output_path + '.pifpaf.json', 'w') as f:
                json.dump(pifpaf_out, f)

        if 'keypoints' in args.output_types:
            with show.image_canvas(images_outputs[0],
                                   output_path + '.keypoints.png',
                                   show=args.show,
                                   fig_width=args.figure_width,
                                   dpi_factor=args.dpi_factor) as ax:
                keypoint_painter.keypoints(ax, keypoint_sets)

        if 'skeleton' in args.output_types:
            with show.image_canvas(images_outputs[0],
                                   output_path + '.skeleton.png',
                                   show=args.show,
                                   fig_width=args.figure_width,
                                   dpi_factor=args.dpi_factor) as ax:
                skeleton_painter.keypoints(ax, keypoint_sets, scores=scores)

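    # Draw the MonoLoco outputs (frontal, bird's-eye, or combined views) only when detections are present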
    if 'monoloco' in args.networks:
        if any(xx in args.output_types for xx in ('front', 'bird', 'combined')):
            epistemic = args.n_dropout > 0
            if dic_out['boxes']:  # Only print in case of detections
                printer = Printer(images_outputs[1],
                                  output_path,
                                  kk,
                                  output_types=args.output_types,
                                  z_max=args.z_max,
                                  epistemic=epistemic)
                figures, axes = printer.factory_axes()
                printer.draw(figures,
                             axes,
                             dic_out,
                             images_outputs[1],
                             draw_box=args.draw_box,
                             save=True,
                             show=args.show)
Code example #2
    def __call__(self, first_image, fig_width=4.0, **kwargs):
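        # Without matplotlib, keep consuming frames so the caller's send() loop still works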
        if plt is None:
            while True:
                image, all_fields = yield
            return

        if 'figsize' not in kwargs:
            kwargs['figsize'] = (fig_width, fig_width * first_image.shape[0] /
                                 first_image.shape[1])

        fig = plt.figure(**kwargs)
        ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
        ax.set_axis_off()
        ax.set_xlim(0, first_image.shape[1])
        ax.set_ylim(first_image.shape[0], 0)
        text = 'OpenPifPaf'
        ax.text(1,
                1,
                text,
                fontsize=10,
                verticalalignment='top',
                bbox=dict(facecolor='white', alpha=0.5, linewidth=0))
        fig.add_axes(ax)
        mpl_im = ax.imshow(first_image)
        fig.show()

        # visualizer
        if self.args.colored_connections:
            viz = show.KeypointPainter(show_box=False,
                                       color_connections=True,
                                       markersize=1,
                                       linewidth=6)
        else:
            viz = show.KeypointPainter(show_box=False)

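        # Main loop: each send() delivers a new frame and its decoded fields, drawn onto the same figure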
        while True:
            image, all_fields = yield
            annotations = self.processor.annotations(all_fields)

            draw_start = time.time()
            while ax.lines:
                del ax.lines[0]
            mpl_im.set_data(image)
            viz.annotations(ax, annotations)
            fig.canvas.draw()
            #print('draw', time.time() - draw_start)
            plt.pause(0.01)

        plt.close(fig)
Code example #3
def draw_skeletons(pose):
    from ..annotation import Annotation  # pylint: disable=import-outside-toplevel
    from .. import show  # pylint: disable=import-outside-toplevel

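    # Characteristic pose size: square root of the keypoint bounding-box area,
    # used below to scale the per-keypoint sigmas into joint scales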
    scale = np.sqrt((np.max(pose[:, 0]) - np.min(pose[:, 0])) *
                    (np.max(pose[:, 1]) - np.min(pose[:, 1])))

    show.KeypointPainter.show_joint_scales = True
    keypoint_painter = show.KeypointPainter(color_connections=True,
                                            linewidth=6)

    ann = Annotation(keypoints=COCO_KEYPOINTS, skeleton=COCO_PERSON_SKELETON)
    ann.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
    draw_ann(ann,
             filename='docs/skeleton_coco.png',
             keypoint_painter=keypoint_painter)

    ann = Annotation(keypoints=COCO_KEYPOINTS,
                     skeleton=KINEMATIC_TREE_SKELETON)
    ann.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
    draw_ann(ann,
             filename='docs/skeleton_kinematic_tree.png',
             keypoint_painter=keypoint_painter)

    ann = Annotation(keypoints=COCO_KEYPOINTS,
                     skeleton=DENSER_COCO_PERSON_SKELETON)
    ann.set(pose, np.array(COCO_PERSON_SIGMAS) * scale)
    draw_ann(ann,
             filename='docs/skeleton_dense.png',
             keypoint_painter=keypoint_painter)
Code example #4
File: visualizer.py  Project: MatanAvitan/openpifpaf
    def __init__(self, head_names, strides, *,
                 pif_indices=None, paf_indices=None,
                 show_margin=False):
        self.head_names = head_names
        self.strides = strides
        self.pif_indices = pif_indices or []
        self.paf_indices = paf_indices or []
        self.show_margin = show_margin

        self.keypoint_painter = show.KeypointPainter()
Code example #5
File: eval_coco.py  Project: MatanAvitan/openpifpaf
    def view_keypoints(image_cpu, annotations, gt):
        highlight = [5, 7, 9, 11, 13, 15]
        keypoint_painter = show.KeypointPainter(highlight=highlight)
        skeleton_painter = show.KeypointPainter(show_box=False,
                                                color_connections=True,
                                                markersize=1,
                                                linewidth=6)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])

        with show.canvas() as ax:
            ax.set_axis_off()
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            skeleton_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])

        instances_gt = None
        if gt:
            instances_gt = np.stack([a['keypoints'] for a in gt])

            # for test: overwrite prediction with true values
            # instances = instances_gt.copy()[:1]

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            keypoint_painter.keypoints(ax,
                                       instances_gt,
                                       skeleton=COCO_PERSON_SKELETON)

        with show.canvas() as ax:
            ax.imshow((np.moveaxis(image_cpu.numpy(), 0, -1) + 2.0) / 4.0)
            show.white_screen(ax)
            keypoint_painter.keypoints(ax,
                                       instances_gt,
                                       color='lightgrey',
                                       skeleton=COCO_PERSON_SKELETON)
            keypoint_painter.annotations(
                ax, [ann for ann in annotations if ann.score() > 0.01])
Code example #6
def draw_skeletons(pose, sigmas, skel, kps, scr_weights):
    from openpifpaf.annotation import Annotation  # pylint: disable=import-outside-toplevel
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    scale = np.sqrt((np.max(pose[:, 0]) - np.min(pose[:, 0])) *
                    (np.max(pose[:, 1]) - np.min(pose[:, 1])))

    show.KeypointPainter.show_joint_scales = True
    keypoint_painter = show.KeypointPainter()
    ann = Annotation(keypoints=kps, skeleton=skel, score_weights=scr_weights)
    ann.set(pose, np.array(sigmas) * scale)
    os.makedirs('docs', exist_ok=True)
    draw_ann(ann,
             filename='docs/skeleton_car.png',
             keypoint_painter=keypoint_painter)
Code example #7
def draw_mhp_skeletons(pose):
    from openpifpaf.annotation import Annotation  # pylint: disable=import-outside-toplevel
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    scale = np.sqrt((np.max(pose[:, 0]) - np.min(pose[:, 0])) *
                    (np.max(pose[:, 1]) - np.min(pose[:, 1])))

    show.KeypointPainter.show_joint_scales = True
    keypoint_painter = show.KeypointPainter(color_connections=True,
                                            linewidth=6)

    ann = Annotation(keypoints=MHP_KEYPOINTS, skeleton=MHP_PERSON_SKELETON)
    ann.set(pose, np.array(MHP_PERSON_SIGMAS) * scale)
    draw_ann(ann,
             filename='skeleton_mhp.png',
             keypoint_painter=keypoint_painter)
Code example #8
def draw_skeletons(pose):
    from openpifpaf.annotation import Annotation  # pylint: disable=import-outside-toplevel
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    scale = np.sqrt((np.max(pose[:, 0]) - np.min(pose[:, 0])) *
                    (np.max(pose[:, 1]) - np.min(pose[:, 1])))

    show.KeypointPainter.show_joint_scales = True
    keypoint_painter = show.KeypointPainter()

    ann = Annotation(keypoints=ANIMAL_KEYPOINTS,
                     skeleton=ANIMAL_SKELETON,
                     score_weights=ANIMAL_SCORE_WEIGHTS)
    ann.set(pose, np.array(ANIMAL_SIGMAS) * scale)
    draw_ann(ann,
             filename='docs/skeleton_animal.png',
             keypoint_painter=keypoint_painter)
Code example #9
def draw_skeletons(pose, prefix=""):
    from openpifpaf.annotation import Annotation  # pylint: disable=import-outside-toplevel
    from openpifpaf import show  # pylint: disable=import-outside-toplevel

    scale = np.sqrt((np.max(pose[:, 0]) - np.min(pose[:, 0])) *
                    (np.max(pose[:, 1]) - np.min(pose[:, 1])))

    show.KeypointPainter.show_joint_scales = True
    keypoint_painter = show.KeypointPainter(line_width=2)

    ann = Annotation(keypoints=WHOLEBODY_KEYPOINTS,
                     skeleton=WHOLEBODY_SKELETON,
                     score_weights=WHOLEBODY_SCORE_WEIGHTS)
    ann.set(pose, np.array(WHOLEBODY_SIGMAS) * scale)
    draw_ann(ann,
             filename='./docs/' + prefix + 'skeleton_wholebody.png',
             keypoint_painter=keypoint_painter)
Code example #10
File: predict.py  Project: MatanAvitan/openpifpaf
def main():
    args = cli()
    if args.our_new_model:
        args.checkpoint = TRAINED_MODEL_PATH
    # load model
    model_cpu, _ = nets.factory_from_args(args)
    model = model_cpu.to(args.device)
    if not args.disable_cuda and torch.cuda.device_count() > 1:
        LOG.info('Using multiple GPUs: %d', torch.cuda.device_count())
        model = torch.nn.DataParallel(model)
        model.head_names = model_cpu.head_names
        model.head_strides = model_cpu.head_strides
    processor = decoder.factory_from_args(args, model, args.device)

    # data
    preprocess = None
    if args.long_edge:
        preprocess = transforms.Compose([
            transforms.NormalizeAnnotations(),
            transforms.RescaleAbsolute(args.long_edge),
            transforms.CenterPad(args.long_edge),
            transforms.EVAL_TRANSFORM,
        ])
    data = datasets.ImageList(args.images, preprocess=preprocess)
    data_loader = torch.utils.data.DataLoader(
        data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=args.pin_memory,
        num_workers=args.loader_workers,
        collate_fn=datasets.collate_images_anns_meta)

    # visualizers
    keypoint_painter = show.KeypointPainter(
        show_box=args.debug,
        show_joint_scale=args.debug,
    )
    skeleton_painter = show.KeypointPainter(
        color_connections=True,
        markersize=args.line_width - 5,
        linewidth=args.line_width,
        show_box=args.debug,
        show_joint_scale=args.debug,
    )

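    # Run the network on each batch of images and decode the predicted keypoint annotations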
    for batch_i, (image_tensors_batch, _,
                  meta_batch) in enumerate(data_loader):
        fields_batch = processor.fields(image_tensors_batch)
        pred_batch = processor.annotations_batch(
            fields_batch, debug_images=image_tensors_batch)

        # unbatch
        for pred, meta in zip(pred_batch, meta_batch):
            if args.output_directory is None:
                output_path = meta['file_name']
            else:
                file_name = os.path.basename(meta['file_name'])
                output_path = os.path.join(args.output_directory, file_name)
            LOG.info('batch %d: %s to %s', batch_i, meta['file_name'],
                     output_path)

            # load the original image if necessary
            cpu_image = None
            if args.debug or \
                    'keypoints' in args.output_types or \
                    'skeleton' in args.output_types:
                with open(meta['file_name'], 'rb') as f:
                    cpu_image = PIL.Image.open(f).convert('RGB')

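            # Keep the original image on the processor for debug plots, then map
            # predictions back into original image coordinates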
            processor.set_cpu_image(cpu_image, None)
            if preprocess is not None:
                pred = preprocess.annotations_inverse(pred, meta)

            if 'json' in args.output_types:
                with open(output_path + '.pifpaf.json', 'w') as f:
                    json.dump([{
                        'keypoints': np.around(ann.data, 1).reshape(-1).tolist(),
                        'bbox': np.around(bbox_from_keypoints(ann.data), 1).tolist(),
                        'score': round(ann.score(), 3),
                    } for ann in pred], f)

            if 'keypoints' in args.output_types:
                with show.image_canvas(cpu_image,
                                       output_path + '.keypoints.png',
                                       show=args.show,
                                       fig_width=args.figure_width,
                                       dpi_factor=args.dpi_factor) as ax:
                    keypoint_painter.annotations(ax, pred)

            if 'skeleton' in args.output_types:
                with show.image_canvas(cpu_image,
                                       output_path + '.skeleton.png',
                                       show=args.show,
                                       fig_width=args.figure_width,
                                       dpi_factor=args.dpi_factor) as ax:
                    skeleton_painter.annotations(ax, pred)
Code example #11
def generate(m, inputs):
    args = cli()
    model, processor = m
    image = inputs["image"]

    # data
    preprocess = None
    if args.long_edge:
        preprocess = transforms.Compose([
            transforms.Normalize(),
            transforms.RescaleAbsolute(args.long_edge),
            transforms.CenterPad(args.long_edge),
        ])
    data = datasets.PilImageList([image], preprocess=preprocess)
    data_loader = torch.utils.data.DataLoader(data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=args.pin_memory,
                                              num_workers=args.loader_workers)

    # visualizers
    keypoint_painter = show.KeypointPainter(show_box=False)
    skeleton_painter = show.KeypointPainter(show_box=False,
                                            color_connections=True,
                                            markersize=1,
                                            linewidth=6)

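    # The loader holds a single image, so take its first (and only) batch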
    image_paths, image_tensors, processed_images_cpu = next(iter(data_loader))
    images = image_tensors.permute(0, 2, 3, 1)

    processed_images = processed_images_cpu.to(args.device, non_blocking=True)
    fields_batch = processor.fields(processed_images)
    pred_batch = processor.annotations_batch(fields_batch,
                                             debug_images=processed_images_cpu)

    # unbatch
    image_path = image_paths[0]
    image = images[0]
    processed_image_cpu = processed_images_cpu[0]
    pred = pred_batch[0]

    processor.set_cpu_image(image, processed_image_cpu)
    keypoint_sets, scores = processor.keypoint_sets_from_annotations(pred)

    kp_json = json.dumps([{
        'keypoints': np.around(kps, 1).reshape(-1).tolist(),
        'bbox': bbox_from_keypoints(kps),
    } for kps in keypoint_sets])

    kwargs = {
        'figsize': (args.figure_width,
                    args.figure_width * image.shape[0] / image.shape[1]),
    }
    fig = plt.figure(**kwargs)
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.set_axis_off()
    ax.set_xlim(0, image.shape[1])
    ax.set_ylim(image.shape[0], 0)
    fig.add_axes(ax)
    ax.imshow(image)
    skeleton_painter.keypoints(ax, keypoint_sets, scores=scores)

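    # Render the figure and read the RGB canvas buffer back as a numpy image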
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    # np.fromstring is deprecated for binary data; np.frombuffer is the drop-in replacement
    output_image = np.frombuffer(fig.canvas.tostring_rgb(),
                                 dtype='uint8').reshape(h, w, 3)

    return {'keypoints': kp_json, 'image': output_image}