Code example #1
File: imvis_demo.py (Project: snototter/vitocpp)
def demo_overlay():
    """How to overlay images and highlight regions."""
    # Overlay: load the input as grayscale (mode='L'), despite the variable name
    rgb = imutils.imread('../data/flamingo.jpg', mode='L')
    # Generate some data to overlay: a Gaussian peak in the upper-right region
    im_height, im_width = rgb.shape[0], rgb.shape[1]
    peak_pos = (im_width * 0.75, im_height * 0.15)
    xv, yv = np.meshgrid(np.arange(0, im_width), np.arange(0, im_height))
    overlay = np.exp(
        -(np.power(xv - peak_pos[0], 2) + np.power(yv - peak_pos[1], 2)) /
        (3e4))
    overlay_vis = imvis.overlay(imvis.pseudocolor(overlay), rgb, 0.7)

    # Highlight regions
    rgb = imutils.imread('../data/flamingo.jpg', mode='RGB')
    rgb_mask = np.zeros((rgb.shape[0], rgb.shape[1]), dtype=np.uint8)
    rgb_mask[160:334, 120:290] = 1
    highlight = imvis.highlight(rgb, rgb_mask)
    # highlight another region, this time in blue
    rgb_mask[:] = 0
    rgb_mask[200:374, 250:420] = 1
    highlight = imvis.highlight(highlight, rgb_mask, color=(0, 0, 255))

    # Combine all "colored" images:
    collage = imvis.make_collage([overlay_vis, highlight],
                                 padding=5,
                                 bg_color=(255, 255, 255))
    imvis.imshow(collage, title="Overlay & Highlights", wait_ms=-1)
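The demo functions from imvis_demo.py (code examples #1 and #4) rely on module-level imports that these excerpts omit. A minimal sketch of the assumed preamble and entry point follows; the `vcp` import path is an assumption based on the vitocpp project layout and may differ in your installation:

import numpy as np
# Assumed import path for the vitocpp Python bindings; adjust to your setup.
from vcp import colormaps, imutils, imvis

if __name__ == '__main__':
    demo_overlay()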
Code example #2
def _col_ir(f):
    # TODO: normalize/stretch, etc. using self._args.max_ir
    if f.dtype == np.uint8:
        return f
    return imvis.pseudocolor(f,
                             limits=None,
                             color_map=colormaps.colormap_turbo_rgb)
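A hedged usage sketch for a helper like `_col_ir`: with `limits=None`, `pseudocolor` derives the value range from the data itself (see the comment in code example #4), so a raw high-bit-depth infrared frame can be colorized directly. The synthetic frame and the `vcp` import path are illustrative assumptions:

import numpy as np
from vcp import imvis, colormaps  # assumed import path

# Synthetic 16-bit "infrared" frame with a bright blob in the center.
ir = np.zeros((240, 320), dtype=np.uint16)
ir[100:140, 140:180] = 4000

# limits=None stretches the colormap over the frame's own min/max.
vis = imvis.pseudocolor(ir, limits=None, color_map=colormaps.colormap_turbo_rgb)
imvis.imshow(vis, title='IR visualization', wait_ms=-1)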
Code example #3
    def _process_next_frameset(self, capture, frameset):
        # assert len(frameset) == 1  # Currently, we assume there's only one mobotix camera connected!
        if frameset[0] is None:
            self._streamer.stop()
            return

        if self._args.verbose:
            # Measure time between received framesets
            if self._mean_ms_between_framesets is None:
                ms_between_framesets = 0
            else:
                ms_between_framesets = pyutils.ttoc('[frameset received]')
            if self._mean_ms_between_framesets is None:
                self._mean_ms_between_framesets = ms_between_framesets
            else:
                self._mean_ms_between_framesets = \
                    0.9 * self._mean_ms_between_framesets + 0.1 * ms_between_framesets
            if ms_between_framesets > 0:
                print('Frameset received after {:.2f} ms, avg. framerate {:.1f} fps'.format(
                    ms_between_framesets, 1000.0 / self._mean_ms_between_framesets))
            pyutils.tic('[frameset received]')

        vis_frames = list()
        vis_labels = list()
        for idx, frame in enumerate(frameset):
            frame_lbl = capture.frame_label(idx)
            # Pseudocolor depth/infrared streams; color streams are shown as-is
            if capture.is_depth(idx) or capture.is_infrared(idx):
                vis_frame = imvis.pseudocolor(
                    frame, [0, 5000], color_map=colormaps.colormap_turbo_rgb)
            else:
                vis_frame = frame
            vis_frames.append(vis_frame)
            vis_labels.append(frame_lbl)

        # Overlay labels
        vis_frames = [
            imvis.draw_text_box(frame, label,
                                (frame.shape[1] // 2, 40),
                                text_anchor='north',
                                bg_color=(220, 0, 0),
                                font_color=(0, 0, 0))
            for frame, label in zip(vis_frames, vis_labels)
        ]

        # Forward to storage processes
        self._store(capture, frameset)

        # Display the live stream
        collage = imvis.make_collage(vis_frames,
                                     num_images_per_row=2,
                                     padding=0,
                                     fixed_size_per_image=(640, 480))
        k = imvis.imshow(collage, "Live view", wait_ms=10)
        if k == ord('q') or k == 27:
            self._streamer.stop()
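The verbose branch above keeps an exponential moving average of the milliseconds between framesets, weighting the previous mean by 0.9 and the newest measurement by 0.1. The same bookkeeping can be sketched in isolation with only the standard library (this helper is illustrative and not part of the project):

import time

class FramerateStats:
    """Exponential moving average of inter-frame time (0.9/0.1 weighting as above)."""

    def __init__(self):
        self._mean_ms = None
        self._last_ts = None

    def tick(self):
        """Call once per received frame; returns (ms_since_last, avg_fps) or None."""
        now = time.time()
        if self._last_ts is None:
            self._last_ts = now
            return None
        ms = (now - self._last_ts) * 1000.0
        self._last_ts = now
        self._mean_ms = ms if self._mean_ms is None else 0.9 * self._mean_ms + 0.1 * ms
        return ms, 1000.0 / self._mean_ms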
Code example #4
File: imvis_demo.py (Project: snototter/vitocpp)
def demo_pseudocolor():
    """Pseudocoloring."""
    peaks = imutils.imread('../data/peaks.png', mode='L')
    # For visualization purposes only, reduce input to a few
    # distinct categories/labels:
    data = ((peaks / 25) - 5).astype(np.int16)
    names = ['Bone', 'Magma', 'Viridis']
    images = list()
    for name in names:
        pc = imvis.pseudocolor(
            data,
            limits=None,  # Compute min/max from data
            color_map=colormaps.by_name(name, return_rgb=True))
        images.append(pc)

    # Display as a single collage
    padding = 10
    # Add alpha channel to render the collage nicely for the repo's README
    images[0] = np.dstack(
        (images[0], 255 * np.ones(images[0].shape[:2], dtype=np.uint8)))
    collage = imvis.make_collage(images,
                                 padding=padding,
                                 fixed_size_per_image=(200, 200),
                                 bg_color=(0, 0, 0, 0),
                                 num_images_per_row=len(images))

    # Add labels
    height, width = collage.shape[:2]
    mask_width = (width - (len(names) - 1) * padding) / len(names)
    for i in range(len(names)):
        pos = (i * (mask_width + padding) + mask_width / 2, height - 10)
        collage = imvis.draw_text_box(collage,
                                      names[i],
                                      pos,
                                      text_anchor='south',
                                      bg_color=(0, 0, 0),
                                      font_color=(-1, -1, -1),
                                      font_scale=1.0,
                                      font_thickness=1,
                                      padding=5,
                                      fill_opacity=0.8)

    imvis.imshow(collage, title='Pseudocoloring', wait_ms=-1)
    imutils.imsave('../../doc/example-pseudocolor.png', collage)
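If '../data/peaks.png' is not at hand, the same pseudocoloring call can be exercised on synthetic data; `colormaps.by_name(...)` is taken from the snippet above, while the ramp image and the `vcp` import path are illustrative assumptions:

import numpy as np
from vcp import imvis, colormaps  # assumed import path

# Horizontal ramp, quantized to a few integer "labels" like the peaks demo.
ramp = np.tile(np.linspace(-5, 5, 400), (300, 1)).astype(np.int16)
vis = imvis.pseudocolor(ramp, limits=None,
                        color_map=colormaps.by_name('Viridis', return_rgb=True))
imvis.imshow(vis, title='Synthetic pseudocolor test', wait_ms=-1)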
Code example #5
def _vis_depth_colormap(self, f, cm):
    return imvis.pseudocolor(f, limits=self._depth_range_vis, color_map=cm)
Code example #6
def vis_depth(f, max_depth=2000):
    return imvis.pseudocolor(f,
                             limits=[0, max_depth],
                             color_map=colormaps.colormap_turbo_rgb)
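A quick way to try `vis_depth` without a sensor is a synthetic 16-bit depth frame; the data and the `vcp` import path below are assumptions for illustration:

import numpy as np
from vcp import imvis, colormaps  # assumed import path

# Synthetic depth frame: values ramp from 0 to 2000 (e.g. millimeters) along x.
depth = np.tile(np.linspace(0, 2000, 320), (240, 1)).astype(np.uint16)
imvis.imshow(vis_depth(depth, max_depth=2000), title='Depth', wait_ms=-1)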
Code example #7
def _col_depth(f):
    # return imutils.transform(f, 'depth2surfnorm', 'surfnorm2rgb')
    return imvis.pseudocolor(f,
                             limits=[0, self._args.max_depth],
                             color_map=colormaps.colormap_turbo_rgb)
Code example #8
def demo(pseudocolor=True, inspect=False):
    imgs = [imutils.imread('../data/ninja-01.jpg'),
            imutils.imread('../data/ninja-02.jpg')]

    bgmodels = list()
    bgmodel = bgm.BackgroundModel()
    bgmodel.approximate_median_bgm(
        adaptation_step=5,
        fg_report_threshold=50,
        median_on_grayscale=True)
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.block_mean_bgm(
        block_size=(16, 16),
        block_overlap=0.5,
        update_rate=0.01,
        fg_report_threshold=50,
        channel='grayscale')
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.gaussian_mixture_bgm(
        history=500,
        detect_shadows=True,
        var_thresh=100,
        comp_thresh=0.05)
    bgmodels.append(bgmodel)

    bgmodel = bgm.BackgroundModel()
    bgmodel.normalized_rgb_bgm(
        report_as_binary=False,
        # binary_reporting_threshold=20,
        update_rate=0.1,
        alpha=0.1,
        beta=1.0)
    bgmodels.append(bgmodel)

    print("""
! Note that the reported initialization time for the first BGM includes
  library initialization - maybe OpenCV (haven't tracked that down yet).
    """)
    fg_masks = list()
    for bgmodel in bgmodels:
        pyutils.tic('init')
        bgmodel.init(imgs[0])
        t_init = pyutils.ttoc('init')
        pyutils.tic('apply')
        mask = bgmodel.report_changes(imgs[1])
        t_apply = pyutils.ttoc('apply')

        if pseudocolor:
            fg_masks.append(imvis.pseudocolor(
                mask, limits=None, color_map=colormaps.colormap_viridis_rgb))
        else:
            fg_masks.append(mask)
        print('init/apply: {:17s} {:7.2f} ms, {:7.2f} ms'.format(bgmodel.name(), t_init, t_apply))

        if inspect:
            import iminspect
            iminspect.show(mask)

    padding = 10
    # Add alpha channel to render the README visualization nicely for web display
    fg_masks[0] = np.dstack((fg_masks[0], 255 * np.ones(fg_masks[0].shape[:2], dtype=np.uint8)))
    collage = imvis.make_collage(fg_masks,
        padding=padding,
        fixed_size_per_image=(200, 266),
        bg_color=(0, 0, 0, 0),
        num_images_per_row=2)
    input_seq = cv2.resize(imutils.imread('../data/ninja-seq.png'), (200, 266))
    collage = imvis.make_collage([input_seq, collage],
        padding=padding, bg_color=(0, 0, 0, 0))

    # Overlay names
    height, width = collage.shape[:2]
    collage = imvis.draw_text_box(collage, 'Input',
            (input_seq.shape[1] / 2, height / 3 + 20),
            text_anchor='center', bg_color=(255, 255, 255),
            font_color=(-1, -1, -1), font_scale=1.0,
            font_thickness=1, padding=5, fill_opacity=0.8)

    names = [bg.name() for bg in bgmodels]
    mask_width = (width - input_seq.shape[1] - padding) / 2
    for i in range(len(names)):
        pos = (input_seq.shape[1] + padding + (i % 2) * (mask_width + padding) + mask_width / 2,
               (i // 2) * (266 + padding) + 266 - 10)
        collage = imvis.draw_text_box(collage, names[i],
            pos, text_anchor='south', bg_color=(255, 255, 255),
            font_color=(-1, -1, -1), font_scale=1.0,
            font_thickness=1, padding=5, fill_opacity=0.8)

    imvis.imshow(collage, title="Background Subtraction", wait_ms=-1)
    imutils.imsave('../../doc/example-bgm.png', collage)
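As with the other excerpts, this demo assumes module-level imports that are not shown. A minimal, hedged sketch of a preamble and entry point follows; every import path here is an assumption (the `pyutils` timing helpers, for instance, may live in a companion package), so adjust them to your installation:

import cv2
import numpy as np
# Assumed import paths; adjust to wherever your installation exposes these modules.
from vcp import bgm, colormaps, imutils, imvis, pyutils

if __name__ == '__main__':
    demo(pseudocolor=True, inspect=False)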