Example #1
    # Use floats to prevent stride issues; uint8 caused a crash on the qt backend.
    im = gl.glReadPixels(x, y, w, h, gl.GL_RGB, gl.GL_FLOAT)

    # reshape, flip, and store
    im.shape = h, w, 3
    im = np.flipud(im)

    # done
    return im


if __name__ == '__main__':

    # Prepare
    f = vv.figure()
    a1 = vv.subplot(211)
    a2 = vv.subplot(212)

    # Draw some data
    vv.plot([2, 3, 4, 2, 4, 3], axes=a1)
    f.DrawNow()

    # Take snapshots
    im1 = vv.getframe(f)
    im2 = vv.getframe(a1)
    # clear and show snapshots
    a1.Clear()
    a2.Clear()
    vv.imshow(im1, axes=a1, clim=(0, 1))
    vv.imshow(im2, axes=a2, clim=(0, 1))
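The snapshots returned by vv.getframe are float RGB arrays in the range 0..1 (hence the clim=(0, 1) above). A minimal follow-up sketch, assuming vv.imwrite as used in the screenshot example below:

# Save the captured frames to disk; vv.imwrite is given the float images directly,
# just as the screenshot function below passes its float result to vv.imwrite.
vv.imwrite('frame_figure.png', im1)
vv.imwrite('frame_axes.png', im2)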
Example #2
def screenshot(filename, ob=None, sf=2, bg=None, format=None, tension=-0.25):
    """ screenshot(filename, ob=None sf=2, bg=None, format=None)
    
    Make a screenshot and store it to a file, using cubic interpolation
    to increase the resolution (and quality) of the image.
    
    Parameters
    ----------
    filename : string
        The name of the file to store the screenshot to. If filename is None, 
        the interpolated image is returned as a numpy array.
    ob : Axes, AxesContainer, or Figure
        The object to take the screenshot of. The AxesContainer can be
        obtained using vv.gca().parent. It can be useful to take a
        screenshot of an axes including tickmarks and labels.
    sf : integer
        The scale factor. The image is increased in size with this factor,
        using a high quality interpolation method. A factor of 2 or 3
        is recommended; the image quality does not improve with higher
        factors. If using a sf larger than 1, the image is best saved in
        the jpg format.
    bg : 3-element tuple or char
        The color of the background. If bg is given, ob.bgcolor is set to
        bg before the frame is captured.
    format : string
        The format for the screenshot to be saved in. If not given, the
        format is deduced from the filename.
    tension : float
        The tension of the cardinal spline used for the interpolation.
        More negative values give more overshoot (but smoother glyphs);
        a tension of 0 corresponds to a Catmull-Rom spline.
    
    Notes
    -----
    Uses vv.getframe(ob) to obtain the image in the figure or axes. 
    That image is interpolated with the given scale factor (sf) using 
    bicubic interpolation. Then vv.imwrite(filename, ..) is used to 
    store the resulting image to a file.
    
    Rationale
    ---------
    We'd prefer storing screenshots of plots as vector (eps) images, but 
    the nature of OpenGL prevents this. By applying high quality 
    interpolation (using a cardinal spline), the resolution can be increased, 
    thereby significantly improving the visibility/smoothness for lines 
    and fonts. Use this to produce publication quality snapshots of your
    plots.
    
    """
    
    # The tension controls the responsiveness of the filter. The more
    # negative the tension, the more overshoot, but also the better it can
    # smooth for example font glyphs. If tension is 0, the interpolator is
    # a Catmull-Rom spline.
    
    # Scale must be integer
    s = int(sf)
    
    # Object given?
    if ob is None:
        ob = vv.gcf()
    
    # Get figure
    fig = ob
    if not hasattr(ob, 'DrawNow'):
        fig = ob.GetFigure()
    
    # Get object to set background of
    bgob = ob
    
    # Set background
    if bg and fig:
        bgOld = bgob.bgcolor
        bgob.bgcolor = bg
        fig.DrawNow()  
    
    # Obtain image      
    im1 = vv.getframe(ob)
    shape1 = im1.shape
    
    # Restore the original background
    if bg and fig:
        bgob.bgcolor = bgOld
        fig.Draw()
    
    # Pad original image, so we have no trouble at the edges
    shape2 = shape1[0] + 2, shape1[1] + 2, 3
    im2 = np.zeros(shape2, dtype=np.float32)  # Also make float
    im2[1:-1, 1:-1, :] = im1
    im2[0, :, :] = im2[1, :, :]
    im2[-1, :, :] = im2[-2, :, :]
    im2[:, 0, :] = im2[:, 1, :]
    im2[:, -1, :] = im2[:, -2, :]
    
    # Create the new (empty) image. Its size is scaled by the scale factor,
    # except for the last row and column: each dimension becomes (n-1)*s + 1.
    shape3 = (shape1[0] - 1) * s + 1, (shape1[1] - 1) * s + 1, 3
    im3 = np.zeros(shape3, dtype=np.float32)
    
    # Fill in values!
    for dy in range(s + 1):
        for dx in range(s + 1):

            # Get interpolation fraction and coefs
            ty = float(dy) / s
            tx = float(dx) / s
            cy = getCardinalSplineCoefs(ty, tension)
            cx = getCardinalSplineCoefs(tx, tension)
            
            # Create tmp image to which we add the contributions
            # Note that this image is 1 pixel smaller in each dimension.
            # The last pixel is filled because dy and dx iterate INCLUDING s.
            shapeTmp = shape1[0]-1, shape1[1]-1, 3
            imTmp = np.zeros(shapeTmp, dtype=np.float32)
            
            # Collect all 16 neighbours and weight them appropriately
            for iy in range(4):
                for ix in range(4):
                    
                    # Get weight
                    w = cy[iy] * cx[ix]
                    if w == 0:
                        continue
                    
                    # Get slice. Note that we start at 0,1,2,3 rather than
                    # -1,0,1,2, because we padded the image.
                    D = {0: -3, 1: -2, 2: -1, 3: None}
                    slicey = slice(iy, D[iy])
                    slicex = slice(ix, D[ix])
                    
                    # Get contribution and add to temp image
                    imTmp += w * im2[slicey, slicex, :]
            
            # Store contributions
            D = [-1 for tmp in range(s)]
            D.append(None)
            slicey = slice(dy, D[dy], s)
            slicex = slice(dx, D[dx], s)
            im3[slicey, slicex, :] = imTmp
    
    
    # Correct for overshoot
    im3[im3 > 1] = 1
    im3[im3 < 0] = 0
    
    # Store image to file
    if filename is not None:
        vv.imwrite(filename, im3, format)
    else:
        return im3
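The getCardinalSplineCoefs helper is not shown in this example. Below is a minimal sketch of what it might compute, assuming the standard cardinal spline basis with tau = 0.5 * (1 - tension), so that a tension of 0 gives a Catmull-Rom spline; the actual visvis helper may differ in details.

def getCardinalSplineCoefs(t, tension=0.0):
    # Weights for the four neighbouring samples at fraction t in [0, 1].
    # The four weights always sum to 1; at t=0 only the second sample
    # contributes, at t=1 only the third.
    tau = 0.5 * (1.0 - tension)
    c0 = -tau * t + 2 * tau * t ** 2 - tau * t ** 3
    c1 = 1 + (tau - 3) * t ** 2 + (2 - tau) * t ** 3
    c2 = tau * t + (3 - 2 * tau) * t ** 2 + (tau - 2) * t ** 3
    c3 = -tau * t ** 2 + tau * t ** 3
    return [c0, c1, c2, c3]

# Hypothetical call of the screenshot function above, storing the current
# axes (including tickmarks and labels) at double resolution:
# screenshot('plot.png', vv.gca().parent, sf=2, bg='w')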
Example #3
    def plot3d(self,
               img,
               bodies={
                   'pose3d': np.empty((0, 13, 3)),
                   'pose2d': np.empty((0, 13, 2))
               },
               hands={
                   'pose3d': np.empty((0, 21, 3)),
                   'pose2d': np.empty((0, 21, 2))
               },
               faces={
                   'pose3d': np.empty((0, 84, 3)),
                   'pose2d': np.empty((0, 84, 2))
               },
               body_with_wrists=[],
               body_with_head=[],
               interactive=False):
        """
        :param img: a HxWx3 numpy array
        :param bodies: dictionary with 'pose3d' (resp. 'pose2d') holding the body 3D (resp. 2D) poses
        :param faces: same with face pose
        :param hands: same with hand pose
        :param body_with_wrists: list with, for each body, a tuple (left_hand_id, right_hand_id) giving the index of the hand detection attached to this body detection (-1 if none) for the left and right hands
        :param body_with_head: list with, for each body, the index of the face detection attached to this body detection (-1 if none)
        :param interactive: whether to open the viewer in an interactive manner or not
        """

        # Body poses do not use the same coordinate system; flip the x and y axes
        bodies['pose3d'][:, :, 0] *= -1
        bodies['pose3d'][:, :, 1] *= -1

        # Compute 3D scaled representation of each part, stored in "points3d"
        hands, bodies, faces = [copy.copy(s) for s in (hands, bodies, faces)]
        parts = (hands, bodies, faces)
        for part in parts:
            part['points3d'] = np.zeros_like(part['pose3d'])
            for part_idx in range(len(part['pose3d'])):
                points3d = scale_orthographic(part['pose3d'][part_idx],
                                              part['pose2d'][part_idx])
                part['points3d'][part_idx] = points3d

        # Various display tricks to make the 3D visualization of full-body nice
        # (1) for faces, add a Z offset to faces to align them with the body
        for body_id, face_id in enumerate(body_with_head):
            if face_id != -1:
                z_offset = bodies['points3d'][body_id, 12, 2] - np.mean(
                    faces['points3d'][face_id, :, 2])
                faces['points3d'][face_id, :, 2] += z_offset
        # (2) for hands, add a 3D offset to put them at the wrist location
        for body_id, (lwrist_id, rwrist_id) in enumerate(body_with_wrists):
            if lwrist_id != -1:
                hands['points3d'][lwrist_id, :, :] = bodies['points3d'][
                    body_id, 7, :] - hands['points3d'][lwrist_id, 0, :]
            if rwrist_id != -1:
                hands['points3d'][rwrist_id, :, :] = bodies['points3d'][
                    body_id, 6, :] - hands['points3d'][rwrist_id, 0, :]

        img = np.asarray(img)
        height, width = img.shape[:2]

        fig = vv.figure(1)
        fig.Clear()

        fig._SetPosition(0, 0, self.figsize[0], self.figsize[1])
        if not interactive:
            fig._enableUserInteraction = False

        axes = vv.gca()
        # Hide axis
        axes.axis.visible = False

        scaling_factor = 1.0 / height

        # Camera interaction is not intuitive along the z axis, so we parent
        # every object to a reference frame that is rotated to work around this
        ref_frame = vv.Wobject(axes)
        ref_frame.transformations.append(vv.Transform_Rotate(-90, 1, 0, 0))
        ref_frame.transformations.append(
            vv.Transform_Translate(-0.5 * width * scaling_factor, -0.5, 0))

        # Draw image
        if self.display2d:
            # Display pose in 2D
            img = visu.visualize_bodyhandface2d(img,
                                                dict_poses2d={
                                                    'body': bodies['pose2d'],
                                                    'hand': hands['pose2d'],
                                                    'face': faces['pose2d']
                                                },
                                                lw=2,
                                                max_padding=0,
                                                bgr=False)

            XX, YY = np.meshgrid([0, width * scaling_factor], [0, 1])
            img_z_offset = 0.5
            ZZ = img_z_offset * np.ones(XX.shape)
            # Draw image
            embedded_img = vv.surf(XX, YY, ZZ, img)
            embedded_img.parent = ref_frame
            embedded_img.ambientAndDiffuse = 1.0

            # Draw a grid on the bottom floor to get a sense of depth
            XX, ZZ = np.meshgrid(
                np.linspace(0, width * scaling_factor, 10),
                img_z_offset - np.linspace(0, width * scaling_factor, 10))
            YY = np.ones_like(XX)
            grid3d = vv.surf(XX, YY, ZZ)
            grid3d.parent = ref_frame
            grid3d.edgeColor = (0.1, 0.1, 0.1, 1.0)
            grid3d.edgeShading = 'plain'
            grid3d.faceShading = None

        # Draw pose
        for part in parts:

            for part_idx in range(len(part['points3d'])):
                points3d = part['points3d'][part_idx] * scaling_factor
                # Draw bones
                J = len(points3d)
                is_body = (J == 13)
                ignore_neck = False if not is_body else body_with_head[
                    part_idx] != -1
                bones, bonecolors, pltcolors = visu._get_bones_and_colors(
                    J, ignore_neck=ignore_neck)
                for (kpt_id1, kpt_id2), color in zip(bones, bonecolors):
                    color = color[2], color[1], color[0]  # BGR vs RGB
                    p1 = visu._get_xyz(points3d, kpt_id1)
                    p2 = visu._get_xyz(points3d, kpt_id2)
                    pointset = vv.Pointset(3)
                    pointset.append(p1)
                    pointset.append(p2)

                    # Draw bones as solid capsules
                    bone_radius = 0.005
                    line = vv.solidLine(pointset, radius=bone_radius)
                    line.faceColor = color
                    line.ambientAndDiffuse = 1.0

                    line.parent = ref_frame

                # Draw keypoints, except for faces
                if J != 84:
                    keypoints_to_plot = points3d
                    if ignore_neck:
                        # for a nicer display, ignore head keypoint
                        keypoints_to_plot = keypoints_to_plot[:12, :]
                    # Use solid spheres
                    for i in range(len(keypoints_to_plot)):
                        kpt_wobject = vv.solidSphere(
                            translation=keypoints_to_plot[i, :].tolist(),
                            scaling=1.5 * bone_radius)
                        kpt_wobject.faceColor = (255, 0, 0)
                        kpt_wobject.ambientAndDiffuse = 1.0
                        kpt_wobject.parent = ref_frame

        # Use just an ambient lighting
        axes.light0.ambient = 0.8
        axes.light0.diffuse = 0.2
        axes.light0.specular = 0.0

        cam = vv.cameras.ThreeDCamera()
        axes.camera = cam
        # Set the camera orientation
        cam.azimuth = -45
        cam.elevation = 20
        cam.roll = 0
        # Orthographic camera
        cam.fov = 0
        if self.camera_zoom is None:
            cam.zoom *= 1.3  # Zoom a bit more
        else:
            cam.zoom = self.camera_zoom
        if self.camera_location is not None:
            cam.loc = self.camera_location
        cam.SetView()

        if interactive:
            self.app.Run()
        else:
            fig._widget.update()
            self.app.ProcessEvents()

            img3d = vv.getframe(vv.gcf())
            img3d = np.clip(img3d * 255, 0, 255).astype(np.uint8)
            # Crop gray borders
            img3d = img3d[10:-10, 10:-10, :]

            return img3d, img
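A hedged usage sketch for plot3d. The enclosing class is not shown in this example, so the Viewer3d name, its constructor arguments, and the dummy detections below are assumptions for illustration only.

import numpy as np

# Hypothetical viewer exposing figsize, display2d, app, camera_zoom and camera_location.
viewer = Viewer3d(figsize=(800, 600), display2d=True)

img = np.zeros((480, 640, 3), dtype=np.uint8)        # dummy image
bodies = {'pose3d': np.random.rand(1, 13, 3),        # one dummy body detection
          'pose2d': np.random.rand(1, 13, 2)}
img3d, img2d = viewer.plot3d(img,
                             bodies=bodies,
                             body_with_wrists=[(-1, -1)],  # no hands attached
                             body_with_head=[-1],          # no face attached
                             interactive=False)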
Example #4
File: record.py Project: chiluf/visvis.dev
    def _OnAfterDraw(self, event):
        im = vv.getframe(self._ob)
        self._frames.append(im)
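Only the draw handler is shown from record.py. A minimal sketch of how such a recorder might be wired up, assuming the figure exposes an eventAfterDraw event to bind to; the binding and the GetFrames helper are assumptions inferred from the handler, not the actual record.py API.

import visvis as vv

class SimpleRecorder:
    def __init__(self, ob):
        self._ob = ob
        self._frames = []
        # Assumption: the object exposes an eventAfterDraw event to bind to.
        ob.eventAfterDraw.Bind(self._OnAfterDraw)

    def _OnAfterDraw(self, event):
        # Grab the current frame after each draw
        im = vv.getframe(self._ob)
        self._frames.append(im)

    def GetFrames(self):
        # Return a copy of the collected frames
        return list(self._frames)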
Example #6
# Turn off the main light
axes.light0.Off()

# Create a fixed light source
light_obj = axes.lights[1]
light_obj.On()
light_obj.position = (5.0, 5.0, 5.0, 0.0)

# Empty list that will contain all images of the comet's rotation
comet_images = []

# Rotate camera in 300 steps in azimuth
for azm_angle in tqdm(range(300)):

    # Change azimuth angle of the camera
    axes.camera.azimuth = 360 * float(azm_angle) / 300

    # Draw the axes and figure
    axes.Draw()
    figure.DrawNow()

    # Get the current image
    temp_image = vv.getframe(vv.gca())

    # Append the current image as 8-bit integers
    comet_images.append((temp_image * 255).astype(np.uint8))

# Save the images as an animated GIF
imageio.mimsave('Comet67P.gif', comet_images, duration=0.04)
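If a video file is preferred over a GIF, the same frame list can also be written through imageio's ffmpeg backend; a hedged alternative, assuming the imageio-ffmpeg plugin is installed.

# Write the frames as an MP4 at 25 frames per second instead of a GIF
imageio.mimsave('Comet67P.mp4', comet_images, fps=25)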