Code example #1
File: morpher.py  Project: yiunsr/imglst
def morph_image(
    src_img,
    src_points,
    dest_img,
    dest_points,
    percent,
    width=500,
    height=600,
):
    """
    
    모핑 이미지로 변환  
    :param src_img: ndarray source image
    :param src_points: source image array of x,y face points
    :param dest_img: ndarray destination image
    :param dest_points: destination image array of x,y face points
    :param percent: 변환 percent 
    """
    size = (height, width)

    points = locator.weighted_average_points(src_points, dest_points, percent)
    src_face = warper.warp_image(src_img, src_points, points, size)
    end_face = warper.warp_image(dest_img, dest_points, points, size)
    average_face = blender.weighted_average(src_face, end_face, percent)
    average_face = alpha_image(average_face, points)
    return average_face
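
A minimal usage sketch for morph_image, assuming src_img and dest_img are already loaded as ndarrays and that locator.face_points (used in example #9 below) supplies the x,y landmarks; the call itself is illustrative:

# Hedged usage sketch: inputs are assumed to be loaded ndarray images.
src_points = locator.face_points(src_img, add_boundary_points=True)    # landmarks for the source face
dest_points = locator.face_points(dest_img, add_boundary_points=True)  # landmarks for the destination face

# Blend the two faces halfway; the result carries an alpha mask from alpha_image.
halfway = morph_image(src_img, src_points, dest_img, dest_points, percent=0.5)
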
Code example #2
def morph(src_img,
          src_points,
          dest_img,
          dest_points,
          video,
          width=500,
          height=600,
          num_frames=20,
          fps=10,
          out_frames=None,
          out_video=None,
          alpha=False,
          plot=False,
          keep_bg=False):
    """
  Create a morph sequence from source to destination image

  :param src_img: ndarray source image
  :param src_points: source image array of x,y face points
  :param dest_img: ndarray destination image
  :param dest_points: destination image array of x,y face points
  :param video: facemorpher.videoer.Video object
  """
    size = (height, width)
    stall_frames = np.clip(int(fps * 0.15), 1, fps)  # Show first & last longer
    plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)
    num_frames -= (stall_frames * 2)  # No need to process src and dest image

    plt.plot_one(src_img)
    video.write(src_img, 1)

    # Produce morph frames!
    for percent in np.linspace(1, 0, num=num_frames):
        points = locator.weighted_average_points(src_points, dest_points,
                                                 percent)
        src_face = warper.warp_image(src_img, src_points, points, size)
        end_face = warper.warp_image(dest_img, dest_points, points, size)
        average_face = blender.weighted_average(src_face, end_face, percent)
        average_face = alpha_image(average_face,
                                   points) if alpha else average_face

        # Average the background: find the transparent pixels, strip the alpha
        # channel from the face image, then fill those pixels from the averaged background
        if keep_bg:
            average_background = blender.weighted_average(
                src_img, dest_img, percent)
            average_face = alpha_image(average_face, points)
            transparent_pixel = average_face[..., 3] == 0
            average_face = average_face[..., :3]
            average_face[transparent_pixel] = average_background[
                transparent_pixel]

        plt.plot_one(average_face)
        plt.save(average_face)
        video.write(average_face)

    plt.plot_one(dest_img)
    video.write(dest_img, stall_frames)
    plt.show()
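
This driver only relies on video.write(frame, n=1); since the constructor of facemorpher.videoer.Video is not shown in these excerpts, the sketch below wires the call through a minimal OpenCV-based stand-in (the class name and output path are illustrative, and frames are expected to already match width x height):

import cv2
import numpy as np

class StubVideo:
    """Minimal stand-in exposing the write(frame, n=1) interface used by morph()."""
    def __init__(self, path, fps, width, height):
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.writer = cv2.VideoWriter(path, fourcc, fps, (width, height))

    def write(self, frame, n=1):
        bgr = np.asarray(frame[..., :3], dtype=np.uint8)  # drop alpha if present
        for _ in range(n):
            self.writer.write(bgr)

    def end(self):
        self.writer.release()

video = StubVideo('morph.mp4', fps=10, width=500, height=600)
morph(src_img, src_points, dest_img, dest_points, video,
      num_frames=40, fps=10, keep_bg=True)
video.end()
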
Code example #3
def morph(src_img,
          src_points,
          dest_img,
          dest_points,
          video,
          width=500,
          height=600,
          num_frames=20,
          fps=10,
          out_frames=None,
          out_video=None,
          plot=False,
          background='black'):
    """
  Create a morph sequence from source to destination image

  :param src_img: ndarray source image
  :param src_points: source image array of x,y face points
  :param dest_img: ndarray destination image
  :param dest_points: destination image array of x,y face points
  :param video: facemorpher.videoer.Video object
  """
    size = (height, width)
    stall_frames = np.clip(int(fps * 0.15), 1, fps)  # Show first & last longer
    plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)
    num_frames -= (stall_frames * 2)  # No need to process src and dest image

    plt.plot_one(src_img)
    video.write(src_img, 1)

    # Produce morph frames!
    for percent in np.linspace(1, 0, num=num_frames):
        points = locator.weighted_average_points(src_points, dest_points,
                                                 percent)
        src_face = warper.warp_image(src_img, src_points, points, size)
        end_face = warper.warp_image(dest_img, dest_points, points, size)
        average_face = blender.weighted_average(src_face, end_face, percent)

        if background in ('transparent', 'average'):
            mask = blender.mask_from_points(average_face.shape[:2], points)
            average_face = np.dstack((average_face, mask))

            if background == 'average':
                average_background = blender.weighted_average(
                    src_img, dest_img, percent)
                average_face = blender.overlay_image(average_face, mask,
                                                     average_background)

        plt.plot_one(average_face)
        plt.save(average_face)
        video.write(average_face)

    plt.plot_one(dest_img)
    video.write(dest_img, stall_frames)
    plt.show()
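
For reference, the background == 'average' branch amounts to alpha-compositing the masked face over the averaged background; a minimal NumPy sketch of that idea (independent of blender.overlay_image, whose exact blending is not shown here):

import numpy as np

def composite_over(face_rgb, mask, background_rgb):
    """Illustrative only: place the masked face over the background."""
    a = (mask.astype(np.float32) / 255.0)[..., None]     # (h, w, 1) weights from the face mask
    out = face_rgb.astype(np.float32) * a + background_rgb.astype(np.float32) * (1.0 - a)
    return out.astype(np.uint8)
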
Code example #4
def morph_one(src_img,
              src_points,
              dest_img,
              dest_points,
              percent,
              width=500,
              height=600):
    size = (height, width)
    points = locator.weighted_average_points(src_points, dest_points, percent)
    src_face = warper.warp_image(src_img, src_points, points, size)
    end_face = warper.warp_image(dest_img, dest_points, points, size)
    average_face = blender.weighted_average(src_face, end_face, percent)
    return average_face, points
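
morph_one returns a single blended frame plus the averaged landmarks, so a caller can build its own sequence; a short sketch, assuming loaded and landmarked inputs:

import numpy as np

frames = []
for percent in np.linspace(1, 0, num=20):  # same direction as the drivers above
    frame, pts = morph_one(src_img, src_points, dest_img, dest_points, percent)
    frames.append(frame)
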
Code example #5
File: morpher.py  Project: ollietb/face_morpher
def morph(src_img,
          src_points,
          dest_img,
          dest_points,
          video,
          width=500,
          height=600,
          num_frames=20,
          fps=10,
          out_frames=None,
          out_video=None,
          alpha=False,
          plot=False):
    """
  Create a morph sequence from source to destination image

  :param out_video: filename to save the morph video to
  :param src_img: ndarray source image
  :param src_points: source image array of x,y face points
  :param dest_img: ndarray destination image
  :param dest_points: destination image array of x,y face points
  :param video: facemorpher.videoer.Video object
  """
    size = (height, width)
    stall_frames = np.clip(int(fps * 0.15), 1, fps)  # Show first & last longer
    plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)
    num_frames -= (stall_frames * 2)  # No need to process src and dest image

    plt.plot_one(src_img)
    video.write(src_img, 1)

    # Produce morph frames!
    for percent in np.linspace(1, 0, num=num_frames):
        points = locator.weighted_average_points(src_points, dest_points,
                                                 percent)
        src_face = warper.warp_image(src_img, src_points, points, size)
        end_face = warper.warp_image(dest_img, dest_points, points, size)
        average_face = blender.weighted_average(src_face, end_face, percent)
        average_face = alpha_image(average_face,
                                   points) if alpha else average_face
        average_bg = blender.weighted_average(src_img, dest_img, percent)
        img_over_bg(average_face, average_bg)
        plt.plot_one(average_bg, 'save')
        video.write(average_bg)

    plt.plot_one(dest_img)
    video.write(dest_img, stall_frames)
    plt.show()
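
img_over_bg is not defined in these excerpts; it mutates average_bg in place before that frame is plotted and written. A plausible stand-in, assuming it simply copies the non-black face pixels onto the background (the real implementation may differ):

import numpy as np

def img_over_bg(face, background):
    """Illustrative stand-in: copy non-black face pixels onto the background in place."""
    face_rgb = face[..., :3]                    # ignore an alpha channel if present
    covered = np.any(face_rgb > 0, axis=-1)     # pixels actually covered by the warped face
    background[covered] = face_rgb[covered]
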
Code example #6
def averager(imgpaths,
             dest_filename=None,
             width=500,
             height=600,
             background='black',
             blur_edges=False,
             out_filename='result.png',
             plot=False):

    size = (height, width)

    images = []
    point_set = []
    for path in imgpaths:
        img, points = load_image_points(path, size)
        if img is not None:
            images.append(img)
            point_set.append(points)

    if len(images) == 0:
        raise FileNotFoundError('Could not find any valid images.' +
                                ' Supported formats are .jpg, .png, .jpeg')

    if dest_filename is not None:
        dest_img, dest_points = load_image_points(dest_filename, size)
        if dest_img is None or dest_points is None:
            raise Exception('No face or detected face points in dest img: ' +
                            dest_filename)
    else:
        dest_img = np.zeros(images[0].shape, np.uint8)
        dest_points = locator.average_points(point_set)

    num_images = len(images)
    result_images = np.zeros(images[0].shape, np.float32)
    for i in range(num_images):
        result_images += warper.warp_image(images[i], point_set[i],
                                           dest_points, size, np.float32)

    result_image = np.uint8(result_images / num_images)
    face_indexes = np.nonzero(result_image)
    dest_img[face_indexes] = result_image[face_indexes]

    mask = blender.mask_from_points(size, dest_points)
    if blur_edges:
        blur_radius = 10
        mask = cv2.blur(mask, (blur_radius, blur_radius))

    if background in ('transparent', 'average'):
        dest_img = np.dstack((dest_img, mask))

        if background == 'average':
            average_background = locator.average_points(images)
            dest_img = blender.overlay_image(dest_img, mask,
                                             average_background)

    print('Averaged {} images'.format(num_images))
    plt = plotter.Plotter(plot, num_images=1, out_filename=out_filename)
    plt.save(dest_img)
    plt.plot_one(dest_img)
    plt.show()
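
A short usage sketch for averager; the folder and file names are illustrative:

from glob import glob

imgpaths = sorted(glob('faces/*.jpg'))  # hypothetical folder of face photos
averager(imgpaths, width=500, height=600, background='average',
         blur_edges=True, out_filename='average_face.png')
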
Code example #7
File: averager.py  Project: vish25v/face_morpher
def averager(imgpaths,
             dest_filename=None,
             width=500,
             height=600,
             alpha=False,
             blur_edges=False,
             out_filename='result.png',
             plot=False):

    size = (height, width)

    images = []
    point_set = []
    for path in imgpaths:
        img, points = load_image_points(path, size)
        if img is not None:
            images.append(img)
            point_set.append(points)

    if len(images) == 0:
        raise FileNotFoundError('Could not find any valid images.' +
                                ' Supported formats are .jpg, .png, .jpeg')

    if dest_filename is not None:
        dest_img, dest_points = load_image_points(dest_filename, size)
        if dest_img is None or dest_points is None:
            raise Exception('No face or detected face points in dest img: ' +
                            dest_filename)
    else:
        dest_img = np.zeros(images[0].shape, np.uint8)
        dest_points = locator.average_points(point_set)

    num_images = len(images)
    result_images = np.zeros(images[0].shape, np.float32)
    for i in range(num_images):
        result_images += warper.warp_image(images[i], point_set[i],
                                           dest_points, size, np.float32)

    result_image = np.uint8(result_images / num_images)
    face_indexes = np.nonzero(result_image)
    dest_img[face_indexes] = result_image[face_indexes]

    mask = blender.mask_from_points(size, dest_points)
    if blur_edges:
        blur_radius = 10
        mask = cv2.blur(mask, (blur_radius, blur_radius))
    if alpha:
        dest_img = np.dstack((dest_img, mask))
    mpimg.imsave(out_filename, dest_img)

    if plot:
        plt.axis('off')
        plt.imshow(dest_img)
        plt.show()
Code example #8
def average_face(imgpaths,
                 width=500,
                 height=500,
                 background='black',
                 blur_edges=False,
                 out_filename='result.jpg'):
    size = (height, width)

    images = []
    point_set = []
    for path in imgpaths:
        img, points = load_image_points(path, size)
        if img is not None:
            images.append(img)
            point_set.append(points)

    if len(images) == 0:
        raise FileNotFoundError(
            'Could not find any valid images. Supported formats are .jpg, .png, .jpeg'
        )

    dest_img, dest_points = load_image_points(REFERENCE_IMG_PATH, size)

    num_images = len(images)
    result_images = np.zeros(images[0].shape, np.float32)
    for i in range(num_images):
        result_images += warper.warp_image(images[i], point_set[i],
                                           dest_points, size, np.float32)

    result_image = np.uint8(result_images / num_images)
    face_indexes = np.nonzero(result_image)
    dest_img[face_indexes] = result_image[face_indexes]

    mask = blender.mask_from_points(size, dest_points)
    if blur_edges:
        blur_radius = 10
        mask = cv2.blur(mask, (blur_radius, blur_radius))

    if background in ('transparent', 'average'):
        dest_img = np.dstack((dest_img, mask))

        if background == 'average':
            # average_background = np.uint8(locator.average_points(images))
            avg_background = perlin_background(images)
            avg_background[np.where(
                (avg_background == [0, 0, 0]).all(axis=2))] = [
                    128, 128, 128
                ]  # black -> gray pixels in background
            dest_img = blender.overlay_image(dest_img, mask, avg_background)

    print('Averaged {} images'.format(num_images))
    plt = plotter.Plotter(False, num_images=1, out_filename=out_filename)
    plt.save(dest_img)
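
perlin_background and REFERENCE_IMG_PATH are project-specific and not shown here. A minimal stand-in for the background helper, following the commented-out alternative above (plain pixel averaging rather than actual Perlin noise), could look like:

import numpy as np

def perlin_background(images):
    """Illustrative stand-in: average the input images instead of generating Perlin noise."""
    return np.uint8(locator.average_points(images))
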
Code example #9
def alignment(content, style):

    content_img = np.array(Image.from_file(content).data)
    style_img = np.array(Image.from_file(style).data)
    content_name = os.path.splitext(content)[0]
    style_name = os.path.splitext(style)[0]


    # extraction of facial landmarks
    content_pts = locator.face_points(content_img, add_boundary_points=True)
    style_pts = locator.face_points(style_img, add_boundary_points=True)

    # translation in terms of coordinates
    content_coords = warper.grid_coordinates(content_pts)
    style_coords = warper.grid_coordinates(style_pts)

    # warp the face from the style image, given the extracted landmarks
    style_aligned_img = warper.warp_image(style_img, style_pts, content_pts, \
        content_img.shape)
    PIL.Image.fromarray(style_aligned_img).save(style_name+"_aligned.png")

    # apply the warped face from the style image on the content image
    mask = np.ma.masked_greater(style_aligned_img, 0)
    content_img[(mask!=0)] = 0
    content_aligned_img = style_aligned_img + content_img
    PIL.Image.fromarray(content_aligned_img).save(content_name+"_aligned.png")

    # obtain a delaunay triangulation of the style image's facial landmarks
    delaunay_style = spatial.Delaunay(style_pts)

    # visualize the delaunay triangulation of the style image's facial landmarks
    plt.triplot(style_pts[:,0], style_pts[:,1], delaunay_style.simplices.copy())
    plt.imshow(style_img)
    plt.plot(style_pts[:,0], style_pts[:,1], 'o')
    plt.savefig(style_name+"_delaunay.png")

    # apply the same triangulation to the new image, warped to fit the content
    # and visualize it
    plt.figure()
    plt.triplot(content_pts[:,0], content_pts[:,1], delaunay_style.simplices.copy())
    plt.imshow(content_aligned_img)
    plt.plot(content_pts[:,0], content_pts[:,1], 'o')
    plt.savefig(content_name+"_aligned_delaunay.png")
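
A usage sketch for alignment; the filenames are illustrative, and the output names are derived from the input basenames:

alignment('content.jpg', 'style.jpg')
# Writes style_aligned.png, content_aligned.png,
# style_delaunay.png and content_aligned_delaunay.png, named after the inputs.
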
Code example #10
def morph(src_img,
          src_points,
          dest_img,
          dest_points,
          video,
          width=500,
          height=600,
          num_frames=20,
          fps=10,
          out_frames=None,
          out_video=None,
          alpha=False,
          plot=False):
    """
  Create a morph sequence from source to destination image
  :param src_img: ndarray source image
  :param src_points: source image array of x,y face points
  :param dest_img: ndarray destination image
  :param dest_points: destination image array of x,y face points
  :param video: facemorpher.videoer.Video object
  """
    size = (height, width)
    stall_frames = np.clip(int(fps * 0.15), 1, fps)  # Show first & last longer
    plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)
    num_frames -= (stall_frames * 2)  # No need to process src and dest image
    label = plotter.Plotter(plot,
                            num_images=2,
                            out_folder=out_frames,
                            label=True)
    label.plot_one(src_img, src_points)
    label.plot_one(dest_img, dest_points)
    label.show()
    plt.plot_one(src_img)
    video.write(src_img, 1)
    try:
        os.mkdir(os.path.join(os.getcwd(), 'result'))
        os.mkdir(os.path.join(os.getcwd(), 'result', 'src'))
        os.mkdir(os.path.join(os.getcwd(), 'result', 'src_corners'))
        os.mkdir(os.path.join(os.getcwd(), 'result', 'end'))
        os.mkdir(os.path.join(os.getcwd(), 'result', 'average'))
    except Exception as e:
        print(e)

    # Produce morph frames!
    for percent in np.linspace(1, 0, num=num_frames):
        points = locator.weighted_average_points(src_points, dest_points,
                                                 percent)
        src_face = warper.warp_image(src_img, src_points, points, size)
        end_face = warper.warp_image(dest_img, dest_points, points, size)
        average_face = blender.weighted_average(src_face, end_face, percent)
        average_face = alpha_image(average_face,
                                   points) if alpha else average_face
        average_face[:, :, :3] = correct_colours(src_face, average_face,
                                                 np.matrix(points))
        corners = np.array([
            np.array([0, 0]),
            np.array([0, height - 2]),
            np.array([width - 2, 0]),
            np.array([width - 2, height - 2])
        ])
        src_points_with_corners = np.concatenate((src_points, corners))
        points_with_corners = np.concatenate((points, corners))
        src_face_corners = warper.warp_image(src_img, src_points_with_corners,
                                             points_with_corners, size)
        average_face = process_edge(src_face_corners, average_face, width,
                                    height)
        plt.plot_one(average_face)
        filename = '%d.jpg' % int((1 - percent) * num_frames)
        cv2.imwrite(os.path.join(os.getcwd(), 'result', 'src', filename),
                    src_face)
        cv2.imwrite(
            os.path.join(os.getcwd(), 'result', 'src_corners', filename),
            src_face_corners)
        cv2.imwrite(os.path.join(os.getcwd(), 'result', 'end', filename),
                    end_face)
        cv2.imwrite(os.path.join(os.getcwd(), 'result', 'average', filename),
                    average_face)
        plt.save(average_face)
        video.write(average_face)

    plt.plot_one(dest_img)
    video.write(dest_img, stall_frames)
    plt.show()