Example #1
    def run(self):
        """
        Runs the operation
        
        :Returns:
            A single image with foreground and background blended together seamlessly
            
        :Rtype:
            `Image`
        """
        # TODO: add support for merging off the edge of the image
        rows, cols = self.image.shape[:2]
        self.image.to_rgba()

        # float->int and swaps channels
        opencv_source = self.source_image.opencvimage 
        opencv_dest = self.dest_image.opencvimage
        # construct a mask with 0 corresponding to alpha of 0 in the source
        self.mask_image = (self.source_image[...,3] * 255).astype('uint8')

        offset = (self.offset[1] + cols // 2, self.offset[0] + rows // 2)
        opencv_result = cv2.seamlessClone(opencv_source, opencv_dest, 
                                          self.mask_image, offset, self.clone_type)
        self.opimage = Image(opencv_result[:, :, ::-1]) # swap channels back to rgb
        return self.opimage
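The pattern above (derive the clone mask from the source's alpha channel, then hand cv2.seamlessClone an integer (x, y) center) can be reduced to a standalone sketch; file names here are hypothetical, and a 4-channel BGRA foreground is assumed:

import cv2
import numpy as np

# Hypothetical inputs: a 4-channel BGRA foreground and a 3-channel background.
fg = cv2.imread("foreground.png", cv2.IMREAD_UNCHANGED)
bg = cv2.imread("background.jpg")

# Any pixel with nonzero alpha becomes part of the clone mask.
mask = (fg[..., 3] > 0).astype(np.uint8) * 255

# cv2.seamlessClone expects an integer (x, y) point for the destination center.
h, w = bg.shape[:2]
center = (w // 2, h // 2)

result = cv2.seamlessClone(fg[..., :3].copy(), bg, mask, center, cv2.NORMAL_CLONE)
cv2.imwrite("blended.jpg", result)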
Example #2
def make_panorama(original1,original2):
    matcher = cv2.BFMatcher(cv2.NORM_L2,False)
    matches = matcher.knnMatch(original1.des,original2.des,2)
    goodmatches = []
    trainkeys = []
    querykeys = []
    maskArray = []

    for i in matches:
        if i[0].distance < 500:
            if i[0].distance/i[1].distance < 0.8:
                print("\U0001F37A", end=' ')
                goodmatches.append(i[0])
                querykeys.append((original1.kp[i[0].queryIdx].pt[0],original1.kp[i[0].queryIdx].pt[1]))
                trainkeys.append((original2.kp[i[0].trainIdx].pt[0],original2.kp[i[0].trainIdx].pt[1]))

    print("-----Calculating Homography-----")
    H, status = cv2.findHomography(np.array(trainkeys),np.array(querykeys),cv2.RANSAC, 5.0)
    print('-----finished calculating-----')
    div = calcDst4(H, original2.image.shape)
    d = original1.resizeMat2(div)
    print(original1.image.shape)
    T_xy = [[1,0,-d[0]],[0,1,-d[1]],[0,0,1]]
    panorama = cv2.warpPerspective(original2.image,np.dot(T_xy,H),(original1.image.shape[1],original1.image.shape[0]))
    #panorama, mask = Write(panorama,original1)
    CommonMask, SrcMask= MakeMask(panorama, original1.image)
    label = cv2.connectedComponentsWithStats(CommonMask)
    center = np.delete(label[3], 0, 0)
    print(center[0])
    blending = cv2.seamlessClone(original1.image, panorama, CommonMask, (int(center[0][0]),int(center[0][1])), cv2.NORMAL_CLONE)
    blending = Write2(blending, original1.image, SrcMask)
    cv2.imshow('blend',blending)
    print("--next--")
    return blending
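The nested distance checks above implement Lowe's ratio test. Isolated as a helper, under the assumption that des1 and des2 are descriptor arrays from any cv2 feature extractor:

import cv2

def ratio_test(des1, des2, ratio=0.8):
    # Keep a match only when the best candidate is clearly better than the runner-up.
    matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
    good = []
    for m, n in matcher.knnMatch(des1, des2, k=2):
        if m.distance < ratio * n.distance:
            good.append(m)
    return good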
Example #3
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        if self.seamless_clone:
            # scale the soft mask so that most of it saturates to full white
            unitMask = numpy.clip(image_mask * 365, 0, 255).astype(numpy.uint8)

            maxregion = numpy.argwhere(unitMask == 255)

            if maxregion.size > 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                # center of the saturated region, as an (x, y) point
                maskx = int(minx + (lenx // 2))
                masky = int(miny + (leny // 2))
                outimage = cv2.seamlessClone(new_image.astype(numpy.uint8), base_image.astype(numpy.uint8),
                                             unitMask, (maskx, masky), cv2.NORMAL_CLONE)

                return outimage
              
        foreground = cv2.multiply(image_mask, new_image.astype(float))
        background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
        outimage = cv2.add(foreground, background)

        return outimage
Example #4
def faceclone(src_name, dst_name):
    src_img = cv2.imread(src_name)
    dst_img = cv2.imread(dst_name)

    src_rst = api.detection.detect(img = File(src_name), attribute='pose')
    src_img_width   = src_rst['img_width']
    src_img_height  = src_rst['img_height']
    src_face        = src_rst['face'][0]

    dst_rst = api.detection.detect(img = File(dst_name), attribute='pose')
    dst_img_width   = dst_rst['img_width']
    dst_img_height  = dst_rst['img_height']
    dst_face        = dst_rst['face'][0]

    ss = np.array(get_feature_points(src_face, src_img_width, src_img_height), dtype=np.float32)
    ps = np.array(get_feature_points(dst_face, dst_img_width, dst_img_height), dtype=np.float32)
    map_matrix = cv2.getAffineTransform(ps, ss)

    #dsize = (300,300)
    map_result = cv2.warpAffine(dst_img, map_matrix, dsize=(src_img_width,src_img_height))
    
    extract_mask, center = contour.extract_face_mask(src_face['face_id'], src_img_width, src_img_height, src_name)
    # merge 
    ## first blending the border
    extract_alpha = contour.extract_face_alpha(src_face['face_id'], src_img_width, src_img_height, src_name)
    center = (map_result.shape[1] // 2, map_result.shape[0] // 2)
    map_result = cv2.seamlessClone(src_img, map_result, extract_mask, center, flags=cv2.NORMAL_CLONE)

    imap_matrix = cv2.invertAffineTransform(map_matrix)
    final = cv2.warpAffine(map_result, imap_matrix, dsize=(dst_img.shape[1], dst_img.shape[0]))
    return final
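The round trip in faceclone (warp the destination into the source's frame, blend there, then warp back with the inverted matrix) can be written as a small generic helper. A sketch, assuming mat is a 2x3 affine matrix mapping destination coordinates into source coordinates, and mask/center are given in the source frame:

import cv2

def blend_in_aligned_space(src_img, dst_img, mask, center, mat):
    h, w = src_img.shape[:2]
    # Align the destination to the source's frame and blend there...
    aligned = cv2.warpAffine(dst_img, mat, (w, h))
    blended = cv2.seamlessClone(src_img, aligned, mask, center, cv2.NORMAL_CLONE)
    # ...then map the blended result back into the destination frame.
    inv = cv2.invertAffineTransform(mat)
    dh, dw = dst_img.shape[:2]
    return cv2.warpAffine(blended, inv, (dw, dh))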
Example #5
    def process(old_face, new_face, raw_mask):
        height, width, _ = old_face.shape
        height = height // 2
        width = width // 2

        y_indices, x_indices, _ = np.nonzero(raw_mask)
        y_crop = slice(np.min(y_indices), np.max(y_indices))
        x_crop = slice(np.min(x_indices), np.max(x_indices))
        y_center = int(np.rint((np.max(y_indices) + np.min(y_indices)) / 2 + height))
        x_center = int(np.rint((np.max(x_indices) + np.min(x_indices)) / 2 + width))

        insertion = np.rint(new_face[y_crop, x_crop] * 255.0).astype("uint8")
        insertion_mask = np.rint(raw_mask[y_crop, x_crop] * 255.0).astype("uint8")
        insertion_mask[insertion_mask != 0] = 255
        prior = np.rint(np.pad(old_face * 255.0,
                               ((height, height), (width, width), (0, 0)),
                               'constant')).astype("uint8")

        blended = cv2.seamlessClone(insertion,  # pylint: disable=no-member
                                    prior,
                                    insertion_mask,
                                    (x_center, y_center),
                                    cv2.NORMAL_CLONE)  # pylint: disable=no-member
        blended = blended[height:-height, width:-width]

        return blended.astype("float32") / 255.0
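The half-size padding above guards against cv2.seamlessClone misbehaving when the mask's bounding rectangle reaches the destination border. The same guard as a standalone helper (a sketch; names are hypothetical):

import cv2

def padded_seamless_clone(src, dst, mask, center, pad, flags=cv2.NORMAL_CLONE):
    # Pad the destination so the cloned region never touches its border,
    # then crop the padding back off after blending.
    dst_padded = cv2.copyMakeBorder(dst, pad, pad, pad, pad, cv2.BORDER_CONSTANT)
    shifted = (center[0] + pad, center[1] + pad)
    out = cv2.seamlessClone(src, dst_padded, mask, shifted, flags)
    return out[pad:-pad, pad:-pad]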
Example #6
def seamlessClone(srcImg,dstImg,dstPoints):
    polyPoints = getConvexHullPoints(dstPoints)
    srcMask = np.zeros(srcImg.shape, srcImg.dtype)
    cv2.fillPoly(srcMask, np.array([polyPoints]), (255, 255, 255))
    rec = cv2.boundingRect(np.array([polyPoints]))
    center = (rec[0] + rec[2] // 2, rec[1] + rec[3] // 2)
    output = cv2.seamlessClone(srcImg, dstImg, srcMask, center, cv2.NORMAL_CLONE)
    return output
Example #7
    def generate_complement_composite(self, src, poly):
        if isinstance(src, str):
            src = cv2.imread(src)
        # create another mask that will be used to get the negative composite
        cvxpoly = cv2.convexHull(poly)
        cvxmask = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        cvxmask.fill(0)
        cv2.fillPoly(cvxmask, [cvxpoly], 255)

        # bounding box around the filled polygon
        y1, x1 = np.min(np.nonzero(cvxmask), axis=1)
        y2, x2 = np.max(np.nonzero(cvxmask), axis=1)

        center = ((x1 + x2) // 2, (y1 + y2) // 2)
        try:
            dst_neg = cv2.seamlessClone(dst, src, cvxmask, center, cv2.NORMAL_CLONE)
        except cv2.error:
            dst = cv2.resize(dst, src.shape[:-1][::-1])
            dst_neg = cv2.seamlessClone(dst, src, cvxmask, center, cv2.NORMAL_CLONE)
        yield dst_neg
Example #8
def put_image_to_video(videoFramSrc, imageSrc, outPut, a, b, small_scall):
    bgFrame = cv2.imread(videoFramSrc)
    logo = cv2.imread(imageSrc)
    logo_width, logo_height = logo.shape[1] // small_scall, logo.shape[0] // small_scall

    new_logo = cv2.resize(logo, (logo_width, logo_height))
    # all-white mask so the whole logo rectangle is cloned
    mask = 255 * np.ones(new_logo.shape, new_logo.dtype)

    center = (a, b)

    mix_clone = cv2.seamlessClone(new_logo, bgFrame, mask, center, cv2.MIXED_CLONE)
    cv2.imwrite(outPut, mix_clone)
Example #9
def posicionarMultimedia(frame, multimedia, esquinaCaja):
    # Find the center of the ROI rectangle
    centroROI = (esquinaCaja[0] - esquinaCaja[0] // 4, esquinaCaja[1] - esquinaCaja[1] // 4)
    # Mask (same size as the media element, initially black)
    mascara = np.zeros(multimedia.shape, multimedia.dtype)
    # Array of points defining a polygon (quadrilateral) the size of the frame, used to mark the visible (white) area of the mask
    poly = np.array([[0, 0], [0, 400], [frame.shape[0], frame.shape[1]], [frame.shape[1], 0]], np.int32)
    # Paint the visible (white) part of the mask from the polygon outline defined above
    cv2.fillPoly(mascara, [poly], (255, 255, 255))
    # Overlay the media, centered on the ROI
    sobrepuesto = cv2.seamlessClone(multimedia, frame, mascara, centroROI, tipoSobreposicion)
    cv2.imshow("Sobrepuesto", sobrepuesto)  # Show the media overlaid on the webcam/video feed
    # Record the augmented-video frames if the option is enabled
    if guardarVideoAumentado:
        videoAumentado.write(sobrepuesto)
Example #10
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT )

        if self.seamless_clone:
            # map the crop center back into image coordinates with the inverse affine matrix
            maskx, masky = cv2.transform(numpy.array([size / 2, size / 2]).reshape(1, 1, 2),
                                         cv2.invertAffineTransform(mat)).reshape(2).astype(int)
            outimage = cv2.seamlessClone(new_image.astype(numpy.uint8), base_image.astype(numpy.uint8),
                                         (image_mask * 255).astype(numpy.uint8), (maskx, masky), cv2.NORMAL_CLONE)
        else:
            foreground = cv2.multiply(image_mask, new_image.astype(float))
            background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
            outimage = cv2.add(foreground, background)

        return outimage
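The center computation in this variant (the crop midpoint pushed through the inverted alignment matrix) is worth isolating. A sketch, where mat is a hypothetical 2x3 affine matrix from image to crop coordinates and size is the crop side length:

import cv2
import numpy as np

def crop_center_in_image(mat, size):
    # Map the center of a size x size aligned crop back into original-image
    # coordinates using the inverse of the alignment matrix.
    pt = np.array([size / 2, size / 2], dtype=np.float32).reshape(1, 1, 2)
    inv = cv2.invertAffineTransform(mat)
    x, y = cv2.transform(pt, inv).reshape(2).astype(int)
    return x, y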
Example #11
def put_image_to_video(videoFramSrc, imageSrc, outPut, position):
    bgFrame = cv2.imread(videoFramSrc)
    logo = cv2.imread(imageSrc)

    logo_width, logo_height = logo.shape[1] // 2, logo.shape[0] // 2

    new_logo = cv2.resize(logo, (logo_width, logo_height))
    mask = 255 * np.ones(new_logo.shape, new_logo.dtype)

    center_x = position[0] + position[2] // 2
    center_y = position[1] + position[3] // 2


    print ("origin-x -- origin-y:  ",center_x, center_y)

    #print ("width - height:  ",center_x ,"--",center_y,)

    #print("width - height:  ",l_width/2," -- ",l_height/2)

    global camer_position_y,camer_position_x

    if camer_position_y == 0:
        camer_position_y = center_y

    elif abs(camer_position_y - center_y)>10:
        camer_position_y = center_y
    else:
        camer_position_y = camer_position_y


    if camer_position_x == 0:
        camer_position_x = center_x

    elif abs(camer_position_x - center_x)>3:

        camer_position_x = center_x

    else:
        camer_position_x = camer_position_x

    center = (camer_position_x, camer_position_y)


    mixed_clone = cv2.seamlessClone(new_logo, bgFrame, mask, center, cv2.MIXED_CLONE)

    cv2.imwrite(outPut, mixed_clone)
Example #12
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
        
        if self.sharpen_image == "bsharpen":
            # Sharpening using filter2D
            kernel = numpy.ones((3, 3)) * (-1)
            kernel[1, 1] = 9
            new_image = cv2.filter2D(new_image, -1, kernel)
        elif self.sharpen_image == "gsharpen":
            # Sharpening using Weighted Method
            gaussain_blur = cv2.GaussianBlur(new_image, (0, 0), 3.0)
            new_image = cv2.addWeighted(
                new_image, 1.5, gaussain_blur, -0.5, 0, new_image)

        if self.seamless_clone:
            # scale the soft mask so that most of it saturates to full white
            unitMask = numpy.clip(image_mask * 365, 0, 255).astype(numpy.uint8)

            maxregion = numpy.argwhere(unitMask == 255)

            if maxregion.size > 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                # center of the saturated region, as an (x, y) point
                maskx = int(minx + (lenx // 2))
                masky = int(miny + (leny // 2))
                outimage = cv2.seamlessClone(new_image.astype(numpy.uint8), base_image.astype(numpy.uint8),
                                             unitMask, (maskx, masky), cv2.NORMAL_CLONE)

              return outimage

        foreground = cv2.multiply(image_mask, new_image.astype(float))
        background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
        outimage = cv2.add(foreground, background)

        return outimage
Example #13
import cv2
import numpy as np

# Read images : src image will be cloned into dst
im = cv2.imread("hand.jpg")
obj = cv2.imread("logo.png")

# Create an all white mask
mask = 255 * np.ones(obj.shape, obj.dtype)

# The location of the center of the src in the dst
height, width, channels = im.shape
center = (width // 2 + 150, height // 2)

# Seamlessly clone src into dst and put the results in output
normal_clone = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)
mixed_clone = cv2.seamlessClone(obj, im, mask, center, cv2.MIXED_CLONE)

# Write results
cv2.imwrite("opencv-normal-clone-example.jpg", normal_clone)
cv2.imwrite("opencv-mixed-clone-example.jpg", mixed_clone)
Example #14
            t2.append(hull2[dt[i][j]])

        warpTriangle(img1, img1Warped, t1, t2)

    # Calculate Mask
    hull8U = []
    for i in range(0, len(hull2)):
        hull8U.append((hull2[i][0], hull2[i][1]))
    
    mask = np.zeros(img2.shape, dtype = img2.dtype)  
    
    cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
    
    r = cv2.boundingRect(np.float32([hull2]))    
    
    center = (r[0] + r[2] // 2, r[1] + r[3] // 2)
        
    
    # Clone seamlessly.
    output = cv2.seamlessClone(np.uint8(img1Warped), img2, mask, center, cv2.NORMAL_CLONE)
    
    cv2.imshow("Face ", img2)
    cv2.imshow("Face Swapped", output)
    cv2.waitKey(0)
    
    cv2.destroyAllWindows()
        
Example #15
def texture_editing(prn, args):
    # read image
    image = imread(args.image_path)
    [h, w, _] = image.shape

    #-- 1. 3d reconstruction -> get texture. 
    pos = prn.process(image) 
    vertices = prn.get_vertices(pos)
    image = image/255.
    texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
    
    #-- 2. Texture Editing
    Mode = args.mode
    # change part of the texture (for data augmentation / selfie editing; here the eyes are modified as an example)
    if Mode == 0: 
        # load eye mask
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_grey=True)/255. 
        uv_face = imread('Data/uv-data/uv_face.png', as_grey=True)/255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)

        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))

        # modify texture
        new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]
    
    # change the whole face (face swap)
    elif Mode == 1: 
        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
        ref_vertices = prn.get_vertices(ref_pos)
        new_texture = ref_texture#(texture + ref_texture)/2.

    else:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()


    #-- 3. remap to input image.(render)
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)
    new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]

    # Poisson editing for blending the image
    vis_ind = np.argwhere(face_mask>0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))
    output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)
   
    # save output
    imsave(args.output_path, output) 
    print('Done.')
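Several examples in this collection compute the clone center the same way, from the extent of the mask's nonzero pixels. Distilled into a helper (a sketch, assuming a single-channel mask array):

import numpy as np

def mask_center(mask):
    # Center of the mask's nonzero bounding box, as the integer (x, y)
    # point that cv2.seamlessClone expects.
    ys, xs = np.nonzero(mask)
    return (int((xs.min() + xs.max()) / 2 + 0.5),
            int((ys.min() + ys.max()) / 2 + 0.5))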
Example #16
def MergeMaskedFace(predictor_func, predictor_input_shape, cfg, frame_info,
                    img_bgr_uint8, img_bgr, img_face_landmarks):
    img_size = img_bgr.shape[1], img_bgr.shape[0]
    img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
        img_bgr.shape, img_face_landmarks)

    if cfg.mode == 'original':
        return img_bgr, img_face_mask_a

    out_img = img_bgr.copy()
    out_merging_mask_a = None

    input_size = predictor_input_shape[0]
    mask_subres_size = input_size * 4
    output_size = input_size
    if cfg.super_resolution_power != 0:
        output_size *= 4

    face_mat = LandmarksProcessor.get_transform_mat(img_face_landmarks,
                                                    output_size,
                                                    face_type=cfg.face_type)
    face_output_mat = LandmarksProcessor.get_transform_mat(
        img_face_landmarks,
        output_size,
        face_type=cfg.face_type,
        scale=1.0 + 0.01 * cfg.output_face_scale)

    if mask_subres_size == output_size:
        face_mask_output_mat = face_output_mat
    else:
        face_mask_output_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks,
            mask_subres_size,
            face_type=cfg.face_type,
            scale=1.0 + 0.01 * cfg.output_face_scale)

    dst_face_bgr = cv2.warpAffine(img_bgr,
                                  face_mat, (output_size, output_size),
                                  flags=cv2.INTER_CUBIC)
    dst_face_bgr = np.clip(dst_face_bgr, 0, 1)

    dst_face_mask_a_0 = cv2.warpAffine(img_face_mask_a,
                                       face_mat, (output_size, output_size),
                                       flags=cv2.INTER_CUBIC)
    dst_face_mask_a_0 = np.clip(dst_face_mask_a_0, 0, 1)

    predictor_input_bgr = cv2.resize(dst_face_bgr, (input_size, input_size))

    predicted = predictor_func(predictor_input_bgr)
    if isinstance(predicted, tuple):
        #merger return bgr,mask
        prd_face_bgr = np.clip(predicted[0], 0, 1.0)
        prd_face_mask_a_0 = np.clip(predicted[1], 0, 1.0)
        predictor_masked = True
    else:
        #merger return bgr only, using dst mask
        prd_face_bgr = np.clip(predicted, 0, 1.0)
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       (input_size, input_size))
        predictor_masked = False

    if cfg.super_resolution_power != 0:
        prd_face_bgr_enhanced = cfg.superres_func(prd_face_bgr)
        mod = cfg.super_resolution_power / 100.0
        prd_face_bgr = cv2.resize(prd_face_bgr, (output_size, output_size)) * (
            1.0 - mod) + prd_face_bgr_enhanced * mod
        prd_face_bgr = np.clip(prd_face_bgr, 0, 1)

    if cfg.super_resolution_power != 0:
        if predictor_masked:
            prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                           (output_size, output_size),
                                           interpolation=cv2.INTER_CUBIC)
        else:
            prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                           (output_size, output_size),
                                           interpolation=cv2.INTER_CUBIC)

    if cfg.mask_mode == 2:  #dst
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       (output_size, output_size),
                                       interpolation=cv2.INTER_CUBIC)
    elif cfg.mask_mode >= 3 and cfg.mask_mode <= 8:

        if cfg.mask_mode == 3 or cfg.mask_mode == 5 or cfg.mask_mode == 6:
            prd_face_fanseg_bgr = cv2.resize(prd_face_bgr,
                                             (cfg.fanseg_input_size, ) * 2)
            prd_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, prd_face_fanseg_bgr)
            FAN_prd_face_mask_a_0 = cv2.resize(prd_face_fanseg_mask,
                                               (output_size, output_size),
                                               interpolation=cv2.INTER_CUBIC)

        if cfg.mask_mode >= 4 and cfg.mask_mode <= 7:

            full_face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                img_face_landmarks,
                cfg.fanseg_input_size,
                face_type=FaceType.FULL)
            dst_face_fanseg_bgr = cv2.warpAffine(img_bgr,
                                                 full_face_fanseg_mat,
                                                 (cfg.fanseg_input_size, ) * 2,
                                                 flags=cv2.INTER_CUBIC)
            dst_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, dst_face_fanseg_bgr)

            if cfg.face_type == FaceType.FULL:
                FAN_dst_face_mask_a_0 = cv2.resize(dst_face_fanseg_mask,
                                                   (output_size, output_size),
                                                   interpolation=cv2.INTER_CUBIC)
            else:
                face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                    img_face_landmarks,
                    cfg.fanseg_input_size,
                    face_type=cfg.face_type)

                fanseg_rect_corner_pts = np.array(
                    [[0, 0], [cfg.fanseg_input_size - 1, 0],
                     [0, cfg.fanseg_input_size - 1]],
                    dtype=np.float32)
                a = LandmarksProcessor.transform_points(fanseg_rect_corner_pts,
                                                        face_fanseg_mat,
                                                        invert=True)
                b = LandmarksProcessor.transform_points(
                    a, full_face_fanseg_mat)
                m = cv2.getAffineTransform(b, fanseg_rect_corner_pts)
                FAN_dst_face_mask_a_0 = cv2.warpAffine(
                    dst_face_fanseg_mask,
                    m, (cfg.fanseg_input_size, ) * 2,
                    flags=cv2.INTER_CUBIC)
                FAN_dst_face_mask_a_0 = cv2.resize(FAN_dst_face_mask_a_0,
                                                   (output_size, output_size),
                                                   interpolation=cv2.INTER_CUBIC)

        if cfg.mask_mode == 3:  #FAN-prd
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0
        elif cfg.mask_mode == 4:  #FAN-dst
            prd_face_mask_a_0 = FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 5:
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 6:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 7:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_dst_face_mask_a_0

    prd_face_mask_a_0[prd_face_mask_a_0 < (1.0 /
                                           255.0)] = 0.0  # get rid of noise

    # resize to mask_subres_size
    if prd_face_mask_a_0.shape[0] != mask_subres_size:
        prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                       (mask_subres_size, mask_subres_size),
                                       interpolation=cv2.INTER_CUBIC)

    # process mask in local predicted space
    if 'raw' not in cfg.mode:
        # add zero pad
        prd_face_mask_a_0 = np.pad(prd_face_mask_a_0, input_size)

        ero = cfg.erode_mask_modifier
        blur = cfg.blur_mask_modifier

        if ero > 0:
            prd_face_mask_a_0 = cv2.erode(prd_face_mask_a_0,
                                          cv2.getStructuringElement(
                                              cv2.MORPH_ELLIPSE, (ero, ero)),
                                          iterations=1)
        elif ero < 0:
            prd_face_mask_a_0 = cv2.dilate(prd_face_mask_a_0,
                                           cv2.getStructuringElement(
                                               cv2.MORPH_ELLIPSE,
                                               (-ero, -ero)),
                                           iterations=1)

        # clip eroded/dilated mask in actual predict area
        # pad with half blur size in order to accurately fade to zero at the boundary
        clip_size = input_size + blur // 2

        prd_face_mask_a_0[:clip_size, :] = 0
        prd_face_mask_a_0[-clip_size:, :] = 0
        prd_face_mask_a_0[:, :clip_size] = 0
        prd_face_mask_a_0[:, -clip_size:] = 0

        if blur > 0:
            blur = blur + (1 - blur % 2)
            prd_face_mask_a_0 = cv2.GaussianBlur(prd_face_mask_a_0,
                                                 (blur, blur), 0)

        prd_face_mask_a_0 = prd_face_mask_a_0[input_size:-input_size,
                                              input_size:-input_size]

        prd_face_mask_a_0 = np.clip(prd_face_mask_a_0, 0, 1)

    img_face_mask_a = cv2.warpAffine(prd_face_mask_a_0,
                                     face_mask_output_mat,
                                     img_size,
                                     np.zeros(img_bgr.shape[0:2],
                                              dtype=np.float32),
                                     flags=cv2.WARP_INVERSE_MAP
                                     | cv2.INTER_CUBIC)[..., None]
    img_face_mask_a = np.clip(img_face_mask_a, 0.0, 1.0)

    img_face_mask_a[img_face_mask_a < (1.0 / 255.0)] = 0.0  # get rid of noise

    if prd_face_mask_a_0.shape[0] != output_size:
        prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                       (output_size, output_size),
                                       interpolation=cv2.INTER_CUBIC)

    prd_face_mask_a = prd_face_mask_a_0[..., None]
    prd_face_mask_area_a = prd_face_mask_a.copy()
    prd_face_mask_area_a[prd_face_mask_area_a > 0] = 1.0

    if 'raw' in cfg.mode:
        if cfg.mode == 'raw-rgb':
            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     out_img,
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_merging_mask_a = img_face_mask_a

        out_img = np.clip(out_img, 0.0, 1.0)
    else:
        #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
        ar = []
        for i in range(1, 10):
            maxregion = np.argwhere(img_face_mask_a > i / 10.0)
            if maxregion.size != 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                if min(lenx, leny) >= 4:
                    ar += [[lenx, leny]]

        if len(ar) > 0:

            if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:  #rct
                    prd_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip(prd_face_bgr * prd_face_mask_area_a * 255, 0,
                                255).astype(np.uint8),
                        np.clip(dst_face_bgr * prd_face_mask_area_a * 255, 0,
                                255).astype(np.uint8),
                    )

                    prd_face_bgr = np.clip(
                        prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                elif cfg.color_transfer_mode == 2:  #lct
                    prd_face_bgr = imagelib.linear_color_transfer(
                        prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 3:  #mkl
                    prd_face_bgr = imagelib.color_transfer_mkl(
                        prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 4:  #mkl-m
                    prd_face_bgr = imagelib.color_transfer_mkl(
                        prd_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                elif cfg.color_transfer_mode == 5:  #idt
                    prd_face_bgr = imagelib.color_transfer_idt(
                        prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 6:  #idt-m
                    prd_face_bgr = imagelib.color_transfer_idt(
                        prd_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                elif cfg.color_transfer_mode == 7:  #sot-m
                    prd_face_bgr = imagelib.color_transfer_sot(
                        prd_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                    prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)
                elif cfg.color_transfer_mode == 8:  #mix-m
                    prd_face_bgr = imagelib.color_transfer_mix(
                        prd_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)

            if cfg.mode == 'hist-match':
                hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                      dtype=np.float32)

                if cfg.masked_hist_match:
                    hist_mask_a *= prd_face_mask_area_a

                white = (1.0 - hist_mask_a) * np.ones(
                    prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)

                hist_match_1 = prd_face_bgr * hist_mask_a + white
                hist_match_1[hist_match_1 > 1.0] = 1.0

                hist_match_2 = dst_face_bgr * hist_mask_a + white
                hist_match_2[hist_match_2 > 1.0] = 1.0

                prd_face_bgr = imagelib.color_hist_match(
                    hist_match_1, hist_match_2,
                    cfg.hist_match_threshold).astype(dtype=np.float32)

            if 'seamless' in cfg.mode:
                #mask used for cv2.seamlessClone
                img_face_seamless_mask_a = None
                for i in range(1, 10):
                    a = img_face_mask_a > i / 10.0
                    if len(np.argwhere(a)) == 0:
                        continue
                    img_face_seamless_mask_a = img_face_mask_a.copy()
                    img_face_seamless_mask_a[a] = 1.0
                    img_face_seamless_mask_a[img_face_seamless_mask_a <= i /
                                             10.0] = 0.0
                    break

            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     out_img,
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)

            out_img = np.clip(out_img, 0.0, 1.0)

            if 'seamless' in cfg.mode:
                try:
                    #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                    l, t, w, h = cv2.boundingRect(
                        (img_face_seamless_mask_a * 255).astype(np.uint8))
                    s_maskx, s_masky = int(l + w / 2), int(t + h / 2)
                    out_img = cv2.seamlessClone(
                        (out_img * 255).astype(np.uint8), img_bgr_uint8,
                        (img_face_seamless_mask_a * 255).astype(np.uint8),
                        (s_maskx, s_masky), cv2.NORMAL_CLONE)
                    out_img = out_img.astype(dtype=np.float32) / 255.0
                except Exception as e:
                    #seamlessClone may fail in some cases
                    e_str = traceback.format_exc()

                    if 'MemoryError' in e_str:
                        raise Exception(
                            "Seamless fail: " + e_str
                        )  #reraise MemoryError in order to reprocess this data by other processes
                    else:
                        print("Seamless fail: " + e_str)

            out_img = img_bgr * (1 - img_face_mask_a) + (out_img *
                                                         img_face_mask_a)

            out_face_bgr = cv2.warpAffine(out_img,
                                          face_mat, (output_size, output_size),
                                          flags=cv2.INTER_CUBIC)

            if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:
                    out_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip(out_face_bgr * prd_face_mask_area_a * 255, 0,
                                255).astype(np.uint8),
                        np.clip(dst_face_bgr * prd_face_mask_area_a * 255, 0,
                                255).astype(np.uint8))
                    out_face_bgr = np.clip(
                        out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                elif cfg.color_transfer_mode == 2:  #lct
                    out_face_bgr = imagelib.linear_color_transfer(
                        out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 3:  #mkl
                    out_face_bgr = imagelib.color_transfer_mkl(
                        out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 4:  #mkl-m
                    out_face_bgr = imagelib.color_transfer_mkl(
                        out_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                elif cfg.color_transfer_mode == 5:  #idt
                    out_face_bgr = imagelib.color_transfer_idt(
                        out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 6:  #idt-m
                    out_face_bgr = imagelib.color_transfer_idt(
                        out_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                elif cfg.color_transfer_mode == 7:  #sot-m
                    out_face_bgr = imagelib.color_transfer_sot(
                        out_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)
                    out_face_bgr = np.clip(out_face_bgr, 0.0, 1.0)
                elif cfg.color_transfer_mode == 8:  #mix-m
                    out_face_bgr = imagelib.color_transfer_mix(
                        out_face_bgr * prd_face_mask_area_a,
                        dst_face_bgr * prd_face_mask_area_a)

            if cfg.mode == 'seamless-hist-match':
                out_face_bgr = imagelib.color_hist_match(
                    out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

            cfg_mp = cfg.motion_blur_power / 100.0
            if cfg_mp != 0:
                k_size = int(frame_info.motion_power * cfg_mp)
                if k_size >= 1:
                    k_size = np.clip(k_size + 1, 2, 50)
                    if cfg.super_resolution_power != 0:
                        k_size *= 2
                    out_face_bgr = imagelib.LinearMotionBlur(
                        out_face_bgr, k_size, frame_info.motion_deg)

            if cfg.blursharpen_amount != 0:
                out_face_bgr = cfg.blursharpen_func(out_face_bgr,
                                                    cfg.sharpen_mode, 3,
                                                    cfg.blursharpen_amount)

            if cfg.image_denoise_power != 0:
                n = cfg.image_denoise_power
                while n > 0:
                    img_bgr_denoised = cv2.medianBlur(img_bgr, 5)
                    if int(n / 100) != 0:
                        img_bgr = img_bgr_denoised
                    else:
                        pass_power = (n % 100) / 100.0
                        img_bgr = img_bgr * (
                            1.0 - pass_power) + img_bgr_denoised * pass_power
                    n = max(n - 10, 0)

            if cfg.bicubic_degrade_power != 0:
                p = 1.0 - cfg.bicubic_degrade_power / 101.0
                img_bgr_downscaled = cv2.resize(
                    img_bgr, (int(img_size[0] * p), int(img_size[1] * p)),
                    interpolation=cv2.INTER_CUBIC)
                img_bgr = cv2.resize(img_bgr_downscaled, img_size,
                                     interpolation=cv2.INTER_CUBIC)

            new_out = cv2.warpAffine(out_face_bgr, face_mat, img_size,
                                     img_bgr.copy(),
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_img = np.clip(
                img_bgr * (1 - img_face_mask_a) + (new_out * img_face_mask_a),
                0, 1.0)

            if cfg.color_degrade_power != 0:
                out_img_reduced = imagelib.reduce_colors(out_img, 256)
                if cfg.color_degrade_power == 100:
                    out_img = out_img_reduced
                else:
                    alpha = cfg.color_degrade_power / 100.0
                    out_img = (out_img * (1.0 - alpha) +
                               out_img_reduced * alpha)

        out_merging_mask_a = img_face_mask_a

    return out_img, out_merging_mask_a
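The loop that builds img_face_seamless_mask_a above looks for the lowest decile threshold that still selects any pixels, then snaps the soft mask to hard 0/1 values around it. Distilled (a sketch over a float mask in [0, 1]):

import numpy as np

def binarize_soft_mask(soft_mask):
    # Use the lowest decile threshold that still selects any pixels,
    # then binarize the mask around it.
    for i in range(1, 10):
        selected = soft_mask > i / 10.0
        if selected.any():
            hard = soft_mask.copy()
            hard[selected] = 1.0
            hard[hard <= i / 10.0] = 0.0
            return hard
    return None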
Example #17
    for i in range(0, len(dt)):
        t1 = []
        t2 = []
       
        for j in range(0, 3):
            t1.append(hull1[dt[i][j]])
            t2.append(hull2[dt[i][j]])
        
        warpTriangle(img1, img1Warped, t1, t2)

    hull8U = []
    for i in range(0, len(hull2)):
        hull8U.append((hull2[i][0], hull2[i][1]))
    
    mask = np.zeros(img2.shape, dtype = img2.dtype)  
    
    cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
    
    r = cv2.boundingRect(np.float32([hull2]))    
    
    center = (r[0] + r[2] // 2, r[1] + r[3] // 2)
        

    output = cv2.seamlessClone(np.uint8(img1Warped), img2, mask, center, cv2.NORMAL_CLONE)
    
    cv2.imshow("Face Swapped", output)
    cv2.waitKey(0)
    
    cv2.destroyAllWindows()
        
Example #18
def composite_scene(orig_scene, mask_seam, match_scene, dialation_mask, orig_scene1, method="paste", repeat=1):
    """
    combines images based on mask, has a few methods
    
    method='paste', is a straight copy
    'seamlessclone', uses cv2.seamlessclone
    """
    avg_pixel = np.mean(orig_scene1[orig_scene1 != 0])
    
    output = np.zeros(orig_scene.shape)
    if method=="seamlessclone":
        height, width, _ = match_scene.shape
        center = (width // 2, height // 2)
        
        # create plain white mask
        mask = np.zeros(match_scene.shape, match_scene.dtype) + 255
        
        orig_scene_impute = orig_scene.copy()
        orig_scene_impute[mask_seam == 255] = avg_pixel
        
        
        
        #image_to_compare
        output_blend = cv2.seamlessClone(match_scene.astype(np.uint8), 
                                         orig_scene_impute.astype(np.uint8), 
                                         mask, center,cv2.NORMAL_CLONE)
        
        #implot(output_blend)
        # now reapply the mask with alpha blending to fix it up again.
        
        """
        TO DO CHANGE IT FROM THE DILATION + MASK SEAM, NEED TO FIND THE INTERSECTION OF THESE TWO TO BE THE 
        REAL MASK TO BLUR
        """
        dilation_mask = mask_seam.copy()
        
        dilation_mask = cv2.GaussianBlur(dilation_mask, (101, 101), 0)  # blur the mask for an alpha blend between the clone and the original
        #implot(dilation_mask, 'gray')
        
        dilation_mask = dilation_mask/255.0
        
        
        
        # 0 is black, 1 is white
        
        for _ in range(10):
            # some kind of layered alpha blend by the dilation mask values...
            orig_scene_impute = orig_scene.copy()
            orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]
            output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),
                             cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)
        
        
        orig_scene_impute = orig_scene.copy()
        orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]
        output_blend = cv2.add(cv2.multiply(output_blend.astype(np.float), dilation_mask),
                         cv2.multiply(orig_scene_impute.astype(np.float), 1-dilation_mask), 0)
        
        
        
        orig_scene_impute = orig_scene.copy()
        orig_scene_impute[mask_seam == 255] = output_blend[mask_seam == 255]
        output = cv2.seamlessClone(match_scene.astype(np.uint8), 
                                   output_blend.astype(np.uint8), 
                                   mask, center,cv2.NORMAL_CLONE)
        
        # complete blend with seamlessclone...
        
        
        # output = np.maximum(output_blend, orig_scene_impute)
        # or just darken...
        
        
        #if repeat == 1:
        #    return output_blend
        #output = composite_scene(orig_scene_impute, mask_seam, output_blend, dialation_mask, method="paste")
        


    elif method=="paste":
        output[mask_seam == 0] = orig_scene[mask_seam == 0]
        output[mask_seam != 0] = match_scene[mask_seam != 0]
        
    elif method=="alphablend":
        output_blend = output.copy()
        output_blend[mask_seam == 0] = orig_scene[mask_seam == 0]
        output_blend[mask_seam != 0] = match_scene[mask_seam != 0]
        
        
    
        
    else:
        output[mask_seam == 0] = orig_scene[mask_seam == 0]
        output[mask_seam != 0] = match_scene[mask_seam != 0]
    return output
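The feathering used in the 'seamlessclone' branch (blur the hard seam mask, then alpha-blend with it) reduces to a short helper. A sketch, assuming a single-channel uint8 mask and an odd Gaussian kernel size:

import cv2
import numpy as np

def feathered_blend(fg, bg, mask, ksize=101):
    # Blur the hard mask so the transition fades over roughly ksize pixels.
    alpha = cv2.GaussianBlur(mask, (ksize, ksize), 0).astype(np.float64) / 255.0
    if fg.ndim == 3:
        alpha = alpha[..., None]
    return (fg * alpha + bg * (1.0 - alpha)).astype(fg.dtype)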
Example #19
def face_swap(img, swap_area):
    img = cv2.resize(img, dsize=(0, 0), fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
    height, width, channels = img.shape
    img_new_face = np.zeros((height, width, channels), np.uint8)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    out_size = 112
    model = PFLDInference()
    checkpoint = torch.load('checkpoint/pfld_model_best.pth.tar', map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.eval()

    for k in range(0, len(swap_area)):
        # analyze the image to face-swap
        x1 = swap_area[k][0]*2
        y1 = swap_area[k][1]*2
        x2 = swap_area[k][2]*2
        y2 = swap_area[k][3]*2

        dx = max(0, -x1)
        dy = max(0, -y1)
        x1 = max(0, x1)
        y1 = max(0, y1)

        edx = max(0, x2 - width)
        edy = max(0, y2 - height)

        new_bbox = list(map(int, [x1, x2, y1, y2]))
        new_bbox = BBox(new_bbox)
        cropped = img[new_bbox.top:new_bbox.bottom, new_bbox.left:new_bbox.right]
        if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
            cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx), cv2.BORDER_CONSTANT, 0)
        cropped_face = cv2.resize(cropped, (out_size, out_size))

        if cropped_face.shape[0] <= 0 or cropped_face.shape[1] <= 0:
            continue
        test_face = cropped_face.copy()
        test_face = test_face / 255.0
        test_face = test_face.transpose((2, 0, 1))
        test_face = test_face.reshape((1,) + test_face.shape)
        input = torch.from_numpy(test_face).float()
        input = torch.autograd.Variable(input)


        landmark = model(input).cpu().data.numpy()
        landmark = landmark.reshape(-1, 2)
        landmark = new_bbox.reprojectLandmark(landmark)

        points = np.array(landmark, np.int32)
        convexhull = cv2.convexHull(points)
        landmarks_points = []

        for x, y in landmark:
            landmarks_points.append(( int(x), int(y) ))
        img2 = cv2.imread("samples/12--Group/newface2.jpg")
        height2, width2, _ = img2.shape
        img2 =  cv2.resize(img2, dsize=(0, 0), fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
        img_gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        mask = np.zeros_like(img_gray2)

        new_face = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        new_face = Image.fromarray(new_face)
        face2 = list(map(int, detect_faces(new_face)[0]))

        new_bbox2 = list(map(int, [face2[0], face2[2], face2[1], face2[3]]))
        new_bbox2 = BBox(new_bbox2)
        cropped2 = img2[new_bbox2.top:new_bbox2.bottom, new_bbox2.left:new_bbox2.right]
        cropped2_face = cv2.resize(cropped2, (out_size, out_size))

        test_face2 = cropped2_face.copy()
        test_face2 = test_face2 / 255.0
        test_face2 = test_face2.transpose((2, 0, 1))
        test_face2 = test_face2.reshape((1,) + test_face2.shape)
        input2 = torch.from_numpy(test_face2).float()
        input2 = torch.autograd.Variable(input2)

        landmark2 = model(input2).cpu().data.numpy()
        landmark2 = landmark2.reshape(-1, 2)
        landmark2 = new_bbox2.reprojectLandmark(landmark2)
        points2 = np.array(landmark2, np.int32)

        convexhull2 = cv2.convexHull(points2)
        cv2.fillConvexPoly(mask, convexhull2, 255)

        rect2 = cv2.boundingRect(convexhull2)
        subdiv = cv2.Subdiv2D(rect2)

        landmarks_points2 = []
        for x, y in landmark2:
            landmarks_points2.append((int(x), int(y)))
        subdiv.insert(landmarks_points2)
        triangles = subdiv.getTriangleList()
        triangles = np.array(triangles, dtype=np.int32)
        indexes_triangles = []
        for t in triangles:
            pt1 = (t[0], t[1])
            pt2 = (t[2], t[3])
            pt3 = (t[4], t[5])

            index_pt1 = np.where((points2 == pt1).all(axis=1))
            index_pt1 = extract_index_nparray(index_pt1)

            index_pt2 = np.where((points2 == pt2).all(axis=1))
            index_pt2 = extract_index_nparray(index_pt2)

            index_pt3 = np.where((points2 == pt3).all(axis=1))
            index_pt3 = extract_index_nparray(index_pt3)

            if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
                triangle = [index_pt1, index_pt2, index_pt3]
                indexes_triangles.append(triangle)

        img_face_mask = np.zeros_like(img_gray)
        img_head_mask = cv2.fillConvexPoly(img_face_mask, convexhull, 255)

        for triangle_index in indexes_triangles:
            # Triangulation of the first face2
            tr2_pt1 = landmarks_points2[triangle_index[0]]
            tr2_pt2 = landmarks_points2[triangle_index[1]]
            tr2_pt3 = landmarks_points2[triangle_index[2]]
            triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)

            rect2 = cv2.boundingRect(triangle2)
            (x, y, w, h) = rect2
            cropped_triangle = img2[y: y + h, x: x + w]

            cropped_tr2_mask = np.zeros((h, w), np.uint8)

            points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],
                                [tr2_pt2[0] - x, tr2_pt2[1] - y],
                                [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)

            cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)

            # Triangulation of second face2
            tr1_pt1 = landmarks_points[triangle_index[0]]
            tr1_pt2 = landmarks_points[triangle_index[1]]
            tr1_pt3 = landmarks_points[triangle_index[2]]
            triangle = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)

            rect1 = cv2.boundingRect(triangle)
            (x, y, w, h) = rect1

            cropped_tr1_mask = np.zeros((h, w), np.uint8)

            points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],
                               [tr1_pt2[0] - x, tr1_pt2[1] - y],
                               [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)

            cv2.fillConvexPoly(cropped_tr1_mask, points, 255)

            # Warp triangles
            points2 = np.float32(points2)
            points = np.float32(points)
            M = cv2.getAffineTransform(points2, points)
            warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
            warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr1_mask)

            # Reconstructing destination face2
            img_new_face_rect_area = img_new_face[y: y + h, x: x + w]
            img_new_face_rect_area_gray = cv2.cvtColor(img_new_face_rect_area, cv2.COLOR_BGR2GRAY)
            _, mask_triangles_designed = cv2.threshold(img_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
            warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)

            img_new_face_rect_area = cv2.add(img_new_face_rect_area, warped_triangle)
            img_new_face[y: y + h, x: x + w] = img_new_face_rect_area

        img_face_mask = cv2.bitwise_not(img_head_mask)
        img_head_noface = cv2.bitwise_and(img, img, mask=img_face_mask)
        img_new_face = cv2.medianBlur(img_new_face, 3)
        result = cv2.add(img_head_noface, img_new_face)

        (x, y, w, h) = cv2.boundingRect(convexhull)
        center_face = (int((x + x + w) / 2), int((y + y + h) / 2))
        img = cv2.seamlessClone(result, img, img_head_mask, center_face, cv2.MIXED_CLONE)
    img = cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
    # cv2.imwrite(os.path.join('results', "6.jpg"), img)
    # cv2.imshow("a", img)
    # cv2.waitKey(0)
    return img
Example #20
                destImgNewFace[y:y + h, x:x + w] = destImgNewFaceRect

            destImgFaceMask = np.zeros_like(destImgGray)
            destImgHeadMask = cv2.fillConvexPoly(destImgFaceMask,
                                                 destfacePolygon, 255)
            destImgFaceMask = cv2.bitwise_not(destImgHeadMask)

            destImgHead = cv2.bitwise_and(destImg,
                                          destImg,
                                          mask=destImgFaceMask)
            result = cv2.add(destImgHead, destImgNewFace)

            (x, y, w, h) = cv2.boundingRect(destfacePolygon)
            faceCentre = (int((x + x + w) / 2), int((y + y + h) / 2))

            seamlessclone = cv2.seamlessClone(result, destImg, destImgHeadMask,
                                              faceCentre, cv2.NORMAL_CLONE)

            out.write(seamlessclone)

            framesnum += 1
        else:
            break

    end = time.time()
    print("Processing Frames per Second: %f FPS" % (framesnum / (end - start)))
    print("Processing took: %f Seconds" % (end - start))
    print("Total Missed Frames: %d " % missedFrames)
    print("Percentage of Missed Frames: %f " %
          ((100 * missedFrames) / totalFrames))
    out.release()
    capture.release()
Example #21
import cv2
import numpy as np

# Read images : src image will be cloned into dst
im = cv2.imread("image/wood-texture.jpg")
obj = cv2.imread("image/iloveyouticket.jpg")

# Create an all white mask
mask = 255 * np.ones(obj.shape, obj.dtype)

# The location of the center of the src in the dst
height, width, channels = im.shape
center = (int(width / 2), int(height / 2))

# Seamlessly clone src into dst and put the results in output
normal_clone = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)
mixed_clone = cv2.seamlessClone(obj, im, mask, center, cv2.MIXED_CLONE)
monochrome_clone = cv2.seamlessClone(obj, im, mask, center,
                                     cv2.MONOCHROME_TRANSFER)
# Write results
cv2.imwrite("opencv-normal-clone-example.jpg", normal_clone)
cv2.imwrite("opencv-mixed-clone-example.jpg", mixed_clone)
cv2.imwrite("opencv-monochrome-clone-example.jpg", monochrome_clone)
Example #22
    def convert_face(self, img_bgr, img_face_landmarks, debug):
        if (self.mask_mode == 3
                or self.mask_mode == 4) and self.fan_seg is None:
            self.fan_seg = FANSegmentator(256,
                                          FaceType.toString(FaceType.FULL))

        if self.over_res != 1:
            img_bgr = cv2.resize(img_bgr, (img_bgr.shape[1] * self.over_res,
                                           img_bgr.shape[0] * self.over_res))
            img_face_landmarks = img_face_landmarks * self.over_res

        if debug:
            debugs = [img_bgr.copy()]

        img_size = img_bgr.shape[1], img_bgr.shape[0]

        img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
            img_bgr.shape, img_face_landmarks)

        face_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks, self.output_size, face_type=self.face_type)
        face_output_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks,
            self.output_size,
            face_type=self.face_type,
            scale=self.output_face_scale)

        dst_face_bgr = cv2.warpAffine(img_bgr,
                                      face_mat,
                                      (self.output_size, self.output_size),
                                      flags=cv2.INTER_LANCZOS4)
        dst_face_mask_a_0 = cv2.warpAffine(
            img_face_mask_a,
            face_mat, (self.output_size, self.output_size),
            flags=cv2.INTER_LANCZOS4)

        predictor_input_bgr = cv2.resize(
            dst_face_bgr,
            (self.predictor_input_size, self.predictor_input_size))
        predictor_input_mask_a_0 = cv2.resize(
            dst_face_mask_a_0,
            (self.predictor_input_size, self.predictor_input_size))
        predictor_input_mask_a = np.expand_dims(predictor_input_mask_a_0, -1)

        predicted_bgra = self.predictor_func(
            np.concatenate((predictor_input_bgr, predictor_input_mask_a), -1))

        prd_face_bgr = np.clip(predicted_bgra[:, :, 0:3], 0, 1.0)
        prd_face_mask_a_0 = np.clip(predicted_bgra[:, :, 3], 0.0, 1.0)

        if self.mask_mode == 2:  #dst
            prd_face_mask_a_0 = predictor_input_mask_a_0
        elif self.mask_mode == 3:  #FAN-prd
            prd_face_bgr_256 = cv2.resize(prd_face_bgr, (256, 256))
            prd_face_bgr_256_mask = self.fan_seg.extract_from_bgr(
                np.expand_dims(prd_face_bgr_256, 0))[0]
            prd_face_mask_a_0 = cv2.resize(
                prd_face_bgr_256_mask,
                (self.predictor_input_size, self.predictor_input_size))
        elif self.mask_mode == 4:  #FAN-dst
            face_256_mat = LandmarksProcessor.get_transform_mat(
                img_face_landmarks, 256, face_type=FaceType.FULL)
            dst_face_256_bgr = cv2.warpAffine(img_bgr,
                                              face_256_mat, (256, 256),
                                              flags=cv2.INTER_LANCZOS4)
            dst_face_256_mask = self.fan_seg.extract_from_bgr(
                np.expand_dims(dst_face_256_bgr, 0))[0]
            prd_face_mask_a_0 = cv2.resize(
                dst_face_256_mask,
                (self.predictor_input_size, self.predictor_input_size))

        prd_face_mask_a_0[prd_face_mask_a_0 < 0.001] = 0.0

        prd_face_mask_a = np.expand_dims(prd_face_mask_a_0, axis=-1)
        prd_face_mask_aaa = np.repeat(prd_face_mask_a, (3, ), axis=-1)

        img_face_mask_aaa = cv2.warpAffine(prd_face_mask_aaa,
                                           face_output_mat,
                                           img_size,
                                           np.zeros(img_bgr.shape,
                                                    dtype=np.float32),
                                           flags=cv2.WARP_INVERSE_MAP
                                           | cv2.INTER_LANCZOS4)
        img_face_mask_aaa = np.clip(img_face_mask_aaa, 0.0, 1.0)
        img_face_mask_aaa[img_face_mask_aaa <= 0.1] = 0.0  #get rid of noise

        if debug:
            debugs += [img_face_mask_aaa.copy()]

        if 'seamless' in self.mode:
            #mask used for cv2.seamlessClone
            img_face_seamless_mask_aaa = None
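            # scan binarization thresholds from 0.9 down to 0.1; the lowest
            # threshold that still yields a non-empty region wins, leaving a
            # near-binary mask as cv2.seamlessClone expects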
            for i in range(9, 0, -1):
                a = img_face_mask_aaa > i / 10.0
                if len(np.argwhere(a)) == 0:
                    continue
                img_face_seamless_mask_aaa = img_face_mask_aaa.copy()
                img_face_seamless_mask_aaa[a] = 1.0
                img_face_seamless_mask_aaa[img_face_seamless_mask_aaa <= i /
                                           10.0] = 0.0

        out_img = img_bgr.copy()

        if self.mode == 'raw':
            if self.raw_mode == 'rgb' or self.raw_mode == 'rgb-mask':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

            if self.raw_mode == 'rgb-mask':
                out_img = np.concatenate(
                    [out_img,
                     np.expand_dims(img_face_mask_aaa[:, :, 0], -1)], -1)

            if self.raw_mode == 'mask-only':
                out_img = img_face_mask_aaa

            if self.raw_mode == 'predicted-only':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size,
                    np.zeros(out_img.shape, dtype=np.float32),
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

        elif ('seamless' not in self.mode) or (img_face_seamless_mask_aaa
                                               is not None):
            #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
            ar = []
            for i in range(1, 10):
                maxregion = np.argwhere(img_face_mask_aaa > i / 10.0)
                if maxregion.size != 0:
                    miny, minx = maxregion.min(axis=0)[:2]
                    maxy, maxx = maxregion.max(axis=0)[:2]
                    lenx = maxx - minx
                    leny = maxy - miny
                    maskx = (minx + (lenx / 2))
                    masky = (miny + (leny / 2))
                    if lenx >= 4 and leny >= 4:
                        ar += [[lenx, leny, maskx, masky]]

            if len(ar) > 0:
                lenx, leny, maskx, masky = np.mean(ar, axis=0)

                if debug:
                    io.log_info("lenx/leny:(%d/%d) maskx/masky:(%f/%f)" %
                                (lenx, leny, maskx, masky))

                maskx = int(maskx)
                masky = int(masky)

                lowest_len = min(lenx, leny)

                if debug:
                    io.log_info("lowest_len = %f" % (lowest_len))

                img_mask_blurry_aaa = img_face_mask_aaa

                if self.erode_mask_modifier != 0:
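                    # kernel size from an empirical fit against the face size;
                    # a positive result erodes (shrinks) the mask, a negative
                    # one dilates (grows) it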
                    ero = int(lowest_len *
                              (0.126 - lowest_len * 0.00004551365) * 0.01 *
                              self.erode_mask_modifier)
                    if debug:
                        io.log_info("erode_size = %d" % (ero))
                    if ero > 0:
                        img_mask_blurry_aaa = cv2.erode(
                            img_mask_blurry_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (ero, ero)),
                            iterations=1)
                    elif ero < 0:
                        img_mask_blurry_aaa = cv2.dilate(
                            img_mask_blurry_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (-ero, -ero)),
                            iterations=1)

                if self.seamless_erode_mask_modifier != 0:
                    ero = int(lowest_len *
                              (0.126 - lowest_len * 0.00004551365) * 0.01 *
                              self.seamless_erode_mask_modifier)
                    if debug:
                        io.log_info("seamless_erode_size = %d" % (ero))
                    if ero > 0:
                        img_face_seamless_mask_aaa = cv2.erode(
                            img_face_seamless_mask_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (ero, ero)),
                            iterations=1)
                    elif ero < 0:
                        img_face_seamless_mask_aaa = cv2.dilate(
                            img_face_seamless_mask_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (-ero, -ero)),
                            iterations=1)
                    img_face_seamless_mask_aaa = np.clip(
                        img_face_seamless_mask_aaa, 0, 1)

                if self.clip_hborder_mask_per > 0:  #clip hborder before blur
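                    # build a mask that is zero in a band along the left/right
                    # edges of the face patch and blurred inward, so the
                    # swapped face fades out before reaching the crop border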
                    prd_hborder_rect_mask_a = np.ones(prd_face_mask_a.shape,
                                                      dtype=np.float32)
                    prd_border_size = int(prd_hborder_rect_mask_a.shape[1] *
                                          self.clip_hborder_mask_per)
                    prd_hborder_rect_mask_a[:, 0:prd_border_size, :] = 0
                    prd_hborder_rect_mask_a[:, -prd_border_size:, :] = 0
                    prd_hborder_rect_mask_a = np.expand_dims(
                        cv2.blur(prd_hborder_rect_mask_a,
                                 (prd_border_size, prd_border_size)), -1)

                    img_prd_hborder_rect_mask_a = cv2.warpAffine(
                        prd_hborder_rect_mask_a, face_output_mat, img_size,
                        np.zeros(img_bgr.shape, dtype=np.float32),
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
                    img_prd_hborder_rect_mask_a = np.expand_dims(
                        img_prd_hborder_rect_mask_a, -1)
                    img_mask_blurry_aaa *= img_prd_hborder_rect_mask_a
                    img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0, 1.0)

                    if debug:
                        debugs += [img_mask_blurry_aaa.copy()]

                if self.blur_mask_modifier > 0:
                    blur = int(lowest_len * 0.10 * 0.01 *
                               self.blur_mask_modifier)
                    if debug:
                        io.log_info("blur_size = %d" % (blur))
                    if blur > 0:
                        img_mask_blurry_aaa = cv2.blur(img_mask_blurry_aaa,
                                                       (blur, blur))

                img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0, 1.0)

                if debug:
                    debugs += [img_mask_blurry_aaa.copy()]

                if self.color_transfer_mode is not None:
                    if self.color_transfer_mode == 'rct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        prd_face_bgr = image_utils.reinhard_color_transfer(
                            np.clip((prd_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            np.clip((dst_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            source_mask=prd_face_mask_a,
                            target_mask=prd_face_mask_a)
                        prd_face_bgr = np.clip(
                            prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                    elif self.color_transfer_mode == 'lct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        prd_face_bgr = image_utils.linear_color_transfer(
                            prd_face_bgr, dst_face_bgr)
                        prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                if self.mode == 'hist-match-bw':
                    prd_face_bgr = cv2.cvtColor(prd_face_bgr,
                                                cv2.COLOR_BGR2GRAY)
                    prd_face_bgr = np.repeat(np.expand_dims(prd_face_bgr, -1),
                                             (3, ), -1)

                if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                    if debug:
                        debugs += [
                            cv2.warpAffine(
                                prd_face_bgr, face_output_mat, img_size,
                                np.zeros(img_bgr.shape, dtype=np.float32),
                                cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                                cv2.BORDER_TRANSPARENT)
                        ]

                    hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                          dtype=np.float32)

                    if self.masked_hist_match:
                        hist_mask_a *= prd_face_mask_a

                    hist_match_1 = prd_face_bgr * hist_mask_a + (
                        1.0 - hist_mask_a) * np.ones(
                            prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)
                    hist_match_1[hist_match_1 > 1.0] = 1.0

                    hist_match_2 = dst_face_bgr * hist_mask_a + (
                        1.0 - hist_mask_a) * np.ones(
                            prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)
                    hist_match_2[hist_match_2 > 1.0] = 1.0

                    prd_face_bgr = image_utils.color_hist_match(
                        hist_match_1, hist_match_2, self.hist_match_threshold)

                if self.mode == 'hist-match-bw':
                    prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)
                out_img = np.clip(out_img, 0.0, 1.0)

                if debug:
                    debugs += [out_img.copy()]

                if self.mode == 'overlay':
                    pass

                if 'seamless' in self.mode:
                    try:
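                        # seamlessClone expects 8-bit inputs and a hard 8-bit
                        # mask; (maskx, masky) is the (x, y) center averaged
                        # from the mask bounding boxes computed above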
                        out_img = cv2.seamlessClone(
                            (out_img * 255).astype(np.uint8),
                            (img_bgr * 255).astype(np.uint8),
                            (img_face_seamless_mask_aaa * 255).astype(
                                np.uint8), (maskx, masky), cv2.NORMAL_CLONE)
                        out_img = out_img.astype(dtype=np.float32) / 255.0
                    except Exception as e:
                        #seamlessClone may fail in some cases
                        e_str = traceback.format_exc()

                        if 'MemoryError' in e_str:
                            raise Exception(
                                "Seamless fail: " + e_str
                            )  #reraise MemoryError in order to reprocess this data by other processes
                        else:
                            print("Seamless fail: " + e_str)

                    if debug:
                        debugs += [out_img.copy()]

                out_img = np.clip(
                    img_bgr * (1 - img_mask_blurry_aaa) +
                    (out_img * img_mask_blurry_aaa), 0, 1.0)

                if self.mode == 'seamless-hist-match':
                    out_face_bgr = cv2.warpAffine(
                        out_img, face_mat,
                        (self.output_size, self.output_size))
                    new_out_face_bgr = image_utils.color_hist_match(
                        out_face_bgr, dst_face_bgr, self.hist_match_threshold)
                    new_out = cv2.warpAffine(
                        new_out_face_bgr, face_mat, img_size, img_bgr.copy(),
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                        cv2.BORDER_TRANSPARENT)
                    out_img = np.clip(
                        img_bgr * (1 - img_mask_blurry_aaa) +
                        (new_out * img_mask_blurry_aaa), 0, 1.0)

                if self.final_image_color_degrade_power != 0:
                    if debug:
                        debugs += [out_img.copy()]
                    out_img_reduced = image_utils.reduce_colors(out_img, 256)
                    if self.final_image_color_degrade_power == 100:
                        out_img = out_img_reduced
                    else:
                        alpha = self.final_image_color_degrade_power / 100.0
                        out_img = (out_img * (1.0 - alpha) +
                                   out_img_reduced * alpha)

                if self.alpha:
                    out_img = np.concatenate([
                        out_img,
                        np.expand_dims(img_mask_blurry_aaa[:, :, 0], -1)
                    ], -1)

        if self.over_res != 1:
            out_img = cv2.resize(out_img, (img_bgr.shape[1] // self.over_res,
                                           img_bgr.shape[0] // self.over_res))

        out_img = np.clip(out_img, 0.0, 1.0)

        if debug:
            debugs += [out_img.copy()]

        return debugs if debug else out_img
Example #23
0

# Standard imports
import cv2
import numpy as np 

# Read images
src = cv2.imread("images/airplane.jpg")
dst = cv2.imread("images/sky.jpg")


# Create a rough mask around the airplane.
src_mask = np.zeros(src.shape, src.dtype)
poly = np.array([ [4,80], [30,54], [151,63], [254,37], [298,90], [272,134], [43,122] ], np.int32)
cv2.fillPoly(src_mask, [poly], (255, 255, 255))

# This is where the CENTER of the airplane will be placed
center = (800,100)

# Clone seamlessly.
output = cv2.seamlessClone(src, dst, src_mask, center, cv2.NORMAL_CLONE)

# Display the inputs and the blended result
cv2.imshow("Airplane", src)
cv2.imshow("Sky", dst)
cv2.imshow("Mask", src_mask)
cv2.imshow("Blended", output)
cv2.waitKey(0)


Example #24
0
    def convert_face(self, img_bgr, img_face_landmarks, debug):
        if debug:
            debugs = [img_bgr.copy()]

        img_size = img_bgr.shape[1], img_bgr.shape[0]

        img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
            img_bgr.shape, img_face_landmarks)

        face_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks, self.output_size, face_type=self.face_type)
        face_output_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks,
            self.output_size,
            face_type=self.face_type,
            scale=self.output_face_scale)

        dst_face_bgr = cv2.warpAffine(img_bgr,
                                      face_mat,
                                      (self.output_size, self.output_size),
                                      flags=cv2.INTER_LANCZOS4)
        dst_face_mask_a_0 = cv2.warpAffine(
            img_face_mask_a,
            face_mat, (self.output_size, self.output_size),
            flags=cv2.INTER_LANCZOS4)

        predictor_input_bgr = cv2.resize(
            dst_face_bgr,
            (self.predictor_input_size, self.predictor_input_size))
        predictor_input_mask_a_0 = cv2.resize(
            dst_face_mask_a_0,
            (self.predictor_input_size, self.predictor_input_size))
        predictor_input_mask_a = np.expand_dims(predictor_input_mask_a_0, -1)

        predicted_bgra = self.predictor(
            np.concatenate((predictor_input_bgr, predictor_input_mask_a), -1))

        prd_face_bgr = np.clip(predicted_bgra[:, :, 0:3], 0, 1.0)
        prd_face_mask_a_0 = np.clip(predicted_bgra[:, :, 3], 0.0, 1.0)

        if not self.use_predicted_mask:
            prd_face_mask_a_0 = predictor_input_mask_a_0

        prd_face_mask_a_0[prd_face_mask_a_0 < 0.001] = 0.0

        prd_face_mask_a = np.expand_dims(prd_face_mask_a_0, axis=-1)
        prd_face_mask_aaa = np.repeat(prd_face_mask_a, (3, ), axis=-1)

        img_prd_face_mask_aaa = cv2.warpAffine(prd_face_mask_aaa,
                                               face_output_mat,
                                               img_size,
                                               np.zeros(img_bgr.shape,
                                                        dtype=np.float32),
                                               flags=cv2.WARP_INVERSE_MAP
                                               | cv2.INTER_LANCZOS4)
        img_prd_face_mask_aaa = np.clip(img_prd_face_mask_aaa, 0.0, 1.0)

        img_face_mask_aaa = img_prd_face_mask_aaa

        if debug:
            debugs += [img_face_mask_aaa.copy()]

        img_face_mask_aaa[img_face_mask_aaa <= 0.1] = 0.0

        # binarize a copy of the mask; the bounding box of its solid region
        # supplies the anchor point later passed to cv2.seamlessClone
        img_face_mask_flatten_aaa = img_face_mask_aaa.copy()
        img_face_mask_flatten_aaa[img_face_mask_flatten_aaa > 0.9] = 1.0

        maxregion = np.argwhere(img_face_mask_flatten_aaa == 1.0)

        out_img = img_bgr.copy()

        if self.mode == 'raw':
            if self.raw_mode == 'rgb' or self.raw_mode == 'rgb-mask':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

            if self.raw_mode == 'rgb-mask':
                out_img = np.concatenate(
                    [out_img,
                     np.expand_dims(img_face_mask_aaa[:, :, 0], -1)], -1)

            if self.raw_mode == 'mask-only':
                out_img = img_face_mask_aaa

            if self.raw_mode == 'predicted-only':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size,
                    np.zeros(out_img.shape, dtype=np.float32),
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

        else:
            if maxregion.size != 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]

                if debug:
                    print(
                        "maxregion.size: %d, minx:%d, maxx:%d miny:%d, maxy:%d"
                        % (maxregion.size, minx, maxx, miny, maxy))

                lenx = maxx - minx
                leny = maxy - miny
                if lenx >= 4 and leny >= 4:
                    masky = int(minx + (lenx // 2))
                    maskx = int(miny + (leny // 2))
                    lowest_len = min(lenx, leny)

                    if debug:
                        print("lowest_len = %f" % (lowest_len))

                    img_mask_blurry_aaa = img_face_mask_aaa

                    if self.erode_mask_modifier != 0:
                        ero = int(lowest_len *
                                  (0.126 - lowest_len * 0.00004551365) * 0.01 *
                                  self.erode_mask_modifier)
                        if debug:
                            print("erode_size = %d" % (ero))
                        if ero > 0:
                            img_mask_blurry_aaa = cv2.erode(
                                img_mask_blurry_aaa,
                                cv2.getStructuringElement(
                                    cv2.MORPH_ELLIPSE, (ero, ero)),
                                iterations=1)
                        elif ero < 0:
                            img_mask_blurry_aaa = cv2.dilate(
                                img_mask_blurry_aaa,
                                cv2.getStructuringElement(
                                    cv2.MORPH_ELLIPSE, (-ero, -ero)),
                                iterations=1)

                    if self.seamless_erode_mask_modifier != 0:
                        ero = int(lowest_len *
                                  (0.126 - lowest_len * 0.00004551365) * 0.01 *
                                  self.seamless_erode_mask_modifier)
                        if debug:
                            print("seamless_erode_size = %d" % (ero))
                        if ero > 0:
                            img_face_mask_flatten_aaa = cv2.erode(
                                img_face_mask_flatten_aaa,
                                cv2.getStructuringElement(
                                    cv2.MORPH_ELLIPSE, (ero, ero)),
                                iterations=1)
                        elif ero < 0:
                            img_face_mask_flatten_aaa = cv2.dilate(
                                img_face_mask_flatten_aaa,
                                cv2.getStructuringElement(
                                    cv2.MORPH_ELLIPSE, (-ero, -ero)),
                                iterations=1)

                    if self.clip_hborder_mask_per > 0:  #clip hborder before blur
                        prd_hborder_rect_mask_a = np.ones(
                            prd_face_mask_a.shape, dtype=np.float32)
                        prd_border_size = int(
                            prd_hborder_rect_mask_a.shape[1] *
                            self.clip_hborder_mask_per)
                        prd_hborder_rect_mask_a[:, 0:prd_border_size, :] = 0
                        prd_hborder_rect_mask_a[:, -prd_border_size:, :] = 0
                        prd_hborder_rect_mask_a = np.expand_dims(
                            cv2.blur(prd_hborder_rect_mask_a,
                                     (prd_border_size, prd_border_size)), -1)

                        img_prd_hborder_rect_mask_a = cv2.warpAffine(
                            prd_hborder_rect_mask_a, face_output_mat, img_size,
                            np.zeros(img_bgr.shape, dtype=np.float32),
                            cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
                        img_prd_hborder_rect_mask_a = np.expand_dims(
                            img_prd_hborder_rect_mask_a, -1)
                        img_mask_blurry_aaa *= img_prd_hborder_rect_mask_a
                        img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0,
                                                      1.0)

                        if debug:
                            debugs += [img_mask_blurry_aaa.copy()]

                    if self.blur_mask_modifier > 0:
                        blur = int(lowest_len * 0.10 * 0.01 *
                                   self.blur_mask_modifier)
                        if debug:
                            print("blur_size = %d" % (blur))
                        if blur > 0:
                            img_mask_blurry_aaa = cv2.blur(
                                img_mask_blurry_aaa, (blur, blur))

                    img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0, 1.0)

                    if debug:
                        debugs += [img_mask_blurry_aaa.copy()]

                    if self.color_transfer_mode is not None:
                        if self.color_transfer_mode == 'rct':
                            if debug:
                                debugs += [
                                    np.clip(
                                        cv2.warpAffine(
                                            prd_face_bgr, face_output_mat,
                                            img_size,
                                            np.zeros(img_bgr.shape,
                                                     dtype=np.float32),
                                            cv2.WARP_INVERSE_MAP
                                            | cv2.INTER_LANCZOS4,
                                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                                ]

                            prd_face_bgr = image_utils.reinhard_color_transfer(
                                np.clip((prd_face_bgr * 255).astype(np.uint8),
                                        0, 255),
                                np.clip((dst_face_bgr * 255).astype(np.uint8),
                                        0, 255),
                                source_mask=prd_face_mask_a,
                                target_mask=prd_face_mask_a)
                            prd_face_bgr = np.clip(
                                prd_face_bgr.astype(np.float32) / 255.0, 0.0,
                                1.0)

                            if debug:
                                debugs += [
                                    np.clip(
                                        cv2.warpAffine(
                                            prd_face_bgr, face_output_mat,
                                            img_size,
                                            np.zeros(img_bgr.shape,
                                                     dtype=np.float32),
                                            cv2.WARP_INVERSE_MAP
                                            | cv2.INTER_LANCZOS4,
                                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                                ]

                        elif self.color_transfer_mode == 'lct':
                            if debug:
                                debugs += [
                                    np.clip(
                                        cv2.warpAffine(
                                            prd_face_bgr, face_output_mat,
                                            img_size,
                                            np.zeros(img_bgr.shape,
                                                     dtype=np.float32),
                                            cv2.WARP_INVERSE_MAP
                                            | cv2.INTER_LANCZOS4,
                                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                                ]

                            prd_face_bgr = image_utils.linear_color_transfer(
                                prd_face_bgr, dst_face_bgr)
                            prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)

                            if debug:
                                debugs += [
                                    np.clip(
                                        cv2.warpAffine(
                                            prd_face_bgr, face_output_mat,
                                            img_size,
                                            np.zeros(img_bgr.shape,
                                                     dtype=np.float32),
                                            cv2.WARP_INVERSE_MAP
                                            | cv2.INTER_LANCZOS4,
                                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                                ]

                    if self.mode == 'hist-match-bw':
                        prd_face_bgr = cv2.cvtColor(prd_face_bgr,
                                                    cv2.COLOR_BGR2GRAY)
                        prd_face_bgr = np.repeat(
                            np.expand_dims(prd_face_bgr, -1), (3, ), -1)

                    if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                        if debug:
                            debugs += [
                                cv2.warpAffine(
                                    prd_face_bgr, face_output_mat, img_size,
                                    np.zeros(img_bgr.shape, dtype=np.float32),
                                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                                    cv2.BORDER_TRANSPARENT)
                            ]

                        hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                              dtype=np.float32)

                        if self.masked_hist_match:
                            hist_mask_a *= prd_face_mask_a

                        hist_match_1 = prd_face_bgr * hist_mask_a + (
                            1.0 - hist_mask_a) * np.ones(
                                prd_face_bgr.shape[:2] + (1, ),
                                dtype=np.float32)
                        hist_match_1[hist_match_1 > 1.0] = 1.0

                        hist_match_2 = dst_face_bgr * hist_mask_a + (
                            1.0 - hist_mask_a) * np.ones(
                                prd_face_bgr.shape[:2] + (1, ),
                                dtype=np.float32)
                        hist_match_2[hist_match_2 > 1.0] = 1.0

                        prd_face_bgr = image_utils.color_hist_match(
                            hist_match_1, hist_match_2,
                            self.hist_match_threshold)

                    if self.mode == 'hist-match-bw':
                        prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

                    out_img = cv2.warpAffine(
                        prd_face_bgr, face_output_mat, img_size, out_img,
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                        cv2.BORDER_TRANSPARENT)
                    out_img = np.clip(out_img, 0.0, 1.0)

                    if debug:
                        debugs += [out_img.copy()]

                    if self.mode == 'overlay':
                        pass

                    if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
                        out_img = np.clip(
                            img_bgr * (1 - img_face_mask_aaa) +
                            (out_img * img_face_mask_aaa), 0, 1.0)
                        if debug:
                            debugs += [out_img.copy()]

                        out_img = cv2.seamlessClone(
                            (out_img * 255).astype(np.uint8),
                            (img_bgr * 255).astype(np.uint8),
                            (img_face_mask_flatten_aaa * 255).astype(np.uint8),
                            (masky, maskx), cv2.NORMAL_CLONE)
                        out_img = out_img.astype(dtype=np.float32) / 255.0

                        if debug:
                            debugs += [out_img.copy()]

                    out_img = np.clip(
                        img_bgr * (1 - img_mask_blurry_aaa) +
                        (out_img * img_mask_blurry_aaa), 0, 1.0)

                    if self.mode == 'seamless-hist-match':
                        out_face_bgr = cv2.warpAffine(
                            out_img, face_mat,
                            (self.output_size, self.output_size))
                        new_out_face_bgr = image_utils.color_hist_match(
                            out_face_bgr, dst_face_bgr,
                            self.hist_match_threshold)
                        new_out = cv2.warpAffine(
                            new_out_face_bgr, face_mat, img_size,
                            img_bgr.copy(),
                            cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                            cv2.BORDER_TRANSPARENT)
                        out_img = np.clip(
                            img_bgr * (1 - img_mask_blurry_aaa) +
                            (new_out * img_mask_blurry_aaa), 0, 1.0)

                    if self.final_image_color_degrade_power != 0:
                        if debug:
                            debugs += [out_img.copy()]
                        out_img_reduced = image_utils.reduce_colors(
                            out_img, 256)
                        if self.final_image_color_degrade_power == 100:
                            out_img = out_img_reduced
                        else:
                            alpha = self.final_image_color_degrade_power / 100.0
                            out_img = (out_img * (1.0 - alpha) +
                                       out_img_reduced * alpha)

                    if self.alpha:
                        out_img = np.concatenate([
                            out_img,
                            np.expand_dims(img_mask_blurry_aaa[:, :, 0], -1)
                        ], -1)

        out_img = np.clip(out_img, 0.0, 1.0)

        if debug:
            debugs += [out_img.copy()]

        return debugs if debug else out_img
Example #25
0
def start(swap, image, video):
    # construct the argument parser and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--shape-predictor", required=True,
    # 	help="path to facial landmark predictor")
    # ap.add_argument("-i", "--image", required=True,
    # 	help="path to input image")
    # args = vars(ap.parse_args())

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    cam = cv2.VideoCapture(video)
    width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))  # cam.get() returns float
    height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(width, height)
    vidWriter = cv2.VideoWriter("./video_output_data1.mp4",
                                cv2.VideoWriter_fourcc(*'mp4v'),
                                int(cam.get(cv2.CAP_PROP_FPS)),
                                (width, height))
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
    # swap = True
    face_swap_file = image
    # load the input image, resize it, and convert it to grayscale
    # images = ['Scarlett.jpg', 'Rambo.jpg']
    # imageA = cv2.imread('Scarlett.jpg')

    # cv2.circle(imageB, (x, y), 1, (0, 0, 255), -1)
    currentframe = 0
    while True:
        # reading from frame
        ret, frame = cam.read()
        if ret:
            imageA = frame
            grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
            rectsA = detector(grayA, 1)
            if (swap and len(rectsA) < 2) or (not swap and len(rectsA) == 0):
                vidWriter.write(frame)
                print("Can't find enough faces in frame")
                continue
            PA = []
            xPointsA = []
            yPointsA = []
            shapeA = None
            # for (i, rect) in enumerate(rectsA):
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shapeA = predictor(grayA, rectsA[0])
            shapeA = face_utils.shape_to_np(shapeA)
            print(len(shapeA))
            for (x, y) in shapeA:
                PA.append([x, y, 1])
                xPointsA.append(x)
                yPointsA.append(y)
                # convert dlib's rectangle to a OpenCV-style bounding box
                # [i.e., (x, y, w, h)], then draw the face bounding box
                # (x, y, w, h) = face_utils.rect_to_bb(rect)
                # cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # # show the face number
                # cv2.putText(imageA, "Face #{}".format(i + 1), (x - 10, y - 10),
                # 	cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                # loop over the (x, y)-coordinates for the facial landmarks
                # and draw them on the image
                # for (x, y) in shapeA:
                # cv2.circle(imageA, (x, y), 1, (0, 0, 255), -1)
            if swap:
                imageB = frame
            else:
                imageB = cv2.imread(face_swap_file)

            # detect faces in the grayscale image

            grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
            # detect faces in the grayscale image
            # rectsB = detector(grayB, 1)

            PB = []
            shapeB = None
            xPointsB = []
            yPointsB = []

            # loop over the face detections
            # for (i, rect) in enumerate(rectsB):
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            if swap:
                shapeB = predictor(grayB, rectsA[1])
            else:
                rectsB = detector(grayB, 1)
                shapeB = predictor(grayB, rectsB[0])
            shapeB = face_utils.shape_to_np(shapeB)
            # convert dlib's rectangle to a OpenCV-style bounding box
            # [i.e., (x, y, w, h)], then draw the face bounding box
            # (x, y, w, h) = face_utils.rect_to_bb(rect)
            # cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # # show the face number
            # cv2.putText(imageB, "Face #{}".format(i + 1), (x - 10, y - 10),
            # 	cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # loop over the (x, y)-coordinates for the facial landmarks
            # and draw them on the image
            for (x, y) in shapeB:
                PB.append([x, y, 1])
                xPointsB.append(x)
                yPointsB.append(y)
            weights_x, weights_y = find_weights(PA, shapeA, xPointsB, yPointsB)
            # print(weights)
            mask_warped_img, warped_img = warp_face(imageA, imageB, shapeA,
                                                    shapeB, weights_x,
                                                    weights_y)
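            # anchor the clone at the center of the warped mask's bounding box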
            r = cv2.boundingRect(mask_warped_img)
            center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
            output = cv2.seamlessClone(warped_img.copy(), imageA,
                                       mask_warped_img, center,
                                       cv2.NORMAL_CLONE)
            if swap:
                weights_x, weights_y = find_weights(PB, shapeB, xPointsA,
                                                    yPointsA)
                # print(weights)
                mask_warped_img, warped_img = warp_face(
                    imageB, imageA, shapeB, shapeA, weights_x, weights_y)
                r = cv2.boundingRect(mask_warped_img)
                center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
                output = cv2.seamlessClone(warped_img.copy(), output,
                                           mask_warped_img, center,
                                           cv2.NORMAL_CLONE)
                # imageA[rects[0].top() - 40:rects[0].bottom() + 40, rects[0].left() - 40:rects[0].right() + 40, :] = output
                # cv2.imshow("warped_img", warped_img)
                # cv2.imshow("mask_warped", mask_warped_img)
                # cv2.imshow("mask", mask)
            vidWriter.write(output)
            cv2.imwrite('output_data1_' + str(currentframe) + '.jpg', output)
            # cv2.waitKey(0)

            # increasing counter so that it will
            # show how many frames are created
            currentframe += 1
        else:
            break
    vidWriter.release()
    # Release all space and windows once done
    cam.release()
    cv2.destroyAllWindows()
Example #26
0
def face_swap(input_image, direction='AtoB'):
    # perform the actual face swap

    image_size = input_image.shape[1], input_image.shape[0]

    # extract face from input image
    facelist = extract_faces(input_image, size)

    # Only consider first face identified
    if len(facelist) > 0:
        face, resized_image = facelist[0]
    else:
        return None

    # get alignment matrix
    mat = get_align_mat(face)

    resized_face_image = resized_image[crop, crop]
    resized_face_image = cv2.resize(resized_face_image,
                                    (input_size, input_size)) / 255.0

    test_image = numpy.expand_dims(resized_face_image, 0)

    # predict faceswap using encoder A or B depends on direction required
    if direction == 'AtoB':
        figure = autoencoder_B.predict(test_image)
    elif direction == 'BtoA':
        figure = autoencoder_A.predict(test_image)
    else:
        print("Invalid direction, 'AtoB' or 'BtoA' only")
        return None

    new_face = numpy.clip(numpy.squeeze(figure[0]) * 255.0, 0,
                          255).astype('uint8')

    # get image mask
    image_mask = get_image_mask(face, new_face, mat, input_image, image_size)

    # apply model output face to input image (without mask)
    base_image = numpy.copy(input_image)
    new_image = numpy.copy(input_image)
    cv2.warpAffine(new_face, mat * input_size, image_size, new_image,
                   cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                   cv2.BORDER_TRANSPARENT)

    # use opencv seamless clone to apply new image with the mask on base image
    unitMask = numpy.clip(image_mask * 365, 0, 255).astype(numpy.uint8)
    maxregion = numpy.argwhere(unitMask == 255)

    if maxregion.size > 0:
        miny, minx = maxregion.min(axis=0)[:2]
        maxy, maxx = maxregion.max(axis=0)[:2]
        lenx = maxx - minx
        leny = maxy - miny
        masky = int(minx + (lenx // 2))
        maskx = int(miny + (leny // 2))
    else:
        # no solid mask region found: fall back to the image center
        masky, maskx = image_size[0] // 2, image_size[1] // 2
    output_image = cv2.seamlessClone(new_image.astype(numpy.uint8),
                                     base_image.astype(numpy.uint8),
                                     (unitMask).astype(numpy.uint8),
                                     (masky, maskx), cv2.NORMAL_CLONE)

    return output_image
Example #27
0
    ## first blend the border
    extract_alpha = contour.extract_face_alpha(src_face['face_id'], src_img_width, src_img_height, src_name)
    cv2.imshow('contour', extract_alpha)
    # merge 
    #for x in xrange(src_img_width):
    #    for y in xrange(src_img_height):
    #        alpha = extract_alpha[y][x] 
    #        map_result[y][x][0] = (1-alpha) * map_result[y][x][0] + (alpha) * src_img[y][x][0]
    #        map_result[y][x][1] = (1-alpha) * map_result[y][x][1] + (alpha) * src_img[y][x][1]
    #        map_result[y][x][2] = (1-alpha) * map_result[y][x][2] + (alpha) * src_img[y][x][2]

    #cv2.imshow('map result', map_result)
   
    #center = src_face['position']['nose']
    #x = center['x'] * src_img_width / 100
    #y = center['y'] * src_img_height / 100
    #center = (int(x), int(y))
    center = (map_result.shape[1] // 2, map_result.shape[0] // 2)
    map_result = cv2.seamlessClone(src_img, map_result, extract_mask, center, flags=cv2.NORMAL_CLONE)

    cv2.imshow('merge', map_result)

    imap_matrix = cv2.invertAffineTransform(map_matrix)
    print(map_result.shape)
    print(imap_matrix)
    final = cv2.warpAffine(map_result, imap_matrix,
                           dsize=(dst_img.shape[1], dst_img.shape[0]))
    cv2.imshow('final.png', final)
    cv2.imwrite(src_name+dst_name+'final.png', final)

    cv2.waitKey(0)
Example #28
0
def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):
    img_size = img_bgr.shape[1], img_bgr.shape[0]

    img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)

    if cfg.mode == 'original':
        if cfg.export_mask_alpha:
            img_bgr = np.concatenate ( [img_bgr, img_face_mask_a], -1 )
        return img_bgr, img_face_mask_a

    out_img = img_bgr.copy()
    out_merging_mask = None

    output_size = predictor_input_shape[0]
    if cfg.super_resolution_mode != 0:
        output_size *= 2

    face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type)
    face_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale   )

    dst_face_bgr      = cv2.warpAffine( img_bgr        , face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
    dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )

    predictor_input_bgr      = cv2.resize (dst_face_bgr, predictor_input_shape[0:2] )

    predicted = predictor_func (predictor_input_bgr)
    if isinstance(predicted, tuple):
        #converter return bgr,mask
        prd_face_bgr      = np.clip (predicted[0], 0, 1.0)
        prd_face_mask_a_0 = np.clip (predicted[1], 0, 1.0)
        predictor_masked = True
    else:
        #converter return bgr only, using dst mask
        prd_face_bgr      = np.clip (predicted, 0, 1.0 )
        prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, predictor_input_shape[0:2] )
        predictor_masked = False

    if cfg.super_resolution_mode:
        prd_face_bgr = cfg.superres_func(cfg.super_resolution_mode, prd_face_bgr)

        if predictor_masked:
            prd_face_mask_a_0 = cv2.resize (prd_face_mask_a_0,  (output_size, output_size), interpolation=cv2.INTER_CUBIC)
        else:
            prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0,  (output_size, output_size), interpolation=cv2.INTER_CUBIC)

    if cfg.mask_mode == 2: #dst
        prd_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
    elif cfg.mask_mode >= 3 and cfg.mask_mode <= 7:

        if cfg.mask_mode == 3 or cfg.mask_mode == 5 or cfg.mask_mode == 6:
            prd_face_fanseg_bgr = cv2.resize (prd_face_bgr, (cfg.fanseg_input_size,)*2 )
            prd_face_fanseg_mask = cfg.fanseg_extract_func(FaceType.FULL, prd_face_fanseg_bgr)
            FAN_prd_face_mask_a_0 = cv2.resize ( prd_face_fanseg_mask, (output_size, output_size), interpolation=cv2.INTER_CUBIC)

        if 4 <= cfg.mask_mode <= 7:

            full_face_fanseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, cfg.fanseg_input_size, face_type=FaceType.FULL)
            dst_face_fanseg_bgr = cv2.warpAffine(img_bgr, full_face_fanseg_mat, (cfg.fanseg_input_size,)*2, flags=cv2.INTER_CUBIC )
            dst_face_fanseg_mask = cfg.fanseg_extract_func( FaceType.FULL, dst_face_fanseg_bgr )

            if cfg.face_type == FaceType.FULL:
                FAN_dst_face_mask_a_0 = cv2.resize (dst_face_fanseg_mask, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
            elif cfg.face_type == FaceType.HALF:
                half_face_fanseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, cfg.fanseg_input_size, face_type=FaceType.HALF)

                fanseg_rect_corner_pts = np.array ( [ [0,0], [cfg.fanseg_input_size-1,0], [0,cfg.fanseg_input_size-1] ], dtype=np.float32 )
                a = LandmarksProcessor.transform_points (fanseg_rect_corner_pts, half_face_fanseg_mat, invert=True )
                b = LandmarksProcessor.transform_points (a, full_face_fanseg_mat )
                m = cv2.getAffineTransform(b, fanseg_rect_corner_pts)
                FAN_dst_face_mask_a_0 = cv2.warpAffine(dst_face_fanseg_mask, m, (cfg.fanseg_input_size,)*2, flags=cv2.INTER_CUBIC )
                FAN_dst_face_mask_a_0 = cv2.resize (FAN_dst_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
            else:
                raise ValueError ("cfg.face_type unsupported")

        if cfg.mask_mode == 3:   #FAN-prd
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0
        elif cfg.mask_mode == 4: #FAN-dst
            prd_face_mask_a_0 = FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 5:
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 6:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 7:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_dst_face_mask_a_0

    prd_face_mask_a_0[ prd_face_mask_a_0 < 0.001 ] = 0.0

    prd_face_mask_a   = prd_face_mask_a_0[...,np.newaxis]
    prd_face_mask_aaa = np.repeat (prd_face_mask_a, (3,), axis=-1)

    img_face_mask_aaa = cv2.warpAffine( prd_face_mask_aaa, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )
    img_face_mask_aaa = np.clip (img_face_mask_aaa, 0.0, 1.0)
    img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0 #get rid of noise

    if 'raw' in cfg.mode:
        face_corner_pts = np.array ([ [0,0], [output_size-1,0], [output_size-1,output_size-1],  [0,output_size-1] ], dtype=np.float32)
        square_mask = np.zeros(img_bgr.shape, dtype=np.float32)
        cv2.fillConvexPoly(square_mask, \
                           LandmarksProcessor.transform_points (face_corner_pts, face_output_mat, invert=True ).astype(np.int32), \
                           (1,1,1) )

        if cfg.mode == 'raw-rgb':
            out_merging_mask = square_mask

        if cfg.mode == 'raw-rgb' or cfg.mode == 'raw-rgb-mask':
            out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        if cfg.mode == 'raw-rgb-mask':
            out_img = np.concatenate ( [out_img, np.expand_dims (img_face_mask_aaa[:,:,0],-1)], -1 )
            out_merging_mask = square_mask

        elif cfg.mode == 'raw-mask-only':
            out_img = img_face_mask_aaa
            out_merging_mask = img_face_mask_aaa
        elif cfg.mode == 'raw-predicted-only':
            out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
            out_merging_mask = square_mask

        out_img = np.clip (out_img, 0.0, 1.0 )
    else:
        #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
        ar = []
        for i in range(1, 10):
            maxregion = np.argwhere( img_face_mask_aaa > i / 10.0 )
            if maxregion.size != 0:
                miny,minx = maxregion.min(axis=0)[:2]
                maxy,maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                if min(lenx,leny) >= 4:
                    ar += [ [ lenx, leny]  ]

        if len(ar) > 0:
            lenx, leny = np.mean ( ar, axis=0 )
            lowest_len = min (lenx, leny)

            if cfg.erode_mask_modifier != 0:
                ero  = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*cfg.erode_mask_modifier )
                if ero > 0:
                    img_face_mask_aaa = cv2.erode(img_face_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
                elif ero < 0:
                    img_face_mask_aaa = cv2.dilate(img_face_mask_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )

            if cfg.clip_hborder_mask_per > 0: #clip hborder before blur
                prd_hborder_rect_mask_a = np.ones ( prd_face_mask_a.shape, dtype=np.float32)
                prd_border_size = int ( prd_hborder_rect_mask_a.shape[1] * cfg.clip_hborder_mask_per )
                prd_hborder_rect_mask_a[:,0:prd_border_size,:] = 0
                prd_hborder_rect_mask_a[:,-prd_border_size:,:] = 0
                prd_hborder_rect_mask_a[-prd_border_size:,:,:] = 0
                prd_hborder_rect_mask_a = np.expand_dims(cv2.blur(prd_hborder_rect_mask_a, (prd_border_size, prd_border_size) ),-1)

                img_prd_hborder_rect_mask_a = cv2.warpAffine( prd_hborder_rect_mask_a, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )
                img_prd_hborder_rect_mask_a = np.expand_dims (img_prd_hborder_rect_mask_a, -1)
                img_face_mask_aaa *= img_prd_hborder_rect_mask_a
                img_face_mask_aaa = np.clip( img_face_mask_aaa, 0, 1.0 )

            if cfg.blur_mask_modifier > 0:
                blur = int( lowest_len * 0.10 * 0.01*cfg.blur_mask_modifier )
                if blur > 0:
                    img_face_mask_aaa = cv2.blur(img_face_mask_aaa, (blur, blur) )

            img_face_mask_aaa = np.clip( img_face_mask_aaa, 0, 1.0 )

            if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == ColorTransferMode.LCT:
                    prd_face_bgr = imagelib.linear_color_transfer(prd_face_bgr, dst_face_bgr)
                elif ColorTransferMode.RCT <= cfg.color_transfer_mode <= ColorTransferMode.MASKED_RCT_PAPER_CLIP:
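                    # map each RCT variant to its (use_masks, preserve_paper,
                    # clip) flags for reinhard_color_transfer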
                    ct_options = {
                        ColorTransferMode.RCT:                      (False, False, False),
                        ColorTransferMode.RCT_CLIP:                 (False, False, True),
                        ColorTransferMode.RCT_PAPER:                (False, True, False),
                        ColorTransferMode.RCT_PAPER_CLIP:           (False, True, True),
                        ColorTransferMode.MASKED_RCT:               (True, False, False),
                        ColorTransferMode.MASKED_RCT_CLIP:          (True, False, True),
                        ColorTransferMode.MASKED_RCT_PAPER:         (True, True, False),
                        ColorTransferMode.MASKED_RCT_PAPER_CLIP:    (True, True, True),
                    }
                    use_masks, use_paper, use_clip = ct_options[cfg.color_transfer_mode]
                    if not use_masks:
                        prd_face_bgr = imagelib.reinhard_color_transfer(prd_face_bgr, dst_face_bgr,
                                                                        clip=use_clip,
                                                                        preserve_paper=use_paper)
                    else:
                        prd_face_bgr = imagelib.reinhard_color_transfer(prd_face_bgr, dst_face_bgr,
                                                                        clip=use_clip,
                                                                        preserve_paper=use_paper,
                                                                        source_mask=prd_face_mask_a,
                                                                        target_mask=prd_face_mask_a)
                elif cfg.color_transfer_mode == ColorTransferMode.MKL:
                    prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == ColorTransferMode.MASKED_MKL:
                    prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
                elif cfg.color_transfer_mode == ColorTransferMode.IDT:
                    prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == ColorTransferMode.MASKED_IDT:
                    prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)

                elif cfg.color_transfer_mode == ColorTransferMode.EBS:
                    prd_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
                                                     np.clip( (prd_face_bgr*255), 0, 255).astype(np.uint8),  )
                    prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
                prd_face_bgr = np.repeat( np.expand_dims (prd_face_bgr, -1), (3,), -1 )

            if cfg.mode == 'hist-match' or cfg.mode == 'hist-match-bw':
                hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

                if cfg.masked_hist_match:
                    hist_mask_a *= prd_face_mask_a

                white =  (1.0-hist_mask_a)* np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

                hist_match_1 = prd_face_bgr*hist_mask_a + white
                hist_match_1[ hist_match_1 > 1.0 ] = 1.0

                hist_match_2 = dst_face_bgr*hist_mask_a + white
                hist_match_2[ hist_match_2 > 1.0 ] = 1.0

                prd_face_bgr = imagelib.color_hist_match(hist_match_1, hist_match_2, cfg.hist_match_threshold )

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

            if 'seamless' in cfg.mode:
                #mask used for cv2.seamlessClone
                img_face_mask_a = img_face_mask_aaa[...,0:1]

                if cfg.mode == 'seamless2':
                    img_face_mask_a = cv2.warpAffine( img_face_mask_a, face_output_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )

                img_face_seamless_mask_a = None
                for i in range(1,10):
                    a = img_face_mask_a > i / 10.0
                    if len(np.argwhere(a)) == 0:
                        continue
                    img_face_seamless_mask_a = img_face_mask_a.copy()
                    img_face_seamless_mask_a[a] = 1.0
                    img_face_seamless_mask_a[img_face_seamless_mask_a <= i / 10.0] = 0.0
                    break

            if cfg.mode == 'seamless2':

                face_seamless = imagelib.seamless_clone ( prd_face_bgr, dst_face_bgr, img_face_seamless_mask_a )

                out_img = cv2.warpAffine( face_seamless, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
            else:
                out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

            out_img = np.clip(out_img, 0.0, 1.0)

            if 'seamless' in cfg.mode and cfg.mode != 'seamless2':
                try:
                    #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                    l,t,w,h = cv2.boundingRect( (img_face_seamless_mask_a*255).astype(np.uint8) )
                    s_maskx, s_masky = int(l+w/2), int(t+h/2)
                    out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), img_bgr_uint8, (img_face_seamless_mask_a*255).astype(np.uint8), (s_maskx,s_masky) , cv2.NORMAL_CLONE )
                    out_img = out_img.astype(dtype=np.float32) / 255.0
                except Exception as e:
                    #seamlessClone may fail in some cases
                    e_str = traceback.format_exc()

                    if 'MemoryError' in e_str:
                        raise Exception("Seamless fail: " + e_str) #reraise MemoryError in order to reprocess this data by other processes
                    else:
                        print ("Seamless fail: " + e_str)

            out_img = img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa)

            out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size) )

            if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == ColorTransferMode.LCT:
                    out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
                elif ColorTransferMode.RCT <= cfg.color_transfer_mode <= ColorTransferMode.MASKED_RCT_PAPER_CLIP:
                    ct_options = {
                        ColorTransferMode.RCT:                      (False, False, False),
                        ColorTransferMode.RCT_CLIP:                 (False, False, True),
                        ColorTransferMode.RCT_PAPER:                (False, True, False),
                        ColorTransferMode.RCT_PAPER_CLIP:           (False, True, True),
                        ColorTransferMode.MASKED_RCT:               (True, False, False),
                        ColorTransferMode.MASKED_RCT_CLIP:          (True, False, True),
                        ColorTransferMode.MASKED_RCT_PAPER:         (True, True, False),
                        ColorTransferMode.MASKED_RCT_PAPER_CLIP:    (True, True, True),
                    }
                    use_masks, use_paper, use_clip = ct_options[cfg.color_transfer_mode]
                    if not use_masks:
                        out_face_bgr = imagelib.reinhard_color_transfer(out_face_bgr, dst_face_bgr,
                                                                        clip=use_clip,
                                                                        preserve_paper=use_paper)
                    else:
                        out_face_bgr = imagelib.reinhard_color_transfer(out_face_bgr, dst_face_bgr,
                                                                        clip=use_clip,
                                                                        preserve_paper=use_paper,
                                                                        source_mask=prd_face_mask_a,
                                                                        target_mask=prd_face_mask_a)
                elif cfg.color_transfer_mode == ColorTransferMode.MKL:
                    out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == ColorTransferMode.MASKED_MKL:
                    out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
                elif cfg.color_transfer_mode == ColorTransferMode.IDT:
                    out_face_bgr = imagelib.color_transfer_idt (out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == ColorTransferMode.MASKED_IDT:
                    out_face_bgr = imagelib.color_transfer_idt (out_face_bgr*prd_face_mask_a, dst_face_bgr*prd_face_mask_a)
                elif cfg.color_transfer_mode == ColorTransferMode.EBS:
                    out_face_bgr = cfg.ebs_ct_func ( np.clip( (dst_face_bgr*255), 0, 255).astype(np.uint8),
                                                     np.clip( (out_face_bgr*255), 0, 255).astype(np.uint8),  )
                    out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

            if cfg.mode == 'seamless-hist-match':
                out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

            cfg_mp = cfg.motion_blur_power / 100.0
            if cfg_mp != 0:
                k_size = int(frame_info.motion_power*cfg_mp)
                if k_size >= 1:
                    k_size = np.clip (k_size+1, 2, 50)
                    if cfg.super_resolution_mode:
                        k_size *= 2
                    out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)

            if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
                out_face_bgr = cfg.sharpen_func ( out_face_bgr, cfg.sharpen_mode, 3, cfg.sharpen_amount)

            new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
            out_img =  np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )

            if cfg.color_degrade_power != 0:
                out_img_reduced = imagelib.reduce_colors(out_img, 256)
                if cfg.color_degrade_power == 100:
                    out_img = out_img_reduced
                else:
                    alpha = cfg.color_degrade_power / 100.0
                    out_img = (out_img*(1.0-alpha) + out_img_reduced*alpha)

            if cfg.export_mask_alpha:
                out_img = np.concatenate ( [out_img, img_face_mask_aaa[:,:,0:1]], -1 )
        out_merging_mask = img_face_mask_aaa

    return out_img, out_merging_mask
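The small loop above that builds `img_face_seamless_mask_a`, together with the bounding-rect center used for the clone, is a recurring pattern: `cv2.seamlessClone` treats any nonzero mask pixel as part of the source region, so a soft float mask must be snapped to {0, 1}, and deriving the center from that same mask keeps the clone position stable between frames. A minimal standalone sketch of the pattern (the helper names are mine, not from the example; assumes a 2-D float mask):

import cv2
import numpy as np

def binarize_for_seamless(soft_mask):
    # pick the lowest threshold in 0.1..0.9 that leaves a nonempty region,
    # then snap the mask to {0, 1} as the example above does
    for i in range(1, 10):
        hard = soft_mask > i / 10.0
        if hard.any():
            out = soft_mask.copy()
            out[hard] = 1.0
            out[out <= i / 10.0] = 0.0
            return out
    return None

def seamless_center(mask_01):
    # same bounding-rect center the example computes, in (x, y) order
    l, t, w, h = cv2.boundingRect((mask_01 * 255).astype(np.uint8))
    return (l + w // 2, t + h // 2)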
Example #29
0
    transformedTargetPlot.set_data(sourceToTarget)
    transformedProtoPlot.set_data(sourceToProto)
    protoToSource = transfer_poly(sourceToTarget, sourceToProto,
                                  newTargetLandmarks, newProtoLandmarks)
    #protoToSource = transfer_poly(sourceToProto,sourceToTarget, newProtoLandmarks, newTargetLandmarks)

    cloneMask = np.zeros(protoToSource.shape, protoToSource.dtype)
    cloneContour = cv2.convexHull(np.int32(target_landmarks))
    cloneMask = cv2.fillConvexPoly(cloneMask, cloneContour, (255, 255, 255))
    cloneMask = cv2.erode(cloneMask, np.ones((5, 5)))
    #finalFrame = cv2.seamlessClone(protoToSource, np.copy(targetFrame), cloneMask ,(int(trg_w/2),int(trg_h/2)),cv2.NORMAL_CLONE)
    target_face_center, _, _, _, target_local_landmarks = get_face_coordinates_system(
        target_landmarks)
    finalFrame = cv2.seamlessClone(
        protoToSource, np.copy(targetFrame), cloneMask,
        (int(target_face_center[0]), int(target_face_center[1] - 10)),
        cv2.NORMAL_CLONE)

    # finalFrame = protoToSource;
    #protoToSource = transfer_face(sourceToProto, sourceToTarget, newProtoLandmarks, newTargetLandmarks, triangle_landmark_indices)
    #protoToSource = transfer_face(sourceFrame,  source_frame_landmarks ,sourceToProto, sourceToTarget, triangle_landmark_indices)
    if args.slow:
        plt.pause(frameTime)

    finalPlot.set_data(finalFrame)

    src_out_path = base_output_path.format('{}_src'.format(frameCount))
    cv2.imwrite(src_out_path, cv2.cvtColor(sourceFrame, cv2.COLOR_RGB2BGR))
    trg_out_path = base_output_path.format('{}_trg'.format(frameCount))
    cv2.imwrite(trg_out_path, cv2.cvtColor(targetFrame, cv2.COLOR_RGB2BGR))
    transformed_target_out_path = base_output_path.format(
    # Get the face landmark coordinates from each image --- ⑥
    points1 = getPoints(img1)
    points2 = getPoints(img2)

    # Compute the convex hull from the landmark coordinates --- ⑦
    hullIndex = cv2.convexHull(np.array(points2), returnPoints=False)
    hull1 = [points1[int(idx)] for idx in hullIndex]
    hull2 = [points2[int(idx)] for idx in hullIndex]

    # Get the Delaunay triangle coordinates inside the convex hull --- ⑧
    triangles = getTriangles(img2, hull2)

    # Affine-transform each triangle using its coordinates --- ⑨
    for i in range(0, len(triangles)):
        t1 = [hull1[triangles[i][j]] for j in range(3)]
        t2 = [hull2[triangles[i][j]] for j in range(3)]
        warpTriangle(img1, img_draw, t1, t2)

    # Blend the faces using the convex hull as a mask --- ⑩
    mask = np.zeros(img2.shape, dtype=img2.dtype)
    cv2.fillConvexPoly(mask, np.int32(hull2), (255, 255, 255))
    r = cv2.boundingRect(np.float32([hull2]))
    center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
    output = cv2.seamlessClone(np.uint8(img_draw), img2, mask, center, \
                               cv2.NORMAL_CLONE)

    # Save the blended image
    cv2.imwrite(
        './uploads/' + folder_name + '/' + folder_name + '_eyebrow' + num +
        '.jpg', output)
            warped_triangle, warped_triangle, mask=mask_triangles_designed)

        img2_new_face_rect_area = cv2.add(
            img2_new_face_rect_area, warped_triangle)
        img2_new_face[y: y + h, x: x + w] = img2_new_face_rect_area

    # Face swapped (putting 1st face into 2nd face)
    img2_face_mask = np.zeros_like(img2_gray)
    img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
    img2_face_mask = cv2.bitwise_not(img2_head_mask)

    img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
    result = cv2.add(img2_head_noface, img2_new_face)

    (x, y, w, h) = cv2.boundingRect(convexhull2)
    center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))

    seamlessclone = cv2.seamlessClone(
        result, img2, img2_head_mask, center_face2, cv2.MIXED_CLONE)

    cv2.imshow("img2", img2)
    cv2.imshow("clone", seamlessclone)
    cv2.imshow("result", result)

    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
    def convert_face (self, img_bgr, img_face_landmarks, debug):        
        if debug:        
            debugs = [img_bgr.copy()]

        img_size = img_bgr.shape[1], img_bgr.shape[0]

        img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr, img_face_landmarks)
        
        face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, self.output_size, face_type=self.face_type)
        dst_face_bgr      = cv2.warpAffine( img_bgr        , face_mat, (self.output_size, self.output_size), flags=cv2.INTER_LANCZOS4 )
        dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (self.output_size, self.output_size), flags=cv2.INTER_LANCZOS4 )

        predictor_input_bgr      = cv2.resize (dst_face_bgr,      (self.predictor_input_size,self.predictor_input_size))
        predictor_input_mask_a_0 = cv2.resize (dst_face_mask_a_0, (self.predictor_input_size,self.predictor_input_size))
        predictor_input_mask_a   = np.expand_dims (predictor_input_mask_a_0, -1) 
        
        predicted_bgra = self.predictor ( np.concatenate( (predictor_input_bgr, predictor_input_mask_a), -1) )

        prd_face_bgr      = np.clip (predicted_bgra[:,:,0:3], 0, 1.0 )
        prd_face_mask_a_0 = np.clip (predicted_bgra[:,:,3], 0.0, 1.0)
        prd_face_mask_a_0[ prd_face_mask_a_0 < 0.001 ] = 0.0
        
        prd_face_mask_a   = np.expand_dims (prd_face_mask_a_0, axis=-1)
        prd_face_mask_aaa = np.repeat (prd_face_mask_a, (3,), axis=-1)

        img_prd_face_mask_aaa = cv2.warpAffine( prd_face_mask_aaa, face_mat, img_size, np.zeros(img_bgr.shape, dtype=float), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4 )
        img_prd_face_mask_aaa = np.clip (img_prd_face_mask_aaa, 0.0, 1.0)
            
        img_face_mask_aaa = img_prd_face_mask_aaa
        
        if debug:
            debugs += [img_face_mask_aaa.copy()]
        
        img_face_mask_aaa [ img_face_mask_aaa <= 0.1 ] = 0.0
            
        img_face_mask_flatten_aaa = img_face_mask_aaa.copy()
        img_face_mask_flatten_aaa[img_face_mask_flatten_aaa > 0.9] = 1.0

        maxregion = np.argwhere(img_face_mask_flatten_aaa==1.0)        

        out_img = img_bgr.copy()
        if maxregion.size != 0:
            miny,minx = maxregion.min(axis=0)[:2]
            maxy,maxx = maxregion.max(axis=0)[:2]
            lenx = maxx - minx
            leny = maxy - miny
            masky = int(minx+(lenx//2))
            maskx = int(miny+(leny//2))
            lowest_len = min (lenx, leny)
            
            if debug:
                print ("lowest_len = %f" % (lowest_len) )

            ero  = int( lowest_len * ( 0.126 - lowest_len * 0.00004551365 ) * 0.01*self.erode_mask_modifier )
            blur = int( lowest_len * 0.10                                   * 0.01*self.blur_mask_modifier )
          
            if debug:
                print ("ero = %d, blur = %d" % (ero, blur) )
                
            img_mask_blurry_aaa = img_face_mask_aaa
            if self.erode_mask:
                if ero > 0:
                    img_mask_blurry_aaa = cv2.erode(img_mask_blurry_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
                elif ero < 0:
                    img_mask_blurry_aaa = cv2.dilate(img_mask_blurry_aaa, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )

            if self.blur_mask and blur > 0:
                img_mask_blurry_aaa = cv2.blur(img_mask_blurry_aaa, (blur, blur) )
                
            img_mask_blurry_aaa = np.clip( img_mask_blurry_aaa, 0, 1.0 )
            
            if self.clip_border_mask_per > 0:
                prd_border_rect_mask_a = np.ones ( prd_face_mask_a.shape, dtype=prd_face_mask_a.dtype)        
                prd_border_size = int ( prd_border_rect_mask_a.shape[1] * self.clip_border_mask_per )

                prd_border_rect_mask_a[0:prd_border_size,:,:] = 0
                prd_border_rect_mask_a[-prd_border_size:,:,:] = 0
                prd_border_rect_mask_a[:,0:prd_border_size,:] = 0
                prd_border_rect_mask_a[:,-prd_border_size:,:] = 0
                prd_border_rect_mask_a = np.expand_dims(cv2.blur(prd_border_rect_mask_a, (prd_border_size, prd_border_size) ),-1)

            if self.mode == 'hist-match-bw':
                prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
                prd_face_bgr = np.repeat( np.expand_dims (prd_face_bgr, -1), (3,), -1 )
            
            if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                if debug:
                    debugs += [ cv2.warpAffine( prd_face_bgr, face_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT ) ]
                    
                hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=prd_face_bgr.dtype)
                    
                if self.masked_hist_match:
                    hist_mask_a *= prd_face_mask_a

                new_prd_face_bgr = image_utils.color_hist_match(prd_face_bgr*hist_mask_a, dst_face_bgr*hist_mask_a )

                prd_face_bgr = new_prd_face_bgr
                    
            if self.mode == 'hist-match-bw':
                prd_face_bgr = prd_face_bgr.astype(np.float32)
                    
            out_img = cv2.warpAffine( prd_face_bgr, face_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )

            if debug:
                debugs += [out_img.copy()]
                debugs += [img_mask_blurry_aaa.copy()]

            if self.mode == 'seamless' or self.mode == 'seamless-hist-match':
                out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa) , 0, 1.0 )
                if debug:
                    debugs += [out_img.copy()]
                out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), (img_bgr*255).astype(np.uint8), (img_face_mask_flatten_aaa*255).astype(np.uint8), (masky,maskx) , cv2.NORMAL_CLONE )
                out_img = out_img.astype(np.float32) / 255.0
                
                if debug:
                    debugs += [out_img.copy()]
                    
            if self.clip_border_mask_per > 0:
                img_prd_border_rect_mask_a = cv2.warpAffine( prd_border_rect_mask_a, face_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
                img_prd_border_rect_mask_a = np.expand_dims (img_prd_border_rect_mask_a, -1)

                out_img = out_img * img_prd_border_rect_mask_a + img_bgr * (1.0 - img_prd_border_rect_mask_a)
                img_mask_blurry_aaa *= img_prd_border_rect_mask_a
            
            out_img =  np.clip( img_bgr*(1-img_mask_blurry_aaa) + (out_img*img_mask_blurry_aaa) , 0, 1.0 )

            if self.mode == 'seamless-hist-match':
                out_face_bgr = cv2.warpAffine( out_img, face_mat, (self.output_size, self.output_size) )                
                new_out_face_bgr = image_utils.color_hist_match(out_face_bgr, dst_face_bgr )                
                new_out = cv2.warpAffine( new_out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
                out_img =  np.clip( img_bgr*(1-img_mask_blurry_aaa) + (new_out*img_mask_blurry_aaa) , 0, 1.0 )
                
            if self.transfercolor:                                          #transfer color from the original DST image to the fake
                from skimage import io, color
                lab_clr = color.rgb2lab(img_bgr)                            #original DST, converted to LAB color space
                lab_bw = color.rgb2lab(out_img)                             #fake, converted to LAB color space
                tmp_channel, a_channel, b_channel = cv2.split(lab_clr)      #take the color channels A and B from the original DST image
                l_channel, tmp2_channel, tmp3_channel = cv2.split(lab_bw)   #take the lightness channel L from the merged fake
                img_LAB = cv2.merge((l_channel,a_channel, b_channel))       #merge lightness and color
                out_img = color.lab2rgb(img_LAB)                            #convert LAB back to RGB

            if self.alpha:
                new_image = out_img.copy()
                new_image = (new_image*255).astype(np.uint8)                            #convert the image to uint8
                b_channel, g_channel, r_channel = cv2.split(new_image)                  #split the BGR channels
                alpha_channel = img_mask_blurry_aaa.copy()                              #make a copy of the alpha channel
                alpha_channel = (alpha_channel*255).astype(np.uint8)
                alpha_channel, tmp2, tmp3 = cv2.split(alpha_channel)                    #split alpha into three channels; they are all identical, only one is needed
                out_img = cv2.merge((b_channel,g_channel, r_channel, alpha_channel))    #merge BGR with alpha
                out_img = out_img.astype(np.float32) / 255.0
                
        if debug:
            debugs += [out_img.copy()]
            
        return debugs if debug else out_img     
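The `transfercolor` branch above keeps the lightness of the merged fake while borrowing the chrominance of the original frame. The same idea can be sketched with cv2 alone (a sketch assuming float32 BGR images in [0, 1]; not the code used above, which goes through skimage):

import cv2
import numpy as np

def lightness_from_fake_color_from_dst(fake_bgr, dst_bgr):
    # in LAB space: L = lightness, a/b = chrominance
    fake_lab = cv2.cvtColor(fake_bgr.astype(np.float32), cv2.COLOR_BGR2LAB)
    dst_lab = cv2.cvtColor(dst_bgr.astype(np.float32), cv2.COLOR_BGR2LAB)
    # lightness from the fake, color from the original destination frame
    out_lab = np.concatenate([fake_lab[..., 0:1], dst_lab[..., 1:3]], axis=-1)
    return cv2.cvtColor(out_lab, cv2.COLOR_LAB2BGR)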
Example #33
0
    def apply_fixes(self, frame, new_image, image_mask, image_size):
        """ Apply fixes """

        if self.args.sharpen_image is not None and self.args.sharpen_image.lower(
        ) != "none":
            np.clip(new_image, 0.0, 255.0, out=new_image)
            if self.args.sharpen_image == "box_filter":
                kernel = np.ones((3, 3)) * (-1)
                kernel[1, 1] = 9
                new_image = cv2.filter2D(new_image, -1, kernel)  # pylint: disable=no-member
            elif self.args.sharpen_image == "gaussian_filter":
                blur = cv2.GaussianBlur(new_image, (0, 0), 3.0)  # pylint: disable=no-member
                new_image = cv2.addWeighted(
                    new_image,  # pylint: disable=no-member
                    1.5,
                    blur,
                    -0.5,
                    0,
                    new_image)

        if self.args.avg_color_adjust:
            for _ in [0, 1]:
                np.clip(new_image, 0.0, 255.0, out=new_image)
                diff = frame - new_image
                avg_diff = np.sum(diff * image_mask, axis=(0, 1))
                adjustment = avg_diff / np.sum(image_mask, axis=(0, 1))
                new_image = new_image + adjustment

        if self.args.match_histogram:
            np.clip(new_image, 0.0, 255.0, out=new_image)
            new_image = self.color_hist_match(new_image, frame, image_mask)

        if self.args.seamless_clone and not self.args.draw_transparent:
            h, w, _ = frame.shape
            h = h // 2
            w = w // 2

            y_indices, x_indices, _ = np.nonzero(image_mask)
            y_crop = slice(np.min(y_indices), np.max(y_indices))
            x_crop = slice(np.min(x_indices), np.max(x_indices))
            y_center = int(
                np.rint((np.max(y_indices) + np.min(y_indices)) / 2) + h)
            x_center = int(
                np.rint((np.max(x_indices) + np.min(x_indices)) / 2) + w)
            '''
            # test with average of centroid rather than the h /2 , w/2 center
            y_center = int(np.rint(np.average(y_indices) + h)
            x_center = int(np.rint(np.average(x_indices) + w)
            '''

            insertion = np.rint(new_image[y_crop, x_crop, :]).astype('uint8')
            insertion_mask = image_mask[y_crop, x_crop, :]
            insertion_mask[insertion_mask != 0] = 255
            insertion_mask = insertion_mask.astype('uint8')

            prior = np.pad(frame, ((h, h), (w, w), (0, 0)),
                           'constant').astype('uint8')

            blended = cv2.seamlessClone(
                insertion,  # pylint: disable=no-member
                prior,
                insertion_mask,
                (x_center, y_center),
                cv2.NORMAL_CLONE)  # pylint: disable=no-member
            blended = blended[h:-h, w:-w]

        else:
            foreground = new_image * image_mask
            background = frame * (1.0 - image_mask)
            blended = foreground + background

        np.clip(blended, 0.0, 255.0, out=blended)

        return np.rint(blended).astype('uint8')
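Note the padding trick in the seamless-clone branch above: `cv2.seamlessClone` raises an error when the mask's bounding rectangle, placed at the given center, crosses the destination border, so the frame is padded by half its size first and the result cropped back. Distilled into a sketch (assuming uint8 BGR arrays, with a mask the same size as the patch; the helper name is mine):

import cv2
import numpy as np

def padded_seamless_clone(patch, dst_frame, mask, center_xy):
    # pad the destination so the clone rectangle can never cross the border,
    # shift the center into padded coordinates, clone, then crop back
    h, w = dst_frame.shape[0] // 2, dst_frame.shape[1] // 2
    padded = np.pad(dst_frame, ((h, h), (w, w), (0, 0)), 'constant')
    center = (center_xy[0] + w, center_xy[1] + h)
    blended = cv2.seamlessClone(patch, padded, mask, center, cv2.NORMAL_CLONE)
    return blended[h:-h, w:-w]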
#!/usr/bin/python
'''
    OpenCV seamlessCloning : Normal vs Mixed
    Copyright 2015 by Satya Mallick <*****@*****.**>
    
'''
import cv2
import numpy as np

# Read images : src image will be cloned into dst
im = cv2.imread("images/wood-texture.jpg")
obj = cv2.imread("images/iloveyouticket.jpg")

# Create an all white mask
mask = 255 * np.ones(obj.shape, obj.dtype)

# The location of the center of the src in the dst
height, width, channels = im.shape
center = (int(width / 2), int(height / 2))

# Seamlessly clone src into dst and put the results in output
normal_clone = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)
mixed_clone = cv2.seamlessClone(obj, im, mask, center, cv2.MIXED_CLONE)

# Write results
cv2.imwrite("images/opencv-normal-clone-example.jpg", normal_clone)
cv2.imwrite("images/opencv-mixed-clone-example.jpg", mixed_clone)
def run_two_image(bfm,
                  uv_coords,
                  uv_kpt_ind,
                  face_ind,
                  triangles,
                  s_uv_coords,
                  image_path_A,
                  mat_path_A,
                  image_path_B,
                  mat_path_B,
                  save_folder,
                  name,
                  mode=1,
                  uv_h=256,
                  uv_w=256,
                  image_h=256,
                  image_w=256):

    image, cropped_image, center, size, pos, vertices = \
        run_one_image(bfm, uv_coords, face_ind, image_path_A, mat_path_A,
                  uv_h, uv_w, image_h, image_w)

    ref_image, ref_cropped_image, ref_center, ref_size, ref_pos, ref_vertices = \
        run_one_image(bfm, uv_coords, face_ind, image_path_B, mat_path_B,
                  uv_h, uv_w, image_h, image_w)

    texture = cv2.remap(cropped_image,
                        pos[:, :, :2].astype(np.float32),
                        None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT,
                        borderValue=(0))
    ref_texture = cv2.remap(ref_cropped_image,
                            ref_pos[:, :, :2].astype(np.float32),
                            None,
                            interpolation=cv2.INTER_NEAREST,
                            borderMode=cv2.BORDER_CONSTANT,
                            borderValue=(0))

    if mode == 0:
        # load eye mask
        uv_face_eye = imread('images/uv_face_eyes.png', as_grey=True) / 255.
        uv_face = imread('images/uv_face.png', as_grey=True) / 255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)
        # modify texture
        new_texture = texture * (1 - eye_mask[:, :, np.newaxis]
                                 ) + ref_texture * eye_mask[:, :, np.newaxis]
    else:
        uv_whole_face = imread('images/uv_face_mask.png', as_grey=True) / 255.
        new_texture = texture * (1 - uv_whole_face[:, :, np.newaxis]
                                 ) + ref_texture * uv_whole_face[:, :,
                                                                 np.newaxis]
        # new_texture = ref_texture

    #-- 3. remap to input image.(render)
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T,
                               vis_colors.T,
                               triangles.T,
                               image_h,
                               image_w,
                               c=1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    new_colors = get_colors_from_texture(new_texture, face_ind, uv_h)
    new_image = render_texture(vertices.T,
                               new_colors.T,
                               triangles.T,
                               image_h,
                               image_w,
                               c=3)
    new_image = cropped_image * (1 - face_mask[:, :, np.newaxis]
                                 ) + new_image * face_mask[:, :, np.newaxis]

    # Poisson Editing for blending image
    vis_ind = np.argwhere(face_mask > 0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1]) / 2 + 0.5),
              int((vis_min[0] + vis_max[0]) / 2 + 0.5))

    output = cv2.seamlessClone((new_image * 255).astype(np.uint8),
                               (cropped_image * 255).astype(np.uint8),
                               (face_mask * 255).astype(np.uint8), center,
                               cv2.NORMAL_CLONE)

    if mode == 0:
        imsave(os.path.join(save_folder, name + '_eyes.jpg'), output)
    else:
        imsave(os.path.join(save_folder, name + '_swap.jpg'), output)
def replaceFace(dst_img, src_img, ps_dst, ps_src, wx, wy):
    hull = ConvexHull(ps_dst)
    convex = []
    for i in range(len(hull.vertices)):
        convex.append(hull.points[hull.vertices[i]])
    convex = np.array(convex)
    '''
    for i in range(len(convex)):
        p1 = (int(convex[i][0]), int(convex[i][1]))
        p2 = (int(convex[(i+1)%len(convex)][0]), int(convex[(i+1)%len(convex)][1]))
        #cv2.line(dst_img, p1, p2, (255,255,255))

    n = len(ps_dst)
    for i in range(n):
        pt = ps_dst[i]
        k=10
        for j in range(k):
            x = int(ps_dst[i][0]*(1-j/(float)(k)) + ps_dst[(i+1)%n][0]*(j/(float)(k)))
            y = int(ps_dst[i][1]*(1-j/(float)(k)) + ps_dst[(i+1)%n][1]*(j/(float)(k)))
            srcx = int(f(x,y,wx,ps_dst))
            srcy = int(f(x,y,wy,ps_dst))
            print(x, y, srcx, srcy, x-srcx, y-srcy)
            #dst_img[y,x] = src_img[srcy, srcx]
            dst_img[y,x] = (255,255,255)
            src_img[srcy, srcx] = (255,255,255)
                
            cv2.imshow('src', src_img)
            cv2.imshow('dst', dst_img)
            cv2.waitKey(0)
    '''

    convex = convex.astype(int)
    mask = np.zeros(dst_img.shape, dst_img.dtype)
    cv2.fillPoly(mask, [convex], (255, 255, 255))

    rescale, offset = getRemapValue(dst_img, src_img, ps_dst, ps_src)

    src_img_warped = np.zeros(dst_img.shape, dst_img.dtype)
    x_min = int(np.min(convex[:, 0]))
    y_min = int(np.min(convex[:, 1]))
    x_max = int(np.max(convex[:, 0]))
    y_max = int(np.max(convex[:, 1]))

    #cv2.imwrite('src_img.jpg', src_img)
    #cv2.imwrite('dst_img.jpg', dst_img)

    for x in range(x_min, x_max):
        for y in range(y_min, y_max):
            #if convex_contains(convex, (x,y)):
            if mask[y, x][0] > 128:
                srcx = int(f(x, y, wx, ps_dst))
                srcy = int(f(x, y, wy, ps_dst))
                #print(x, y, srcx, srcy, x-srcx, y-srcy)
                v = src_img[srcy, srcx] * rescale + offset
                v = np.clip(v, 0, 255)
                src_img_warped[y, x] = v
                #dst_img[y,x] = v
                #src_img[srcy, srcx] = (255,255,255)

                #cv2.imshow('src', src_img)
                #cv2.imshow('dst', dst_img)
                #cv2.waitKey(0)

    border = 0
    h, w, c = src_img_warped.shape
    src_img_warped = src_img_warped[border:h - border, border:w - border]
    mask = mask[border:h - border, border:w - border]

    cv2.imwrite('test2_ori.jpg', dst_img)

    center = (int((x_min + x_max) / 2), int((y_min + y_max) / 2))
    #center = (int(dst_img.shape[1]/2), int(dst_img.shape[0]/2))
    dst_img = cv2.seamlessClone(src_img_warped, dst_img, mask, center,
                                cv2.NORMAL_CLONE)

    cv2.imwrite('res2_tps.jpg', dst_img)
    cv2.imshow('res2_tps.jpg', dst_img)
    cv2.waitKey(0)

    return dst_img
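Like several of the examples above, this one computes the clone center as the middle of the mask's bounding box. The recurring pattern is worth a tiny helper (a sketch; the function name is mine):

import cv2

def mask_center(mask_u8):
    # center of the mask's bounding rectangle, in the (x, y) order
    # that cv2.seamlessClone expects
    m2d = mask_u8 if mask_u8.ndim == 2 else mask_u8[..., 0]
    x, y, w, h = cv2.boundingRect(m2d)
    return (x + w // 2, y + h // 2)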
Example #37
0
                 srcBB[2]] = resFrame[srcBB[1]:srcBB[1] + srcBB[3],
                                      srcBB[0]:srcBB[0] +
                                      srcBB[2]] + mask * trgCrop

        cloneMask = np.zeros(targetFrame.shape, targetFrame.dtype)
        #cloneMask = cloneMask.fill(255)
        cloneContour = cv2.convexHull(np.int32(transformed_landmarks))
        #mskBB = cv2.boundingRect(cloneContour)
        #cloneMask[mskBB[1]:mskBB[1]+mskBB[3], mskBB[0]:mskBB[0]+mskBB[2]] = (255,255,255)
        #targetFrame = cv2.fillConvexPoly(targetFrame, cloneContour, (0,0,0))
        cloneMask = cv2.fillConvexPoly(cloneMask, cloneContour,
                                       (255, 255, 255))
        cloneMask = cv2.erode(cloneMask, np.ones((5, 5)))
        #finalFrame = cv2.seamlessClone(resFrame, np.copy(targetFrame), cloneMask ,(int(trg_w/2),int(trg_h/2)),cv2.MIXED_CLONE)
        finalFrame = cv2.seamlessClone(
            resFrame, np.copy(targetFrame), cloneMask,
            (int(target_face_center[0]), int(target_face_center[1])),
            cv2.NORMAL_CLONE)
        #resFrame[srcBB[1]:srcBB[1]+srcBB[3], srcBB[0]:srcBB[0]+srcBB[2]] =  resFrame[srcBB[1]:srcBB[1]+srcBB[3], srcBB[0]:srcBB[0]+srcBB[2]] + mask * trgCrop
        if args.slow:
            resPlot.set_data(finalFrame)
            plt.pause(frameTime)
        # resFrame[trgBB[1]:trgBB[1]+trgBB[3], trgBB[0]:trgBB[0]+trgBB[2]] = resFrame[trgBB[1]:trgBB[1]+trgBB[3], trgBB[0]:trgBB[0]+trgBB[2]] * ((1.0,1.0,1.0) - mask)
        # resFrame[trgBB[1]:trgBB[1]+trgBB[3], trgBB[0]:trgBB[0]+trgBB[2]] = resFrame[trgBB[1]:trgBB[1]+trgBB[3], trgBB[0]:trgBB[0]+trgBB[2]] + trgCrop
    #convert in a neutral scale invariant space
    resPlot.set_data(finalFrame)
    #move stuff around
    if output:
        output.write(np.uint8(cv2.cvtColor(finalFrame, cv2.COLOR_RGB2BGR)))

    profiler.tock()
Example #38
0
    def get_negatives_stitching(self, path, mode=1):
        import cv2
        import os
        import scipy.ndimage.morphology as morph
        import random

        # decide whether the negative differs in color, shape, or material
        # (only one attribute, so that they cannot mix)
        color_or_shape = np.random.randint(
            0, 3)  # 0 is color, 1 is shape, 2 is material

        path_image_original = self.path_dataset + '/images/' + self.split + '/positive/' + path + '.' + self.image_extension
        index_object_original = np.random.randint(0, 3)
        image_original = cv2.imread(path_image_original)
        seg_original = cv2.imread(
            path_image_original.replace('positive', 'segmentation'))
        with open(
                os.path.join(self.path_dataset, 'scenes', self.split,
                             f'{path}.json'), 'rb') as f:
            scene_original = json.load(f)
        color_original = scene_original['objects'][index_object_original][
            'color']
        shape_original = scene_original['objects'][index_object_original][
            'shape']
        material_original = scene_original['objects'][index_object_original][
            'material']
        position = scene_original['objects'][index_object_original][
            'pixel_coords'][:2]

        # inpaint hole of previous object
        mask_inpaint = (seg_original[..., 2] == index_object_original +
                        1).astype(np.uint8)
        mask_inpaint = morph.binary_dilation(mask_inpaint,
                                             structure=np.ones(
                                                 (5, 5))).astype(np.uint8)
        image_inpainted = cv2.inpaint(image_original, mask_inpaint, 3,
                                      cv2.INPAINT_TELEA)

        # open other image
        other_correct = False

        while not other_correct:
            path_other = random.choice(list(self.paths.values()))
            index_object_other = np.random.randint(0, 3)
            with open(
                    os.path.join(self.path_dataset, 'scenes', self.split,
                                 f'{path_other}.json'), 'rb') as f:
                scene_other = json.load(f)
            color_other = scene_other['objects'][index_object_other]['color']
            shape_other = scene_other['objects'][index_object_other]['shape']
            material_other = scene_other['objects'][index_object_other][
                'material']
            # brute force...
            if mode == 1:
                if color_or_shape == 1 and (color_other != color_original or
                                            material_other != material_original
                                            or shape_other == shape_original):
                    continue
                elif color_or_shape == 0 and (
                        color_other == color_original
                        or material_other != material_original
                        or shape_other != shape_original):
                    continue
                elif color_or_shape == 2 and (
                        color_other != color_original or material_other
                        == material_original or shape_other != shape_original):
                    continue
                else:
                    other_correct = True
            elif mode == 2:
                if color_or_shape == 1 and (shape_other == shape_original):
                    continue
                elif color_or_shape == 0 and (color_other == color_original):
                    continue
                elif color_or_shape == 2 and (material_other
                                              == material_original):
                    continue
                else:
                    other_correct = True

            path_image_other = self.path_dataset + '/images/' + self.split + '/positive/' + path_other + '.' + self.image_extension
            image_other = cv2.imread(path_image_other)
            seg_other = cv2.imread(
                path_image_other.replace('positive', 'segmentation'))

        # stitch object
        # %10 for the SCLEVR3 case; otherwise it has no effect
        mask_stitch = (seg_other[..., 2] % 10) == index_object_other + 1
        mask_stitch = mask_stitch.astype(np.uint8)
        # extend mask so that the gradients (borders) are visible
        mask_stitch = morph.binary_dilation(mask_stitch,
                                            structure=np.ones(
                                                (10, 10))).astype(np.uint8)
        mask_stitch = np.stack((mask_stitch, ) * 3, -1) * 255

        try:
            image_stitched = cv2.seamlessClone(image_other, image_inpainted,
                                               mask_stitch, tuple(position),
                                               cv2.NORMAL_CLONE)
        except cv2.error:
            # seamlessClone fails when the masked region does not fit at this position
            print('retry')
            return self.get_negatives_stitching(path, mode)

        img = Image.fromarray(image_stitched).convert('RGB')
        img = self.transform(img)
        return img
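The except clause above papers over the usual failure mode: `cv2.seamlessClone` throws when the mask's bounding rectangle, centered at `position`, does not fit inside the destination image. A sketch of a wrapper that performs an approximate bounds check first (the helper name and the check are mine, not from the example):

import cv2

def safe_seamless_clone(src, dst, mask, center, flags=cv2.NORMAL_CLONE):
    # approximate check that the clone rectangle stays inside the destination,
    # the common cause of seamlessClone exceptions
    m2d = mask if mask.ndim == 2 else mask[..., 0]
    x, y, w, h = cv2.boundingRect(m2d)
    cx, cy = center
    if (cx - w // 2 < 0 or cy - h // 2 < 0 or
            cx + w - w // 2 > dst.shape[1] or cy + h - h // 2 > dst.shape[0]):
        return None
    return cv2.seamlessClone(src, dst, mask, center, flags)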
Example #39
0
    mask = np.asarray(mask * mask_src, dtype=np.uint8)

    ## Correct color
    if not args.warp_2d and args.correct_color:
        warped_src_face = apply_mask(warped_src_face, mask)
        dst_face_masked = apply_mask(dst_face, mask)
        warped_src_face = correct_colours(dst_face_masked, warped_src_face,
                                          dst_points)

    ## Shrink the mask
    kernel = np.ones((10, 10), np.uint8)
    mask = cv2.erode(mask, kernel, iterations=1)
    ## Poisson Blending
    r = cv2.boundingRect(mask)
    center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
    output = cv2.seamlessClone(warped_src_face, dst_face, mask, center,
                               cv2.NORMAL_CLONE)

    x, y, w, h = dst_shape
    dst_img_cp = dst_img.copy()
    dst_img_cp[y:y + h, x:x + w] = output
    output = dst_img_cp

    dir_path = os.path.dirname(args.out)
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    cv2.imwrite(args.out, output)

    ##For debug
    # if not args.no_debug_window:
    #     cv2.imshow("From", dst_img)
Example #40
0
def seamless_cloning_paper_test_copy(mode='mixed_clone'):
    # Read images : src image will be cloned into dst
    # im = cv2.imread('./code_seamless/input.jpg')
    # obj = cv2.imread('./code_seamless/output.jpg')

    im = cv2.imread(
        'D:/RAIN_DATA_TRAINING/GOPR0039_taken_640_broken_center_short/raindrop0439.jpg'
    )
    obj = cv2.imread(
        'D:/RAIN_DATA_TRAINING/GOPR0039_taken_640_broken_center_short/raindrop0436.jpg'
    )

    # Create an all white mask
    # mask = cv2.imread('./code_seamless/mask.jpg')

    mask = cv2.imread(
        'D:/RAIN_DATA_TRAINING/GOPR0039_taken_640_broken_center_short/mask01.jpg'
    )

    crop_img = obj[370:470, 270:370]
    cv2.imshow("cropped", crop_img)

    x_offset = y_offset = 270
    im[y_offset:y_offset + crop_img.shape[0],
       x_offset:x_offset + crop_img.shape[1]] = crop_img

    cv2.imshow("im", im)
    #cv2.waitKey(0)

    gray_image = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

    # thresh = 127
    # im_bw = cv2.threshold(gray_image, thresh, 255, cv2.THRESH_BINARY)[1]

    (thresh, im_bw) = cv2.threshold(gray_image, 128, 255,
                                    cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    cv2.imshow("gray_image", gray_image)
    cv2.imshow("im_bw", im_bw)
    cv2.imwrite(
        "D:/RAIN_DATA_TRAINING/GOPR0039_taken_640_broken_center_short/copied_image_439_436.jpg",
        im)

    # Create an all white mask
    # mask = 255 * np.ones(obj.shape, obj.dtype)

    # The location of the center of the src in the dst
    width, height, channels = im.shape

    minx = 1e5
    maxx = 1
    miny = 1e5
    maxy = 1

    for y in range(1, height):
        for x in range(1, width):
            if ((im_bw[x][y] != 0) and (im_bw[x][y] != 255)):
                print(x, y, " : ", im_bw[x][y])
            # if ((mask[x][y][1] > 0) or (mask[x][y][2] > 0) or (mask[x][y][0] > 0) ):
            if ((im_bw[x][y] > 0)):
                # print(x, y , " : ",im_bw[x][y])

                minx = min(minx, x)
                maxx = max(maxx, x)
                miny = min(miny, y)
                maxy = max(maxy, y)

    center = (int(miny + (maxy - miny) / 2), int(minx + (maxx - minx) / 2))

    # center = (int(height / 2), int(width / 2))
    print("center", center)
    print(",minx: ", minx, ",maxx: ", maxx, ",miny: ", miny, ",maxy: ", maxy)

    cv2.circle(im, center, 1, (0, 0, 255), -1)
    cv2.circle(obj, center, 1, (0, 0, 255), -1)
    cv2.circle(mask, center, 1, (0, 0, 255), -1)

    # draw points
    cv2.circle(mask, (minx, miny), 1, (0, 255, 255), -1)
    cv2.circle(mask, (maxx, miny), 1, (0, 255, 255), -1)
    cv2.circle(mask, (minx, maxy), 1, (0, 255, 255), -1)
    cv2.circle(mask, (maxx, maxy), 1, (0, 255, 255), -1)

    # draw points
    cv2.circle(mask, (miny, minx), 1, (0, 255, 0), -1)
    cv2.circle(mask, (maxy, minx), 1, (0, 255, 0), -1)
    cv2.circle(mask, (miny, maxx), 1, (0, 255, 0), -1)
    cv2.circle(mask, (maxy, maxx), 1, (0, 255, 0), -1)

    # dest_cloned = im
    #
    # # Seamlessly clone src into dst and put the results in output
    # if mode == 'normal_clone':
    #     dest_cloned = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)
    # elif mode == 'mixed_clone':
    #     dest_cloned = cv2.seamlessClone(obj, im, mask, center, cv2.MIXED_CLONE)
    # else:
    #     dest_cloned= cv2.seamlessClone(obj, im, mask, center, cv2.MONOCHROME_TRANSFER)
    taolao = (320, 420)
    normal_clone = cv2.seamlessClone(obj, im, im_bw, center, cv2.NORMAL_CLONE)
    normal_clone = unsharp_mask.unsharp_mask(normal_clone)

    mixed_clone = cv2.seamlessClone(obj, im, im_bw, center, cv2.MIXED_CLONE)
    mixed_clone = unsharp_mask.unsharp_mask(mixed_clone)

    monochrome_transfer = cv2.seamlessClone(obj, im, im_bw, center,
                                            cv2.MONOCHROME_TRANSFER)

    cv2.circle(normal_clone, center, 1, (0, 255, 255), -1)
    cv2.circle(mixed_clone, center, 1, (0, 255, 255), -1)
    cv2.circle(monochrome_transfer, center, 1, (0, 255, 255), -1)

    # Display image
    cv2.imshow("im", im)
    cv2.imshow("obj", obj)
    cv2.imshow("mask", mask)
    cv2.imshow("normal_clone", normal_clone)
    cv2.imshow("mixed_clone", mixed_clone)
    cv2.imshow("monochrome_transfer", monochrome_transfer)

    # cv2.imshow("dest_cloned", dest_cloned)
    cv2.waitKey(0)
Example #41
0
org = C.copy()

eye, mask, full_mask = cutbox(C, landmarks["right_eye"], 50)

# Blow out the mask a bit
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
mask = cv2.dilate(mask, kernel, iterations=10)

minds_eye = landmarks["right_eye_centroid"] + landmarks["left_eye_centroid"]
minds_eye /= 2

minds_eye[1] -= 100
minds_eye = minds_eye.round().astype(int)


#avg = C.img[full_mask>0].mean(axis=0).astype(np.uint8)
#C[full_mask>0] = avg
#C.show()
#pastebox(C, eye, mask, minds_eye)
#exit()

C.rgb = cv2.seamlessClone(eye.rgb, C.rgb, mask, tuple(minds_eye), cv2.NORMAL_CLONE)

#intensity = regions_of_high_intensity(org, blocksize=7, kernel_size=3)
#intensity.show()
#exit()

C.copy().resize(0.5).save("docs/images/tessa1_third_eye.png")

C.show()
Example #42
0
def pastebox(canvas, img, fmask, location):
    mask = np.zeros((*canvas.img.shape[:2], 3), canvas.img.dtype)
    mask[fmask] = [255] * 3
    canvas.img[:, :, :3] = cv2.seamlessClone(img[:, :, :3],
                                             canvas.img[:, :, :3], mask,
                                             tuple(location), cv2.MIXED_CLONE)
Example #43
0
def pastebox(canvas, source, mask, location):
    canvas.rgb = cv2.seamlessClone(
        source.rgb, canvas.rgb, mask, tuple(location), cv2.NORMAL_CLONE
    )
Example #44
0
import os

import cv2
import numpy as np

objDir="cropResizedLeitbake"
backgroundDir="background"
dstDir="combined"
count = 1
for obj in os.listdir(os.path.join(".",objDir)):
    for background in os.listdir(os.path.join(".",backgroundDir)):
        # Read images : src image will be cloned into dst
        backgroundimg=cv2.imread(os.path.join(".",backgroundDir,background))
        objimg=cv2.imread(os.path.join(".",objDir,obj))

        # Create an all white mask
        mask = 255 * np.ones(objimg.shape, objimg.dtype)

        # The location of the center of the src in the dst
        height, width, channels = backgroundimg.shape
        center = (int(round(width/2)), int(round(height/2)))

        print(objimg.shape)
        print(backgroundimg.shape)
        # Seamlessly clone src into dst and put the results in output
        normal_clone = cv2.seamlessClone(objimg, backgroundimg, mask, center, cv2.NORMAL_CLONE)
        #mixed_clone = cv2.seamlessClone(objimg, backgroundimg, mask, center, cv2.MIXED_CLONE)


        # Write results
        cv2.imwrite(os.path.join(".",dstDir,str(count)+".jpg"),normal_clone)
        #cv2.imwrite("./opencv-mixed-clone-example.jpg", mixed_clone)
        count=count+1
Example #45
0
import cv2
import numpy as np
src = cv2.imread("airplane.png")
dst = cv2.imread("sky.jpg")
src_mask = np.zeros(src.shape, src.dtype)
poly = np.array([[4, 80], [30, 54], [151, 63], [254, 37], [298, 90],
                 [272, 134], [43, 122]], np.int32)
cv2.fillPoly(src_mask, [poly], (255, 255, 255))
height, width, channels = dst.shape
center = (int(width / 2), int(height / 2))
output = cv2.seamlessClone(src, dst, src_mask, center, cv2.NORMAL_CLONE)
cv2.imwrite("result.jpg", output)
Example #46
0
def PoissonBlending(image, mask, center):
    src = cv2.imread(cfg.OUT_FOLDER + cfg.IMAGE + "_CompletedPoints.png")
    dst = cv2.imread(cfg.OUT_FOLDER + cfg.IMAGE + "_Complete.png")
    blendedImage = cv2.seamlessClone(src, dst, mask, center, cv2.MIXED_CLONE)
    return blendedImage
Example #47
0
__author__ = 'Allan'
import cv2
import numpy as np
import globalSetting as gs
# Read images : src image will be cloned into dst

#cap  = cv2.VideoCapture(0)

im = cv2.imread("/Users/Allan/Desktop/xAd_Resource/bgbg.jpg")
obj= cv2.imread("/Users/Allan/Desktop/xAd_Resource/jiaduobao.png")

#bg = cap.read()

# Create an all white mask
# (cv2.imdecode expects an encoded byte buffer, not an already-decoded
# image, so the object is cloned directly into the background read above)
mask = 255 * np.ones(obj.shape, obj.dtype)

# The location of the center of the src in the dst
height, width, channels = im.shape
center = (width // 2, height // 2)

# Seamlessly clone src into dst and put the results in output
normal_clone = cv2.seamlessClone(obj, im, mask, center, cv2.NORMAL_CLONE)
mixed_clone = cv2.seamlessClone(obj, im, mask, center, cv2.MIXED_CLONE)

# Write results
cv2.imwrite("/Users/Allan/Desktop/tao-normal-clone-example.jpg", normal_clone)
cv2.imwrite("/Users/Allan/Desktop/tao-mixed-clone-example.jpg", mixed_clone)
def MergeMaskedFace (predictor_func, predictor_input_shape,
                     face_enhancer_func,
                     xseg_256_extract_func,
                     cfg, frame_info, img_bgr_uint8, img_bgr, img_face_landmarks):

    img_size = img_bgr.shape[1], img_bgr.shape[0]
    img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)

    input_size = predictor_input_shape[0]
    mask_subres_size = input_size*4
    output_size = input_size
    if cfg.super_resolution_power != 0:
        output_size *= 4

    face_mat        = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type)
    face_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)

    if mask_subres_size == output_size:
        face_mask_output_mat = face_output_mat
    else:
        face_mask_output_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, mask_subres_size, face_type=cfg.face_type, scale= 1.0 + 0.01*cfg.output_face_scale)

    dst_face_bgr      = cv2.warpAffine( img_bgr        , face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
    dst_face_bgr      = np.clip(dst_face_bgr, 0, 1)

    dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
    dst_face_mask_a_0 = np.clip(dst_face_mask_a_0, 0, 1)

    predictor_input_bgr      = cv2.resize (dst_face_bgr, (input_size,input_size) )

    predicted = predictor_func (predictor_input_bgr)
    prd_face_bgr          = np.clip (predicted[0], 0, 1.0)
    prd_face_mask_a_0     = np.clip (predicted[1], 0, 1.0)
    prd_face_dst_mask_a_0 = np.clip (predicted[2], 0, 1.0)

    if cfg.super_resolution_power != 0:
        prd_face_bgr_enhanced = face_enhancer_func(prd_face_bgr, is_tanh=True, preserve_size=False)
        mod = cfg.super_resolution_power / 100.0
        prd_face_bgr = cv2.resize(prd_face_bgr, (output_size,output_size))*(1.0-mod) + prd_face_bgr_enhanced*mod
        prd_face_bgr = np.clip(prd_face_bgr, 0, 1)

    if cfg.super_resolution_power != 0:
        prd_face_mask_a_0     = cv2.resize (prd_face_mask_a_0,      (output_size, output_size), interpolation=cv2.INTER_CUBIC)
        prd_face_dst_mask_a_0 = cv2.resize (prd_face_dst_mask_a_0,  (output_size, output_size), interpolation=cv2.INTER_CUBIC)

    if cfg.mask_mode == 1: #dst
        wrk_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)
    elif cfg.mask_mode == 2: #learned-prd
        wrk_face_mask_a_0 = prd_face_mask_a_0
    elif cfg.mask_mode == 3: #learned-dst
        wrk_face_mask_a_0 = prd_face_dst_mask_a_0
    elif cfg.mask_mode == 4: #learned-prd*learned-dst
        wrk_face_mask_a_0 = prd_face_mask_a_0*prd_face_dst_mask_a_0
    elif cfg.mask_mode == 5: #learned-prd+learned-dst
        wrk_face_mask_a_0 = np.clip( prd_face_mask_a_0+prd_face_dst_mask_a_0, 0, 1)
    elif cfg.mask_mode >= 6 and cfg.mask_mode <= 9:  #XSeg modes
        if cfg.mask_mode == 6 or cfg.mask_mode == 8 or cfg.mask_mode == 9:
            # obtain XSeg-prd
            prd_face_xseg_bgr = cv2.resize (prd_face_bgr, (xseg_input_size,)*2, interpolation=cv2.INTER_CUBIC)
            prd_face_xseg_mask = xseg_256_extract_func(prd_face_xseg_bgr)
            X_prd_face_mask_a_0 = cv2.resize ( prd_face_xseg_mask, (output_size, output_size), interpolation=cv2.INTER_CUBIC)

        if cfg.mask_mode >= 7 and cfg.mask_mode <= 9:
            # obtain XSeg-dst
            xseg_mat            = LandmarksProcessor.get_transform_mat (img_face_landmarks, xseg_input_size, face_type=cfg.face_type)
            dst_face_xseg_bgr   = cv2.warpAffine(img_bgr, xseg_mat, (xseg_input_size,)*2, flags=cv2.INTER_CUBIC )
            dst_face_xseg_mask  = xseg_256_extract_func(dst_face_xseg_bgr)
            X_dst_face_mask_a_0 = cv2.resize (dst_face_xseg_mask, (output_size,output_size), interpolation=cv2.INTER_CUBIC)

        if cfg.mask_mode == 6:   #'XSeg-prd'
            wrk_face_mask_a_0 = X_prd_face_mask_a_0
        elif cfg.mask_mode == 7: #'XSeg-dst'
            wrk_face_mask_a_0 = X_dst_face_mask_a_0
        elif cfg.mask_mode == 8: #'XSeg-prd*XSeg-dst'
            wrk_face_mask_a_0 = X_prd_face_mask_a_0 * X_dst_face_mask_a_0
        elif cfg.mask_mode == 9: #learned-prd*learned-dst*XSeg-prd*XSeg-dst
            wrk_face_mask_a_0 = prd_face_mask_a_0 * prd_face_dst_mask_a_0 * X_prd_face_mask_a_0 * X_dst_face_mask_a_0

    wrk_face_mask_a_0[ wrk_face_mask_a_0 < (1.0/255.0) ] = 0.0 # get rid of noise

    # resize to mask_subres_size
    if wrk_face_mask_a_0.shape[0] != mask_subres_size:
        wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (mask_subres_size, mask_subres_size), interpolation=cv2.INTER_CUBIC)

    # process mask in local predicted space
    if 'raw' not in cfg.mode:
        # add zero pad
        wrk_face_mask_a_0 = np.pad (wrk_face_mask_a_0, input_size)

        ero  = cfg.erode_mask_modifier
        blur = cfg.blur_mask_modifier

        if ero > 0:
            wrk_face_mask_a_0 = cv2.erode(wrk_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(ero,ero)), iterations = 1 )
        elif ero < 0:
            wrk_face_mask_a_0 = cv2.dilate(wrk_face_mask_a_0, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(-ero,-ero)), iterations = 1 )

        # clip eroded/dilated mask in actual predict area
        # pad with half blur size in order to accurately fade to zero at the boundary
        clip_size = input_size + blur // 2

        wrk_face_mask_a_0[:clip_size,:] = 0
        wrk_face_mask_a_0[-clip_size:,:] = 0
        wrk_face_mask_a_0[:,:clip_size] = 0
        wrk_face_mask_a_0[:,-clip_size:] = 0

        if blur > 0:
            blur = blur + (1-blur % 2)
            wrk_face_mask_a_0 = cv2.GaussianBlur(wrk_face_mask_a_0, (blur, blur) , 0)

        wrk_face_mask_a_0 = wrk_face_mask_a_0[input_size:-input_size,input_size:-input_size]

        wrk_face_mask_a_0 = np.clip(wrk_face_mask_a_0, 0, 1)

    img_face_mask_a = cv2.warpAffine( wrk_face_mask_a_0, face_mask_output_mat, img_size, np.zeros(img_bgr.shape[0:2], dtype=np.float32), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC )[...,None]
    img_face_mask_a = np.clip (img_face_mask_a, 0.0, 1.0)
    img_face_mask_a [ img_face_mask_a < (1.0/255.0) ] = 0.0 # get rid of noise

    if wrk_face_mask_a_0.shape[0] != output_size:
        wrk_face_mask_a_0 = cv2.resize (wrk_face_mask_a_0, (output_size,output_size), interpolation=cv2.INTER_CUBIC)

    wrk_face_mask_a = wrk_face_mask_a_0[...,None]

    out_merging_mask_a = None
    if cfg.mode == 'original':
        return img_bgr, img_face_mask_a

    elif 'raw' in cfg.mode:
        if cfg.mode == 'raw-rgb':
            out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
            out_merging_mask_a = img_face_mask_a
        elif cfg.mode == 'raw-predict':
            out_img = prd_face_bgr
            out_merging_mask_a = wrk_face_mask_a
        else:
            raise ValueError(f"undefined raw type {cfg.mode}")

        out_img = np.clip (out_img, 0.0, 1.0 )
    else:
                
        # Process if the mask meets minimum size
        maxregion = np.argwhere( img_face_mask_a >= 0.1 )
        if maxregion.size != 0:
            miny,minx = maxregion.min(axis=0)[:2]
            maxy,maxx = maxregion.max(axis=0)[:2]
            lenx = maxx - minx
            leny = maxy - miny
            if min(lenx,leny) >= 4:
                wrk_face_mask_area_a = wrk_face_mask_a.copy()
                wrk_face_mask_area_a[wrk_face_mask_area_a>0] = 1.0
                
                if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
                    if cfg.color_transfer_mode == 1: #rct
                        prd_face_bgr = imagelib.reinhard_color_transfer ( np.clip( prd_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8),
                                                                          np.clip( dst_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8), )

                        prd_face_bgr = np.clip( prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                    elif cfg.color_transfer_mode == 2: #lct
                        prd_face_bgr = imagelib.linear_color_transfer (prd_face_bgr, dst_face_bgr)
                    elif cfg.color_transfer_mode == 3: #mkl
                        prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr, dst_face_bgr)
                    elif cfg.color_transfer_mode == 4: #mkl-m
                        prd_face_bgr = imagelib.color_transfer_mkl (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
                    elif cfg.color_transfer_mode == 5: #idt
                        prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr, dst_face_bgr)
                    elif cfg.color_transfer_mode == 6: #idt-m
                        prd_face_bgr = imagelib.color_transfer_idt (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
                    elif cfg.color_transfer_mode == 7: #sot-m
                        prd_face_bgr = imagelib.color_transfer_sot (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a, steps=10, batch_size=30)
                        prd_face_bgr = np.clip (prd_face_bgr, 0.0, 1.0)
                    elif cfg.color_transfer_mode == 8: #mix-m
                        prd_face_bgr = imagelib.color_transfer_mix (prd_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)

                if cfg.mode == 'hist-match':
                    hist_mask_a = np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

                    if cfg.masked_hist_match:
                        hist_mask_a *= wrk_face_mask_area_a

                    white =  (1.0-hist_mask_a)* np.ones ( prd_face_bgr.shape[:2] + (1,) , dtype=np.float32)

                    hist_match_1 = prd_face_bgr*hist_mask_a + white
                    hist_match_1[ hist_match_1 > 1.0 ] = 1.0

                    hist_match_2 = dst_face_bgr*hist_mask_a + white
                    hist_match_2[ hist_match_2 > 1.0 ] = 1.0

                    prd_face_bgr = imagelib.color_hist_match(hist_match_1, hist_match_2, cfg.hist_match_threshold ).astype(dtype=np.float32)

                if 'seamless' in cfg.mode:
                    # binary mask for cv2.seamlessClone: pick the lowest threshold
                    # (in 0.1 steps) that still yields a non-empty mask
                    img_face_seamless_mask_a = None
                    for i in range(1,10):
                        a = img_face_mask_a > i / 10.0
                        if len(np.argwhere(a)) == 0:
                            continue
                        img_face_seamless_mask_a = img_face_mask_a.copy()
                        img_face_seamless_mask_a[a] = 1.0
                        img_face_seamless_mask_a[img_face_seamless_mask_a <= i / 10.0] = 0.0
                        break

                out_img = cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
                out_img = np.clip(out_img, 0.0, 1.0)

                if 'seamless' in cfg.mode:
                    try:
                        #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                        l,t,w,h = cv2.boundingRect( (img_face_seamless_mask_a*255).astype(np.uint8) )
                        s_maskx, s_masky = int(l+w/2), int(t+h/2)
                        out_img = cv2.seamlessClone( (out_img*255).astype(np.uint8), img_bgr_uint8, (img_face_seamless_mask_a*255).astype(np.uint8), (s_maskx,s_masky) , cv2.NORMAL_CLONE )
                        out_img = out_img.astype(dtype=np.float32) / 255.0
                    except Exception as e:
                        #seamlessClone may fail in some cases
                        e_str = traceback.format_exc()

                        if 'MemoryError' in e_str:
                            raise Exception("Seamless fail: " + e_str) #reraise MemoryError in order to reprocess this data by other processes
                        else:
                            print ("Seamless fail: " + e_str)

                cfg_mp = 0.30 # hard-coded override of cfg.motion_blur_power / 100.0

                shrink_res = output_size # e.g. 512
                shrink_prd_face_dst_mask_a_0 = cv2.resize (prd_face_dst_mask_a_0,  (shrink_res, shrink_res), interpolation=cv2.INTER_CUBIC)     
                
                shrink_blur_size = (shrink_res // 32) + 1
                shrink_blur_size += (1 - shrink_blur_size % 2) # kernel size must be odd
                
                # Feather the mask, then re-binarize at 0.5 to round off jagged edges
                shrink_prd_face_dst_mask_a_0 = cv2.GaussianBlur(shrink_prd_face_dst_mask_a_0, (shrink_blur_size,shrink_blur_size) , 0)
                shrink_prd_face_dst_mask_a_0[shrink_prd_face_dst_mask_a_0 < 0.5] = 0.0
                shrink_prd_face_dst_mask_a_0[shrink_prd_face_dst_mask_a_0 >= 0.5] = 1.0
                
                # OpenCV >= 4: findContours returns (contours, hierarchy)
                cnts = cv2.findContours( shrink_prd_face_dst_mask_a_0.astype(np.uint8), cv2.RETR_LIST , cv2.CHAIN_APPROX_TC89_KCOS )
                
                # Get the largest found contour
                cnt = sorted(cnts[0], key = cv2.contourArea, reverse = True)[0].squeeze()

                center = np.mean(cnt, 0)
                cnt2 = cnt.copy().astype(np.float32)
                cnt2_c = center - cnt2
                cnt2_len = npla.norm(cnt2_c, axis=1, keepdims=True)
                cnt2_vec = cnt2_c / cnt2_len
                cnt2 += cnt2_vec * cnt2_len * cfg_mp # move each contour point towards the center by cfg_mp
                cnt2 = cnt2.astype(np.int32)
                
                img_cnt = LandmarksProcessor.transform_points (cnt, face_mat, True)
                img_cnt2 = LandmarksProcessor.transform_points (cnt2, face_mat, True)
                
                 
                # Anchor the image perimeter so the warp below leaves the frame border fixed
                h = img_size[1]
                w = img_size[0]
                w_pts_count = w // 16
                h_pts_count = h // 16
                xs = np.linspace(0, w, w_pts_count + 1)[...,None]
                ys = np.linspace(0, h, h_pts_count + 1)[...,None]
                perim_pts = np.concatenate (
                            (   np.concatenate ( [ xs, np.zeros_like(xs) ], axis=-1 ), # top edge
                                np.concatenate ( [ xs, np.full_like(xs, h) ], axis=-1 ), # bottom edge
                                np.concatenate ( [ np.zeros_like(ys), ys ], axis=-1 ), # left edge
                                np.concatenate ( [ np.full_like(ys, w), ys ], axis=-1 ) ), 0 ).astype(np.int32)


               
                img_cnt2 = np.concatenate ( (img_cnt2, perim_pts), 0 )
                img_cnt = np.concatenate ( (img_cnt, perim_pts), 0 )
                
                # moving-least-squares affine warp of img_bgr driven by the control-point pairs img_cnt -> img_cnt2
                morphed_img_bgr = mls_affine_deformation_inv( img_bgr, img_cnt, img_cnt2 )
                
                # debug visualization left over from development:
                # cv2.imshow("", (img_bgr*255).astype(np.uint8)); cv2.waitKey(0)
                # cv2.imshow("", (morphed_img_bgr*255).astype(np.uint8)); cv2.waitKey(0)
                # import code; code.interact(local=dict(globals(), **locals()))

                

                out_img = img_bgr*(1-img_face_mask_a) + (out_img*img_face_mask_a)

                if ('seamless' in cfg.mode and cfg.color_transfer_mode != 0) or \
                   cfg.mode == 'seamless-hist-match' or \
                   cfg_mp != 0 or \
                   cfg.blursharpen_amount != 0 or \
                   cfg.image_denoise_power != 0 or \
                   cfg.bicubic_degrade_power != 0:

                    out_face_bgr = cv2.warpAffine( out_img, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )

                    if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
                        if cfg.color_transfer_mode == 1:
                            out_face_bgr = imagelib.reinhard_color_transfer ( np.clip(out_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8),
                                                                              np.clip(dst_face_bgr*wrk_face_mask_area_a*255, 0, 255).astype(np.uint8) )
                            out_face_bgr = np.clip( out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                        elif cfg.color_transfer_mode == 2: #lct
                            out_face_bgr = imagelib.linear_color_transfer (out_face_bgr, dst_face_bgr)
                        elif cfg.color_transfer_mode == 3: #mkl
                            out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr, dst_face_bgr)
                        elif cfg.color_transfer_mode == 4: #mkl-m
                            out_face_bgr = imagelib.color_transfer_mkl (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
                        elif cfg.color_transfer_mode == 5: #idt
                            out_face_bgr = imagelib.color_transfer_idt (out_face_bgr, dst_face_bgr)
                        elif cfg.color_transfer_mode == 6: #idt-m
                            out_face_bgr = imagelib.color_transfer_idt (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)
                        elif cfg.color_transfer_mode == 7: #sot-m
                            out_face_bgr = imagelib.color_transfer_sot (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a, steps=10, batch_size=30)
                            out_face_bgr = np.clip (out_face_bgr, 0.0, 1.0)
                        elif cfg.color_transfer_mode == 8: #mix-m
                            out_face_bgr = imagelib.color_transfer_mix (out_face_bgr*wrk_face_mask_area_a, dst_face_bgr*wrk_face_mask_area_a)

                    if cfg.mode == 'seamless-hist-match':
                        out_face_bgr = imagelib.color_hist_match(out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

                    if cfg_mp != 0:
                        k_size = int(frame_info.motion_power*cfg_mp)
                        if k_size >= 1:
                            k_size = np.clip (k_size+1, 2, 50)
                            if cfg.super_resolution_power != 0:
                                k_size *= 2
                            out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)

                    if cfg.blursharpen_amount != 0:
                        out_face_bgr = imagelib.blursharpen ( out_face_bgr, cfg.sharpen_mode, 3, cfg.blursharpen_amount)

                    if cfg.image_denoise_power != 0:
                        n = cfg.image_denoise_power
                        while n > 0:
                            img_bgr_denoised = cv2.medianBlur(img_bgr, 5)
                            if int(n / 100) != 0:
                                img_bgr = img_bgr_denoised
                            else:
                                pass_power = (n % 100) / 100.0
                                img_bgr = img_bgr*(1.0-pass_power)+img_bgr_denoised*pass_power
                            n = max(n-10,0)

                    if cfg.bicubic_degrade_power != 0:
                        p = 1.0 - cfg.bicubic_degrade_power / 101.0
                        img_bgr_downscaled = cv2.resize (img_bgr, ( int(img_size[0]*p), int(img_size[1]*p ) ), interpolation=cv2.INTER_CUBIC)
                        img_bgr = cv2.resize (img_bgr_downscaled, img_size, interpolation=cv2.INTER_CUBIC)

                    new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, np.empty_like(img_bgr), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

                    out_img =  np.clip( img_bgr*(1-img_face_mask_a) + (new_out*img_face_mask_a) , 0, 1.0 )

                if cfg.color_degrade_power != 0:
                    out_img_reduced = imagelib.reduce_colors(out_img, 256)
                    if cfg.color_degrade_power == 100:
                        out_img = out_img_reduced
                    else:
                        alpha = cfg.color_degrade_power / 100.0
                        out_img = (out_img*(1.0-alpha) + out_img_reduced*alpha)

        out_merging_mask_a = img_face_mask_a

    return out_img, out_merging_mask_a
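
A note on the seamless branch above: it derives the clone center from cv2.boundingRect of the mask, which the code's own comment says matches what cv2.seamlessClone computes internally, so the center stays put between frames and the result does not jitter. A minimal standalone sketch of that trick (the helper name is illustrative, not from the code above):

import cv2
import numpy as np

def seamless_center(mask_u8):
    # same bounding-rect center that cv2.seamlessClone derives internally,
    # so consecutive frames with a stable mask get a stable clone center
    l, t, w, h = cv2.boundingRect(mask_u8)
    return (int(l + w / 2), int(t + h / 2))

mask = np.zeros((480, 640), np.uint8)
cv2.circle(mask, (320, 240), 100, 255, -1) # toy face mask
print(seamless_center(mask)) # -> (320, 240)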
Example #49
import cv2
import numpy as np

# y2 (binary sky mask), img (source photo) and wall (destination image) are assumed to be loaded already
mask_grund = y2.copy()

k = np.ones((5, 5), np.uint8)
skye = mask_grund.copy()
mask_grund = cv2.bitwise_not(mask_grund)

mask_grund = cv2.dilate(mask_grund, k, iterations=3)
mask_grund = cv2.erode(mask_grund, k, iterations=1)

contours, _ = cv2.findContours(mask_grund.copy(), cv2.RETR_LIST,
                               cv2.CHAIN_APPROX_NONE)  # OpenCV >= 4 signature

c = max(contours, key=cv2.contourArea)
x, y, wb, hb = cv2.boundingRect(c)

land_mask = mask_grund[y:y + hb, x:x + wb]
land = img[y:y + hb, x:x + wb]

center2 = (int(x + wb / 2), int(y + hb / 2))

output = cv2.seamlessClone(land, wall, land_mask, center2, cv2.NORMAL_CLONE)

cv2.imshow('img', img)
cv2.imshow('wall', wall)
cv2.imshow('output', output)
cv2.imshow('mask-sky', skye)
cv2.waitKey(0)

cv2.destroyAllWindows()
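
findContours changed signature between OpenCV 3.x, which returns (image, contours, hierarchy), and OpenCV 4.x, which returns (contours, hierarchy) — a common source of breakage in snippets like the one above. A small version-agnostic helper, in case both versions must be supported:

import cv2

def find_contours_compat(mask, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_NONE):
    # contours are the second-from-last element under both return conventions
    res = cv2.findContours(mask, mode, method)
    return res[-2]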
Example #50
# reference:
# http://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp/
# http://www.irisa.fr/vista/Papers/2003_siggraph_perez.pdf
# 2016-4-28

import cv2
import numpy as np

imgA = cv2.imread("wood-texture.jpg")
imgB = cv2.imread("iloveyouticket.jpg")

# create an all white mask
mask = 255 * np.ones(imgB.shape, imgB.dtype)

(hA, wA) = imgA.shape[:2]
center = (wA // 2, hA // 2) # seamlessClone expects integer pixel coordinates

c1 = cv2.seamlessClone(imgB, imgA, mask, center, cv2.NORMAL_CLONE)
c2 = cv2.seamlessClone(imgB, imgA, mask, center, cv2.MIXED_CLONE)
cv2.imshow("normal clone",c1)
cv2.imshow("mixed clone",c2)
cv2.waitKey(0)
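
One caveat with the all-white mask: cv2.seamlessClone raises an error when the source patch, placed at the given center, would extend past the destination, so the call above only works because imgB fits inside imgA around its center. A conservative sketch (assuming the source is strictly smaller than the destination) that clamps the center so the patch always fits:

def clamp_center(center, src_shape, dst_shape):
    # keep the whole src rectangle inside dst; conservative, since
    # seamlessClone only needs the mask's bounding box to fit
    (hs, ws), (hd, wd) = src_shape[:2], dst_shape[:2]
    x = min(max(center[0], ws // 2), wd - (ws - ws // 2))
    y = min(max(center[1], hs // 2), hd - (hs - hs // 2))
    return (int(x), int(y))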
Example #51
def texture_editing(prn, args):
    # read image
    image = imread(args.image_path)
    [h, w, _] = image.shape

    #-- 1. 3d reconstruction -> get texture. 
    pos = prn.process(image) 
    vertices = prn.get_vertices(pos)
    image = image/255.
    texture = cv2.remap(image, pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
    
    #-- 2. Texture Editing
    Mode = args.mode
    # change part of the texture (for data augmentation / selfie editing; the eyes are modified here as an example)
    if Mode == 0: 
        # load eye mask
        uv_face_eye = imread('Data/uv-data/uv_face_eyes.png', as_gray=True)/255. # 'as_grey' in scikit-image < 0.16
        uv_face = imread('Data/uv-data/uv_face.png', as_gray=True)/255.
        eye_mask = (abs(uv_face_eye - uv_face) > 0).astype(np.float32)

        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))

        # modify texture
        new_texture = texture*(1 - eye_mask[:,:,np.newaxis]) + ref_texture*eye_mask[:,:,np.newaxis]
    
    # change the whole face (face swap)
    elif Mode == 1: 
        # texture from another image or a processed texture
        ref_image = imread(args.ref_path)
        ref_pos = prn.process(ref_image)
        ref_image = ref_image/255.
        ref_texture = cv2.remap(ref_image, ref_pos[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
        ref_vertices = prn.get_vertices(ref_pos)
        new_texture = ref_texture # alternatively: (texture + ref_texture)/2.

    else:
        print('Wrong Mode! Mode should be 0 or 1.')
        exit()


    #-- 3. remap to input image.(render)
    vis_colors = np.ones((vertices.shape[0], 1))
    face_mask = render_texture(vertices.T, vis_colors.T, prn.triangles.T, h, w, c = 1)
    face_mask = np.squeeze(face_mask > 0).astype(np.float32)
    
    new_colors = prn.get_colors_from_texture(new_texture)
    new_image = render_texture(vertices.T, new_colors.T, prn.triangles.T, h, w, c = 3)
    new_image = image*(1 - face_mask[:,:,np.newaxis]) + new_image*face_mask[:,:,np.newaxis]

    # Poisson editing for blending the image
    vis_ind = np.argwhere(face_mask>0)
    vis_min = np.min(vis_ind, 0)
    vis_max = np.max(vis_ind, 0)
    center = (int((vis_min[1] + vis_max[1])/2+0.5), int((vis_min[0] + vis_max[0])/2+0.5))
    output = cv2.seamlessClone((new_image*255).astype(np.uint8), (image*255).astype(np.uint8), (face_mask*255).astype(np.uint8), center, cv2.NORMAL_CLONE)
   
    # save output
    imsave(args.output_path, output) 
    print('Done.')
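
The texture extraction here leans on cv2.remap: pos[:,:,:2] is a per-pixel map of (x, y) source coordinates, and remap samples the image at those positions. A tiny self-contained illustration of the same mechanism:

import cv2
import numpy as np

img = np.arange(12, dtype=np.float32).reshape(3, 4) # toy "image"
map_xy = np.array([[[1.0, 0.0],   # sample at (x=1, y=0)
                    [2.0, 1.0]]], # sample at (x=2, y=1)
                  dtype=np.float32)
out = cv2.remap(img, map_xy, None, interpolation=cv2.INTER_NEAREST)
print(out) # [[1. 6.]]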
Example #52
import json
import os
import random

import cv2
import numpy as np

# input_drivers (list of driver directories) and make_dir() are assumed to be defined elsewhere

def get_images(target):
  # Gather faces
  faces_driver = dict()
  for dr in input_drivers:
    with open(os.path.join(dr, '{}.json'.format(target))) as f:
      data = json.load(f)
    faces_driver[dr] = list()
    for key, value in data.items():
      faces_driver[dr].append((os.path.join(dr, key), value))

  # Create new driver:
  images = 2000
  curr_img = 0
  for i in range(3000):
    image_key = random.choice(list(faces_driver.keys()))
    while True:
      face_key = random.choice(list(faces_driver.keys()))
      if face_key != image_key: break
    print(face_key, image_key)
    # Get base image and center of face
    (path_image_base, rect) = random.choice(faces_driver[image_key])
    image = cv2.imread(path_image_base)
    center = (int((rect[0] + rect[2]) / 2.0), int((rect[1] + rect[3]) / 2.0))

    # Get new face
    (path_image_face, rect) = random.choice(faces_driver[face_key])
    face = cv2.imread(path_image_face)
    face = face[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2])]
    src_mask = np.full(face.shape, 255, image.dtype) # all-white mask
    # Clone seamlessly.
    try:
      output = cv2.seamlessClone(face, image, src_mask, center, cv2.NORMAL_CLONE)
      new_name = "new_driver/{}_{}/{}/".format(path_image_base[:4], path_image_face[:4], target)
      make_dir(new_name)
      cv2.imwrite(os.path.join(new_name, str(curr_img) + ".jpg"), output)
      curr_img += 1
    except Exception as e:
      print('error:', e) # seamlessClone fails when the patch does not fit

    print(i)
    if curr_img == images: break
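
The try/except around seamlessClone above mostly papers over two failure modes: degenerate face crops and centers whose patch would poke outside the destination frame. A hedged sketch of the same clone step with those cases checked explicitly (try_clone is an illustrative name, not part of the script):

import cv2
import numpy as np

def try_clone(face, image, center):
    # returns the blended image, or None when cloning cannot succeed
    if face is None or face.size == 0:
        return None # degenerate crop rectangle
    h, w = face.shape[:2]
    H, W = image.shape[:2]
    x, y = int(center[0]), int(center[1])
    if x - w // 2 < 0 or y - h // 2 < 0 or x - w // 2 + w > W or y - h // 2 + h > H:
        return None # patch would not fit at this center
    mask = np.full(face.shape, 255, image.dtype) # all-white mask, as above
    return cv2.seamlessClone(face, image, mask, (x, y), cv2.NORMAL_CLONE)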