Code Example #1
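A single-frame swap that appears to mirror the training pipeline: every face returned by extract_faces is mapped onto a 64×64 crop with a randomly jittered umeyama alignment (the same warp used during training), pushed through autoencoder_A, and the predicted face is warped and blended back into the original image before the result is downscaled.
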
import cv2
import numpy

# extract_faces, umeyama, blend_warp, get_align_mat and autoencoder_A are
# assumed to be provided by the surrounding faceswap project.
def face_swap(orig_image, down_scale):
    # extract face from original
    facelist = extract_faces(orig_image, 256)
    result_image = orig_image

    # iterate through all detected faces
    for (face, resized_image) in facelist:
        range_ = numpy.linspace(128 - 80, 128 + 80, 5)
        mapx = numpy.broadcast_to(range_, (5, 5))
        mapy = mapx.T

        # warp image like in the training
        mapx = mapx + numpy.random.normal(size=(5, 5), scale=5)
        mapy = mapy + numpy.random.normal(size=(5, 5), scale=5)

        src_points = numpy.stack([mapx.ravel(), mapy.ravel()], axis=-1)
        dst_points = numpy.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
        mat = umeyama(src_points, dst_points, True)[0:2]

        warped_resized_image = cv2.warpAffine(resized_image, mat,
                                              (64, 64)) / 255.0

        test_images = numpy.empty((1, ) + warped_resized_image.shape)
        test_images[0] = warped_resized_image

        # predict faceswap using encoder A
        figure = autoencoder_A.predict(test_images)

        new_face = numpy.clip(numpy.squeeze(figure[0]) * 255.0, 0,
                              255).astype('uint8')
        mat_inv = umeyama(dst_points, src_points, True)[0:2]

        # insert face into extracted face
        dest_face = blend_warp(new_face, resized_image, mat_inv)

        # create an inverse affine transform matrix to insert extracted face again
        mat = get_align_mat(face)
        mat = mat * (256 - 2 * 48)
        mat[:, 2] += 48
        mat_inv = cv2.invertAffineTransform(mat)
        # insert new face into original image
        result_image = blend_warp(dest_face, result_image, mat_inv)

    # return resulting image after downscale
    return cv2.resize(result_image, (result_image.shape[1] // down_scale,
                                     result_image.shape[0] // down_scale))
Code Example #2
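This example looks like the worker of a face-swap service: faces are located with an MTCNN-style detect_face, each box is squared, the padded 256×256 crop is run through one of three autoencoders selected by dectype, and the sharpened prediction is alpha-blended back into the frame before the JPEG is published through Redis. Note that it leans on module-level state: graph, r, the pnet/rnet/onet detector networks, the alpha blending mask, and the autoencoders themselves.
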
    def faceDetection(img, token, rid, dectype="trump"):
        global graph, r
        img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        bounding_boxes, points = detect_face.detect_face(
            img2, minsize, pnet, rnet, onet, threshold, factor)
        with graph.as_default():
            for box in bounding_boxes:
                try:
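                    # make the box roughly square: pad the short side,
                    # then trim 10% from each end of the long side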
                    if box[3] - box[1] < box[2] - box[0]:
                        delta = int((box[2] - box[0]) - (box[3] - box[1]) -
                                    (box[2] - box[0]) * 0.2) / 2
                        box[3] += delta
                        box[1] -= delta

                        box[2] -= int((box[2] - box[0]) * 0.1)
                        box[0] += int((box[2] - box[0]) * 0.1)
                    else:
                        delta = int((box[3] - box[1]) - (box[2] - box[0]) -
                                    (box[3] - box[1]) * 0.2) / 2
                        box[2] += delta
                        box[0] -= delta

                        box[3] -= int((box[3] - box[1]) * 0.1)
                        box[1] += int((box[3] - box[1]) * 0.1)

                    image = img[int(box[1]):int(box[3] + 1),
                                int(box[0]):int(box[2] + 1), :]

                    ss = image.shape
                    if (ss[0] * ss[1]) == 0:
                        continue
                    IMG_COL = 256
                    IMG_ROW = 256
                    border_v = 0
                    border_h = 0

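                    # pad the crop to a square with replicated edge pixels
                    # before the 256x256 resize below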
                    if (IMG_COL / IMG_ROW) >= (image.shape[0] /
                                               image.shape[1]):
                        border_v = int(
                            (((IMG_COL / IMG_ROW) * image.shape[1]) -
                             image.shape[0]) / 2)
                        image = cv2.copyMakeBorder(image, border_v, border_v,
                                                   0, 0, cv2.BORDER_REPLICATE,
                                                   0)
                    else:
                        border_h = int(
                            (((IMG_ROW / IMG_COL) * image.shape[0]) -
                             image.shape[1]) / 2)
                        image = cv2.copyMakeBorder(image, 0, 0, border_h,
                                                   border_h,
                                                   cv2.BORDER_REPLICATE, 0)

                except Exception as e:
                    print(e)
                    continue

                try:

                    ss = image.shape
                    #print(ss)
                    image = cv2.resize(image, (IMG_ROW, IMG_COL)) / 255.0
                    #cv2.normalize(image, image, 0, 1.0, norm_type=cv2.NORM_MINMAX)
                    test = np.empty((1, ) + image.shape, image.dtype)
                    test[0] = image
                    if (dectype == "trump"):
                        figure = autoencoder_A.predict(test)
                    elif (dectype == "swift"):
                        figure = autoencoder_A_swift.predict(test)
                    else:
                        figure = autoencoder_B.predict(test)
                    a1 = cv2.resize(alpha, (ss[1], ss[0]))
                    figure = cv2.resize(
                        figure[0, :, :, :],
                        (ss[1], ss[0]))  #[border_h:-border_h,:,:]
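                    # sharpen the resized prediction with a 3x3 high-pass
                    # kernel before blending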
                    figure = cv2.filter2D(
                        figure, -1,
                        np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]))
                    figure = np.clip(figure * 255.0, 0, 255).astype('uint8')
                    img[int(box[1]):int(box[1]) + ss[0],
                        int(box[0]
                            ):int(box[0] + ss[1]), :] = cv2.convertScaleAbs(
                                img[int(box[1]):int(box[1]) + ss[0],
                                    int(box[0]):int(box[0] + ss[1]), :] *
                                (1 - a1) + figure * a1)
                except ValueError as e:
                    print(e)

        jpg = cv2.imencode(".jpg", img)[1].tobytes()
        rid = rid + ":face"
        r.set(rid, jpg, ex=30)
        r.publish(token + "face", rid)
        #print(token,rid)
        return
Code Example #3
File: train.py Project: drat/WD_Faker
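A training-preview fragment from a masked variant of the architecture: the warped inputs are tiled into a preview window, then each autoencoder is run on both identities with an all-zero mask as its second input, and the four predictions are stacked into comparison grids.
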
        test_B_i = numpy.array(test_B_i).reshape((-1, 64, 64, 3))

        figWarped = numpy.stack([warped_A[:6],warped_B[:6]],axis=0 )
        figWarped = numpy.clip( figWarped * 255, 0, 255 ).astype('uint8')
        figWarped = stack_images( figWarped )
        cv2.imshow( "w", figWarped )

        zmask = numpy.zeros((test_A.shape[0],128,128,1),float)

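        # run each autoencoder on both identities; the models take and return
        # (image, mask) pairs, and the mask input here is all zeros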
        pred_a_a,pred_a_a_m = autoencoder_A.predict([test_A_i,zmask])
        pred_b_a,pred_b_a_m = autoencoder_B.predict([test_A_i,zmask])

        pred_a_b,pred_a_b_m = autoencoder_A.predict([test_B_i,zmask])
        pred_b_b,pred_b_b_m = autoencoder_B.predict([test_B_i,zmask])

        pred_a_a = pred_a_a[0:18,:,:,:3]
        pred_a_b = pred_a_b[0:18,:,:,:3]
        pred_b_a = pred_b_a[0:18,:,:,:3]
        pred_b_b = pred_b_b[0:18,:,:,:3]

        figure_A = numpy.stack([
            test_A,
            pred_a_a,
            pred_b_a,
            ], axis=1 )
Code Example #4
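The familiar minimal training loop: on every iteration each autoencoder is trained to restore warped faces of its own identity, weights are checkpointed every 100 epochs, and a 4×7 preview grid of inputs, reconstructions, and swaps is rendered with cv2.imshow.
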
    batch_size = 64
    warped_A, target_A = get_training_data( images_A, batch_size )
    warped_B, target_B = get_training_data( images_B, batch_size )

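    # one training step per identity: each autoencoder learns to restore
    # warped faces of its own subject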
    loss_A = autoencoder_A.train_on_batch( warped_A, target_A )
    loss_B = autoencoder_B.train_on_batch( warped_B, target_B )
    print( loss_A, loss_B )

    # checkpoint every 100 epochs; the preview samples are refreshed at the
    # same time, so the grid below shows the same 14 faces between checkpoints
    if epoch % 100 == 0:
        save_model_weights()
        test_A = target_A[0:14]
        test_B = target_B[0:14]

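    # preview columns: input, same-identity reconstruction, cross-identity swap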
    figure_A = numpy.stack([
        test_A,
        autoencoder_A.predict( test_A ),
        autoencoder_B.predict( test_A ),
        ], axis=1 )
    figure_B = numpy.stack([
        test_B,
        autoencoder_B.predict( test_B ),
        autoencoder_A.predict( test_B ),
        ], axis=1 )

    figure = numpy.concatenate( [ figure_A, figure_B ], axis=0 )
    figure = figure.reshape( (4,7) + figure.shape[1:] )
    figure = stack_images( figure )

    figure = numpy.clip( figure * 255, 0, 255 ).astype('uint8')

    cv2.imshow( "", figure )
Code Example #5
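A quick qualitative check: six faces of identity A are shown next to their reconstructions from autoencoder_A and their swaps from autoencoder_B, one OpenCV window per image; the commented-out lines suggest the author experimented with normalizing to [0, 1] before prediction. A cv2.waitKey call presumably follows in the full script.
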
wrap, a_faces = get_training_data(imgsA, 6)
wrap2, b_faces = get_training_data(imgsB, 6)

#input()
# show original image
for (index, img) in enumerate(a_faces):
    winn = "original_image1_" + str(index)
    cv2.namedWindow(winn)
    cv2.moveWindow(winn, 10, index * 170)
    cv2.imshow(winn, img)

#a_faces = a_faces.astype('float32') / 255.
#wrap = wrap.astype('float32') / 255.

decoded_imgs = autoencoder_A.predict(a_faces)
#decoded_imgs = (decoded_imgs *255).astype(np.uint8)
print(decoded_imgs.shape)
for (index, img) in enumerate(decoded_imgs):
    winn = "dec_image1_" + str(index)
    cv2.namedWindow(winn)
    cv2.moveWindow(winn, 130, index * 170)
    cv2.imshow(winn, img)
decoded_imgs = autoencoder_B.predict(a_faces)
#decoded_imgs = (decoded_imgs*255).astype(np.uint8)

for (index, img) in enumerate(decoded_imgs):
    winn = "dec_image4_" + str(index)
    cv2.namedWindow(winn)
    cv2.moveWindow(winn, 250, index * 170)
    cv2.imshow(winn, img)
Code Example #6
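The tail of a per-face loop: the detected patch is squared, centre-cropped by roughly 10% per side, resized to 64×64 and scaled to [0, 1] for inference, and the predicted face is resized back and stitched into a copy of the frame at the corresponding coordinates.
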
        else:
            offset = int((face_h - face_w) / 2)
            face_patch = face_patch[offset:offset + face_w, :, :]

        # Crop face patch
        offset_crop = face_patch.shape[0] // 10
        face_patch_crop = face_patch[offset_crop:face_patch.shape[0] -
                                     offset_crop,
                                     offset_crop:face_patch.shape[1] -
                                     offset_crop, :]
        frame_test = cv2.resize(face_patch_crop, (64, 64),
                                interpolation=cv2.INTER_AREA)
        frame_test = frame_test / 255.0

        # Inference
        frame_hat = autoencoder_A.predict(frame_test.reshape((1, 64, 64, 3)))
        frame_hat = frame_hat[0, :, :, :]
        frame_hat = np.clip(frame_hat * 255, 0, 255).astype('uint8')

        # Stitch
        # cv2.resize expects (width, height); the swapped order is harmless
        # here because face_patch_crop is square after the centre crop above
        face_stitch = cv2.resize(
            frame_hat, (face_patch_crop.shape[0], face_patch_crop.shape[1]),
            interpolation=cv2.INTER_AREA)
        frame_stitch = np.copy(frame)

        if face_h < face_w:
            x_start = face_x + offset_crop
            x_end = face_x + face_w - offset_crop
            y_start = face_y + offset + offset_crop
            y_end = face_y + offset + face_h - offset_crop
        else: