Example #1
import cv2
import numpy

def convert_face(inputqueue, outputqueue, swap_model, thread_number,
                 double_pass):
    from model import autoencoder_A
    from model import autoencoder_B
    from model import encoder, decoder_A, decoder_B

    encoder.load_weights("models/encoder.h5")
    decoder_A.load_weights("models/decoder_A.h5")
    decoder_B.load_weights("models/decoder_B.h5")

    if swap_model:
        autoencoder, otherautoencoder = autoencoder_A, autoencoder_B
    else:
        autoencoder, otherautoencoder = autoencoder_B, autoencoder_A

    while True:
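        # Block until the next work item; None is the shutdown sentinel.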
        item = inputqueue.get()
        if item is None:
            break
        image, mat, sourceFace, face, framepts = item

        # zmask is assumed to be defined at module level (a mask input for
        # the masked autoencoder); it is not shown in this snippet.
        new_face_rgb, new_face_m = autoencoder.predict([face, zmask])
        if double_pass:
            # Feed the original prediction back into the network for a second round.
            new_face_rgb = new_face_rgb.reshape((128, 128, 3))
            new_face_rgb = cv2.resize(new_face_rgb, (64, 64))
            new_face_rgb = numpy.expand_dims(new_face_rgb, 0)
            new_face_rgb, _ = autoencoder.predict([new_face_rgb, zmask])

        _, other_face_m = otherautoencoder.predict([face, zmask])

        outputqueue.put((image, mat, sourceFace, new_face_rgb, new_face_m,
                         other_face_m, framepts))

    # Propagate the shutdown sentinel to every downstream worker.
    for i in range(thread_number):
        outputqueue.put(None)
Example #2
import cv2
import time

# encoder, decoder_A/B, autoencoder_A and applyswap are assumed to be
# provided by the project's model module (not shown in this snippet).
def applymodel(vidpath, outpath, mdlpath):
    try:
        encoder.load_weights(mdlpath + r"/encoder.h5")
        decoder_A.load_weights(mdlpath + r"/decoder_A.h5")
        decoder_B.load_weights(mdlpath + r"/decoder_B.h5")
        print("Model successfully loaded")
    except Exception:
        print("Error: no model found")

    cap = cv2.VideoCapture(vidpath)
    
    # Match the output writer to the source geometry and frame rate.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    outfilenm = outpath + r"/out.avi"
    writer = cv2.VideoWriter(outfilenm, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                             fps, (width, height))

    for fr in range(frames_count):
        stat, frame = cap.read()
        if not stat:
            # Stop early if the stream ends before the reported frame count.
            break
        start_time = time.time()
        writer.write(applyswap(frame, autoencoder_A))
        print(str(fr) + " --- %s seconds ---" % (time.time() - start_time))
    cap.release()
    writer.release()
Example #3
def Train(Odir, TAdir, Mdir, ep, sv):
    imgsA = loadImgs(Odir) / 255.0
    imgsB = loadImgs(TAdir) / 255.0
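    # Shift set A's per-channel mean to match set B so colors line up.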
    imgsA += imgsB.mean(axis=(0, 1, 2)) - imgsA.mean(axis=(0, 1, 2))
    try:
        encoder.load_weights(Mdir + "/encoder.h5")
        decoder_A.load_weights(Mdir + "/decoder_A.h5")
        decoder_B.load_weights(Mdir + "/decoder_B.h5")
        print("loaded existing model")
    except Exception:
        print("No existing model")

    for epoch in range(int(ep)):
        # get next training batch
        batch_size = 64
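        # warped_* are randomly distorted inputs; target_* are the aligned originals.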
        warped_A, target_A = get_training_data(imgsA, batch_size)
        warped_B, target_B = get_training_data(imgsB, batch_size)

        # train and calculate loss
        loss_A = autoencoder_A.train_on_batch(warped_A, target_A)
        loss_B = autoencoder_B.train_on_batch(warped_B, target_B)

        if epoch % int(sv) == 0:
            print("Training loss at epoch " + str(epoch) + ":")
            print(loss_A, loss_B)

            # save model weights every sv epochs
            save_model_weights(Mdir)
            test_A = target_A[0:14]
            test_B = target_B[0:14]
            # create a preview image and write it to disk (not shown here)

    # save our model after training has finished
    save_model_weights(Mdir)
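
This example calls a save_model_weights(Mdir) helper that the snippet does not include. A minimal sketch of what it presumably does, following the no-argument save_model_weights() defined in Examples #7 and #10:

def save_model_weights(Mdir):
    encoder.save_weights(Mdir + "/encoder.h5")
    decoder_A.save_weights(Mdir + "/decoder_A.h5")
    decoder_B.save_weights(Mdir + "/decoder_B.h5")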
Example #4
import imageio
import cv2
import numpy as np
import face_recognition
import json
import sys 
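# Make the bundled f_swap package (model, utils) importable.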
sys.path.append('f_swap')
from pathlib import Path

from utils import get_image_paths

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B
encoder.load_weights("f_swap/models/encoder.h5")
decoder_A.load_weights("f_swap/models/decoder_A.h5")
decoder_B.load_weights("f_swap/models/decoder_B.h5")

filename = "short_hamilton_clip.mp4"
fr = open('myu_s.json', 'r')

reader = imageio.get_reader(filename,  'ffmpeg')
fps = reader.get_meta_data()['fps']

def get_center(top, right, bottom, left):
    # Center of a face_recognition bounding box (top, right, bottom, left).
    center_x = int((right + left) / 2)
    center_y = int((top + bottom) / 2)
    return center_x, center_y

#160x160 to 256x256
def get_m(top, right, bottom, left):
    pass  # body not included in this example
Example #5
import logging
import datetime
import json
import math
import urllib
import config

from model import autoencoder_A, autoencoder_B, autoencoder_A_swift
from model import encoder, decoder_A, decoder_B, decoder_A_swift, encoder_swift
from keras import backend as K
import tensorflow as tf
import redis

global graph, r

encoder.load_weights("models/encoder256.h5")
decoder_A.load_weights("models/decoder256_A.h5")
decoder_B.load_weights("models/decoder256_B.h5")

# Load the second ("swift") model set; its B decoder is unused here.
encoder_swift.load_weights("models/encoder256_ENCODER.h5")
decoder_A_swift.load_weights("models/decoder256_A_TAYLOR.h5")
#decoder_B_swift.load_weights( "models/decoder256_B_swift.h5" )

# Capture the default graph so worker threads can run predictions on it.
graph = tf.get_default_graph()

r = redis.Redis.from_url(config.REDIS_CONFIG['uri'])


#logging.basicConfig(level=logging.DEBUG)
def tensorThread(something):
    def url_to_image(url):
        try:
Example #6
import cv2
import numpy
from pathlib import Path

from utils import get_image_paths

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

videopath = "../../../data/faceswap/video/"
modelpath = "../../../data/faceswap/"

encoder.load_weights(modelpath + "models/encoder.h5")
decoder_A.load_weights(modelpath + "models/decoder_A.h5")
decoder_B.load_weights(modelpath + "models/decoder_B.h5")

images_A = get_image_paths("../../../data/faceswap/original3/a")
images_B = get_image_paths("../../../data/faceswap/original3/b")


def convert_one_image(autoencoder, image):
    assert image.shape == (256, 256, 3)
    # Crop the central 160x160 region of the 256x256 input frame.
    crop = slice(48, 208)
    face = image[crop, crop]
    face = cv2.resize(face, (64, 64))
    face = numpy.expand_dims(face, 0)
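    # The autoencoder takes and returns 64x64 images scaled to [0, 1].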
    new_face = autoencoder.predict(face / 255.0)[0]
    new_face = numpy.clip(new_face * 255, 0, 255).astype(image.dtype)
    new_face = cv2.resize(new_face, (160, 160))
    new_image = image.copy()
    # Assumed completion: paste the swapped face back over the cropped region.
    new_image[crop, crop] = new_face
    return new_image
Example #7
import cv2
import numpy

from utils import get_image_paths, load_images, stack_images
from training_data import get_training_data

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

# Resume from saved weights if they exist; otherwise start fresh.
try:
    encoder.load_weights("/input/data/models/encoder.h5")
    decoder_A.load_weights("/input/data/models/decoder_A.h5")
    decoder_B.load_weights("/input/data/models/decoder_B.h5")
except Exception:
    pass

def save_model_weights():
    encoder.save_weights("/input/data/models/encoder.h5")
    decoder_A.save_weights("/input/data/models/decoder_A.h5")
    decoder_B.save_weights("/input/data/models/decoder_B.h5")
    print("saved model weights")

images_A = get_image_paths("/input/data/data/trump")
images_B = get_image_paths("/input/data/data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0

# Shift set A's per-channel mean to match set B for color consistency.
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

print( "press 'q' to stop training and save model" )
Example #8
import argparse
import sys
import cv2
import json
import numpy
from pathlib import Path
from tqdm import tqdm
sys.path.append("../lib/")
from model import autoencoder_A, autoencoder_B, encoder, decoder_A, decoder_B

encoder.load_weights("../models/encoder.h5")
decoder_A.load_weights("../models/decoder_A.h5")
decoder_B.load_weights("../models/decoder_B.h5")


def convert_one_image(autoencoder, image, mat):
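    # Swap one face: align it, run the autoencoder, then build a blend mask.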
    size = 64
    image_size = image.shape[1], image.shape[0]

    face = cv2.warpAffine(image, mat * size, (size, size))
    face = numpy.expand_dims(face, 0)
    new_face = autoencoder.predict(face / 255.0)[0]
    new_face = numpy.clip(new_face * 255, 0, 255).astype(image.dtype)

    # Build a circular blend mask in face space, then warp it back into
    # frame space with the inverse of the alignment transform.
    face_mask = numpy.zeros(new_face.shape, dtype=image.dtype)
    image_mask = numpy.zeros(image.shape, dtype=image.dtype)
    cv2.circle(face_mask, (size // 2, size // 2), int(size * 0.6),
               (255, 255, 255), -1)

    cv2.warpAffine(face_mask, mat * size, image_size, image_mask,
                   cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT)
Example #9
from keras.models import load_model
from MdlTrain import *
import numpy as np
import cv2

from model import autoencoder_B,autoencoder_A
from model import encoder, decoder_A, decoder_B

image_size = 64

encoder.load_weights("nwmdlt/encoder.h5")
decoder_A.load_weights("nwmdlt/decoder_A.h5")
decoder_B.load_weights("nwmdlt/decoder_B.h5")

imgsA = loadImgs(r"C:\Users\Q\Documents\Deepswap\Oout2") / 255
imgsB = loadImgs(r"C:\Users\Q\Documents\Deepswap\TaOutT") / 255


a_faces = np.empty((5, 128, 128, 3))
b_faces = np.empty((5, 128, 128, 3))


Example #10
import cv2
import numpy

from utils import get_image_paths, load_images, stack_images
from training_data import get_training_data

from model import autoencoder_A
from model import autoencoder_B
from model import encoder, decoder_A, decoder_B

# Resume from saved weights if they exist; otherwise start fresh.
try:
    encoder.load_weights("models/encoder.h5")
    decoder_A.load_weights("models/decoder_A.h5")
    decoder_B.load_weights("models/decoder_B.h5")
except Exception:
    pass

def save_model_weights():
    encoder.save_weights("models/encoder.h5")
    decoder_A.save_weights("models/decoder_A.h5")
    decoder_B.save_weights("models/decoder_B.h5")
    print("saved model weights")

images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0

images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

print( "press 'q' to stop training and save model" )