Example #1
def get(source_image, driving_video):
    source_image = imageio.imread(source_image)
    driving_video = imageio.mimread(driving_video)

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    generator, kp_detector = load_checkpoints(
        config_path='./first-order-motion-model/config/vox-256.yaml',
        checkpoint_path='./vox-cpk.pth.tar')

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('./files/generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder
    return True
Example #2
    def run(self, opt):
        if opt.crop_image:
            source_image = image_crop(opt.source_image, opt.crop_image_padding)
        else:
            source_image = imageio.imread(opt.source_image)

        reader = imageio.get_reader(opt.driving_video)
        source_image = resize(source_image, (256, 256))[..., :3]
        fps = reader.get_meta_data()['fps']
        driving_video = []
        try:
            for im in reader:
                driving_video.append(im)
        except RuntimeError:
            pass
        reader.close()

        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in driving_video
        ]
        generator, kp_detector = load_checkpoints(
            config_path=opt.config, checkpoint_path=opt.checkpoint)

        if opt.find_best_frame or opt.best_frame is not None:
            i = opt.best_frame if opt.best_frame is not None else find_best_frame(
                source_image, driving_video, cpu=opt.cpu)
            print("Best frame: " + str(i))
            driving_forward = driving_video[i:]
            driving_backward = driving_video[:(i + 1)][::-1]
            predictions_forward = make_animation(
                source_image,
                driving_forward,
                generator,
                kp_detector,
                relative=opt.relative,
                adapt_movement_scale=opt.adapt_scale,
                cpu=opt.cpu)
            predictions_backward = make_animation(
                source_image,
                driving_backward,
                generator,
                kp_detector,
                relative=opt.relative,
                adapt_movement_scale=opt.adapt_scale,
                cpu=opt.cpu)
            predictions = predictions_backward[::-1] + predictions_forward[1:]
        else:
            predictions = make_animation(source_image,
                                         driving_video,
                                         generator,
                                         kp_detector,
                                         relative=opt.relative,
                                         adapt_movement_scale=opt.adapt_scale,
                                         cpu=opt.cpu)

        #predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
        imageio.mimsave(opt.output,
                        [img_as_ubyte(frame) for frame in predictions],
                        fps=fps)
Example #3
    def __init__(self, config_path, checkpoint_path):
        """
        Initializes the ImageAnimater object with a pretrained model and
        its corresponding config file.
        """
        generator, kp_detector = fomm_demo.load_checkpoints(
            config_path=config_path, checkpoint_path=checkpoint_path)
        # Store into object
        self.generator = generator
        self.keypoint_detector = kp_detector
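    # A hypothetical usage sketch of the wrapper above (the class name is taken
    # from its docstring; the paths are assumptions):
    #
    #   animator = ImageAnimater(config_path='config/vox-256.yaml',
    #                            checkpoint_path='vox-cpk.pth.tar')
    #   # animator.generator / animator.keypoint_detector hold the loaded model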
Example #4
    def __init__(self):
        if (LandMark2FaceModel == "firstorder"):
            source_image = imageio.imread(
                os.path.join("/",
                             *root_file_path.split("/")[:-1],
                             "REF/current/ref.png"))
            self.source_image = resize(source_image, (256, 256))[..., :3]
            self.generator, self.kp_detector = load_checkpoints(
                config_path=os.path.join(root_file_path,
                                         "firstorder/config/vox-adv-256.yaml"),
                checkpoint_path=os.path.join(root_file_path,
                                             "firstorder/vox-cpk.pth.tar"))
Example #5
    def __init__(self):
        DeepFakeApi.initialized = True

        if not os.path.isfile('checkpoints/vox-cpk.pth.tar'):
            raise FileNotFoundError(
                "Could not find training data. Check checkpoints folder")

        print("Loading checkpoints")
        DeepFakeApi.generator, DeepFakeApi.kp_detector = load_checkpoints(
            "dependencies/first-order-model/config/vox-256.yaml",
            "checkpoints/vox-cpk.pth.tar",
            cpu=False)
Example #6
def get_predictions(photo, video):
    t = time()
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path=checkpoint_path)
    # '/content/gdrive/My Drive/DepFak/vox-adv-cpk.pth.tar'
    predictions = make_animation(photo,
                                 video,
                                 generator,
                                 kp_detector,
                                 relative=True)
    printer.log('PREDICTION TIME', time() - t)
    return predictions
Example #7
def animate_image(source_image, driving_video, savepath, configpath,
                  checkpointpath):
    generator, kp_detector = load_checkpoints(config_path=configpath,
                                              checkpoint_path=checkpointpath)
    """**Perform image animation**"""

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave(savepath, [img_as_ubyte(frame) for frame in predictions])
Example #8
def generate():
    source_image = imageio.imread(image_path)
    driving_video = imageio.mimread(video_path)
    #Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
    #Create a model and load checkpoints
    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                checkpoint_path='first-order-motion-model/vox-cpk.pth.tar')
    #Perform image animation
    from demo import make_animation
    from skimage import img_as_ubyte
    predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
    #save resulting video
    imageio.mimsave(output_path, [img_as_ubyte(frame) for frame in predictions])
    print('video saved at ' + output_path)
Example #9
def convert_image_to_video(uploadimage, outputPath):
    print("다메다메 밈 작업중", end=' ')
    warnings.filterwarnings("ignore")

    source_image = imageio.imread(uploadimage)
    driving_video = imageio.mimread(os.path.join(config.dame_path, '04.mp4'))

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    def display(source, driving, generated=None):
        fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

        ims = []
        for i in range(len(driving)):
            cols = [source, driving[i]]
            if generated is not None:
                cols.append(generated[i])
            im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
            plt.axis('off')
            ims.append([im])

        ani = animation.ArtistAnimation(fig,
                                        ims,
                                        interval=50,
                                        repeat_delay=1000)
        plt.close()
        return ani

    generator, kp_detector = load_checkpoints(
        config_path=os.path.join(config.first_order_model_path,
                                 'config/vox-256.yaml'),
        checkpoint_path=os.path.join(config.dame_path, 'vox-cpk.pth.tar'))

    # make video
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)
    imageio.mimsave(outputPath, [img_as_ubyte(frame) for frame in predictions])
    print('done')
    return outputPath
Example #10
def deepfake_m(source_image, driving_video):
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml',
        checkpoint_path=
        '/content/gdrive/My Drive/first-order-motion-model/vox-cpk.pth.tar')
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('../generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder

    # HTML(display(source_image, driving_video, predictions).to_html5_video())

    return predictions
Example #11
def prepare_data(user_id: int):
    data = dict()
    source = user_videos[user_id]['source']
    target = user_videos[user_id]['target']

    audio_clip = AudioFileClip(target)
    data['audio'] = audio_clip

    if source.endswith('.jpg'):
        crop_image(source)
    else:
        crop_video(source)
    try:
        source_reader = imageio.get_reader('crop_' + source)
    except FileNotFoundError:
        print("Didn't find cropped video")
        source_reader = imageio.get_reader(source)

    if source.endswith('.jpg'):
        data['source_media'] = resize(next(iter(source_reader)), (256, 256))[..., :3]
        data['photo'] = True
    else:
        data['source_media'] = read_video(source_reader)
        data['photo'] = False

    crop_video(target)
    try:
        target_reader = imageio.get_reader('crop_' + target)
    except FileNotFoundError:
        print("Didn't find cropped video")
        target_reader = imageio.get_reader(target)
    fps = target_reader.get_meta_data()['fps']

    data['fps'] = fps
    data['target_media'] = read_video(target_reader)

    generator, kp_detector = load_checkpoints(config_path=CONFIG,
                                              checkpoint_path=CHECKPOINT,
                                              cpu=CPU)
    data['generator'] = generator
    data['kp_detector'] = kp_detector
    return data
Example #12
def gen_dpfk(no_nvidia_gpu):
    if no_nvidia_gpu:
        print(
            "Using CPU for further calculations... (this will be much slower)")

    print("Reading template and input image...")

    source_image = imageio.imread('../input_image.png')
    driving_video = imageio.mimread('bakamitai_template.mp4')

    #Resize image and video to 256x256

    print("Resizing inputs...")

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    print("Generating video... (this may take a while)")

    generator, kp_detector = demo.load_checkpoints(
        config_path='first-order-model/config/vox-256.yaml',
        checkpoint_path='vox-cpk.pth.tar',
        cpu=no_nvidia_gpu)

    predictions = demo.make_animation(source_image,
                                      driving_video,
                                      generator,
                                      kp_detector,
                                      relative=True,
                                      cpu=no_nvidia_gpu)

    print("Saving video...")

    imageio.mimsave('generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
Example #13
def video_generator():

    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')

    print(" generator and detector loaded ...  ")
    source_image = 'me.png'
    driving_video = '04.mp4'
    source_image = imageio.imread(source_image)
    reader = imageio.get_reader(driving_video)

    #Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    fps = reader.get_meta_data()['fps']
    driving_video = []
    try:
        for im in reader:
            driving_video.append(im)
    except RuntimeError:
        pass
    reader.close()
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True,
                                 adapt_movement_scale=True)
    #save resulting video
    print('generating animated video ')
    imageio.mimsave('generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions],
                    fps=fps)
    print('video saved ...')
Example #14
def generate_video(filename1):
    path1 = "static/profile_pics/" + filename1  #getting path
    source_image = imageio.imread(path1)  #loading image
    driving_video = imageio.mimread('t3.mp4')  #loading template

    # Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]  #resizing image
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]  #resizing template

    # loading checkpoints
    print("loading checkpoints")
    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')

    # performing image animation and saving video

    print("importing demo and skimage")
    from demo import make_animation
    from skimage import img_as_ubyte

    print("calling make_animation")
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    # save resulting video
    # i_name , f_ext = os.path.splitext(filename1)
    # videoName = "static/generated_with_api_" + i_name + "mp4"
    imageio.mimsave("static/jahnavi_t4.mp4",
                    [img_as_ubyte(frame) for frame in predictions])
Example #15
def convert():

    image_file = tk.filedialog.askopenfilename()

    convert_button.destroy()

    source_image = imageio.imread(image_file)
    driving_video = imageio.mimread('files/04.mp4')

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    def display(source, driving, generated=None):
        fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

        ims = []
        for i in range(len(driving)):
            cols = [source]
            cols.append(driving[i])
            if generated is not None:
                cols.append(generated[i])
            im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
            plt.axis('off')
            ims.append([im])

        ani = animation.ArtistAnimation(fig,
                                        ims,
                                        interval=50,
                                        repeat_delay=1000)
        plt.close()
        return ani

    HTML(display(source_image, driving_video).to_html5_video())
    """**Create a model and load checkpoints**"""

    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml',
        checkpoint_path='files/vox-cpk.pth.tar')
    """**Perform image animation**"""

    from demo import make_animation
    from skimage import img_as_ubyte

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('../generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder

    HTML(display(source_image, driving_video, predictions).to_html5_video())
    """**In the cell above we use relative keypoint displacement to animate the objects. We can use absolute coordinates instead,  but in this way all the object proporions will be inherited from the driving video. For example Putin haircut will be extended to match Trump haircut.**"""

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=False,
                                 adapt_movement_scale=True)
    HTML(display(source_image, driving_video, predictions).to_html5_video())
    """## Running on your data

    **First we need to crop a face from both source image and video, while simple graphic editor like paint can be used for cropping from image. Cropping from video is more complicated. You can use ffpmeg for this.**
    """
    """**Another posibility is to use some screen recording tool, or if you need to crop many images at ones use face detector(https://github.com/1adrianb/face-alignment) , see https://github.com/AliaksandrSiarohin/video-preprocessing for preprcessing of VoxCeleb.**"""

    source_image = imageio.imread('files/09.png')
    driving_video = imageio.mimread('hinton.mp4', memtest=False)

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True,
                                 adapt_movement_scale=True)

    HTML(display(source_image, driving_video, predictions).to_html5_video())
Example #16
source_image = imageio.imread(
    'C:/Users/Nandhakumar/Documents/GitHub/Deepfake-meme-bot/JS_FrontEnd/disc_image.png'
)
driving_video = imageio.mimread(
    'C:/Users/Nandhakumar/Documents/GitHub/Deepfake-meme-bot/JS_FrontEnd/disc_vid.mp4'
)

# Resize image and video to 256x256

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]

generator, kp_detector = load_checkpoints(
    config_path=
    'C:/Users/Nandhakumar/Documents/GitHub/Deepfake-meme-bot/PY_BackEnd/first-order-model/config/vox-256.yaml',
    checkpoint_path=
    'C:/Users/Nandhakumar/Documents/GitHub/Deepfake-meme-bot/PY_BackEnd/first-order-model/vox-cpk.pth.tar'
)

from demo import make_animation
from skimage import img_as_ubyte

predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=True)

#save resulting video (the mimsave call is truncated in the original listing;
#completed here following the other examples, with an assumed output path)

imageio.mimsave('generated.mp4',
                [img_as_ubyte(frame) for frame in predictions])
Example #17
def run(im, vid, out):
    global status_text
    if im is None or vid is None or out == "":
        print("Please complete required fields.")
        return
    print(im.name, vid.name, out)
    # source_image = imageio.imread('C:/Users/Artin/first-order-model/MotionModels/armanFace3.png')
    # driving_video = imageio.mimread('C:/Users/Artin/first-order-model/MotionModels/Dam.mp4')
    status_text.set("Working")
    #update()
    try:
        source_image = imageio.imread(str(im.name))
        driving_video = imageio.mimread(str(vid.name))

        #Resize image and video to 256x256

        source_image = resize(source_image, (256, 256))[..., :3]
        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in driving_video
        ]

        # def display(source, driving, generated=None):
        #     fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))
        #
        #     ims = []
        #     for i in range(len(driving)):
        #         cols = [source]
        #         cols.append(driving[i])
        #         if generated is not None:
        #             cols.append(generated[i])
        #         im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        #         plt.axis('off')
        #         ims.append([im])
        #
        #     ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
        #     plt.close()
        #     return ani

        # HTML(display(source_image, driving_video).to_html5_video())
        """**Create a model and load checkpoints**"""

        from demo import load_checkpoints
        generator, kp_detector = load_checkpoints(
            config_path='config/vox-256.yaml',
            checkpoint_path=
            'C:/Users/Artin/first-order-model/MotionModels/vox-cpk.pth.tar')
        """**Perform image animation**"""

        from demo import make_animation
        from skimage import img_as_ubyte

        predictions = make_animation(source_image,
                                     driving_video,
                                     generator,
                                     kp_detector,
                                     relative=True)

        #save resulting video
        imageio.mimsave(out + '.mp4',
                        [img_as_ubyte(frame) for frame in predictions])
        os.system("")
        status_text.set(f"File saved to {os.getcwd()}{out}.mp4")
    except Exception as e:
        status_text.set(f"{type(e)}: {e}")
Example #18
# (hypothetical reconstruction: the snippet is truncated and only the tail of
# the last add_argument call survived in the original listing)
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("--input_image", help="Path to source image")
ap.add_argument("--checkpoint", help="Path to model checkpoint")
ap.add_argument("--input_video",
                required=False,
                help="Path to video input")

args = vars(ap.parse_args())

print("[INFO] loading source image and checkpoint...")
source_path = args['input_image']
checkpoint_path = args['checkpoint']
if args['input_video']:
    video_path = args['input_video']
else:
    video_path = None
source_image = imageio.imread(source_path)
source_image = resize(source_image, (256, 256))[..., :3]

generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                          checkpoint_path=checkpoint_path)

if not os.path.exists('output'):
    os.mkdir('output')

relative = True
adapt_movement_scale = True
cpu = True

if video_path:
    cap = cv2.VideoCapture(video_path)
    print("[INFO] Loading video from the given path")
else:
    cap = cv2.VideoCapture(0)
    print("[INFO] Initializing front camera...")
Example #19
# (the opening lines are truncated in the listing; restored to match the
# identical display() helper in the other examples)
def display(source, driving, generated=None):
    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

    ims = []
    for i in range(len(driving)):
        cols = [source, driving[i]]
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])

    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani
    

HTML(display(source_image, driving_video).to_html5_video())

"""**Create a model and load checkpoints**"""

from demo import load_checkpoints
generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', 
                            checkpoint_path='/content/gdrive/My Drive/Amaka/vox-cpk.pth.tar')

"""**Perform image animation**"""

from demo import make_animation
from skimage import img_as_ubyte

predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)

#save resulting video
imageio.mimsave('../generated.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)
#video can be downloaded from /content folder

HTML(display(source_image, driving_video, predictions).to_html5_video())
Example #20
from skimage.transform import resize
from face_alignment import FaceAlignment, LandmarksType
from scipy.optimize import least_squares
from PIL import Image
import regex
import torch
import subprocess
import hashlib
import sys
from demo import load_checkpoints
from animate import normalize_kp

app = Flask(__name__)

generator, kp_detector = load_checkpoints(
    config_path="first-order-model/config/vox-adv-256.yaml",
    checkpoint_path="vox-adv-cpk.pth.tar",
)
fa = FaceAlignment(LandmarksType._2D)


@app.route("/")
def index():
    return render_template("upload.html")


def data(obj) -> str:
    return f"data: {json.dumps(obj)}\n\n"


@app.route("/upload", methods=["POST"])
def upload():
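    # The handler body is truncated in this listing; a minimal sketch in the
    # style of the other examples (form-field names and the output path are
    # assumptions, and flask's request / send_file are assumed imported):
    #
    #   source_image = resize(imageio.imread(request.files["image"].stream),
    #                         (256, 256))[..., :3]
    #   driving_video = [resize(f, (256, 256))[..., :3]
    #                    for f in imageio.mimread(request.files["video"].stream,
    #                                             memtest=False)]
    #   predictions = make_animation(source_image, driving_video,
    #                                generator, kp_detector, relative=True)
    #   imageio.mimsave("generated.mp4",
    #                   [img_as_ubyte(f) for f in predictions])
    #   return send_file("generated.mp4")
    ...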
Example #21
# (the opening lines are truncated in the listing; restored to match the
# identical display() helper in the other examples)
def display(source, driving, generated=None):
    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

    ims = []
    for i in range(len(driving)):
        cols = [source, driving[i]]
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])

    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani


#HTML(display(source_image, driving_video).to_html5_video())

print('check')

from demo import load_checkpoints
generator, kp_detector = load_checkpoints(
    config_path='config/vox-256.yaml',
    checkpoint_path=
    './content/gdrive/My Drive/first-order-motion-model/vox-cpk.pth.tar')

print('pred')

from demo import make_animation
from skimage import img_as_ubyte

predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=True)

print('save')
Example #22
class ModelPipeline:
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')

    def __init__(self, source, vid, start_time, p_id, global_status=None):
        self.source = source
        self.vid = vid
        self.start_time = start_time
        self.p_id = p_id
        self.global_status = global_status

    def update_status(self, new_status):
        if self.p_id in self.global_status and "status" in self.global_status[
                self.p_id]:
            p_status = self.global_status[self.p_id]
            p_status["status"] = new_status
            self.global_status[self.p_id] = p_status
        print("STATUS UPDATE: ",
              self.global_status,
              new_status,
              sep="\n********\n")

    def prepare(self):
        self.update_status("5")
        source, vid, start_time = self.source, self.vid, self.start_time
        os.makedirs(f"raw_images/{start_time}", exist_ok=True)
        os.makedirs(f"aligned_images/{start_time}", exist_ok=True)
        os.makedirs(f"frames/{start_time}", exist_ok=True)
        os.makedirs(f"video/intermediate/{start_time}", exist_ok=True)
        os.makedirs(f"video/final/", exist_ok=True)
        return self

    def align_images(self):
        source, vid, start_time = self.source, self.vid, self.start_time

        name = source.split("/")[-1]
        os.replace(f"{source}", f"raw_images/{start_time}/{name}")

        os.system(
            f"python align_images.py raw_images/{start_time}/ aligned_images/{start_time}/"
        )
        pho_file = os.listdir(f"aligned_images/{start_time}")

        if not pho_file or not len(pho_file):
            self.update_status("Failed")
            raise ValueError("Image processing failed.")
        else:
            self.processed_image = pho_file[0]
            self.update_status("10")

        return self

    def generate_raw_output(self):
        source, vid, start_time = self.source, self.vid, self.start_time
        photoname = self.processed_image.split('.')[0] + '.png'
        source_image = imageio.imread(
            f'aligned_images/{start_time}/{photoname}')
        source_image = resize(source_image, (256, 256))[..., :3]

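        # decode a 1x1 transparent PNG to use as a placeholder image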
        placeholder_bytes = base64.b64decode(
            'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII='
        )
        placeholder_image = imageio.imread(placeholder_bytes, '.png')
        placeholder_image = resize(placeholder_image, (256, 256))[..., :3]
        ims = []
        try:
            driving_video = imageio.get_reader(vid)
            for im in driving_video:
                ims.append(im)
        except Exception as e:
            print('Error: ', e)

        self.update_status("20")

        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in ims
        ]
        predictions = make_animation(source_image,
                                     driving_video,
                                     ModelPipeline.generator,
                                     ModelPipeline.kp_detector,
                                     relative=True)
        self.videoname = videoname = f'result-{start_time}.mp4'
        imageio.mimsave(f'video/intermediate/{start_time}/{videoname}',
                        [img_as_ubyte(frame) for frame in predictions])
        self.update_status("40")
        return self

    def extract_audio_and_frames(self):
        source, vid, start_time = self.source, self.vid, self.start_time
        videoname = self.videoname

        fps_of_video = 30
        frames_of_video = int(
            cv2.VideoCapture(vid).get(cv2.CAP_PROP_FRAME_COUNT))

        vidcap = cv2.VideoCapture(
            f'video/intermediate/{start_time}/{videoname}')
        success, image = vidcap.read()
        count = 0
        while success:
            cv2.imwrite(f"frames/{start_time}/frame%09d.jpg" % count, image)
            success, image = vidcap.read()
            count += 1

        frames = []
        img = os.listdir(f"frames/{start_time}/")
        img.sort()

        for i in img:
            frames.append(imageio.imread(f"frames/{start_time}/{i}"))
        frames = np.array(frames)
        self.final_video = dstvid = f"video/final/{videoname}"
        imageio.mimsave(dstvid, frames, fps=fps_of_video)
        self.update_status("60")
        return self

    def save(self):
        dstvid = self.final_video
        source, vid, start_time = self.source, self.vid, self.start_time

        tmpfile = dstvid.replace('.mp4', '-audio.mp4')
        bg_cmd = (
            f"ffmpeg -nostdin -y -i {vid} -vn -ar 44100 -ac 2 -ab 192K -f mp3 video/intermediate/{start_time}/sound.mp3"
            + " && " +
            f"ffmpeg -nostdin -i video/intermediate/{start_time}/sound.mp3 -i {dstvid} {tmpfile}"
        )
        os.system(bg_cmd)
        self.update_status("90")
        time.sleep(2)
        self.update_status("Complete")
        return self

    def pipeline(self):
        self.prepare().align_images().generate_raw_output(
        ).extract_audio_and_frames().save()
        return self
Example #23
def main():
    global source_image
    source_image = readnextimage(0)

    # start streaming
    if system == "linux":
        camera = pyfakewebcam.FakeWebcam(f'/dev/video{stream_id}',
                                         webcam_width, webcam_height)
        camera.print_capabilities()
        print(
            f"Fake webcam created on /dev/video{stream_id}. Use Firefox and join a Google Meeting to test."
        )

    # capture webcam
    video_capture = cv2.VideoCapture(webcam_id)
    time.sleep(1)
    width = video_capture.get(3)  # float
    height = video_capture.get(4)  # float
    print("webcam dimensions = {} x {}".format(width, height))

    # load models
    previous = None
    net = load_face_model()
    generator, kp_detector = demo.load_checkpoints(
        config_path=f'{first_order_path}config/vox-adv-256.yaml',
        checkpoint_path=f'{model_path}/vox-adv-cpk.pth.tar')

    # create windows
    cv2.namedWindow('Face', cv2.WINDOW_GUI_NORMAL)  # extracted face
    cv2.moveWindow('Face', int(screen_width / 2) - 150, 100)
    cv2.resizeWindow('Face', 256, 256)

    cv2.namedWindow('DeepFake', cv2.WINDOW_GUI_NORMAL)  # face transformation
    cv2.moveWindow('DeepFake', int(screen_width / 2) + 150, 100)
    cv2.resizeWindow('DeepFake', int(img_shape[1] / img_shape[0] * 256), 256)

    cv2.namedWindow('Stream', cv2.WINDOW_GUI_NORMAL)  # rendered to fake webcam
    cv2.moveWindow('Stream',
                   int(screen_width / 2) - int(webcam_width / 2), 400)
    cv2.resizeWindow('Stream', webcam_width, webcam_height)

    print(
        "Press C to center Webcam, Press B/N for previous/next image in media directory, T to alter between relative and absolute transformation, Q to quit"
    )
    x1, y1, x2, y2 = [0, 0, 0, 0]
    relative = True
    while True:
        ret, frame = video_capture.read()
        frame = cv2.resize(frame, (640, 480))
        frame = cv2.flip(frame, 1)

        if (previous is None or reset is True):
            x1, y1, x2, y2 = find_face_cut(net, frame)
            previous = cut_face_window(x1, y1, x2, y2, frame)
            reset = False
            #cv2.imshow('Previous',previous)

        curr_face = cut_face_window(x1, y1, x2, y2, frame)
        #cv2.imshow('Curr Face',curr_face)
        #cv2.imshow('Source Image',source_image)
        deep_fake = process_image(source_image, previous, curr_face, net,
                                  generator, kp_detector, relative)
        deep_fake = cv2.cvtColor(deep_fake, cv2.COLOR_RGB2BGR)

        #cv2.imshow('Webcam', frame) - get face
        cv2.imshow('Face', curr_face)
        cv2.imshow('DeepFake', deep_fake)

        rgb = cv2.resize(deep_fake,
                         (int(img_shape[1] / img_shape[0] * 480), 480))
        # pad image
        x_border = int((640 - (img_shape[1] / img_shape[0] * 480)) / 2)
        y_border = int((480 - (img_shape[0] / img_shape[1] * 640)) / 2)
        stream_v = cv2.copyMakeBorder(rgb, y_border if y_border >= 0 else 0,
                                      y_border if y_border >= 0 else 0,
                                      x_border if x_border >= 0 else 0,
                                      x_border if x_border >= 0 else 0,
                                      cv2.BORDER_CONSTANT)
        cv2.imshow('Stream', stream_v)

        #time.sleep(1/30.0)
        stream_v = cv2.flip(stream_v, 1)
        stream_v = cv2.cvtColor(stream_v, cv2.COLOR_BGR2RGB)
        stream_v = (stream_v * 255).astype(np.uint8)

        # stream to fakewebcam
        if system == "linux":
            #print("output to fakecam")
            camera.schedule_frame(stream_v)

        k = cv2.waitKey(1)
        # Hit 'q' on the keyboard to quit!
        if k & 0xFF == ord('q'):
            print("Quiting")
            video_capture.release()
            break
        elif k == ord('c'):
            # center
            print("Centering the image")
            reset = True
        elif k == ord('b'):
            # previous image
            print("Loading previous image")
            source_image = readpreviousimage()
            reset = True
        elif k == ord('n'):
            # next image
            print("Loading next image")
            source_image = readnextimage()
            reset = True
        elif k == ord('t'):
            # rotate
            relative = not relative
            print("Changing transform mode")

    cv2.destroyAllWindows()
    exit()
Example #24
def process(input):
    print("[INFO] loading source image and checkpoint...")
    source_path = input
    checkpoint_path = args['checkpoint']
    if args['input_video']:
        video_path = args['input_video']
    else:
        video_path = None
    source_image = imageio.imread(source_path)
    source_image = resize(source_image, (256, 256))[..., :3]

    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path=checkpoint_path)

    # Load the cascade
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    if not os.path.exists('output'):
        os.mkdir('output')

    relative = True
    adapt_movement_scale = True
    if args['cpu']:
        cpu = True
    else:
        cpu = False

    if video_path:
        cap = cv2.VideoCapture(video_path)
        print("[INFO] Loading video from the given path")
    else:
        cap = cv2.VideoCapture(0)
        print("[INFO] Initializing front camera...")
        # get vcap property
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
        fps = cap.get(cv2.CAP_PROP_FPS)
        frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        print('resolution : {} x {}'.format(width, height))
        print('frame rate : {} \nframe count : {}'.format(fps, frame_count))

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out1 = cv2.VideoWriter('output/test.avi', fourcc, 12, (256 * 3, 256), True)

    cv2_source = cv2.cvtColor(source_image.astype('float32'),
                              cv2.COLOR_BGR2RGB)
    cv2_source2 = (source_image * 255).astype(np.uint8)

    if args['vc']:
        camera = pyfakewebcam.FakeWebcam('/dev/video7', 640, 360)
        camera._settings.fmt.pix.width = 640
        camera._settings.fmt.pix.height = 360

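    # 640x360 canvases for the fake webcam: the 256x256 result is pasted in
    # centered at (xoff, yoff), with channel 2 set to 255 to tint the border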
    img = np.zeros((360, 640, 3), dtype=np.uint8)
    yoff = round((360 - 256) / 2)
    xoff = round((640 - 256) / 2)
    img_im = img.copy()
    img_cv2_source = img.copy()
    img_im[:, :, 2] = 255
    img_cv2_source[:, :, 2] = 255
    with torch.no_grad():
        predictions = []
        source = torch.tensor(source_image[np.newaxis].astype(
            np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            source = source.cuda()
        kp_source = kp_detector(source)
        count = 0
        fps = []
        if args['csv']:
            line1 = []
            size = 10
            x_vec = np.linspace(0, 1, size + 1)[0:-1]
            y_vec = np.random.randn(len(x_vec))
        while (True):
            start = time.time()
            ret, frame = cap.read()
            if ret == True:
                # frame is None when the read fails, so only touch it here
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # Detect the faces
                faces = face_cascade.detectMultiScale(gray, 1.1, 4)
                frame = cv2.flip(frame, 1)

                if not video_path:
                    x = 143
                    y = 87
                    w = 322
                    h = 322
                    frame = frame[y:y + h, x:x + w]
                frame1 = resize(frame, (256, 256))[..., :3]

                if count == 0:
                    source_image1 = frame1
                    source1 = torch.tensor(source_image1[np.newaxis].astype(
                        np.float32)).permute(0, 3, 1, 2)
                    kp_driving_initial = kp_detector(source1)

                frame_test = torch.tensor(frame1[np.newaxis].astype(
                    np.float32)).permute(0, 3, 1, 2)

                driving_frame = frame_test
                if not cpu:
                    driving_frame = driving_frame.cuda()
                kp_driving = kp_detector(driving_frame)
                kp_norm = normalize_kp(
                    kp_source=kp_source,
                    kp_driving=kp_driving,
                    kp_driving_initial=kp_driving_initial,
                    use_relative_movement=relative,
                    use_relative_jacobian=relative,
                    adapt_movement_scale=adapt_movement_scale)
                out = generator(source,
                                kp_source=kp_source,
                                kp_driving=kp_norm)
                predictions.append(
                    np.transpose(out['prediction'].data.cpu().numpy(),
                                 [0, 2, 3, 1])[0])
                im = np.transpose(out['prediction'].data.cpu().numpy(),
                                  [0, 2, 3, 1])[0]
                #im = cv2.cvtColor(im,cv2.COLOR_RGB2BGR)
                #cv2_source = cv2.cvtColor(cv2_source,cv2.COLOR_RGB2BGR)
                im = (np.array(im) * 255).astype(np.uint8)
                #cv2_source = (np.array(cv2_source)*255).astype(np.uint8)
                img_im[yoff:yoff + 256, xoff:xoff + 256] = im
                img_cv2_source[yoff:yoff + 256, xoff:xoff + 256] = cv2_source2
                #print(faces)
                #print(type(im))
                if args['debug']:
                    #print("[DEBUG] FPS : ",1.0 / (time.time()-start))
                    fps.append(1.0 / (time.time() - start))
                    if args['cpu']:
                        print("[DEBUG] Avg. of FPS using CPU : ", mean(fps))
                    else:
                        print("[DEBUG] Avg. of FPS using GPU : ", mean(fps))

                if args['csv']:
                    y_vec[-1] = mean(fps)
                    line1 = live_plotter(x_vec, y_vec, line1)
                    y_vec = np.append(y_vec[1:], 0.0)

                if args['vc']:
                    if np.array(faces).any():
                        #joinedFrame = np.concatenate((cv2_source,im,frame1),axis=1)
                        camera.schedule_frame(img_im)
                    else:
                        #joinedFrame = np.concatenate((cv2_source,cv2_source,frame1),axis=1)
                        camera.schedule_frame(img_cv2_source)
                    #cv2.imshow('Test',joinedFrame)
                    #out1.write(img_as_ubyte(np.array(im)))
                count += 1
            else:
                break

        cap.release()
        out1.release()
        cv2.destroyAllWindows()
Example #25
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from skimage import img_as_ubyte
from demo import load_checkpoints, make_animation
#from IPython.display import HTML
import warnings
warnings.filterwarnings("ignore")

source_image = imageio.imread('kii.jpeg')
driving_video = imageio.mimread('04.mp4')

# Resize image and video to 256x256

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]

generator, kp_detector = load_checkpoints(
    config_path='first-order-model/config/vox-256.yaml',
    checkpoint_path='first-order-model/vox-cpk.pth.tar')

#predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True, cpu=True)
predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=True)

# save resulting video
imageio.mimsave('out.mp4', [img_as_ubyte(frame) for frame in predictions])
# video can be downloaded from /content folder
Example #26
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
import warnings
import os
import urllib.request, urllib.error
from moviepy.editor import VideoFileClip, concatenate_videoclips

warnings.filterwarnings("ignore")

#Create a model and load checkpoints
from demo import load_checkpoints
generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                          checkpoint_path='vox-cpk.pth.tar')

from demo import make_animation
from skimage import img_as_ubyte

#Go through files
for filename in os.listdir('input'):
    if filename.endswith(".mp4"):

        # Get Character profile from Filename
        # str.strip() removes characters, not a suffix, so split the extension off
        characterId = filename.split('_')[-1].rsplit('.', 1)[0]
        print(filename)

        # Get Character Portrait from Zkillboard
        try:
            if not os.path.isfile("{}.png".format(characterId)):
Example #27
import imageio
from skimage.transform import resize
import warnings
import sys
sys.path.append("./first-order-model")
from demo import load_checkpoints, make_animation
from skimage import img_as_ubyte
from subprocess import run
import shlex
warnings.filterwarnings("ignore")

print("loading model,", "using" if args.adversary else "not using",
      "adversary")
generator, kp_detector = load_checkpoints(
    config_path='./first-order-model/config/vox-256.yaml',
    checkpoint_path='./data/vox-cpk.pth.tar'
    if not args.adversary else "./data/vox-adv-cpk.pth.tar",
    cpu=args.cpu)

print("loading input")
source_image = imageio.imread(args.image)
driving_video = imageio.mimread('data/template.mp4', memtest=False)

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
print("making predictions")
predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=args.relative,
                             cpu=args.cpu)  # (tail of the call truncated in the listing; cpu flag assumed)
Example #28
cpu = True

source_image = imageio.imread(image_path)
# Should fix memory error by feeding frame by frame opposed to whole video at once
driving_video = imageio.get_reader(video_path)

# Resize image to 256x256
source_image = resize(source_image, (256, 256))[..., :3]

fps = driving_video.get_meta_data()['fps']
ttl = driving_video.get_meta_data()['duration'] * fps
relative = True
print(fps, ttl)

generator, kp_detector = load_checkpoints(
    config_path='config/vox-adv-256.yaml',
    checkpoint_path='./data/checkpoints/vox-adv-cpk.pth.tar',
    cpu=cpu)

# Create Video Writer for output
writer = imageio.get_writer(temp_out_path, fps=fps)

with torch.no_grad():
    source = permutate(source_image)
    first_frame = permutate(
        resize(driving_video.get_data(0), (256, 256))[..., :3])

    # Keypoints
    kp_source = None
    kp_driving_initial = None

    if cpu:
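
# (The listing is truncated above. A hedged sketch of the frame-by-frame loop
# that the earlier memory-error comment describes: process each frame as it is
# read and append it straight to the writer, so the full video never sits in
# memory. The normalize_kp usage follows the pattern of Example #24; permutate
# is this snippet's own helper.)
#
#   kp_source = kp_detector(source)
#   kp_driving_initial = kp_detector(first_frame)
#   for frame in driving_video:
#       driving = permutate(resize(frame, (256, 256))[..., :3])
#       kp_norm = normalize_kp(kp_source=kp_source,
#                              kp_driving=kp_detector(driving),
#                              kp_driving_initial=kp_driving_initial,
#                              use_relative_movement=relative,
#                              use_relative_jacobian=relative,
#                              adapt_movement_scale=True)
#       out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
#       writer.append_data(img_as_ubyte(
#           np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]))
#   writer.close()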
Example #29
    output_video_name += '_' + args.image_resize + '_' + args.video_resize
    if options['adapt_movement_scale']:
        output_video_name += '_adaptive_scaling'
    output_video_name += "_" + args.codec
    output_video_name += ".mp4"
    driving_video_name = basename(driving_video_path).rsplit('.', 1)[0]
    output_video_folder = join(output_folder, driving_video_name)
    makedirs(output_video_folder, exist_ok=True)
    output_video_path = join(output_video_folder, output_video_name)

    if not (generator or kp_detector):
        print("Loading Model")
        generator, kp_detector = load_checkpoints(**modes[args.mode])

    print("Loading User Input")
    print()

    # Read Video and Extract Audio
    source_video = VideoFileClip(driving_video_path)
    # Remap timings
    if args.start:
        if args.end and args.end > args.start and args.end < source_video.duration:
            source_video = source_video.subclip(args.start, args.end)
        elif args.duration and args.duration < source_video.duration and args.duration + args.start < source_video.duration:
            source_video = source_video.subclip(args.start,
                                                args.start + args.duration)
        else:
            if args.start < source_video.duration: