Example #1
    def run(self, opt):
        if opt.crop_image:
            source_image = image_crop(opt.source_image, opt.crop_image_padding)
        else:
            source_image = imageio.imread(opt.source_image)

        reader = imageio.get_reader(opt.driving_video)
        source_image = resize(source_image, (256, 256))[..., :3]
        fps = reader.get_meta_data()['fps']
        driving_video = []
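        # Some containers make imageio raise RuntimeError at end-of-stream,
        # so collect frames until the reader gives out.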
        try:
            for im in reader:
                driving_video.append(im)
        except RuntimeError:
            pass
        reader.close()

        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in driving_video
        ]
        generator, kp_detector = load_checkpoints(
            config_path=opt.config, checkpoint_path=opt.checkpoint)

        if opt.find_best_frame or opt.best_frame is not None:
            i = opt.best_frame if opt.best_frame is not None else find_best_frame(
                source_image, driving_video, cpu=opt.cpu)
            print("Best frame: " + str(i))
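            # Animate forward from the best-aligned frame and backward to the
            # start, then stitch the two halves together below.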
            driving_forward = driving_video[i:]
            driving_backward = driving_video[:(i + 1)][::-1]
            predictions_forward = make_animation(
                source_image,
                driving_forward,
                generator,
                kp_detector,
                relative=opt.relative,
                adapt_movement_scale=opt.adapt_scale,
                cpu=opt.cpu)
            predictions_backward = make_animation(
                source_image,
                driving_backward,
                generator,
                kp_detector,
                relative=opt.relative,
                adapt_movement_scale=opt.adapt_scale,
                cpu=opt.cpu)
            predictions = predictions_backward[::-1] + predictions_forward[1:]
        else:
            predictions = make_animation(source_image,
                                         driving_video,
                                         generator,
                                         kp_detector,
                                         relative=opt.relative,
                                         adapt_movement_scale=opt.adapt_scale,
                                         cpu=opt.cpu)

        #predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
        imageio.mimsave(opt.output,
                        [img_as_ubyte(frame) for frame in predictions],
                        fps=fps)
Example #2
def first_order(user_id: int):
    data = prepare_data(user_id)
    if data['photo']:
        predictions = make_photo_animation(
                data['source_media'], data['target_media'],
                data['generator'], data['kp_detector'],
                relative=RELATIVE,
                adapt_movement_scale=ADAPT_SCALE,
                cpu=CPU
        )
    else:
        predictions = make_animation(
                data['source_media'], data['target_media'],
                data['generator'], data['kp_detector'],
                relative=RELATIVE,
                adapt_movement_scale=ADAPT_SCALE,
                cpu=CPU
        )
    # imageio.mimsave(f'{PATH}1.mp4', [img_as_ubyte(frame) for frame in predictions], "mp4", fps=data['fps'])
    filename = f'{PATH}{user_id}'
    imageio.mimsave(filename + '.mp4',
                    [super_resolution(img_as_ubyte(frame), 4) for frame in predictions],
                    "mp4", fps=data['fps'])
    video_clip = VideoFileClip(filename + '.mp4')
    video_clip.audio = data['audio']
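    # If writing with the attached audio fails, fall back to re-reading the
    # clip and writing it without audio.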
    try:
        video_clip.write_videofile(filename + '_a' + '.mp4')
    except Exception as e:
        print(e)
        video_clip = VideoFileClip(filename + '.mp4')
        video_clip.write_videofile(filename + '_a' + '.mp4')
Example #3
    def generate_raw_output(self):
        source, vid, start_time = self.source, self.vid, self.start_time
        photoname = self.processed_image.split('.')[0] + '.png'
        source_image = imageio.imread(
            f'aligned_images/{start_time}/{photoname}')
        source_image = resize(source_image, (256, 256))[..., :3]

        placeholder_bytes = base64.b64decode(
            'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO+ip1sAAAAASUVORK5CYII='
        )
        placeholder_image = imageio.imread(placeholder_bytes, '.png')
        placeholder_image = resize(placeholder_image, (256, 256))[..., :3]
        ims = []
        try:
            driving_video = imageio.get_reader(vid)
            for im in driving_video:
                ims.append(im)
        except Exception as e:
            print('Error: ', e)

        self.update_status("20")

        # Resize the collected frames (iterating the exhausted reader would yield nothing)
        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in ims
        ]
        predictions = make_animation(source_image,
                                     driving_video,
                                     ModelPipeline.generator,
                                     ModelPipeline.kp_detector,
                                     relative=True)
        self.videoname = videoname = f'result-{start_time}.mp4'
        imageio.mimsave(f'video/intermediate/{start_time}/{videoname}',
                        [img_as_ubyte(frame) for frame in predictions])
        self.update_status("40")
        return self
Example #4
def get(source_image, driving_video):
    source_image = imageio.imread(source_image)
    driving_video = imageio.mimread(driving_video)

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    generator, kp_detector = load_checkpoints(
        config_path='./first-order-motion-model/config/vox-256.yaml',
        checkpoint_path='./vox-cpk.pth.tar')

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('./files/generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder
    return True
Example #5
def get_predictions(photo, video):
    t = time()
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path=checkpoint_path)
    # '/content/gdrive/My Drive/DepFak/vox-adv-cpk.pth.tar'
    predictions = make_animation(photo,
                                 video,
                                 generator,
                                 kp_detector,
                                 relative=True)
    printer.log('PREDICTION TIME', time() - t)
    return predictions
Example #6
def process_image(source_image, base, current, net, generator, kp_detector,
                  relative):
    predictions = make_animation(source_image, [base, current],
                                 generator,
                                 kp_detector,
                                 relative=relative,
                                 adapt_movement_scale=False,
                                 cpu=False)
    #print("Device",torch.cuda.current_device())
    #print("Device Name:",torch.cuda.get_device_name(gpu_id))
    # predictions = [1]# predictions[..., ::-1]
    # predictions = (np.clip(predictions, 0, 1) * 255).astype(np.uint8)
    return predictions[1]
Example #7
def animate_image(source_image, driving_video, savepath, configpath,
                  checkpointpath):
    generator, kp_detector = load_checkpoints(config_path=configpath,
                                              checkpoint_path=checkpointpath)
    """**Perform image animation**"""

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave(savepath, [img_as_ubyte(frame) for frame in predictions])
Example #8
def generate():
    source_image = imageio.imread(image_path)
    driving_video = imageio.mimread(video_path)
    #Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
    #Create a model and load checkpoints
    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                checkpoint_path='first-order-motion-model/vox-cpk.pth.tar')
    #Perform image animation
    from demo import make_animation
    from skimage import img_as_ubyte
    predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)
    #save resulting video
    imageio.mimsave(output_path, [img_as_ubyte(frame) for frame in predictions])
    print('video saved at ' + output_path)
Example #9
def convert_image_to_video(uploadimage, outputPath):
    print("다메다메 밈 작업중", end=' ')
    warnings.filterwarnings("ignore")

    source_image = imageio.imread(uploadimage)
    driving_video = imageio.mimread(os.path.join(config.dame_path, '04.mp4'))

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

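    # Helper for previewing source, driving, and generated frames side by side
    # (defined here but not called in this function).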
    def display(source, driving, generated=None):
        fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

        ims = []
        for i in range(len(driving)):
            cols = [source, driving[i]]
            if generated is not None:
                cols.append(generated[i])
            im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
            plt.axis('off')
            ims.append([im])

        ani = animation.ArtistAnimation(fig,
                                        ims,
                                        interval=50,
                                        repeat_delay=1000)
        plt.close()
        return ani

    generator, kp_detector = load_checkpoints(
        config_path=os.path.join(config.first_order_model_path,
                                 'config/vox-256.yaml'),
        checkpoint_path=os.path.join(config.dame_path, 'vox-cpk.pth.tar'))

    # make video
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)
    imageio.mimsave(outputPath, [img_as_ubyte(frame) for frame in predictions])
    print('done')
    return outputPath
Example #10
def deepfake_m(source_image, driving_video):
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml',
        checkpoint_path=
        '/content/gdrive/My Drive/first-order-motion-model/vox-cpk.pth.tar')
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('../generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder

    # HTML(display(source_image, driving_video, predictions).to_html5_video())

    return predictions
Example #11
  def animate(self, driver, target, size=(256, 256)):
    """
      Uses First Order Model project to animate the target image using the
      driver video.

      :param driver: DGVideo object for the driver video.
      :param target: DGImage object for the target image.
      :param size: Tuple for dimensions to work with.
    """
    # An awkward way of resizing the inputs
    image = target.resize(size=size)
    video = driver.get_video()
    video = DGVideo.from_video(video, size=size, fps=driver.fps)
    video = video.get_video(mode='rgb', type='float')  # use the resized video, not the original driver
    image = image.get_image(mode='rgb', type='float')
    anim = fomm_demo.make_animation(image, video,
                               self.generator, self.keypoint_detector, 
                               relative=True)

    return DGVideo.from_video(anim, fps=driver.fps)
Example #12
def gen_dpfk(no_nvidia_gpu):
    if no_nvidia_gpu:
        print(
            "Using CPU for further calculations... (this will be much slower)")

    print("Reading template and input image...")

    source_image = imageio.imread('../input_image.png')
    driving_video = imageio.mimread('bakamitai_template.mp4')

    #Resize image and video to 256x256

    print("Resizing inputs...")

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    print("Generating video... (this may take a while)")

    generator, kp_detector = demo.load_checkpoints(
        config_path='first-order-model/config/vox-256.yaml',
        checkpoint_path='vox-cpk.pth.tar',
        cpu=no_nvidia_gpu)

    predictions = demo.make_animation(source_image,
                                      driving_video,
                                      generator,
                                      kp_detector,
                                      relative=True,
                                      cpu=no_nvidia_gpu)

    print("Saving video...")

    imageio.mimsave('generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
Example #13
    def synthesize_landmark_to_face(self):
        if LandMark2FaceModel == "firstorder":
            reader = imageio.get_reader(
                os.path.join("/",
                             *root_file_path.split("/")[:-1],
                             "cSpeech2Landmark/OUT/out.mp4"))
            self.fps = reader.get_meta_data()['fps']
            self.fps = self.fps - 5
            reader.close()
            source_image = imageio.imread(
                os.path.join("/",
                             *root_file_path.split("/")[:-1],
                             "REF/current/ref.png"))
            self.source_image = resize(source_image, (256, 256))[..., :3]
            driving_video = imageio.mimread(os.path.join(
                "/",
                *root_file_path.split("/")[:-1],
                "cSpeech2Landmark/OUT/out.mp4"),
                                            memtest=False)
            self.driving_video = [
                resize(frame, (256, 256))[..., :3] for frame in driving_video
            ]

            cv2.destroyAllWindows()
            predictions = make_animation(self.source_image, self.driving_video,
                                         self.generator, self.kp_detector)
            imageio.mimsave(os.path.join(root_file_path, "OUT/face.mp4"),
                            predictions,
                            fps=self.fps)
            #os.chdir("../finalface")
            #os.system("bash test.sh")
            #os.chdir("/home/ipsum/fatssd/Anya/TTS")
            #stabilize("../finalface/result.mp4","stabilized.mp4")
            cmd = 'ffmpeg -y -i ' + 'dLandMark2Face/OUT/face.mp4 -i ' + 'bText2Speech/OUT/temp.wav -c:v copy -c:a aac -strict experimental fOUTPUT/queued.mp4'
            os.system(cmd)
            shutil.copy("fOUTPUT/queued.mp4", "Queue/queued.mp4")
Example #14
def video_generator():

    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')

    print(" generator and detector loaded ...  ")
    source_image = 'me.png'
    driving_video = '04.mp4'
    source_image = imageio.imread(source_image)
    reader = imageio.get_reader(driving_video)

    #Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    fps = reader.get_meta_data()['fps']
    driving_video = []
    try:
        for im in reader:
            driving_video.append(im)
    except RuntimeError:
        pass
    reader.close()
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True,
                                 adapt_movement_scale=True)
    #save resulting video
    print('generating animated video ')
    imageio.mimsave('generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions],
                    fps=fps)
    print('video saved ...')
Example #15
def generate_video(filename1):
    path1 = "static/profile_pics/" + filename1  #getting path
    source_image = imageio.imread(path1)  #loading image
    driving_video = imageio.mimread('t3.mp4')  #loading template

    # Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]  #resizing image
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]  #resizing template

    # loading checkpoints
    print("loading checkpoints")
    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')

    # performing image animation and saving video

    print("importing demo and skimage")
    from demo import make_animation
    from skimage import img_as_ubyte

    print("calling make_animation")
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    # save resulting video
    # i_name , f_ext = os.path.splitext(filename1)
    # videoName = "static/generated_with_api_" + i_name + "mp4"
    imageio.mimsave("static/jahnavi_t4.mp4",
                    [img_as_ubyte(frame) for frame in predictions])
Example #16
    def generate_deepfake(image_path,
                          driver_path,
                          sound_path,
                          output_path="output/output.mp4"):
        if not hasattr(DeepFakeApi, "initialized"):
            DeepFakeApi.__init__()

        print("Loading driver and source image")
        source_image = imageio.imread(image_path)
        reader = imageio.get_reader(driver_path)
        meta_data = reader.get_meta_data()

        fps = meta_data['fps']

        driving_video = []
        try:
            for im in reader:
                driving_video.append(im)
        except RuntimeError:
            pass
        reader.close()

        print("Resizing driver and source image")
        source_image = resize(source_image, (256, 256))[..., :3]

        # Resize only if necessary
        if meta_data['source_size'] != (256, 256) and meta_data['size'] != (256, 256):
            driving_video = [
                resize(frame, (256, 256))[..., :3] for frame in driving_video
            ]
        else:
            # We still need to remap the color values from [0, 255] to [0, 1]
            driving_video = [(frame / 255)[..., :3]
                             for frame in driving_video]

        print("Creating animation")
        predictions = make_animation(source_image,
                                     driving_video,
                                     DeepFakeApi.generator,
                                     DeepFakeApi.kp_detector,
                                     cpu=False)

        # save resulting video
        print("Saving output")
        clips = [
            mp.ImageClip(img_as_ubyte(m)).set_duration(1 / fps)
            for m in predictions
        ]

        audio_clip = mp.AudioFileClip(sound_path)
        audio_clip = audio_clip.set_end(meta_data["duration"])
        concat_clip = mp.concatenate_videoclips(clips, method="compose")
        concat_clip = concat_clip.set_audio(audio_clip)

        # Practically infinite, but capped just in case the loop never terminates
        if os.path.isfile(output_path):
            path_split = output_path.split(".")
            path_split.insert(-1, "1")  # placeholder; overwritten with str(i) below
            path_split[-1] = "." + path_split[-1]
            for i in range(1, 512):
                if i > 510:
                    print("Infinite loop error! Aborting")
                    break

                path_split[-2] = str(i)
                test_path = "".join(path_split)
                if not os.path.isfile(test_path):
                    output_path = test_path
                    break

        concat_clip.write_videofile(output_path, fps=fps)
Example #17
        for x in range(0, pieces):
            print("Animating", filename, x)
            # Get the snip
            driving_video = imageio.mimread(os.path.join(
                "input_snips", "{}_{}.mp4".format(filename.strip(".mp4"), x)),
                                            memtest=False)

            # Resize video to 256x256
            driving_video = [
                resize(frame, (256, 256))[..., :3] for frame in driving_video
            ]

            # Perform image animation
            predictions = make_animation(source_image,
                                         driving_video,
                                         generator,
                                         kp_detector,
                                         relative=True,
                                         cpu=False)

            imageio.mimsave(os.path.join(
                "output_snips", "{}_{}.mp4".format(filename.strip(".mp4"), x)),
                            [img_as_ubyte(frame) for frame in predictions],
                            fps=30)

        # Concatenate snips
        video_pieces = []
        for x in range(0, pieces):
            print("Merging", filename, x)
            split = VideoFileClip(
                os.path.join("output_snips",
                             "{}_{}.mp4".format(filename.strip(".mp4"), x)))
Example #18
def process_image(source_image, base, current, net, generator, kp_detector, relative):
    predictions = make_animation(source_image, [base, current], generator,
                                 kp_detector, relative=relative,
                                 adapt_movement_scale=False)
    return predictions[1]
Example #19
    parser.add_argument("--video_path", type=str, default='./data/videos/04.mp4')
    parser.add_argument("--use_relative", type=str2bool, nargs='?',
                        const=True, default=True)
    #parser.add_argument("--exclude_pattern", nargs="+", default=[""])
    args = parser.parse_args()


    image_path = args.image_path
    video_path = args.video_path
    source_image = imageio.imread(image_path)
    driving_video = imageio.mimread(video_path, memtest=False)


    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = np.array([resize(frame, (256, 256))[..., :3] for frame in driving_video])

    imageio.imwrite('./generated/downscaled_image.png',source_image)
    generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                checkpoint_path='./models/vox-cpk.pth.tar')

    predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=args.use_relative)
    #predictions = []
    #orig_img = driving_video[0]
    #for i in tqdm(range(np.shape(driving_video)[0]-1)):
    #    #print(np.shape(driving_video[i]))
    #    predictions.append(animate_image(source_image, driving_video[i],orig_img, generator, kp_detector, relative=args.use_relative))

    #save resulting video
    imageio.mimsave('./generated/generated.mp4', [img_as_ubyte(frame) for frame in predictions])
Example #20
                height = 256
                source_video = movie_resize(source_video, (width, height))
                x_center, y_center = width // 2, 128
            source_video = movie_crop(source_video,
                                      x_center=x_center,
                                      y_center=y_center,
                                      width=256,
                                      height=256)
        else:
            raise NotImplementedError("Invalid Video Resize Mode")

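    # iter_frames() yields uint8 frames; scale to [0, 1] floats, the range
    # make_animation expects.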
    driving_video = [(frame / 255) for frame in source_video.iter_frames()]
    print()

    print("Generating Video")
    predictions = make_animation(source_image, driving_video, generator,
                                 kp_detector, **options)
    print()

    output_clip = VideoClip(make_frame, duration=source_duration)
    output_clip = output_clip.set_fps(source_fps)
    output_clip = output_clip.set_audio(source_audio)

    if args.image_resize == 'fill' and args.crop_output:
        print(f"Cropping output video to {unfill_width}x{unfill_height}")
        output_clip = movie_crop(output_clip,
                                 x_center=256 // 2,
                                 y_center=256 // 2,
                                 width=unfill_width,
                                 height=unfill_height)

    print("Saving Video...")
Example #21
def convert():

    image_file = tk.filedialog.askopenfilename()

    convert_button.destroy()

    source_image = imageio.imread(image_file)
    driving_video = imageio.mimread('files/04.mp4')

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    def display(source, driving, generated=None):
        fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

        ims = []
        for i in range(len(driving)):
            cols = [source]
            cols.append(driving[i])
            if generated is not None:
                cols.append(generated[i])
            im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
            plt.axis('off')
            ims.append([im])

        ani = animation.ArtistAnimation(fig,
                                        ims,
                                        interval=50,
                                        repeat_delay=1000)
        plt.close()
        return ani

    HTML(display(source_image, driving_video).to_html5_video())
    """**Create a model and load checkpoints**"""

    from demo import load_checkpoints
    generator, kp_detector = load_checkpoints(
        config_path='config/vox-256.yaml',
        checkpoint_path='files/vox-cpk.pth.tar')
    """**Perform image animation**"""

    from demo import make_animation
    from skimage import img_as_ubyte

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True)

    #save resulting video
    imageio.mimsave('../generated.mp4',
                    [img_as_ubyte(frame) for frame in predictions])
    #video can be downloaded from /content folder

    HTML(display(source_image, driving_video, predictions).to_html5_video())
    """**In the cell above we use relative keypoint displacement to animate the objects. We can use absolute coordinates instead,  but in this way all the object proporions will be inherited from the driving video. For example Putin haircut will be extended to match Trump haircut.**"""

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=False,
                                 adapt_movement_scale=True)
    HTML(display(source_image, driving_video, predictions).to_html5_video())
    """## Running on your data

    **First we need to crop a face from both the source image and the video. While a simple graphics editor like Paint can be used for cropping the image, cropping from video is more complicated. You can use ffmpeg for this.**
    """
    """**Another possibility is to use a screen recording tool, or, if you need to crop many images at once, use a face detector (https://github.com/1adrianb/face-alignment); see https://github.com/AliaksandrSiarohin/video-preprocessing for preprocessing of VoxCeleb.**"""

    source_image = imageio.imread('files/09.png')
    driving_video = imageio.mimread('hinton.mp4', memtest=False)

    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True,
                                 adapt_movement_scale=True)

    HTML(display(source_image, driving_video, predictions).to_html5_video())
Example #22
    for i in range(len(driving)):
        cols = [source]
        cols.append(driving[i])
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])

    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani
    

HTML(display(source_image, driving_video).to_html5_video())
from demo import load_checkpoints
generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                            checkpoint_path='/content/gdrive/My Drive/first-order-motion-model/vox-cpk.pth.tar')
from demo import make_animation
from skimage import img_as_ubyte

predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)

#save resulting video
imageio.mimsave('../generated.mp4', [img_as_ubyte(frame) for frame in predictions])
#video can be downloaded from /content folder

HTML(display(source_image, driving_video, predictions).to_html5_video())
predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True)
HTML(display(source_image, driving_video, predictions).to_html5_video())
Example #23
def run(im, vid, out):
    global status_text
    # Validate inputs before touching im.name/vid.name (they are None on cancel)
    if im is None or vid is None or out == "":
        print("Please complete required fields.")
        return
    print(im.name, vid.name, out)
    # source_image = imageio.imread('C:/Users/Artin/first-order-model/MotionModels/armanFace3.png')
    # driving_video = imageio.mimread('C:/Users/Artin/first-order-model/MotionModels/Dam.mp4')
    status_text.set("Working")
    #update()
    try:
        source_image = imageio.imread(str(im.name))
        driving_video = imageio.mimread(str(vid.name))

        #Resize image and video to 256x256

        source_image = resize(source_image, (256, 256))[..., :3]
        driving_video = [
            resize(frame, (256, 256))[..., :3] for frame in driving_video
        ]

        # def display(source, driving, generated=None):
        #     fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))
        #
        #     ims = []
        #     for i in range(len(driving)):
        #         cols = [source]
        #         cols.append(driving[i])
        #         if generated is not None:
        #             cols.append(generated[i])
        #         im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        #         plt.axis('off')
        #         ims.append([im])
        #
        #     ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
        #     plt.close()
        #     return ani

        # HTML(display(source_image, driving_video).to_html5_video())
        """**Create a model and load checkpoints**"""

        from demo import load_checkpoints
        generator, kp_detector = load_checkpoints(
            config_path='config/vox-256.yaml',
            checkpoint_path=
            'C:/Users/Artin/first-order-model/MotionModels/vox-cpk.pth.tar')
        """**Perform image animation**"""

        from demo import make_animation
        from skimage import img_as_ubyte

        predictions = make_animation(source_image,
                                     driving_video,
                                     generator,
                                     kp_detector,
                                     relative=True)

        #save resulting video
        imageio.mimsave(out + '.mp4',
                        [img_as_ubyte(frame) for frame in predictions])
        os.system("")
        status_text.set(f"File saved to {os.getcwd()}{out}.mp4")
    except Exception as e:
        status_text.set(f"{type(e)}: {e}")
Example #24
    config_path='./first-order-model/config/vox-256.yaml',
    checkpoint_path='./data/vox-cpk.pth.tar'
    if not args.adversary else "./data/vox-adv-cpk.pth.tar",
    cpu=args.cpu)

print("loading input")
source_image = imageio.imread(args.image)
driving_video = imageio.mimread('data/template.mp4', memtest=False)

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
print("making predictions")
predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=args.relative,
                             adapt_movement_scale=args.adapt_movement_scale,
                             cpu=args.cpu)
RESULTS_PATH.mkdir(exist_ok=True)

output_no_audio = RESULTS_PATH.joinpath(args.image.stem + ".mp4")
output_audio = RESULTS_PATH.joinpath(args.image.stem + "_audio.mp4")

print("saving", output_no_audio)
imageio.mimsave(output_no_audio,
                [img_as_ubyte(frame) for frame in predictions],
                fps=30)

print("adding audio", output_audio)
cmd = f"ffmpeg -y -i {output_no_audio} -i data/template.mp3 -codec copy -shortest {output_audio}"
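os.system(cmd)  # assumption: the original presumably executes the command here; added so the snippet runs end-to-end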
Example #25
def GenerateVideo():
    globs = globals()
    disable_widget_names = [
        "source_preview", "driving_preview", "generate_preview",
        'source_button', 'driving_button', 'saveto_button'
    ]
    for widget in disable_widget_names:
        globs[widget].configure(state='disabled')
    generate_button = globs['generate_button']
    generate_button.grid_remove()
    progress_var = globs['progress_var']
    progress_bar = globs['progress_bar']
    progress_bar.grid()
    progress_label = globs['progress_label']
    progress_label.grid()

    source_address, driving_address, generated_address = globs[
        "source_address"], globs["driving_address"], globs["generated_address"]
    #Load source image and driving video
    source_image = imageio.imread(source_address)
    reader = imageio.get_reader(driving_address)

    #Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]

    fps = reader.get_meta_data()['fps']
    driving_video = []
    try:
        for im in reader:
            driving_video.append(im)
    except RuntimeError:
        pass
    reader.close()

    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]

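    # progress_var and progress_label are not arguments of the stock
    # make_animation; this presumably targets a locally modified copy.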
    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=True,
                                 adapt_movement_scale=True,
                                 cpu=True,
                                 progress_var=progress_var,
                                 progress_label=progress_label)
    # relative and adapt_movement_scale can be changed to obtain different results

    # Save resulting video
    imageio.mimsave(generated_address,
                    [img_as_ubyte(frame) for frame in predictions],
                    fps=fps)

    # View generated video
    message = messagebox.askquestion(title="Generation successful.",
                                     message="Video at:\n" +
                                     generated_address +
                                     "\n\nView generated video?",
                                     icon="question")
    if 'yes' in message:
        os.startfile(generated_address)

    globs = globals()
    for widget in disable_widget_names:
        globs[widget].configure(state='normal')
    generate_button.grid()
    progress_var.set(0)
    progress_bar.grid_remove()
    progress_label.grid_remove()
Example #26
                                                 face_detector, previous_crop,
                                                 source_width / source_height)
            frames_left -= 1
            if frame is None:
                # This is the case where we don't see a face at the start of a video. We just skip those frames.
                frames_skipped += 1
                continue
            # Debug only:
            # cropped_input_writer.append_data(frame)
            frames.append(frame)

        if len(frames) > 0:
            video_empty = False
            animated_image = make_animation(source_image,
                                            frames,
                                            generator,
                                            kp_detector,
                                            relative=True)
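            # Paste each animated face frame back into the cropped region of
            # the full-resolution image.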
            for animated_frame in animated_image:
                resized_frame = img_as_ubyte(
                    resize(animated_frame,
                           (source_height, source_width))[..., :3])
                original_image = np.copy(uncropped_image)
                original_image[TRUMP_IMAGE_CROP[1]:TRUMP_IMAGE_CROP[1] +
                               TRUMP_IMAGE_CROP[3],
                               TRUMP_IMAGE_CROP[0]:TRUMP_IMAGE_CROP[0] +
                               TRUMP_IMAGE_CROP[2], :] = resized_frame
                video_writer.append_data(original_image)

    print(f"Num frames skipped: {frames_skipped}")