Example #1
    def get_outputs(self):
        # Assumes module-level imports: ``os`` and moviepy's
        # ``VideoClip`` / ``VideoFileClip`` (e.g. from moviepy.editor).
        super(RandomImageClipJob, self).get_outputs()

        import numpy as np

        def make_frame(t):
            """Return a random RGB noise frame for time ``t``."""
            return np.random.random(self.frame_size + [3]) * 255

        clip = VideoClip(make_frame)

        # If a source video was configured, match its duration.
        if hasattr(self, 'video_filename'):
            clip = clip.set_duration(
                VideoFileClip(
                    os.path.join(self.video_location,
                                 self.video_filename)).duration)
        return clip
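
The pattern worth noting here is moviepy's frame-generator constructor: VideoClip(make_frame) builds a clip from any function that maps a time t in seconds to an RGB frame array. Below is a minimal self-contained sketch of the same idea outside the job class; the frame size, duration, fps, and output filename are arbitrary choices for illustration.

import numpy as np
from moviepy.editor import VideoClip

FRAME_SIZE = [360, 640]  # [height, width], arbitrary demo values

def make_frame(t):
    """Return a random RGB noise frame for time ``t``."""
    return (np.random.random(FRAME_SIZE + [3]) * 255).astype("uint8")

clip = VideoClip(make_frame, duration=2)  # a two-second clip
clip.write_videofile("noise.mp4", fps=24)

moviepy calls make_frame lazily, once per rendered frame; returning uint8 directly avoids the cast moviepy would otherwise perform when writing the file.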
Example #2
import base64
import uuid

import cv2
import numpy as np
from fastapi.security import HTTPAuthorizationCredentials
from moviepy.editor import VideoClip

# ``types``, ``security``, ``io`` (an app-local I/O helper module, not the
# stdlib ``io``), ``model``, ``model_funs``, ``model_input_size``,
# ``handle_image_request`` and ``Response`` are assumed to be provided by
# the surrounding application.
def run_inference(
    request: types.Request,
    credentials: HTTPAuthorizationCredentials = security.http_credentials,
):
    print("************ Getting avatar image...")
    avatar, _ = handle_image_request(request.avatar)
    avatar = cv2.resize(avatar, model_input_size)
    if avatar.ndim == 2:
        # Grayscale avatar: replicate the single channel into 3 RGB channels.
        avatar = np.tile(avatar[..., None], [1, 1, 3])
    print("************ Done!")

    print("************* Setting avatar image to model ...")
    model.set_source_image(avatar)
    print("************ Done!")

    print("************* Getting video frames ...")
    video_bytes = base64.b64decode(request.video.content)
    video_frames = list(io.bytes2video(video_bytes, fps=request.fps))
    print("************ Done!")

    # Debug aid: uncomment to limit processing to the first five frames.
    # video_frames = video_frames[:5]

    # Persist the original upload under a unique name.
    video_name = uuid.uuid4().hex
    io.write_fn(f"app/static/{video_name}_orig.webm", video_bytes)

    video_path = f"app/static/{video_name}.mp4"

    print("************* Getting audio Object ...")
    audio = io.get_audio_obj(video_bytes)
    print("************ Done!")

    # Detect the face once on the first frame; the crop box is passed to the
    # generator below and reused for the whole clip.
    bbox = model.get_face_bbox(video_frames[0])

    print("************* Getting transform video ...")
    output_frames = model_funs.generate_video(
        model,
        video_frames,
        merge=request.merge,
        axis=request.axis,
        verbose=True,
        model_input_size=model_input_size,
        horizontal_flip=request.flip,
        relative=request.transferFace,
        # relative=True,
        crop_bbox=bbox,
        # debug=False,
    )
    model.reset_frames()
    print("************ Done!")

    print("************* Getting video in moviepy ...")

    def gen_video(t):
        # Map clip time ``t`` (seconds) to the nearest generated frame,
        # clamping at the last frame.
        return output_frames[min(int(t * request.fps), len(output_frames) - 1)]

    video = VideoClip(gen_video, duration=len(output_frames) / request.fps)
    print("************ Done!")

    # The audio and video durations can differ slightly, so trim both to the
    # shorter of the two before muxing.
    final_duration = min(video.duration, audio.duration)
    video = video.set_duration(final_duration)
    audio = audio.set_duration(final_duration)

    print("************* Setting audio to video ...")
    video = video.set_audio(audio)
    print("************ Done!")

    print("************* Saving and decoding video ...")
    video.write_videofile(video_path, fps=request.fps)

    # Debug alternative: bypass the model and write raw frames without audio.
    # output_frames = video_frames
    # io.write_video(video_path, output_frames)

    video_bytes = io.read_fn(video_path)
    # Base64-encode the rendered file so it can travel in the JSON response.
    result = base64.b64encode(video_bytes).decode()
    print("************ Done!")

    return Response(video=types.Video(content=result))
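
For context, here is how a client might call this handler, assuming it is mounted as a FastAPI POST route. The /inference path, the payload schema, and the bearer-token header are assumptions inferred from the field accesses and the HTTPAuthorizationCredentials dependency above, not confirmed by the source.

import base64
import requests  # assumed client-side dependency

with open("driver.mp4", "rb") as f:
    video_b64 = base64.b64encode(f.read()).decode()
with open("avatar.jpg", "rb") as f:
    avatar_b64 = base64.b64encode(f.read()).decode()

payload = {
    # Field names mirror the attributes read from `request` above; the
    # exact pydantic schema is an assumption.
    "avatar": {"content": avatar_b64},
    "video": {"content": video_b64},
    "fps": 25,
    "merge": False,
    "axis": 1,
    "flip": False,
    "transferFace": True,
}

resp = requests.post(
    "http://localhost:8000/inference",  # assumed route
    json=payload,
    headers={"Authorization": "Bearer <token>"},  # handler expects HTTP credentials
)

# The handler returns Response(video=types.Video(content=...)), so the
# rendered video is expected under resp.json()["video"]["content"].
result_bytes = base64.b64decode(resp.json()["video"]["content"])
with open("result.mp4", "wb") as f:
    f.write(result_bytes)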