Example #1
def handle_image_request(image: types.Image):
    if image.content is None and image.source is None:
        raise HTTPException(
            status_code=400,
            detail="Either image content (base64 bytes) or an image source with a URI must be provided.",
        )

    try:
        init = time.time()
        if image.content is not None:
            # Inline payload: the content field carries base64-encoded bytes.
            image_bytes = image.content
            decoded = io.bytes2image(base64.b64decode(image_bytes))
        else:
            # URI payload: read the raw bytes, decode, and keep a base64 copy.
            image_bytes = io.read_fn(image.source.imageUri)
            decoded = io.bytes2image(image_bytes)
            image_bytes = base64.b64encode(image_bytes)
        print(f"Elapsed reading image: {time.time() - init}")
    except Exception as e:
        # Only the URI branch has a source; don't dereference image.source otherwise.
        origin = image.source.imageUri if image.source is not None else "<inline content>"
        raise HTTPException(
            status_code=500,
            detail=f"Image {origin} could not be read. Error: {e}",
        )

    return decoded, image_bytes
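
A minimal sketch of the two payload shapes this handler accepts, mirroring the JSON built in Example #3. types.Source, the field names, and raw_bytes are assumptions based on that snippet, not confirmed API:

# Hypothetical usage; the constructors mirror the request payload in Example #3,
# and raw_bytes stands in for previously read image bytes.
inline = types.Image(content=base64.b64encode(raw_bytes).decode(), source=None)
by_uri = types.Image(content=None, source=types.Source(imageUri="avatars/jobs.jpg"))
decoded, b64_payload = handle_image_request(by_uri)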
Example #2
def run_inference(
    request: types.Request,
    credentials: HTTPAuthorizationCredentials = security.http_credentials,
):
    print("************ Getting avatar image...")
    avatar, _ = handle_image_request(request.avatar)
    avatar = cv2.resize(avatar, model_input_size)
    if avatar.ndim == 2:
        # Grayscale avatar: replicate the single channel into three RGB channels.
        avatar = np.tile(avatar[..., None], [1, 1, 3])
    print("************ Done!")

    print("************* Setting avatar image to model ...")
    model.set_source_image(avatar)
    print("************ Done!")

    print("************* Getting video frames ...")
    video_bytes = base64.b64decode(request.video.content)
    video_frames = list(io.bytes2video(video_bytes, fps=request.fps))
    print("************ Done!")

    # video_frames = video_frames[:5]

    video_name = uuid.uuid4().hex
    io.write_fn(f"app/static/{video_name}_orig.webm", video_bytes)

    video_path = f"app/static/{video_name}.mp4"

    print("************* Getting audio Object ...")
    audio = io.get_audio_obj(video_bytes)
    print("************ Done!")

    # Estimate the face crop box from the first frame and reuse it for the whole clip.
    bbox = model.get_face_bbox(video_frames[0])

    print("************* Getting transform video ...")
    output_frames = model_funs.generate_video(
        model,
        video_frames,
        merge=request.merge,
        axis=request.axis,
        verbose=True,
        model_input_size=model_input_size,
        horizontal_flip=request.flip,
        relative=request.transferFace,
        # relative=True,
        crop_bbox=bbox,
        # debug=False,
    )
    model.reset_frames()
    print("************ Done!")

    print("************* Getting video in moviepy ...")

    def gen_video(t):
        # MoviePy make_frame callback: map time t to a frame index, clamped to the last frame.
        return output_frames[min(int(t * request.fps), len(output_frames) - 1)]

    video = VideoClip(gen_video, duration=len(output_frames) / request.fps)
    print("************ Done!")

    # Audio and video durations can differ slightly, so clamp both to the shorter one.
    final_duration = min(video.duration, audio.duration)
    video = video.set_duration(final_duration)
    audio = audio.set_duration(final_duration)

    print("************* Setting audio to video ...")
    video = video.set_audio(audio)
    print("************ Done!")

    print("************* Saving and decoding video ...")
    video.write_videofile(video_path, fps=request.fps)

    # output_frames = video_frames
    # io.write_video(video_path, output_frames)

    video_bytes = io.read_fn(video_path)
    result = base64.b64encode(video_bytes).decode()
    print("************ Done!")

    return Response(video=types.Video(content=result))
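
The endpoint returns the rendered MP4 re-encoded as base64; a client would reverse that step roughly as below (a sketch that assumes the Response serializes to JSON with a video.content field):

# Hypothetical client-side decode of the response payload.
video_b64 = resp_json["video"]["content"]
with open("result.mp4", "wb") as f:
    f.write(base64.b64decode(video_b64))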
Example #3
if os.path.exists(test_image_path):
    img = read_image(test_image_path)
    img = cv2.resize(img, fx=0.5, fy=0.5, dsize=None)

    image = image2bytes(img)
    image = base64.b64encode(image).decode()
else:
    image = None

# with tempfile.TemporaryFile(suffix=".mp4") as fp:
# fp = "temp.mp4"  # dead assignment in the original, immediately overwritten below
fp = "test.webm"
# video_frames = get_frames_from_camera(0)
# write_video(fp, video_frames)
video_bytes = read_fn(fp)
video = base64.b64encode(video_bytes).decode()

data = {
    "avatar": {
        "content": image,
        "source": {"imageUri": test_image_path},
    },
    "video": {"content": video},
    "merge": True,
    "fps": 30,
    "transferFace": True,
}  # the original snippet is truncated here; any further keys are not shown
Example #4
# The import block is truncated in the original snippet; the names below are
# assumed to come from the app's io helpers, matching how they are used here.
from app.io import read_fn, bytes2video, get_audio_obj
import app.model_funs as model_funs

config_path = "fomm/config/vox-adv-256.yaml"
checkpoint_path = "vox-adv-cpk.pth.tar"

model = PredictorLocal(config_path, checkpoint_path, adapt_movement_scale=True)
IMG_SIZE = 256
size = (IMG_SIZE, IMG_SIZE)

avatar = cv2.imread("avatars/jobs.jpg")[..., ::-1]
avatar = cv2.imread("avatars/mona.jpg")[..., ::-1]
avatar = cv2.resize(avatar, size)
model.set_source_image(avatar)

video_path = "test.webm"
video_frames = list(bytes2video(read_fn(video_path)))
bbox = model.get_face_bbox(video_frames[0])

output_frames = model_funs.generate_video(
    model,
    video_frames,
    merge=False,
    verbose=True,
    horizontal_flip=True,
    relative=True,
    crop_bbox=bbox,
    debug=True,
)
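
Example #4 stops at frame generation; to inspect the result, the frames could be written back out with the write_video helper that appears commented out in Examples #2 and #3 (its signature is assumed from those call sites):

# Assumes write_video(path, frames) from the same io helpers.
write_video("generated.mp4", output_frames)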
Example #5
# The import block is truncated in the original snippet; the names below are
# assumed to come from the app's io helpers, matching how they are used here.
from app.io import read_fn, get_audio_obj
from moviepy.editor import *
import tempfile
import ffmpeg  # used below but missing from the original snippet

video_path = "download.webm"

# video_path = "sisa.webm"
# audio = AudioFileClip(video_path)
# video = VideoFileClip(video_path, fps_source="fps")
# print(video)
# print(len([frame for frame in video.iter_frames()]))

# ffmpeg -i current.webm -c copy -fflags +genpts new.webm

video_bytes = read_fn(video_path)
print(get_audio_obj(video_bytes))
exit(0)  # early exit left in the original: the remux experiment below never runs

with tempfile.TemporaryDirectory() as temp_dir:
    print(temp_dir)
    tmp_video = temp_dir + "/sisa.webm"
    tmp_video2 = temp_dir + "/sisa2.webm"
    with open(tmp_video, "wb") as f:
        f.write(video_bytes)

    stream = ffmpeg.input(tmp_video)  # renamed from `input` to avoid shadowing the builtin
    out = ffmpeg.output(
        stream,
        tmp_video2,
        vcodec="copy",
        acodec="copy",
    )  # the original snippet is truncated here; closing the call for completeness
    out.run()