Пример #1
0
def generate_video(
        model,
        video_frames,
        merge=False,
        axis=1,
        verbose=True,
        horizontal_flip=False,
        relative=False,
        model_input_size=(256, 256),
        crop_bbox=None,
):
    """Run ``model`` over every frame and return the generated frames.

    Args:
        model: Object exposing ``predict(frame, relative=...)`` that maps an
            input image to an output image.
        video_frames: Sized iterable of image frames (H x W x C arrays).
        merge: If True, concatenate the source frame with the prediction
            along ``axis`` in each output frame.
        axis: Concatenation axis used when ``merge`` is True.
        verbose: Show a tqdm progress bar while processing.
        horizontal_flip: Mirror each input frame before inference.
        relative: Forwarded to ``model.predict``.
        model_input_size: (width, height) each frame is resized to before
            being fed to the model.
        crop_bbox: Optional (x1, y1, x2, y2) crop applied to every frame
            first; ``None`` or an empty sequence means no crop.

    Returns:
        List of output frames, each resized back to the (possibly cropped,
        possibly flipped) size of the first source frame.
    """
    # NOTE: default is None rather than a mutable [] so the default object
    # can never be shared/mutated across calls; `if crop_bbox:` treats both
    # None and [] as "no crop", preserving the old behavior.
    output = []
    stream_img_size = None
    frames_iter = (tqdm(video_frames, total=len(video_frames))
                   if verbose else video_frames)
    for frame in frames_iter:

        if crop_bbox:
            x1, y1, x2, y2 = crop_bbox
            frame = frame[y1:y2, x1:x2]

        if horizontal_flip:
            frame = cv2.flip(frame, 1)

        # Remember (width, height) of the first processed frame; every model
        # output is padded/resized back to this size.
        if stream_img_size is None:
            stream_img_size = frame.shape[1], frame.shape[0]

        input_frame = cv2.resize(frame, model_input_size)

        out = model.predict(input_frame, relative=relative)
        out = pad_img(out, stream_img_size)
        out = cv2.resize(out, stream_img_size)

        if merge:
            out = np.concatenate([frame, out], axis=axis)
        output.append(out)

    # Guard: with an empty input the loop never runs, so the original bare
    # `print(out.shape)` raised NameError. Only report when frames exist.
    if output:
        print(output[-1].shape)

    return output
Пример #2
0
                                           1)[0]
                cv2.putText(preview_frame, 'rocking',
                            (IMG_SIZE - textw - 10, 20), 0,
                            0.5 * IMG_SIZE / 256, (255, 255, 255), 1)
                out = loop_frames[loop_frame_idx]
                loop_frame_idx += rock_frame_delta
                if loop_frame_idx >= len(loop_frames) - 1:
                    rock_frame_delta = -1
                elif loop_frame_idx <= 0:
                    rock_frame_delta = 1

            cv2.imshow('cam', preview_frame[..., ::-1])

            if out is not None:
                if not opt.no_pad:
                    out = pad_img(out, stream_img_size)

                if output_flip:
                    out = cv2.flip(out, 1)

                if enable_vcam:
                    out = resize(out, stream_img_size)
                    stream.schedule_frame(out)

                cv2.imshow('avatarify', out[..., ::-1])

            fps_hist.append(tt.toc(total=True))
            if len(fps_hist) == 10:
                fps = 10 / (sum(fps_hist) / 1000)
                fps_hist = []
    except KeyboardInterrupt:
Пример #3
0
def generate_video(
    model,
    video_frames,
    merge=False,
    axis=1,
    verbose=True,
    horizontal_flip=False,
    relative=False,
    model_input_size=(256, 256),
    crop_bbox=None,
    watermark="app/watermark.png",
    debug=False,
):
    """Run ``model`` over every frame, optionally watermark, and return frames.

    Args:
        model: Object exposing ``predict(frame, relative=...)`` that maps an
            input image to an output image.
        video_frames: Sized iterable of image frames (H x W x C arrays).
        merge: If True, concatenate the source frame with the prediction
            along ``axis`` in each output frame.
        axis: Concatenation axis used when ``merge`` is True.
        verbose: Show a tqdm progress bar while processing.
        horizontal_flip: Mirror each input frame before inference.
        relative: Forwarded to ``model.predict``.
        model_input_size: (width, height) each frame is resized to before
            being fed to the model.
        crop_bbox: Optional (x1, y1, x2, y2) crop applied to every frame
            first; ``None`` or an empty sequence means no crop.
        watermark: Path of a watermark image overlaid onto every output
            frame (bottom-right, or horizontally centered when ``merge``);
            ``None`` disables watermarking.
        debug: If True, show the model input and output in cv2 windows.

    Returns:
        List of output frames resized back to the source frame size, with
        both dimensions forced even (required by common video encoders).
    """
    # NOTE: default is None rather than a mutable [] so the default object
    # can never be shared/mutated across calls.
    output = []
    stream_img_size = None
    frames_iter = (tqdm(video_frames, total=len(video_frames))
                   if verbose else video_frames)

    # Load the watermark once up front; keep the loaded image in its own
    # variable instead of clobbering the `watermark` path parameter.
    watermark_img = None
    if watermark is not None:
        watermark_img = io.read_image(watermark)
    final_watermark = None
    wm_x = wm_y = 0  # overlay position, computed on the first frame

    for frame in frames_iter:

        if crop_bbox:
            x1, y1, x2, y2 = crop_bbox
            frame = frame[y1:y2, x1:x2]

        if horizontal_flip:
            frame = cv2.flip(frame, 1)

        # Remember (width, height) of the first processed frame; every model
        # output is padded/resized back to this size.
        if stream_img_size is None:
            stream_img_size = frame.shape[1], frame.shape[0]

        input_frame = cv2.resize(frame, model_input_size)

        out = model.predict(input_frame, relative=relative)
        out = pad_img(out, stream_img_size)
        out = cv2.resize(out, stream_img_size)

        if merge:
            out = np.concatenate([frame, out], axis=axis)

        if watermark_img is not None:
            # Scale the watermark and compute its position just once; all
            # frames share the same output size.
            if final_watermark is None:
                out_h, out_w = out.shape[:2]
                wm_h, wm_w = watermark_img.shape[:2]

                final_h = out_h * 0.2  # watermark height = 20% of the output
                scale = final_h / wm_h
                final_watermark = cv2.resize(watermark_img,
                                             fx=scale,
                                             fy=scale,
                                             dsize=None)
                wm_x = out_w - int(scale * wm_w)
                wm_y = out_h - int(final_h)

                # Center horizontally when source and output sit side by side.
                if merge:
                    wm_x = out_w // 2 - int(scale * wm_w) // 2

            out = io.overlay(out, final_watermark, wm_x, wm_y)

        if debug:
            # Fixed window-title typo: was "Otuput".
            cv2.imshow("Output", out)
            cv2.imshow("Model input", input_frame)
            cv2.waitKey(1)

        # Video encoders typically require even dimensions; shave one pixel
        # off any odd side and resize once.
        h, w = out.shape[:2]
        resize_even = False
        if h % 2 != 0:
            h -= 1
            resize_even = True
        if w % 2 != 0:
            w -= 1
            resize_even = True
        if resize_even:
            out = cv2.resize(
                out,
                (w, h),
            )

        output.append(out)

    # Guard: with an empty input the loop never runs, so the original bare
    # `print(out.shape)` raised NameError. Only report when frames exist.
    if output:
        print(output[-1].shape)

    return output