示例#1
0
def predict(token):
    """Run the predictor registered under *token* on the uploaded image.

    Reads the 'image' file field from the current Flask request, preprocesses
    it (BGR->RGB flip, proportional crop, resize to the model input size),
    runs inference, and returns the result as a base64-encoded JPEG.

    Args:
        token: Key into ``app.processes``; the entry's ``'predictor'`` is
            used for inference.

    Returns:
        A ``predict_response`` with:
          * SUCCESS + ``image`` when the predictor produced an output frame,
          * SUCCESS alone when the predictor returned None,
          * NO_PREDICTOR when *token* is unknown,
          * INPUT_IMAGE_ERROR when the upload is missing or undecodable,
          * a generic error response (message from the exception) otherwise.
    """
    try:
        vprint("Processes: ", app.processes)
        vprint("Predictors: ", app.predictors)
        IMG_SIZE = 256
        frame_proportion = 0.9
        frame_offset_x = 0
        frame_offset_y = 0
        # .get() instead of ['image']: a missing field yields None (handled
        # below with INPUT_IMAGE_ERROR) rather than aborting the request.
        img = request.files.get('image')
        if img is not None:
            # np.fromstring is deprecated for binary data; frombuffer is the
            # zero-copy replacement.
            img_np = np.frombuffer(img.read(), np.uint8)
            frame = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
            if frame is None:
                # imdecode returns None for corrupted/unsupported data;
                # report it explicitly instead of crashing in crop().
                return predict_response(
                    status=afy_flask_predict_status.INPUT_IMAGE_ERROR,
                    error="Invalid image / image corrupted")
            if token in app.processes:
                predictor = app.processes[token]['predictor']
                frame = frame[..., ::-1]  # BGR -> RGB for the predictor
                frame, _ = crop(frame,
                                p=frame_proportion,
                                offset_x=frame_offset_x,
                                offset_y=frame_offset_y)
                # Model expects IMG_SIZE x IMG_SIZE; drop any alpha channel.
                frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]
                out = predictor.predict(frame)
                if out is not None:
                    out = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
                    _, out = cv2.imencode('.jpg', out)
                    out = base64.b64encode(out.tobytes()).decode("utf-8")
                    return predict_response(
                        status=afy_flask_predict_status.SUCCESS, image=out)
                return predict_response(
                    status=afy_flask_predict_status.SUCCESS)
            return predict_response(
                status=afy_flask_predict_status.NO_PREDICTOR,
                error="Predictor not available")
        return predict_response(
            status=afy_flask_predict_status.INPUT_IMAGE_ERROR,
            error="Invalid image / image corrupted")
    except Exception as e:
        # Top-level request boundary: log (when verbose) and return a
        # structured error instead of a 500.
        if app.verbose:
            traceback.print_exc()
        return predict_response(error=str(e))
示例#2
0
            # Per-iteration stage timings (units are whatever tt.toc()
            # returns — presumably milliseconds; confirm against the Tic/Toc
            # helper's implementation).
            timing = {'preproc': 0, 'predict': 0, 'postproc': 0}

            # Set when a new keyframe is adopted this iteration; presumably
            # used later for on-screen feedback — TODO confirm downstream use.
            green_overlay = False

            tt.tic()

            ret, frame = cap.read()
            if not ret:
                log("Can't receive frame (stream end?). Exiting ...")
                break

            # OpenCV delivers BGR; reverse the channel axis to get RGB.
            frame = frame[..., ::-1]
            # Untouched copy of the full frame.
            # NOTE(review): frame_orig is not used in the visible span —
            # presumably consumed further down the loop; verify.
            frame_orig = frame.copy()

            frame, lrudwh = crop(frame,
                                 p=frame_proportion,
                                 offset_x=frame_offset_x,
                                 offset_y=frame_offset_y)
            frame_lrudwh = lrudwh
            # Resize to the model's square input and drop any alpha channel.
            frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]

            if find_keyframe:
                # Adopt the current frame as the new reference keyframe when
                # the heuristic scores it better than the current one.
                if is_new_frame_better(avatar, frame, predictor):
                    log("Taking new frame!")
                    green_overlay = True
                    predictor.reset_frames()

            timing['preproc'] = tt.toc()

            if passthrough:
                # Bypass inference: output the preprocessed camera frame.
                out = frame
            elif is_calibrated:
示例#3
0
            # Output video writer.
            # NOTE(review): the original (Chinese) comment claimed 25.0 FPS
            # and 512x512 resolution, but the writer is created with 15.0 FPS
            # and (width, height) — confirm which values are intended.
            inference_output = cv2.VideoWriter('video_output.mp4', fourcc, 15.0, (width, height))
            record_flag = False
            avatar = None
            change_avatar(predictor, avatars[cur_ava])
            # NOTE(review): hard-coded absolute input path — presumably a
            # local debugging setup; consider making it configurable.
            cap = cv2.VideoCapture(r"C:\Users\taros\avatarify-python\input.mp4")
            while(cap.isOpened()):

                ret, frame = cap.read()
                if ret:
                    log("reading frame")
                    # OpenCV reads BGR; flip the channel axis to RGB.
                    frame = frame[..., ::-1]
                    # NOTE(review): frame_orig is unused in the visible span.
                    frame_orig = frame.copy()


                    frame, (frame_offset_x, frame_offset_y) = crop(frame, p=frame_proportion, offset_x=frame_offset_x, offset_y=frame_offset_y)
                    frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]

                    out = predictor.predict(frame)
                    if out is not None:
                        if not opt.no_pad:
                            out = pad_img(out, stream_img_size)

                        # Back to BGR and to the writer's frame size.
                        output_frame = cv2.resize(out[..., ::-1], (width, height))
                        # Write the frame to the output video.
                        inference_output.write(output_frame)
                else:
                    log("stopping process")
                    inference_output.release()
                    cv2.destroyAllWindows()
                    # NOTE(review): no break / cap.release() visible here —
                    # if nothing follows in the loop body, cap.isOpened()
                    # stays True and the loop spins on failed reads while
                    # releasing the writer repeatedly; confirm a break exists
                    # past this span.