Example #1
0
def PedestrianDetection(img):
    """Run person detection on *img* and return the detected boxes.

    The configured backend decides the required image representation:
    the Darknet path wants an OpenCV (ndarray) image, the Keras path a
    PIL image, so *img* is converted accordingly before detection.

    Returns an empty ``np.array`` when ``detect_person`` is disabled in
    the config; otherwise whatever ``YOLO_model.detect_image`` yields as
    its box array. Confidence scores are discarded.
    """
    method = cfg['view']['detection_method'].upper()
    if method == 'DARKNET' and not isOpencvImg(img):
        img = PIL2Opencv(img)
    elif method == 'KERAS' and isOpencvImg(img):
        img = Opencv2PIL(img)

    boxes = np.array([])
    if cfg['view'].getboolean('detect_person'):
        boxes, _scores = YOLO_model.detect_image(img)
    return boxes
Example #2
0
def put_boxes(img, boxes):
    """Draw a red rectangle on *img* for every detection box.

    Parameters
    ----------
    img : PIL image or OpenCV ndarray
        Converted to OpenCV format if needed before drawing.
    boxes : sequence
        Box rows whose first four entries are (x1, y1, x2, y2) corner
        coordinates.

    Returns the OpenCV image with the rectangles drawn in place.
    """
    if not isOpencvImg(img):
        img = PIL2Opencv(img)
    # Iterate the rows directly instead of indexing via range(len(...));
    # the previous `len(boxes) > 0` guard was redundant — an empty
    # sequence simply skips the loop.
    for box in boxes:
        x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
        # (0, 0, 255) is red in BGR; thickness 2 px.
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    return img
Example #3
0
def image():
    """Single-image detection endpoint.

    GET renders the upload page; POST expects a base64 image in the
    JSON body, runs pedestrian detection, draws the boxes, and returns
    the annotated image re-encoded as base64 JSON.
    """
    if request.method != 'POST':
        return render_template('image_part.html')

    # Decode the posted base64 payload into a PIL image.
    img = base64_to_pil(request.json)

    # (A horizontal mirror flip was considered here but is disabled.)
    boxes = PedestrianDetection(img)
    img = put_boxes(img, boxes)

    # Drawing may have converted the image to OpenCV format; go back
    # to PIL before serializing.
    if isOpencvImg(img):
        img = Opencv2PIL(img)
    return jsonify(result=pil_to_base64(img))
Example #4
0
def put_FPS_person(img, curr_fps, person_num):
    """Overlay the current FPS and person count on *img*.

    Converts *img* to OpenCV format when necessary, then writes one
    line of blue text per metric down the top-left corner and returns
    the annotated image.
    """
    if not isOpencvImg(img):
        img = PIL2Opencv(img)

    # One overlay line per metric, stacked vertically.
    overlay_lines = [f"FPS: {curr_fps}", f"person: {person_num}"]
    top_y, line_height = 15, 20
    for row, line in enumerate(overlay_lines):
        cv2.putText(img,
                    text=line,
                    org=(3, top_y + row * line_height),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50,
                    color=(255, 0, 0),  # blue in BGR
                    thickness=2)
    return img
Example #5
0
def webcam_image_3():
    """Per-frame webcam detection endpoint.

    POST body is JSON with ``file`` (base64 frame), ``camera_id`` and
    ``frame_id``. Runs pedestrian detection, overlays boxes plus an
    FPS/person-count readout, and returns the annotated frame as
    base64 alongside the echoed ``frame_id``. Non-POST requests and
    frames arriving while processing is gated off return an empty
    result.

    Uses module globals: ``is_process_next`` (processing gate) and
    ``last_time`` (timestamp of the previous frame, for FPS).
    """
    global is_process_next
    global last_time
    if request.method == 'POST':
        if not is_process_next:
            return jsonify(result='')
        # is_process_next = False

        get_data = request.json

        img = base64_to_pil(get_data['file'])

        # Camera 0: mirror horizontally (selfie-style view).
        if get_data['camera_id'] == 0:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)

        # YOLO detection
        boxes = PedestrianDetection(img)

        # FPS from the wall-clock delta between consecutive frames.
        now_time = timer()
        seconds = now_time - last_time
        last_time = now_time
        # Guard against a zero delta (frames inside timer resolution)
        # which previously raised ZeroDivisionError.
        curr_fps = round(1.0 / seconds, 2) if seconds > 0 else 0.0

        tes_fps(curr_fps)

        img = put_boxes(img, boxes)
        img = put_FPS_person(img, curr_fps, len(boxes))
        if isOpencvImg(img):
            img = Opencv2PIL(img)
        result = pil_to_base64(img)
        # Serialize the result, you can add additional fields
        is_process_next = True
        return jsonify(frame_id=get_data['frame_id'], result=result)
    return jsonify(result='')
Example #6
0
def video():
    """Whole-video detection endpoint.

    GET renders the upload form. POST uploads the posted video to
    storage, runs pedestrian detection frame by frame, writes an
    annotated ``*_process`` copy next to the original, and responds
    with view/download URLs for the processed file.

    Updates module globals used for progress reporting elsewhere:
    ``num_frames`` (total frames), ``percent_complete_frame`` (frames
    processed so far) and ``elap`` (seconds spent on the last frame).

    Raises IOError when the uploaded file cannot be opened as video.
    """
    global percent_complete_frame
    global num_frames
    global elap
    percent_complete_frame = 0
    num_frames = 0
    elap = 0

    if request.method == "POST":

        file = request.files["file"]

        print("File uploaded")
        print(file)

        my_upload = storage.upload(file)

        video_file_name = my_upload.name
        video_file_extension = my_upload.extension
        video_file = app.config.get("STORAGE_CONTAINER") + video_file_name

        vid = cv2.VideoCapture(video_file)
        if not vid.isOpened():
            raise IOError("Couldn't open webcam or video")

        num_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
        video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
        video_fps = vid.get(cv2.CAP_PROP_FPS)
        video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                      int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        outVideoName = video_file_name.split(
            '.')[0] + "_process." + video_file_extension
        output_path = app.config.get("STORAGE_CONTAINER") + outVideoName
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)

        # try/finally so the capture and writer are released even when
        # detection raises mid-stream (previously they leaked).
        try:
            while vid.isOpened():
                ret, img = vid.read()
                if not ret:
                    break

                # Time the per-frame processing for progress reporting.
                start = time.time()
                boxes = PedestrianDetection(img)
                img = put_boxes(img, boxes)
                # Ensure OpenCV format before writing the frame out.
                if not isOpencvImg(img):
                    img = PIL2Opencv(img)
                elap = time.time() - start

                out.write(img)
                percent_complete_frame += 1
        finally:
            vid.release()
            out.release()

        obj = storage.get(outVideoName)
        view_url = obj.url
        download_url = obj.download_url()
        res = make_response(
            jsonify({
                "message": "Process Done",
                'view_url': view_url,
                'download_url': download_url
            }), 200)
        return res

    return render_template("upload_video.html")