Example #1
def process_motion_frame(q, f, tick, ts, mfa=False, rotateAng=False, width=False, gBlur=(9, 9)):
    '''
    This function defines the image processing techniques that are applied,
    in a new thread, each time a frame is retrieved from the camera.
    '''
    rects_sal = []
    fgmask = None
    f_copy = f.copy()
    if rotateAng is not False and rotateAng != 0:
        f = imutils.rotate(f, angle=rotateAng)
    if width is not False:
        f = imutils.resize(f, width=width)
    # blur & bg sub
    try:
        fgmask = fgbg.apply(cv2.GaussianBlur(f, gBlur, 0), learningRate=config.computing.learning_rate)
    except:
        print("-"*60)
        traceback.print_exc(file=sys.stdout)
        print("-"*60)
        raise

    # get our frame outlines
    f_rects, rects_mot = get_motions(f, fgmask, thickness=1)
    rects_sal.extend(rects_mot)
    num_motion = len(rects_mot)

    # NOTE: the motion gate is currently disabled; the commented-out condition
    # would skip further processing when no motion of any kind was detected.
    if True:  # if num_motion > 0 or mfa is True:
        num_bodies = 0
        num_faces = 0
        if config.computing.body_detection_en or config.computing.face_detection_en:
            # generate a histogram-equalized grayscale image if an enabled
            # detector needs it
            f_bw = cv2.equalizeHist(cv2.cvtColor(f, cv2.COLOR_BGR2GRAY))
            if config.computing.body_detection_en:
                fBody, rectsBody = detectPerson(f, color=(255, 0, 0))
                if len(rectsBody) > 0:
                    f_rects = cv2.add(f_rects, fBody)
                    num_bodies = len(rectsBody)
                    rects_sal.extend(rectsBody)

            if config.computing.face_detection_en:
                fFace, rectsFace = detectFace(f_bw, color=(0, 255, 0))
                if len(rectsFace) > 0:
                    f_rects = cv2.add(f_rects, fFace)
                    num_faces = len(rectsFace)
                    rects_sal.extend(rectsFace)

        f_rects = imutils.resize(f_rects, width=f_copy.shape[1])
        q.put({"f": f_copy, "ts": ts, "rects_sal": rects_sal, "sz_scaled": getsize(
            f), "num_motion": num_motion, "num_bodies": num_bodies, "num_faces": num_faces})

    return f_copy, f_rects, fgmask, rects_sal, tick, ts
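
A minimal usage sketch for the function above, assuming the module-level objects the snippet relies on (the fgbg background subtractor, config, and the detector helpers) already exist. The capture loop, queue name, and per-frame thread dispatch shown here are illustrative assumptions, not part of the original code.

# Sketch only: dispatches process_motion_frame to a worker thread per frame.
# The names cap, fgbg, and frame_queue are assumed for illustration.
import queue
import threading
from datetime import datetime, timezone

import cv2

fgbg = cv2.createBackgroundSubtractorMOG2()  # assumed background subtractor
cap = cv2.VideoCapture(0)                    # assumed camera source
frame_queue = queue.Queue(maxsize=8)

tick = 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    tick += 1
    ts = datetime.now(timezone.utc).isoformat()
    threading.Thread(
        target=process_motion_frame,
        args=(frame_queue, frame, tick, ts),
        kwargs={"width": 640},  # illustrative downscale width
        daemon=True,
    ).start()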
Example #2
def process_response(q):
    '''
    This function defines the work of the "Response Thread", which runs
    fully independently of the main thread. It handles extracting the salient
    regions from the original (higher-res) frame and saving the cropped-out
    portion locally and/or uploading it to the cloud.
    '''
    import boto3
    s3 = boto3.resource("s3")
    last_up = clock()
    # initialize our largest detected area to 0
    biggest_crop_area = 0
    # ignore SIGINT in this worker; the main process handles interrupts
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while cap.isOpened():
        # receive the data
        data = q.get()
        f = data["f"]
        rects_sal = data["rects_sal"]
        sz_scaled = data["sz_scaled"]
        ts = data["ts"]
        sz = getsize(f)
        rr = (float(sz[0]) / sz_scaled[0], float(sz[1]) / sz_scaled[1])
        # rescale the rectangular dimensions to our original resolution
        bx = []
        for x, y, w, h in rects_sal:
            tmp = (x * rr[0], y * rr[1], w * rr[0], h * rr[1])
            bx.append(tuple(int(x) for x in tmp))
        if config.storage.save_cloud_crops or config.storage.save_local_crops:
            if len(bx) > 0:
                # overall bounding box spanning all salient rectangles
                xx = (min(min(x[0], x[0] + x[2]) for x in bx),
                      max(max(x[0], x[0] + x[2]) for x in bx))
                yy = (min(min(x[1], x[1] + x[3]) for x in bx),
                      max(max(x[1], x[1] + x[3]) for x in bx))
                # only continue if the cropped area is non-zero
                if abs(yy[0] - yy[1]) > 0 and abs(xx[0] - xx[1]) > 0:
                    f_mask = f[min(yy):max(yy), min(xx):max(xx)]
                    cropped_area = (max(xx) - min(xx)) * (max(yy) - min(yy))
                    # reset the largest-crop tracker once the minimum upload
                    # delay has elapsed
                    if (clock() - last_up) > config.storage.min_upload_delay:
                        biggest_crop_area = 0
                    # Always send frames that contain detected people/faces;
                    # if people/face detection is disabled in the config, those
                    # counts stay at zero and this check has no effect.
                    if (cropped_area > biggest_crop_area) or data["num_bodies"] > 0 or data["num_faces"] > 0:
                        biggest_crop_area = cropped_area
                        root_path = join(sDir, "cropped-regions", date_pretty())
                        fn = root_path + "_regions.jpg"
                        res, img = cv2.imencode(".jpg", f_mask,
                                                [int(cv2.IMWRITE_JPEG_QUALITY),
                                                 config.storage.encoding_quality])
                        if res and config.storage.save_local_crops:
                            print("saving frame locally: {}".format(root_path))
                            # todo: save locally
                        if res and config.storage.save_cloud_crops:
                            print("uploading frame to s3: {}".format(basename(fn)))
                            print("-- time since last upload: {}s".format(clock() - last_up))
                            last_up = clock()
                            img = img.tobytes()
                            s3.Object("cloudguard-in",
                                      basename(fn)).put(Body=img,
                                                        Metadata={"Content-Type": "Image/jpeg",
                                                                  "Number-Detected-Motion": str(data["num_motion"]),
                                                                  "Number-Detected-Bodies": str(data["num_bodies"]),
                                                                  "Number-Detected-Faces": str(data["num_faces"]),
                                                                  "Captured-Timestamp": str(ts),
                                                                  "Captured-Timestamp-Timezone": "UTC"})