def process_response(q):
    '''
    Entry point of the "response" worker, which runs in its own process,
    fully independent of the main thread. It extracts regions from the
    original (higher-resolution) frame and saves the cropped-out portion
    locally and/or uploads it to the cloud.
    '''
    import boto3
    s3 = boto3.resource("s3")
    last_up = clock()
    # initialize our largest detected area to 0
    biggest_crop_area = 0
    # ignore SIGINT here; shutdown is coordinated by the main process
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while cap.isOpened():
        # receive the data produced by the main thread
        data = q.get()
        f = data["f"]
        rects_sal = data["rects_sal"]
        sz_scaled = data["sz_scaled"]
        ts = data["ts"]
        sz = getsize(f)
        rr = (float(sz[0]) / sz_scaled[0], float(sz[1]) / sz_scaled[1])
        # rescale the rectangle coordinates to the original resolution
        bx = []
        for x, y, w, h in rects_sal:
            tmp = (x * rr[0], y * rr[1], w * rr[0], h * rr[1])
            bx.append(tuple(int(v) for v in tmp))
        if config.storage.save_cloud_crops or config.storage.save_local_crops:
            if len(bx) > 0:
                # bounding box that encloses all rescaled rectangles
                xx = (min([min(b[0], b[0] + b[2]) for b in bx]),
                      max([max(b[0], b[0] + b[2]) for b in bx]))
                yy = (min([min(b[1], b[1] + b[3]) for b in bx]),
                      max([max(b[1], b[1] + b[3]) for b in bx]))
                # only continue if the area of the crop is not zero
                if abs(yy[0] - yy[1]) > 0 and abs(xx[0] - xx[1]) > 0:
                    f_mask = f[min(yy):max(yy), min(xx):max(xx)]
                    cropped_area = (max(xx) - min(xx)) * (max(yy) - min(yy))
                    # reset the "largest crop so far" once the minimum upload
                    # delay has elapsed
                    if (clock() - last_up) > config.storage.min_upload_delay:
                        biggest_crop_area = 0
                    # Always send frames that contain detected people/faces;
                    # if body/face detection is disabled in the config, those
                    # counts are zero and do not affect this check.
                    if (cropped_area > biggest_crop_area) or data["num_bodies"] > 0 or data["num_faces"] > 0:
                        biggest_crop_area = cropped_area
                        root_path = join(sDir, "cropped-regions", date_pretty())
                        fn = root_path + "_regions.jpg"
                        res, img = cv2.imencode(
                            ".jpg", f_mask,
                            [int(cv2.IMWRITE_JPEG_QUALITY), config.storage.encoding_quality])
                        if res and config.storage.save_local_crops:
                            print("saving frame locally: {}".format(root_path))
                            # todo: save locally
                        if res and config.storage.save_cloud_crops:
                            print("uploading frame to s3: {}".format(basename(fn)))
                            print("-- time since last upload: {}s".format(clock() - last_up))
                            last_up = clock()
                            s3.Object("cloudguard-in", basename(fn)).put(
                                Body=img.tobytes(),
                                Metadata={
                                    "Content-Type": "image/jpeg",
                                    "Number-Detected-Motion": str(data["num_motion"]),
                                    "Number-Detected-Bodies": str(data["num_bodies"]),
                                    "Number-Detected-Faces": str(data["num_faces"]),
                                    "Captured-Timestamp": str(ts),
                                    "Captured-Timestamp-Timezone": "UTC"})
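
# A minimal sketch, not part of the original code: the "todo: save locally"
# branch in process_response() is left unimplemented above. Assuming the
# hypothetical helper name save_crop_locally, and reusing the fn/img values
# already computed there (e.g. calling save_crop_locally(fn, img) in that
# branch), the local save could look like this:
def save_crop_locally(fn, encoded_img):
    '''Write a JPEG buffer returned by cv2.imencode() to disk; return the path.'''
    import os
    out_dir = os.path.dirname(fn)
    # create the target directory (e.g. the cropped-regions folder) if missing
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    with open(fn, "wb") as fh:
        fh.write(encoded_img.tobytes())
    return fn

# For reference, process_response() expects each queue message to be a dict
# carrying the keys it reads plus the detection counts used in the S3
# metadata. A producer-side sketch (the variable names below are
# placeholders, not names from the original code) might be:
#
#   q.put({
#       "f": frame,                # full-resolution frame (numpy array)
#       "rects_sal": rects,        # salient rectangles in scaled coordinates
#       "sz_scaled": scaled_size,  # size of the downscaled detection frame
#       "ts": timestamp,           # capture timestamp (UTC)
#       "num_motion": n_motion,    # detection counts for the S3 metadata
#       "num_bodies": n_bodies,
#       "num_faces": n_faces,
#   })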
    return f_copy, f_rects, fgmask, rects_sal, tick, ts


if __name__ == '__main__':
    '''
    Main entry point.
    '''
    threadN = cpu_count()
    pool = ThreadPool(processes=threadN)
    pending = deque(maxlen=threadN)
    # queue feeding the response process started below
    q = Queue()
    p = Process(target=process_response, args=(q, ))
    p.start()
    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    fgmask = []
    streamId = 0
    pipe_ready = False
    # base name and extension of the recorded video stream
    vWfn = ["vidStream", ".avi"]
    if imutils.is_cv3():
        fcc = cv2.VideoWriter_fourcc(*"XVID")
    elif imutils.is_cv2():
        fcc = cv2.cv.CV_FOURCC(*"XVID")
    vwParams = dict(
        filename=join(vsDir, "null" + vWfn[1]),
        fourcc=fcc,
        fps=config.camera.fps,
        frameSize=config.camera.res)
    vW = None
    while True:
        try:
            while len(pending) > 0 and pending[0].ready():
                try: