direction = ""

# TODO add "duration" to highlights. Currently only passes start time
# and assumes a constant time for highlight duration.
# class HighlightClass:
#     def __init__(self, time, duration):
#         self.time = time
#         self.duration = duration
highlight_duration = 4
state = ""
state_prev = ""
state_count = 0

# initialize key clip writer and the consecutive number of
# frames that have *not* contained any action
kcw = KeyClipWriter(bufSize=args["buffer_size"])
consecFrames = 0

# keep looping
while True:
    is_highlight = False

    # grab the current frame, resize it, and initialize a
    # boolean used to indicate if the consecutive frames
    # counter should be updated
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    updateConsecFrames = True

    # blur the frame and convert it to the HSV color space
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
cap = cv2.VideoCapture(args["input"])
frames_num = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # property 7: total frame count
stop_frame = int(frames_num - frames_num % args["detectFrames"])
print("stop frame:", frames_num, stop_frame)

print("load model")
print(args["config"])
detect_model = OVdetection(args["md"], args["device"],
                           args["cpu_extension"], args["config"], ROI)
detect_model.load_model()
recogh_model = OVrecognition(args["mrh"], args["device"],
                             args["cpu_extension"])
recogh_model.load_model()

# initialize the key clip writer used to save video clips
kcw = KeyClipWriter(bufSize=args["buffer_size"])
consecFrames = 0
frame_idx = 0
frozen_detect = False
frozen_frame = 0

log = open('log.txt', 'w')


def model_inference(kcw, Q):
    print("do inference")
    detection_time = 0
    recognition_time = 0
    inference_idx = 0
    consecFrames = 0
    frame_idx = 0
    frozen_frame = 0
import argparse
import datetime
import imutils
import time
import cv2
from imutils.video import VideoStream
from upload import upload
from menu import Menu

mn = Menu()

print("[INFO] warming up the camera")
vs = VideoStream(usePiCamera=mn.parametros['camera'] > 0).start()
time.sleep(2.0)

kcw = KeyClipWriter(mn.parametros['buffer'])
consecFrames = 0  # counts the number of frames that do not contain an event of interest

while True:
    # grab the current frame and resize it
    frame = vs.read()
    frame = imutils.resize(frame, width=mn.parametros['resolucao_w'])
    updateConsecFrames = True

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):  # stop monitoring
        break
class GenericDetector:
    def __init__(self, path, headless, buffer_size, codec, fps, net,
                 confidence, frameCount=32, notifier=None, url=None,
                 debugmemory=False, blocking=False):
        # initialize the video stream, allow the camera sensor to warm up,
        # and initialize the FPS counter
        print("[INFO] starting video stream...")
        self.vs = VideoStream(src=0).start()
        # vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)
        self.fps = FPS().start()

        # initialize the key clip writer and the motionFrames
        # and consecFrames counters to track frames without motion
        self.buffer_size = buffer_size
        self.kcw = KeyClipWriter(bufSize=buffer_size)
        self.consecFrames = 0  # number of frames with no motion
        self.prev_detections = None

        # initialize the output frame and a lock used to ensure thread-safe
        # exchanges of the output frames (useful when multiple browsers/tabs
        # are viewing the stream)
        self.outputFrame = None
        self.lock = threading.Lock()

        self.path = path
        self.headless = headless
        self.codec = codec
        self.fps_rate = fps
        self.net = net
        self.confidence = confidence
        self.frameCount = frameCount
        self.notifier = notifier
        self.url = url
        self.debugmemory = debugmemory
        self.blocking = blocking
        if not self.blocking:
            self.inputQueue = Queue(maxsize=1)
            self.outputQueue = Queue(maxsize=1)

    def __del__(self):
        self.kcw.finish()

        # stop the timer and display FPS information
        self.vs.stop()
        self.fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

    def loop_over_detections(self, frame, detections, w, h):
        detected = False
        msg = []

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > self.confidence:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the timestamp
                timestamp = datetime.datetime.now()
                cv2.putText(frame, timestamp.strftime("%Y.%m.%d %H:%M:%S"),
                            (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            COLORS[0], 2)

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

                # record this frame only if the set of detected objects
                # has changed since the previous frame
                if self.prev_detections is None:
                    return detected
                if np.array_equal(self.prev_detections,
                                  detections[0, 0, :, 1]):
                    return detected
                detected = True

                # if we are not already recording, start recording
                if not self.kcw.recording:
                    difference = set(detections[0, 0, :, 1]).symmetric_difference(
                        set(self.prev_detections))
                    for o in difference:
                        msg.append(CLASSES[int(o)])
                    timestamp = datetime.datetime.now()
                    ts = timestamp.strftime("%Y%m%d-%H%M%S")
                    p = "{}/{}.avi".format(self.path, ts)
                    print(timestamp, 'Start recording', p)
                    self.kcw.start(p, cv2.VideoWriter_fourcc(*self.codec),
                                   self.fps_rate)

                    if len(msg) > 0:
                        msg = ', '.join(msg)
                        msg = '{}: {} appeared'.format(ts, msg)
                        if self.url:
                            msg = msg + ' {}/video_feed'.format(self.url)
                        print(msg)
                        if self.notifier:
                            self.notifier(msg)

        return detected

    def detect_object_in_frame(self, frame):
        frame = imutils.resize(frame, width=400)

        # update the key frame clip buffer
        self.kcw.update(frame)

        # grab the frame dimensions
        (h, w) = frame.shape[:2]

        # run detection (inline or via the worker queues)
        detections = self.classify_frame(frame)
        if detections is not None:
            if self.loop_over_detections(frame, detections, w, h):
                self.consecFrames = 0
            # save the objects detected on the current frame
            self.prev_detections = detections[0, 0, :, 1]
        return frame

    def _classify_frame(self, frame):
        frame = cv2.resize(frame, (300, 300))
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)

        # set the blob as input to our deep learning object
        # detector and obtain the detections
        self.net.setInput(blob)
        return self.net.forward()

    def classify_frame(self, frame):
        if self.blocking:
            return self._classify_frame(frame)

        # if the input queue *is* empty, give the current frame to
        # the worker to classify
        if self.inputQueue.empty():
            self.inputQueue.put(frame)

        # if the output queue *is not* empty, grab the detections
        if not self.outputQueue.empty():
            return self.outputQueue.get()

    def loop_classify_frame(self):
        assert not self.blocking  # only for the non-blocking case

        # keep looping
        while True:
            # check to see if there is a frame in our input queue
            if self.inputQueue.empty():
                continue

            # grab the frame from the input queue and classify it
            frame = self.inputQueue.get()
            detections = self._classify_frame(frame)

            # write the detections to the output queue
            self.outputQueue.put(detections)

    def generate(self):
        "Yield image/jpeg frames for web serving."
        # loop over frames from the output stream
        while True:
            # wait until the lock is acquired
            with self.lock:
                # check if the output frame is available, otherwise skip
                # this iteration of the loop
                if self.outputFrame is None:
                    continue

                # encode the frame in JPEG format
                (flag, encodedImage) = cv2.imencode(".jpg", self.outputFrame)

                # ensure the frame was successfully encoded
                if not flag:
                    continue

            # yield the output frame in the byte format
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   bytearray(encodedImage) + b'\r\n')
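
# --------------------------------------------------------------------
# A minimal, hypothetical driver for GenericDetector (a sketch, not part
# of the original code). The model file paths are placeholders, and the
# loop simply mirrors the KeyClipWriter lifecycle the class relies on:
# update() runs on every frame inside detect_object_in_frame(), and
# finish() closes the clip once a full buffer passes with no detections.
def run_generic_detector():
    # placeholder MobileNet-SSD files; substitute your own model
    net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                                   "MobileNetSSD_deploy.caffemodel")
    detector = GenericDetector(path="clips", headless=True, buffer_size=32,
                               codec="MJPG", fps=20, net=net,
                               confidence=0.5, blocking=True)
    while True:
        frame = detector.vs.read()
        frame = detector.detect_object_in_frame(frame)
        detector.consecFrames += 1

        # close the clip after a buffer's worth of detection-free frames
        if detector.kcw.recording and detector.consecFrames >= detector.buffer_size:
            detector.kcw.finish()

        # publish the frame for the MJPEG generate() method
        with detector.lock:
            detector.outputFrame = frame.copy()
        detector.fps.update()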
"GMG": cv2.bgsegm.createBackgroundSubtractorGMG, "MOG": cv2.bgsegm.createBackgroundSubtractorMOG, "GSOC": cv2.bgsegm.createBackgroundSubtractorGSOC, "LSBP": cv2.bgsegm.createBackgroundSubtractorLSBP } # create our background subtractor fgbg = OPENCV_BG_SUBTRACTORS[conf["bg_sub"]]() # create erosion and dilation kernels eKernel = np.ones(tuple(conf["erode"]["kernel"]), "uint8") dKernel = np.ones(tuple(conf["dilate"]["kernel"]), "uint8") # initialize key clip writer, the consecutive number of frames without # motion and frames since the last snapshot was written kcw = KeyClipWriter(bufSize=conf["keyclipwriter_buffersize"]) framesWithoutMotion = 0 framesSinceSnap = 0 # begin capturing "ctrl + c" signals signal.signal(signal.SIGINT, signal_handler) images = " and images..." if conf["write_snaps"] else "..." print("[INFO] detecting motion and storing videos{}".format(images)) # loop through the frames while True: # grab a frame from the video stream fullFrame = vs.read() # if no frame was read, the stream has ended if fullFrame is None:
args = vars(ap.parse_args())

# initialize the video stream and allow the camera sensor to
# warm up
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# define the lower and upper boundaries of the "green" ball in
# the HSV color space
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)

# initialize key clip writer and the consecutive number of
# frames that have *not* contained any action
kcw = KeyClipWriter(bufSize=args["buffer_size"])
consecFrames = 0

# keep looping
while True:
    # grab the current frame, resize it, and initialize a
    # boolean used to indicate if the consecutive frames
    # counter should be updated
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    updateConsecFrames = True

    # blur the frame and convert it to the HSV color space
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
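    # --- illustrative continuation (not in this excerpt): the usual next
    # steps for this color-tracking loop are to threshold on the green
    # range, clean up the mask, and treat "no contours" as "no action" ---
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask; the consecutive-frame counter should
    # only keep climbing while nothing is detected
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    updateConsecFrames = len(cnts) == 0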
avg = None
peds = 0

# if Dropbox is enabled, initialize the uploader
if conf["use_dropbox"]:
    uploader = DBUpload(conf["dropbox_key"], conf["dropbox_secret"])

# initialize the video stream and let the camera warm up
logging.info("warming up camera...")
vs = VideoStream(usePiCamera=conf["picamera"] > 0,
                 resolution=(640, 480)).start()
time.sleep(conf["camera_warmup_time"])

# initialize the key clip writer and the motionFrames
# and consecFrames counters to track frames without motion
kcw = KeyClipWriter(bufSize=conf["buffer_size"])
pDet = PedDetect()
consecFrames = 0  # number of frames with no motion
motionFrames = 0  # number of frames with motion
pedFrames = 0
# motion-detection bounding box as x, y, w, h
boundingbox = [conf["resolution"][0], conf["resolution"][1], 0, 0]
p = ""
bbROIH = conf["HeightROIfactor"]
bbROIW = conf["WidthROIfactor"]

while True:
    # grab the current frame, resize it, and add status text and a timestamp
    frame = vs.read()
    frame = imutils.resize(frame, width=conf["resize_width"])
    timestamp = datetime.datetime.now()
    text = "Standby"
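    # --- illustrative continuation (not in this excerpt): the classic
    # running-average motion test that the `avg` variable above points
    # toward; conf["delta_thresh"] is an assumed config key ---
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # the first frame initializes the background model
    if avg is None:
        avg = gray.copy().astype("float")
        continue

    # accumulate the background and diff the current frame against it
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)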
def detect_motion(outputDirectory):
    global vs, outputFrame, lock

    frameCount = 32
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0
    recordedFrameCount = 0
    kcw = KeyClipWriter(bufSize=beforeAndAfterFrames)
    lastFileName = None

    while True:
        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=600)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.35, (0, 255, 0), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            kcw.update(frame)
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)

                if kcw.recording is False:
                    recordedFrameCount = 0
                    timestamp = datetime.datetime.now()
                    lastFileName = "{}/{}.mp4".format(
                        outputDirectory, timestamp.strftime("%Y%m%d-%H%M%S"))
                    kcw.start(lastFileName,
                              cv2.VideoWriter_fourcc(*"MP4V"), fps)
                    logging.info("Started recording")

            if kcw.recording is True:
                recordedFrameCount += 1
                if recordedFrameCount > beforeAndAfterFrames:
                    logging.info("Stopped recording")
                    kcw.finish()
                    if lastFileName is not None:
                        ah.sendEvent(lastFileName)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = SingleMotionDetector(accumWeight=0.1)
    total = 0

    # initialize the KeyClipWriter and a counter for frames with no
    # motion detected
    kcw = KeyClipWriter()
    consecFramesNoMotion = 0

    # loop over frames from the video stream
    while True:
        timestamp = datetime.datetime.now()
        text = "Unoccupied"

        # read the next frame from the video stream, resize it,
        # convert the frame to grayscale, and blur it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)
                text = "Occupied"

                # send email to notify user of motion
                # send_email(timestamp)

                # motion has occurred, so reset the no-motion counter
                consecFramesNoMotion = 0
            else:
                consecFramesNoMotion += 1

            record_video(kcw, frame, motion, consecFramesNoMotion, timestamp)

        # grab the current timestamp and draw it on the frame
        cv2.putText(frame, timestamp.strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.35, (0, 0, 255), 1)
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
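
# record_video() is called above but is not part of this excerpt. A
# plausible implementation following the usual KeyClipWriter pattern is
# sketched below; the output path, codec, FPS, and the 32-frame stop
# threshold are all assumptions made for illustration.
def record_video(kcw, frame, motion, consecFramesNoMotion, timestamp):
    # buffer every frame so a clip also contains footage from just
    # before the motion event
    kcw.update(frame)

    # start a new clip when motion appears and we are not yet recording
    if motion is not None and not kcw.recording:
        p = "output/{}.avi".format(timestamp.strftime("%Y%m%d-%H%M%S"))
        kcw.start(p, cv2.VideoWriter_fourcc(*"MJPG"), 30)

    # stop once motion has been absent for longer than the buffer
    if kcw.recording and consecFramesNoMotion >= 32:
        kcw.finish()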
# finish the authorization and grab the Dropbox client
(accessToken, userID) = flow.finish(authCode)
client = DropboxClient(accessToken)
print("[SUCCESS] dropbox account linked")

# initialize the camera
vs = PiVideoStream(conf["resolution"], conf["fps"], conf["rotation"]).start()

# allow the camera to warm up, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])

# initialize key clip writer and the consecutive number of
# frames that have *not* contained any action
kcw = KeyClipWriter(bufSize=conf["videobuffer"], timeout=0.01)
consecFrames = 0
recFrames = 0
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0

cv2.namedWindow("Security Feed")
cv2.namedWindow("ctrl", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Security Feed", moveMask)
cv2.createTrackbar('1:Exit app', "ctrl", 0, 1, quit)
cv2.createTrackbar('Mask size', "ctrl", maskw, 255, setMaskSize)
cv2.createTrackbar('0:Off\n1:On', "ctrl", 0, 1, startDetect)
cv2.resizeWindow("ctrl", 300, 100)
cv2.moveWindow("ctrl", 500, 35)
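
# The GUI callbacks wired up above (moveMask, quit, setMaskSize,
# startDetect) are defined elsewhere in the original script, earlier
# than this point. Minimal placeholder versions might look like this
# (illustrative only; the module-level state names are assumptions):
def quit(v):
    # trackbar callback: request application exit
    global exitApp
    exitApp = v == 1

def setMaskSize(v):
    # trackbar callback: resize the privacy mask
    global maskw
    maskw = v

def startDetect(v):
    # trackbar callback: toggle motion detection on/off
    global detecting
    detecting = v == 1

def moveMask(event, x, y, flags, param):
    # mouse callback: reposition the mask on left click
    global maskx, masky
    if event == cv2.EVENT_LBUTTONDOWN:
        (maskx, masky) = (x, y)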