def dropbox_upload(image):
    """Write *image* to a temp file, upload it to Dropbox, then clean up.

    Relies on module-level names: ``conf`` (config dict), ``client``
    (Dropbox client) and ``ts`` (timestamp string used for the remote
    filename).  NOTE(review): ``ts`` is never assigned in this function —
    presumably set by the caller's capture loop; confirm.
    """
    # write the image to a temporary file
    t = TempImage()
    cv2.imwrite(t.path, image)
    # upload the image to Dropbox and clean up the temporary image
    print("[UPLOAD] {}".format(ts))
    path = "/{base_path}/{timestamp}.jpg".format(
        base_path=conf["dropbox_base_path"], timestamp=ts)
    # context manager closes the handle even if the upload raises
    # (the original `open(...).read()` leaked the file descriptor)
    with open(t.path, "rb") as fp:
        client.files_upload(fp.read(), path)
    t.cleanup()
def Upload(self, frame, timestamp):
    """Upload *frame* to Dropbox as ``/<base_path>/<timestamp>.jpg``.

    No-op unless ``self.conf["use_dropbox"]`` is truthy.

    :param frame: BGR image array to upload.
    :param timestamp: ``datetime`` used to build the remote filename.
    """
    if self.conf["use_dropbox"]:
        # write the image to a temporary file
        t = TempImage()
        cv2.imwrite(t.path, frame)
        # build the remote path from the timestamp
        ts = timestamp.strftime("%Y-%m-%d %H:%M:%S")
        path = "/{base_path}/{timestamp}.jpg".format(
            base_path=self.conf["dropbox_base_path"], timestamp=ts)
        logger.info("[UPLOAD] {}".format(path))
        # context manager fixes the leaked file handle of the original
        # `open(...).read()` one-liner
        with open(t.path, "rb") as fp:
            self.client.files_upload(fp.read(), path)
        t.cleanup()
def upload_picture(frame, kind): t = TempImage() cv2.imwrite(t.path, frame) print "[SECURITAS]" + kind + " detected! Uploading photo to Dropbox..." path = "{timestamp}.jpg".format(timestamp=timestamp.strftime('%Y%m%d%H%M')) dropbox_file = dropbox_client.put_file(path, open(t.path, "rb")) media = dropbox_client.media(path) print "[SECURITAS] Sending SMS..." sms = nexmo_client.send_message({ 'from': conf["nexmo_number"], 'to': conf["phone_number"], 'text': kind + ' detected at ' + timestamp.strftime('%Y-%m-%d %I:%M %p') + ': ' + media['url'] }) t.cleanup()
def sendFrame(self, frame):
    """Send *frame* as a Telegram photo to the stored chat.

    Only sends when a chat id is known AND either streaming is started or a
    one-shot frame was requested.  Always clears ``sendSingleFrame`` so a
    one-shot request never repeats.

    :param frame: BGR image array to send.
    """
    chat_id = self.chatId
    try:
        # `is not None` instead of `!= None` (identity check for None)
        if chat_id is not None and (self.started or self.sendSingleFrame):
            self.logger.debug("sending image")
            t = TempImage()
            cv2.imwrite(t.path, frame)
            # context manager closes the photo handle after the send
            # (the original `open(...)` leaked the descriptor)
            with open(t.path, 'rb') as photo:
                self.bot.sendPhoto(chat_id=chat_id, photo=photo)
            t.cleanup()
    except NetworkError:
        self.logger.error("network error")
    except Unauthorized:
        # The user has removed or blocked the bot.
        self.chatId = None
        self.started = False
        self.logger.error("Unauth. User has removed bot")
    except Exception:
        # narrowed from a bare `except` so KeyboardInterrupt/SystemExit
        # propagate; log with traceback instead of a bare "Unknown"
        self.logger.exception("Unknown")
    self.sendSingleFrame = False
def sendFrame(self, frame):
    """Send *frame* as a Telegram photo to the stored chat.

    Sends only when a chat id exists and either continuous streaming is on
    or a single frame was explicitly requested; the one-shot flag is reset
    on every call.

    :param frame: BGR image array to send.
    """
    chat_id = self.chatId
    try:
        # identity comparison for None rather than `!= None`
        if chat_id is not None and (self.started or self.sendSingleFrame):
            self.logger.debug("sending image")
            t = TempImage()
            cv2.imwrite(t.path, frame)
            # close the photo handle deterministically (original leaked it)
            with open(t.path, 'rb') as photo:
                self.bot.sendPhoto(chat_id=chat_id, photo=photo)
            t.cleanup()
    except NetworkError:
        self.logger.error("network error")
    except Unauthorized:
        # The user has removed or blocked the bot.
        self.chatId = None
        self.started = False
        self.logger.error("Unauth. User has removed bot")
    except Exception:
        # narrowed from a bare `except`; keep traceback in the log
        self.logger.exception("Unknown")
    self.sendSingleFrame = False
def send_email(frame): # Write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) # E-mail the image and cleanup the tempory image msg = MIMEMultipart() msg['From'] = conf["from_addr"] msg['To'] = conf["to_addr"] msg['Subject'] = "Front Door Visitor" body = " " msg.attach(MIMEText(body, 'plain')) attachment = open(t.path, "rb") part = MIMEBase('application', 'octet-stream') part.set_payload((attachment).read()) encoders.encode_base64(part) part.add_header('Content-Disposition', "attachment; filename= %s" % t.filename) msg.attach(part) server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(conf["from_addr"], conf["email_pwd"]) text = msg.as_string() server.sendmail(conf["from_addr"], conf["to_addr"], text) server.quit() print "[E-MAIL SENT] {}".format(ts) t.cleanup() return
def dbxmain():
    """Capture one full-resolution frame, stamp it, and upload it to Dropbox.

    Reads the Dropbox token from a local JSON permissions file, grabs a
    single frame (the loop breaks after the first iteration), timestamps
    it, uploads it as /<timestamp>.jpg, and deletes the local copy.
    """
    # Put your token here:
    with open("/home/pi/Desktop/pisecuritysystem/permissions.json") as f:
        data = json.load(f)
    client = dbx.Dropbox(data['db-token'])
    # initialize the camera and grab a reference to the raw camera capture
    camera = picamera.PiCamera()
    # default 640x480 - decrease to go faster
    # motion-detect camera resolution
    camera.resolution = (1808, 1008)
    rawCapture = PiRGBArray(camera, size=(1808, 1008))
    for f in camera.capture_continuous(rawCapture, format="bgr",
                                       use_video_port=True):
        frame = f.array
        timestamp = datetime.datetime.now()
        frame = imutils.resize(frame, width=1808)
        # draw the text and timestamp on the frame
        ts = timestamp.strftime("%A_%d_%m_%Y_%I:%M:%S%p")
        cv2.putText(frame, "{}".format(ts), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        t = TempImage()
        cv2.imwrite(t.path, frame)
        print("[UPLOAD] {}".format(ts))
        name = "{base}{timestamp}".format(base="", timestamp=ts)
        # NOTE(review): `t.path[3:]` strips the first 3 chars of the temp
        # path (looks like a "./x" or drive-prefix hack) and renames the
        # temp file into the working directory — fragile; verify against
        # TempImage's path format.
        os.rename(t.path[3:], "{new}.jpg".format(new=name))
        with open(
                "/home/pi/Desktop/pisecuritysystem/{name}.jpg".format(
                    name=name), "rb") as f:
            client.files_upload(f.read(), "/{name}.jpg".format(name=name),
                                mute=True)
        # remove the local copy once uploaded
        os.remove("{name}.jpg".format(name=name))
        rawCapture.truncate(0)
        # single-shot capture: leave the loop after the first frame
        break
    camera.close()
    time.sleep(.5)
cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (205, 155, 75), 1) # Check to see if there was new motion detected if text == "Motion detected": # Check if enough time has elapsed since the last upload if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # Increment the motion counter motionCounter += 1 # Check to see if the number of frames with consistent motion is high enough if motionCounter >= conf["min_motion_frames"]: # check to see if dropbox sohuld be used t = TempImage(conf["storage_base_path"]) cv2.imwrite(t.path, frame) if active == True: print "[INFO] Uploading image. Path: {path}".format( path=t.path) postImage(t.path) t.cleanup() # Upload the last uploaded timestamp lastUploaded = timestamp # Reset the motion counter for the next capture motionCounter = 0 # Otherwise: No new motion has been detected else: motionCounter = 0
def main():
    """Motion-detect from a Pi camera and upload flagged frames to Dropbox.

    Builds a running-average background model, flags contours that are both
    large enough and low enough in the frame as movement, uploads annotated
    frames to a per-day rotating Dropbox folder, and purges the next day's
    folder at midnight.  Python 2 code (print statements, raw_input,
    legacy DropboxClient API).
    """
    # create logger
    logger = logging.getLogger('home_security')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('home_security.log')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    #syslog = logging.handlers.SysLogHandler(address = '/dev/log')
    #syslog.setLevel(logging.ERROR)
    #logger.addHandler(syslog)
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--conf", required=True,
                    help="path to the JSON configuration file")
    args = vars(ap.parse_args())
    # filter warnings, load the configuration and initialize the Dropbox
    # client
    warnings.filterwarnings("ignore")
    conf = json.load(open(args["conf"]))
    client = None
    # check to see if the Dropbox should be used
    if conf["use_dropbox"]:
        if conf["accessToken"]:
            # a pre-issued token in the config skips the OAuth dance
            accessToken = conf["accessToken"]
            userID = "*****@*****.**"
        else:
            # connect to dropbox and start the session authorization process
            flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"],
                                               conf["dropbox_secret"])
            print "[INFO] Authorize this application: {}".format(flow.start())
            authCode = raw_input("Enter auth code here: ").strip()
            # finish the authorization and grab the Dropbox client
            (accessToken, userID) = flow.finish(authCode)
            print "accessToken:{} userID:{}".format(accessToken, userID)
        client = DropboxClient(accessToken)
        print "[SUCCESS] dropbox account linked"
    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = tuple(conf["resolution"])
    camera.framerate = conf["fps"]
    rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))
    # allow the camera to warmup, then initialize the average frame, last
    # uploaded timestamp, and frame motion counter
    print "[INFO] warming up..."
    time.sleep(conf["camera_warmup_time"])
    avg = None
    lastUploaded = datetime.datetime.now()
    # ordinal day number drives the daily folder rotation below
    dayNumber = lastUploaded.toordinal()
    motionCounter = 0
    # capture frames from the camera
    for f in camera.capture_continuous(rawCapture, format="bgr",
                                       use_video_port=True):
        # grab the raw NumPy array representing the image and initialize
        # the timestamp and movement flag
        frame = f.array
        timestamp = datetime.datetime.now()
        dayNumberNow = timestamp.toordinal()
        movement = False
        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=600)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the average frame is None, initialize it
        if avg is None:
            print "[INFO] starting background model..."
            avg = gray.copy().astype("float")
            rawCapture.truncate(0)
            continue
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # NOTE(review): 3-tuple unpack implies OpenCV 3.x findContours API
        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours. 0,0 is tlc. y increases down, x increases
        # right
        x, y = 0, 0
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is too small, or its bottom edge is too high
            # in the frame (y + h < 320), ignore it
            if (cv2.contourArea(c) < conf["min_area"]) or ((y + h) < 320):
                continue
            # compute the bounding box for the contour, draw it on the
            # frame, and update the movement flag
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            movement = True
        # draw the text and timestamp on the frame
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "x: {} y: {}".format(x, y), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1)
        # check to see if there is movement
        if movement:
            logger.info("movement detected")
            # check to see if enough time has passed between uploads
            if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
                # increment the motion counter
                motionCounter += 1
                # check to see if the number of frames with consistent
                # motion is high enough
                if motionCounter >= conf["min_motion_frames"]:
                    # check to see if dropbox should be used
                    if conf["use_dropbox"]:
                        # write the image to temporary file
                        t = TempImage()
                        cv2.imwrite(t.path, frame)
                        # rotate through 20 daily folders (1..20)
                        suffix = (dayNumberNow % 20) + 1  # (1..20)
                        new_path = "Public/SecurityDawson65_" + str(suffix)
                        # upload the image to Dropbox and cleanup the
                        # temporary image
                        try:
                            path = "{base_path}/{timestamp}.jpg".format(
                                base_path=new_path, timestamp=ts)
                            logger.info("[UPLOAD] {}".format(path))
                            # NOTE(review): the file handle passed to
                            # put_file is never closed explicitly
                            client.put_file(path, open(t.path, "rb"))
                            t.cleanup()
                        except Exception as e:
                            logger.exception("Network error. Upload failed")
                            time.sleep(30)  # wait for dropbox to recover
                    # update the last uploaded timestamp and reset the
                    # motion counter
                    lastUploaded = timestamp
                    motionCounter = 0
                else:
                    logger.info(
                        "failed min_motion_frames {}".format(motionCounter))
            else:
                logger.info("failed min_upload_seconds")
        # otherwise, no movement detected
        else:
            motionCounter = 0
        # midnight rollover: clear the next day's rotation folder
        if dayNumber != dayNumberNow:
            # midnight. clear new folder
            suffix = (dayNumberNow % 20) + 1  # (1..20)
            new_path = "Public/SecurityDawson65_" + str(suffix)
            delete_files(client, logger, new_path)
            dayNumber = dayNumberNow
            logger.info("old files deleted for day %s"
                        % str(dayNumberNow % 20 + 1))
        # check to see if the frames should be displayed to screen
        if conf["show_video"]:
            # display the security feed
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                break
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1) # check to see if motion is detected if text == "Detected": # check to see if enough time has passed between uploads if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= conf["min_motion_frames"]: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) # check to see if people detection is wanted if conf["detect_only_people"]: tOrig = TempImage() tHuman = TempImage() orig = frame.copy() cv2.imwrite(tOrig.path, orig) #resize the image to reduce detection time image = imutils.resize(frame, width=min(400, frame.shape[1])) #detect people in the image
0.35, (0, 0, 255), 1) # check to see if the room is occupied if TEXT == "Occupied": __alert__() # check to see if enough time has passed between uploads if (TIMESTAMP - LAST_UPLOADED).seconds >= CONF["min_upload_seconds"]: # increment the motion counter MOTION_COUNTER += 1 # check to see if the number of frames with consistent motion is high enough if MOTION_COUNTER >= CONF["min_motion_frames"]: # check to see if dropbox sohuld be used if CONF["use_dropbox"]: # write the image to temporary file T = TempImage() cv2.imwrite(T.path, FRAME) # upload the image to Dropbox and cleanup the tempory image print "[UPLOAD] {}".format(TS) PATH = "{base_path}/{timestamp}.jpg".format( base_path=CONF["dropbox_base_path"], timestamp=TS) CLIENT.put_file(PATH, open(T.path, "rb")) T.cleanup() # update the last uploaded timestamp and reset the motion # counter LAST_UPLOADED = TIMESTAMP MOTION_COUNTER = 0 # otherwise, the room is not occupied
def motionmain():
    """Run the motion-detect/upload loop until the GPIO button is pressed.

    Uses a running-average background model; after 8 consecutive motion
    frames (with at least 3 s between uploads) the annotated frame is
    renamed into the working directory, uploaded to Dropbox, and removed
    locally.  A button on BCM pin 22 (pulled up, pressed == low) exits.
    """
    pin_num = 22
    # filter warnings, load the configuration and initialize the Dropbox
    warnings.filterwarnings("ignore")
    # setup gpio
    GPIO1.setmode(GPIO1.BCM)
    # GPIO 23 & 17 set up as inputs, pulled up to avoid false detection.
    # Both ports are wired to connect to GND on button press.
    # So we'll be setting up falling edge detection for both
    GPIO1.setup(pin_num, GPIO1.IN, pull_up_down=GPIO1.PUD_UP)
    # dropbox: read the access token from the local permissions file
    with open("/home/pi/Desktop/pisecuritysystem/permissions.json") as f:
        data = json.load(f)
    client = dbx.Dropbox(data['db-token'])
    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    # default 640x480 - decrease to go faster
    # motion-detect camera resolution
    camera.resolution = (640, 480)
    rawCapture = PiRGBArray(camera, size=(640, 480))
    # allow the camera to warmup, then initialize the average frame, last
    # uploaded timestamp, and frame motion counter
    print("[INFO] warming up...")
    time.sleep(2.5)
    avg = None
    lastUploaded = datetime.datetime.now()
    motionCounter = 0
    text = ""
    name = ""
    # capture frames from the camera
    for f in camera.capture_continuous(rawCapture, format="bgr",
                                       use_video_port=True):
        # grab the raw NumPy array representing the image and initialize
        # the timestamp and occupied/unoccupied text
        frame = f.array
        timestamp = datetime.datetime.now()
        # resize the frame, convert it to grayscale, and blur it
        # frame=500 default, decrease it to go faster
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the average frame is None, initialize it
        if avg is None:
            print("[INFO] starting background model...")
            avg = gray.copy().astype("float")
            rawCapture.truncate(0)
            continue
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, 5, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # NOTE(review): 3-tuple unpack implies OpenCV 3.x findContours API
        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 5000:
                continue
            # compute the bounding box for the contour, draw it on the
            # frame, and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "!"
        # draw the text and timestamp on the frame
        # NOTE(review): "%A_%d_m_%Y..." has a literal 'm' — likely a typo
        # for %m (month); confirm intended filename format
        ts = timestamp.strftime("%A_%d_m_%Y_%I:%M:%S%p")
        cv2.putText(frame, "{}".format(ts), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        # check to see if the room is occupied
        if text == "!":
            # check to see if enough time has passed between uploads
            if (timestamp - lastUploaded).seconds >= 3.0:
                # increment the motion counter
                motionCounter += 1
                # check to see if the number of frames with consistent
                # motion is high enough
                if motionCounter >= 8:  # originally 8
                    print("Capturing image.")
                    t = TempImage()
                    cv2.imwrite(t.path, frame)
                    name = "{base}{timestamp}".format(base="", timestamp=ts)
                    # NOTE(review): t.path[3:] strips the first 3 chars of
                    # the temp path (path-prefix hack) — fragile; verify
                    # against TempImage's path format
                    os.rename(t.path[3:], "{new}.jpg".format(new=name))
                    print("[UPLOAD] {}".format(ts))
                    with open(
                            "/home/pi/Desktop/pisecuritysystem/{name}.jpg"
                            .format(name=name), "rb") as f:
                        client.files_upload(f.read(),
                                            "/{name}.jpg".format(name=name),
                                            mute = True)
                    os.remove("{name}.jpg".format(name=name))
                    # update the last uploaded timestamp and reset the
                    # motion counter
                    lastUploaded = timestamp
                    motionCounter = 0
                    text = ""
        # otherwise, the room is not occupied
        else:
            motionCounter = 0
            text = ""
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        # button press pulls the pin low -> exit the capture loop
        if GPIO1.input(pin_num) == False:
            print("button pressed")
            print("exit now")
            break
    GPIO1.cleanup()
    time.sleep(.25)  # pause for .25 seconds
    camera.close()
    print("camera closed")
    time.sleep(.25)
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1) # check to see if motion is detected if text == "Detected": # check to see if enough time has passed between uploads if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= conf["min_motion_frames"]: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) # check to see if people detection is wanted if conf["detect_only_people"]: tOrig = TempImage() tHuman = TempImage() orig = frame.copy() cv2.imwrite(tOrig.path, orig) #resize the image to reduce detection time image = imutils.resize(frame, width=min(400, frame.shape[1])) #detect people in the image (rects, weights) = hog.detectMultiScale(image, winStride=(4,4),
def main():
    """Motion-detect from a Pi camera and upload flagged frames to Dropbox.

    Uses a running-average background model and the Dropbox v2 API with a
    pre-issued access token from the JSON config.  Optionally shows the
    annotated feed in an OpenCV window ('q' quits).
    """
    ###########################################
    # NOTE(review): `data` from conf.json is loaded but never used below;
    # the operative config comes from the --conf argument.
    with open('conf.json') as json_data_file:
        data = json.load(json_data_file)
    ###########################################
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--conf", required=True,
                    help="path to the JSON configuration file")
    args = vars(ap.parse_args())
    # filter warnings, load the configuration and initialize the Dropbox
    # client
    warnings.filterwarnings("ignore")
    conf = json.load(open(args["conf"]))
    client = None
    # check to see if the Dropbox should be used
    if conf["use_dropbox"]:
        # connect to dropbox and start the session authorization process
        client = dropbox.Dropbox(conf["dropbox_access_token"])
        print("[SUCCESS] dropbox account linked")
    # initialize the camera and grab a reference to the raw camera capture
    camera = PiCamera()
    camera.resolution = tuple(conf["resolution"])
    camera.framerate = conf["fps"]
    rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"]))
    # allow the camera to warmup, then initialize the average frame, last
    # uploaded timestamp, and frame motion counter
    print("[INFO] warming up...")
    time.sleep(conf["camera_warmup_time"])
    avg = None
    lastUploaded = datetime.datetime.now()
    motionCounter = 0
    ######### PAN part 1 ################
    ## ssP = ServoSix()
    ## horDeg = 90
    ## shift = 0.5
    ## i = 0.0
    ## dir = 1
    ## def countdown(n):
    ##     while n > 0:
    ##         n = n -1
    ######################################
    # capture frames from the camera
    for f in camera.capture_continuous(rawCapture, format="bgr",
                                       use_video_port=True):
        # grab the raw NumPy array representing the image and initialize
        # the timestamp and occupied/unoccupied text
        frame = f.array
        timestamp = datetime.datetime.now()
        text = "Unoccupied"
        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the average frame is None, initialize it
        if avg is None:
            print("[INFO] starting background model...")
            avg = gray.copy().astype("float")
            rawCapture.truncate(0)
            continue
        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # handle the OpenCV 2 vs 3 findContours return-shape difference
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < conf["min_area"]:
                continue
            # compute the bounding box for the contour, draw it on the
            # frame, and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Motion Detected"
        # draw the text and timestamp on the frame
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        # check to see if the room is occupied
        if text == "Motion Detected":
            # check to see if enough time has passed between uploads
            if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
                # increment the motion counter
                motionCounter += 1
                # check to see if the number of frames with consistent
                # motion is high enough
                if motionCounter >= conf["min_motion_frames"]:
                    # check to see if dropbox should be used
                    if conf["use_dropbox"]:
                        # write the image to temporary file
                        t = TempImage()
                        cv2.imwrite(t.path, frame)
                        # upload the image to Dropbox and cleanup the
                        # temporary image
                        print("[UPLOAD] {}".format(ts))
                        path = "/{base_path}/{timestamp}.jpg".format(
                            base_path=conf["dropbox_base_path"],
                            timestamp=ts)
                        # NOTE(review): file handle from open() is never
                        # closed explicitly
                        client.files_upload(open(t.path, "rb").read(), path)
                        t.cleanup()
                    # update the last uploaded timestamp and reset the
                    # motion counter
                    lastUploaded = timestamp
                    motionCounter = 0
        # otherwise, the room is not occupied
        else:
            motionCounter = 0
        # check to see if the frames should be displayed to screen
        if conf["show_video"]:
            # display the security feed
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                break
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
def deal_frame(s_frame, dropbox_client, pi_raw_capture):
    """Process one frame: detect motion against a running average and,
    after enough consistent-motion frames, upload the annotated frame to
    Dropbox.

    Uses module-level globals ``avg`` (background model), ``lastUploaded``,
    ``motionCounter`` and ``conf``.  Python 2 code (print statements,
    legacy Dropbox ``put_file`` API, OpenCV 2 ``findContours`` return).

    :param s_frame: BGR frame to analyse (annotated in place).
    :param dropbox_client: Dropbox client, or None to skip uploading.
    :param pi_raw_capture: PiRGBArray to truncate when the background
        model is (re)initialized, or None.
    """
    timestamp = datetime.datetime.now()
    text = "Unoccupied"
    # resize the frame, convert it to grayscale, and blur it
    s_frame = imutils.resize(s_frame, width=500)
    gray = cv2.cvtColor(s_frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # cv2.imshow("Gray Feed", gray)
    global avg
    # if the average frame is None, initialize it and skip this frame
    if avg is None:
        print "[INFO] starting background model..."
        avg = gray.copy().astype("float")
        if pi_raw_capture is not None:
            pi_raw_capture.truncate(0)
        return
    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    framedelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    # cv2.imshow("framedelta Feed", framedelta)
    # threshold the delta image, dilate the thresholded image to fill in
    # holes, then find contours on the thresholded image
    thresh = cv2.threshold(framedelta, conf["delta_thresh"], 255,
                           cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    # cv2.imshow("thresh Feed", thresh)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue
        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(s_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(s_frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(s_frame, ts, (10, s_frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    # check to see if the room is occupied
    if text == "Occupied":
        # check whether enough time has passed since the last upload
        global lastUploaded
        if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
            # increment the motion counter
            global motionCounter
            motionCounter += 1
            # check whether enough consecutive-motion frames accumulated
            if motionCounter >= conf["min_motion_frames"]:
                # Write the image to the temporary file
                t = TempImage()
                cv2.imwrite(t.path, s_frame)
                # check whether dropbox is enabled
                if dropbox_client is not None:
                    # upload the image to dropbox and remove the temp file
                    print "[UPLOAD] {}".format(ts)
                    path = "{base_path}/{timestamp}.jpg".format(
                        base_path=conf["dropbox_base_path"], timestamp=ts)
                    # NOTE(review): file handle passed to put_file is
                    # never closed explicitly
                    dropbox_client.put_file(path, open(t.path, "rb"))
                    t.cleanup()
                # update the last upload time and reset the counter
                lastUploaded = datetime.datetime.now()
                motionCounter = 0
    else:
        motionCounter = 0
    # check whether the security feed should be shown on screen
    if conf["show_video"]:
        # display the video
        cv2.imshow("Security Feed", s_frame)
# increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= conf["min_motion_frames"]: # check to see if dropbox sohuld be used if conf["use_dropbox"]: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) # update the last uploaded timestamp and reset the motion # counter lastUploaded = timestamp motionCounter = 0 # otherwise, the room is not occupied else:
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") # Timestamp generation cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1) if text == "Occupied": # Check if room is occupied if (timestamp - lastUploaded).seconds >= conf[ "min_upload_seconds"]: # Check if enough time has passed motionCounter += 1 # Increment counter if motionCounter >= conf[ "min_motion_frames"]: # Check if we have consistent motion if conf["use_dropbox"]: # Check if we're using dropbox upload t = TempImage() # Make temp image cv2.imwrite(t.path, frame) print("[UPLOAD] {}".format(ts)) path = "/{base_path}/{timestamp}.jpg".format( base_path=conf["dropbox_base_path"], timestamp=ts) client.files_upload(open(t.path, "rb").read(), path) # Upload to Dropbox t.cleanup() if conf["use_drive"]: filenameOut = "{timestamp}.jpg".format(timestamp=ts) cv2.imwrite(filenameOut, frame) print("[UPLOAD] {}".format(ts)) imOut = drive.CreateFile()
0.35, (0, 0, 255), 1) # check to see if the room is occupied if text == "Occupied": # check to see if enough time has passed between uploads if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= conf["min_motion_frames"]: # check to see if dropbox sohuld be used if conf["use_dropbox"]: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) # upload the image to Dropbox and cleanup the tempory image print( "[UPLOAD] {}".format(ts)) path = "{base_path}/{timestamp}.jpg".format( base_path=conf["dropbox_base_path"], timestamp=ts) client.put_file(path, open(t.path, "rb")) t.cleanup() # update the last uploaded timestamp and reset the motion # counter lastUploaded = timestamp motionCounter = 0 # otherwise, the room is not occupied
continue (x, y, w, h) = cv2.boundingRect(c) text = "Occupied" ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1) if text == "Occupied": if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: motionCounter += 1 if motionCounter >= conf["min_motion_frames"]: t = TempImage(basePath="/tmp/") timestampd = datetime.datetime.now(timezone('US/Mountain')) date = timestampd.strftime("%A:%d-%B-%Y-%I:%M:%S%p") cv2.imwrite(t.path, frame) print "[UPLOAD] {}".format(ts) url = t.path uploaded_image = imG.upload_image(url, title=date) print conf["min_sms_seconds"] twC.messages.create( from_="(720) 903-4624", to=phone, body= "Recent Activity Alert at Galvanize. Check App for More Details.", media_url=uploaded_image.link) print url fp = urllib.urlopen(url)
def main(): # create logger' logger = logging.getLogger('home_security') logger.setLevel(logging.DEBUG) # create file handler which logs even debug messages fh = logging.FileHandler('home_security.log') fh.setLevel(logging.DEBUG) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.INFO) # create formatter and add it to the handlers formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger logger.addHandler(fh) logger.addHandler(ch) #syslog = logging.handlers.SysLogHandler(address = '/dev/log') #syslog.setLevel(logging.ERROR) #logger.addHandler(syslog) # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-c", "--conf", required=True, help="path to the JSON configuration file") args = vars(ap.parse_args()) # filter warnings, load the configuration and initialize the Dropbox # client warnings.filterwarnings("ignore") conf = json.load(open(args["conf"])) client = None # check to see if the Dropbox should be used if conf["use_dropbox"]: if conf["accessToken"]: accessToken = conf["accessToken"] userID = "*****@*****.**" else: # connect to dropbox and start the session authorization process #flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"]) #print "[INFO] Authorize this application: {}".format(flow.start()) #authCode = raw_input("Enter auth code here: ").strip() # finish the authorization and grab the Dropbox client #(accessToken, userID) = flow.finish(authCode) print " ************* error *************" print "accessToken:{} userID:{}".format(accessToken, userID) # Create a dropbox object using an API v2 key dbx = dropbox.Dropbox(token) #client = DropboxClient(accessToken) print "[SUCCESS] dropbox account linked" # initialize the camera and grab a reference to the raw camera capture camera = PiCamera() camera.resolution = 
tuple(conf["resolution"]) camera.framerate = conf["fps"] rawCapture = PiRGBArray(camera, size=tuple(conf["resolution"])) # allow the camera to warmup, then initialize the average frame, last # uploaded timestamp, and frame motion counter print "[INFO] warming up..." time.sleep(conf["camera_warmup_time"]) avg = None lastUploaded = datetime.datetime.now() dayNumber = lastUploaded.toordinal() motionCounter = 0 # capture frames from the camera for f in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): # grab the raw NumPy array representing the image and initialize # the timestamp and movement flag frame = f.array timestamp = datetime.datetime.now() dayNumberNow = timestamp.toordinal() movement = False # resize the frame, convert it to grayscale, and blur it frame = imutils.resize(frame, width=600) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) # if the average frame is None, initialize it if avg is None: print "[INFO] starting background model..." avg = gray.copy().astype("float") rawCapture.truncate(0) continue # accumulate the weighted average between the current frame and # previous frames, then compute the difference between the current # frame and running average cv2.accumulateWeighted(gray, avg, 0.5) frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) # threshold the delta image, dilate the thresholded image to fill # in holes, then find contours on thresholded image thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255, cv2.THRESH_BINARY)[1] thresh = cv2.dilate(thresh, None, iterations=2) (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # loop over the contours. 0,0 is tlc. 
y increases down, x increase right x, y = 0, 0 for c in cnts: (x, y, w, h) = cv2.boundingRect(c) # if the contour is too small, y co-ord is too low ignore it if (cv2.contourArea(c) < conf["min_area"]) or ((y + h) < 320): continue # compute the bounding box for the contour, draw it on the frame, # and update the text cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) movement = True # draw the text and timestamp on the frame ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") cv2.putText(frame, "x: {} y: {}".format(x, y), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 255, 255), 1) # check to see if there is movement if movement: logger.info("movement detected") # check to see if enough time has passed between uploads if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= conf["min_motion_frames"]: # check to see if dropbox should be used if conf["use_dropbox"]: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) suffix = (dayNumberNow % 20) + 1 #(1..20) new_path = "Public/SecurityDawson65_" + str(suffix) # upload the image to Dropbox and cleanup the tempory image try: path = "{base_path}/{timestamp}.jpg".format( base_path=new_path, timestamp=ts) logger.info("[UPLOAD] {}".format(path)) #client.put_file(path, open(t.path, "rb")) # we want to overwite any previous version of the file contents = open(t.path, "rb").read() meta = dbx.files_upload( contents, path, mode=dropbox.files.WriteMode("overwrite")) except Exception as e: logger.exception("Network error. 
Upload failed") time.sleep(30) #wait for dropbox to recover finally: t.cleanup() # update the last uploaded timestamp and reset the motion # counter lastUploaded = timestamp motionCounter = 0 else: logger.info( "failed min_motion_frames {}".format(motionCounter)) else: logger.info("failed min_upload_seconds") # otherwise, no movement detected else: motionCounter = 0 if dayNumber != dayNumberNow: #midnight. clear new folder suffix = (dayNumberNow % 20) + 1 #(1..20) new_path = "Public/SecurityDawson65_" + str(suffix) delete_files(dbx, logger, new_path) dayNumber = dayNumberNow logger.info("old files deleted for day %s" % str(dayNumberNow % 20 + 1)) # check to see if the frames should be displayed to screen if conf["show_video"]: # display the security feed cv2.imshow("Security Feed", frame) key = cv2.waitKey(1) & 0xFF # if the `q` key is pressed, break from the loop if key == ord("q"): break # clear the stream in preparation for the next frame rawCapture.truncate(0)
"""Module-level Google Drive / temp-image setup.

Authenticates against Google Drive via PyDrive at import time and exposes
the auth object, the drive handle, and a shared TempImage through getters.
"""
from pyimagesearch.tempimage import TempImage
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import datetime
import conf  # allows use of conf.json settings

# NOTE(review): this rebinds the name `conf` from the imported module to
# its config object, so the `conf` module itself is unreachable afterwards.
conf = conf.config()

# initialise timestamp and temp image
# NOTE(review): `global` at module scope is a no-op; kept as-is.
global timestamp
temp = TempImage()
timestamp = datetime.datetime.utcnow()
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")

# GoogleDrive Authentication (runs at import time; may have side effects
# such as prompting for credentials -- confirm against deployment)
gauth = GoogleAuth()
drive = GoogleDrive(gauth)


def get_gauth():
    """Return the module-wide GoogleAuth instance."""
    return gauth


def get_drive():
    """Return the module-wide GoogleDrive instance."""
    return drive


def get_ti():
    """Return the shared TempImage created at import time."""
    return temp
def deal_frame(s_frame, dropbox_client, pi_raw_capture): timestamp = datetime.datetime.now() text = "Unoccupied" # 调整尺寸,转换成灰阶图像并进行模糊 s_frame = imutils.resize(s_frame, width=500) gray = cv2.cvtColor(s_frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21,21), 0) # cv2.imshow("Gray Feed", gray) global avg if avg is None: print "[INFO] starting background model..." avg = gray.copy().astype("float") if pi_raw_capture is not None: pi_raw_capture.truncate(0) return # accumulate the weighted average between the current frame and # previous frames, then compute the difference between the current # frame and running average cv2.accumulateWeighted(gray, avg, 0.5) framedelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) # cv2.imshow("framedelta Feed", framedelta) # 对图像进行阀值化,膨胀阀值图像来填补孔洞,在阀值图像上找到轮廓线 thresh = cv2.threshold(framedelta, conf["delta_thresh"], 255, cv2.THRESH_BINARY)[1] thresh = cv2.dilate(thresh, None, iterations=2) (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # cv2.imshow("thresh Feed", thresh) # 遍历轮廓线 for c in cnts: # if the contour is too small, ignore it if cv2.contourArea(c) < conf["min_area"]: continue # 计算轮廓线外框,在当前帧上画出外框,并更新文本 (x,y,w,h) = cv2.boundingRect(c) cv2.rectangle(s_frame, (x,y), (x + w, y + h), (0, 255, 0), 2) text = "Occupied" # 在当前帧上标文本和时间戳 ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") cv2.putText(s_frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) cv2.putText(s_frame, ts, (10, s_frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1) # 检查房间是否被占用 if text == "Occupied": # 判断上传时间间隔是否已经达到 global lastUploaded if ( timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]: # 运动检测计数器递增 global motionCounter motionCounter += 1 # 判断包含连续运动的帧数是否已经足够多 if motionCounter >= conf["min_motion_frames"]: # Write the image to the temporary file t = TempImage() cv2.imwrite(t.path, s_frame) # 判断dropbox是否启用 if dropbox_client is not None: # 
将图像上传到dropbox并删除临时图片 print "[UPLOAD] {}".format(ts) path = "{base_path}/{timestamp}.jpg".format(base_path=conf["dropbox_base_path"], timestamp=ts) dropbox_client.put_file(path, open(t.path, "rb")) t.cleanup() # 更新最近一次上传事件,重置计数器 lastUploaded = datetime.datetime.now() motionCounter = 0 else: motionCounter = 0 # 判断安保视频是否需要显示在屏幕上 if conf["show_video"]: # 显示视频 cv2.imshow("Security Feed", s_frame)
def video():
    """Run the camera surveillance loop.

    Reads frames from a VideoStream, detects motion against a running
    background average, annotates the frame with status and timestamp,
    and on motion saves a snapshot, sends an email, records database
    entries, and uploads the image via ByPy.  Press 'q' in the preview
    window to quit.  Relies on module-level `args`, `conf`, `send_email`,
    `insert_data`, `insert_images`.
    """
    vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
    print("[INFO] warming up...")
    time.sleep(2.0)
    avg = None
    lastUploaded = datetime.datetime.now()  # current system time
    motionCounter = 0
    while True:
        frame = vs.read()
        timestamp = datetime.datetime.now()
        text = "Unoccupied"
        # resize, convert to grayscale, and blur the frame
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # first frame seeds the background model
        if avg is None:
            print("[INFO] starting background model...")
            avg = gray.copy().astype("float")
            cv2.destroyAllWindows()
            continue
        # accumulate the running average of gray into avg, then diff
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
        # threshold, dilate to fill holes, find contours (OpenCV 2 and 3
        # return different tuple layouts from findContours)
        thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
            cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        for c in cnts:
            # ignore contours that are too small
            if cv2.contourArea(c) < conf["min_area"]:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"
        # annotate status and timestamp
        ts = timestamp.strftime("%y-%m-%d %I:%M:%S")
        cv2.putText(frame, "room status: {}".format(text), (10, 20),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        if text == "Occupied":
            if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
                motionCounter += 1
                # NOTE(review): unlike sibling variants, motionCounter is
                # not checked against conf["min_motion_frames"] before the
                # upload below -- confirm whether that gate was intended.
                if conf["use_dropbox"]:
                    t = TempImage()
                    cv2.imwrite(t.path, frame)
                    print("[UPLOAD] {}".format(ts))
                    # NOTE(review): `path` is computed but unused; the
                    # upload below goes to '/apps/' via ByPy instead.
                    path = "/{base_path}/{timestamp}.jpg".format(
                        base_path=conf["dropbox_base_path"], timestamp=ts)
                    bp = ByPy()
                    send_email()
                    insert_data()
                    insert_images(t.path)
                    bp.upload(t.path, '/apps/')
                    lastUploaded = timestamp
                    motionCounter = 0
        else:
            motionCounter = 0
        # optionally display the feed; 'q' quits
        if conf["show_video"]:
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break
    cv2.destroyAllWindows()
    vs.stop()
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") cv2.putText(frame, "{}".format(ts), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) # check to see if the room is occupied if text == "!": # check to see if enough time has passed between uploads if (timestamp - lastUploaded).seconds >= 3.0: # increment the motion counter motionCounter += 1 # check to see if the number of frames with consistent motion is # high enough if motionCounter >= 8: # write the image to temporary file t = TempImage() cv2.imwrite(t.path, frame) print "[UPLOAD] {}".format(ts) path = "{base_path}/{timestamp}.jpg".format(base_path="/", timestamp=ts) client.put_file(open(t.path, "rb").read(), path) t.cleanup() # update the last uploaded timestamp and reset the motion # counter lastUploaded = timestamp motionCounter = 0 # otherwise, the room is not occupied else: motionCounter = 0
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p") #print("[TICK] {}".format(ts)) # grab the raw NumPy array representing the image and initialize # the timestamp and occupied/unoccupied text frame = f.array text = "" # resize the frame, convert it to grayscale, and blur it frame = imutils.resize(frame, width=500) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) # if the average frame is None, initialize it if avg is None: print("[INFO] starting background model...") avg = gray.copy().astype("float") rawCapture.truncate(0) t = TempImage("Boot") cv2.imwrite(t.path, frame) lastFrame = frame continue # accumulate the weighted average between the current frame and # previous frames, then compute the difference between the current # frame and running average cv2.accumulateWeighted(gray, avg, 0.5) frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) # threshold the delta image, dilate the thresholded image to fill # in holes, then find contours on thresholded image thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255, cv2.THRESH_BINARY)[1] thresh = cv2.dilate(thresh, None, iterations=2) cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,