Example #1
# Imports assumed from usage in this snippet; build_argparser and play_mp3
# are project helpers defined elsewhere in the source repository.
import logging as log
import os
import sys
import time

import cv2
from openvino.inference_engine import IECore
from tempimage import TempImage


def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Loading Inference Engine")
    ie = IECore()

    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)

    model_xml = args.model  #'face-detection-retail-0004.xml'
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = ie.read_network(model=model_xml, weights=model_bin)

    log.info("Device info:")
    versions = ie.get_versions(args.device)
    print("{}{}".format(" " * 8, args.device))
    print("{}MKLDNNPlugin version ......... {}.{}".format(
        " " * 8, versions[args.device].major, versions[args.device].minor))
    print("{}Build ........... {}".format(" " * 8,
                                          versions[args.device].build_number))

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = ie.load_network(network=net, device_name=args.device,
                               num_requests=2)

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    n, c, h, w = net.inputs[input_blob].shape
    del net

    #s3client = boto3.resource('s3')
    expiresIn = 5 * 24 * 3600  # expire the recorded voice after 5 days

    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    last_epoch = time.time()

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    ret, frame = cap.read()
    log.info("To close the application, press 'CTRL+C'")

    while cap.isOpened():
        ret, frame = cap.read()

        if not ret:
            break
        initial_w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        initial_h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Main sync point:
        # in the truly async mode we start the NEXT infer request while waiting for the CURRENT one to complete;
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()

        in_frame = cv2.resize(frame, (w, h))
        in_frame = in_frame.transpose(
            (2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))
        exec_net.start_async(request_id=cur_request_id,
                             inputs={input_blob: in_frame})

        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start
            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            #log.info("Processing output blobs")

            #print(res)
            for obj in res[0][0]:
                # Draw only objects whose probability exceeds the specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    #color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                    color = (232, 35, 244)
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(
                        class_id)
                    cv2.putText(
                        frame,
                        det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %',
                        (xmin, ymin - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6, color,
                        1)

                    diff = (time.time() - last_epoch)

                    log.info("Object detected {:.1f} s".format(
                        round(obj[2] * 100, 1)))
                    if diff > 60:
                        last_epoch = time.time()

                        t = TempImage()
                        cv2.imwrite(t.path, frame)
                        #s3client.meta.client.upload_file(t.path, args.bucket, t.key )
                        #log.info("upload image to {}".format(signedUrl))
                        play_mp3('alert.wav')
                        t.cleanup()

            inf_time_message = "Inference time: {:.3f} ms".format(det_time *
                                                                  1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1000)
            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start

        key = cv2.waitKey(1)
        if key == 27:
            break

    cv2.destroyAllWindows()
    cap.release()
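Note: the loop above creates two infer requests (num_requests=2) and defines next_request_id, but it only ever submits and waits on cur_request_id, so inference still runs synchronously. A minimal sketch of the truly asynchronous pattern the "Main sync point" comment describes, assuming the same variables as the snippet (frame capture and detection parsing elided):

# submit the NEXT frame while the CURRENT request is still in flight
exec_net.start_async(request_id=next_request_id,
                     inputs={input_blob: in_frame})

# wait only on the CURRENT request and consume its output
if exec_net.requests[cur_request_id].wait(-1) == 0:
    res = exec_net.requests[cur_request_id].outputs[out_blob]
    # ... parse detections exactly as in the loop above ...

# swap ids: the request just submitted becomes "current" next iteration
cur_request_id, next_request_id = next_request_id, cur_request_id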
Example #2
	# draw the timestamp on the frame (the opening of this call was truncated
	# in the original snippet; reconstructed from the identical call in Example #6)
	cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
		0.35, (0, 0, 255), 1)

	# check to see if the room is occupied
	if text == "Someone Detected":
		# check to see if enough time has passed between uploads
		if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
			# increment the motion counter
			motionCounter += 1

			# check to see if the number of frames with consistent motion is
			# high enough
			if motionCounter >= conf["min_motion_frames"]:
				# check to see if Dropbox should be used
				if conf["use_dropbox"]:
					# write the image to temporary file
					t = TempImage()
					cv2.imwrite(t.path, frame)

					# upload the image to Dropbox and cleanup the temporary image
					print("[UPLOAD] {}".format(ts))
					path = "/{base_path}/{timestamp}.jpg".format(
					    base_path=conf["dropbox_base_path"], timestamp=ts)
					with open(t.path, "rb") as f:
						client.files_upload(f.read(), path)
					t.cleanup()

				# update the last uploaded timestamp and reset the motion
				# counter
				lastUploaded = timestamp
				motionCounter = 0

	# otherwise, the room is not occupied, so reset the motion counter
	else:
		motionCounter = 0
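This fragment assumes a Dropbox SDK v2 client named client. A minimal setup sketch, assuming the access token lives in the conf dictionary under a hypothetical dropbox_access_token key:

import dropbox

# v2 client whose files_upload method is used above
# (the conf key name here is an assumption, not from the original)
client = dropbox.Dropbox(conf["dropbox_access_token"])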
Example #3
    # If the focus measure fm is above our blur threshold (i.e., the image
    # is sharp enough), we want to save that image to encode and send out.
    #instance = vs.getGrabbed()
    #cascades.faceCascadeDetectionOfImage(frame)
    # Long-term goal would be to compress the image into a single-row matrix
    # as a string, write that to a txt file, and send it off to the mother
    # station, which will decode them all and present the picture.
    if fm > blurDetection.getBlurThresh() + 20:
        counter = 0
        ts = timestamp  #.strftime("%A %d %B %Y %I:%M:%S%p")
        #if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
        # check to see if Dropbox should be used
        if conf["use_dropbox"]:
            # write the image to a temporary file
            t = TempImage()
            cv2.imwrite(t.path, frame)

            # upload the image to Dropbox and cleanup the temporary image
            # (put_file is the legacy Dropbox SDK v1 call; the v2 equivalent
            # is files_upload, as in Example #2)
            print("[UPLOAD] {}".format(ts))
            path = "{base_path}/{timestamp}.jpg".format(
                base_path=conf["dropbox_base_path"], timestamp=ts)
            client.put_file(path, open(t.path, "rb"))
            t.cleanup()

            # update the last uploaded timestamp and reset the motion counter
            #lastUploaded = timestamp
            timestamp = timestamp + 1

        # So basically we set the conditional to have a limited range of
        # blurDetectionThresh
        motionDetected = True

Example #4
    # draw the timestamp on the frame
    timestamp = datetime.datetime.now()
    #ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    #cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
    #           0.35, (0, 0, 255), 1)  # Check to see if motion

    if motionDetected:
        # check to see if enough time has passed between uploads
        # if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:

        # increment the motion counter
        motionCounter += 1

        print("Motion Counter:" + str(motionCounter))

        # check to see if the number of frames with consistent motion is
        # high enough
        if motionCounter >= conf["min_motion_frames"]:
            t = TempImage()
            cv2.imwrite(t.path, frame)

            # update the last uploaded timestamp and reset the motion
            # counter
            lastUploaded = timestamp
            motionCounter = 0

    else:
        motionCounter = 0
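Every example on this page routes frames through a TempImage helper before uploading them. The class itself is not shown here; a minimal sketch of what it plausibly looks like, inferred from how t.path, t.key, and t.cleanup() are used above (the constructor defaults are assumptions):

import os
import uuid


class TempImage:
    def __init__(self, basePath="./", ext=".jpg"):
        # build a random, collision-resistant filename for the frame
        self.key = "{}{}".format(uuid.uuid4(), ext)
        self.path = os.path.join(basePath, self.key)

    def cleanup(self):
        # delete the temporary file once it has been consumed
        os.remove(self.path)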
Example #5
def check_for_objects():
    global last_epoch
    global last_upload
    global motionCounter
    while True:
        try:
            _, found_obj, frame = video_camera.get_object(object_classifier)
            if found_obj and (time.time() - last_epoch) > conf["min_motion_window"]:
                motionCounter += 1
                print("$$$ object found with motion counter", motionCounter)
                print("min_motion_frames", conf["min_motion_frames"])
                print("last upload was x seconds ago", (time.time() - last_upload))
                print("min upload interval", conf["upload_interval"])
                last_epoch = time.time()
                if (motionCounter >= conf["min_motion_frames"]
                        and (time.time() - last_upload) > conf["upload_interval"]):
                    print("$$$ upload")
                    last_upload = time.time()
                    motionCounter = 0
                    t = TempImage()
                    cv2.imwrite(t.path, frame)
                    s3client.meta.client.upload_file(t.path, conf["s3bucket_name"], t.key)
                    signedUrl = s3client.meta.client.generate_presigned_url(
                        'get_object',
                        Params={'Bucket': conf["s3bucket_name"], 'Key': t.key},
                        ExpiresIn=expiresIn)
                    faceId = None
                    faceName = "Visitor"
                    matched = False
                    if conf["use_rekognition"]:
                        time.sleep(1)
                        rekodata = {"key": t.key}
                        matched, piFace = search_face(rekodata)
                        if matched:
                            faceId = piFace['faceId']
                            faceName = piFace['faceName']

                    slack_data = {
                        'attachments': [
                            {
                                'color': "#36a64f",
                                'pretext': faceName + " at the front door",
                                'title': "Home Surveillance",
                                'title_link': "https://master.d5yvordr34qco.amplifyapp.com",
                                'image_url': signedUrl
                            }
                        ]
                    }
                    requests.post(conf["slack_incoming_webhook"],
                                  data=json.dumps(slack_data),
                                  headers={'Content-Type': 'application/json'})
                    persistNotification({
                        "id": str(uuid.uuid4()),
                        "createdOn": datetime.now().isoformat(),
                        "bucket": conf["s3bucket_name"],
                        "signedUrl": signedUrl,
                        "key": t.key,
                        "faceId": faceId,
                        "faceName": faceName
                    })
                    t.cleanup()
                    # prompt the visitor to leave a voice message
                    polly.speak("Hello " + faceName + ".... Please leave your brief message after the beep....")
                    recordGuestVoice()
                      

                else:
                    print("$$$ dont upload")

        except Exception:
            print("Error in check_for_objects:", sys.exc_info()[0])
Example #6
def surveillance_loop(conf):
    global terminate
    global cloud_storage_bucket
    # check to see if cloud upload should be used

    # initialize the camera and grab a reference to the raw camera capture
    camera = cv2.VideoCapture(0)
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, conf["resolution"][0])
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, conf["resolution"][1])
    camera.set(cv2.CAP_PROP_FPS, conf["fps"])
    time.sleep(0.25)

    # allow the camera to warmup, then initialize the average frame, last
    # uploaded timestamp, and frame motion counter
    print("[INFO] warming up...")
    time.sleep(conf["camera_warmup_time"])
    avg = None
    lastUploaded = datetime.datetime.now()
    motionCounter = 0
    # capture frames from the camera
    while not terminate:
        # grab the raw NumPy array representing the image and initialize
        # the timestamp and occupied/unoccupied text
        (grabbed, frame) = camera.read()
        if not grabbed:
            break

        timestamp = datetime.datetime.now()
        text = "Unoccupied"

        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        frame = cv2.flip(frame, -1)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the average frame is None, initialize it
        if avg is None:
            print("[INFO] starting background model...")
            avg = gray.copy().astype("float")
            #rawCapture.truncate(0)
            continue

        # accumulate the weighted average between the current frame and
        # previous frames, then compute the difference between the current
        # frame and running average
        cv2.accumulateWeighted(gray, avg, 0.5)
        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

        # threshold the delta image, dilate the thresholded image to fill
        # in holes, then find contours on thresholded image
        thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
                               cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)  # works across OpenCV 2/3/4

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < conf["min_area"]:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"

        # draw the text and timestamp on the frame
        ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, ts, (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # check to see if the room is occupied
        if text == "Occupied":
            # set the light to RED to indicate that motion was detected
            setColor(colors['red'])
            # check to see if enough time has passed between uploads
            if (timestamp -
                    lastUploaded).seconds >= conf["min_upload_seconds"]:
                # increment the motion counter
                motionCounter += 1

                # check to see if the number of frames with consistent motion is
                # high enough
                if motionCounter >= conf["min_motion_frames"]:
                    # check to see if cloud upload should be used
                    if conf["use_cloud"]:
                        # write the image to temporary file
                        t = TempImage()
                        cv2.imwrite(t.path, frame)

                        # upload the image to cloud storage and cleanup the temporary image
                        print("[UPLOAD] {}".format(ts))

                        # boto2-style bucket API: create a key named after
                        # the timestamp and upload the file contents to it
                        k = cloud_storage_bucket.new_key(ts)
                        k.set_contents_from_filename(t.path)

                        t.cleanup()

                    # update the last uploaded timestamp and reset the motion
                    # counter
                    lastUploaded = timestamp
                    motionCounter = 0

        # otherwise, the room is not occupied
        else:
            motionCounter = 0
            # set the light to GREEN to indicate no motion being detected
            setColor(colors['green'])

        # check to see if the frames should be displayed to screen
        if conf["show_video"]:
            # display the security feed
            cv2.imshow("Security Feed", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                terminate = True

    if conf["show_video"]: cv2.destroyAllWindows()
    camera.release()
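All of the examples above read their tuning values from a conf dictionary, typically loaded from a JSON file at startup. A hypothetical conf collecting the keys the snippets reference (every value below is illustrative only, not from the original projects):

conf = {
    "show_video": True,
    "use_dropbox": True,
    "dropbox_base_path": "home_security",
    "use_cloud": False,
    "use_rekognition": False,
    "s3bucket_name": "my-surveillance-bucket",  # assumed name
    "slack_incoming_webhook": "https://hooks.slack.com/services/...",
    "min_upload_seconds": 3.0,
    "min_motion_frames": 8,
    "min_motion_window": 10,
    "upload_interval": 60,
    "camera_warmup_time": 2.5,
    "delta_thresh": 5,
    "min_area": 5000,
    "resolution": [640, 480],
    "fps": 16
}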
Example #7
'''
	This code is used for uploading images to the user's Google Drive.
	A temporary image is created using the tempimage module.
'''
from tempimage import TempImage
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import datetime

# Initialize temp image
temp = TempImage()

# Google Drive authentication
gauth = GoogleAuth()
drive = GoogleDrive(gauth)


def get_gauth():
    return gauth


def get_drive():
    return drive


def get_tmpimg():
    return temp


def authentication():
    try:
        # the original snippet is truncated here; a typical PyDrive flow
        # (an assumption, not from the original) completes OAuth via a
        # local webserver
        gauth.LocalWebserverAuth()
    except Exception as e:
        print("Authentication failed: {}".format(e))
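For completeness, a short hypothetical usage sketch showing how the temp image returned by get_tmpimg() could be pushed to Drive. CreateFile, SetContentFile, and Upload are the standard PyDrive calls; the helper name and title choice are assumptions:

def upload_temp_image():
    # hypothetical helper, not part of the original module
    f = drive.CreateFile({'title': temp.key})
    f.SetContentFile(temp.path)
    f.Upload()
    temp.cleanup()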