def recognition():
    global count
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-t",
                    "--target",
                    type=str,
                    required=True,
                    choices=["myriad", "cpu"],
                    help="target processor for object detection")
    ap.add_argument("-m",
                    "--mode",
                    type=str,
                    required=True,
                    choices=["horizontal", "vertical"],
                    help="direction in which people will be moving")
    ap.add_argument("-c",
                    "--conf",
                    required=True,
                    help="Path to the input configuration file")
    ap.add_argument("-i",
                    "--input",
                    type=str,
                    help="path to optional input video file")
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    help="path to optional output video file")
    args = vars(ap.parse_args())

    # load the configuration file
    conf = Conf(args["conf"])

    # initialize the list of class labels MobileNet SSD detects
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(conf["prototxt_path"], conf["model_path"])

    # check if the target processor is myriad, if so, then set the
    # preferable target to myriad
    if args["target"] == "myriad":
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    # otherwise, the target processor is CPU
    else:
        # set the preferable target processor to CPU and preferable
        # backend to OpenCV
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)

    # if a video path was not supplied, grab a reference to the webcam
    if not args.get("input", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        #vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(args["input"])

    # initialize the video writer process (we'll instantiate later if
    # need be) along with the frame dimensions
    writerProcess = None
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(maxDisappeared=20, maxDistance=30)
    trackers = []
    trackableObjects = {}
    # initialize the direction info variable (used to store information
    # such as up/down or left/right people count) and a variable to store
    # the total number of frames processed thus far
    directionInfo = None
    totalFrames = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # convert the frame from BGR to RGB for dlib
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # check to see if the frame dimensions are not set
        if W is None or H is None:
            # set the frame dimensions and instantiate our direction
            # counter
            (H, W) = frame.shape[:2]
            dc = DirectionCounter(args["mode"], H, W)

        # begin writing the video to disk if required
        if args["output"] is not None and writerProcess is None:
            # set the value of the write flag (used to communicate when
            # to stop the process)
            writeVideo = Value('i', 1)

            # initialize a frame queue and start the video writer
            frameQueue = Queue()
            writerProcess = Process(target=write_video,
                                    args=(args["output"], writeVideo,
                                          frameQueue, W, H))
            writerProcess.start()

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % conf["skip_frames"] == 0:
            # set the status and initialize our new set of object
            # trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame,
                                         size=(300, 300),
                                         ddepth=cv2.CV_8U)
            net.setInput(blob,
                         scalefactor=1.0 / 127.5,
                         mean=[127.5, 127.5, 127.5])
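            # with this scale factor and mean, the network effectively receives
            # (pixel - 127.5) / 127.5, i.e. inputs normalized to [-1, 1], which
            # is the preprocessing MobileNet SSD expects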
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > conf["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # check if the direction is *vertical*
        if args["mode"] == "vertical":
            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # otherwise, the direction is *horizontal*
        else:
            # draw a vertical line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'left' or 'right'
            cv2.line(frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # grab the trackable object via its object ID
            to = trackableObjects.get(objectID, None)

            # create a new trackable object if needed
            if to is None:
                to = TrackableObject(objectID, centroid,
                                     datetime.datetime.now())
                cursor.execute(
                    'INSERT INTO trackhistory (track_time,track_date) VALUES (%s,%s)',
                    (to.dt, to.dt))
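                # note: `cursor` is assumed to be a database cursor created at
                # module level (it is not defined in this listing)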
                #urllib.request.urlopen("https://api.thingspeak.com/update?api_key="+API+"&field1=0"+str(1))
                count += 1

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # find the direction and update the list of centroids
                dc.find_direction(to, centroid)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # find the direction of motion of the people
                    directionInfo = dc.count_object(to, centroid)

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            color = (0, 255, 0) if to.counted else (0, 0, 255)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

        # check if there is any direction info available
        if directionInfo is not None:
            # construct a list of information as a combination of
            # direction info and status info
            info = directionInfo + [("Status", status)]

        # otherwise, there is no direction info available yet
        else:
            # construct a list of information as status info since we
            # don't have any direction info available yet
            info = [("Status", status)]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # put frame into the shared queue for video writing
        if writerProcess is not None:
            frameQueue.put(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()
    count = -1
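

# write_video() is launched above as a separate process but is not defined in
# this listing. A minimal sketch of such a worker, assuming the
# (outputPath, writeVideo, frameQueue, W, H) signature used in recognition()
# and an MJPG codec at 30 FPS (both are assumptions):
def write_video(outputPath, writeVideo, frameQueue, W, H):
    # open a video writer for the given output path and frame dimensions
    writer = cv2.VideoWriter(outputPath, cv2.VideoWriter_fourcc(*"MJPG"), 30,
                             (W, H), True)

    # keep writing while the parent signals to continue or frames remain
    while writeVideo.value == 1 or not frameQueue.empty():
        if not frameQueue.empty():
            writer.write(frameQueue.get())

    # release the writer once the parent clears the flag and the queue drains
    writer.release()
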
from sklearn.preprocessing import LabelEncoder
from imutils import paths
import argparse
import cPickle
import random

# construct the argument parser and parse the command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="path to configuration file")
args = vars(ap.parse_args())

# load the configuration and grab all image paths in the dataset
conf = Conf(args["conf"])
imagePaths = list(paths.list_images(conf["dataset_path"]))

# shuffle the image paths to ensure randomness -- this helps produce an
# unbiased training and testing split
random.seed(42)
random.shuffle(imagePaths)

# determine the set of possible class labels from the image dataset assuming
# that the images are in {directory}/{filename} structure and create the
# label encoder
print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit([p.split("/")[-2] for p in imagePaths])
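# as an example, an image path such as "dataset/person/00001.jpg" yields the
# label "person"; note that splitting on "/" assumes POSIX-style paths, so
# p.split(os.path.sep)[-2] (with os imported) would be the more portable choice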

# initialize the Overfeat extractor and the Overfeat indexer
# Example 3
def Detect():
    # construct the argument parser and parse the arguments
    # the --conf argument is required, so the script will not run without a
    # configuration file (which holds settings such as the accuracy thresholds)
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--conf",
                    required=True,
                    help="Path to the input configuration file")
    # optional argument for analysing a video file instead of the live camera feed
    ap.add_argument("-i", "--input", help="path to the input video file")
    args = vars(ap.parse_args())

    # load the configuration file
    conf = Conf(args["conf"])

    # load the COCO class labels our YOLO model was trained on and
    # initialize a list of colors to represent each possible class
    # label
    # the COCO labels are the list of objects the model can recognise, e.g.
    # whether the user is looking at a chair or a person
    LABELS = open(conf["labels_path"]).read().strip().split("\n")
    np.random.seed(42)
    COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))

    # initialize the plugin for the specified device -- this sets up the NCS2,
    # which provides the extra visual processing power needed for inference
    plugin = IEPlugin(device="MYRIAD")

    # read the IR generated by the Model Optimizer (.xml and .bin files)
    print("[INFO] loading models...")
    net = IENetwork(model=conf["xml_path"], weights=conf["bin_path"])

    # prepare inputs
    print("[INFO] preparing inputs...")
    inputBlob = next(iter(net.inputs))

    # set the default batch size as 1 and get the number of input blobs,
    # number of channels, the height, and width of the input blob
    net.batch_size = 1
    (n, c, h, w) = net.inputs[inputBlob].shape

    # if a video path was not supplied, grab a reference to the webcam
    if args["input"] is None:
        print("[INFO] starting video stream...")
        # vs = VideoStream(src=0).start()
        vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(os.path.abspath(args["input"]))

    # loading model to the plugin and start the frames per second
    # throughput estimator
    print("[INFO] loading model to the plugin...")
    execNet = plugin.load(network=net, num_requests=1)
    fps = FPS().start()

    # loop over the frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        orig = vs.read()
        orig = orig[1] if args["input"] is not None else orig

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and orig is None:
            break

        # resize the original frame to have a maximum width of 500 pixels and
        # resize the input frame to the network's input size
        orig = imutils.resize(orig, width=500)
        frame = cv2.resize(orig, (w, h))

        # change data layout from HxWxC to CxHxW
        frame = frame.transpose((2, 0, 1))
        frame = frame.reshape((n, c, h, w))

        # start inference and initialize list to collect object detection
        # results
        output = execNet.infer({inputBlob: frame})
        objects = []

        # loop over the output items
        for (layerName, outBlob) in output.items():
            # create a new object which contains the required tinyYOLOv3
            # parameters
            layerParams = TinyYOLOV3Params(net.layers[layerName].params,
                                           outBlob.shape[2])

            # parse the output region
            objects += TinyYOLOv3.parse_yolo_region(outBlob, frame.shape[2:],
                                                    orig.shape[:-1],
                                                    layerParams,
                                                    conf["prob_threshold"])

        # loop over each of the objects
        for i in range(len(objects)):
            # check if the confidence of the detected object is zero, if
            # it is, then skip this iteration, indicating that the object
            # should be ignored
            if objects[i]["confidence"] == 0:
                continue

            # loop over remaining objects
            for j in range(i + 1, len(objects)):
                # check if the IoU of both the objects exceeds a
                # threshold, if it does, then set the confidence of that
                # object to zero
                if TinyYOLOv3.intersection_over_union(
                        objects[i], objects[j]) > conf["iou_threshold"]:
                    objects[j]["confidence"] = 0

        # filter objects by using the probability threshold -- if an
        # object is below the threshold, ignore it
        objects = [obj for obj in objects if obj['confidence'] >= \
            conf["prob_threshold"]]

        # store the height and width of the original frame
        (endY, endX) = orig.shape[:-1]

        # loop through all the remaining objects
        for obj in objects:
            # validate the bounding box of the detected object, ensuring
            # we don't have any invalid bounding boxes
            if obj["xmax"] > endX or obj["ymax"] > endY or obj["xmin"] \
                < 0 or obj["ymin"] < 0:
                continue

            # build a label consisting of the predicted class and
            # associated probability
            label = "{}: {:.2f}%".format(LABELS[obj["class_id"]],
                                         obj["confidence"] * 100)

            print(label)

            # this helper estimates the number of steps from the user to the
            # detected object
            @functools.lru_cache(maxsize=128)
            def distance():
                # Using the age of the user and the Ultrasonic sensor, we can find the steps required.
                age = calculateAge(date(year, month, day))
                # set Trigger to HIGH
                GPIO.output(GPIO_TRIGGER, True)

                # set Trigger back to LOW after 0.1 ms
                time.sleep(0.0001)
                GPIO.output(GPIO_TRIGGER, False)
                StartTime = time.time()
                StopTime = time.time()

                # save StartTime
                while GPIO.input(GPIO_ECHO) == 0:
                    StartTime = time.time()
                # save time of arrival
                while GPIO.input(GPIO_ECHO) == 1:
                    StopTime = time.time()

                # time difference between start and arrival
                TimeElapsed = StopTime - StartTime
                # multiply with the sonic speed (34300 cm/s)
                # and divide by 2, because there and back
                distance = (TimeElapsed * 34300) / 2
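                # e.g., an echo time of 0.01 s gives (0.01 * 34300) / 2 = 171.5 cm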

                return distance

            # the distance returned above is in centimeters
            dist = distance()

            # convert centimeters to meters
            meter = dist / 100.0

            # report the measurement for debugging
            measure = " and the measured distance is %.1f m" % meter
            print(measure)

            # map the user's age to an average walking pace -- each branch
            # below checks which age range the user falls into and uses the
            # corresponding pace
            if age in range(20, 30):
                # print the pace for debugging
                print('Steps = 1.35 m/s')
                # estimate the number of steps by dividing the distance in
                # meters by the pace and rounding, since a fractional step
                # count would not make sense to the user
                steps = round(meter / 1.35)
                print(steps)

            # Same as the previous statement, just different age group and pace.
            elif age in range(30, 40):
                print('Steps = 1.385 m/s')
                steps = round(meter / 1.385)
                print(steps)
            # Same as the previous statement, just different age group and pace.
            elif age in range(40, 50):
                print('Steps = 1.41 m/s')
                steps = round(meter / 1.41)
                print(steps)
            # Same as the previous statement, just different age group and pace.
            elif age in range(50, 60):
                print('Steps = 1.37 m/s')
                steps = round(meter / 1.37)
                print(steps)
            # Same as the previous statement, just different age group and pace.
            elif age in range(60, 70):
                print('Steps = 1.29 m/s')
                steps = round(meter / 1.29)
                print(steps)
            # Same as the previous statement, just different age group and pace.
            elif age in range(70, 80):
                print('Steps = 1.195 m/s')
                steps = round(meter / 1.195)
                print(steps)
            # Same as the previous statement, just different age group and pace.
            elif age in range(80, 90):
                print('Steps = 0.96 m/s')
                steps = round(meter / 0.96)
                print(steps)
            # fallback (assumed, not in the original): default to an average
            # adult pace so `steps` is always defined for other ages
            else:
                steps = round(meter / 1.3)
                print(steps)

            # build a sentence containing the number of steps; the same text
            # is used later for the audio output
            stepstxt = " and the object is %s steps away." % steps
            # print the text for debugging
            print(stepstxt)

            # set the language in which the audio will be spoken
            language = 'en'
            # print the detected object, its confidence, and the steps text
            # for debugging purposes
            print(label + stepstxt)
            # convert the combined text into speech stored as an MP3 file
            output = gTTS(text=label + stepstxt, lang=language, slow=False)

            # save the speech to an MP3 file
            output.save('output.mp3')

            # play the MP3 from the terminal via mpg123, without having to
            # open any other application
            os.system('mpg123 output.mp3')
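            # note: gTTS synthesizes the speech through Google's online TTS
            # service, so this step requires a working network connection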

            # the remaining code draws on the real-time viewing window for
            # demonstration and debugging
            # calculate the y-coordinate used to write the label on the
            # frame depending on the bounding box coordinate
            y = obj["ymin"] - 15 if obj["ymin"] - 15 > 15 else \
                obj["ymin"] + 15

            # draw a bounding box rectangle and label on the frame
            cv2.rectangle(orig, (obj["xmin"], obj["ymin"]),
                          (obj["xmax"], obj["ymax"]), COLORS[obj["class_id"]],
                          2)
            cv2.putText(orig, label, (obj["xmin"], y),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, COLORS[obj["class_id"]],
                        3)

        # display the current frame to the screen and record if a user
        # presses a key
        cv2.imshow("TinyYOLOv3", orig)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # stop the video stream and close any open windows
    vs.stop() if args["input"] is None else vs.release()
    cv2.destroyAllWindows()
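

# calculateAge() and the user's birth date (year, month, day) are referenced
# inside Detect() but are not defined in this listing. A minimal sketch,
# assuming calculateAge() takes a datetime.date of birth and returns the age
# in whole years:
from datetime import date

def calculateAge(birthDate):
    # subtract one year if the birthday has not yet occurred this year
    today = date.today()
    return today.year - birthDate.year - (
        (today.month, today.day) < (birthDate.month, birthDate.day))
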
# Example 4
def signal_handler(sig, frame):
    print("[INFO] You pressed 'ctrl + c'! Closing refrigerator monitor" \
        " application...")
    sys.exit(0)


# construct the argument parser and parse the args
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="Path to the input configuration file")
args = vars(ap.parse_args())

# load the configuration file and initialize the Twilio notifier
conf = Conf(args["conf"])  # this path comes from the command line argument
tn = TwilioNotifier(conf)

# initialize the flags for fridge open and notification sent
fridgeOpen = False
notifSent = False

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
# vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)

# signal trap to handle keyboard interrupt
signal.signal(signal.SIGINT, signal_handler)
print("[INFO] Press `ctrl + c` to exit, or 'q' to quit if you have" \
# Example 5
def run_program():
    # load our serialized model from disk
    print("[INFO] loading model...")
    network = cv2.dnn.readNetFromCaffe(
        conf["prototxt_path"], conf["model_path"])
    # network.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    videoStream = initialize_video_stream()

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    H = None
    W = None

    # keep the count of total number of frames
    totalFrames = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # continue stream flag
    isContinueStream = True

    # loop over the frames of the stream
    while True:
        # load the line configuration file
        lines_offset_conf = Conf(inputArguments["lines"])

        currentFrame = get_frame_from_video_stream(videoStream=videoStream)

        # check if the frame is None, if so, break out of the loop
        if currentFrame is None:
            break

        # resize the frame
        currentFrame = imutils.resize(currentFrame, width=conf["frame_width"])
        rgb = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = currentFrame.shape[:2]

        # save clean original frame before drawing on it
        cleanFrame = currentFrame.copy()

        # draw 2 vertical lines and one horizontal line in the frame
        (LeftToRightLine, RightToLeftLine, HorizontalLine) = calculate_zone_boundaries(
            H, W, lines_offset_conf)
        draw_lines(LeftToRightLine, RightToLeftLine,
                   HorizontalLine, currentFrame, H, W)

        if not display_frame(currentFrame):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

        # if current frame is a stream frame
        # enable continue streaming
        if totalFrames % conf["streaming_frequency"] == 0:
            isContinueStream = True

        # if continue streaming true
        # print report to server
        # disable continue streaming until next stream frame
        if isContinueStream:
            handle_report(None, frame=currentFrame,
                          isForStreaming=True, isStreaming=isStreaming)
            isContinueStream = False

        # if not streaming break after sending  one frame
        if not isStreaming:
            break

    end_program(fps, videoStream)
def Object_Detection(InputType):
    ####################################SETUP########################################
    #Object Detection
    print("Initializing Parameters")
    startTimerTime = 0.0
    endTimerTime = 0.0
    elapseTimerTime = 0.0
    setTimerTime = 10.0  # detection window in seconds

    #Detection
    detectIterations = 0
    DOG_Stat = 0
    CAT_Stat = 0
    PERSON_Stat = 0
    confidenceTimeThreshold = .70

    # Command Line Interface #
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c", "--conf", required=True,
        help="Path to the input configuration file")
    ap.add_argument("-i", "--input", help="path to the input video file")
    args = vars(ap.parse_args())

    # load the configuration file
    conf = Conf(args["conf"])

    # COCO label and corresponding color preparation #
    # load the COCO class labels that YOLO model was trained on and
    # initialize a list of colors to represent each possible class label
    LABELS = open(conf["labels_path"]).read().strip().split("\n")
    np.random.seed(42)
    COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))

    # Load tinyYOLOv3 model onto Movidius NCS2 #
    # initialize the plugin for the specified device
    plugin = IEPlugin(device="MYRIAD")

    # read the IR generated by the Model Optimizer (.xml and .bin files)
    #.xml is YOLO architecture & .bin is the pretrained weights
    print("[INFO] loading models...")
    net = IENetwork(model=conf["xml_path"], weights=conf["bin_path"])

    # prepare inputs
    print("[INFO] preparing inputs...")
    inputBlob = next(iter(net.inputs))

    # set the default batch size as 1 and get the number of input blobs,
    # number of channels, the height, and width of the input blob
    net.batch_size = 1
    (n, c, h, w) = net.inputs[inputBlob].shape

    # Image input reference for tinyYOLOv3 #
    # if a video path was not supplied, grab a reference to the webcam
    if args["input"] is None:
        print("[INFO] starting video stream...")
        # vs = VideoStream(src=0).start() #USB Camera
        vs = VideoStream(usePiCamera=True).start() #Pi Camera
        time.sleep(2.0)
    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening image/video file...")
        vs = cv2.VideoCapture(os.path.abspath(args["input"]))
        
    # loading model to the plugin and start the frames per second
    # throughput estimator
    print("[INFO] loading model to the plugin...")
    execNet = plugin.load(network=net, num_requests=1)
    fps = FPS().start()

    ######################################Processing#####################################

    print("[INFO] Runnig Detection...")     
    startTimerTime = time.perf_counter()
    while(elapseTimerTime < setTimerTime):
        
        # grab the next frame and handle VideoCapture or VideoStream
        orig = vs.read()
        orig = orig[1] if args["input"] is not None else orig

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and orig is None:
            break

        # resize the original frame to have a maximum width of 500 pixels and
        # resize the input frame to the network's input size
        orig = imutils.resize(orig, width=500)
        frame = cv2.resize(orig, (w, h))

        # change data layout from HxWxC to CxHxW
        frame = frame.transpose((2, 0, 1))
        frame = frame.reshape((n, c, h, w))

        # start inference and initialize list to collect object detection
        # results
        output = execNet.infer({inputBlob: frame})
        objects = []

        # loop over the output items
        for (layerName, outBlob) in output.items():
            # create an object with required tinyYOLOv3 parameters
            layerParams = TinyYOLOV3Params(net.layers[layerName].params,
                outBlob.shape[2])

            # parse the output region
            objects += TinyYOLOv3.parse_yolo_region(outBlob,
                frame.shape[2:], orig.shape[:-1], layerParams,
                conf["prob_threshold"])

        # loop over each of the objects
        for i in range(len(objects)):
            # check if the confidence of the detected object is zero, if
            # it is, then skip this iteration, indicating that the object
            # should be ignored
            if objects[i]["confidence"] == 0:
                continue

            # loop over remaining objects[Intersection over Union (IoU)]
            for j in range(i + 1, len(objects)):
                # check if the IoU of both the objects exceeds a
                # threshold, if it does, then set the confidence of
                # that object to zero
                if TinyYOLOv3.intersection_over_union(objects[i],
                    objects[j]) > conf["iou_threshold"]:
                    objects[j]["confidence"] = 0

        # filter objects by using the probability threshold -- if an
        # object is below the threshold, ignore it
        objects = [obj for obj in objects if obj['confidence'] >= \
            conf["prob_threshold"]]

    ############################Object Detection Frame & Stats###########################

        # store the height and width of the original frame
        (endY, endX) = orig.shape[:-1]

        # loop through all the remaining objects
        for obj in objects:
            # validate the bounding box of the detected object, ensuring
            # we don't have any invalid bounding boxes
            if obj["xmax"] > endX or obj["ymax"] > endY or obj["xmin"] \
                < 0 or obj["ymin"] < 0:
                continue

            # build a label consisting of the predicted class and
            # associated probability
            label = "{}: {:.2f}%".format(LABELS[obj["class_id"]],
                obj["confidence"] * 100)
            print(label)
            
            # keep a running count of how many frames each class of interest
            # appeared in
            tag = LABELS[obj["class_id"]]
            if tag == "person":
                PERSON_Stat += 1
            elif tag == "dog":
                DOG_Stat += 1
            elif tag == "cat":
                CAT_Stat += 1
                
            # calculate the y-coordinate used to write the label on the
            # frame depending on the bounding box coordinate
            y = obj["ymin"] - 15 if obj["ymin"] - 15 > 15 else \
                obj["ymin"] + 15

            # draw a bounding box rectangle and label on the frame
            cv2.rectangle(orig, (obj["xmin"], obj["ymin"]), (obj["xmax"],
                obj["ymax"]), COLORS[obj["class_id"]], 2)
            cv2.putText(orig, label, (obj["xmin"], y),
                cv2.FONT_HERSHEY_SIMPLEX, 1, COLORS[obj["class_id"]], 3)

        # display the current frame to the screen
        cv2.imshow("TinyYOLOv3", orig)

        # update the FPS counter
        fps.update()
        
        detectIterations = detectIterations + 1
        endTimerTime = time.perf_counter()
        elapseTimerTime = endTimerTime - startTimerTime #seconds
        print(elapseTimerTime)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] Time Frame Calculation for Person: ", PERSON_Stat)
    print("[INFO] Time Frame Calculation for Dog: ", DOG_Stat)
    print("[INFO] Time Frame Calculation for Cat: ", CAT_Stat)

    #calculate confidence level
    print("Calculating OD Stats")
    print("Iterations :", detectIterations)
    print("TimeFrame :", elapseTimerTime)
    if(InputType == 'v'):
        IterationPercentage = elapseTimerTime / detectIterations
    if(InputType == 'i'):
        IterationPercentage = detectIterations
    print(IterationPercentage)

    DOG_Final= DOG_Stat * IterationPercentage
    print(DOG_Final)

    CAT_Final= CAT_Stat * IterationPercentage
    print(CAT_Final)

    PERSON_Final= PERSON_Stat * IterationPercentage
    print(PERSON_Final)

    # require the dog or cat score to exceed the threshold while the person
    # score stays below it
    if ((DOG_Final > confidenceTimeThreshold or CAT_Final > confidenceTimeThreshold)
            and PERSON_Final < confidenceTimeThreshold):
        print("[INFO] Detected ...")
        print("Reading TEMP")
        Current_Temp = readTemp()
        print("Current Temp: ", Current_Temp, "*C")

        return Current_Temp
    else:
        print("[INFO] No Detections ...")
        return 0
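

# TinyYOLOv3.intersection_over_union() is imported from a helper module that
# is not included in this listing. A generic sketch of the IoU computation for
# two detections stored as dicts with xmin/ymin/xmax/ymax keys (the format
# used above):
def intersection_over_union(boxA, boxB):
    # coordinates of the intersection rectangle
    xA = max(boxA["xmin"], boxB["xmin"])
    yA = max(boxA["ymin"], boxB["ymin"])
    xB = min(boxA["xmax"], boxB["xmax"])
    yB = min(boxA["ymax"], boxB["ymax"])

    # area of overlap (zero when the boxes do not intersect)
    interArea = max(0, xB - xA) * max(0, yB - yA)

    # areas of each box, then IoU = overlap / union
    boxAArea = (boxA["xmax"] - boxA["xmin"]) * (boxA["ymax"] - boxA["ymin"])
    boxBArea = (boxB["xmax"] - boxB["xmin"]) * (boxB["ymax"] - boxB["ymin"])
    return interArea / float(boxAArea + boxBArea - interArea)
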
import cv2
import numpy as np
import imutils
import dlib
import AvoidanceDecision as AD
# import DIP_avoidance.AvoidanceDecision as AD
from pyimagesearch.utils import Conf
from pyimagesearch.centroidtracker import CentroidTracker
from imutils.video import FPS

# load the configuration file
conf = Conf('config/config.json')

#'../project_video.mp4'

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
H = None
W = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=conf["max_disappear"],
                     maxDistance=conf["max_distance"])
trackers = []
trackableObjects = {}

# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = [
# Example 8


def main():
    run_program()


if __name__ == "__main__":
    inputArguments = get_input_arguments()

    # load the configuration file
    conf = Conf(inputArguments["conf"])
    lines_offset_conf = Conf(inputArguments["lines"])
    isStreaming = inputArguments["stream"]

    print("[INFO] now streaming: " + str(isStreaming))

    main()
# Example 9
from pyimagesearch.utils import Conf
import argparse
from picamera import PiCamera
from time import sleep
from Adafruit_IO import Client, Feed, Data
import time
import datetime

# TwilioNotifier is used below; this import path is an assumption, matching
# the convention of the other examples in this collection
from pyimagesearch.notifications import TwilioNotifier

ADAFRUIT_IO_KEY = '*****************************'
ADAFRUIT_IO_USERNAME = '******'
aio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)

camera = PiCamera()

conf = Conf(
    "/home/pi/Desktop/sighthound-lpr-cloud-examples/code-samples/python/config/config.json"
)
tn = TwilioNotifier(conf)

###############################################################################
##
##  You must edit and replace 'YourSighthoundToken' with your own token.
##
## Your Sighthound Cloud API Token. More information at
##     https://www.sighthound.com/support/creating-api-token
##

_cloud_token = "****************************"

##
###############################################################################
# Example 10
	def __init__(self):

		rospy.loginfo("Initializing ROS node ...")
		self.node_name = 'dip_listener'
		rospy.init_node(self.node_name)

		# what the program does during shutdown
		rospy.on_shutdown(self.cleanup)

		# Create the OpenCV display window for the RGB image
		# rospy.loginfo("Initializing displays ...")
		# cv2.namedWindow(self.node_name, cv2.WINDOW_NORMAL)
		# cv2.moveWindow(self.node_name, 25, 75)	

		# Create the cv_bridge object
		rospy.loginfo("Creating CVBridge object ...")
		self.bridge = CvBridge()
 
		# load the configuration file
		rospy.loginfo("Loading the configuration file ...")
		self.conf = Conf('config/config.json')

		# instantiate our centroid tracker, then initialize a list to store
		# each of our dlib correlation trackers, followed by a dictionary to
		# map each unique object ID to a TrackableObject
		self.ct = CentroidTracker(maxDisappeared=self.conf["max_disappear"],
			maxDistance=self.conf["max_distance"])
		# self.trackers = []

		# load our serialized model from disk
		rospy.loginfo("Loading model...")
		# initialize the list of class labels MobileNet SSD was trained to
		# detect
		self.CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
			"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
			"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
			"sofa", "train", "tvmonitor"]
		self.net = cv2.dnn.readNetFromCaffe(self.conf["prototxt_path"],
			self.conf["model_path"])

		# Other variables:
		# initialize the frame dimensions (we'll set them as soon as we read
		# the first frame from the video)
		self.H = None
		self.W = None

		# keep the count of total number of frames
		self.totalFrames = 0

		# video recording
		now = datetime.now()
		folderName = 'pictures/%s'%(now.strftime("%Y%m%d"))
		if not os.path.isdir(folderName):
			os.mkdir(folderName)

		current_time = now.strftime("%H_%M_%S")
		self.outputvideo = cv2.VideoWriter('%s/%s.avi'%(folderName, current_time),cv2.VideoWriter_fourcc(*'XVID'), 10, (320,240))
		# self.outputvideo = cv2.VideoWriter('pictures/%s.avi'%(current_time),-1, 10, (320,240))

		# Subscribe to the camera image topics and set the appropriate callbacks
		rospy.loginfo("Subscribing to camera topic ....")
		self.image_sub = rospy.Subscriber("dip/camera/rgb/compressed", CompressedImage, self.image_callback, queue_size=1)

		rospy.loginfo("Waiting for image topics ...")
# Example 11
def video_feed_counter(conf, mode, input, output, url, camera):
    # load the configuration file from the path passed in as an argument
    conf = Conf(conf)
    count = 0
    # initialize the MOG foreground background subtractor object
    # mog = cv2.bgsegm.createBackgroundSubtractorMOG()
    mog = cv2.createBackgroundSubtractorMOG2()
    # initialize and define the dilation kernel
    dKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
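    # the 3x3 rectangular kernel is used below to dilate the foreground mask
    # produced by the background subtractor, closing small gaps so nearby
    # blobs merge into a single contour before contour detection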

    # initialize the video writer process
    writerProcess = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker and initialize a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(conf["max_disappeared"], conf["max_distance"])
    trackableObjects = {}

    # if a video path was not supplied, grab a reference to the webcam
    # if not args.get("input", False):
    # if input:
    # 	print("[INFO] starting video stream...")
    # 	# vs = VideoStream(src=0).start()
    # 	vs = VideoStream(usePiCamera=True).start()
    # 	time.sleep(2.0)

    # otherwise, grab a reference to the video file
    # else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(url, cv2.CAP_FFMPEG)
    # vs = cv2.VideoCapture(args["input"])

    # check if the user wants to use the difference flag feature
    if conf["diff_flag"]:
        # initialize the start counting flag and mouse click callback
        start = False
        cv2.namedWindow("set_points")
        cv2.setMouseCallback("set_points", set_points, [mode])

    # otherwise, the user does not want to use it
    else:
        # set the start flag as true indicating to start traffic counting
        start = True

    # initialize the direction info variable (used to store information
    # such as up/down or left/right vehicle count) and the difference
    # point (used to differentiate between left and right lanes)
    directionInfo = None
    diffPt = None
    fps = FPS().start()
    # print('fbs')
    # loop over frames from the video stream
    while (vs.isOpened()):
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        # frame = vs.read()
        ret, frame = vs.read()  # import image
        # if not ret:
        # 	frame = cv2.VideoCapture(url)
        #     continue
        # if ret:
        #     frame = cv2.VideoCapture(url)
        #     continue

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if input is not None and frame is None:
            break
        #print("frame in while")

        # check if the start flag is set, if so, we will start traffic
        # counting

        if start:
            # if the frame dimensions are empty, grab the frame
            # dimensions, instantiate the direction counter, and set the
            # centroid tracker direction

            if W is None or H is None:
                # start the frames per second throughput estimator
                #fps = FPS().start()
                (H, W) = frame.shape[:2]
                dc = DirectionCounter(mode, W - conf["x_offset"],
                                      H - conf["y_offset"])
                ct.direction = mode

                # check if the difference point is set, if it is, then
                # set it in the centroid tracker object
                if diffPt is not None:
                    ct.diffPt = diffPt

            # begin writing the video to disk if required
            if output is not None and writerProcess is None:
                # set the value of the write flag (used to communicate when
                # to stop the process)
                writeVideo = Value('i', 1)

                # initialize a shared queue for exchanging frames,
                # initialize a process, and start the process
                frameQueue = Queue()
                writerProcess = Process(target=write_video,
                                        args=(output, writeVideo, frameQueue,
                                              W, H))
                writerProcess.start()

            # initialize a list to store the bounding box rectangles
            # returned by background subtraction model
            rects = []

            # convert the frame to grayscale image and then blur it
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)

            # apply the MOG background subtraction model which returns
            # a mask
            mask = mog.apply(gray)

            # apply dilation
            dilation = cv2.dilate(mask, dKernel, iterations=2)

            # find contours in the mask
            cnts = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over each contour
            for c in cnts:
                # if the contour area is less than the minimum area
                # required then ignore the object
                if cv2.contourArea(c) < conf["min_area"]:
                    continue

                # get the (x, y)-coordinates of the contour, along with
                # height and width
                (x, y, w, h) = cv2.boundingRect(c)

                # check if direction is *vertical and the vehicle is
                # further away from the line, if so then, no need to
                # detect it
                if mode == "vertical" and y < conf["limit"]:
                    continue

                # otherwise, check if direction is horizontal and the
                # vehicle is further away from the line, if so then,
                # no need to detect it
                elif mode == "horizontal" and x > conf["limit"]:
                    continue

                # add the bounding box coordinates to the rectangles list
                rects.append((x, y, x + w, y + h))

            # check if the direction is vertical
            if mode == "vertical":
                # draw a horizontal line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'up' or 'down'
                cv2.line(frame, (0, H - conf["y_offset"]),
                         (W, H - conf["y_offset"]), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw
                # a line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (diffPt, 0), (diffPt, H), (255, 0, 0), 2)

            # otherwise, the direction is horizontal
            else:
                # draw a vertical line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'left' or 'right'
                # print('ddds')
                cv2.line(frame, (W - conf["x_offset"], 0),
                         (W - conf["x_offset"], H), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw a
                # line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (0, diffPt), (W, diffPt), (255, 0, 0), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the
                # current object ID and initialize the color
                to = trackableObjects.get(objectID, None)
                color = (0, 0, 255)

                # create a new trackable object if needed
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can
                # utilize it to determine direction
                else:
                    # find the direction and update the list of centroids
                    dc.find_direction(to, centroid)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:

                        # find the direction of motion of the vehicles
                        directionInfo = dc.count_object(to, centroid, camera)

                    # otherwise, the object has been counted and set the
                    # color to green indicate it has been counted
                    else:
                        color = (0, 255, 0)

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

            # extract the traffic counts and write/draw them
            if directionInfo is not None:
                for (i, (k, v)) in enumerate(directionInfo):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # put frame into the shared queue for video writing
            if writerProcess is not None:
                frameQueue.put(frame)

            # show the output frame
            # cv2.imshow("Frame", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # update the FPS counter
            fps.update()

        # otherwise, the user has to select a difference point
        else:
            # show the output frame
            # cv2.imshow("set_points", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `s` key was pressed, start traffic counting
            if key == ord("s"):
                # begin counting and eliminate the informational window
                start = True
                cv2.destroyWindow("set_points")

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    # if not args.get("input", False):
    # 	vs.stop()

    # release the video file pointer
    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
def run_program():
    # load our serialized model from disk
    print("[INFO] loading model...")
    network = cv2.dnn.readNetFromCaffe(conf["prototxt_path"],
                                       conf["model_path"])
    # network.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    videoStream = initialize_video_stream()

    # construct alarm thread
    alarmThread = AlarmHandler(src=conf["alarm_source"])

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    H = None
    W = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    centroidTracker = CentroidTracker(maxDisappeared=conf["max_disappear"],
                                      maxDistance=conf["max_distance"])
    trackers = []
    trackableObjects = {}

    # keep the count of total number of frames
    totalFrames = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over the frames of the stream
    while True:
        # load the line configuration file
        lines_offset_conf = Conf(inputArguments["lines"])
        # initialize the list of license-number inputs
        licenseNumberInputs = []
        # grab the next frame from the stream
        currentFrame = get_frame_from_video_stream(videoStream=videoStream)

        # check if the frame is None, if so, break out of the loop
        if currentFrame is None:
            break

        # resize the frame
        currentFrame = imutils.resize(currentFrame, width=conf["frame_width"])
        rgb = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = currentFrame.shape[:2]

        # initialize our list of bounding box rectangles returned by
        # either (1) our object detector or (2) the correlation trackers
        boundingBoxes = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % conf["track_object"] == 0:
            detections = obtain_detections_in_frame(frame=currentFrame,
                                                    network=network)
            trackers = build_trackers_list_from_detections(
                detections, rgb, H, W)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            boundingBoxes = build_bounding_box_list_from_trackers_list(
                trackers, rgb)

        # save clean original frame before drawing on it
        cleanFrame = currentFrame.copy()
        # draw 2 vertical lines and one horizontal line in the frame -- once an
        # object crosses these lines we will start the timers
        (LeftToRightLine, RightToLeftLine,
         HorizontalLine) = calculate_zone_boundaries(H, W, lines_offset_conf)
        draw_lines(LeftToRightLine, RightToLeftLine, HorizontalLine,
                   currentFrame, H, W)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = centroidTracker.update(boundingBoxes)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            trackingBoundingBox = centroidTracker.objectsToBoundingBoxes[
                centroid.tostring()]
            # check to see if a trackable object exists for the current
            # object ID
            currentTrackableObject = trackableObjects.get(objectID, None)
            # if there is no existing trackable object, create one
            if currentTrackableObject is None:
                currentTrackableObject = create_trackable_object(
                    RightToLeftLine, LeftToRightLine, HorizontalLine, centroid,
                    objectID)
            # otherwise, there is a trackable object
            else:
                append_license_input_info_to_inputs_list(
                    currentTrackableObject, licenseNumberInputs, objectID,
                    cleanFrame, trackingBoundingBox)
                check_and_handle_objects_location_in_frame(
                    currentTrackableObject, RightToLeftLine, LeftToRightLine,
                    HorizontalLine, objectID, centroid, cleanFrame,
                    currentFrame, alarmThread)

            # store the trackable object in our dictionary
            trackableObjects[objectID] = currentTrackableObject
            draw_information_on_object(currentFrame, centroid, objectID,
                                       currentTrackableObject,
                                       trackingBoundingBox)

        if not display_frame(currentFrame):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

        stop_alarm_for_dissappeared_object(
            inputAlarmThread=alarmThread,
            inputObjectsList=centroidTracker.objects)

        # handle plate recognition from data collected earlier and assign it to an object
        handle_license_plates_in_frame(licenseNumberInputs, trackableObjects,
                                       totalFrames)

    end_program(fps, videoStream, alarmThread)
def get_white_list():
    return Conf(conf["white_list_json"])["list"]
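
# a sketch of the JSON file this helper expects, with placeholder plate values
# (only the "list" key is implied by the code above):
#
# {
#     "list": ["ABC1234", "XYZ5678"]
# }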