Example #1
def main(args):
    # Read video
    frames, fps = read_video(args.video_path)
    print(f"Read {len(frames)} frames (fps: {fps})")

    # Read bboxes of each frame
    json_files = sorted(os.listdir(args.bbox_path),
                        key=lambda x: int(x.split(".")[0]))
    object_boxes_per_frame = []

    for file in json_files:
        with open(os.path.join(args.bbox_path, file)) as f:
            data = json.load(f)
            bboxes = data['children'].copy()
            object_boxes_per_frame.append(bboxes)
    print(f"Read {len(object_boxes_per_frame)} bbox files")

    # Run object tracking
    centroid_ids_per_frame = []

    if args.method == "centroid":
        ct = CentroidTracker(maxDisappeared=50)

        for ind in range(len(frames)):
            rects = [[obj['x1'], obj['y1'], obj['x2'], obj['y2']]
                     for obj in object_boxes_per_frame[ind]]
            centroid_ids = ct.update(rects)
            centroid_ids_per_frame.append(centroid_ids.copy())

    elif args.method == "kalman":
        tracker = Sort(max_age=50, min_hits=3)

        for ind in range(len(frames)):
            detections = np.array([[
                obj['x1'], obj['y1'], obj['x2'], obj['y2'], obj['confidence']
            ] for obj in object_boxes_per_frame[ind]])
            trackers = tracker.update(detections, None)
            centroid_ids = [[((track[0] + track[2]) / 2,
                              (track[1] + track[3]) / 2),
                             int(track[4])] for track in trackers]
            centroid_ids_per_frame.append(centroid_ids)
    else:
        raise NotImplementedError
    print(f"Processed {len(centroid_ids_per_frame)} frames")

    # Create output video
    annotated_frames = annotate_frames(frames, object_boxes_per_frame,
                                       centroid_ids_per_frame)
    # write the output at the source video's frame rate
    frames2video(annotated_frames, fps=fps, filepath=args.save_path)
    print("Created output video")
Example #2
def main():
    ct = CentroidTracker(10)
    pub.subscribe(face_out_of_frame, 'face_out_of_frame')
    pub.subscribe(face_in_frame, 'face_in_frame')
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_argparser().parse_args()
    stream = cv2.VideoCapture(0)
    model_bin, model_xml = get_face_detection_model(args)
    model_age_gender_xml, model_age_gender_bin = get_age_gender_detection_model(
        args)
    # Plugin initialization for specified device and load extensions library if specified
    plugin = get_plugin(args)
    print('running on device', args.device)
    add_extension_to_plugin(args, plugin)
    face_detection_net = IENetwork(model=model_xml, weights=model_bin)
    check_for_unsupported_layers(plugin, face_detection_net)
    age_gender_net = IENetwork(model=model_age_gender_xml,
                               weights=model_age_gender_bin)
    check_for_unsupported_layers(plugin, age_gender_net)
    # /opt/intel/computer_vision_sdk/deployment_tools/intel_models/face-detection-adas-0001/FP32/face-detection-adas-0001.xml

    log.info("Preparing inputs")
    face_net_input_blob = next(iter(face_detection_net.inputs))
    face_net_output_blob = next(iter(face_detection_net.outputs))
    face_detection_net.batch_size = 1

    # Read and pre-process input images
    n, c, h, w = face_detection_net.inputs[face_net_input_blob].shape
    print('number of images :', n, 'number of channels:', c,
          'height of image:', h, 'width of image:', w)
    # Loading model to the plugin
    log.info("Loading model ton the plugin")
    exec_net = plugin.load(network=face_detection_net)
    age_gender_input_blob = next(iter(age_gender_net.inputs))
    # print(face_detection_net.inputs,face_detection_net.outputs,age_gender_net.inputs,age_gender_net.outputs)
    # print(age_gender_net.outputs,len(age_gender_net.outputs))
    age_blob = 'age_conv3'
    gender_blob = 'prob'

    age_output = age_gender_net.outputs[age_blob]
    gender_output = age_gender_net.outputs[gender_blob]
    print("age,gender,model input specs",
          age_gender_net.inputs[age_gender_input_blob].shape)
    agen, agec, ageh, agew = age_gender_net.inputs[age_gender_input_blob].shape
    print("loading page gender model to the plugin")
    exec_age_gender_net = plugin.load(network=age_gender_net)

    while True:
        status, image = stream.read()
        res, initialw, initialh = infer_face(n, c, h, w, image, exec_net,
                                             face_net_input_blob)
        out = res[face_net_output_blob]
        count = 0

        tfaces = np.ndarray(shape=(agen, agec, ageh, agew))
        rects = []
        for obj in out[0][0]:
            confidence = obj[2]
            class_id = int(obj[1])
            if confidence > 0.9:
                count = count + 1
                xmin = int(obj[3] * initialw)
                ymin = int(obj[4] * initialh)
                xmax = int(obj[5] * initialw)
                ymax = int(obj[6] * initialh)
                color = (min(class_id * 12.5,
                             255), min(class_id * 7,
                                       255), min(class_id * 5, 255))
                face = image[ymin:ymax, xmin:xmax]
                (fh, fw) = face.shape[:-1]
                if fh < ageh or fw < agew:
                    continue
                tface = cv2.resize(face, (agew, ageh))
                tface = tface.transpose((2, 0, 1))
                tfaces[0] = tface
                t0 = time()
                out_age_gender = exec_age_gender_net.infer(
                    inputs={age_gender_input_blob: tfaces})
                print('inference time age/gender detection (ms):',
                      (time() - t0) * 1000)
                age, gender, checkedin = get_age_gender(
                    out_age_gender, age_blob, gender_blob)

                rects.append((xmin, ymin, xmax, ymax, age, gender, checkedin))

                cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)

        print("number of faces in frame", count)
        if count > 0:
            x = ct.update(rects)
            print(list(x.items()))
            cv2.imshow("face", face)

        cv2.imshow("Display", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
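
# The detection loop above assumes the usual OpenVINO SSD output layout
# [1, 1, N, 7], where each of the N rows is
# [image_id, label, confidence, xmin, ymin, xmax, ymax] with coordinates
# normalized to [0, 1]. A stand-alone version of that parsing step; the
# function name and threshold are illustrative, not part of the example.
def parse_ssd_detections(out, frame_w, frame_h, conf_threshold=0.9):
    boxes = []
    for det in out[0][0]:
        image_id, label, confidence, xmin, ymin, xmax, ymax = det
        if confidence > conf_threshold:
            boxes.append((int(xmin * frame_w), int(ymin * frame_h),
                          int(xmax * frame_w), int(ymax * frame_h),
                          int(label), float(confidence)))
    return boxes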
Example #3
class TrackerSpeedEstimator:
    """TrackerSpeedEstimator class manages the tracking and the speed estimation of elements in a video.

    Note: Works with CentroidTracker and Control_zone classes

    Args:
        czones (list): list of initialized Control_zone objects
        video_capture (cv2.VideoCapture): OpenCV video capture object

    Attributes:
        czones (list): list of initialized Control_zone objects
        fps (int): number of frames per second of the video
        cap (cv2.VideoCapture): OpenCV video capture object
        ct (CentroidTracker): CentroidTracker object
        mapped_centroid_classes (dict): dictionary of tracked elements
                                            and their detected classes
                                            (e.g. {0: 'car', 1: 'car', 2: 'truck'})
        tracked_objects_status (dict): dictionary of tracked elements
                                            and their control zone status,
                                            the id of the control zone they belong to,
                                            and the number of frames their speed has been displayed
                                            (e.g. {0: (1, 1, 0),
                                                   1: (0, None, 0),
                                                   2: (2, 1, 19)})
        frameid_control (dict): dictionary of tracked elements and their frame ids in the control zone
                                            (e.g. {4: [24, 68, 69],
                                                   0: [54, 79, 80, 81, 82],
                                                   8: [84, 112]})
        estimated_speed (dict): dictionary of tracked elements and their estimated speed for each control zone
                                            (e.g. {1: {0: 104.4, 8: 104.4},
                                                   2: {2: 100.8, 6: 127.095}})

    """

    def __init__(self, video_capture, czones):
        self.czones = czones
        self.fps = int(video_capture.get(cv2.CAP_PROP_FPS))
        self.cap = video_capture
        self.ct = CentroidTracker(maxDisappeared=8)
        self.mapped_centroid_classes = {}
        self.tracked_objects_status = {}
        self.frameid_control = {}
        self.estimated_speed = {}

    def track(self, detections):
        """update the tracker with the centroid of the detected elements
        Args:
            detections (list of numpy array): list of bounding box coordinates of the detected elements
                                        (ie [np.array([252,266,175,112]]),np.array([112,186,375,121]])])
        """
        # object Tracking
        self.detections = detections
        bboxes = [np.array(i[:4]).astype(int) for i in self.detections]
        self.objects = self.ct.update(bboxes)


    def map_centroid_class(self):
        """Maps the tracked objects ids with the detected object classes using centroids and bounding boxes
        Note :
              the mapping is realized according the the minimal distance bewteen two centroids
        """
        centroid_bboxes = np.array([((x1 + x2) / 2, ((y1 + y2) / 2)) for x1, y1, x2, y2, conf, cls in self.detections])
        for (objectID, centroid) in self.objects.items():
            distances = dist.cdist(np.expand_dims(centroid, axis=0), centroid_bboxes)
            i_mapped_bbox = distances.argmin(axis=1)[0]
            self.mapped_centroid_classes[objectID] = self.detections[i_mapped_bbox][5]

    def _update_status(self, obj_id, centroid, cz):
        """update the tracked elements informations to know when an element is not yet in the control zone,
        is currently in the control zone or has already crossed the control zone.

        Frame ids are saved for each tracked elements when entering in the control zone and exiting the controle zone

        Note :
              status is defined as :
                                   0 - when a tracked element has not crossed any control zone yet,
                                   1 - when a tracked element has crossed the starting zone of a control zone
                                   2 - when a tracked element has crossed the ending zone of the same control zone

        Args:
            obj_id (int): object id of the tracked element
            centroid (tuple of int): (x, y) coordinates of the tracked centroid
            cz (Control_zone object): control zone
        """
        frameid = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))

        if obj_id not in self.tracked_objects_status:
            self.tracked_objects_status[obj_id] = (0, None, 0)

        status, idczone, ndisplay = self.tracked_objects_status[obj_id]
        if cz.entering_zone(centroid):
            # if status != 1:
            self.tracked_objects_status[obj_id] = (1, cz.idczone, 0)
            self.frameid_control[obj_id] = [frameid]

        if idczone == cz.idczone:
            if cz.exiting_zone(centroid):
                self.tracked_objects_status[obj_id] = (2, idczone, 0)
                self.frameid_control[obj_id].append(frameid)

    def compute_speed(self):
        """Compute the speed for each tracked elements crossing each control zone

        Note :
              only elements with status = 2 are measured using information of the entering and exiting frame ids
              knowing the length of between the entering and the exiting zone ,
              the number of frames in the control zone and the frame rate, we can estimate the speed of the object.
        """
        for czone in self.czones:
            if czone.idczone not in self.estimated_speed:
                self.estimated_speed[czone.idczone] = {}
            for (objectID, centroid) in self.objects.items():
                self._update_status(objectID, centroid, czone)

                status, idczone, ndisplay = self.tracked_objects_status[objectID]

                if status == 2 and (idczone == czone.idczone):
                    n_present_frames = self.frameid_control[objectID][-1] - self.frameid_control[objectID][0]
                    speed = ((czone.ckzn_d / (n_present_frames / self.fps)) * 3600) / 1000  # km/h
                    self.estimated_speed[idczone].update({objectID: speed})

    def display_speed(self, img, ndisplay_frames=20):
        """Displays the speed of each measured objects and the average speed for each control zone
        Note :
              only the speed of status = 2 elements is displayed during ndisplay_frames
              If an element if over the speed limit, the speed is displayed as red
        Args:
             img (numpy 2D array): input image
             ndisplay_frames (int): maximum number of displayed speed frames for each tracked objects
        """
        speedlimits = {}
        for czone in self.czones:
            speedlimits[czone.idczone] = czone.speedlimit

        shape = img.shape[:2]
        for (objectID, centroid) in self.objects.items():
            status, idczone, ndisplay = self.tracked_objects_status[objectID]
            if status == 2:
                if ndisplay <= ndisplay_frames:
                    speed = self.estimated_speed[idczone][objectID]
                    centroid = self.objects[objectID]
                    cv2.putText(img, "{0:.1f} : km/h".format(speed), (centroid[0] - 15, centroid[1] + 40),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                over_speed_color((0, 255, 0), speed, speedlimits[idczone]),
                                1)
                    self.tracked_objects_status[objectID] = (status, idczone, ndisplay + 1)
                else:
                    self.tracked_objects_status[objectID] = (0, None, 0)

        for czone in self.czones:
            idczone = czone.idczone
            if len(self.estimated_speed[idczone]) > 0:
                mspeed = np.array(list(self.estimated_speed[idczone].values())).mean()
                offset_r, offset_c = offset_loc(czone.draw_loc)
                x, y = (
                    (shape[1] // 2) + (offset_c * (shape[1] // 4)),
                    -((shape[0] // 2) - 20) * offset_r + (shape[0] // 2))
                cv2.rectangle(img, (x - 10, y + 5), (x + 170, y - 15), czone.col, -1)
                cv2.putText(img, "Avg : {0:.1f} : km/h".format(mspeed), (x, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    def display_tracking(self, img):
        """Displays the centroid and the IDs of the tracked objects
        Args:
             img (numpy 2D array): input image
        """
        for (objectID, centroid) in self.objects.items():
            col = (0, 255, 0)
            status, _, _ = self.tracked_objects_status[objectID]
            if status == 1:
                col = (255, 255, 255)
            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, col, 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, col, -1)
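
# Worked example of the formula in compute_speed(): with a 20 m control zone,
# a 30 fps video and 24 frames between the entering and exiting frame ids,
# the estimated speed is 90 km/h. All values here are illustrative and not
# taken from the class above.
fps = 30
ckzn_d = 20.0              # control zone length in metres (czone.ckzn_d)
n_present_frames = 24      # frameid_control[obj][-1] - frameid_control[obj][0]

elapsed_s = n_present_frames / fps            # 0.8 s spent in the zone
speed_kmh = (ckzn_d / elapsed_s) * 3600 / 1000
print(speed_kmh)                              # 90.0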
Example #4
class Camera:
    def __init__(self, publisher: mqtt_publisher, args):
        # Start Arguments
        self.publisher: mqtt_publisher = publisher

        for k, v in args.items():
            if k is "skip_frame":
                if v is not None:
                    self.skip_frame = int(v)
                else:
                    self.skip_frame = 5
            if k is "min_confidence":
                if v is not None:
                    self.min_confidence = float(v)
                else:
                    self.min_confidence = 0.4
            if k is "resolution":
                if v is not None:
                    w, h = v.split(",")
                    self.resolution = (int(w), int(h))
                else:
                    self.resolution = (640, 480)
            if k is "debug":
                if v is not None:
                    self.debug = v
                else:
                    self.debug = False

        # Classes the net Model recognises
        self.CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
                        "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
                        "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
                        "sofa", "train", "tvmonitor"]

        # centroid tracker and some inits
        self.ct = CentroidTracker(publisher, maxDisappeared=40, maxDistance=50)
        self.targets = []
        self.rois = OrderedDict()
        self.take_snap = False
        self.fps = None
        self.s = sched.scheduler(time.time, time.sleep)

    def read_config(self):
        self.targets = []
        self.rois = OrderedDict()
        with open("config.json") as json_file:
            data = json.load(json_file)
            for t in data["target"]:
                self.targets.append(t)
            for roi in data["ROI"]:
                name = roi["name"]
                (x, y, w, h) = roi["coordinates"]
                self.rois[name] = (int(x), int(y), int(w), int(h))
        self.ct.update_key_centroids(self.rois)

    def reload_config(self):
        self.read_config()

    def snapshot(self):
        self.take_snap = True

    def publish_snap(self, img):
        val, buffer = cv2.imencode(".jpg", img)
        encoded = base64.b64encode(buffer)

        packet_size = 3000
        start = 0
        end = packet_size
        length = len(encoded)
        pic_id = "snapshot_{}".format(length % 100)
        pos = 0
        packet_number = math.ceil(length / packet_size) - 1

        while start <= length:
            data = {
                "data": str(encoded[start:end]),
                "pic_id": pic_id,
                "pos": pos,
                "packet_number": packet_number
            }
            print("sending {}/{}".format(pos, packet_number))

            self.publisher.publish(json.dumps(data), "test/test/snapshot")
            end += packet_size
            start += packet_size
            pos = pos + 1
            time.sleep(0.2)

    def publish_fps(self):
        self.fps.stop()
        if self.fps.elapsed() > 0:
            fps = {
                "time_elapsed": int(self.fps.elapsed()),
                "fps": int(self.fps.fps())
            }
            self.publisher.publish(json.dumps(fps), "test/test/fps")
        self.fps = FPS().start()

    def publish_online(self, sc):
        msg = {
            "online": True
        }
        self.publisher.publish(json.dumps(msg), "test/test/status")
        self.s.enter(60, 1, self.publish_online, (sc,))

    def run_camera(self):
        totalFrames = 0
        trackers = []
        trackableObjects = {}
        W = None
        H = None
        class_list = []

        self.read_config()

        self.s.enter(1, 1, self.publish_online, (self.s,))

        # Video Source
        # vid_capture = cv2.VideoCapture(0)
        vid_capture = cv2.VideoCapture("../../img/in/campus4-c1.avi")

        # Load Model
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe("mobilenet_ssd/MobileNetSSD_deploy.prototxt",
                                       "mobilenet_ssd/MobileNetSSD_deploy.caffemodel")
        print("Done")
        # Start FPS counter
        self.fps = FPS().start()

        print("Running...")
        error = False
        while True:
            _, frame = vid_capture.read()

            # if the end of the video is reached, restart the test clip
            if frame is None:
                print("no frame, restarting video")
                vid_capture = cv2.VideoCapture("../../img/in/campus4-c1.avi")
                _, frame = vid_capture.read()
                # error = True
                # break

            # resize and convert to rgb for dlib
            frame = cv2.resize(frame, self.resolution, interpolation=cv2.INTER_AREA)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # set frame dimensions
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # snapshot
            if self.take_snap:
                self.publish_snap(frame)
                self.take_snap = False
                continue

            # init bounding box rectangles
            rects = []

            # Only run the detector every skip_frame frames
            if totalFrames % self.skip_frame == 0:
                totalFrames = 0
                # init new set of trackers
                trackers = []
                class_list = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    if confidence > self.min_confidence:
                        # if the class label is not one of the configured targets, ignore it
                        idx = int(detections[0, 0, i, 1])
                        target = self.CLASSES[idx]
                        if target not in self.targets:
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (start_x, start_y, end_x, end_y) = box.astype("int")

                        # make a dlib rectangle object and start the dlib tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(start_x, start_y, end_x, end_y)
                        tracker.start_track(rgb, rect)

                        trackers.append(tracker)
                        class_list.append(target)

            # use tracker during skipped frames, not object recognition
            else:
                for tracker in trackers:
                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack position object
                    start_x = int(pos.left())
                    start_y = int(pos.top())
                    end_x = int(pos.right())
                    end_y = int(pos.bottom())

                    # add the box coordinates to the rectangles list
                    rects.append((start_x, start_y, end_x, end_y))

            # use centroids to match old and new centroids and then loop over them
            (objects, class_dict) = self.ct.update(rects, class_list)

            for (object_id, centroid) in objects.items():
                # check if object is in the object list
                t_object = trackableObjects.get(object_id, None)

                # if there is no existing trackable object, create one
                if t_object is None:
                    t_object = TrackableObject(object_id, centroid, class_dict[object_id])

                else:
                    t_object.centroids.append(centroid)

                # store the trackable object in our dictionary
                trackableObjects[object_id] = t_object

                # draw Centroid
                text = "ID {}".format(object_id)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            for roi in self.rois:
                draw_roi(frame, roi, self.rois[roi])

            # Debugging
            if self.debug:
                cv2.imshow("Tracking", frame)

            totalFrames += 1
            self.fps.update()

            k = cv2.waitKey(5) & 0xFF
            if k == 27:  # Esc
                break

        self.publish_fps()
        self.fps.stop()
        cv2.destroyAllWindows()
        vid_capture.release()
        print("Camera stopped")
        return True, error
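
# Sketch of the MQTT snapshot chunking used in publish_snap(): the JPEG bytes
# are base64 encoded and cut into fixed-size packets so that each message
# stays small; a receiver can sort the packets by "pos" and concatenate them.
# packet_size mirrors the example; the function names and the reassemble()
# helper are illustrative counterparts, not part of the original class.
import base64
import json
import math


def chunk_image(jpeg_bytes, packet_size=3000):
    encoded = base64.b64encode(jpeg_bytes).decode("ascii")
    packet_number = math.ceil(len(encoded) / packet_size) - 1
    return [
        json.dumps({"data": encoded[i * packet_size:(i + 1) * packet_size],
                    "pos": i,
                    "packet_number": packet_number})
        for i in range(packet_number + 1)
    ]


def reassemble(packets):
    decoded = [json.loads(p) for p in packets]
    decoded.sort(key=lambda p: p["pos"])
    return base64.b64decode("".join(p["data"] for p in decoded))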
Example #5
    model.setInput(blob)
    detections = model.forward()
    rects = []

    for i in range(0, detections.shape[2]):
        if detections[0, 0, i, 2] > 0.5:

            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            rects.append(box.astype("int"))

            (startX, startY, endX, endY) = box.astype("int")
            coordinates = box.astype("int")
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)

    objects = ct.update(rects)

    for (objectID, centroid) in objects.items():
        if not os.path.isfile("{}.jpg".format(objectID)):
            # crop as frame[startY:endY, startX:endX] using the last detected box
            img = frame[coordinates[1]:coordinates[3],
                        coordinates[0]:coordinates[2]]
            cv2.imwrite("{}.jpg".format(objectID), img)
            cv2.imshow("{} img".format(objectID), img)
        text = "Face {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_ITALIC, 0.5, (0, 0, 255), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 5, (0, 0, 255), -1)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
counter = 0
names = []
start = time.time()
fps_counter = 0
while True:
    fps_counter += 1
    counter += 1
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rects = detector.detectMultiScale(gray, 1.1, 5)
        
    objects = tracker.update(rects)
    if counter % FRAMES_TO_TRACK == 0:
        counter = 0
        # face_recognition expects boxes as (top, right, bottom, left)
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []
        for encoding in encodings:
            matches = face_recognition.compare_faces(data['encodings'], encoding)
            name = "Unknown"
            
            if True in matches:
                matchedIds = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIds:
                    name = data['names'][i]
                    counts[name] = counts.get(name, 0) + 1
class LiveDetector:
    def __init__(self):
        # Instantiate detector
        self.detector = ObjectDetection()

        # Set and load model
        self.model_path = "D:/Final-Year-Project/Object-tracking/models/yolo.h5"
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(self.model_path)
        self.detector.loadModel()

        # Set custom objects
        self.custom_objects = self.detector.CustomObjects(car=True,
                                                          motorcycle=True,
                                                          person=True,
                                                          bicycle=True,
                                                          dog=True)
        self.tracker = CentroidTracker()

    def track_objects(self, frame):
        rects = []
        names = []
        data = {}
        frame = self.pixelate_frontyard((100, 100), frame)

        returned_image, detection = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.custom_objects,
            input_image=frame,
            output_type="array",
            input_type="array")
        for eachObject in detection:
            rects.append(eachObject["box_points"])
            names.append(eachObject["name"])

            (startX, startY, endX, endY) = eachObject["box_points"]
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            self.blur_object((startX, startY), (endX, endY), (11, 11), frame)

        objects = self.tracker.update(rects, names)

        if objects is not None:
            for objectID, objectDetails in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                centroid = objectDetails[0]
                name = objectDetails[1]
                if self.tracker.disappeared[objectID] < 1:
                    text = name + " " + str(objectID)
                    cv2.putText(frame, text,
                                (centroid[0] - 30, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4,
                               (0, 255, 0), -1)

                    data[name] = objectID

        return frame, data

    def blur_object(self, topLeft, bottomRight, kSize, frame):
        x, y = topLeft[0], topLeft[1]
        w, h = bottomRight[0] - topLeft[0], bottomRight[1] - topLeft[1]

        ROI = frame[y:y + h, x:x + w]
        blur = cv2.GaussianBlur(ROI, kSize, 0)

        frame[y:y + h, x:x + w] = blur

    def blur_frontyard(self, kSize, frame):
        height, width, channel = frame.shape
        ROI_corners = np.array([[(320, 490), (895, 320), (895, height),
                                 (320, height)]],
                               dtype=np.int32)
        blurred_frame = cv2.GaussianBlur(frame, kSize, 0)
        mask = np.zeros(frame.shape, dtype=np.uint8)
        ignore_mask_color = (255, ) * channel
        cv2.fillPoly(mask, ROI_corners, ignore_mask_color)
        mask_inverse = np.ones(mask.shape).astype(np.uint8) * 255 - mask
        frame = cv2.bitwise_and(blurred_frame, mask) + cv2.bitwise_and(
            frame, mask_inverse)

        return frame

    def pixelate_frontyard(self, kSize, frame):
        height, width, channel = frame.shape
        w, h = kSize
        ROI_corners = np.array([[(320, 490), (895, 320), (895, height),
                                 (320, height)]],
                               dtype=np.int32)
        temp = cv2.resize(frame, (w, h), interpolation=cv2.INTER_LINEAR)
        pixelated_frame = cv2.resize(temp, (width, height),
                                     interpolation=cv2.INTER_NEAREST)
        mask = np.zeros(frame.shape, dtype=np.uint8)
        ignore_mask_color = (255, ) * channel
        cv2.fillPoly(mask, ROI_corners, ignore_mask_color)
        mask_inverse = np.ones(mask.shape).astype(np.uint8) * 255 - mask
        frame = cv2.bitwise_and(pixelated_frame, mask) + cv2.bitwise_and(
            frame, mask_inverse)

        return frame
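
# The masking idea behind blur_frontyard() / pixelate_frontyard(), in
# isolation: fill a polygon mask, then composite the processed copy inside
# the polygon with the untouched frame outside it. The synthetic frame and
# polygon below are only for demonstration and are not from the class above.
import cv2
import numpy as np

frame = np.full((480, 640, 3), 200, dtype=np.uint8)
polygon = np.array([[(100, 150), (500, 120), (500, 479), (100, 479)]],
                   dtype=np.int32)

blurred = cv2.GaussianBlur(frame, (31, 31), 0)
mask = np.zeros_like(frame)
cv2.fillPoly(mask, polygon, (255, 255, 255))
composite = (cv2.bitwise_and(blurred, mask) +
             cv2.bitwise_and(frame, cv2.bitwise_not(mask)))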

        inDet = qDet.tryGet()
        if inDet is not None:
            detections = inDet.detections
            frame_count += 1

        if frame is not None:
            height = frame.shape[0]
            width = frame.shape[1]

            objects = ct.update([
                (int(detection.xmin * width), int(detection.ymin * height),
                 int(detection.xmax * width), int(detection.ymax * height))
                for detection in detections
            ])

            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)

                if to is None:
                    to = TrackableObject(objectID, centroid)
                else:
                    if args.axis and not to.counted:
                        x = [c[0] for c in to.centroids]
                        direction = centroid[0] - np.mean(x)

                        if centroid[
                                0] > args.roi_position * width and direction > 0 and np.mean(
                class_ids.append(class_id)

    # Select objects with high probability
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

    if DEBUG_MODE:
        # Draw bounding boxes
        for i in range(len(boxes)):
            if i in indexes:
                startX, startY, endX, endY = boxes[i]
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              (0, 255, 0), 2)

    # Update our centroid tracker using the computed set of bounding
    # box rectangles
    objects = ct.update(boxes)

    if DEBUG_MODE:
        # Loop over the tracked objects and draw their centroids
        for (objectID, ball) in objects.items():
            # Draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text,
                        (ball.centroid[0] - 10, ball.centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    # Detect any user input
    pressed_key = cv2.waitKey(1)
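
# The last fragment filters detections with cv2.dnn.NMSBoxes before updating
# the tracker. A minimal, self-contained example of that call (illustrative
# values): boxes are given as [x, y, w, h] with their confidences, plus a
# score threshold and an IoU threshold, and the indices of the surviving
# boxes are returned (as a flat array or an Nx1 array, depending on the
# OpenCV version).
import cv2

boxes = [[10, 10, 50, 80], [12, 12, 50, 80], [200, 40, 60, 60]]
confidences = [0.9, 0.6, 0.8]
keep = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
print(keep)  # the first and third boxes survive; the overlapping second one is suppressed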