def __init__(self, input_video=None, skip_frames=30, model_dir='model', model_name='MobileNetSSD_deploy', confidence=0.4):
        # frame dimensions
        self.W = None
        self.H = None
        self.skip_frames = skip_frames
        self.confidence = confidence
        self.input_video = input_video
        self.model_dir = model_dir
        self.model_name = model_name
        self.status = 'Init'
        self.trackers = []
        self.last_frame = np.zeros(10)
        # person-counting variables
        self.totalFrames = 0
        self.totalDown = 0
        self.totalUp = 0
        if not self.input_video:
            self.vs = cv2.VideoCapture(0)
            # self.vs = VideoStream(src=0).start()
        else:
            self.vs = cv2.VideoCapture(input_video)

        self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        self.trackableObjects = {}
        # FPS throughput estimator
        self.fps = FPS().start()
        self.net = cv2.dnn.readNetFromCaffe(
            self.model_dir + '/' + self.model_name + '.prototxt',
            self.model_dir + '/' + self.model_name + '.caffemodel'
        )
Example No. 2
class FaceCenterDnn:
    def __init__(self, confidence):
        self.confidence = confidence
        self.net = cv2.dnn.readNetFromCaffe(
            'utils/face_recognizer_dnn.prototxt',
            'utils/face_recognizer_dnn.caffemodel')
        self.centroid_tracker = CentroidTracker(20)

    def update(self, frame, screen_centerX, screen_centerY):
        height, width = frame.shape[:2]
        # Construct a blob from the frame and pass it through the network
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        self.net.setInput(blob)
        detections = self.net.forward()
        # Initialize the list of bounding box rectangles
        rects = []
        for i in range(detections.shape[2]):
            # Filter out weak detections by ensuring the predicted probability is greater than a minimum threshold
            if detections[0, 0, i, 2] > self.confidence:
                # compute the x and y coordinates of the bounding box for the object
                box = detections[0, 0, i, 3:7] * np.array(
                    [width, height, width, height])
                rects.append(box.astype("int"))
        # Update centroid tracker using the computed set of bounding box rectangles
        objects = self.centroid_tracker.update(rects)
        # No faces above the confidence threshold: return the screen center and 0 as radius
        if len(rects) == 0:
            return screen_centerX, screen_centerY, 0
        # Loop over the tracked objects and pick center of the object with lower id
        center = (screen_centerX, screen_centerY)
        min_id = 999
        if objects is not None:
            for objectID, centroid in objects.items():
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                if objectID < min_id:
                    min_id = objectID
                    center = centroid
        x = center[0]
        y = center[1]
        radius = center[2] if len(center) > 2 else 0
        return x, y, radius
class FaceCenterHaar:
    def __init__(self):
        self.face_detector = cv2.CascadeClassifier(
            "utils/face_recognizer_haarcascade.xml")
        self.centroid_tracker = CentroidTracker(50)

    def update(self, frame, screen_centerX, screen_centerY):
        # Convert the frame to grayscale
        frame_grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect all faces in the input frame
        faces = self.face_detector.detectMultiScale(frame_grayscale,
                                                    1.05,
                                                    9,
                                                    minSize=(30, 30))
        # Initialize the list of bounding box rectangles
        rects = []
        for (x, y, w, h) in faces:
            # compute the x and y coordinates of the bounding box for the object
            box = [x + 20, y + 20, x + w - 20, y + h - 20]
            rects.append(box)
        # Update centroid tracker using the computed set of bounding box rectangles
        objects = self.centroid_tracker.update(rects)
        # No faces found, return screen center and 0 as radius
        if len(faces) == 0:
            return screen_centerX, screen_centerY, 0
        # Loop over the tracked objects and pick center of the object with lower id
        center = (screen_centerX, screen_centerY)
        min_id = 999
        if objects is not None:
            for objectID, centroid in objects.items():
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                if objectID < min_id:
                    min_id = objectID
                    center = centroid
        x = center[0]
        y = center[1]
        radius = center[2] if len(center) > 2 else 0
        return x, y, radius
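
All of these examples assume a pyimagesearch-style CentroidTracker. For reference, here is a minimal sketch of the interface they rely on, update(rects) returning an OrderedDict mapping objectID to centroid. It is written from scratch for this listing, so the constructor arguments and matching details are assumptions and may differ from each project's actual file (some examples further down also expect update() to return (centroid, rect) pairs instead of bare centroids).

# Minimal sketch of the assumed CentroidTracker interface (not any project's actual file).
from collections import OrderedDict

import numpy as np
from scipy.spatial import distance as dist


class CentroidTracker:
    def __init__(self, maxDisappeared=50, maxDistance=50):
        self.nextObjectID = 0
        self.objects = OrderedDict()      # objectID -> centroid (x, y)
        self.disappeared = OrderedDict()  # objectID -> consecutive missed frames
        self.maxDisappeared = maxDisappeared
        self.maxDistance = maxDistance

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, rects):
        # No detections this frame: age every tracked object and drop stale ones.
        if len(rects) == 0:
            for objectID in list(self.disappeared.keys()):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects

        # Centroid of each detection box (startX, startY, endX, endY).
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for i, (startX, startY, endX, endY) in enumerate(rects):
            inputCentroids[i] = ((startX + endX) // 2, (startY + endY) // 2)

        if len(self.objects) == 0:
            for centroid in inputCentroids:
                self.register(centroid)
            return self.objects

        # Greedily match existing objects to new centroids by smallest distance.
        objectIDs = list(self.objects.keys())
        D = dist.cdist(np.array(list(self.objects.values())), inputCentroids)
        rows = D.min(axis=1).argsort()
        cols = D.argmin(axis=1)[rows]
        usedRows, usedCols = set(), set()
        for row, col in zip(rows, cols):
            if row in usedRows or col in usedCols or D[row, col] > self.maxDistance:
                continue
            objectID = objectIDs[row]
            self.objects[objectID] = inputCentroids[col]
            self.disappeared[objectID] = 0
            usedRows.add(row)
            usedCols.add(col)

        # Unmatched existing objects age out; unmatched detections become new objects.
        for row in set(range(D.shape[0])) - usedRows:
            objectID = objectIDs[row]
            self.disappeared[objectID] += 1
            if self.disappeared[objectID] > self.maxDisappeared:
                self.deregister(objectID)
        for col in set(range(D.shape[1])) - usedCols:
            self.register(inputCentroids[col])
        return self.objects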
Example No. 4
else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])

# initialize the video writer (we'll instantiate later if need be)
writer = None

# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}

# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
    # grab the next frame and handle if we are reading from either
Example No. 5
def main():
    default_model_dir = '/Users/octavian/Projects/Python3_projects/cars-counting/all_models'
    default_model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite'
    default_labels = 'coco_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    parser.add_argument(
        '--top_k',
        type=int,
        default=3,
        help='number of categories with highest score to display')
    parser.add_argument('--camera_idx',
                        type=int,
                        help='Index of which video source to use. ',
                        default=0)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.1,
                        help='classifier score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    # interpreter = tflite.Interpreter(args.model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter = tf.lite.Interpreter(args.model)
    interpreter.allocate_tensors()
    labels = load_labels(args.labels)
    detection_threshold = 0.5

    dist_estimator = ForwardDistanceEstimator()
    dist_estimator.load_scalers('./extra/scaler_x.save',
                                './extra/scaler_y.save')
    dist_estimator.load_model(
        '/Users/octavian/Projects/Python3_projects/cars-counting/all_models/[email protected]',
        '/Users/octavian/Projects/Python3_projects/cars-counting/all_models/[email protected]'
    )

    frames_until_reset = 0
    csv_columns = ["Number", "Type", "Date"]

    cap = cv2.VideoCapture(args.camera_idx)
    # fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640,352))

    ct = CentroidTracker()
    with open(
            "output_" + datetime.datetime.today().strftime('%Y-%m-%d') +
            ".csv", "w") as output_file:
        writer = csv.DictWriter(output_file, fieldnames=csv_columns)
        writer.writeheader()
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            cv2_im = frame
            frames_until_reset += 1

            cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
            pil_im = Image.fromarray(cv2_im_rgb)

            (h, w) = cv2_im.shape[:2]
            common.set_input(interpreter, pil_im)
            interpreter.invoke()
            objs, boxes, classes, scores, count = get_output(
                interpreter, score_threshold=args.threshold, top_k=args.top_k)
            boxes = np.squeeze(boxes)
            classes = np.squeeze(classes).astype(np.int32)
            scores = np.squeeze(scores)

            for ind in range(len(boxes)):
                if scores[ind] > detection_threshold and (
                        classes[ind] == 2 or classes[ind] == 7
                        or classes[ind] == 3 or classes[ind] == 0):
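                    # Class ids are assumed to follow the Coral coco_labels.txt
                    # mapping: 0 = person, 2 = car, 3 = motorcycle, 7 = truck.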

                    box = boxes[ind] * np.array([h, w, h, w])
                    box = np.append(box, classes[ind])

                    (startY, startX, endY, endX, label) = box.astype("int")
                    distance = dist_estimator.predict_distance(
                        startX, startY, endX, endY)
                    cv2.putText(img=cv2_im,
                                text=str(distance),
                                org=(startX + 30, startY + 30),
                                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1e-3 * frame.shape[0],
                                color=(255, 255, 255),
                                thickness=2)
                    cv2.rectangle(cv2_im, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)

            cv2.imshow('Output', cv2_im)
            cv2.waitKey(1)

            # out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()
        # out.release()
        cv2.destroyAllWindows()
class PersonCounter(object):
    # image width
    IM_WIDTH = 500
    N_TRACE = 60
    ## store sectors
    sectors = {  # sector		start(x, y)					end(x, y)
        'pasillo': ((IM_WIDTH // 3, 0), (IM_WIDTH * 2 // 3, 300)),  #
        'tienda1': ((0, 0), (IM_WIDTH // 3, 100)),
        'tienda2': ((0, 100), (IM_WIDTH // 3, 200)),
        'tienda3': ((0, 200), (IM_WIDTH // 3, 300)),
        'tienda4': ((IM_WIDTH * 2 // 3, 0), (IM_WIDTH, 100)),
        'tienda5': ((IM_WIDTH * 2 // 3, 100), (IM_WIDTH, 200)),
        'tienda6': ((IM_WIDTH * 2 // 3, 200), (IM_WIDTH, 300))
    }
    # points per sector
    sector_points = {  # sector	[initial_points, counter]
        'pasillo': [100, 0],
        'tienda1': [100, 0],
        'tienda2': [100, 0],
        'tienda3': [100, 0],
        'tienda4': [100, 0],
        'tienda5': [100, 0],
        'tienda6': [100, 0]
    }
    def __init__(self, input_video=None, skip_frames=30, model_dir='model', model_name='MobileNetSSD_deploy', confidence=0.4):
        # frame dimensions
        self.W = None
        self.H = None
        self.skip_frames = skip_frames
        self.confidence = confidence
        self.input_video = input_video
        self.model_dir = model_dir
        self.model_name = model_name
        self.status = 'Init'
        self.trackers = []
        self.last_frame = np.zeros(10)
        # person-counting variables
        self.totalFrames = 0
        self.totalDown = 0
        self.totalUp = 0
        if not self.input_video:
            self.vs = cv2.VideoCapture(0)
            # self.vs = VideoStream(src=0).start()
        else:
            self.vs = cv2.VideoCapture(input_video)

        self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        self.trackableObjects = {}
        # FPS throughput estimator
        self.fps = FPS().start()
        self.net = cv2.dnn.readNetFromCaffe(
            self.model_dir + '/' + self.model_name + '.prototxt',
            self.model_dir + '/' + self.model_name + '.caffemodel'
        )

    def in_rect(self, position, start, end):
        return position[0] >= start[0] and position[0] <= end[0] and position[1] >= start[1] and position[1] <= end[1]

    def __del__(self):
        self.vs.release()
        self.fps.stop()

    def get_sector(self, position, sectors):
        for sector, (start, end) in sectors.items():
            if self.in_rect(position, start, end):
                return sector

    def get_frame(self):
        """Runs on every frame."""
        time.sleep(0.1)
        ret, frame = self.vs.read()
        # frame = frame[1] if not self.input_video else frame
        if not isinstance(frame, (np.ndarray, np.generic)):
            time.sleep(0.2)
            ret, jpeg = cv2.imencode('.jpg', self.last_frame)
            return jpeg.tobytes()

        frame = imutils.resize(frame, width=self.IM_WIDTH)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if self.W is None or self.H is None:
            (self.H, self.W) = frame.shape[:2]

        self.status = "Waiting"
        rects = []

        if self.totalFrames % self.skip_frames == 0:
            self.status = "Detecting"
            self.trackers = []
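            # scale 0.007843 (~1/127.5) with mean 127.5 maps pixel values to roughly [-1, 1], as expected by MobileNet-SSD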
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H), 127.5)
            self.net.setInput(blob)
            detections = self.net.forward()

            # for each detection
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]

                if confidence > self.confidence:
                    idx = int(detections[0, 0, i, 1])

                    if CLASSES[idx] != "person":
                        continue

                    box = detections[0, 0, i, 3:7] * np.array([self.W, self.H, self.W, self.H])
                    (startX, startY, endX, endY) = box.astype("int")
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    self.trackers.append(tracker)
            for objectID, centroid in self.ct.objects.items():
                to = self.trackableObjects.get(objectID, None)
                if to is None:
                    continue
                for cent in to.centroids:
                    sector = self.get_sector(cent, self.sectors)
                    if sector:
                        self.sector_points[sector][1] += 1
                        if self.sector_points[sector][1] > TIME_LIMIT:
                            self.sector_points[sector][0] += 2
                            self.sector_points[sector][1] = 0
                            for sec, points in self.sector_points.items():
                                points[0] -= 1

        else:
            for tracker in self.trackers:
                self.status = 'Tracking'
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        objects = self.ct.update(rects)

        for objectID, centroid in objects.items():
            to = self.trackableObjects.get(objectID, None)
            if not to:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < self.H // 2:
                        self.totalUp += 1
                        to.counted = True
                    elif direction > 0 and centroid[1] > self.H // 2:
                        self.totalDown += 1
                        to.counted = True

            self.trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(
                frame,
                text,
                (centroid[0] - 10, centroid[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 255, 0),
                2
            )
            sector = self.get_sector(centroid, self.sectors) or ""
            cv2.putText(
                frame,
                sector,
                (centroid[0] - 10, centroid[1] + 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 255, 0),
                2
            )
            plot_centroids = to.centroids if len(to.centroids) < self.N_TRACE else to.centroids[-self.N_TRACE:]
            for i in range(0, len(plot_centroids), 4):
                cv2.circle(
                    frame,
                    (plot_centroids[i][0], plot_centroids[i][1]),
                    4,
                    (0, 255, 0),
                    -1
                )

        # for sector, (start, end) in self.sectors.items():
        #     cv2.rectangle(
        #         frame,
        #         start,
        #         end,
        #         (0, 255, 255),
        #         2
        #     )
        #     cv2.putText(
        #         frame,
        #         str(self.sector_points[sector][0]),
        #         (start[0], start[1] + 15),
        #         cv2.FONT_HERSHEY_SIMPLEX,
        #         0.5,
        #         (0, 255, 0),
        #         2
        #     )
        self.totalFrames += 1
        self.fps.update()

        ret, jpeg = cv2.imencode('.jpg', frame)
        self.last_frame = np.copy(frame)
        return jpeg.tobytes()
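
Since get_frame() returns JPEG bytes, PersonCounter fits the usual Flask MJPEG streaming pattern. The sketch below is a usage example under that assumption only; the route, host, and port are illustrative and not taken from the original project.

# Hedged usage sketch: serving PersonCounter.get_frame() as an MJPEG stream.
from flask import Flask, Response

app = Flask(__name__)
counter = PersonCounter(input_video=None)  # None -> webcam


def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(counter),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)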
def main(video_to_process, conf):
    start_time = time.time()
    ct = CentroidTracker(maxDisappeared=5, maxDistance=300)
    trackers = []
    trackableObjects = {}
    buff_dict = dict()
    
    entry_count = 0
    exit_count = 0

    person_net, n_p, c_p, w_p, h_p, input_blob_p, out_blob_p, plugin_p = init_model(conf["person_xml"],
                                                                                    conf["person_bin"])

    fvs = WebcamVideoStream(video_to_process).start()
    time.sleep(0.5)
    # Initialize some variables
    frame_count = 0
    cur_request_id_p = 0

    while True:
        # Grab a single frame of video
        frame = fvs.read()
        # direction = None
        if frame is None:
            break
        initial_h, initial_w = frame.shape[:2]
        frame_copy = frame
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        rects = []

        if frame_count % 2 == 0:
            trackers = []
            in_frame = cv2.resize(frame, (w_p, h_p))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n_p, c_p, h_p, w_p))
            person_net.start_async(request_id=cur_request_id_p, inputs={input_blob_p: in_frame})
            if person_net.requests[cur_request_id_p].wait(-1) == 0:
                person_detection_res = person_net.requests[cur_request_id_p].outputs[out_blob_p]
                for person_loc in person_detection_res[0][0]:
                    if person_loc[2] > 0.7:
                        xmin = abs(int(person_loc[3] * initial_w))
                        ymin = abs(int(person_loc[4] * initial_h))
                        xmax = abs(int(person_loc[5] * initial_w))
                        ymax = abs(int(person_loc[6] * initial_h))
                        cv2.rectangle(frame_copy, (xmin, ymin), (xmax, ymax), (255, 255, 255), 1)
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(xmin, ymin, xmax, ymax)
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
        else:
            # loop over the trackers
            for tracker in trackers:
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        objects = ct.update(rects)

        for (objectID, data) in objects.items():
            centroid = data[0]
            objectRect = data[1]
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                to.centroids.append(centroid)

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame_copy, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 244, 255), 3)
            cv2.circle(frame_copy, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            count_flag_line = chk_movement_line_one([centroid[0], centroid[1]], [0, 0], [initial_w, initial_h], 1,
                                                    int(objectID), conf["count_type"], buff_dict)

            if count_flag_line == 1:
                # direction = "entry"
                entry_count += 1
            elif count_flag_line == -1:
                # direction = "exit"
                exit_count += 1

        cv2.putText(frame_copy, "Entry: " + str(entry_count), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)
        cv2.putText(frame_copy, "Exit: " + str(exit_count), (100, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)

        cv2.imshow('Video', cv2.resize(frame_copy, (1280, 800)))

        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

        print('Processed: ', frame_count)
        frame_count += 1
    elapsed_time = time.time() - start_time
    elapsed = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
    print(elapsed)
    fvs.stop()
Example No. 8
def main():
    args = build_argparser().parse_args()

    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # ---------------------------------- 3. Load CPU extension for support specific layer ------------------------------
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys(
    )) == 1, "Sample supports only YOLO V3 based single input topologies"
    assert len(
        net.outputs
    ) == 3, "Sample supports only YOLO V3 based triple output topologies"

    # ---------------------------------------------- 4. Preparing inputs -----------------------------------------------

    ct = CentroidTracker(maxDisappeared=50, maxDistance=100)
    buff_dict = {}
    trackers = []
    trackableObjects = {}

    log.info("Preparing inputs")
    input_blob = next(iter(net.inputs))

    # Default batch size is 1
    net.batch_size = 1

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    input_stream = 0 if args.input == "cam" else args.input

    is_async_mode = False
    cap = cv2.VideoCapture(input_stream)
    fps = FPS().start()
    number_input_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # If the input holds a single frame it will be read in a loop; sync mode is the default in that case
    if number_input_frames != 1:
        ret, frame = cap.read()
    else:
        is_async_mode = False

    # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
    log.info("Loading model to the plugin")
    exec_net = plugin.load(network=net, num_requests=2)

    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    parsing_time = 0
    x = 0

    # ----------------------------------------------- 6. Doing inference -----------------------------------------------
    totalFrames = 0
    count_up, count_down = 0, 0
    while cap.isOpened():
        # Here is the first asynchronous point: in the Async mode, we capture frame to populate the NEXT infer request
        # in the regular mode, we capture frame to the CURRENT infer request
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()

        if not ret:
            break

        if is_async_mode:
            request_id = next_request_id
            in_frame = cv2.resize(next_frame, (w, h))
        else:
            request_id = cur_request_id
            in_frame = cv2.resize(frame, (w, h))

        rects = []
        if totalFrames % 5 == 0:
            trackers = []
            # resize input_frame to network size
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))

            # Start inference
            start_time = time()
            exec_net.start_async(request_id=request_id,
                                 inputs={input_blob: in_frame})
            det_time = time() - start_time

            # Collecting object detection results
            objects = list()
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                output = exec_net.requests[cur_request_id].outputs

                start_time = time()
                for layer_name, out_blob in output.items():
                    layer_params = YoloV3Params(net.layers[layer_name].params,
                                                out_blob.shape[2])
                    objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                                 frame.shape[:-1],
                                                 layer_params,
                                                 args.prob_threshold)
                parsing_time = time() - start_time

            # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
            for i in range(len(objects)):
                if objects[i]['confidence'] == 0:
                    continue
                for j in range(i + 1, len(objects)):
                    if intersection_over_union(
                            objects[i], objects[j]) > args.iou_threshold:
                        objects[j]['confidence'] = 0

            # Drawing objects with respect to the --prob_threshold CLI parameter
            objects = [
                obj for obj in objects
                if obj['confidence'] >= args.prob_threshold
            ]

            origin_im_size = frame.shape[:-1]
            for obj in objects:
                # Validation bbox of detected object
                if (obj['xmax'] > origin_im_size[1] or obj['ymax'] > origin_im_size[0]
                        or obj['xmin'] < 0 or obj['ymin'] < 0):
                    continue
                color = (int(min(obj['class_id'] * 12.5,
                                 255)), min(obj['class_id'] * 7,
                                            255), min(obj['class_id'] * 5,
                                                      255))
                det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) > obj['class_id'] else \
                    str(obj['class_id'])

                if int(det_label) in coi:
                    # initialize color model
                    in_frame = frame[abs(obj['ymin']):abs(obj['ymax']),
                                     abs(obj['xmin']):abs(obj['xmax'])]
                    in_frame = cv2.resize(in_frame, (color_w, color_h))
                    in_frame = in_frame.transpose(
                        (2, 0, 1))  # Change data layout from HWC to CHW
                    in_frame = in_frame.reshape(
                        (color_n, color_c, color_h, color_w))
                    color_exec_net.start_async(
                        request_id=cur_request_id,
                        inputs={color_input_blob: in_frame})
                    if color_exec_net.requests[cur_request_id].wait(-1) == 0:
                        res = color_exec_net.requests[cur_request_id].outputs
                        color_of_roi = [
                            'white', 'gray', 'yellow', 'red', 'green', 'blue',
                            'black'
                        ][np.argmax(res['color'][0])]
                        type_of_roi = ['car', 'bus', 'truck',
                                       'van'][np.argmax([res['type'][0]])]
                        cv2.putText(
                            frame,
                            color_of_roi + ' ' + coi_dict[int(det_label)],
                            ((obj['xmax'] + obj['xmin']) // 2,
                             (obj['ymax'] + obj['ymin']) // 2),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)

                    # initialize tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(obj['xmin'], obj['ymin'],
                                          obj['xmax'], obj['ymax'])
                    tracker.start_track(frame, rect)
                    trackers.append(tracker)

        else:
            # loop over the trackers
            for tracker in trackers:
                tracker.update(frame)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        objects = ct.update(rects)

        for (objectID, data) in objects.items():
            centroid = data[0]
            objectRect = data[1]

            cv2.rectangle(frame, (objectRect[0], objectRect[1]),
                          (objectRect[2], objectRect[3]), (255, 255, 0))
            # if find_point(left_top[0], left_top[1], bottom_right[0], bottom_right[1], centroid[0], centroid[1]):
            #     rand_num = randint(0, 3)
            #     cv2.rectangle(frame, random_boxes[rand_num][0], random_boxes[rand_num][1], (0, 0, 255), 2)
            #     cv2.putText(frame, 'ALERT', (500, 685), cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 255), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 2, (0, 255, 0), -1)

        totalFrames += 1
        start_time = time()
        inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
            "Inference time: {:.3f} ms".format(det_time * 1e3)
        render_time_message = "OpenCV rendering time: {:.3f} ms".format(
            render_time * 1e3)
        async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
            "Async mode is off. Processing request {}".format(cur_request_id)
        parsing_message = "YOLO parsing time is {:.3f} ms".format(parsing_time * 1e3)

        cv2.putText(frame, inf_time_message, (15, 15),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
        cv2.putText(frame, render_time_message, (15, 45),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
        cv2.putText(frame, async_mode_message,
                    (10, int(origin_im_size[0] - 20)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
        cv2.putText(frame, parsing_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX,
                    0.5, (10, 10, 200), 1)

        # cv2.rectangle(frame, left_top, bottom_right, (0, 0, 255), 2)
        # out.write(frame)
        cv2.imshow("DetectionResults", frame)
        render_time = time() - start_time

        fps.update()

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        # Esc key
        if key == 27:
            break
        # Tab key
        if key == 9:
            exec_net.requests[cur_request_id].wait()
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format(
                "async" if is_async_mode else "sync"))

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()

    del net
    del exec_net
    del plugin
    print('[INFO] exiting ...')
Example No. 9
print("[Global] reading config file")
config = configparser.ConfigParser()
config.read('config.ini')
print("[Global] reading config file: Done")

# Instantiate face detector
print("[Global] Instantiating face detector")
print("[Global] Using FaceBoxes model.")
FACE_DETECTOR = FaceboxesTensorflow(
    config['FaceboxesTensorflow']['weights_path'],
    config['FaceboxesTensorflow'].getfloat('score_threshold'))
print("[Global] Instantiating face detector: Done")

# Instantiate Centroid Tracker for ID assignment
print("[Global] Instantiating centroid tracker")
CENTROID_TRACKER = CentroidTracker(config['DEFAULT'].getint('max_disappeared'),
                                   config['DEFAULT'].getint('max_distance'))
print("[Global] Instantiating centroid tracker: Done")

# Create global constants
print("[Global] Setting global variables")
RUN_TEST = config['DEFAULT'].getboolean('run_test')
SKIP_FRAMES = config['DEFAULT'].getint('skip_frames')

if RUN_TEST:
    RECORD_TEST = config['TestInfo'].getboolean('record_test')
    SHOW_TEST = config['TestInfo'].getboolean('show')
    MONITOR_NAME = config['TestInfo']['monitor_name']
    MONITOR_ID = config['TestInfo'].getint('monitor_id')
    EVENT_PATH = config['TestInfo']['event_path']
    EVENT_NAME = config['TestInfo']['event_name']
    TOTAL_FRAMES = config['TestInfo'].getint('total_frames')
def __init__(self):
    self.face_detector = cv2.CascadeClassifier(
        "utils/face_recognizer_haarcascade.xml")
    self.centroid_tracker = CentroidTracker(50)
Example No. 11
def person_tracker(yolo, video, cam_id, a, b, count_type):

    print("[INFO] opening video file...")
    fvs = WebcamVideoStream(video).start()
    time.sleep(0.5)
    W = None
    H = None
    ct = CentroidTracker(maxDisappeared=1, maxDistance=500)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    cnt = 0
    exit_cnt = 0
    scale_factor = 1
    fps = FPS().start()
    init_frame = fvs.read()
    if init_frame is None:
        print('No frame')
        return
    # print(init_frame.type)
    if init_frame.shape[1] == 1920:
        scale_factor = 4
    elif init_frame.shape[1] == 3072:
        scale_factor = 8
    frm_width = ceil(init_frame.shape[1] / scale_factor)
    frm_height = ceil(init_frame.shape[0] / scale_factor)
    a1 = [ceil(a_ / scale_factor) for a_ in a]
    b1 = [ceil(b_ / scale_factor) for b_ in b]
    while True:
        fps.update()
        skip_frames = 60
        frame = fvs.read()
        if frame is None:
            break

        frame = imutils.resize(frame, frm_width, frm_height)

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        rects = []
        if totalFrames % skip_frames == 0:
            trackers = []
            image = Image.fromarray(frame)
            boxs = yolo.detect_image(image)
            print(boxs)
            for box in boxs:

                startX = box[0]
                startY = box[1]
                endX = box[2] + startX
                endY = box[3] + startY
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                trackers.append(tracker)

        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

                # draw a horizontal line in the center of the frame -- once an
                # object crosses this line we will determine whether they were
                # moving 'up' or 'down'

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        for (objectID, data) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            centroid = data[0]
            objectRect = data[1]
            # print(objectRect)
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # # 'up' and positive for 'down')
                # y = [c[1] for c in to.centroids]
                # direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            count_flag = chk_movement([centroid[0], centroid[1]], a1, b1,
                                      int(objectID), int(cam_id), count_type)
            if count_flag == 1:
                cnt += 1
                cnt_col.update({
                    'cam_id': cam_id,
                    'video_file': video
                }, {
                    '$set': {
                        'entry_count': cnt,
                        'processed_timestamp': datetime.utcnow()
                    }
                },
                               upsert=True)
            elif count_flag == -1:
                exit_cnt += 1
                cnt_col.update({
                    'cam_id': cam_id,
                    'video_file': video
                }, {
                    '$set': {
                        'exit_count': exit_cnt,
                        'processed_timestamp': datetime.utcnow()
                    }
                },
                               upsert=True)

        info = [("Entry", cnt), ("Exit", exit_cnt)]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        cv2.imshow("Frame", cv2.resize(frame, (800, 600)))
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    print("completed....")
    fvs.stop()
else:
    print("[INFO] opening the video file...")
    vs = cv2.VideoCapture(args["input"])


# initialize the video writer
writer = None

# initialize the frame dimensions
W = None
H = None

# instantiate our centroid tracker, then initialise a list to store 
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = [] 
trackableObjects = {}

# initialise the total number of frames processed, along with the
# total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0

# start the frames per second throughput estimator
fps = FPS().start()

# loop over frames from the video stream
while True:
    # grab the next frame and handle if we are reading from either 
Example No. 13
def object_tracker(uid, args):
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    if not args.get("input", False):
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

    else:
        vs = cv2.VideoCapture(args["input"])

    writer = None
    W = None
    H = None

    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    totalLeft = 0
    fps = FPS().start()

    while True:
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        if args["input"] is not None and frame is None:
            break
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if W is None or H is None:
            (H, W) = frame.shape[:2]
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H), True)

        status = "Waiting"
        rects = []

        if totalFrames % args["skip_frames"] == 0:
            status = "Detecting"
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > args["confidence"]:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        objects = ct.update(rects)
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        if (totalLeft != 0):
                            totalLeft -= 1
                        # totalLeft=1
                        to.counted = True
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        totalLeft += 1
                        to.counted = True
                    with open("res_{}.txt".format(uid), 'w') as f:
                        # print("writing", totalLeft)
                        f.write(str(totalLeft))
            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        if writer is not None:
            writer.write(frame)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
        totalFrames += 1
        fps.update()
    fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    if writer is not None:
        writer.release()
    if not args.get("input", False):
        vs.stop()
    else:
        vs.release()
    cv2.destroyAllWindows()
Example No. 14
    pri_key_filepath=path_to_key,
    client_bootstrap=client_bootstrap,
    ca_filepath=path_to_root,
    client_id=client_id,
    clean_session=False,
    keep_alive_secs=6)
# Make the connect() call
connect_future = mqtt_connection.connect()
# Future.result() waits until a result is available
connect_future.result()

lock = threading.Lock()
engine = DetectionEngine(model)
labels = dataset_utils.read_label_file(labels)

ct1 = CentroidTracker(maxDisappeared=600, maxDistance=900)
ct2 = CentroidTracker(maxDisappeared=600, maxDistance=900)

app = Flask(__name__)

print(config.has_option("APP", "input"))

if config['APP']['input'] == "webcam":
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    vidcap = False
else:
    print("[INFO] opening network camera or video file...")
    vidcap = True
    vs = cv2.VideoCapture(config['APP']['input'])
Example No. 15
from utils.centroidtracker import CentroidTracker
from collections import OrderedDict

import time
import datetime

# global variables

bulan = [
    'Jan', 'Feb', 'Mar', 'Apr', 'Mei', 'Jun', 'Jul', 'Aug', 'Sep', 'Okt',
    'Nov', 'Dec'
]

arrayTherm = np.zeros((240, 240, 3))
suhu = '0 C'
ct = CentroidTracker(maxDisappeared=16)
getData = False

futureObj = set()
myPeople = OrderedDict()

obj_center = {}
obj_bbox = {}


class MainWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        path_cam = 'rtsp://*****:*****@11.11.11.81:554/Streaming/channels/101'
Example No. 16
def __init__(self, confidence):
    self.confidence = confidence
    self.net = cv2.dnn.readNetFromCaffe(
        'utils/face_recognizer_dnn.prototxt',
        'utils/face_recognizer_dnn.caffemodel')
    self.centroid_tracker = CentroidTracker(20)
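
This constructor matches the FaceCenterDnn class from Example No. 2. As a hedged usage sketch (not part of the original project), it could be driven from a webcam loop like the one below; the camera index, window name, and drawing are illustrative assumptions.

# Hedged usage sketch: track the lowest-ID face with FaceCenterDnn from Example No. 2.
import cv2

detector = FaceCenterDnn(confidence=0.5)
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    h, w = frame.shape[:2]
    # update() returns the tracked face center, falling back to the screen center.
    x, y, radius = detector.update(frame, w // 2, h // 2)
    cv2.circle(frame, (int(x), int(y)), max(int(radius), 4), (0, 0, 255), 2)
    cv2.imshow("face center", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()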