Example #1
    def _analyze_agent_movement(
        self,
        timestamp,
        agent_id,
        centroid,
        frame_mid,
    ):
        to = self._trackable_objects.get(agent_id, None)

        if to is None:
            to = TrackableObject(agent_id, centroid)
            self._trackable_objects[agent_id] = to
            return

        # Compute vertical delta
        mean_y = mean([cent[1] for cent in to.centroids])
        direction = centroid[1] - mean_y
        to.centroids.append(centroid)

        if to.counted:
            return

        # Moved up (entered)
        if direction < 0 and centroid[1] < frame_mid:
            self.enter_times.append(timestamp)
            to.counted = True

        # Moved down (left)
        if direction > 0 and centroid[1] > frame_mid:
            self.leave_times.append(timestamp)
            to.counted = True

        self._trackable_objects[agent_id] = to
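
All of the snippets on this page construct and mutate a TrackableObject but never show its definition. A minimal sketch, inferred from how these examples use it (an ID, a growing centroid history, and a counted flag), could look like the following; any attribute beyond those three, such as elapsedTime or createdInIllegalZone, is set ad hoc by the individual examples, and one example below also passes a class name to the constructor:

class TrackableObject:
    # Minimal sketch of the helper the examples rely on; attribute names are
    # inferred from usage in the snippets, not taken from a specific library.
    def __init__(self, objectID, centroid):
        # unique ID handed out by the centroid tracker
        self.objectID = objectID
        # history of centroids; callers append the latest one each frame
        self.centroids = [centroid]
        # set to True once the object has been counted crossing a line
        self.counted = False
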
def create_trackable_object(RightToLeftLine, LeftToRightLine, HorizontalLine,
                            centroid, objectID):
    trackableObject = TrackableObject(objectID, centroid)
    trackableObject.elapsedTime = 0
    # if the first centroid is in the illegal zone, mark the object as created there
    if (RightToLeftLine > centroid[0] > LeftToRightLine
            and centroid[1] > HorizontalLine):
        trackableObject.createdInIllegalZone = True
        print("[INFO] ID {} created in illegal zone".format(objectID))
    return trackableObject
Example #3
def draw_centroids(frame, objects, trackableObjects, long_stopped_cars):
    for (objectID, centroid) in objects.items():
        # check if a trackable object exists for this particular ID
        to = trackableObjects.get(objectID, None)

        # if it doesn't then we create a new one corresponding to the given centroid
        if to is None:
            to = TrackableObject(objectID, centroid)

        # place the trackable object into the dict.
        trackableObjects[objectID] = to

        # drawing circle and text
        if objectID in long_stopped_cars:
            text = "ID {} STOPPED".format(objectID + 1)
            # if a car is not moving then we draw a large yellow centroid
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 6, (0, 255, 255), -1)
        else:
            text = "ID {}".format(objectID + 1)
            # else we draw a smaller green centroid
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 3, (0, 255, 0), -1)
def draw_centroids(frame, objects, trackableObjects):
	# loop over the array of tracked objects
	for (objectID, centroid) in objects.items():

		# check whether a trackable object exists for this ID
		to = trackableObjects.get(objectID, None)

		# if it doesn't exist, create a new one for this centroid
		if to is None:
			to = TrackableObject(objectID, centroid)

		# in any case, store the object in the dictionary:
		# (1) ID (2) object
		trackableObjects[objectID] = to


		# draw the object's centroid and ID on the frame
		text = "ID {}".format(objectID + 1)
		cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
		cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
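
Both draw_centroids variants expect the objects dict produced by a centroid tracker's update() call. A hedged usage sketch follows; the CentroidTracker import path mirrors the pyimagesearch-style tracker the later examples instantiate, and detect_boxes is a placeholder for whatever detector the caller provides, so both are assumptions rather than part of the original snippets:

# Hypothetical driver loop for draw_centroids; the CentroidTracker import path
# and the detect_boxes() detector are assumed stand-ins.
import cv2
from pyimagesearch.centroidtracker import CentroidTracker

ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackableObjects = {}
cap = cv2.VideoCapture(0)

while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    # any detector works here; it only has to return (startX, startY, endX, endY) boxes
    rects = detect_boxes(frame)   # placeholder detector (assumed)
    objects = ct.update(rects)    # maps objectID -> centroid
    draw_centroids(frame, objects, trackableObjects)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
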
Example #5
    # draw a horizontal line in the center of the frame -- once an
    # object crosses this line we will determine whether they were
    # moving 'up' or 'down'
    cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative (indicating the object
    def frames():
        global useIPCam
        out, weights, imgsz = 'inference/output', 'weights/yolov5s.pt', 640
        if (useIPCam):
            source = 'stream.txt'
        else:
            source = 'traff.mp4'
        device = torch_utils.select_device()
        if os.path.exists(out):
            shutil.rmtree(out)  # delete output folder
        os.makedirs(out)  # make new output folder
        start = time.time()
        elapsed = 0
        # Load model
        google_utils.attempt_download(weights)
        model = torch.load(weights, map_location=device)['model']

        model.to(device).eval()

        # Second-stage classifier
        classify = False
        if classify:
            modelc = torch_utils.load_classifier(name='resnet101',
                                                 n=2)  # initialize
            modelc.load_state_dict(
                torch.load('weights/resnet101.pt',
                           map_location=device)['model'])  # load weights
            modelc.to(device).eval()

        # Half precision
        half = False and device.type != 'cpu'
        print('half = ' + str(half))

        if half:
            model.half()

        # Set Dataloader
        vid_path, vid_writer = None, None
        if (useIPCam):
            dataset = LoadStreams(source, img_size=imgsz)
        else:
            dataset = LoadImages(source, img_size=imgsz)

        names = model.names if hasattr(model, 'names') else model.modules.names
        colors = [[random.randint(0, 255) for _ in range(3)]
                  for _ in range(len(names))]

        # Run inference
        t0 = time.time()
        ct = CentroidTracker()
        listDet = ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck']

        totalDownPerson = 0
        totalDownBicycle = 0
        totalDownCar = 0
        totalDownMotor = 0
        totalDownBus = 0
        totalDownTruck = 0

        totalUpPerson = 0
        totalUpBicycle = 0
        totalUpCar = 0
        totalUpMotor = 0
        totalUpBus = 0
        totalUpTruck = 0
        pub = False
        trackableObjects = {}
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        for path, img, im0s, vid_cap in dataset:
            elapsed = time.time() - start
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)

            # Inference
            t1 = torch_utils.time_synchronized()
            pred = model(img, augment=False)[0]

            # Apply NMS
            pred = non_max_suppression(pred,
                                       0.4,
                                       0.5,
                                       fast=True,
                                       classes=None,
                                       agnostic=False)
            t2 = torch_utils.time_synchronized()

            # Apply Classifier
            if classify:
                pred = apply_classifier(pred, modelc, img, im0s)

            rects = []
            labelObj = []
            yObj = []
            arrCentroid = []
            for i, det in enumerate(pred):  # detections per image

                if (useIPCam):
                    p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()  # if rtsp/camera
                else:
                    p, s, im0 = path, '', im0s
                height, width, channels = im0.shape
                cv2.line(im0, (0, int(height / 1.5)),
                         (int(width), int(height / 1.5)), (0, 0, 0),
                         thickness=1)
                save_path = str(Path(out) / Path(p).name)
                s += '%gx%g ' % img.shape[2:]  # print string
                gn = torch.tensor(im0.shape)[[1, 0, 1,
                                              0]]  #  normalization gain whwh
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                              im0.shape).round()

                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += '%g %s, ' % (n, names[int(c)])  # add to string
                    for *xyxy, conf, cls in det:
                        label = '%s %.2f' % (names[int(cls)], conf)
                        x = xyxy
                        tl = round(0.002 * (im0.shape[0] + im0.shape[1]) / 2) + 1  # line/font thickness
                        c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))

                        label1 = label.split(' ')
                        if label1[0] in listDet:
                            box = (int(x[0]), int(x[1]), int(x[2]), int(x[3]))
                            rects.append(box)
                            labelObj.append(label1[0])
                            cv2.rectangle(im0,
                                          c1,
                                          c2, (0, 0, 0),
                                          thickness=tl,
                                          lineType=cv2.LINE_AA)
                            tf = max(tl - 1, 1)
                            t_size = cv2.getTextSize(label,
                                                     0,
                                                     fontScale=tl / 3,
                                                     thickness=tf)[0]
                            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                            cv2.rectangle(im0, c1, c2, (0, 100, 0), -1,
                                          cv2.LINE_AA)
                            cv2.putText(im0,
                                        label, (c1[0], c1[1] - 2),
                                        0,
                                        tl / 3, [225, 255, 255],
                                        thickness=tf,
                                        lineType=cv2.LINE_AA)

                detCentroid = Camera.generateCentroid(rects)
                objects = ct.update(rects)

                for (objectID, centroid) in objects.items():
                    arrCentroid.append(centroid[1])
                for (objectID, centroid) in objects.items():
                    #print(idxDict)
                    to = trackableObjects.get(objectID, None)
                    if to is None:
                        to = TrackableObject(objectID, centroid)
                    else:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)
                        if not to.counted:  # direction: up

                            # up: only count inside a band just above the line,
                            # otherwise a distant car whose bbox reappears may be
                            # counted twice
                            if direction < 0 and height / 1.7 < centroid[1] < height / 1.5:
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'person'):
                                    totalUpPerson += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bicycle'):
                                    totalUpBicycle += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'car'):
                                    totalUpCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalUpMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bus'):
                                    totalUpBus += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalUpTruck += 1
                                    to.counted = True

                            elif direction > 0 and centroid[1] > height / 1.5:  # direction: down
                                idx = detCentroid.tolist().index(
                                    centroid.tolist())
                                if (labelObj[idx] == 'person'):
                                    totalDownPerson += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bicycle'):
                                    totalDownBicycle += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'car'):
                                    totalDownCar += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'motorcycle'):
                                    totalDownMotor += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'bus'):
                                    totalDownBus += 1
                                    to.counted = True
                                elif (labelObj[idx] == 'truck'):
                                    totalDownTruck += 1
                                    to.counted = True

                    trackableObjects[objectID] = to

                cv2.putText(im0, 'Down Person : ' + str(totalDownPerson),
                            (int(width * 0.7), int(height * 0.05)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down bicycle : ' + str(totalDownBicycle),
                            (int(width * 0.7), int(height * 0.1)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down car : ' + str(totalDownCar),
                            (int(width * 0.7), int(height * 0.15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down motorbike : ' + str(totalDownMotor),
                            (int(width * 0.7), int(height * 0.2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down bus : ' + str(totalDownBus),
                            (int(width * 0.7), int(height * 0.25)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Down truck : ' + str(totalDownTruck),
                            (int(width * 0.7), int(height * 0.3)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)

                cv2.putText(im0, 'Up Person : ' + str(totalUpPerson),
                            (int(width * 0.02), int(height * 0.05)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up bicycle : ' + str(totalUpBicycle),
                            (int(width * 0.02), int(height * 0.1)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up car : ' + str(totalUpCar),
                            (int(width * 0.02), int(height * 0.15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up motorbike : ' + str(totalUpMotor),
                            (int(width * 0.02), int(height * 0.2)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up bus : ' + str(totalUpBus),
                            (int(width * 0.02), int(height * 0.25)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                cv2.putText(im0, 'Up truck : ' + str(totalUpTruck),
                            (int(width * 0.02), int(height * 0.3)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (100, 0, 100), 2)
                #print(elapsed)
                if (elapsed > 60):
                    ObjListku = [
                        'Person', 'Bicycle', 'Car', 'Motorbike', 'Bus', 'Truck'
                    ]
                    objCountUp = []
                    objCountDown = []
                    objCountDown.append(totalDownPerson)
                    objCountDown.append(totalDownBicycle)
                    objCountDown.append(totalDownCar)
                    objCountDown.append(totalDownMotor)
                    objCountDown.append(totalDownBus)
                    objCountDown.append(totalDownTruck)

                    objCountUp.append(totalUpPerson)
                    objCountUp.append(totalUpBicycle)
                    objCountUp.append(totalUpCar)
                    objCountUp.append(totalUpMotor)
                    objCountUp.append(totalUpBus)
                    objCountUp.append(totalUpTruck)

                    date = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')

                    totalDownPerson = 0
                    totalDownBicycle = 0
                    totalDownCar = 0
                    totalDownMotor = 0
                    totalDownBus = 0
                    totalDownTruck = 0

                    totalUpPerson = 0
                    totalUpBicycle = 0
                    totalUpCar = 0
                    totalUpMotor = 0
                    totalUpBus = 0
                    totalUpTruck = 0

                    elapsed = 0
                    start = time.time()
                    #db.insert(date,ObjListku,objCountUp,objCountDown)  # insert into the database module
                    date = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
                    # build the MQTT payload from the snapshot lists, since the
                    # per-class counters were already reset to zero above
                    data_set = {
                        "Timestamp": str(date),
                        "dPerson": objCountDown[0],
                        "dBicycle": objCountDown[1],
                        "dCar": objCountDown[2],
                        "dBus": objCountDown[4],
                        "dTruck": objCountDown[5],
                        "uPerson": objCountUp[0],
                        "uBicycle": objCountUp[1],
                        "uCar": objCountUp[2],
                        "uBus": objCountUp[4],
                        "uTruck": objCountUp[5]
                    }
                    MQTT_MSG = json.dumps(data_set)
                    client.publish(MQTT_TOPIC, MQTT_MSG)
                #time.sleep(00.1)
                if pub == False:
                    proc = subprocess.Popen(
                        'ffmpeg -re -f mjpeg -i http://0.0.0.0:5000/video_feed -f lavfi -i anullsrc -c:v libx264 -g 60 -c:a aac -ar 44100 -ac 2 -f flv rtmp://your-rtmp-server',
                        shell=True)
                    pub = True
            yield cv2.imencode('.jpg', cv2.resize(im0,
                                                  (800, 600)))[1].tobytes()
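
The frames() generator above yields JPEG-encoded bytes, and the ffmpeg command it spawns pulls from http://0.0.0.0:5000/video_feed, which suggests a Flask-style MJPEG endpoint wraps it elsewhere in the project. A minimal sketch of such a route follows; the Flask app object and the assumption that frames() is a static method of the Camera class referenced inside it are not shown in the snippet:

# Hypothetical Flask endpoint serving the frames() generator as an MJPEG stream;
# `Camera.frames()` being callable this way is an assumption.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    def gen():
        for jpg in Camera.frames():
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg + b'\r\n')
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, threaded=True)
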
def people_counter(prototxt, model, output, confidences, skipframes):
    global vs, outputFrame, lock

    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(prototxt, model)
    ids = (1, )

    writer = None
    W = None
    H = None

    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalFrames = 0

    while True:
        frame = vs.read()
        frame = frame[1]
        frame = imutils.resize(frame, width=100)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if W is None or H is None:
            (H, W) = frame.shape[:2]

        if output is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, 30, (W, H), True)

        rects = []

        if totalFrames % skipframes == 0:
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 300), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > confidences:
                    idx = int(detections[0, 0, i, 1])
                    if classfilter(CLASSES[idx]):
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    label = "{}: {:.2f}%".format(CLASSES[idx],
                                                 confidence * 100)
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(frame, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                    trackers.append(tracker)

        else:
            for tracker in trackers:
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)
                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                trackableObjects[objectID] = to
                text = "ID {}".format(objectID)
                cv2.imwrite("data/user." + str(objectID) + ".jpg", frame)
                filefoto = "data/user." + str(objectID) + ".jpg"
                time = datetime.now()
                tipe = CLASSES[idx]
                sql = "SELECT file_foto FROM data WHERE id IN (%s)" % (
                    ', '.join(str(id) for id in ids))
                mycursor.execute(sql)
                comp = mycursor.fetchall()
                filefotocomp = ("data/user." + str(objectID) + ".jpg", )
                if filefotobeda(filefotocomp, comp):
                    sql = "INSERT INTO data (timestamp, tipe, file_foto) VALUES (%s, %s, %s)"
                    val = (time, tipe, filefoto)
                    mycursor.execute(sql, val)
                    mydb.commit()
                    sql = "SELECT id FROM data WHERE file_foto = %s"
                    val = (filefoto, )
                    mycursor.execute(sql, val)
                    inp_id = mycursor.fetchall()
                    for x in inp_id:
                        ids = ids + x

        # advance the frame counter and publish the frame on every iteration,
        # not only on tracking frames
        totalFrames += 1
        with lock:
            outputFrame = frame.copy()
Example #8
            # add the bounding box coordinates to the rectangles list
            rects.append((startX, startY, endX, endY))

    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # otherwise, if there is a trackable object and its speed has
        # not yet been estimated then estimate it
        elif not to.estimated:
            # check if the direction of the object has been set, if
            # not, calculate it, and set it
            if to.direction is None:
                x = [c[0] for c in to.centroids]
                direction = centroid[0] - np.mean(x)
                to.direction = direction

            # if the direction is positive (indicating the object
            # is moving from left to right)
            if to.direction > 0:
                # check to see if timestamp has been noted for
Example #9
def hello(word):
    global W, H, ct, totalFrames, trackers, trackableObjects, net, CLASSES, COLORS, mycursor, ids, idk
    retval, frame = wCap.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    rects = []
    if totalFrames % args["skip_frames"] == 0:
        trackers = []
        blob = cv2.dnn.blobFromImage(frame, 0.007843, (300,300), 127.5)
        net.setInput(blob)
        detections = net.forward()

        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0,0,i,2]
            if confidence > args["confidence"]:
                idx = int(detections[0,0,i,1])
                if classfilter(CLASSES[idx]):
                    continue
                idk = idx
                box = detections[0,0,i,3:7] * np.array([W, H, W, H])
                (startX, startY, endX, endY) = box.astype("int")

                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                trackers.append(tracker)
    else:
        for tracker in trackers:
            tracker.update(rgb)
            pos = tracker.get_position()
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            rects.append((startX, startY, endX, endY))

    objects = ct.update(rects)
    for (objectID, centroid) in objects.items():
        to = trackableObjects.get(objectID, None)
        if to is None:
            to = TrackableObject(objectID, centroid)
        else:
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)
        trackableObjects[objectID] = to
        text = "ID {}".format(objectID)
        cv2.imwrite("data/user."+str(objectID)+".jpg", frame)
        filefoto = "data/user."+str(objectID)+".jpg"
        time = datetime.now()
        tipe = CLASSES[idk]
        sql = "SELECT file_foto FROM data WHERE id IN (%s)" % (', '.join(str(id) for id in ids))
        mycursor.execute(sql)
        comp = mycursor.fetchall()
        filefotocomp = ("data/user."+str(objectID)+".jpg",)
        if filefotobeda(filefotocomp,comp):
            sql = "INSERT INTO data (timestamp, tipe, file_foto) VALUES (%s, %s, %s)"
            val =(time,tipe,filefoto)
            mycursor.execute(sql,val)
            mydb.commit()
            sql = "SELECT id FROM data WHERE file_foto = %s"
            val = (filefoto,)
            mycursor.execute(sql,val)
            inp_id = mycursor.fetchall()
            for x in inp_id:
                ids = ids + x
    totalFrames += 1
    retval, buffer = cv2.imencode('.jpg', frame)
    data = base64.b64encode(buffer)
    socketio.emit('kirei', data)
Example #10
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()

    index_default = 0
    prototxt_default = ["mobilenet_ssd/MobileNetSSD_deploy.prototxt", "pednet/deploy.prototxt"]
    caffe_default = ["mobilenet_ssd/MobileNetSSD_deploy.caffemodel", "pednet/snapshot_iter_70800.caffemodel"]
    labels_default = ["mobilenet_ssd/MobileNetSSD_deploy_labels.txt", "pednet/class_labels.txt"]

    ap.add_argument("-p", "--prototxt", default=prototxt_default[index_default],
                    help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", default=caffe_default[index_default],
                    help="path to Caffe pre-trained model")
    ap.add_argument("-l", "--labels", default=labels_default[index_default],
                    help="path to Caffe class labels")
    ap.add_argument("-i", "--input", type=str,
        help="path to optional input video file")
    ap.add_argument("-o", "--output", type=str,
        help="path to optional output video file")
    ap.add_argument("-c", "--confidence", type=float, default=0.4,
        help="minimum probability to filter weak detections")
    ap.add_argument("-s", "--skip-frames", type=int, default=30,
        help="# of skip frames between detections")
    args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to
    # detect

    CLASSES = []
    if index_default == 0:
        CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
        "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
        "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
        "sofa", "train", "tvmonitor"]
    else:
        with open(args["labels"]) as f:
            CLASSES = f.readlines()
        CLASSES = [line.strip() for line in CLASSES]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # if a video path was not supplied, grab a reference to the webcam
    if not args.get("input", False):
        print("[INFO] starting video stream...")
        # vs = VideoStream(src=0).start()
        vs = cv2.VideoCapture(get_jetson_gstreamer_source(), cv2.CAP_GSTREAMER)
        time.sleep(2.0)  # TODO I think this is here to give cv2 time to load the camera resource...

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    # fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame (both branches above use cv2.VideoCapture,
        # so read() already returns the (ret, frame) pair)
        ret, frame = vs.read()

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if args["output"] is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(args["output"], fourcc, 30,
                (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()
            print(CLASSES)

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    print(CLASSES[idx])
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        # fps.update()

    # stop the timer and display FPS information
    # fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        # vs.stop()
        vs.release()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()
    def doStuff(self, frame, origFrame, W, H):
        # the frame from BGR to RGB for dlib
        # frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        if self.origW is None or self.origH is None:
            (self.origW, self.origH) = origFrame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % self.SKIP_FRAMES == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            self.trackers = []

            yoloDetections = self.yoloInference.runInference(
                frame, W, H, self.CONFIDENCE_LIMIT)
            # loop over the detections
            for detection in yoloDetections:
                class_type = detection.classType

                # compute the (x, y)-coordinates of the bounding box
                # for the object
                box = detection.box[0:4] * np.array([1, 1, 1, 1])
                (startX, startY, endX, endY) = box.astype("int")

                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)

                if __myDebug__:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 0), 1)
                    cv2.putText(frame, class_type, (startX, startY),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

                tracker.start_track(rgb, rect)

                container = TrackerExt(class_type, tracker,
                                       (startX, startY, endX, endY))

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                self.trackers.append(container)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for trackerContainer in self.trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"
                tracker = trackerContainer.tracker

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                trackerContainer.rect = (startX, startY, endX, endY)
                # add the bounding box coordinates to the rectangles list
                rects.append(trackerContainer.rect)

                if __myDebug__:
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 0, 0), 2)
                    cv2.putText(frame, trackerContainer.class_type,
                                (startX, startY), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        extractedRects = [
            trackerContainer for trackerContainer in self.trackers
        ]

        objects = self.ct.update(extractedRects)

        # loop over the tracked objects
        for (objectID, centroidTrackerData) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            centroid = centroidTrackerData[0]
            className = centroidTrackerData[1]
            rect = centroidTrackerData[2]

            directionX = 0
            directionY = 0

            ratioH = self.origH / H
            ratioW = self.origW / W

            # if there is no existing trackable object, create one
            if to is None:

                rect2 = (rect[0] * ratioW, rect[1] * ratioH, rect[2] * ratioW,
                         rect[3] * ratioH)
                clipped = clipImage(origFrame, rect2)

                if className == 'car' or className == 'truck':
                    details = self.__getObjectDetails__(origFrame,
                                                        rect,
                                                        typeName=className)
                    if details and len(details) > 0:
                        predictions = details["predictions"]
                        try:
                            isPost = next((match for match in predictions
                                           if float(match["probability"]) > 0.7
                                           and match["tagName"] == "Post"),
                                          None)
                        except GeneratorExit:
                            pass
                        if isPost:
                            className = "postcar"
                            messageIoTHub = IoTHubMessage(
                                """{"Name":"Postauto"}""")
                            AppState.HubManager.send_event_to_output(
                                "output2", messageIoTHub, 0)

                self.__saveToBlobStorage(clipped,
                                         id=objectID,
                                         typeName=className)
                fullName = className + "-full"
                clipped = clipImage(origFrame, [0, 0, self.origW, self.origH])
                self.__saveToBlobStorage(clipped,
                                         id=objectID,
                                         typeName=fullName)

                to = TrackableObject(objectID, className, centroid)
                self.__sendToIoTHub__(to, rect, frame)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving
                y = [c[1] for c in to.centroids]
                x = [c[0] for c in to.centroids]

                directionY = centroid[1] - np.mean(y)
                directionX = centroid[0] - np.mean(x)

                if len(to.centroids) >= 300:
                    temp = to.centroids[2:]
                    to.centroids = temp
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    if int(directionY) == 0 and int(directionX) == 0:
                        #self.totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif directionY > 0 and centroid[1] > H // 2:
                        self.totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            directX = int(round(directionX, 1))
            directY = int(round(directionY, 1))
            colorCircle = (20, 250, 130)
            colorArrow = (0, 0, 250)
            colorText = (0, 255, 0)
            text = "{}: {}".format(objectID, to.type)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, colorText, 1)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, colorCircle, -1)
            cv2.arrowedLine(frame, (centroid[0], centroid[1]),
                            (centroid[0] + directX, centroid[1] + directY),
                            colorArrow, 2)
            #if len(to.centroids)>1 :
            #    cv2.polylines(frame, np.int32(to.centroids), True, colorCircle, 1)
        # increment the total number of frames processed thus far and
        # then update the FPS counter
        self.totalFrames += 1
        self.fps.update()
def record(dining_hall, src, output, sf):
    args = {
        "input": src,
        "output": output,
        "confidence": 0.4,
        "skip_frames": sf
    }

    df = pd.DataFrame([], columns=['Location', 'Date', 'People'])

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    execution_path = os.getcwd()
    net = cv2.dnn.readNetFromCaffe(
        os.path.join(execution_path,
                     "mobilenet_ssd/MobileNetSSD_deploy.prototxt"),
        os.path.join(execution_path,
                     "mobilenet_ssd/MobileNetSSD_deploy.caffemodel"))

    # grab a reference to the video file
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(args["input"])

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            now = datetime.datetime.now()
            datetime_current = now.strftime("%Y-%m-%d %H:%M")
            output_path = 'output/{}{}.avi'.format(dining_hall[0],
                                                   datetime_current)
            writer = cv2.VideoWriter(os.path.join(execution_path, output_path),
                                     fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # if more people have moved down than up, append a row with the
        # location, current timestamp, and net count to the dataframe
        if totalDown - totalUp > 0:
            now = datetime.datetime.now()
            df = df.append(
                {
                    'Location': dining_hall[0],
                    'Date': now.strftime("%Y-%m-%d %H:%M:%S"),
                    'People': totalDown - totalUp
                },
                ignore_index=True)
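            # NOTE (hedged): DataFrame.append was removed in pandas 2.0; with a
            # newer pandas the equivalent would be something like:
            #     df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)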

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # otherwise, release the video file pointer
    vs.release()

    # write results to csv
    df.to_csv('results.csv', mode='a', index=True, header=False)

    # close any open windows
    cv2.destroyAllWindows()
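# --- Hedged sketch, not part of the original listing ---
# Several of these examples assume a small TrackableObject container. A minimal
# version consistent with the usage above (an object ID, a centroid history,
# and a counted flag) might look like the class below; other examples extend it
# with extra attributes such as color, vehicle type, timestamps, or speed.
class TrackableObject:
    def __init__(self, objectID, centroid):
        # store the object ID and seed the list of centroids
        self.objectID = objectID
        self.centroids = [centroid]
        # flag used so the same object is only counted once
        self.counted = False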
Exemple #13
0
def executeTnM(INFO, NET, METADATA, IMSHOW = False, DRAW_ZONE = False, SAVE_DETS = False):

    tracker_types = ['KCF', 'TLD', 'MEDIANFLOW', 'MOSSE', 'Dlib']
    tracker_type = tracker_types[4]
    dir_n = os.path.dirname(__file__)

    
    #=============================================CONFIGURATION CONSTANTS====================================================
    # Display the video
    IMSHOW = IMSHOW
    # Draw the zone in this code. Won't work through the service.
    DRAW_ZONE = DRAW_ZONE
    # Save detections
    SAVE_DETS = SAVE_DETS
    # Shape divider
    WIDTHDIVIDER = 2
    DLIB_TOLERANCE = 10  # Threshold for dlib to decide whether the tracked object was lost or not
    CENTROID_DIST = 60  # distance between centroids

    # Json object
    jsonpath = dir_n + "/TM_DATA/JSON/DATA_OUTPUT.json"
    #=======================================================================================================================
    #json2save = "/home/pdi/Felipe_data/TM_DATA/JSON/" + os.path.splitext( os.path.basename(INFO['video']) )[0]

    try:
        os.mknod(jsonpath)
    except:
        pass

    #========================================================DATA DICTIONARY================================================
    DATA_OUTPUT = {
        'state' : 'processing',
        'video' : INFO['video'],
        'poly': INFO['poly'],
        'count' : {
            'cars' : 0,
            'motorbikes' : 0,
            'heavy' : 0
        },
        'progress' : 0,
        'errors' : None,
        'Warnings' : None

    }  
    #========================================================================================================================

    # Reading the video to process
    INPUTVIDEO = INFO['video']
    POLY = INFO['poly']

    # Getting zone corners
    inputZones = []
    for i in range(4):
        inputZones.append( (POLY[i]['x'], POLY[i]['y']) )

    ok = True

    # Path to the video to process
    #INPUTVIDEO = '/home/pdi/Felipe_data/T&M_videos2process/Etiquetado20160301_090424/Etiquetado20160301_090424.mp4'
    #INPUTVIDEO = args['video']

    #================================================== Handling file errors============================================
    if( not (os.path.exists(INPUTVIDEO)) ):
        print('Error')
        error = 'File not found.'
        print(error)

        # Updates output data with the error before exiting
        DATA_OUTPUT['errors'] = error
        with open(jsonpath, 'w') as DATA_OUTPUTjson:
            json.dump(DATA_OUTPUT, DATA_OUTPUTjson)
        exit()

    # Validating video format
    VALID_EXT = ['.mp4','.avi','.flv','.mov']
    _, VIDEO_EXT = os.path.splitext(INPUTVIDEO)
    if( not (VIDEO_EXT in VALID_EXT) ):
        print('Error')
        error = '{} format not supported. Allowed formats {}'.format(VIDEO_EXT,VALID_EXT) 
        print(error)

        # Updates output data with the error before exiting
        DATA_OUTPUT['errors'] = error
        with open(jsonpath, 'w') as DATA_OUTPUTjson:
            json.dump(DATA_OUTPUT, DATA_OUTPUTjson)
        exit()

    #==================================================================================================================


    # Frames skipped between detections
    SKIPFRAMES = 1
    # Types of vehicles to detect
    vehiclesTypes = ["b'car'", "b'motorbike'","b'bus'","b'truck'"]

    # Initializing trackers
    totalFrames = 0
    trackers = []
    trackableObjects = {}

    # Centroid tracker
    d = CENTROID_DIST
    ct = CentroidTracker(maxDisappeared=10, maxDistance=d, vehiclesTypes=vehiclesTypes, SAVE_DETS = SAVE_DETS,SAVE_PATH = dir_n)


    if SAVE_DETS:
        # Make directories to save the detections, for checking the detector's behavior.
        #path_count = os.path.join(dir_n, '/data/')
        try:
            os.mkdir(dir_n + '/counting3/')
            os.mkdir(dir_n + '/counting3/' + 'cars')
            os.mkdir(dir_n + '/counting3/' + 'motorbikes')
            os.mkdir(dir_n + '/counting3/' + 'heavy')
        except:
            try:
                os.mkdir(dir_n + '/counting3/' + 'cars')
            except:
                pass
            try:
                os.mkdir(dir_n + '/counting3/' + 'motorbikes')
            except:
                pass
            try:
                os.mkdir(dir_n + '/counting3/' + 'heavy')
            except:
                pass

    # Creating the video object
    cap = cv2.VideoCapture(INPUTVIDEO)
    det = 0

    # FPS of the video
    videoFPS = cap.get(cv2.CAP_PROP_FPS)

    # Length of the video
    longVideo = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Start timing the processing
    start = time.time()

    # Initializing some variables
    old_boxes = []
    maxDistance_2 = d
    trackers = []
    carTypesList = []
    make_tracker = False
    con = 0

    # Read the first frame of the video to draw the box where the detections will be done
    ret, frame = cap.read()
    frame = imutils.resize(frame, width=704, height=480)


    # gets the corner points of the drawn box
    if DRAW_ZONE:
        inputZones = []
        inputZones = selectPolygonZone(frame,'green')
        inputZones = inputZones[0]
        #inputZones = [(237, 195), (163, 404), (666, 395), (491, 163), (237, 195)]
    polizone = Polygon( [inputZones[0], inputZones[1], inputZones[2], inputZones[3]] )
    pts = np.array([ [inputZones[0][0],inputZones[0][1]] ,[inputZones[1][0],inputZones[1][1]] , [inputZones[2][0],inputZones[2][1]], [inputZones[3][0],inputZones[3][1]] ])


    # Helper functions: 'limit' returns the y-coordinate of the line through
    # pts[1] and pts[2] at a given x; 'radius' scales linearly with its input
    m = (pts[2][1] - pts[1][1] ) / (pts[2][0] - pts[1][0] )
    b = pts[1][1] - m * pts[1][0]
    limit = lambda x: b + m*x

    m2 = (120-50)/(431-183)
    radius = lambda y: m2*y


    #========================================================Video Processing============================================
    while cap.isOpened():

        # Capture frame-by-frame
        ret, frame = cap.read()
        frame = imutils.resize(frame, width=704, height=480)
        frame_height, frame_width,_ = frame.shape
        if IMSHOW:
            frame_toshow = frame.copy()    # this is just for drawing and displaying

            # Draws the detection area on the frame to display
            cv2.polylines(frame_toshow, [pts], True, (80,180,23), 3)
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        
        rects = []
        carTypesList_l = []

        # If there are objects being tracked, process them first; otherwise continue straight to the detections
        if trackers:
            #print(len(trackers))

            old_boxes = []
            to_del = []
            # loop over the trackers
            for i, tracker in enumerate(trackers):

                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                if(tracker_type == 'Dlib'):
                    ok = True
                    pos_conf = tracker.update(frame)
                    pos = tracker.get_position()
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    #print(pos_conf)
                    if(pos_conf < DLIB_TOLERANCE ):
                        ok = False

                else:
                    ok,pos = tracker.update(frame)
                
                    # unpack the position object
                    startX = int(pos[0])
                    startY = int(pos[1])
                    endX = int(pos[0] + pos[2])
                    endY = int(pos[1] + pos[3])

                # if part of the tracker's box lies outside the frame, crop it;
                # this helps keep the centroid close to the detection
                if(endX > frame_width):
                    endX = frame_width
                if(endY > frame_height):
                    endY = frame_height
                
                if IMSHOW:
                    cv2.rectangle(frame_toshow, (startX-int(startX*0.02),startY-int(startY*0.05)),(endX+int(endX*0.02),endY+int(endY*0.05)), (0, 255, 0), 2)

                #print(ok)
                if(not ok):
                    # list of the trackers that have been lost
                    to_del.append(i)
                else:
                    # Trackers that are still ok are passed to our "current boxes"
                    rects.append((startX, startY, endX, endY))
                    old_boxes.append((startX, startY, endX, endY))
            
            for i in range(len(to_del)):
                # Removes Lost trackers
                del trackers[to_del[-i -1]]
                del carTypesList[to_del[-i-1]]

        # time for detections: SKIPFRAMES controls how often we detect (1 = every frame, 2 = every 2 frames, and so on).
        # If there are no trackers at all we also need to run detections.
        if( (totalFrames % SKIPFRAMES == 0) or (not trackers) ):
            # set the status and initialize our new set of object trackers
            status = "Detecting"

            rects = []
            carTypesList_l = []
            
            # This YOLO wrapper has to read the image from disk, so write the frame out first
            cv2.imwrite('cam.jpg', frame)
            detections = detect(NET, METADATA,  b'cam.jpg', thresh=0.7)

            # Iterates the detections
            for detection in detections:
                # The model was trained on COCO, which has 80 classes; we only care about these 4.
                if((str(detection[0]) == "b'car'") or (str(detection[0]) == "b'motorbike'") or (str(detection[0]) ==  "b'bus'") or (str(detection[0]) == "b'truck'")):
                    pt1 = (int(detection[2][0]-detection[2][2]/WIDTHDIVIDER),
                        int(detection[2][1]-detection[2][3]/WIDTHDIVIDER))
                    pt2 = (int(detection[2][0]+detection[2][2]/WIDTHDIVIDER),
                        int(detection[2][1]+detection[2][3]/WIDTHDIVIDER))

                    
                    #rect = dlib.rectangle(np.int64(pt1[0]), np.int64(
                    #    pt1[1]), np.int64(pt2[0]), np.int64(pt2[1]))

                    center = (int(detection[2][0]), int(detection[2][1]) )
                    if(polizone.contains( Point( center)  ) ):
                        
                        # then appends it to our current boxes, xi,yi,xf,yf
                        rects.append( ( pt1[0],pt1[1],pt2[0],pt2[1] ) )
                        carTypesList_l.append(str(detection[0]))
                        det += 1

            # Check whether each newly detected object is already being tracked so we don't create a duplicate tracker.
            if(old_boxes):
                con += 1

                centers = []
                areas = []

                # Centers of the old boxes (The ones currently tracked)
                for old_box in old_boxes:
                    centers.append( ( int( (old_box[0] + old_box[2])/2 ),int( (old_box[1] + old_box[3])/2 ) ) )
                    areas.append( (old_box[2] - old_box[0])*(old_box[3] - old_box[1]) )

                # Iterate the new boxes (The detected ones)
                for rect, carType in zip(rects, carTypesList_l):

                    # Gets the new boxes centers
                    ccenter = ( int( (rect[0] + rect[2])/2 ),int( (rect[1] + rect[3])/2 ) )
                    carea = (rect[2] - rect[0])*(rect[3] - rect[1])

                    # We will create a new tracker unless it already exists
                    make_tracker = True
                    
                    # Iterate old centers
                    for i,(center,area) in enumerate(zip(centers,areas)):
                        
                        # Distance between the new center and the old
                        if(distance.euclidean(ccenter,center) < d ):
                            
                            # If it is less than the threshold then it's the same object, so don't make a new tracker.
                            if(area < carea*0.8):

                                # The vehicles approach the camera and grow larger, but the trackers
                                # (at least the OpenCV ones) don't adapt their box size well, so the
                                # tracker box and the detection box eventually differ so much that
                                # their centroids drift apart and a duplicate object would be created.
                                # To avoid this we re-initialize the tracker whenever its size differs
                                # too much from the detection.
                                trackers[i] = crateTracker(tracker_type)
                                if( tracker_type == 'Dlib'):
                                    drect = dlib.rectangle(rect[0], rect[1], rect[2], rect[3])
                                    trackers[i].start_track(frame, drect)
                                else:
                                    trackers[i].init(frame, (rect[0],rect[1],rect[2]-rect[0],rect[3]-rect[1]) )
                            make_tracker = False
                            break
                    
                    # If there is no match, create a new tracker object
                    if(make_tracker):
                        tracker = crateTracker(tracker_type)
                        if( tracker_type == 'Dlib'):
                            drect = dlib.rectangle(rect[0], rect[1], rect[2], rect[3])
                            tracker.start_track(frame, drect)
                        else:
                            tracker.init(frame, (rect[0],rect[1],rect[2]-rect[0],rect[3]-rect[1]) )
                        trackers.append(tracker)
                        carTypesList.append(carType)
                        old_boxes.append( rect )

            # If there isn't a single tracked object yet, then every detection is a new object.
            else:
                for rect, carType in zip(rects, carTypesList_l):
                    tracker = crateTracker(tracker_type)
                    if( tracker_type == 'Dlib'):
                        drect = dlib.rectangle( rect[0], rect[1], rect[2], rect[3] )
                        tracker.start_track(frame, drect)
                    else:
                        tracker.init(frame, (rect[0],rect[1],rect[2]-rect[0],rect[3]-rect[1]) )
                    trackers.append(tracker)
                    carTypesList.append(carType)
                old_boxes = rects.copy()

        # old_boxes are the boxes of the objects in the current frame;
        # pass them to our centroid tracker to assign an ID to each new object.
        objects, carTypeObjects, boxes = ct.update(old_boxes, carTypesList, frame)
        # loop over the tracked objects
        
        for i, (objectID, centroid) in enumerate( objects.items() ):
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)
        
            # if there is no existing trackable object, create one
            if to is None:

                to = TrackableObject(objectID, centroid, set_color(
                    carTypeObjects[objectID]), carTypeObjects[objectID])


            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "{1}_{0} ".format(to.objectID,to.vehicle)
            
            
            if IMSHOW:
                #print(centroid,objectID)
                cv2.putText(frame_toshow, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, to.color, 2)
                cv2.circle(frame_toshow, (centroid[0], centroid[1]), 2, to.color, -1)


        if IMSHOW:

            cv2.imshow('Frame', frame_toshow)   
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break

        
        # Updating data processing
        #printProgressBar(totalFrames,longVideo-2,"Progeso: ",str(round(endImage - startImage,2)) + "s por imagen",2,100)
        DATA_OUTPUT = updateCountDict(DATA_OUTPUT, ct.get_vehicleCount())
        DATA_OUTPUT['progress'] = str( round( (totalFrames * 100)/(longVideo - 2),1 ) )  + "%"

        # Writing the json with the current info
        with open(jsonpath, 'w') as DATA_OUTPUTjson:
            json.dump(DATA_OUTPUT, DATA_OUTPUTjson)

        totalFrames += 1
        if (totalFrames + 1) == longVideo:

            # Writing the json with the output info and saving the info of the process to a new json
            DATA_OUTPUT['state'] = 'free'
            with open(jsonpath, 'w') as DATA_OUTPUTjson:
                json.dump(DATA_OUTPUT, DATA_OUTPUTjson)
            
            break
    cv2.destroyAllWindows()
    return ct.get_vehicleCount()
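# --- Hedged sketch, not part of the original listing ---
# crateTracker() is called above but not defined in this snippet. Given the
# tracker_types list ('KCF', 'TLD', 'MEDIANFLOW', 'MOSSE', 'Dlib'), a plausible
# factory looks like the following; the exact constructor names depend on the
# opencv-contrib build (newer builds expose some of these under cv2.legacy).
import cv2
import dlib

def crateTracker(tracker_type):
    # dlib uses a correlation tracker that is started with start_track()
    if tracker_type == 'Dlib':
        return dlib.correlation_tracker()
    # OpenCV trackers are created by factory functions and started with init()
    factories = {
        'KCF': cv2.TrackerKCF_create,
        'TLD': cv2.TrackerTLD_create,
        'MEDIANFLOW': cv2.TrackerMedianFlow_create,
        'MOSSE': cv2.TrackerMOSSE_create,
    }
    return factories[tracker_type]()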
Exemple #14
0
    def read_video(self):
        fgbg = cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=10, detectShadows=True)
        cap = cv2.VideoCapture(self.vid_name)

        c=0
        while c<5:
            cap.read()
            c+=1
        dis = 0

        prev_time = time()
        last_time = 0
        count_no_frame=0
        count = 0
        reinitialize = 0
        pflag = False
        i=0

        wt = 1  # 100
        pause = False
        first_frame = False
        W = None
        H = None
        ct = CentroidTracker(maxDisappeared=1, maxDistance=1200)
        trackableObjects = {}
        totalDown = 0
        totalUp = 0

        fp_count=0
        skip_frames=115
        #skip_frames=75


        #out = cv2.VideoWriter('./output.avi', -1, 20.0, (640,480))

        #size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        #fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # 'x264' doesn't work
        #out = cv2.VideoWriter('./001_output.mp4',fourcc, 29.0, size, False)


        file_count=0
        print("Starting infinite loop and continue to read frames")
        while True:
            # print("hello")
            #print("fil=",file_count%1000)
            if(0 == file_count%1000):
                #print("check file")
                #lines
                print("Check if we have data to post")
                file_fd=open('post_data_file.txt','r')
                lines=file_fd.readlines()
                #print("lines=",lines)
                for line in lines:
                    #print("line=",line)
                    headers = {'content-type': "application/json",'cache-control': "no-cache"}
                    url = "http://app.gizmosmart.io/iot/1.3/public/peopleCounting/token/51987536541"
                    try:
                        arr = line
                        response = requests.request("POST", url, data=arr, headers=headers)
                        #print("in file post=",response.text)
                    except Exception as e:
                        #print ("Post Error: ",str(e)) 
                        print("Failed to post data: %s",str(e))
                        #self.post_data_file.write(arr+"\n")
                        #continue
                #print("truncate the file")
                #file_fd.truncate(0)
                with open('post_data_file.txt','a+') as fil:
                    fil.truncate(0)
                file_count=0

                        
            file_count+=1
            key = cv2.waitKey(wt) & 0xFF
            if (key == ord('p')):
                pause = not pause
            if (key == ord('s')):
                wt = int(wt * 2)
            if (key == ord('f')):
                wt = int(wt / 2)
            if(key == ord('q')):
                break

            if (pause):
                continue
            ret, frame = cap.read()

            #out.write(frame)

            if not ret:
                print("ret is null, could not open camera")
                tm.sleep(1)
                #continue

            fp_count +=1

            #if fp_count<skip_frames:
            #    continue
            #if(fp_count%3):
            #    continue

            if(self.cam_rotat):
                frame=self.rotateImage(frame, 90)

            #cv2.imshow("framee",frame)
            #continue


            if ret is True:
                # Show the filtered image

                h, w, c = frame.shape
                #print("shape=",w,h,c)
                fram = cv2.resize(frame, (int(w / 2), int(h / 2)))
                if self.draw:
                    cv2.imshow("original fram", fram)

            if not ret:
                count += 1
                print("ret is zero,count=",count)
                        #iscamopened=cap.isOpened()
                        #print("cap opened=",iscamopened)
                        #if(not iscamopened):
                        #    break
                if count >= 5:
                    print("frame reached max unread frame 5")
                            #reinitialize camera
                    print("release the camera",self.camera_id)
                    cap.release()
                    reinitialize += 1
                    if reinitialize <= 4000:
                        #cam_url=gen_session.get_cam_url(self.camera_id)
                        cam_url=self.vid_name
                        print(reinitialize," attempt to reopen camera",cam_url)
                        cap = cv2.VideoCapture(cam_url)
                        if(cap.isOpened()):
                            count=0
                            reinitialize = 0
                            continue
                        else:
                            print("camera could not be opned")
                                    #count=0
                            continue
                            #break
                    else:
                        print("reached to max 4000 reinitialized camera attempt")
                        break
                else:
                    continue

            cy = 350
            cx = 50
            ch = 380
            cw = 200


            cx = self.ref_line_min_x
            cw = self.ref_line_max_x - cx
            cy=self.ref_line_min_y
            ch=self.ref_line_max_y-cy



            #cy = 100
            #cx = 260
            #ch = 180
            #cw = 200

            # cy = 150
            # cx=200
            # ch=250
            # cw=220
            crop_img = fram[cy:cy + ch, cx:cx + cw]
            if W is None or H is None:
                (H, W) = crop_img.shape[:2]
            if not first_frame:
                #backSubtractor = BackGroundSubtractor(0.007, self.denoise(crop_img))
                backSubtractor = BackGroundSubtractor(0.007, self.denoise(crop_img))
                run = True
                first_frame = True
                continue

            # cv2.imshow("crop_img",crop_img)

            crop_img_dn = self.denoise(crop_img)
            # # cv2.imshow('input',denoise(fram))
            # cv2.imshow('input', crop_img_dn)
            #
            # # bgsimg=bgsobj.get_fg_image(denoise_img)
            # # cv2.imshow("bgsimg",bgsimg)
            foreGround = backSubtractor.getForeground(crop_img_dn)

            # ret, foreGroundb = cv2.threshold(foreGround, 35, 255, 0)
            foreGroundb = cv2.cvtColor(foreGround, cv2.COLOR_BGR2GRAY)
            # Apply thresholding on the background and display the resulting mask
            #ret, mask = cv2.threshold(foreGroundb, 38, 255, cv2.THRESH_BINARY)
            ret, mask = cv2.threshold(foreGroundb, 25, 255, cv2.THRESH_BINARY)
            cv2.imshow("mask",mask)
            h, w = mask.shape

            mask = self.line_erode(mask)
            f_contours = self.find_contours_img(mask)
            self.draw_contours(crop_img.copy(), f_contours)

            #if self.show_fgbg == 1:
            #    cv2.imshow('mask', mask)

            cv2.line(crop_img, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            rects = []
            count += 1
            status = "Waiting"

            for c in f_contours:
                box_colour = (0, 255, 0)
                (x, y, w, h) = cv2.boundingRect(c)
                (startX, startY, endX, endY) = (x, y, x + w, y + h)
                rects.append((startX, startY, endX, endY))
                cv2.rectangle(crop_img, (startX, startY), (int(endX), int(endY)), box_colour, 2)

            objects = ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    # for c in to.centroids:
                    #	print("c====",c)
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    dr_last_point = None
                    centroid_len = len(to.centroids)
                    if (centroid_len > 80):
                        del to.centroids[:(centroid_len - 80)]
                    for cc in to.centroids:
                        # print("cc=",cc)
                        cx = cc[0]
                        cy = cc[1]
                        if (dr_last_point == None):
                            dr_last_point = (cx, cy)
                            continue
                        # cx=cc[0]
                        # cy=cc[1]
                        # cv2.circle(crop_img, (cx,cy), 1, (0,0,255), thickness=2, lineType=8, shift=1)
                        cv2.line(crop_img, (dr_last_point), (cx, cy), (0, 255, 0), thickness=1, lineType=8)
                        dr_last_point = (cx, cy)

                    if not to.counted:
                        # print("centroid[1]=", centroid[1])
                        # print("H/2=", H / 2)
                        if direction < 0 and centroid[1] < H // 2:
                            up_dir_dist = H / 2 - centroid[1]
                            # print(H/4, " up_dir_dist=", up_dir_dist)
                            if (up_dir_dist > 0 and up_dir_dist < H / 4):
                            #if (up_dir_dist > 0 and up_dir_dist < H / 6):
                                # print("caputred up")
                                totalUp += 1
                                # count_in += 1
                                to.counted = True
                        elif direction > 0 and centroid[1] > H // 2:
                            down_dir_dist = centroid[1] - H / 2
                            # print(H/4, " down_dir_dist=", down_dir_dist)
                            if (down_dir_dist > 0 and down_dir_dist < H / 4):
                            #if (down_dir_dist > 0 and down_dir_dist < H / 6):
                                # print("caputured down")
                                totalDown += 1
                                # count_out += 1
                                to.counted = True

                trackableObjects[objectID] = to
                text = "ID {} {}".format(objectID, to.counted)

                cv2.putText(crop_img, text, (centroid[0] - 10, centroid[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 1)
                cv2.circle(crop_img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

                # print("to=", to)

            info = [
                #("Up", totalUp),
                #("Down", totalDown),
                ("IN", totalUp),
                ("OUT", totalDown),
                #("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                # cv2.putText(crop_img, text, (10, H - ((i * 20) + 20)),
                # 			cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                cv2.putText(crop_img, text, (10, i * 20 + 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)

            if self.draw:
                cv2.imshow("pc roi", crop_img)
            key = cv2.waitKey(1) & 0xFF

            #post the in/out status to server
            cur_time = time() - prev_time

            if (cur_time > 10 and cur_time - last_time > 10):
            #if (cur_time > 100000 and cur_time - last_time > 100000):
                last_time = cur_time
                cur_machine_time = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
                count_IN = totalUp
                count_OUT = totalDown

                arr = self.json_prepare(self.camera_id, str(count_IN), str(count_OUT),
                                    cur_machine_time)  # +" "+str("10:00:00"))
                print("arr=",arr)
                #print("arr=",json.dumps(arr))

                #url = "http://sales.gizmohelp.com/mcrm/2.1-test/people_counting"
                #arr = {'camera_id': '559475', 'created_date': '2018-10-03 10:00:00', 'people_out': '0', 'people_in': '0'}
            # arr = "{\"camera_id\":\"528365\",\"created_date\":\"2018-10-03 10:00:00\",\"people_in\":\"1\",\"people_out\":\"1\"}"
                headers = {'content-type': "application/json",'cache-control': "no-cache"}
                #print("post data=",json.dumps(arr))

                # response = requests.request("POST", url, data=json.dumps(arr), headers=headers)
                #
                # print("response.txt=",response.text)



                url = "http://app.gizmosmart.ios/iot/1.3/public/peopleCounting/token/51987536541"
                #arr = {"cameraId": 550113 , "data": [{"countIn": "1", "countOut": "2","date": "2018-11-13", "time": "03:00:01" }]}
                headers = {'content-type': "application/json",'cache-control': "no-cache"}
                #print("post data=",json.dumps(arr))
                try:
                    print("post count status:")
                    #response = requests.request("POST", url, data=json.dumps(arr), headers=headers)
                    response = requests.request("POST", url, data=arr, headers=headers)
                    print("response=",response.text)
                except Exception as e:
                    print ("Post Error: ",str(e)) 
                    self.post_data_file.write(arr+"\n")
                    continue
                    #continue


             #   response = requests.request("POST", url, data=arr, headers=headers)
                
             #   print("response.txt=",response.text)
            #if (cur_time > 10 and cur_time - last_time > 10):


                totalUp=0
                totalDown=0



        print("Processing Complete")
        print("Entrances: ", self.EntranceCounter)
        print(self.camera_id," Exits:", self.ExitCounter) 
        cap.release()
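# --- Hedged sketch, not part of the original listing ---
# BackGroundSubtractor is constructed above with a learning rate and the first
# denoised crop, then queried with getForeground(); a simple running-average
# implementation consistent with that usage could be:
import cv2

class BackGroundSubtractor:
    def __init__(self, alpha, firstFrame):
        # learning rate and the initial background model (kept as float)
        self.alpha = alpha
        self.backGroundModel = firstFrame.astype("float")

    def getForeground(self, frame):
        # blend the current frame into the running background model...
        cv2.accumulateWeighted(frame, self.backGroundModel, self.alpha)
        # ...and return the absolute difference as the foreground image
        return cv2.absdiff(frame.astype("float"), self.backGroundModel).astype("uint8")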
Exemple #15
0
    def programLoop(self):
        while True:
            # grab the next frame from the stream, store the current
            # timestamp, and store the new date
            self.ret, self.frame = self.vs.read()
            self.ts = datetime.now()
            newDate = self.ts.strftime("%m-%d-%y")

            # check if the frame is None, if so, break out of the loop
            if (self.frame is None):
                # break
                return

            # if the log file has not been created or opened
            self.createLogFileIfNotExist()

            # resize the frame
            self.frame = imutils.resize(self.frame,
                                        width=self.conf["frame_width"])
            self.rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if ((self.W is None) or (self.H is None)):
                (self.H, self.W) = self.frame.shape[:2]
                self.meterPerPixel = self.conf["distance"] / self.W

            # initialize our list of bounding box rectangles returned by
            # either (1) our object detector or (2) the correlation trackers
            self.rects = []

            self.runComputationallyTaskingAlgoIfBasicAlgoFails()

            objects = self.ct.update(self.rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = self.trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, if there is a trackable object and its speed has
                # not yet been estimated then estimate it
                elif not to.estimated:
                    # check if the direction of the object has been set, if
                    # not, calculate it, and set it
                    if to.direction is None:
                        y = [c[0] for c in to.centroids]
                        direction = centroid[0] - np.mean(y)
                        to.direction = direction

                    # if the direction is positive (indicating the object
                    # is moving from left to right)
                    if to.direction > 0:
                        # check to see if timestamp has been noted for
                        # point A
                        if to.timestamp["A"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["A"]:
                                to.timestamp["A"] = self.ts
                                to.position["A"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point B
                        elif to.timestamp["B"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["B"]:
                                to.timestamp["B"] = self.ts
                                to.position["B"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point C
                        elif to.timestamp["C"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["C"]:
                                to.timestamp["C"] = self.ts
                                to.position["C"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point D
                        elif to.timestamp["D"] == 0:
                            # if the centroid's x-coordinate is greater than
                            # the corresponding point then set the timestamp
                            # as current timestamp, set the position as the
                            # centroid's x-coordinate, and set the last point
                            # flag as True
                            if centroid[0] > self.conf[
                                    "speed_estimation_zone"]["D"]:
                                to.timestamp["D"] = self.ts
                                to.position["D"] = centroid[0]
                                to.lastPoint = True

                    # if the direction is negative (indicating the object
                    # is moving from right to left)
                    elif to.direction < 0:
                        # check to see if timestamp has been noted for
                        # point D
                        if to.timestamp["D"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["D"]:
                                to.timestamp["D"] = self.ts
                                to.position["D"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point C
                        elif to.timestamp["C"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["C"]:
                                to.timestamp["C"] = self.ts
                                to.position["C"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point B
                        elif to.timestamp["B"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp and set the position as the
                            # centroid's x-coordinate
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["B"]:
                                to.timestamp["B"] = self.ts
                                to.position["B"] = centroid[0]

                        # check to see if timestamp has been noted for
                        # point A
                        elif to.timestamp["A"] == 0:
                            # if the centroid's x-coordinate is lesser than
                            # the corresponding point then set the timestamp
                            # as current timestamp, set the position as the
                            # centroid's x-coordinate, and set the last point
                            # flag as True
                            if centroid[0] < self.conf[
                                    "speed_estimation_zone"]["A"]:
                                to.timestamp["A"] = self.ts
                                to.position["A"] = centroid[0]
                                to.lastPoint = True

                    # check to see if the vehicle is past the last point and
                    # the vehicle's speed has not yet been estimated, if yes,
                    # then calculate the vehicle speed and log it if it's
                    # over the limit
                    if to.lastPoint and not to.estimated:
                        # initialize the list of estimated speeds
                        estimatedSpeeds = []

                        # loop over all the pairs of points and estimate the
                        # vehicle speed
                        for (i, j) in self.points:
                            # calculate the distance in pixels
                            d = to.position[j] - to.position[i]
                            distanceInPixels = abs(d)

                            # check if the distance in pixels is zero, if so,
                            # skip this iteration
                            if distanceInPixels == 0:
                                continue

                            # calculate the time in hours
                            t = to.timestamp[j] - to.timestamp[i]
                            timeInSeconds = abs(t.total_seconds())
                            timeInHours = timeInSeconds / (60 * 60)

                            # calculate distance in kilometers and append the
                            # calculated speed to the list
                            distanceInMeters = distanceInPixels * self.meterPerPixel
                            distanceInKM = distanceInMeters / 1000
                            estimatedSpeeds.append(distanceInKM / timeInHours)

                        # calculate the average speed
                        to.calculate_speed(estimatedSpeeds)

                        # set the object as estimated
                        to.estimated = True
                        print("[INFO] Speed of the vehicle that just passed" \
                            " is: {:.2f} MPH".format(to.speedMPH))

                # store the trackable object in our dictionary
                self.trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(self.frame, text,
                            (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(self.frame, (centroid[0], centroid[1]), 4,
                           (0, 255, 0), -1)

                # check if the object has not been logged
                if not to.logged:
                    # check if the object's speed has been estimated and it
                    # is higher than the speed limit
                    if to.estimated and to.speedMPH > self.conf["speed_limit"]:
                        # set the current year, month, day, and time
                        year = self.ts.strftime("%Y")
                        month = self.ts.strftime("%m")
                        day = self.ts.strftime("%d")
                        time = self.ts.strftime("%H:%M:%S")

                        # check if dropbox is to be used to store the vehicle
                        # image
                        if self.conf["use_cloudinary"]:
                            # initialize the image id, and the temporary file
                            imageID = self.ts.strftime("%H%M%S%f")
                            tempFile = TempFile()
                            cv2.imwrite(tempFile.path, self.frame)

                            # create a thread to upload the file to dropbox
                            # and start it
                            t = Thread(target=self.upload_file,
                                       args=(
                                           tempFile,
                                           imageID,
                                       ))
                            t.start()
                            # t = Thread(target=app(self.keys, tempFile.path).main())
                            # t.start()
                            # app(keys, tempFile.path).main()

                            # log the event in the log file
                            info = "{},{},{},{},{},{}\n".format(
                                year, month, day, time, to.speedMPH, imageID)
                            self.logFile.write(info)

                        # otherwise, we are not uploading vehicle images to
                        # dropbox
                        else:
                            # log the event in the log file
                            info = "{},{},{},{},{}\n".format(
                                year, month, day, time, to.speedMPH)
                            self.logFile.write(info)

                        # set the object has logged
                        to.logged = True

            # if the *display* flag is set, then display the current frame
            # to the screen and record if a user presses a key
            if self.conf["display"]:
                cv2.imshow("frame", self.frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key is pressed, break from the loop
                if key == ord("q"):
                    break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
            self.fps.update()

            # stop the timer and display FPS information
            self.fps.stop()
            print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))
def videoDetection():
    global detectVideoName, line, startPoint, endPoint, lineFlag, startCountFlag, odapi, initBB, roi_area, roi_elements, totalDown

    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalFrames = 0
    W = None
    H = None
    roi = 250

    frame_size_w = 500
    frame_size_h = 400

    model_path = 'faster_rcnn_inception_v2/frozen_inference_graph.pb'
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.7

    #cap = cv2.VideoCapture(UtilsIO.SAMPLE_FILE_NAME_2)
    # cap = c4v2.VideoCapture(config.CONFIG_IP_CAM)
    cap = cv2.VideoCapture("videos/" + str(detectVideoName) + ".avi")

    # start the frames per second throughput estimator
    fps = FPS().start()

    while True:
        r, img = cap.read()

        if img is None:
            if int(detectVideoName) > 1:
                os.remove("videos/" + str(detectVideoName - 1) + ".avi")

            detectVideoName += 1
            cap.release()
            cv2.destroyAllWindows()
            videoDetection()

        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (frame_size_w, frame_size_h))
        cv2.namedWindow('preview')

        cv2.setMouseCallback('preview', on_mouse)
        if lineFlag:
            if startPoint == True and endPoint == True:
                try:
                    cv2.line(img, (line[0], line[1]), (line[2], line[3]),
                             (255, 0, 255), 2)
                except:
                    pass

        if initBB is not None and roi_elements is None or (
                initBB is not None and initBB != roi_elements.box):
            roi_elements = RoiElements(initBB)

        if initBB is not None:
            roi_area = roi_elements.getRoiArea(img)

        if W is None or H is None:
            (H, W) = img.shape[:2]

        status = "Waiting"
        rects = []

        if totalFrames % 8 == 0:
            status = "Detecting"
            trackers = []
            if roi_area is not None:
                boxes, scores, classes, num = odapi.processFrame(roi_area)

            else:
                boxes, scores, classes, num = odapi.processFrame(img)

            # Visualization of the results of a detection.
            for i in range(len(boxes)):
                # Class 1 represents human
                if classes[i] == 1 and scores[i] > threshold:
                    box = boxes[i]

                    tracker = dlib.correlation_tracker()
                    if roi_elements is not None:

                        left = box[1] + roi_elements.roi_area.coord_right
                        top = box[0] + roi_elements.roi_area.coord_bottom
                        right = box[3] + roi_elements.roi_area.coord_right
                        bottom = box[2] + roi_elements.roi_area.coord_bottom
                        rect = dlib.rectangle(int(left), int(top), int(right),
                                              int(bottom))
                    else:
                        left = box[1]
                        top = box[0]
                        right = box[3]
                        bottom = box[2]
                        rect = dlib.rectangle(int(left), int(top), int(right),
                                              int(bottom))
                    tracker.start_track(rgb, rect)

                    trackers.append(tracker)

        else:
            for tracker in trackers:
                status = "Tracking"

                tracker.update(rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                rects.append((startX, startY, endX, endY))
                cv2.rectangle(img, (startX, startY), (endX, endY), (0, 255, 0),
                              2)

            #        cv2.line(img, (0, roi), (W, roi), (0, 255, 255), 2)

            objects = ct.update(rects)

            if startCountFlag and roi_elements.line is not None:
                dy = roi_elements.calcMeanYDistance()
                dx = roi_elements.calcMeanXDistance()
                angle = roi_elements.calculateAngle()

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():

                if roi_elements is not None and roi_elements.line is not None:
                    if roi_elements.checkCentroidInsideLine(centroid) == False:
                        pass
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    directionY = centroid[1] - np.mean(y)
                    x = [c[0] for c in to.centroids]
                    directionX = centroid[0] - np.mean(x)

                    if not to.counted:

                        if roi_elements is not None and roi_elements.line is not None:

                            if angle > 45 and directionX > 0 and centroid[
                                    0] > dx:
                                totalDown += 1
                                to.counted = True
                                frameNew = img[startY:startY + endY,
                                               startX:startX + endX]
                                cv2.imsow("image", frameNew)
                                UtilsIO.saveImages(config.CONFIG_IP_CAM,
                                                   totalDown, frameNew)

                            elif angle < 45 and directionY > 0 and centroid[
                                    1] > dy:
                                totalDown += 1
                                to.counted = True
                                frameNew = img[startY:startY + endY,
                                               startX:startX + endX]
                                UtilsIO.saveImages(config.CONFIG_IP_CAM,
                                                   totalDown, frameNew)

                        elif (directionY > 0
                              and centroid[1] > frame_size_h // 2):
                            totalDown += 1
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Down", totalDown),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(img, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if roi_elements is not None:
                # roi area
                cv2.rectangle(img, (roi_elements.roi_area.coord_left,
                                    roi_elements.roi_area.coord_top),
                              (roi_elements.roi_area.coord_right,
                               roi_elements.roi_area.coord_bottom),
                              (0, 255, 0), 2)
            if roi_elements is not None and roi_elements.line is not None:
                cv2.line(img, (roi_elements.line[0], roi_elements.line[1]),
                         (roi_elements.line[2], roi_elements.line[3]),
                         (255, 0, 255), 2)

            # show the output frame
            cv2.imshow("preview", img)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            if key == ord("s"):
                initBB = None
                initBB = cv2.selectROI("ROI_FRAME",
                                       img,
                                       fromCenter=False,
                                       showCrosshair=False)
            #            cv2.imshow("tmp", tmp)
            #            cv2.namedWindow('real image')
            #            a = cv.SetMouseCallback('real image', on_mouse, 0)
            #            cv2.imshow('real image', img)

            if key == ord("x"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                if lineFlag == False:
                    lineFlag = True
                else:
                    lineFlag = False

            if key == ord("b"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                if startCountFlag is False:
                    startCountFlag = True
                    if roi_elements is not None and roi_elements.line is None:
                        roi_elements.setLine(line)
                        totalDown = 0
                else:
                    startCountFlag = False

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
            print("Safe to Turn Left!")
            print("     :) :) :)     ")
            counter = 0
            num_zeros = 0

    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        filt = pa.filters.FilterLMS(1, mu=2)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # Note: centroid[i] = (cX, cY)

        to.distance = ds.distance_detection(to, centroid[1])
        y = filt.predict(centroid[1])
        filt.adapt(to.distance, centroid[1])

        if to.lastLoc == 0:
            # no last location recorded, so add and not calculate speed
            to.lastLoc = centroid[1]
        else:
            to.speed = ds.data_collection(to.lastLoc, centroid[1])
            if to.speed > 0:
                # only calculate vehicles that have not passed the driver
                if centroid[1] <= 360:
Exemple #18
0
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        if centroid[1] < trending_frame.shape[0] and centroid[
                0] < trending_frame.shape[1]:
            trending_frame[centroid[1], centroid[0]] = 1

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            for line in lines:
                y = [c[1] for c in to.centroids]
                x = [c[0] for c in to.centroids]
                mean_point = (np.mean(x), np.mean(y))
                updown_direction = centroid[1] - np.mean(y)
                rightleft_direction = centroid[0] - np.mean(x)
                to.centroids.append(centroid)
def recognition():
    global count
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-t",
                    "--target",
                    type=str,
                    required=True,
                    choices=["myriad", "cpu"],
                    help="target processor for object detection")
    ap.add_argument("-m",
                    "--mode",
                    type=str,
                    required=True,
                    choices=["horizontal", "vertical"],
                    help="direction in which people will be moving")
    ap.add_argument("-c",
                    "--conf",
                    required=True,
                    help="Path to the input configuration file")
    ap.add_argument("-i",
                    "--input",
                    type=str,
                    help="path to optional input video file")
    ap.add_argument("-o",
                    "--output",
                    type=str,
                    help="path to optional output video file")
    args = vars(ap.parse_args())

    # load the configuration file
    conf = Conf(args["conf"])

    # initialize the list of class labels MobileNet SSD detects
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(conf["prototxt_path"], conf["model_path"])

    # check if the target processor is myriad, if so, then set the
    # preferable target to myriad
    if args["target"] == "myriad":
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    # otherwise, the target processor is CPU
    else:
        # set the preferable target processor to CPU and preferable
        # backend to OpenCV
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)

    # if a video path was not supplied, grab a reference to the webcam
    if not args.get("input", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        #vs = VideoStream(usePiCamera=True).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(args["input"])

    # initialize the video writer process (we'll instantiate later if
    # need be) along with the frame dimensions
    writerProcess = None
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(maxDisappeared=20, maxDistance=30)
    trackers = []
    trackableObjects = {}
    # initialize the direction info variable (used to store information
    # such as up/down or left/right people count) and a variable to store
    # the total number of frames processed thus far
    directionInfo = None
    totalFrames = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if args["input"] is not None and frame is None:
            break

        # convert the frame from BGR to RGB for dlib
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # check to see if the frame dimensions are not set
        if W is None or H is None:
            # set the frame dimensions and instantiate our direction
            # counter
            (H, W) = frame.shape[:2]
            dc = DirectionCounter(args["mode"], H, W)

        # begin writing the video to disk if required
        if args["output"] is not None and writerProcess is None:
            # set the value of the write flag (used to communicate when
            # to stop the process)
            writeVideo = Value('i', 1)

            # initialize a frame queue and start the video writer
            frameQueue = Queue()
            writerProcess = Process(target=write_video,
                                    args=(args["output"], writeVideo,
                                          frameQueue, W, H))
            writerProcess.start()

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % conf["skip_frames"] == 0:
            # set the status and initialize our new set of object
            # trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame,
                                         size=(300, 300),
                                         ddepth=cv2.CV_8U)
            net.setInput(blob,
                         scalefactor=1.0 / 127.5,
                         mean=[127.5, 127.5, 127.5])
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > conf["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # check if the direction is *vertical*
        if args["mode"] == "vertical":
            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # otherwise, the direction is *horizontal*
        else:
            # draw a vertical line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'left' or 'right'
            cv2.line(frame, (W // 2, 0), (W // 2, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # grab the trackable object via its object ID
            to = trackableObjects.get(objectID, None)

            # create a new trackable object if needed
            if to is None:
                to = TrackableObject(objectID, centroid,
                                     datetime.datetime.now())
                cursor.execute(
                    'INSERT INTO trackhistory (track_time,track_date) VALUES (%s,%s)',
                    (to.dt, to.dt))
                #urllib.request.urlopen("https://api.thingspeak.com/update?api_key="+API+"&field1=0"+str(1))
                count += 1

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # find the direction and update the list of centroids
                dc.find_direction(to, centroid)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # find the direction of motion of the people
                    directionInfo = dc.count_object(to, centroid)

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            color = (0, 255, 0) if to.counted else (0, 0, 255)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

        # check if there is any direction info available
        if directionInfo is not None:
            # construct a list of information as a combination of
            # direction info and status info
            info = directionInfo + [("Status", status)]

        # otherwise, there is no direction info available yet
        else:
            # construct a list of information as status info since we
            # don't have any direction info available yet
            info = [("Status", status)]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # put frame into the shared queue for video writing
        if writerProcess is not None:
            frameQueue.put(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    import matplotlib.pyplot as plt
    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    if not args.get("input", False):
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()
    count = -1
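
# The snippets above and below all rely on a small TrackableObject helper that
# is not shown here. The class below is only a minimal sketch, inferred from
# how the attribute names are used in these examples (objectID, centroids,
# counted); individual snippets bolt on extra attributes such as elapsedTime,
# speed, lastLoc or a creation timestamp as plain instance attributes.
class TrackableObject:
    def __init__(self, objectID, centroid, dt=None):
        # unique ID assigned by the centroid tracker
        self.objectID = objectID
        # history of centroids, used to estimate the direction of motion
        self.centroids = [centroid]
        # creation timestamp (only some snippets pass one)
        self.dt = dt
        # flag indicating whether the object has already been counted
        self.counted = False
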
    def run(self, confidence_thresh=0.4, skip_frames=30):
        firstframe = self.vs.read()
        if not firstframe[0]:
            logging.error(
                f"Did not get a single frame from input file {self.inputFile}")
            raise ValueError('Input file did not produce a single frame.')
        while True:
            # grab the next frame and handle if we are reading from
            # VideoStream
            frame = self.vs.read()
            frame = frame[1]

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video, restart
            if frame is None:
                self.vs = cv2.VideoCapture(self.inputFile)
                self.currentOccupancy = 25
                continue

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if self.W is None or self.H is None:
                (self.H, self.W) = frame.shape[:2]

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if self.totalFrames % skip_frames == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > confidence_thresh:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if self.CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array(
                            [self.W, self.H, self.W, self.H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            #cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = self.ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = self.trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < self.H // 2:
                            self.totalOut += 1
                            self.currentOccupancy -= 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > self.H // 2:
                            self.totalIn += 1
                            self.currentOccupancy += 1
                            to.counted = True

                # store the trackable object in our dictionary
                self.trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
            #    text = "ID {}".format(objectID)
            #    cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
            #               cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #    cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # construct a tuple of information we will be displaying on the
            # frame
            #info = [
            #    ("Out", self.totalOut),
            #    ("In", self.totalIn),
            #    ("Occupancy", self.currentOccupancy),
            #    ("Status", status),
            #]

            ## loop over the info tuples and draw them on our frame
            #for (i, (k, v)) in enumerate(info):
            #    text = "{}: {}".format(k, v)
            #    cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),
            #                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the output frame
            #cv2.imshow("Frame", frame)
            #key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            #if key == ord("q"):
            #    break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
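
# The counting rule used in the occupancy example above can be isolated into a
# small pure function: the sign of (current y - mean of previous y's) gives the
# direction of motion, and the object is only counted once it is also past the
# horizontal midline. A minimal sketch (the helper name and return convention
# are assumptions, not part of the original code):
import numpy as np


def update_count(to, centroid, frame_height):
    """Return +1 (entered), -1 (left) or 0, and mark the object as counted."""
    y = [c[1] for c in to.centroids]
    direction = centroid[1] - np.mean(y)
    to.centroids.append(centroid)

    if not to.counted:
        # moving up and already above the midline -> leaving
        if direction < 0 and centroid[1] < frame_height // 2:
            to.counted = True
            return -1
        # moving down and already below the midline -> entering
        if direction > 0 and centroid[1] > frame_height // 2:
            to.counted = True
            return 1
    return 0
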
    def run(self):
        # load our serialized model from disk
        net = cv2.dnn.readNetFromCaffe(self.prototxt, self.model)
        vs = PiVideoStream().start()
        time.sleep(2.0)

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:

            if self.stop_thread:
                break
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()

            # stop if no frame was returned by the stream
            if frame is None:
                break

            # resize the frame to have a maximum width of 300 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=300)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if self.totalFrames % self.skipFrames == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > self.confidence:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if self.CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            self.totalIn += 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            self.totalOut += 1
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("In", self.totalIn),
                ("Out", self.totalOut),
                ("Status", status),
            ]

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            self.totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # close any open windows
        cv2.destroyAllWindows()
        vs.stop()
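
# Several of the run() methods above repeat the same detection step: build a
# blob from the frame, run the MobileNet-SSD forward pass, keep only confident
# "person" detections and return their boxes. A minimal sketch of that step,
# assuming `net` was loaded with cv2.dnn.readNetFromCaffe and `classes` is the
# 21-entry MobileNet-SSD label list used in these snippets:
import cv2
import numpy as np


def detect_people(net, frame, classes, min_confidence=0.4):
    (H, W) = frame.shape[:2]
    # same preprocessing as the snippets: scale 1/127.5 and mean 127.5
    blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
    net.setInput(blob)
    detections = net.forward()

    boxes = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        idx = int(detections[0, 0, i, 1])
        # keep only confident detections of the "person" class
        if confidence > min_confidence and classes[idx] == "person":
            box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
            boxes.append(tuple(box.astype("int")))
    return boxes
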
def display_instances(image, boxes, masks, ids, names, scores, nFrames,
                      totalDown, totalUp, trackers):
    """
        take the image and results and apply the mask, box, and Label
    """

    # initialize the current status along with our list of bounding
    # box rectangles returned by either (1) our object detector or
    # (2) the correlation trackers
    status = "Waiting"
    rects = []
    """" - - - - - - - - - -- - - - - - -"""

    H = image.shape[0]
    W = image.shape[1]
    n_instances = boxes.shape[0]
    #print(n_instances)
    if not n_instances:
        print('NO INSTANCES TO DISPLAY')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]
    if nFrames % args["skip_frames"] == 0:
        status = "Detecting"
        trackers = []
    for i in range(n_instances):
        # detecting objects loop
        if not np.any(boxes[i]):
            continue
        label = names[ids[i]]
        if label != "car":  #and label != "person" and label != "motorcycle" and label != "bicycle":
            continue
        if label == "car" and scores[i] > 0.85:
            image, x1, y1, x2, y2 = detections(image, scores[i], class_dict,
                                               masks[:, :, i], label, boxes[i])
        else:
            continue
        # if label == "person" and scores[i] > 0.90:
        #     image, x1, y1, x2, y2 = detections(image, scores[i], class_dict, masks[:, :, i], label, boxes[i])
        # images and masks of image
        #image, x1, y1, x2, y2 = detections(image, scores[i], class_dict, masks[:, :, i], label, boxes[i])
        if nFrames % args["skip_frames"] != 0:
            continue
        # add the bounding box coordinates to the rectangles list
        # rects.append((x1, y1, x2, y2))
        # construct a dlib rectangle object from the bounding
        # box coordinates and then start the dlib correlation
        # tracker
        tracker = dlib.correlation_tracker()
        rect = dlib.rectangle(x1, y1, x2, y2)
        tracker.start_track(image, rect)
        # add the tracker to our list of trackers so we can
        # utilize it during skip frames
        trackers.append(tracker)

    if nFrames % args["skip_frames"] != 0:

        # loop over the trackers
        for tracker in trackers:
            # set the status of our system to be 'tracking' rather
            # than 'waiting' or 'detecting'
            status = "Tracking"
            # update the tracker and grab the updated position
            tracker.update(image)
            pos = tracker.get_position()
            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            # add the bounding box coordinates to the rectangles list
            rects.append((startX, startY, endX, endY))

    # draw a horizontal line in the center of the frame -- once an
    # object crosses this line we will determine whether they were
    # moving 'up' or 'down'
    cv2.line(image, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
    #print(len(rects))
    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)
    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)
        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)
        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            if not to.counted:
                # if the direction is negative (indicating the object
                # is moving up) AND the centroid is above the center
                # line, count the object
                if direction < 0 and centroid[1] < H // 2:
                    totalUp += 1
                    to.counted = True

                # if the direction is positive (indicating the object
                # is moving down) AND the centroid is below the
                # center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True

        # store the trackable object in our dictionary
        trackableObjects[objectID] = to
        # draw both the ID of the object and the centroid of the
        # object on the output frame
        text = "ID {}".format(objectID)
        cv2.putText(image, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (4, 31, 26), 2)
        cv2.circle(image, (centroid[0], centroid[1]), 4, (4, 31, 26), -1)
    # construct a tuple of information we will be displaying on the
    # frame
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]

    # loop over the info tuples and draw them on our frame
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(image, text, (10, H - ((i * 20) + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    return image, totalDown, totalUp, trackers
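
# display_instances() above, like the other loops in this file, alternates an
# expensive detector pass (every `skip_frames` frames) with cheap dlib
# correlation tracking in between. The pattern in isolation looks roughly like
# the sketch below (the `detect_boxes` callable is an assumed stand-in for
# whichever detector a given snippet uses):
import dlib


def detect_or_track(rgb, frame_number, skip_frames, trackers, detect_boxes):
    """Return the bounding boxes for this frame, refreshing trackers as needed."""
    rects = []
    if frame_number % skip_frames == 0:
        # detection frame: rebuild the tracker list from fresh detections
        trackers.clear()
        for (startX, startY, endX, endY) in detect_boxes(rgb):
            tracker = dlib.correlation_tracker()
            tracker.start_track(rgb, dlib.rectangle(startX, startY, endX, endY))
            trackers.append(tracker)
            rects.append((startX, startY, endX, endY))
    else:
        # tracking frame: just update the existing correlation trackers
        for tracker in trackers:
            tracker.update(rgb)
            pos = tracker.get_position()
            rects.append((int(pos.left()), int(pos.top()),
                          int(pos.right()), int(pos.bottom())))
    return rects
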
Exemple #23
0
    def loop(self):
        W = None
        H = None

        frame = self.vs.read()
        frame = frame[1]

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if self.file is not None and frame is None:
            return

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        self.rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % 30 == 0:
            # set the status and initialize our new set of object trackers
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > 0.4:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                self.rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(rgb, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = self.ct.update(self.rects)

        # loop over the tracked objects

        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        self.totalUp += 1
                        self.total = self.totalDown - self.totalUp
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        self.totalDown += 1
                        self.total = self.totalDown - self.totalUp
                        to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame

            cv2.circle(rgb, (centroid[0], centroid[1] - 20), 4, (0, 255, 0),
                       -1)

        # show the output frame
        self.videoframe = rgb
        self.master.after(25, self.loop)

        self.totalFrames = self.totalFrames + 1
    def get_frame(self):
        if self.vs.isOpened():
            ret, frame = self.vs.read()
            frame = imutils.resize(frame, width=900)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.H, self.W = frame.shape[:2]

            status = "Waiting"
            rects = []
            if self.totalFrames % 10 == 0:
                status = "Detecting"

                self.trackers = []
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()
                for i in np.arange(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence > 0.4:
                        idx = int(detections[0, 0, i, 1])
                        if idx == 15:
                            box = detections[0, 0, i, 3:7] * np.array(
                                [self.W, self.H, self.W, self.H])
                            (startX, startY, endX, endY) = box.astype("int")
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 255), 2)
                            centroid = (int(
                                (startX + endX) / 2), int((startY + endY) / 2))
                            if centroid[1] <= self.H - 230 and centroid[
                                    1] >= self.H - 320:
                                tracker = dlib.correlation_tracker()
                                rect = dlib.rectangle(startX, startY, endX,
                                                      endY)
                                tracker.start_track(rgb, rect)
                                self.trackers.append(tracker)
            else:
                for tracker in self.trackers:
                    status = "Tracking"
                    tracker.update(rgb)
                    pos = tracker.get_position()
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    rects.append((startX, startY, endX, endY))

            #cv2.line(frame, (0, self.H-320), (self.W, self.H-320), (255, 0, 0), 2)
            cv2.line(frame, (0, self.H - 290), (self.W, self.H - 290),
                     (0, 255, 255), 2)
            #cv2.line(frame, (0, self.H-230), (self.W, self.H-230), (255, 0, 0), 2)
            objects = self.ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = self.trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                    if not to.counted:
                        if direction < 0 and centroid[1] < self.H - 270:
                            self.totalDown += 1
                            to.counted = True
                        if direction > 0 and centroid[1] > self.H - 270:
                            self.totalUp += 1
                            to.counted = True
                self.trackableObjects[objectID] = to
            self.totalFrames += 1
            info = [
                ("IN", self.totalUp),
                ("Status", status),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                #cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                frame = cv2.resize(frame, (500, 500),
                                   interpolation=cv2.INTER_AREA)
            global count
            count = self.totalUp
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            # the capture is not open, so there is no frame to return
            return (False, None)
def thread_for_capturing_face():
    print("[INFO] Running Thread 1...")
    global net
    global total_faces_detected_locally
    global CLASSES
    global vs
    global ct
    global trackers
    global trackableObjects
    global totalFrames
    global totalDown
    global totalUp
    global totalPeople
    global centroid_list
    while True:
        ret, frame = vs.read()

        #frame = frame
        frame = cv2.resize(frame, (240, 240), interpolation=cv2.INTER_AREA)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        (H, W) = frame.shape[:2]

        status = "Waiting"
        rects = []

        if totalFrames % 10 == 0:
            status = "Detecting"
            trackers = []

            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > 0.5:
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (0, 255, 0), 2)
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)
        else:
            # loop over the trackers
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():
            if objectID in moveDict:
                values = moveDict[objectID]
                values.append(centroid[1])
                moveDict[objectID] = values
            else:
                moveDict[objectID] = [centroid[1]]
            #print("[MOVE DICTIONARY]: ", moveDict)
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                #y = [c[1] for c in to.centroids]
                #x = [c[0] for c in to.centroids]
                #direction = centroid[0] - np.mean(x)
                #print("Direction of person:", direction)
                #print("Current Centroids 1: {} 2: {} vs. Middle {}".format(centroid[0], centroid[1], W //2))
                centroid_list.append(centroid[0])
                to.centroids.append(centroid)
                if not to.counted:
                    """
                    final_centroid = centroid_list[-1]
                    beginning_centroid = centroid_list[0]
                    if final_centroid < H // 2:
                        #print("[SRINI]: ", len(centroid_list))
                        #print("[MILAN]: ", (final_centroid - beginning_centroid))
                        if len(centroid_list) > 100 and final_centroid != beginning_centroid:
                            total_faces_detected_locally += 1
                            #print("[SRINI]: Number of people in =", total_faces_detected_locally)
                            to.counted = True
                            centroid_list.clear()
                    elif final_centroid > H // 2:
                        if len(centroid_list) > 100 and final_centroid != beginning_centroid:
                            total_faces_detected_locally -= 1
                            print("[SRINI]: Number of people in =", total_faces_detected_locally)
                            to.counted = True
                            centroid_list.clear()

                    """
                    print("CENTROID 1: ", centroid[1])
                    for keyName in moveDict:
                        keyVals = moveDict[keyName]
                        # for i in range(len(keyVals)):
                        # keyVals[i] = keyVals[i].item()
                        if "Counted" in keyVals:
                            pass
                        elif (keyVals[0] < W // 2) and (keyVals[-1] > W // 2):
                            totalUp += 1
                            totalPeople += 1
                            total_faces_detected_locally -= 1
                            values = moveDict[keyName]
                            values.append("Counted")
                            moveDict[keyName] = values
                            to.counted = True
                        elif (keyVals[0] > W // 2) and (keyVals[-1] < W // 2):
                            totalPeople -= 1
                            totalDown += 1
                            total_faces_detected_locally += 1
                            values = moveDict[keyName]
                            values.append("Counted")
                            moveDict[keyName] = values
                            to.counted = True

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        totalFrames += 1
        cv2.imshow("Frame", frame)
        cv2.waitKey(1)
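The moveDict logic above counts an object once its first recorded position and its most recent one fall on opposite sides of the counting line, then tags the ID so it is not counted again. Below is a minimal, self-contained sketch of that rule, assuming only a dict of per-ID position lists and a fixed line coordinate; the names count_line_crossings, position_history and count_line are illustrative, not from the original code.

def count_line_crossings(position_history, count_line):
    """Illustrative sketch: count each object once, the first time its earliest
    and latest recorded positions lie on opposite sides of count_line."""
    entered = left = 0
    for object_id, positions in position_history.items():
        if "Counted" in positions or len(positions) < 2:
            continue
        first, last = positions[0], positions[-1]
        if first < count_line < last:
            entered += 1
            positions.append("Counted")  # mark as counted, like the loop above
        elif first > count_line > last:
            left += 1
            positions.append("Counted")
    return entered, left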
def detect(save_img):
    out, source, weights, view_img, save_txt, imgsz = \
        opt.output, opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    webcam = source.isnumeric() or source.startswith(
        ('rtsp://', 'rtmp://', 'http://')) or source.endswith('.txt')

    # Initialize
    set_logging()
    cars = MainController.getLatestVehicleAmount('cars', 'carId')
    motors = MainController.getLatestVehicleAmount('motorcycles',
                                                   'motorcycleId')
    trucks = MainController.getLatestVehicleAmount('trucks', 'truckId')

    totalCarAmount = cars + motors + trucks
    totalCars = cars
    totalTrucks = trucks
    totalMotors = motors
    displayTotalAmount = totalCarAmount
    displayCarAmount = totalCars
    displayTruckAmount = totalTrucks
    displayMotorAmount = totalMotors
    oldCombinedAmount = 0
    combinedAmount = 0
    tempAmount = 0

    # Video = False, Webcam = True
    control = False

    elapsed = 0
    device = select_device(opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder
    start = time.time()
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(
            torch.load('weights/resnet101.pt',
                       map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz)

    else:
        save_img = True
        dataset = LoadImages(source, img_size=imgsz)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    # colors = [[np.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference

    t0 = time.time()
    ct = CentroidTracker()
    listDet = ['car', 'motorcycle', 'truck']

    totalDownCar = 0
    totalDownMotor = 0
    totalDownTruck = 0

    totalUpCar = 0
    totalUpMotor = 0
    totalUpTruck = 0

    trackableObjects = {}

    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        elapsed = time.time() - start
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        rects = []
        labelObj = []
        arrCentroid = []
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
                # cv2.resize(im0, (2560, 1440))
            else:
                p, s, im0 = path, '', im0s
                # cv2.resize(im0, (2560, 1440))

            height, width, channels = im0.shape
            cv2.line(im0, (0, int(height / 1.5)),
                     (int(width), int(height / 1.5)), (255, 0, 0),
                     thickness=3)
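            # Danish labels used below: 'Totale koeretoejer' = total vehicles, 'Bil' = car,
            # 'Motorcykel' = motorcycle, 'Lastbil' = truck, 'Frakoerende' = outbound, 'Modkoerende' = oncoming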

            if not control:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                cv2.putText(im0, 'Bil: ' + str(displayCarAmount),
                            (int(width * 0.02), int(height * 0.55)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Motorcykel: ' + str(displayMotorAmount),
                            (int(width * 0.02), int(height * 0.60)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
                cv2.putText(im0, 'Lastbil: ' + str(displayTruckAmount),
                            (int(width * 0.02), int(height * 0.65)),
                            cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255), 2)
            else:
                cv2.putText(im0,
                            'Totale koeretoejer: ' + str(displayTotalAmount),
                            (int(width * 0.02), int(height * 0.5)),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # cv2.line(im0, (int(width / 1.8), int(height / 1.5)), (int(width), int(height / 1.5)), (255, 127, 0), thickness=3)

            save_path = str(Path(out) / Path(p).name)
            txt_path = str(Path(out) / Path(p).stem) + (
                '_%g' % dataset.frame if dataset.mode == 'video' else '')
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1,
                                          0]]  # normalization gain whwh
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    label = '%s %.2f' % (names[int(cls)], conf)
                    # print(xyxy)
                    x = xyxy
                    tl = round(0.002 * (im0.shape[0] + im0.shape[1]) / 2) + 1  # line/font thickness
                    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
                    label1 = label.split(' ')
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                                gn).view(-1).tolist()  # normalized xywh
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * 5 + '\n') %
                                    (cls, *xywh))  # label format

                    if label1[0] in listDet:
                        cv2.rectangle(im0,
                                      c1,
                                      c2, (0, 0, 0),
                                      thickness=tl,
                                      lineType=cv2.LINE_AA)
                        box = (int(x[0]), int(x[1]), int(x[2]), int(x[3]))
                        rects.append(box)
                        labelObj.append(label1[0])
                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(label,
                                                 0,
                                                 fontScale=tl / 3,
                                                 thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(im0, c1, c2, (0, 100, 0), -1,
                                      cv2.LINE_AA)
                        cv2.putText(im0,
                                    label, (c1[0], c1[1] - 2),
                                    0,
                                    tl / 3, [225, 255, 255],
                                    thickness=tf,
                                    lineType=cv2.LINE_AA)

                detCentroid = generateCentroid(rects)
                objects = ct.update(rects)

                for (objectID, centroid) in objects.items():
                    arrCentroid.append(centroid[1])
                for (objectID, centroid) in objects.items():
                    # print(idxDict)
                    to = trackableObjects.get(objectID, None)
                    if to is None:
                        to = TrackableObject(objectID, centroid)
                    else:
                        y = [c[1] for c in to.centroids]
                        direction = centroid[1] - np.mean(y)
                        to.centroids.append(centroid)
                        if not to.counted:
                            # moving up: only count inside the band between height/1.7
                            # and height/1.5, otherwise a distant car whose bbox
                            # reappears can be counted twice
                            if direction < 0 and height / 1.7 < centroid[1] < height / 1.5:
                                idx = detCentroid.tolist().index(centroid.tolist())
                                if labelObj[idx] == 'car':
                                    totalUpCar += 1
                                    to.counted = True
                                elif labelObj[idx] == 'motorcycle':
                                    totalUpMotor += 1
                                    to.counted = True
                                elif labelObj[idx] == 'truck':
                                    totalUpTruck += 1
                                    to.counted = True

                            # moving down: count once the centroid is below the line
                            elif direction > 0 and centroid[1] > height / 1.5:
                                idx = detCentroid.tolist().index(centroid.tolist())
                                if labelObj[idx] == 'car':
                                    totalDownCar += 1
                                    to.counted = True
                                elif labelObj[idx] == 'motorcycle':
                                    totalDownMotor += 1
                                    to.counted = True
                                elif labelObj[idx] == 'truck':
                                    totalDownTruck += 1
                                    to.counted = True

                    trackableObjects[objectID] = to

                oldCarAmount = totalCarAmount
                oldTotalCars = totalCars
                oldTotalTrucks = totalTrucks
                oldTotalMotors = totalMotors

                combinedAmount = totalDownCar + totalDownTruck + totalDownMotor + \
                                 totalUpCar + totalUpMotor + totalUpTruck

                totalCars = totalDownCar + totalUpCar
                totalTrucks = totalDownTruck + totalUpTruck
                totalMotors = totalDownMotor + totalUpMotor

                if not oldCombinedAmount == combinedAmount:
                    tempAmount = totalCarAmount + combinedAmount
                    oldCombinedAmount = combinedAmount

                if oldCarAmount < tempAmount:
                    totalCarAmount = tempAmount

                if not oldCarAmount == totalCarAmount:
                    displayTotalAmount += 1

                    if not oldTotalCars == totalCars:
                        dbInsOrUpdCar(totalCars)
                        displayCarAmount += 1

                    if not oldTotalTrucks == totalTrucks:
                        dbInsOrUpdTruck(totalTrucks)
                        displayTruckAmount += 1

                    if not oldTotalMotors == totalMotors:
                        dbInsOrUpdMotorcycle(totalMotors)
                        displayMotorAmount += 1

                if not control:
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (50, 255, 255), 2)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, .75, (50, 255, 255),
                                2)
                else:
                    cv2.putText(im0, 'Frakoerende: ',
                                (int(width * 0.6), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalUpCar),
                                (int(width * 0.6), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalUpMotor),
                                (int(width * 0.6), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalUpTruck),
                                (int(width * 0.6), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

                    cv2.putText(im0, 'Modkoerende: ',
                                (int(width * 0.02), int(height * 0.10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 4, (50, 255, 255), 3)
                    cv2.putText(im0, 'Bil: ' + str(totalDownCar),
                                (int(width * 0.02), int(height * 0.15)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Motorcykel: ' + str(totalDownMotor),
                                (int(width * 0.02), int(height * 0.2)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)
                    cv2.putText(im0, 'Lastbil: ' + str(totalDownTruck),
                                (int(width * 0.02), int(height * 0.25)),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (50, 255, 255), 3)

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.namedWindow('Main', cv2.WINDOW_NORMAL)
                cv2.resizeWindow('Main', 1920, 1080)
                cv2.imshow("Main", im0)

                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)

                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fourcc = 'mp4v'  # output video codec
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*fourcc), fps,
                            (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % Path(out))

    print('Done. (%.3fs)' % (time.time() - t0))
    # moving 'up' or 'down'
    # cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

    # loop over the tracked objects
    for (objectID, centroid_rect) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid_rect[0], MHI_NUM_FRAMES)

        centroid = centroid_rect[0]
        startX = centroid_rect[1][0]
        startY = centroid_rect[1][1]
        endX = centroid_rect[1][2]
        endY = centroid_rect[1][3]

        to.bbqueue.enqueue(
            np.array([[startX, startY], [endX, endY], [startX, endY],
                      [endX, startY]]))
        to.boudingbox()

        b_x, b_y, b_w, b_h = to.bb
        cv2.rectangle(mhi_frame, (b_x, b_y), (b_x + b_w, b_y + b_h),
                      (0, 255, 0), 3)
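The vehicle counter in detect() above derives direction from the difference between the centroid's current y-coordinate and the mean of its previous y-coordinates, and counts each object only once; the 'up' case is further limited to a narrow band below the counting line so a distant vehicle whose box reappears is not counted twice. Below is a condensed sketch of that rule under the same assumptions (a TrackableObject exposing .centroids and .counted); the function name, the counts dict and passing the class label directly are illustrative simplifications.

import numpy as np

def update_direction_counts(to, centroid, line_y, label, counts):
    """Illustrative sketch of the direction rule used above; `counts` is a
    dict keyed by (direction, class label)."""
    if not to.centroids:
        to.centroids.append(centroid)
        return
    direction = centroid[1] - np.mean([c[1] for c in to.centroids])
    to.centroids.append(centroid)
    if to.counted:
        return
    if direction < 0 and centroid[1] < line_y:    # moving up, above the line
        counts[('up', label)] = counts.get(('up', label), 0) + 1
        to.counted = True
    elif direction > 0 and centroid[1] > line_y:  # moving down, below the line
        counts[('down', label)] = counts.get(('down', label), 0) + 1
        to.counted = True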
def person_tracker(yolo, video, cam_id, a, b, count_type):

    print("[INFO] opening video file...")
    fvs = WebcamVideoStream(video).start()
    time.sleep(0.5)
    W = None
    H = None
    ct = CentroidTracker(maxDisappeared=1, maxDistance=500)
    trackers = []
    trackableObjects = {}
    totalFrames = 0
    cnt = 0
    exit_cnt = 0
    scale_factor = 1
    fps = FPS().start()
    init_frame = fvs.read()
    if init_frame is None:
        print("[INFO] no frame read from video stream")
        return
    # print(init_frame.type)
    if init_frame.shape[1] == 1920:
        scale_factor = 4
    elif init_frame.shape[1] == 3072:
        scale_factor = 8
    frm_width = ceil(init_frame.shape[1] / scale_factor)
    frm_height = ceil(init_frame.shape[0] / scale_factor)
    a1 = [ceil(a_ / scale_factor) for a_ in a]
    b1 = [ceil(b_ / scale_factor) for b_ in b]
    while True:
        fps.update()
        skip_frames = 60
        frame = fvs.read()
        if frame is None:
            break

        frame = imutils.resize(frame, frm_width, frm_height)

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        rects = []
        if totalFrames % skip_frames == 0:
            trackers = []
            image = Image.fromarray(frame)
            boxs = yolo.detect_image(image)
            print(boxs)
            for box in boxs:

                startX = box[0]
                startY = box[1]
                endX = box[2] + startX
                endY = box[3] + startY
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                trackers.append(tracker)

        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

                # draw a horizontal line in the center of the frame -- once an
                # object crosses this line we will determine whether they were
                # moving 'up' or 'down'

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        for (objectID, data) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            centroid = data[0]
            objectRect = data[1]
            # print(objectRect)
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # # 'up' and positive for 'down')
                # y = [c[1] for c in to.centroids]
                # direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

            trackableObjects[objectID] = to
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            count_flag = chk_movement([centroid[0], centroid[1]], a1, b1,
                                      int(objectID), int(cam_id), count_type)
            if count_flag == 1:
                cnt += 1
                cnt_col.update(
                    {'cam_id': cam_id, 'video_file': video},
                    {'$set': {'entry_count': cnt,
                              'processed_timestamp': datetime.utcnow()}},
                    upsert=True)
            elif count_flag == -1:
                exit_cnt += 1
                cnt_col.update(
                    {'cam_id': cam_id, 'video_file': video},
                    {'$set': {'exit_count': exit_cnt,
                              'processed_timestamp': datetime.utcnow()}},
                    upsert=True)

        info = [("Exit", cnt), ("Entry", exit_cnt)]
        #
        # # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        cv2.imshow("Frame", cv2.resize(frame, (800, 600)))
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed so far
        # (the FPS counter is updated at the top of the loop)
        totalFrames += 1

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    print("completed....")
    fvs.stop()
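person_tracker above, like several of the other snippets, only runs the heavy detector every skip_frames frames and relies on cheap dlib correlation-tracker updates in between. Below is a stripped-down sketch of that scheduling; detect_fn is an assumed callable returning integer (startX, startY, endX, endY) boxes and is not part of the original code.

import dlib

def track_or_detect(frame_idx, rgb, trackers, detect_fn, skip_frames=30):
    """Illustrative sketch: detect every `skip_frames` frames, otherwise just
    update the existing dlib correlation trackers; returns this frame's boxes."""
    rects = []
    if frame_idx % skip_frames == 0:
        trackers.clear()
        for (startX, startY, endX, endY) in detect_fn(rgb):
            tracker = dlib.correlation_tracker()
            tracker.start_track(rgb, dlib.rectangle(startX, startY, endX, endY))
            trackers.append(tracker)
            rects.append((startX, startY, endX, endY))
    else:
        for tracker in trackers:
            tracker.update(rgb)
            pos = tracker.get_position()
            rects.append((int(pos.left()), int(pos.top()),
                          int(pos.right()), int(pos.bottom())))
    return rects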
Exemple #29
0
    #top line
    cv2.line(frame, (x1, y2), (x2, y2), (0, 0, 255), 2)

    # use the centroid tracker to associate the (1) old object
    # centroids with (2) the newly computed object centroids
    objects = ct.update(rects)

    # loop over the tracked objects
    for (objectID, centroid) in objects.items():
        # check to see if a trackable object exists for the current
        # object ID
        to = trackableObjects.get(objectID, None)

        # if there is no existing trackable object, create one
        if to is None:
            to = TrackableObject(objectID, centroid)

        # otherwise, there is a trackable object so we can utilize it
        # to determine direction
        else:
            # the difference between the y-coordinate of the *current*
            # centroid and the mean of *previous* centroids will tell
            # us in which direction the object is moving (negative for
            # 'up' and positive for 'down')
            y = [c[1] for c in to.centroids]
            direction = centroid[1] - np.mean(y)
            to.centroids.append(centroid)

            # check to see if the object has been counted or not
            cX = int((startX + endX) / 2.0)
            cY = int((startY + endY) / 2.0)
Exemple #30
0
def detect():
    global video_stream, outputFrame, lock, client, configurations

    config = configurations['people_counter']

    classes = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(config["prototxt"], config["model"])

    print("[INFO] opening video file...")
    video_stream = cv2.VideoCapture(config["input"])

    W = None
    H = None
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackable_objects = {}

    total_frames = 0
    total_down = 0
    total_up = 0
    total = 0
    new_person = False
    fps = FPS().start()

    while True:

        frame = video_stream.read()
        frame = frame[1] if "input" in config else frame
        if "input" in config and frame is None:
            break
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        status = "Waiting"
        rects = []

        if total_frames % config["skip_frames"] == 0:
            status = "Detecting"
            trackers = []
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > config["confidence"]:
                    idx = int(detections[0, 0, i, 1])
                    if classes[idx] != "person":
                        continue

                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (start_x, start_y, end_x, end_y) = box.astype("int")

                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(start_x, start_y, end_x, end_y)
                    tracker.start_track(rgb, rect)

                    trackers.append(tracker)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                start_x = int(pos.left())
                start_y = int(pos.top())
                end_x = int(pos.right())
                end_y = int(pos.bottom())
                rects.append((start_x, start_y, end_x, end_y))

        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        objects = ct.update(rects)

        for (objectID, centroid) in objects.items():

            to = trackable_objects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < H // 2:
                        total_up += 1
                        total += 1
                        to.counted = True
                        new_person = True

                    elif direction > 0 and centroid[1] > H // 2:
                        total_down += 1
                        total += 1
                        to.counted = True
                        new_person = True

            trackable_objects[objectID] = to

            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        info = [
            ("Up", total_up),
            ("Down", total_down),
            ("Status", status),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        if new_person:
            message = {"data": {"value": total}}
            client.publish('people', json.dumps(message))
            new_person = False

        # share the latest annotated frame with the streaming output
        with lock:
            outputFrame = frame.copy()

        total_frames += 1
        fps.update()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    video_stream.release()
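detect() publishes the running total through a pre-existing global MQTT client. Below is a minimal, hedged sketch of how such a client could be created and how a consumer could read the 'people' topic with paho-mqtt; the broker address, the 1.x-style constructor and the consumer loop are assumptions, not shown in the original.

import json
import paho.mqtt.client as mqtt

# Publisher side: one way the global `client` used by detect() could be set up
# (assumes a paho-mqtt 1.x style constructor and a local broker).
client = mqtt.Client()
client.connect("localhost", 1883)
client.loop_start()

# Consumer side: print each running total published on the 'people' topic.
def on_message(_client, _userdata, msg):
    payload = json.loads(msg.payload)
    print("people counted so far:", payload["data"]["value"])

subscriber = mqtt.Client()
subscriber.on_message = on_message
subscriber.connect("localhost", 1883)
subscriber.subscribe("people")
subscriber.loop_forever()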