Example #1
 def test_eq_Rectangles(self):
     self.assertTrue(Rectangle(1, 2)==Rectangle(1, 2))
     self.assertTrue(Rectangle(1, 2)==Rectangle(1.0, 2.0))
     self.assertTrue(Rectangle(11.13, 23.32)==Rectangle(11.13, 23.32))
     self.assertFalse(Rectangle(1, 2)==Rectangle(2, 1))
     self.assertFalse(Rectangle(11.13, 23.33)==Rectangle(11.13, 23.32))
     with self.assertRaisesRegex(ValueError, "niepoprawny typ"):  # "invalid type" (Polish)
         Rectangle(1, 2) == "Rectangle"
Example #2
 def test_make4(self):
     self.assertEqual(self.rec1.make4(), [
         Rectangle(1, 2, 3, 3),
         Rectangle(1, 3, 3, 4),
         Rectangle(3, 2, 5, 3),
         Rectangle(3, 3, 5, 4)
     ])
Example #3
class TestRectangle(unittest.TestCase):
    def setUp(self):
        self.rec1 = Rectangle(1, 2, 5, 4)
        self.rec2 = Rectangle(0, 2, 2, 4)
        self.rec3 = Rectangle(-1, -1, 1, 1)

    def test_str(self):
        self.assertEqual(
            str(self.rec1),
            "[(1, 2), (5, 4)]",
        )
        self.assertEqual(
            str(self.rec3),
            "[(-1, -1), (1, 1)]",
        )

    def test_repr(self):
        self.assertEqual(repr(self.rec1), "Rectangle(1, 2, 5, 4)")
        self.assertEqual(repr(self.rec3), "Rectangle(-1, -1, 1, 1)")

    def test_eq(self):
        self.assertFalse(self.rec1 == self.rec2)
        self.assertTrue(self.rec3 == self.rec3)
        self.assertTrue(self.rec2 == self.rec2)

    def test_ne(self):
        self.assertTrue(self.rec1 != self.rec2)
        self.assertFalse(self.rec3 != self.rec3)
        self.assertFalse(self.rec2 != self.rec2)

    def test_center(self):
        self.assertEqual(self.rec1.center(), Point(3, 3))
        self.assertEqual(self.rec2.center(), Point(1, 3))

    def test_area(self):
        self.assertEqual(self.rec1.area(), 8)
        self.assertEqual(self.rec2.area(), 4)

    def test_move(self):
        self.assertEqual(self.rec1.move(1, 1), Rectangle(2, 3, 6, 5))
        self.assertEqual(self.rec2.move(0, -1), Rectangle(0, 1, 2, 3))

    def test_intersection(self):
        self.assertEqual(self.rec1.intersection(self.rec2),
                         Rectangle(1, 2, 2, 4))

    def test_cover(self):
        self.assertEqual(self.rec1.cover(self.rec2), Rectangle(0, 2, 5, 4))
        self.assertEqual(self.rec1.cover(self.rec3), Rectangle(-1, -1, 5, 4))

    def test_make4(self):
        self.assertEqual(self.rec1.make4(), [
            Rectangle(1, 2, 3, 3),
            Rectangle(1, 3, 3, 4),
            Rectangle(3, 2, 5, 3),
            Rectangle(3, 3, 5, 4)
        ])

    def tearDown(self):
        pass
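
The class above pins down the Rectangle API tightly enough to reconstruct it. Below is a minimal sketch of Point/Rectangle classes that satisfies these tests; it is an assumed illustration, not the original implementation under test.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __repr__(self):
        return "Point({}, {})".format(self.x, self.y)


class Rectangle:
    def __init__(self, x1, y1, x2, y2):
        self.pt1 = Point(x1, y1)  # first corner
        self.pt2 = Point(x2, y2)  # opposite corner

    def __str__(self):
        return "[({}, {}), ({}, {})]".format(self.pt1.x, self.pt1.y,
                                             self.pt2.x, self.pt2.y)

    def __repr__(self):
        return "Rectangle({}, {}, {}, {})".format(self.pt1.x, self.pt1.y,
                                                  self.pt2.x, self.pt2.y)

    def __eq__(self, other):
        return self.pt1 == other.pt1 and self.pt2 == other.pt2

    def __ne__(self, other):
        return not self == other

    def center(self):
        return Point((self.pt1.x + self.pt2.x) / 2,
                     (self.pt1.y + self.pt2.y) / 2)

    def area(self):
        return (self.pt2.x - self.pt1.x) * (self.pt2.y - self.pt1.y)

    def move(self, x, y):
        return Rectangle(self.pt1.x + x, self.pt1.y + y,
                         self.pt2.x + x, self.pt2.y + y)

    def intersection(self, other):
        # No overlap validation; the tests only use overlapping inputs.
        return Rectangle(max(self.pt1.x, other.pt1.x),
                         max(self.pt1.y, other.pt1.y),
                         min(self.pt2.x, other.pt2.x),
                         min(self.pt2.y, other.pt2.y))

    def cover(self, other):
        return Rectangle(min(self.pt1.x, other.pt1.x),
                         min(self.pt1.y, other.pt1.y),
                         max(self.pt2.x, other.pt2.x),
                         max(self.pt2.y, other.pt2.y))

    def make4(self):
        c = self.center()
        return [Rectangle(self.pt1.x, self.pt1.y, c.x, c.y),
                Rectangle(self.pt1.x, c.y, c.x, self.pt2.y),
                Rectangle(c.x, self.pt1.y, self.pt2.x, c.y),
                Rectangle(c.x, c.y, self.pt2.x, self.pt2.y)]

The quadrant ordering of make4() here matches the expected list in the tests above; Example #4 below asserts a different ordering, so those tests evidently target a different implementation.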
Example #4
 def test_make4(self):
     self.assertEqual(
         Rectangle(1, 1, 5, 5).make4(), [
             Rectangle(1, 3, 3, 5),
             Rectangle(3, 3, 5, 5),
             Rectangle(1, 1, 3, 3),
             Rectangle(3, 1, 5, 3)
         ])
     self.assertEqual(
         Rectangle(-1, -1, 5, 3).make4(), [
             Rectangle(-1, 1, 2, 3),
             Rectangle(2, 1, 5, 3),
             Rectangle(-1, -1, 2, 1),
             Rectangle(2, -1, 5, 1)
         ])
Example #5
class TestRectangles(unittest.TestCase):
           
    def test_eq_Rectangles(self):
        self.assertTrue(Rectangle(1, 2)==Rectangle(1, 2))
        self.assertTrue(Rectangle(1, 2)==Rectangle(1.0, 2.0))
        self.assertTrue(Rectangle(11.13, 23.32)==Rectangle(11.13, 23.32))
        self.assertFalse(Rectangle(1, 2)==Rectangle(2, 1))
        self.assertFalse(Rectangle(11.13, 23.33)==Rectangle(11.13, 23.32))
        with self.assertRaisesRegex(ValueError, "niepoprawny typ"):  # "invalid type" (Polish)
            Rectangle(1, 5) == "Rectangle"
        
    def test_ne_Rectangles(self):
        self.assertFalse(Rectangle(1,2)!=Rectangle(1,2))
        self.assertFalse(Rectangle(1,2)!=Rectangle(1.0,2.0))
        self.assertTrue(Rectangle(11.13,23.33)!=Rectangle(11.13,23.32))
        self.assertTrue(Rectangle(1,5)!=Rectangle(5,1))
        with self.assertRaisesRegex(ValueError, "niepoprawny typ"):
            Rectangle(1, 5) == "Rectangle"

    def test_area_Rectangles(self):
        # The expected literals were missing in the original snippet;
        # following its third assertion, compare equal-size rectangles
        # placed at different positions instead.
        self.assertEqual(Rectangle(1, 1, 1).area(), Rectangle(0, 0, 1).area())
        self.assertEqual(Rectangle(1, 1, 4).area(), Rectangle(0, 0, 4).area())
        self.assertEqual(Rectangle(0.5, 0.7, 3.7).area(), Rectangle(1212, 1232, 3.7).area())

    def test_center_Rectangles(self):
        self.assertEqual(Rectangle(1, 1).center(0, 0), Rectangle(1, 1))
        self.assertEqual(Rectangle(1, 4).center(0, 0), Rectangle(1, 4))
        with self.assertRaisesRegex(ValueError, "niepoprawny typ"):
            Rectangle(0, 0).center("rect")

    def test_move_Rectangles(self):
        self.assertEqual(Rectangle(0.5, 0.7, 3.7).move(1211.5, 1231.3), Rectangle(1212, 1232, 3.7))
        self.assertEqual(Rectangle(1, 1).move(0, 0), Rectangle(1, 1))
        self.assertEqual(Rectangle(1, 1).move(1, 2), Rectangle(2, 3))
        with self.assertRaisesRegex(ValueError, "niepoprawny typ"):
            Rectangle(1, 5).move("", 1)
Example #6
def find_ratio_ofbboxes(bbox, rect_compare):
    ratio = 0
    rect_detection = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
    inter_detection = rect_detection & rect_compare
    if inter_detection:
        inter_square_detection = rect_square(*inter_detection)
        cur_square_detection = rect_square(*rect_detection)
        try:
            ratio = inter_square_detection / cur_square_detection
        except ZeroDivisionError:
            ratio = 0
    return ratio
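
find_ratio_ofbboxes returns the fraction of the detection box's area that lies inside rect_compare: it intersects the two rectangles and divides the intersection area by the detection's own area. Below is a usage sketch; it assumes, as the helper itself does, that Rectangle overloads & to yield intersection coordinates (or a falsy value when the boxes are disjoint) and that rect_square(*coords) returns the area of a coordinate tuple. The coordinate values are illustrative only.

door = Rectangle(234, 45, 281, 174)   # compare region, e.g. a door contour
bbox = [240, 50, 270, 120]            # detection box as (x1, y1, x2, y2)
ratio = find_ratio_ofbboxes(bbox, rect_compare=door)
if ratio >= 0.8:
    # at least 80% of the detection lies inside the compared region
    print("mostly inside, ratio:", round(ratio, 3))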
Example #7
 def test_move(self):
     self.assertEqual(
         Rectangle(1.5, 2, 5.5, 6).move(1.2, 7.45),
         Rectangle(2.7, 9.45, 6.7, 13.45))
     self.assertEqual(
         Rectangle(1, 2, 3, 4).move(2, 4), Rectangle(3, 6, 5, 8))
     self.assertEqual(
         Rectangle(-1.7, -2, -0.3, 4).move(-1, 2),
         Rectangle(-2.7, 0, -1.3, 6))
Example #8
 def __init__(self):
     # Init frame variables
     self.around_door_array = [234, 45, 281, 174]
     self.rect_around_door = Rectangle(self.around_door_array[0],
                                       self.around_door_array[1],
                                       self.around_door_array[2],
                                       self.around_door_array[3])
     self.first_frame = None
     self.next_frame = None
     self.font = cv2.FONT_HERSHEY_SIMPLEX
     self.delay_counter = 0
     self.movement_persistent_counter = 0
     self.fourcc = cv2.VideoWriter_fourcc(*'MP4V')
     self.fps = 20
     self.output_video = None
Example #9
class TestRectangle(unittest.TestCase):
    def setUp(self):
        self.rec1 = Rectangle(1, 2, 5, 4)
        self.rec2 = Rectangle(2, 0, 0, 2)
        self.rec3 = Rectangle(1, 1, -1, -1)
        self.rec4 = Rectangle(6, 4, 1, 2)

    def test__init__(self):
        self.assertEqual(self.rec1, Rectangle(1, 2, 5, 4))
        self.assertEqual(self.rec2, Rectangle(2, 0, 0, 2))

    def test__str__(self):
        self.assertEqual(str(self.rec1), "[(1, 2), (5, 4)]")
        self.assertEqual(str(self.rec2), "[(2, 0), (0, 2)]")

    def test__repr__(self):
        self.assertEqual(repr(self.rec1), "Rectangle[(1, 2), (5, 4)]")
        self.assertEqual(repr(self.rec2), "Rectangle[(2, 0), (0, 2)]")

    def test__eq__(self):
        self.assertFalse(self.rec1 == self.rec2)
        self.assertTrue(self.rec1 == self.rec1)

    def test__ne__(self):
        self.assertTrue(self.rec1 != self.rec2)
        self.assertFalse(self.rec1 != self.rec1)

    def test__center(self):
        self.assertEqual(self.rec1.center(), Point(3, 3))
        self.assertEqual(self.rec2.center(), Point(1, 1))

    def test__area(self):
        self.assertEqual(self.rec1.area(), 8)
        self.assertEqual(self.rec2.area(), 4)

    def test__move(self):
        self.assertEqual(self.rec1.move(3, 1), Rectangle(4, 3, 8, 5))
        self.assertEqual(self.rec2.move(-1, 2), Rectangle(1, 2, -1, 4))

    def tearDown(self): pass
Example #10
 def test_area(self):
     rect = Rectangle(8, 3)
     self.assertEqual(24, rect.area())
Example #11
 def load_doors(self):
     with open("cfg/around_doors_info.json") as doors_config:
         self.around_doors_config = json.load(doors_config)
     self.around_door_array = self.around_doors_config[self.camera_id]
     self.rect_around_door = Rectangle(self.around_door_array[0], self.around_door_array[1],
                                       self.around_door_array[2], self.around_door_array[3])
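
This loader implies that cfg/around_doors_info.json maps each camera id to a four-number [x1, y1, x2, y2] array. A hypothetical file consistent with the code (the ids and coordinates are invented for illustration):

{
    "20": [234, 45, 281, 174],
    "21": [120, 30, 300, 200]
}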
Example #12
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    output_format = 'mp4'
    video_name = 'bus4_2in_4out.mp4'
    file_path = join('data_files/videos', video_name)
    output_name = 'save_data/out_' + video_name[0:-3] + output_format
    initialize_door_by_yourself = False
    door_array = None
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    counter = Counter(counter_in=0, counter_out=0, track_id=0)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    ret, first_frame = video_capture.read()

    if door_array is None:
        if initialize_door_by_yourself:
            door_array = select_object(first_frame)[0]
            print(door_array)
        else:
            all_doors = read_door_info('data_files/doors_info.csv')
            door_array = all_doors[video_name]

    border_door = door_array[3]
    error_values = []
    truth = get_truth(video_name)
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            total_count = counter.return_total_count()
            true_total = truth.inside + truth.outside
            err = abs(total_count - true_total) / true_total
            log_res = "in video: {}\n predicted / true\n counter in: {} / {}\n counter out: {} / {}\n" \
                      " total: {} / {}\n error: {}\n______________\n".format(video_name, counter.counter_in,
                                                                             truth.inside,
                                                                             counter.counter_out, truth.outside,
                                                                             total_count, true_total, err)
            with open('log_results.txt', 'w') as file:
                file.write(log_res)
            print(log_res)
            error_values.append(err)
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        cv2.rectangle(frame, (int(door_array[0]), int(door_array[1])),
                      (int(door_array[2]), int(door_array[3])), (23, 158, 21),
                      2)

        for det in detections:
            bbox = det.to_tlbr()
            if show_detections and len(classes) > 0:
                score = "%.2f" % (det.confidence * 100) + "%"
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(int(door_array[0]), int(door_array[1]),
                                      int(door_array[2]), int(door_array[3]))
                intersection = rect_head & rect_door

                if intersection:
                    squares_coeff = rect_square(*intersection) / rect_square(
                        *rect_head)
                    cv2.putText(
                        frame,
                        score + " inter: " + str(round(squares_coeff, 3)),
                        (int(bbox[0]), int(bbox[3])), 0, 1e-3 * frame.shape[0],
                        (0, 100, 255), 5)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            # first appearence of object with id=track.id

            if track.track_id not in counter.people_init or counter.people_init[
                    track.track_id] == 0:
                counter.obj_initialized(track.track_id)
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(door_array[0], door_array[1],
                                      door_array[2], door_array[3])
                res = rect_head & rect_door
                if res:

                    inter_square = rect_square(*res)
                    head_square = rect_square(*rect_head)
                    #     was initialized in door, probably going in
                    if (inter_square / head_square) >= 0.8:
                        counter.people_init[track.track_id] = 2
                        #     initialized in the bus, mb going out
                    elif (inter_square /
                          head_square) <= 0.4 or bbox[3] > border_door:
                        counter.people_init[track.track_id] = 1
                # res is None, means that object is not in door contour
                else:
                    counter.people_init[track.track_id] = 1

                counter.people_bbox[track.track_id] = bbox
            counter.cur_bbox[track.track_id] = bbox

            adc = "%.2f" % (track.adc *
                            100) + "%"  # Average detection confidence
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 1e-3 * frame.shape[0],
                        (0, 255, 0), 5)

            if not show_detections:
                track_cls = track.cls
                cv2.putText(frame, str(track_cls),
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                cv2.putText(
                    frame, 'ADC: ' + adc,
                    (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                    1e-3 * frame.shape[0], (0, 255, 0), 1)

        id_get_lost = [
            track.track_id for track in tracker.tracks
            if track.time_since_update >= 25 and track.age >= 29
        ]
        id_inside_tracked = [
            track.track_id for track in tracker.tracks if track.age > 60
        ]
        for val in counter.people_init.keys():
            # check bbox also
            cur_c = find_centroid(counter.cur_bbox[val])
            init_c = find_centroid(counter.people_bbox[val])
            vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

            if val in id_get_lost and counter.people_init[val] != -1:
                # if vector_person < 0 then current coord is less than initialized, it means that man is going
                # in the exit direction
                if vector_person[1] > 70 and counter.people_init[
                        val] == 2:  # and counter.people_bbox[val][3] > border_door \
                    counter.get_in()

                elif vector_person[1] < -70 and counter.people_init[val] == 1:
                    counter.get_out()

                counter.people_init[val] = -1
                print(f"person left frame")
                print(f"current centroid - init : {cur_c} - {init_c}\n")
                print(f"vector: {vector_person}\n")

                del val
            # elif val in id_inside_tracked and val not in id_get_lost and counter.people_init[val] == 1 \
            #         and bb_intersection_over_union(counter.cur_bbox[val], door_array) <= 0.3 \
            #         and vector_person[1] > 0:  # and \
            #     # counter.people_bbox[val][3] > border_door:
            #     counter.get_in()
            #
            #     counter.people_init[val] = -1
            #     print(f"person is tracked for a long time")
            #     print(f"current centroid - init : {cur_c} - {init_c}\n")
            #     print(f"vector: {vector_person}\n")
            #     imaggg = cv2.line(frame, find_centroid(counter.cur_bbox[val]),
            #                       find_centroid(counter.people_bbox[val]),
            #                       (0, 0, 255), 7)

            # cv2.imshow('frame', imaggg)
            # cv2.waitKey(0)

        ins, outs = counter.show_counter()
        cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (10, 30), 0,
                    1e-3 * frame.shape[0], (255, 0, 0), 5)

        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('image', 1400, 800)
        cv2.imshow('image', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            # print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()

    mean_error = np.mean(error_values)
    print("mean error for {} video: {}".format(video_name, mean_error))
Example #13
 def setUp(self):
     self.rec1 = Rectangle(1, 2, 5, 4)
     self.rec2 = Rectangle(2, 0, 0, 2)
     self.rec3 = Rectangle(1, 1, -1, -1)
     self.rec4 = Rectangle(6, 4, 1, 2)
Example #14
def main(yolo):
    # Definition of the parameters
    with open("cfg/detection_tracker_cfg.json") as detection_config:
        detect_config = json.load(detection_config)
    with open("cfg/doors_info.json") as doors_config:
        doors_config = json.load(doors_config)
    with open("cfg/around_doors_info.json") as around_doors_config:
        around_doors_config = json.load(around_doors_config)
    model_filename = detect_config["tracking_model"]
    input_folder, output_folder = detect_config["input_folder"], detect_config[
        "output_folder"]
    meta_folder = detect_config["meta_folder"]
    output_format = detect_config["output_format"]

    # Deep SORT
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    show_detections = True
    asyncVideo_flag = False

    check_gpu()

    # from here should start loop to process videos from folder
    # for video_name in os.listdir(input_folder):

    HOST = "localhost"
    PORT = 8075
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((HOST, PORT))
        sock.listen()
        conn, addr = sock.accept()
        with conn:
            print('Connected by', addr)
            #  loop over all videos
            while True:
                data = conn.recv(1000)
                video_motion_list = data.decode("utf-8").split(';')
                videos_que = deque()
                for video_motion in video_motion_list:
                    videos_que.append(video_motion)
                video_name = videos_que.popleft()

                if not video_name.endswith(output_format):
                    continue

                print('elements in que', len(videos_que))
                print("opening video: {}".format(video_name))
                full_video_path = join(input_folder, video_name)
                # full_video_path = "rtsp://*****:*****@192.168.1.52:554/1/h264major"

                meta_name = meta_folder + video_name[:-4] + ".json"
                with open(meta_name) as meta_config_json:
                    meta_config = json.load(meta_config_json)
                camera_id = meta_config["camera_id"]
                if not os.path.exists(output_folder + str(camera_id)):
                    os.mkdir(output_folder + str(camera_id))

                output_name = output_folder + camera_id + '/out_' + video_name
                counter = Counter(counter_in=0, counter_out=0, track_id=0)
                tracker = Tracker(metric)

                if asyncVideo_flag:
                    video_capture = VideoCaptureAsync(full_video_path)
                    video_capture.start()
                    w = int(video_capture.cap.get(3))
                    h = int(video_capture.cap.get(4))
                else:
                    video_capture = cv2.VideoCapture(full_video_path)
                    w = int(video_capture.get(3))
                    h = int(video_capture.get(4))

                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter(output_name, fourcc, 25, (w, h))

                door_array = doors_config["{}".format(camera_id)]
                around_door_array = tuple(
                    around_doors_config["{}".format(camera_id)])
                rect_door = Rectangle(door_array[0], door_array[1],
                                      door_array[2], door_array[3])
                border_door = door_array[3]
                #  loop over video
                save_video_flag = False
                while True:
                    fps_imutils = imutils.video.FPS().start()
                    ret, frame = video_capture.read()
                    if not ret:
                        with open('videos_saved/log_results.txt', 'a') as log:
                            log.write(
                                'processed (ret). Time: {}, camera id: {}\n'.
                                format(video_name, camera_id))
                        break
                    t1 = time.time()
                    # lost_ids = counter.return_lost_ids()
                    image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
                    # image = image.crop(around_door_array)
                    boxes, confidence, classes = yolo.detect_image(image)

                    features = encoder(frame, boxes)
                    detections = [
                        Detection(bbox, confidence, cls, feature)
                        for bbox, confidence, cls, feature in zip(
                            boxes, confidence, classes, features)
                    ]

                    # Run non-maxima suppression.
                    boxes = np.array([d.tlwh for d in detections])
                    scores = np.array([d.confidence for d in detections])
                    classes = np.array([d.cls for d in detections])
                    indices = preprocessing.non_max_suppression(
                        boxes, nms_max_overlap, scores)
                    detections = [detections[i] for i in indices]

                    # Call the tracker
                    tracker.predict()
                    tracker.update(detections)

                    cv2.rectangle(frame,
                                  (int(door_array[0]), int(door_array[1])),
                                  (int(door_array[2]), int(door_array[3])),
                                  (23, 158, 21), 3)
                    if len(detections) != 0:
                        counter.someone_inframe()
                        for det in detections:
                            bbox = det.to_tlbr()
                            if show_detections and len(classes) > 0:
                                score = "%.2f" % (det.confidence * 100) + "%"
                                cv2.rectangle(frame,
                                              (int(bbox[0]), int(bbox[1])),
                                              (int(bbox[2]), int(bbox[3])),
                                              (255, 0, 0), 3)
                    else:
                        if counter.need_to_clear():
                            counter.clear_all()
                    # identities = [track.track_id for track in tracker.tracks]
                    # counter.update_identities(identities)

                    for track in tracker.tracks:
                        if not track.is_confirmed(
                        ) or track.time_since_update > 1:
                            continue
                        bbox = track.to_tlbr()

                        if track.track_id not in counter.people_init or counter.people_init[
                                track.track_id] == 0:
                            # counter.obj_initialized(track.track_id)
                            ratio_init = find_ratio_ofbboxes(
                                bbox=bbox, rect_compare=rect_door)

                            if ratio_init > 0:
                                if ratio_init >= 0.5:  # and bbox[3] < door_array[3]:
                                    counter.people_init[
                                        track.track_id] = 2  # man in the door
                                elif ratio_init < 0.5:  # and bbox[3] > door_array[3]:  # initialized in the outside
                                    counter.people_init[track.track_id] = 1
                            else:
                                counter.people_init[track.track_id] = 1
                            counter.people_bbox[track.track_id] = bbox
                        counter.cur_bbox[track.track_id] = bbox

                        adc = "%.2f" % (track.adc * 100
                                        ) + "%"  # Average detection confidence
                        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])),
                                      (255, 255, 255), 2)
                        cv2.putText(frame, "ID: " + str(track.track_id),
                                    (int(bbox[0]), int(bbox[1]) + 50), 0,
                                    1e-3 * frame.shape[0], (0, 255, 0), 3)

                        if not show_detections:
                            track_cls = track.cls
                            cv2.putText(frame, str(track_cls),
                                        (int(bbox[0]), int(bbox[3])), 0,
                                        1e-3 * frame.shape[0], (0, 255, 0), 3)
                            cv2.putText(frame, 'ADC: ' + adc,
                                        (int(bbox[0]),
                                         int(bbox[3] + 2e-2 * frame.shape[1])),
                                        0, 1e-3 * frame.shape[0], (0, 255, 0),
                                        3)
                        # if track.time_since_update >= 15:
                        #     id_get_lost.append(track.track_id)
                    id_get_lost = [
                        track.track_id for track in tracker.tracks
                        if track.time_since_update >= 15
                    ]

                    for val in counter.people_init.keys():
                        ratio = 0
                        cur_c = find_centroid(counter.cur_bbox[val])
                        init_c = find_centroid(counter.people_bbox[val])
                        if val in id_get_lost and counter.people_init[
                                val] != -1:
                            ratio = find_ratio_ofbboxes(
                                bbox=counter.cur_bbox[val],
                                rect_compare=rect_door)
                            if counter.people_init[val] == 2 \
                                    and ratio < 0.6:  # and counter.people_bbox[val][3] > border_door \
                                counter.get_out()
                                save_video_flag = True
                                print(counter.people_init[val], ratio)
                            elif counter.people_init[val] == 1 \
                                    and ratio >= 0.6:
                                counter.get_in()
                                save_video_flag = True
                                print(counter.people_init[val], ratio)
                            counter.people_init[val] = -1

                    ins, outs = counter.return_counter()
                    cv2.rectangle(frame, (frame.shape[1] - 150, 0),
                                  (frame.shape[1], 50), (0, 0, 0), -1, 8)
                    cv2.putText(frame, "in: {}, out: {} ".format(ins, outs),
                                (frame.shape[1] - 140, 20), 0,
                                1e-3 * frame.shape[0], (255, 255, 255), 3)
                    out.write(frame)
                    fps_imutils.update()
                    if not asyncVideo_flag:
                        pass
                        # fps = (1. / (time.time() - t1))
                        # print("FPS = %f" % fps)

                        # if len(fpeses) < 15:
                        #     fpeses.append(round(fps, 2))
                        #
                        # elif len(fpeses) == 15:
                        #     # fps = round(np.median(np.array(fpeses)))
                        #     median_fps = float(np.median(np.array(fpeses)))
                        #     fps = round(median_fps, 1)
                        #     print('max fps: ', fps)
                        #     # fps = 20
                        #     counter.fps = fps
                        #     fpeses.append(fps)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                if asyncVideo_flag:
                    video_capture.stop()
                    del video_capture
                else:
                    video_capture.release()

                if save_video_flag:
                    with open('videos_saved/log_results.txt', 'a') as log:
                        log.write(
                            'detected!!! time: {}, camera id: {}, detected move in: {}, out: {}\n'
                            .format(video_name, camera_id, ins, outs))
                        log.write('video written {}\n\n'.format(output_name))
                    out.release()
                else:
                    if out.isOpened():
                        out.release()
                        if os.path.isfile(output_name):
                            os.remove(output_name)

                if os.path.isfile(full_video_path):
                    os.remove(full_video_path)
                if os.path.isfile(meta_name):
                    os.remove(meta_name)
                save_video_flag = False
                cv2.destroyAllWindows()
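
The socket loop above accepts one client on localhost:8075 and repeatedly reads up to 1000 bytes, decoding each payload as a UTF-8, ';'-separated list of video file names. A minimal client sketch under that reading of the protocol (host and port come from the code; the file names are made up):

import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.connect(("localhost", 8075))
    # One payload; the server splits it on ';' and queues each video.
    sock.sendall("cam20_001.mp4;cam20_002.mp4".encode("utf-8"))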
Example #15
 def test_cover(self):
     self.assertEqual(self.rec1.cover(self.rec2), Rectangle(0, 2, 5, 4))
     self.assertEqual(self.rec1.cover(self.rec3), Rectangle(-1, -1, 5, 4))
Example #16
 def test_move(self):
     self.assertEqual(self.rec1.move(1, 1), Rectangle(2, 3, 6, 5))
     self.assertEqual(self.rec2.move(0, -1), Rectangle(0, 1, 2, 3))
Example #17
class TestRectangles(unittest.TestCase):

    def setUp(self):
        self.one = Rectangle(1, 2, 3, 4)
        self.two = Rectangle(2, 2, 2, 2)
        self.three = Rectangle(0, 0, 2, 2)

    def test_center(self):
        self.assertEqual(self.one.center(), '(2.0, 3.0)')
        self.assertEqual(self.two.center(), '(2.0, 2.0)')
        self.assertEqual(self.three.center(), '(1.0, 1.0)')

    def test_area(self):
        self.assertEqual(self.one.area(), 4)
        self.assertEqual(self.two.area(), 0)
        self.assertEqual(self.three.area(), 4)

    def test_move(self):
        self.assertEqual(self.one.move(1, 1), '[(2, 3),(4, 5)]')
        self.assertEqual(self.two.move(0, 0), '[(2, 2),(2, 2)]')
        self.assertEqual(self.three.move(-1, -1), '[(-1, -1),(1, 1)]')

    def test_intersection(self):
        self.assertEqual(self.three.intersection(Rectangle(-1, -1, 1, 1)), Rectangle(0, 0, 1, 1))
        self.assertEqual(self.three.intersection(Rectangle(1, 1, 3, 3)), Rectangle(1, 1, 2, 2))
        self.assertEqual(self.three.intersection(Rectangle(0, 1, 2, 3)), Rectangle(0, 1, 2, 2))

    def test_cover(self):
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(0, 2, 1, 3)), Rectangle(0, 0, 1, 3))
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(2, 0, 3, 1)), Rectangle(0, 0, 3, 1))
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(0, -2, 1, -1)), Rectangle(0, -2, 1, 1))
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(-2, 0, -1, 1)), Rectangle(-2, 0, 1, 1))
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(2, 2, 3, 3)), Rectangle(0, 0, 3, 3))
        self.assertEqual(Rectangle(0, 0, 1, 1).cover(Rectangle(-3, -3, -2, -2)), Rectangle(-3, -3, 1, 1))

    def test_make4(self):
        self.assertEqual(self.two.make4(), [Rectangle(2, 2, 1, 1),
                                            Rectangle(1, 1, 2, 2),
                                            Rectangle(2, 1, 1, 2),
                                            Rectangle(1, 2, 2, 1)])
Example #18
 def setUp(self):
     self.one = Rectangle(1, 2, 3, 4)
     self.two = Rectangle(2, 2, 2, 2)
     self.three = Rectangle(0, 0, 2, 2)
Example #19
 def test_volume_calculation(self):
     c = Rectangle(5, 4, 7)
     self.assertEqual(c.calculate_volume(), 140)
Example #20
 def test_negative_value(self):
     with self.assertRaises(ValueError) as context:
         c = Rectangle(-1, 2, 6)
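
The tests in Examples #19 and #20 above, together with test_init_raise and test_init (Example #29) below, exercise a three-argument Rectangle(length, width, height) that validates its inputs. A minimal sketch consistent with those assertions, offered as an assumed reconstruction rather than the tested implementation:

class Rectangle:
    """Assumed three-argument variant (really a cuboid, despite the name)."""

    def __init__(self, length, width, height):
        for value in (length, width, height):
            # Non-numeric or negative dimensions raise ValueError, as
            # test_negative_value and test_init_raise expect.
            if not isinstance(value, (int, float)) or value < 0:
                raise ValueError("dimensions must be non-negative numbers")
        self.length = length
        self.width = width
        self.height = height

    def calculate_volume(self):
        # Rectangle(5, 4, 7).calculate_volume() == 140
        return self.length * self.width * self.height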
Example #21
 def test_init_raise(self):
     with self.assertRaises(ValueError):
         c = Rectangle('length', (1, 2, 3), {4: 5})
Example #22
 def test_not_equal(self):
     rectangle_a = Rectangle(([4, 2], [2, 1], [2, 2], [4, 1]))
     rectangle_b = Rectangle(([4, 2], [2, 2], [4, 4], [2, 4]))
     self.assertFalse(rectangle_a == rectangle_b)
Example #23
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0
    output_format = 'mp4'

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    fpeses = []

    check_gpu()
    video_name = 'test1.mp4'

    print("opening video: {}".format(video_name))
    file_path = join('data_files/videos', video_name)
    output_name = 'save_data/out_' + video_name[0:-3] + output_format
    counter = Counter(counter_in=0, counter_out=0, track_id=0)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()
        w = int(video_capture.cap.get(3))
        h = int(video_capture.cap.get(4))
    else:
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))

    if writeVideo_flag:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))

    left_array = [0, 0, w / 2, h]
    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    rect_left = Rectangle(left_array[0], left_array[1], left_array[2],
                          left_array[3])

    border_door = left_array[3]
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            with open('log_results.txt', 'a') as log:
                log.write('1')
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        cv2.rectangle(frame, (int(left_array[0]), int(left_array[1])),
                      (int(left_array[2]), int(left_array[3])), (23, 158, 21),
                      3)
        if len(detections) != 0:
            counter.someone_inframe()
            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    score = "%.2f" % (det.confidence * 100) + "%"
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)
        else:
            if counter.need_to_clear():
                counter.clear_all()

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()

            if track.track_id not in counter.people_init or counter.people_init[
                    track.track_id] == 0:
                counter.obj_initialized(track.track_id)
                ratio_init = find_ratio_ofbboxes(bbox=bbox,
                                                 rect_compare=rect_left)

                if ratio_init > 0:
                    if ratio_init >= 0.8 and bbox[3] < left_array[3]:
                        counter.people_init[
                            track.track_id] = 2  # man in left side
                    elif ratio_init < 0.8 and bbox[3] > left_array[
                            3]:  # initialized in the bus, mb going out
                        counter.people_init[track.track_id] = 1
                else:
                    counter.people_init[track.track_id] = 1
                counter.people_bbox[track.track_id] = bbox
            counter.cur_bbox[track.track_id] = bbox

            adc = "%.2f" % (track.adc *
                            100) + "%"  # Average detection confidence
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1]) + 50), 0,
                        1e-3 * frame.shape[0], (0, 255, 0), 5)

            if not show_detections:
                track_cls = track.cls
                cv2.putText(frame, str(track_cls),
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                cv2.putText(
                    frame, 'ADC: ' + adc,
                    (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                    1e-3 * frame.shape[0], (0, 255, 0), 1)

        id_get_lost = [
            track.track_id for track in tracker.tracks
            if track.time_since_update >= 5
        ]

        # TODO clear people_init and other dicts
        for val in counter.people_init.keys():
            ratio = 0
            cur_c = find_centroid(counter.cur_bbox[val])
            init_c = find_centroid(counter.people_bbox[val])
            vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

            if val in id_get_lost and counter.people_init[val] != -1:
                ratio = find_ratio_ofbboxes(bbox=counter.cur_bbox[val],
                                            rect_compare=rect_left)

                if vector_person[0] > 200 and counter.people_init[val] == 2 \
                        and ratio < 0.7:  # and counter.people_bbox[val][3] > border_door \
                    counter.get_out()
                    print(vector_person[0], counter.people_init[val], ratio)

                elif vector_person[0] < -100 and counter.people_init[val] == 1 \
                        and ratio >= 0.7:
                    counter.get_in()
                    print(vector_person[0], counter.people_init[val], ratio)

                counter.people_init[val] = -1
                del val

        ins, outs = counter.show_counter()
        cv2.rectangle(frame, (700, 0), (950, 50), (0, 0, 0), -1, 8)
        cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (710, 35), 0,
                    1e-3 * frame.shape[0], (255, 255, 255), 3)

        cv2.namedWindow('video', cv2.WINDOW_NORMAL)
        # cv2.resizeWindow('video', 1422, 800)
        cv2.imshow('video', frame)

        if writeVideo_flag:
            out.write(frame)

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % fps)

            if len(fpeses) < 15:
                fpeses.append(round(fps, 2))

            elif len(fpeses) == 15:
                # fps = round(np.median(np.array(fpeses)))
                median_fps = float(np.median(np.array(fpeses)))
                fps = round(median_fps, 1)
                print('max fps: ', fps)
                fps = 20
                counter.fps = fps
                fpeses.append(fps)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
Example #24
 def test_perimeter(self):
     rect = Rectangle(2, 5)
     self.assertEqual(14, rect.perimeter())
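
Examples #10 and #24 use yet another shape of the class: a two-argument Rectangle built from width and height. A minimal assumed sketch matching both assertions:

class Rectangle:
    """Assumed two-argument variant behind the area/perimeter tests."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        return self.width * self.height        # Rectangle(8, 3).area() == 24

    def perimeter(self):
        return 2 * (self.width + self.height)  # Rectangle(2, 5).perimeter() == 14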
Example #25
from rectangles import Rectangle

rect1 = Rectangle(-2, 1, 4, -3)
rect2 = Rectangle(4, -2, 0, 2)

print(rect1.__str__())
print(rect2.__repr__())

print(rect1.eq(rect2))
print(rect2.ne(rect1))

print(str(rect1.center()))

print(rect2.area())

rect1.move(2, -2)
print(rect1.__str__())

Example #26
 def test__init__(self):
     self.assertEqual(self.rec1, Rectangle(1, 2, 5, 4))
     self.assertEqual(self.rec2, Rectangle(2, 0, 0, 2))
Example #27
 def test_intersection(self):
     self.assertEqual(self.rec1.intersection(self.rec2),
                      Rectangle(1, 2, 2, 4))
Example #28
 def test__move(self):
     self.assertEqual(self.rec1.move(3, 1), Rectangle(4, 3, 8, 5))
     self.assertEqual(self.rec2.move(-1, 2), Rectangle(1, 2, -1, 4))
Example #29
 def test_init(self):
     c = Rectangle(5, 3, 10)
     self.assertEqual(c.length, 5)
     self.assertEqual(c.width, 3)
     self.assertEqual(c.height, 10)
Example #30
 def test_dots(self):
     rectangle = Rectangle(([4, 2], [2, 1], [2, 2], [4, 1]))
     self.assertListEqual(rectangle.A, [2, 1])
     self.assertListEqual(rectangle.B, [2, 2])
     self.assertListEqual(rectangle.C, [4, 1])
     self.assertListEqual(rectangle.D, [4, 2])
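
test_dots implies a constructor that takes four unordered corner points and exposes them as A, B, C, D; sorting the corners lexicographically reproduces exactly the expected assignment. A minimal assumed sketch covering test_dots and test_not_equal:

class Rectangle:
    """Assumed corner-based variant behind test_dots and test_not_equal."""

    def __init__(self, dots):
        # sorted([[4, 2], [2, 1], [2, 2], [4, 1]]) ->
        # A=[2, 1], B=[2, 2], C=[4, 1], D=[4, 2]
        self.A, self.B, self.C, self.D = sorted(dots)

    def __eq__(self, other):
        return (self.A, self.B, self.C, self.D) == \
               (other.A, other.B, other.C, other.D)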
Example #31
 def setUp(self):
     self.rec1 = Rectangle(1, 2, 5, 4)
     self.rec2 = Rectangle(0, 2, 2, 4)
     self.rec3 = Rectangle(-1, -1, 1, 1)
Example #32
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    output_format = 'mp4'

    initialize_door_by_yourself = False
    door_array = None
    # Deep SORT
    model_filename = '../model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    error_values = []
    check_gpu()
    files = sorted(os.listdir('data_files/videos'))

    for video_name in files:
        print("opening video: {}".format(video_name))
        file_path = join('data_files/videos', video_name)
        output_name = 'save_data/out_' + video_name[0:-3] + output_format
        counter = Counter(counter_in=0, counter_out=0, track_id=0)
        truth = get_truth(video_name)

        if asyncVideo_flag:
            video_capture = VideoCaptureAsync(file_path)
        else:
            video_capture = cv2.VideoCapture(file_path)

        if asyncVideo_flag:
            video_capture.start()

        if writeVideo_flag:
            if asyncVideo_flag:
                w = int(video_capture.cap.get(3))
                h = int(video_capture.cap.get(4))
            else:
                w = int(video_capture.get(3))
                h = int(video_capture.get(4))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))
            frame_index = -1

        fps = 0.0
        fps_imutils = imutils.video.FPS().start()

        all_doors = read_door_info('data_files/doors_info_links.json')
        door_array = all_doors[video_name]
        rect_door = Rectangle(door_array[0], door_array[1], door_array[2],
                              door_array[3])

        border_door = door_array[3]
        while True:
            ret, frame = video_capture.read()  # frame shape 640*480*3
            if not ret:
                y1 = (counter.counter_in - truth.inside)**2
                y2 = (counter.counter_out - truth.outside)**2
                total_count = counter.return_total_count()
                true_total = truth.inside + truth.outside
                if true_total != 0:
                    err = abs(total_count - true_total) / true_total
                else:
                    err = abs(total_count - true_total)
                mse = (y1 + y2) / 2
                log_res = "in video: {}\n predicted / true\n counter in: {} / {}\n counter out: {} / {}\n" \
                          " total: {} / {}\n error: {}\n mse error: {}\n______________\n".format(video_name,
                                                                                                 counter.counter_in,
                                                                                                 truth.inside,
                                                                                                 counter.counter_out,
                                                                                                 truth.outside,
                                                                                                 total_count,
                                                                                                 true_total, err, mse)
                with open('../log_results.txt', 'a') as log:
                    log.write(log_res)
                print(log_res)
                error_values.append(err)
                break

            t1 = time.time()

            image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
            boxes, confidence, classes = yolo.detect_image(image)

            features = encoder(frame, boxes)
            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.cls for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            cv2.rectangle(frame, (int(door_array[0]), int(door_array[1])),
                          (int(door_array[2]), int(door_array[3])),
                          (23, 158, 21), 3)
            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    score = "%.2f" % (det.confidence * 100) + "%"
                    # rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                    # rect_door = Rectangle( int(door_array[0]), int(door_array[1]), int(door_array[2]), int(door_array[3]) )
                    # intersection = rect_head & rect_door
                    #
                    # if intersection:
                    #     squares_coeff = rect_square(*intersection)/ rect_square(*rect_head)
                    #     cv2.putText(frame, score + " inter: " + str(round(squares_coeff, 3)), (int(bbox[0]), int(bbox[3])), 0,
                    #             1e-3 * frame.shape[0], (0, 100, 255), 5)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                # first appearence of object with id=track.id

                if track.track_id not in counter.people_init or counter.people_init[
                        track.track_id] == 0:
                    counter.obj_initialized(track.track_id)
                    rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])

                    intersection = rect_head & rect_door
                    if intersection:

                        intersection_square = rect_square(*intersection)
                        head_square = rect_square(*rect_head)
                        rat = intersection_square / head_square

                        #     was initialized in door, probably going in
                        if rat >= 0.7:
                            counter.people_init[track.track_id] = 2
                            #     initialized in the bus, mb going out
                        elif rat <= 0.4 or bbox[3] > border_door:
                            counter.people_init[track.track_id] = 1
                        #     initialized between the exit and bus, not obvious state
                        elif rat > 0.4 and rat < 0.7:
                            counter.people_init[track.track_id] = 3
                            counter.rat_init[track.track_id] = rat
                    # res is None, means that object is not in door contour
                    else:
                        counter.people_init[track.track_id] = 1
                    counter.people_bbox[track.track_id] = bbox
                counter.cur_bbox[track.track_id] = bbox

                adc = "%.2f" % (track.adc *
                                100) + "%"  # Average detection confidence
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 5)

                if not show_detections:
                    track_cls = track.cls
                    cv2.putText(frame, str(track_cls),
                                (int(bbox[0]), int(bbox[3])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 1)
                    cv2.putText(
                        frame, 'ADC: ' + adc,
                        (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])),
                        0, 1e-3 * frame.shape[0], (0, 255, 0), 1)

            id_get_lost = [
                track.track_id for track in tracker.tracks
                if track.time_since_update >= 35
            ]
            # and track.age >= 29]
            # id_inside_tracked = [track.track_id for track in tracker.tracks if track.age > 60]
            for val in counter.people_init.keys():
                # check bbox also
                inter_square = 0
                cur_square = 0
                ratio = 0
                cur_c = find_centroid(counter.cur_bbox[val])
                init_c = find_centroid(counter.people_bbox[val])
                vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

                if val in id_get_lost and counter.people_init[val] != -1:
                    rect_cur = Rectangle(counter.cur_bbox[val][0],
                                         counter.cur_bbox[val][1],
                                         counter.cur_bbox[val][2],
                                         counter.cur_bbox[val][3])
                    inter = rect_cur & rect_door
                    if inter:

                        inter_square = rect_square(*inter)
                        cur_square = rect_square(*rect_cur)
                        try:
                            ratio = inter_square / cur_square
                        except ZeroDivisionError:
                            ratio = 0

                    # if vector_person < 0 then current coord is less than initialized, it means that man is going
                    # in the exit direction
                    if vector_person[1] > 70 and counter.people_init[val] == 2 \
                            and ratio < 0.9:  # and counter.people_bbox[val][3] > border_door \
                        counter.get_in()

                    elif vector_person[1] < -70 and counter.people_init[val] == 1 \
                            and ratio >= 0.6:
                        counter.get_out()

                    elif vector_person[1] < -70 and counter.people_init[val] == 3 \
                            and ratio > counter.rat_init[val] and ratio >= 0.6:
                        counter.get_out()
                    elif vector_person[1] > 70 and counter.people_init[val] == 3 \
                            and ratio < counter.rat_init[val] and ratio < 0.6:
                        counter.get_in()

                    counter.people_init[val] = -1
                    del val

            ins, outs = counter.show_counter()
            cv2.rectangle(frame, (0, 0), (250, 50), (0, 0, 0), -1, 8)
            cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (10, 35),
                        0, 1e-3 * frame.shape[0], (255, 255, 255), 3)

            cv2.namedWindow('video', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('video', 1422, 800)
            cv2.imshow('video', frame)

            if writeVideo_flag:
                # save a frame
                out.write(frame)
                frame_index = frame_index + 1

            fps_imutils.update()

            if not asyncVideo_flag:
                fps = (fps + (1. / (time.time() - t1))) / 2
                # print("FPS = %f" % fps)

            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        fps_imutils.stop()
        print('imutils FPS: {}'.format(fps_imutils.fps()))

        if asyncVideo_flag:
            video_capture.stop()
        else:
            video_capture.release()

        if writeVideo_flag:
            out.release()

        cv2.destroyAllWindows()
        del door_array

    mean_error = np.mean(error_values)
    print("mean error for {} videos: {}".format(len(files), mean_error))