Example #1
0
def test_get_centroid():
    """get_centroid must return the 2-d center point of a bounding box."""
    box = [1, 1, 4, 4]
    center = get_centroid(box)
    assert type(center) is tuple, 'centroid is a tuple'
    assert len(center) == 2, 'centroid is a 2d coordinate (x, y)'
    assert center == (3, 3), 'the centroid (center point) of box [1, 1, 4, 4] is (3, 3)'
Example #2
0
def _get_dynamic_lines(bbox, oldbbox, what):
    """Return the motion line(s) joining the old bounding box to the new one.

    Each line is a pair of points [old_point, new_point] for one of the four
    corners or the centroid.  `what` selects a single line ('tl', 'tr', 'bl',
    'br' or 'cc') or all four corner lines at once ('corners').
    """
    left, top, width, height = bbox
    old_left, old_top, old_width, old_height = oldbbox
    # The blob already caches its current centroid (but not the previous
    # one); recomputing both here avoids threading an extra parameter around.
    centroid = get_centroid(bbox)
    old_centroid = get_centroid(oldbbox)
    lines = {
        'tl': [(old_left, old_top), (left, top)],
        'tr': [(old_left + old_width, old_top), (left + width, top)],
        'bl': [(old_left, old_top + old_height), (left, top + height)],
        'br': [(old_left + old_width, old_top + old_height),
               (left + width, top + height)],
        'cc': [old_centroid, centroid],
    }
    if what != 'corners':
        return [lines[what]]
    return [lines[corner] for corner in ('tl', 'tr', 'bl', 'br')]
Example #3
0
def _has_crossed_counting_line(counting_line, bloblines, blob, mode):
    '''
    Check whether at least one object line crosses the counting line.

    counting_line: dict with a 'line' entry (the segment to cross) and the
        optional entries 'direction' ('left' or anything else) and
        'dir_measurement' ('previous' or, implicitly, 'first') that enable
        and configure the direction test.
    bloblines: candidate object lines (segments) to test for intersection.
    blob: tracked object; its centroid/history feeds the direction test.
    mode: compared against the module-level touch_mode to decide how the
        direction line is built when dir_measurement == 'previous'.
    Returns True on a (direction-consistent, if configured) crossing.
    '''
    def lines_crossprod(l1, l2):
        # 2-d cross product of the two segments' direction vectors; its
        # sign encodes their relative orientation.
        v1 = l1[1] - l1[0]
        v2 = l2[1] - l2[0]
        return np.cross(v1, v2)

    cline = counting_line['line']
    # this function can be made a bit more efficient but less readable
    for bl in bloblines:
        # o is the orientation of bl relative to cline, reused below when
        # dir_measurement == 'previous' outside touch mode.
        intersect, o = _line_segments_intersect(cline, bl)
        if intersect:
            # test direction
            # NOTE: in all cases (including dir_measurement=='previous' with
            # mode==cross_mode), if the direction is wrong for one line it is
            # wrong for all of them, so on a bad direction we can return False
            # immediately instead of continuing the loop.
            direction = counting_line.get('direction', None)
            if direction is None:
                # No direction configured: any crossing counts.
                return True
            dirv = 1 if direction == 'left' else -1
            if counting_line.get('dir_measurement', None) == 'previous':
                if mode == touch_mode:
                    # Direction measured from the previous frame's centroid.
                    dirline = [
                        get_centroid(blob.old_bounding_box), blob.centroid
                    ]
                else:
                    # Reuse the already-computed orientation o of the
                    # current blob line instead of building a direction line.
                    return dirv * o > 0
            else:  # dir_measurement=='first'
                # Direction measured from where the blob was first detected.
                dirline = [blob.position_first_detected, blob.centroid]
            return lines_crossprod(np.array(dirline),
                                   np.array(cline)) * dirv > 0
    return False
def add_new_blobs(boxes, classes, confidences, blobs, frame, tracker, counting_line, line_position, mcdf):
    '''
    Adds new blobs or updates existing ones.
    '''
    matched_blob_ids = []
    for index, box in enumerate(boxes):
        vehicle_type = classes[index] if classes is not None else None
        vehicle_confidence = confidences[index] if confidences is not None else None
        box_tracker = get_tracker(tracker, box, frame)
        centroid = get_centroid(box)

        # First uncounted blob overlapping this box enough is the match.
        match = next(
            ((bid, b) for bid, b in blobs.items()
             if not b.counted and get_overlap(box, b.bounding_box) >= 0.7),
            None)

        if match is not None:
            matched_id, blob = match
            if matched_id not in matched_blob_ids:
                blob.num_consecutive_detection_failures = 0
                matched_blob_ids.append(matched_id)
            blob.update(box, vehicle_type, vehicle_confidence, box_tracker)

            logger.debug('Blob updated.', extra={
                'meta': {
                    'cat': 'BLOB_UPSERT',
                    'vehicle_id': matched_id,
                    'bounding_box': blob.bounding_box,
                    'type': blob.type,
                    'type_confidence': blob.type_confidence,
                    'image': get_base64_image(get_box_image(frame, blob.bounding_box)),
                },
            })
        elif not is_passed_counting_line(centroid, counting_line, line_position):
            new_blob = Blob(box, vehicle_type, vehicle_confidence, box_tracker)
            new_id = generate_vehicle_id()
            blobs[new_id] = new_blob

            logger.debug('Blob created.', extra={
                'meta': {
                    'cat': 'BLOB_UPSERT',
                    'vehicle_id': new_id,
                    'bounding_box': new_blob.bounding_box,
                    'type': new_blob.type,
                    'type_confidence': new_blob.type_confidence,
                    'image': get_base64_image(get_box_image(frame, new_blob.bounding_box)),
                },
            })

    blobs = remove_stray_blobs(blobs, matched_blob_ids, mcdf)
    return blobs
Example #5
0
def add_new_blobs(boxes, classes, confidences, blobs, frame, tracker,
                  counting_line, line_position, mcdf):
    '''
    Add new blobs or update existing ones.

    Each detected box is matched against the tracked blobs (IOU > 0.5 and
    not yet counted); a matched blob is refreshed, while an unmatched box
    spawns a new blob unless its centroid has already passed the counting
    line.  Stray blobs are pruned before the updated dict is returned.
    '''
    matched_blob_ids = []
    for i, box in enumerate(boxes):
        # classes/confidences may be None when the detector does not
        # provide them; compare with `is not None`, never `!= None`.
        _type = classes[i] if classes is not None else None
        _confidence = confidences[i] if confidences is not None else None
        _tracker = get_tracker(tracker, box, frame)

        box_centroid = get_centroid(box)
        match_found = False
        for _id, blob in blobs.items():
            if not blob.counted and get_iou(box, blob.bounding_box) > 0.5:
                match_found = True
                if _id not in matched_blob_ids:
                    blob.num_consecutive_detection_failures = 0
                    matched_blob_ids.append(_id)
                blob.update(box, _type, _confidence, _tracker)

                log_info(
                    'Blob updated.', {
                        'cat': 'BLOB_UPSERT',
                        'vehicle_id': _id,
                        'bounding_box': blob.bounding_box,
                        'type': blob.type,
                        'type_confidence': blob.type_confidence,
                        'image': get_base64_image(
                            get_box_image(frame, blob.bounding_box))
                    })
                break

        if not match_found and not is_passed_counting_line(
                box_centroid, counting_line, line_position):
            _blob = Blob(box, _type, _confidence, _tracker)
            blob_id = generate_vehicle_id()
            blobs[blob_id] = _blob

            log_info(
                'Blob created.', {
                    'cat': 'BLOB_UPSERT',
                    'vehicle_id': blob_id,
                    'bounding_box': _blob.bounding_box,
                    'type': _blob.type,
                    'type_confidence': _blob.type_confidence,
                    'image': get_base64_image(
                        get_box_image(frame, _blob.bounding_box))
                })

    blobs = remove_stray_blobs(blobs, matched_blob_ids, mcdf)
    return blobs
    def handle_bounding_boxes(self):
        """
        For each bounding box:
        1. Remove bounding boxes with height / width that are 0.
        2. Remove boxes that are too large relative to the frame size, or
           have disproportional width-to-height ratios.
        3. Remove boxes whose centroids are out of the detection roi.
        4. Remove boxes that have iou greater than the provided overlap
           threshold (non-maximum suppression).
        :return: the indexes of the picked bounding boxes
        """

        scores = self.scores[:, 0]
        # initialize the list of picked indexes
        pick = []

        # grab the coordinates of the bounding boxes
        x_min = self.boxes[:, 0]
        y_min = self.boxes[:, 1]
        x_max = self.boxes[:, 2]
        y_max = self.boxes[:, 3]

        # compute the area of the bounding boxes; used later for the IOU
        # denominator during suppression
        area = (x_max - x_min) * (y_max - y_min)

        # sort the indexes by score (ascending, so the best box is last);
        # note: one could use a priority queue of size max_num_boxes, but
        # that's probably overkill
        idxs = np.argsort(scores)

        # keep looping while some indexes still remain in the indexes list
        while len(idxs) > 0:
            # grab the last index (highest remaining score); since idxs is
            # sorted ascending, a score below threshold here means every
            # remaining box is also below threshold, so we can stop
            last = len(idxs) - 1
            i = idxs[last]
            if scores[i] < self.score_threshold:
                return pick

            x_left, y_bottom, x_right, y_top = self.translate_coord(
                self.boxes[i])
            width = x_right - x_left
            height = y_top - y_bottom
            # BUG FIX: the original used `height is 0 or width is 0` —
            # identity comparison with int literals is unreliable (and a
            # SyntaxWarning on modern CPython); use equality instead
            if height == 0 or width == 0:
                idxs = np.delete(idxs, last)
                logger.info('Deleted detection with 0 width \\ height',
                            extra={
                                'meta': {
                                    'cat': 'DETECTION_PROCESS',
                                    'height': height,
                                    'width': width
                                }
                            })
                continue

            box_area = width * height
            percentage_of_frame = box_area / self.image_area
            width_to_height_ratio = width / height

            # Remove boxes that are too large relative to the frame size
            if percentage_of_frame > self.percentage_of_frame:
                idxs = np.delete(idxs, last)
                logger.info(
                    'Deleted detection exceeding percentage of frame threshold',
                    extra={
                        'meta': {
                            'cat': 'DETECTION_PROCESS',
                            'percentage_of_frame': percentage_of_frame
                        }
                    })
                continue

            # Reject boxes that are too wide or too tall relative to the
            # configured aspect-ratio bound (checked in both orientations)
            if width_to_height_ratio > self.width_to_height_ratio or width_to_height_ratio < (
                    1 / self.width_to_height_ratio):
                idxs = np.delete(idxs, last)
                logger.info(
                    'Deleted detection exceeding width to height ratio',
                    extra={
                        'meta': {
                            'cat': 'DETECTION_PROCESS',
                            'width_to_height_ratio': width_to_height_ratio
                        }
                    })
                continue

            # Remove boxes whose centroids are out of the detection roi
            centroid_box = get_centroid([x_left, y_bottom, width, height])
            centroid_box_point = Point(centroid_box[0], centroid_box[1])
            if not self.roi_polygon.contains(centroid_box_point):
                idxs = np.delete(idxs, last)
                logger.info('Deleted detection out of DROI',
                            extra={
                                'meta': {
                                    'cat': 'DETECTION_PROCESS',
                                    'centroid_box': centroid_box
                                }
                            })
                continue

            pick.append(i)

            if len(pick) == self.max_num_boxes:
                return pick

            # compute the width and height of the intersection of
            # the picked bounding box with all other bounding boxes
            yy_min = np.maximum(y_min[i], y_min[idxs[:last]])
            xx_min = np.maximum(x_min[i], x_min[idxs[:last]])
            yy_max = np.minimum(y_max[i], y_max[idxs[:last]])
            xx_max = np.minimum(x_max[i], x_max[idxs[:last]])
            w = np.maximum(0, xx_max - xx_min)
            h = np.maximum(0, yy_max - yy_min)

            # compute intersection over union (epsilon avoids division by 0)
            iou = (w * h) / (area[i] + area[idxs[:last]] - w * h + 1e-5)

            # delete the picked index plus all indexes whose iou with the
            # picked box exceeds the provided overlap threshold
            idxs = np.delete(
                idxs,
                np.concatenate(
                    ([last], np.where(iou > self.iou_threshold)[0])))

        return pick