Example #1
def track(img):
    """
    Tracks people without detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured

    Returns:
        np: current frame with bounding boxes
    """
    print('[TRACKING]')
    global persons_list

    # Predict state for each tracker without measurement
    # TODO what to do if the box disappeared
    for person in persons_list:
        person.update_state(img)

        x = int((person.box[0] + person.box[2]) / 2)
        y = int((person.box[1] + person.box[3]) / 2)
        center = (y, x)
        img = cv2.circle(img, center, 7, (0, 255, 0), -1)
        img = tools.draw_box(person.id, img, person.box)

    return img
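
Example #1 leans on a tools.draw_box helper that is not shown in the listing. Below is a minimal sketch of what such a helper might look like, assuming boxes are stored as [y1, x1, y2, x2] (as the centre computation above implies) and that the tracker ID is printed above the rectangle; the function body is an assumption, not the repository's actual implementation:

import cv2

def draw_box(track_id, img, box):
    # Hypothetical stand-in for tools.draw_box: draw a [y1, x1, y2, x2]
    # rectangle and its tracker ID on the frame
    y1, x1, y2, x2 = [int(v) for v in box]
    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.putText(img, str(track_id), (x1, max(y1 - 5, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
    return img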
Example #2
def predict(img, start_time):
    """
    Tracks people without detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured

    Returns:
        np.array: current frame with bounding boxes
    """
    global persons_list

    # Predict state for each tracker without measurement
    for person in persons_list:
        person.predict_state(time.time() - start_time)

        x = person.x.T[0].tolist()
        x = [
            x[0] - int(person.height / 2), x[1] - int(person.width / 2),
            x[0] + int(person.height / 2), x[1] + int(person.width / 2)
        ]
        person.box = x

        # cv2.circle needs integer point coordinates; the Kalman state is float
        x = int(person.x.T[0].tolist()[0])
        y = int(person.x.T[0].tolist()[1])
        img = cv2.circle(img, (y, x), 4, (0, 255, 0), -1)
        img = tools.draw_box(person.id, img, person.box)

    return img
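
Example #2 calls person.predict_state(dt) on trackers whose state person.x holds a centre position and velocity, then rebuilds the box from person.width and person.height. The Tracker class itself is not shown; below is a minimal constant-velocity Kalman sketch, assuming a 4-dimensional state column [cy, cx, vy, vx] and the attribute names used above (a sketch, not the repository's actual tracker):

import numpy as np

class Tracker:
    # Hypothetical centre-based tracker with state [cy, cx, vy, vx]
    def __init__(self):
        self.x = np.zeros((4, 1))    # state estimate (column vector)
        self.P = np.eye(4) * 100.0   # state covariance
        self.Q = np.eye(4) * 0.01    # process noise
        self.width = 0
        self.height = 0
        self.box = []
        self.id = 0
        self.hits = 0
        self.misses = 0

    def predict_state(self, dt):
        # Constant-velocity transition: position += velocity * dt
        F = np.array([[1, 0, dt, 0],
                      [0, 1, 0, dt],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1]], dtype=float)
        self.x = F @ self.x
        self.P = F @ self.P @ F.T + self.Q

    def process_measurement(self, z, dt):
        # Predict, then correct with a measured centre z = [[cy], [cx]]
        self.predict_state(dt)
        H = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0]], dtype=float)  # measure position only
        R = np.eye(2)                              # measurement noise
        y = z - H @ self.x
        S = H @ self.P @ H.T + R
        K = self.P @ H.T @ np.linalg.inv(S)
        self.x = self.x + K @ y
        self.P = (np.eye(4) - K @ H) @ self.P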
Example #3
def detect(img):
    """
    Runs person detection on img

    Args:
        img (np.array): input image

    Returns:
        np.array: image with bounding boxes
    """
    detections = det.get_detections(img)

    for detection in detections:
        img = tools.draw_box(0, img, detection)

    return img
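
The det.get_detections call is shared by the detection-driven examples, but the detector itself is not part of the listing. A hedged sketch of one possible wrapper, here using OpenCV's built-in HOG person detector and returning boxes in the [y1, x1, y2, x2] layout the tracking code appears to expect (the real project may use a different detector entirely):

import cv2

class Detector:
    # Hypothetical stand-in for the det object: HOG-based person detector
    def __init__(self):
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    def get_detections(self, img):
        # detectMultiScale returns (x, y, w, h) rectangles
        rects, _ = self.hog.detectMultiScale(img, winStride=(8, 8))
        # Convert to the [y1, x1, y2, x2] layout used by the trackers
        return [[y, x, y + h, x + w] for (x, y, w, h) in rects]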
Example #4
def predict(img, start_time):
    """
    Tracks people without detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured

    Returns:
        np.array: current frame with bounding boxes
    """

    # Predict state for each tracker without measurement
    for person in persons_list:
        person.predict_state(time.time() - start_time)

        x = person.x
        x = x.T[0].tolist()
        x = [x[0], x[2], x[4], x[6]]
        person.box = x
        img = tools.draw_box(person.id, img, person.box)

    return img
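
Examples #4 and #5 keep an 8-dimensional state per tracker and read the box back by taking elements 0, 2, 4 and 6, i.e. positions sit at even indices with their velocities at the following odd indices. A small sketch of the corresponding constant-velocity transition matrix and box extraction, under that layout assumption:

import numpy as np

def corner_transition(dt):
    # Hypothetical 8x8 constant-velocity transition for the corner state:
    # each position (even index) is advanced by its paired velocity (odd index)
    F = np.eye(8)
    for i in range(0, 8, 2):
        F[i, i + 1] = dt
    return F

def state_to_box(x):
    # Read the four box coordinates back out of the 8x1 state column,
    # mirroring the x = [x[0], x[2], x[4], x[6]] lines above
    flat = x.T[0].tolist()
    return [flat[0], flat[2], flat[4], flat[6]]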
Example #5
def track(img, start_time):
    """
    Tracks people with detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured

    Returns:
        np.array: current frame with bounding boxes
    """

    global persons_list
    global next_id

    detections = det.get_detections(img)

    persons_boxes = []

    if len(persons_list) > 0:
        for person in persons_list:
            persons_boxes.append(person.box)

    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(persons_boxes, detections, iou_thrd=IOU_THRESH)

    # Update trackers with measurements
    if matched.size > 0:
        for person_idx, det_idx in matched:
            # Extract measurement
            z = detections[det_idx]
            z = np.expand_dims(z, axis=0).T

            # Extract person tracker
            person = persons_list[person_idx]

            person.process_measurement(z, time.time() - start_time)

            x = person.x.T[0].tolist()
            x = [x[0], x[2], x[4], x[6]]
            persons_boxes[person_idx] = x
            person.box = x
            person.hits += 1
            person.misses = 0

    # Update trackers without measurements
    if len(unmatched_trks) > 0:
        for person_idx in unmatched_trks:
            person = persons_list[person_idx]
            person.misses += 1
            person.hits = 0
            person.predict_state(time.time() - start_time)

            x = person.x
            x = x.T[0].tolist()
            x = [x[0], x[2], x[4], x[6]]
            person.box = x
            persons_boxes[person_idx] = x

    # Create tracker for new detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = detections[idx]

            person = tracker.Tracker()
            # Seed the 8-dim state (positions at even indices, velocities at
            # odd indices) directly from the flat detection; velocities start at zero
            x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]], dtype=float).T
            person.x = x

            # tmp_trk.predict_state(time.time() - start_time)

            x = person.x
            x = x.T[0].tolist()
            x = [x[0], x[2], x[4], x[6]]
            person.box = x

            # Assign an ID for the tracker
            next_id += 1
            person.id = next_id

            persons_list.append(person)
            persons_boxes.append(x)

    # Draw good boxes
    for person in persons_list:
        if ((person.hits >= MIN_HITS) and (person.misses <= MAX_MISSES)):
            img = tools.draw_box(person.id, img, person.box)

    # Remove lost trackers
    persons_list = [i for i in persons_list if i.misses <= MAX_MISSES]

    return img
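
assign_detections_to_trackers is the data-association step and is not shown. Below is a minimal sketch of the IoU variant used in Example #5, assuming [y1, x1, y2, x2] boxes, Hungarian matching via scipy.optimize.linear_sum_assignment, and the (matched, unmatched_dets, unmatched_trks) return order implied by the call above; the repository's real implementation may differ:

import numpy as np
from scipy.optimize import linear_sum_assignment

def iou(a, b):
    # Intersection over union of two [y1, x1, y2, x2] boxes
    y1, x1 = max(a[0], b[0]), max(a[1], b[1])
    y2, x2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, y2 - y1) * max(0, x2 - x1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / float(area_a + area_b - inter + 1e-9)

def assign_detections_to_trackers(trackers, detections, iou_thrd=0.3):
    # Hungarian matching on an IoU cost matrix (sketch)
    iou_matrix = np.zeros((len(trackers), len(detections)))
    for t, trk in enumerate(trackers):
        for d, det in enumerate(detections):
            iou_matrix[t, d] = iou(trk, det)

    rows, cols = linear_sum_assignment(-iou_matrix)

    matched, unmatched_trks, unmatched_dets = [], [], []
    for t in range(len(trackers)):
        if t not in rows:
            unmatched_trks.append(t)
    for d in range(len(detections)):
        if d not in cols:
            unmatched_dets.append(d)
    for t, d in zip(rows, cols):
        # Reject assignments whose overlap is below the threshold
        if iou_matrix[t, d] < iou_thrd:
            unmatched_trks.append(t)
            unmatched_dets.append(d)
        else:
            matched.append([t, d])

    matched = np.array(matched) if matched else np.empty((0, 2), dtype=int)
    return matched, np.array(unmatched_dets), np.array(unmatched_trks)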
Example #6
def detect(img, start_time, det):
    """
    Tracks people with detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured
        det: detector object exposing get_detections()

    Returns:
        np.array: current frame with bounding boxes
    """
    print('[DETECTION]')
    global persons_list
    global next_id

    # Get detections
    detections = det.get_detections(img)

    # Create list of existing persons' boxes
    persons_boxes = []
    if len(persons_list) > 0:
        for person in persons_list:
            persons_boxes.append(person.box)

    # Solve data association problem
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(persons_boxes, detections, dst_thrd=DISTANCE_THRESH)

    # Update trackers with measurements
    if matched.size > 0:
        for person_idx, det_idx in matched:
            # Extract measurement
            z = detections[det_idx]
            center = [
                z[0] + int((z[2] - z[0]) / 2), z[1] + int((z[3] - z[1]) / 2)
            ]

            img = cv2.circle(img, (center[1], center[0]), 6, (0, 0, 255), -1)

            # Extract the matched person tracker and refresh it
            person = persons_list[person_idx]
            person.hits += 1
            person.misses = 0
            person.box = tuple(z)

    # Update trackers without measurements

    # TODO check removed unmatched trackers
    for person_idx in unmatched_trks:
        person = persons_list[person_idx]
        person.misses += 1
        person.hits = 0

    # Create tracker for new detections

    for idx in unmatched_dets:
        z = detections[idx]
        person = trc.Tracker(tuple(z), img)
        #person.box = z

        # Assign an ID for the tracker
        next_id += 1
        person.id = next_id

        persons_list.append(person)
        persons_boxes.append(z)

    # Draw good boxes
    for person in persons_list:
        if ((person.hits >= MIN_HITS) and (person.misses <= MAX_MISSES)):
            # Box centre in (row, col); cv2 points are (col, row), matching
            # the red circle drawn for matched detections above
            y = int((person.box[0] + person.box[2]) / 2)
            x = int((person.box[1] + person.box[3]) / 2)

            img = cv2.circle(img, (x, y), 4, (0, 255, 0), -1)
            img = tools.draw_box(person.id, img, person.box)
    # Remove lost trackers
    persons_list = [i for i in persons_list if i.misses <= MAX_MISSES]

    return img
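
Example #6 (like Example #1) constructs trc.Tracker(tuple(z), img) and advances it with update_state(img), which suggests each person wraps an OpenCV visual tracker rather than a Kalman filter. A hedged sketch of such a wrapper, assuming an OpenCV build that exposes cv2.TrackerKCF_create and boxes in [y1, x1, y2, x2] order; the actual trc module is not shown:

import cv2

class Tracker:
    # Hypothetical wrapper: one OpenCV KCF tracker per tracked person
    def __init__(self, box, img):
        # box is (y1, x1, y2, x2); OpenCV wants an (x, y, w, h) ROI
        y1, x1, y2, x2 = [int(v) for v in box]
        self.box = box
        self.id = 0
        self.hits = 0
        self.misses = 0
        self.tracker = cv2.TrackerKCF_create()
        self.tracker.init(img, (x1, y1, x2 - x1, y2 - y1))

    def update_state(self, img):
        # Let the visual tracker follow the person in the new frame
        ok, roi = self.tracker.update(img)
        if ok:
            x, y, w, h = [int(v) for v in roi]
            self.box = (y, x, y + h, x + w)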
Example #7
def track(img, start_time):
    """
    Tracks people with detection

    Args:
        img (np.array): current frame
        start_time (float): time when the current frame was captured

    Returns:
        np: current frame with bounding boxes
    """

    global persons_list
    global next_id

    # Get detections
    detections = det.get_detections(img)

    # Create list of existing persons' boxes
    persons_boxes = []
    if len(persons_list) > 0:
        for person in persons_list:
            persons_boxes.append(person.box)

    # Solve data association problem
    matched, unmatched_dets, unmatched_trks \
        = assign_detections_to_trackers(persons_boxes, detections, dst_thrd=DISTANCE_THRESH)

    # Update trackers with measurements
    if matched.size > 0:
        for person_idx, det_idx in matched:
            # Extract measurement
            z = detections[det_idx]
            center = [
                z[0] + int((z[2] - z[0]) / 2), z[1] + int((z[3] - z[1]) / 2)
            ]

            img = cv2.circle(img, (center[1], center[0]), 6, (0, 0, 255), -1)
            center = np.expand_dims(center, axis=0).T

            # Extract person tracker
            person = persons_list[person_idx]
            person.hits += 1
            person.misses = 0

            person.process_measurement(center, time.time() - start_time)
            person.width = z[3] - z[1]
            person.height = z[2] - z[0]
            x = person.x.T[0].tolist()
            x = [
                x[0] - int(person.height / 2), x[1] - int(person.width / 2),
                x[0] + int(person.height / 2), x[1] + int(person.width / 2)
            ]
            persons_boxes[person_idx] = x
            person.box = x

    # Update trackers without measurements
    if len(unmatched_trks) > 0:
        for person_idx in unmatched_trks:
            person = persons_list[person_idx]
            person.misses += 1
            person.hits = 0
            person.predict_state(time.time() - start_time)

            x = person.x.T[0].tolist()
            x = [
                x[0] - int(person.height / 2), x[1] - int(person.width / 2),
                x[0] + int(person.height / 2), x[1] + int(person.width / 2)
            ]
            persons_boxes[person_idx] = x
            person.box = x

    # Create tracker for new detections
    if len(unmatched_dets) > 0:
        for idx in unmatched_dets:
            z = detections[idx]
            center = [
                z[0] + int((z[2] - z[0]) / 2), z[1] + int((z[3] - z[1]) / 2)
            ]

            person = tracker_center.Tracker()
            # Seed the centre-based state [cy, cx, vy, vx] as a column vector;
            # velocities start at zero
            person.x = np.array([[center[0], center[1], 0, 0]], dtype=float).T

            person.width = z[3] - z[1]
            person.height = z[2] - z[0]
            x = person.x.T[0].tolist()
            x = [
                x[0] - int(person.height / 2), x[1] - int(person.width / 2),
                x[0] + int(person.height / 2), x[1] + int(person.width / 2)
            ]
            person.box = x

            # Assign an ID for the tracker
            next_id += 1
            person.id = next_id

            persons_list.append(person)
            persons_boxes.append(x)

    # Draw good boxes
    for person in persons_list:
        if ((person.hits >= MIN_HITS) and (person.misses <= MAX_MISSES)):
            # cv2.circle needs integer point coordinates; the Kalman state is float
            x = int(person.x.T[0].tolist()[0])
            y = int(person.x.T[0].tolist()[1])
            img = cv2.circle(img, (y, x), 4, (0, 255, 0), -1)
            img = tools.draw_box(person.id, img, person.box)

    # Remove lost trackers
    persons_list = [i for i in persons_list if i.misses <= MAX_MISSES]

    return img
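
None of the examples show the module-level state or the frame loop that calls them. Below is a hedged sketch of one way to drive the detection/prediction pair, with all constants and the DETECT_EVERY cadence being hypothetical placeholders rather than values from the repository:

import time
import cv2

# Hypothetical module-level state assumed by the functions above
persons_list = []
next_id = 0
IOU_THRESH = 0.3
DISTANCE_THRESH = 50
MIN_HITS = 3
MAX_MISSES = 5
DETECT_EVERY = 5  # run the detector every 5th frame (placeholder)

def main(video_path):
    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        start_time = time.time()
        if frame_idx % DETECT_EVERY == 0:
            frame = track(frame, start_time)    # detection + Kalman update
        else:
            frame = predict(frame, start_time)  # Kalman prediction only
        cv2.imshow('tracking', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        frame_idx += 1
    cap.release()
    cv2.destroyAllWindows()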