Example no. 1
0
class PreTracker:
    """Applies a configurable chain of pre-processing callbacks to frames.

    Optionally subscribes to an input ROS image topic (or bag) and
    republishes each processed frame on another topic.
    """

    def __init__(self, in_topic=None, in_bag=None, processed_frame_topic=None):
        # Ordered list of callables applied to each frame by pre_track().
        self.pre_track_callbacks = []

        if in_topic is not None:
            self.stream_frame = ROSImageStream(sub_topic_name=in_topic,
                                               in_bag_name=in_bag)

        if processed_frame_topic is not None:
            self.stream_processed_frame = ROSImageStream(
                pub_topic_name=processed_frame_topic)
        else:
            self.stream_processed_frame = None

    def start(self):
        """Start consuming the input stream, pre-tracking every frame."""
        def callback(frame):
            self.pre_track(frame)

        self.stream_frame.img_stream_subscribe(callback)

    def set_pre_track_callbacks(self, pre_track_callbacks):
        """Replace the ordered list of pre-processing callbacks."""
        self.pre_track_callbacks = pre_track_callbacks

    def set_color_range(self, color_range=((0, 0, 0), (255, 255, 255))):
        """Set the (lower, upper) bounds used by pre_track_color_range."""
        self.color_range = color_range

    def set_background_subtract(self):
        """Create the KNN subtractor used by pre_track_background_subtract."""
        self.background_subtractor = cv2.createBackgroundSubtractorKNN()

    def set_median_blur(self, ksize):
        """Set the aperture size used by pre_track_median_blur."""
        self.ksize = ksize

    def pre_track_median_blur(self, frame):
        """Median-blur `frame` with the configured kernel size."""
        return cv2.medianBlur(frame, self.ksize)

    def pre_track_color_range(self, frame):
        """Threshold `frame` to a binary mask of the configured color range."""
        return cv2.inRange(frame, self.color_range[0], self.color_range[1])

    def pre_track_background_subtract(self, frame):
        """Apply the KNN background subtractor to `frame`."""
        return self.background_subtractor.apply(frame)

    def pre_track(self, frame):
        """Run the callback chain over `frame`; publish and return the result."""
        for callback in self.pre_track_callbacks:
            frame = callback(frame)

        if self.stream_processed_frame is not None:
            self.stream_processed_frame.publish_single(frame, encoding="mono8")

        return frame

    class TrackerExample:
        """Usage example: camshift-track the largest dark contour."""

        def __init__(self):
            self.t = None
            self.last_frame = None
            self.track_window = None
            # NOTE(review): relies on a module-level `args` (argparse result)
            # being in scope — confirm where it is defined.
            self.s = ROSImageStream(sub_topic_name=args.i)
            self.p = ROSImageStream(pub_topic_name=args.o)

            self.pre_tracker = PreTracker()
            self.pre_tracker.set_color_range(((0, 0, 0), (10, 10, 10)))
            self.pre_tracker.set_pre_track_callbacks(
                [self.pre_tracker.pre_track_color_range])

        def start(self):
            def callback(frame):
                if self.t is None:
                    # First frame: seed the tracker with the biggest contour.
                    seg = self.pre_tracker.pre_track(frame)
                    cnts, _ = cv2.findContours(seg, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
                    max_cnt = max(cnts, key=cv2.contourArea)

                    x, y, w, h = cv2.boundingRect(max_cnt)

                    self.t = CamshiftTracker(init_window=(x, y, w, h))

                    # BUG FIX: boundingRect returns (x, y, w, h); the
                    # opposite corner is (x + w, y + h), not (w, h).
                    r = cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (255, 0, 0), 2)
                    self.p.publish_single(r)

                self.last_frame = frame
                pre_tracked_frame = self.pre_tracker.pre_track(frame)
                ret, self.track_window = self.t.track(pre_tracked_frame)

                rectangle = cv2.rectangle(
                    frame,
                    (self.track_window[0], self.track_window[1]),
                    (self.track_window[0] + self.track_window[2],
                     self.track_window[1] + self.track_window[3]),
                    (255, 0, 0), 2)

                self.p.publish_single(rectangle)

            self.s.img_stream_subscribe(callback)
Example no. 3
0
class KalmanCarTracker(KalmanMultiTrackerReceiver):
    """Per-car Kalman tracking with annotated video output.

    Extends KalmanMultiTrackerReceiver with a per-car bookkeeping table
    (current/average velocity, measurement count) and draws each stable
    car's id, scaled position and velocity onto the incoming frames.
    """

    def __init__(self,
                 max_tracked_objects=30,
                 dt_prediction=0.05,
                 gsd=1,
                 origin_coordinate=[0, 0],
                 in_events_topic="/multi_tracker/global_events",
                 in_status_topic="/multi_tracker/status",
                 in_window_topic="/multi_tracker/window",
                 out_kalman_topic="/multi_tracker/pos",
                 in_frames_topic="/homography_output",
                 out_frames_topic="/out",
                 out_kalman_frames_topic=None):
        KalmanMultiTrackerReceiver.__init__(
            self,
            max_tracked_objects=max_tracked_objects,
            dt_prediction=dt_prediction,
            in_events_topic=in_events_topic,
            in_status_topic=in_status_topic,
            in_window_topic=in_window_topic,
            out_kalman_topic=out_kalman_topic,
            in_frames_topic=in_frames_topic,
            out_frames_topic=out_kalman_frames_topic)

        # Scale factor and world-origin offset applied when rendering
        # coordinates (see write_car()).
        self.gsd = gsd
        self.origin_coordinate = origin_coordinate

        self.frames_sub = ROSImageStream(sub_topic_name=in_frames_topic)
        self.frames_pub = ROSImageStream(pub_topic_name=out_frames_topic)

        self.cars = {}  # key -> per-car stats dict, see create_car()
        self.cars_lock = threading.Lock()

        self.last_id = 0  # monotonically increasing unique car id

    def start(self):
        """Start the receiver and frame subscription, then poll at 5 Hz."""
        # BUG FIX: the receiver was started twice (before and after the
        # frame subscription); start it exactly once.
        KalmanMultiTrackerReceiver.start(self, loop=False)
        self.frames_sub.img_stream_subscribe(callback=self.frame_callback,
                                             loop=False)

        rate = rospy.Rate(5)

        while True:
            rate.sleep()

            self.update_cars()

    def frame_callback(self, frame):
        """Annotate `frame` with every stable car and publish it."""
        self.kalman_filters_lock.acquire()

        for key in self.kalman_filters:
            if key not in self.cars:
                continue

            car = self.cars[key]

            # Skip cars with no state estimate yet or too short a history.
            if car["CurrX"] is None or car["MeasurementsN"] < 15:
                continue

            # Reject cars whose instantaneous velocity deviates too far from
            # their running average; the threshold tightens as history grows.
            vel_dist = sqrt((car["AvgVelX"] - car["CurrVelX"])**2 +
                            (car["AvgVelY"] - car["CurrVelY"])**2)

            if vel_dist > 4000 / car["MeasurementsN"]:
                continue

            frame = self.write_car(frame, car)

        self.frames_pub.publish_single(frame)

        self.kalman_filters_lock.release()

    def write_car(self, frame, car):
        """Draw `car`'s id, scaled position and velocity at its pixel position."""
        text = ", ".join([
            str(car["UniqId"]),
            str(int(car["CurrX"] * self.gsd + self.origin_coordinate[0])),
            str(int(car["CurrY"] * self.gsd + self.origin_coordinate[1])),
            str(int(car["CurrVelX"] * self.gsd)),
            str(int(car["CurrVelY"] * self.gsd)),
        ])
        frame = cv2.putText(frame, text,
                            (int(car["CurrX"]), int(car["CurrY"])),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))

        return frame

    def update_cars(self):
        """Refresh each car's stats from its Kalman filter state estimate."""
        self.kalman_filters_lock.acquire()
        self.cars_lock.acquire()

        for key in self.cars:
            if key not in self.kalman_filters:
                continue

            # State vector layout: (px, py, vx, vy).
            state = np.array(self.kalman_filters[key].filter.x).flatten()
            car = self.cars[key]

            car["CurrX"], car["CurrY"] = state[0], state[1]
            car["CurrVelX"], car["CurrVelY"] = state[2], state[3]

            if car["AvgVelX"] is None:
                # First measurement: the average is the current estimate.
                car["AvgVelX"] = state[2]
                car["AvgVelY"] = state[3]
            else:
                # Incremental running mean over MeasurementsN + 1 samples.
                n = car["MeasurementsN"]
                car["AvgVelX"] = (car["AvgVelX"] * n + state[2]) / (n + 1)
                car["AvgVelY"] = (car["AvgVelY"] * n + state[3]) / (n + 1)

            car["MeasurementsN"] += 1

        self.cars_lock.release()
        self.kalman_filters_lock.release()

    def event_callback(self, key, event):
        """Handle a tracker event: recreate the car on '+', drop it otherwise."""
        self.cars_lock.acquire()

        if key in self.cars:
            self.remove_car(key)

        if event == '+':
            self.create_car(key)

        self.cars_lock.release()

        KalmanMultiTrackerReceiver.event_callback(self, key, event)

    def create_car(self, key):
        """Register a fresh stats record for `key` with a new unique id."""
        self.cars[key] = {
            "UniqId": self.last_id,
            "CurrX": None,
            "CurrY": None,
            "CurrVelX": None,
            "CurrVelY": None,
            "AvgVelX": None,
            "AvgVelY": None,
            "MeasurementsN": 0
        }

        self.last_id += 1

    def remove_car(self, key):
        """Drop the stats record for `key`, if present."""
        if key in self.cars:
            del self.cars[key]
class MultiTracker:
    """Discovers and tracks multiple moving objects in a ROS image stream.

    Every untracked blob above `cnt_min_area` gets its own tracker instance
    (Camshift/Meanshift).  Tracked windows are validated against size,
    direction and contour-area constraints; lost objects may be re-seeded
    from externally supplied Kalman position estimates.
    """

    def __init__(
            self,
            tracker_type=CamshiftTracker,  # MeanshiftTracker, CamshiftTracker
            cnt_min_area=500,
            window_max_width=None,
            lost_ttl_attempts=10,
            max_tracked_objects=30,
            overlap_margin=25,
            allowed_directions=['n', 's', 'e', 'w'],
            pre_tracker=None,
            in_frames_topic="/img_stream",
            out_events_topic="/multi_tracker/global_events",
            out_status_topic="/multi_tracker/status",
            out_window_topic="/multi_tracker/window",
            in_kalman_topic="/multi_tracker/pos",
            out_frames_topic="/multi_tracker/tracked_frames"):
        self.tracker_type = tracker_type
        self.cnt_min_area = cnt_min_area
        self.window_max_width = window_max_width
        self.lost_ttl_attempts = lost_ttl_attempts
        self.max_tracked_objects = max_tracked_objects
        self.overlap_margin = overlap_margin
        self.allowed_directions = allowed_directions

        if pre_tracker is None:
            # Default pre-processing: dark-color segmentation + median blur.
            self.pre_tracker = PreTracker()
            self.pre_tracker.set_color_range(color_range=((0, 0, 0),
                                                          (20, 20, 20)))
            self.pre_tracker.set_median_blur(7)
            self.pre_tracker.set_pre_track_callbacks([
                self.pre_tracker.pre_track_color_range,
                self.pre_tracker.pre_track_median_blur
            ])
        else:
            self.pre_tracker = pre_tracker

        self.in_frames_topic = in_frames_topic
        self.out_events_topic = out_events_topic
        self.out_status_topic = out_status_topic
        self.out_window_topic = out_window_topic
        self.in_kalman_topic = in_kalman_topic
        self.out_frames_topic = out_frames_topic

        self.frames_img_stream = ROSImageStream(
            sub_topic_name=self.in_frames_topic)

        self.kalman_sub = rospy.Subscriber(self.in_kalman_topic,
                                           Window,
                                           callback=self.kalman_pos_callback,
                                           queue_size=self.max_tracked_objects)

        if self.out_frames_topic is not None:
            self.out_frames_pub = ROSImageStream(
                pub_topic_name=self.out_frames_topic)
        else:
            # BUG FIX: was `selt.out_frames_pub`, a NameError at runtime.
            self.out_frames_pub = None

        self.tracked_objects = TrackedObjects(
            out_status_topic=self.out_status_topic,
            out_window_topic=self.out_window_topic,
            out_events_topic=self.out_events_topic,
            max_tracked_objects=self.max_tracked_objects,
            max_ttl=self.lost_ttl_attempts)

        self.trackers = {}  # {"id": tracker_type}

    def start(self):
        """Subscribe to the frame stream and run tracking on every frame."""
        self.frames_img_stream.img_stream_subscribe(self.apply_tracking)

    def apply_tracking(self, frame):
        """Per-frame pipeline: pre-track, update trackers, discover objects."""
        pre_tracked_frame = self.pre_tracker.pre_track(frame)

        updated_tracked_frame = self.update_current_trackers(pre_tracked_frame)

        self.discover_new_objects(updated_tracked_frame)

        if self.out_frames_pub is not None:
            self.publish_tracked_frame(frame)

    def _clamp_window(self, window):
        """Clamp an (x, y, w, h) window's size to window_max_width, if set."""
        if self.window_max_width is None:
            return window
        return (window[0], window[1],
                min(window[2], self.window_max_width),
                min(window[3], self.window_max_width))

    @staticmethod
    def _window_direction(vel):
        """Map a (dy, dx) displacement to a cardinal direction character."""
        if abs(vel[0]) > abs(vel[1]):
            return 'n' if vel[0] < 0 else 's'
        return 'w' if vel[1] < 0 else 'e'

    def update_current_trackers(self, pre_tracked_frame):
        """Advance every tracker one frame; drop lost/invalid objects.

        Returns the pre-tracked frame with all tracked regions blanked out
        so discover_new_objects() only sees untracked blobs.
        """
        for obj in self.tracked_objects:
            obj.acquire_lock()

            ret, track_window = self.trackers[obj.id].track(pre_tracked_frame)
            track_window = self._clamp_window(track_window)

            if obj.track_window is not None and obj.last_window is not None:
                last_dir = self._window_direction(
                    np.array([obj.track_window[1] - obj.last_window[1],
                              obj.track_window[0] - obj.last_window[0]]))
                current_dir = self._window_direction(
                    np.array([track_window[1] - obj.track_window[1],
                              track_window[0] - obj.track_window[0]]))

                # The original code handled reversals and turns with two
                # identical branches; any direction change — or movement in
                # a disallowed direction — marks the object lost.
                if (current_dir != last_dir
                        or current_dir not in self.allowed_directions):
                    pre_tracked_frame = self.handle_lost_object(
                        obj, pre_tracked_frame, immediate_remove=True)

                    obj.release_lock()

                    continue

            x_lowerb, x_upperb, y_lowerb, y_upperb = self.get_slices(
                track_window, pre_tracked_frame)

            sub_frame = pre_tracked_frame[x_lowerb:x_upperb, y_lowerb:y_upperb]

            _, cnts, _ = cv2.findContours(sub_frame, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

            # Object vanished or shrank below the area threshold -> lost.
            max_cnt_area = max((cv2.contourArea(c) for c in cnts), default=0)

            if len(cnts) == 0 or max_cnt_area < self.cnt_min_area:
                pre_tracked_frame = self.handle_lost_object(
                    obj, pre_tracked_frame)

                obj.release_lock()

                continue

            obj.supply_window(track_window)

            pre_tracked_frame = self.handle_ok_object(obj, pre_tracked_frame)

            obj.release_lock()

        return pre_tracked_frame

    def _blank_window(self, window, pre_tracked_frame):
        """Zero out `window` (plus overlap margin) in the pre-tracked frame."""
        x_lowerb, x_upperb, y_lowerb, y_upperb = self.get_slices(
            window, pre_tracked_frame)

        pre_tracked_frame[x_lowerb:x_upperb, y_lowerb:y_upperb] = 0

        return pre_tracked_frame

    def handle_lost_object(self,
                           obj,
                           pre_tracked_frame,
                           immediate_remove=False):
        """Mark `obj` lost (optionally expiring its TTL) and blank its region."""
        if immediate_remove:
            obj.ttl = 0

        obj.set_lost()

        return self._blank_window(obj.track_window, pre_tracked_frame)

    def handle_ok_object(self, obj, pre_tracked_frame):
        """Mark `obj` successfully tracked and blank its region."""
        obj.set_ok()

        return self._blank_window(obj.track_window, pre_tracked_frame)

    def get_slices(self, track_window, pre_tracked_frame):
        """Return (row_lo, row_hi, col_lo, col_hi) slice bounds for
        `track_window` plus the overlap margin, clipped to the frame."""
        x_lowerb = max(track_window[1] - self.overlap_margin, 0)
        x_upperb = min(track_window[1] + track_window[3] + self.overlap_margin,
                       pre_tracked_frame.shape[0])
        y_lowerb = max(track_window[0] - self.overlap_margin, 0)
        y_upperb = min(track_window[0] + track_window[2] + self.overlap_margin,
                       pre_tracked_frame.shape[1])

        return x_lowerb, x_upperb, y_lowerb, y_upperb

    def discover_new_objects(self, updated_frame):
        """Spawn trackers for untracked blobs remaining in `updated_frame`."""
        _, cnts, _ = cv2.findContours(updated_frame, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)

        bounding_rects = []

        for cnt in cnts:
            rect = self._clamp_window(cv2.boundingRect(cnt))

            # Skip blobs overlapping a rect already accepted this frame.
            # NOTE(review): the test mixes x/y with w/h indices for the
            # (x, y, w, h) layout — verify the intended geometry.
            overlapping = any(
                not (rect[1] + rect[2] < b_rect[1]
                     or b_rect[1] + b_rect[2] < rect[1]
                     or rect[0] + rect[3] < b_rect[0]
                     or b_rect[0] + b_rect[3] < rect[0])
                for b_rect in bounding_rects)

            if overlapping:
                continue

            if cv2.contourArea(cnt) < self.cnt_min_area:
                continue

            bounding_rects.append(rect)

            key = self.tracked_objects.create(window=rect)

            if key is not None:
                self.trackers[key] = self.tracker_type(init_window=rect)

    def kalman_pos_callback(self, msg):
        """Re-seed a lost object's window from a Kalman position estimate."""
        obj = self.tracked_objects.get_item(msg.id)

        if obj is not None:
            obj.acquire_lock()

            if obj.status == "lost":
                obj.track_window = msg.window

            obj.release_lock()

    def publish_tracked_frame(self, frame):
        """Publish `frame` with tracked windows drawn (blue=ok, red=lost)."""
        for obj in self.tracked_objects.tracked_objects:
            obj.acquire_lock()

            color = None
            if obj.status == "ok":
                color = (255, 0, 0)
            elif obj.status == "lost":
                color = (0, 0, 255)

            if color is not None:
                frame = cv2.rectangle(
                    frame, (obj.track_window[0], obj.track_window[1]),
                    (obj.track_window[0] + obj.track_window[2],
                     obj.track_window[1] + obj.track_window[3]), color,
                    thickness=3)

            obj.release_lock()

        self.out_frames_pub.publish_single(frame)
Example no. 5
0
class KalmanMultiTrackerReceiver():
    """Maintains one Kalman filter per object reported by a multi-tracker.

    Listens to the tracker's event/status/window topics, lazily creates a
    filter per tracked key (see position_callback), and — while an object
    is lost — feeds Kalman state estimates back to the tracker.
    """

    class KalmanTrackerReceiver(RTKalmanFilter):
        """Constant-velocity (px, py, vx, vy) filter for one tracked object."""

        def __init__(
                self,
                dt_prediction=0.05,
                prediction_topic=None,
                update_topic=None,
                state_est_topic="/rt_kalman_filter/state_est",
                start_pos=None
        ):
            self.dt_prediction = dt_prediction

            self.prediction_topic = prediction_topic
            self.update_topic = update_topic
            self.state_est_topic = state_est_topic

            self.sensor_bag = None
            self.sensor_topic = None

            self.pub_prediction = None
            self.pub_update = None
            self.pub_state_est = None

            # BUG FIX: the original condition was inverted — it used the
            # zero state when start_pos WAS provided, and indexed
            # start_pos[0, 0] when it was None (a guaranteed TypeError).
            if start_pos is None:
                x = np.array([
                    [0.],   # px
                    [0.],   # py
                    [0.],   # vx
                    [0.]])  # vy
            else:
                x = np.array([
                    [start_pos[0, 0]],
                    [start_pos[1, 0]],
                    [0.],
                    [0.]])

            # Constant-velocity state-transition matrix.
            F = np.array([
                [1., 0., self.dt_prediction, 0.],
                [0., 1., 0., self.dt_prediction],
                [0., 0., 1., 0.],
                [0., 0., 0., 1.]])
            # Measurement matrix.
            # NOTE(review): rows select state[1] then state[0], i.e. the
            # measurement is ordered (py, px) — confirm the sensor really
            # reports coordinates in that order.
            H = np.array([
                [0., 1., 0., 0.],
                [1., 0., 0., 0.]])
            # Very large initial covariance: the starting state is uncertain.
            P = np.array([
                [100000., 0., 0., 0.],
                [0., 100000., 0., 0.],
                [0., 0., 100000., 0.],
                [0., 0., 0., 100000.]])
            # Measurement noise.
            R = np.array([
                [1.5, 0],
                [0., 1.5]])
            # Process noise.
            Q = Q_discrete_white_noise(dim=4, var=0.13)

            self.init_filter(x, F, H, P, R, Q)

            self.filter_lock = threading.Lock()

            self.update_thread = None
            self.prediction_thread = None

    def __init__(
            self,
            max_tracked_objects=30,
            dt_prediction=0.05,
            prediction_topic=None,
            update_topic=None,
            state_est_topic="/kalman_multi_tracker/state_est",
            in_events_topic="/multi_tracker/global_events",
            in_status_topic="/multi_tracker/status",
            in_window_topic="/multi_tracker/window",
            out_kalman_topic="/multi_tracker/pos",
            in_frames_topic=None,
            out_frames_topic=None
    ):
        self.max_tracked_objects = max_tracked_objects
        self.dt_prediction = dt_prediction

        self.prediction_topic = prediction_topic
        self.update_topic = update_topic
        self.state_est_topic = state_est_topic
        self.in_events_topic = in_events_topic
        self.in_status_topic = in_status_topic
        self.in_window_topic = in_window_topic
        self.out_kalman_topic = out_kalman_topic
        self.in_frames_topic = in_frames_topic
        self.out_frames_topic = out_frames_topic

        self.kalman_filters = {}  # {"key": KalmanTrackerReceiver}
        self.kalman_filters_lock = threading.Lock()

        self.multi_tracker_receiver = MultiTrackerReceiver(
            max_tracked_objects=self.max_tracked_objects,
            event_callback=self.event_callback,
            status_callback=self.status_callback,
            position_callback=self.position_callback,
            in_events_topic=self.in_events_topic,
            in_status_topic=self.in_status_topic,
            in_window_topic=self.in_window_topic,
            out_kalman_topic=self.out_kalman_topic)

    def start(self, loop=True):
        """Optionally start the debug frame relay, then start the receiver."""
        if self.in_frames_topic is not None and self.out_frames_topic is not None:
            self.pub = ROSImageStream(pub_topic_name=self.out_frames_topic)
            self.sub = ROSImageStream(sub_topic_name=self.in_frames_topic)
            self.sub.img_stream_subscribe(callback=self.publish_frame,
                                          loop=False)

        self.multi_tracker_receiver.start(loop=loop)

    def publish_frame(self, frame):
        """Republish `frame` with a circle at every filter's estimated position."""
        self.kalman_filters_lock.acquire()

        for k in self.kalman_filters:
            state = self.kalman_filters[k].filter.x
            frame = cv2.circle(frame,
                               (int(state[0, 0]), int(state[1, 0])),
                               10,
                               (255, 0, 0))

        self.pub.publish_single(frame)

        self.kalman_filters_lock.release()

    def event_callback(self, key, event):
        """Drop the filter for `key` on a '-' event.

        Nothing happens on '+': filters are created lazily by
        position_callback() when the first position for `key` arrives.
        """
        self.kalman_filters_lock.acquire()

        if event == "-":
            if key in self.kalman_filters:
                self.kalman_filters[key].stop()
                del self.kalman_filters[key]

        self.kalman_filters_lock.release()

    def status_callback(self, key, status):
        """Toggle estimate feedback to the tracker based on object status."""
        self.kalman_filters_lock.acquire()

        if status == "lost":
            # While lost, feed Kalman estimates back to the tracker.
            if key in self.kalman_filters:
                callbacks = {
                    "prediction": {
                        "callback":
                        self.multi_tracker_receiver.supply_kalman_estimate,
                        "arg": key
                    }
                }

                self.kalman_filters[key].set_callbacks(callbacks)

        elif status == "ok" or status == "removed":
            if key in self.kalman_filters:
                self.kalman_filters[key].set_callbacks(None)

        self.kalman_filters_lock.release()

    def position_callback(self, key, position):
        """Create the filter for `key` on first sight, else feed it `position`."""
        self.kalman_filters_lock.acquire()

        if key not in self.kalman_filters:
            self.kalman_filters[key] = self.KalmanTrackerReceiver(
                dt_prediction=self.dt_prediction,
                prediction_topic=self.prediction_topic,
                update_topic=self.update_topic,
                state_est_topic=self.state_est_topic,
                start_pos=position)

            self.kalman_filters[key].start()
        else:
            self.kalman_filters[key].update_step(position)

        self.kalman_filters_lock.release()