Example #1
0
def test(x, y, z, qcov, rcov):
    """Run a constant-acceleration Kalman filter over a 3-D trajectory.

    Args:
        x, y, z: equal-length sequences of measured coordinates.
        qcov: scalar process (transition) noise covariance.
        rcov: scalar measurement noise covariance.

    Returns:
        Tuple (xkf, ykf, zkf) of filtered coordinate arrays, same length
        as the inputs.
    """
    # Initializations
    dt = 1  # Measurement interval (s)
    A = zeros((9, 9))  # Transition matrix: [pos, vel, acc] kinematics
    A[0:3, 0:3] = identity(3)
    A[0:3, 3:6] = identity(3) * dt
    A[0:3, 6:9] = identity(3) * 0.5 * dt * dt
    A[3:6, 3:6] = identity(3)
    A[3:6, 6:9] = identity(3) * dt
    A[6:9, 6:9] = identity(3)
    H = zeros((3, 9))  # Measurement matrix: observe position only
    H[0:3, 0:3] = identity(3)
    Q = identity(9) * qcov  # Transition covariance
    R = identity(3) * rcov  # Noise covariance
    B = identity(9)  # Control matrix
    kf = KalmanFilter(A, H, Q, R, B)
    # Run through the dataset: correct with each measurement, then predict.
    n = len(x)
    xkf, ykf, zkf = zeros(n), zeros(n), zeros(n)
    # range (not xrange) so this works on Python 3 as well as Python 2.
    for i in range(n):
        kf.correct(array([x[i], y[i], z[i]]))
        kf.predict(array([]))  # no control input
        Skf = kf.getCurrentState()
        xkf[i], ykf[i], zkf[i] = Skf[0], Skf[1], Skf[2]
    return xkf, ykf, zkf
Example #2
0
class UltraSoundFilter(object):
    """Kalman-filtered wrapper around an ultrasound distance sensor."""

    def __init__(self, us):
        """:param us: sensor object exposing get_distance() -> float or None."""
        self.ultra_sound = us

        self.kalman_filter = None     # created by calibrate()
        self.last_update_time = None  # timestamp of last accepted sample

        self.calibration_time = 3.0   # seconds of samples to collect
        self.calibration_range = None # mean distance of the empty scene
        self.calibration_std = None   # sample std of the empty scene

    def calibrate(self):
        """ Run initialization procedure on empty scene.
            Determine mean and std of dists within calibration_time.

            Can block for a while.

            Raises:
                RuntimeError: if fewer than two valid ranges were read
                    (two are needed for the sample standard deviation).
        """
        ranges = []
        start_time = time.time()
        while time.time() - start_time <= self.calibration_time:
            dist = self.ultra_sound.get_distance()
            time.sleep(0.05)
            if dist is not None:
                ranges.append(dist)
        if len(ranges) <= 1:
            # Fixed: removed unreachable `return` that followed this raise.
            raise RuntimeError("No valid ranges during calibration.")
        self.calibration_range = sum(ranges) / len(ranges)
        sqr_error = [(dist - self.calibration_range)**2 for dist in ranges]
        # Bessel-corrected (sample) standard deviation.
        self.calibration_std = math.sqrt(sum(sqr_error) / (len(ranges) - 1))
        logger.info(
            f"Calibrated range as {self.calibration_range:.2f}m +- {self.calibration_std:.3f}m"
        )
        self.kalman_filter = KalmanFilter(self.calibration_range,
                                          self.calibration_std)

    def update(self):
        """ Update the internal feature based on sensor data. """
        if self.kalman_filter is None:
            # Not calibrated yet; avoid AttributeError on predict/correct.
            return
        cur_dist = self.ultra_sound.get_distance()
        now = time.time()
        if cur_dist is None:
            return
        if self.last_update_time is None:
            delta_t = 0
        else:
            delta_t = now - self.last_update_time
        self.kalman_filter.predict(delta_t)
        self.kalman_filter.correct(cur_dist)
        self.last_update_time = now

    def get_distance(self):
        """Return the filtered distance, or None before calibration."""
        if self.kalman_filter is None:
            return None
        return self.kalman_filter.distance()
Example #3
0
class Tracklet:
    """One tracked object backed by a Kalman state estimator.

    The estimator's mean vector is laid out as [x, vx, y, vy] and its
    covariance is indexed the same way (inferred from the reads below).
    """

    STATUS_BORN = 0
    STATUS_TRACKING = 1
    STATUS_LOST = 2

    def __init__(self, global_id, x, y, confidence, timestamp):
        self.id = global_id
        self.last_update = timestamp
        self.estimator = KalmanFilter(x, y, 1 - confidence)
        self.status = Tracklet.STATUS_BORN

    def get_tracklet_info(self):
        """Summarize the current estimate as a plain dict."""
        mu = self.estimator.mu
        sigma = self.estimator.sigma
        speed = sqrt(mu[1] ** 2 + mu[3] ** 2)
        heading = atan2(mu[1], mu[3])
        return {
            'id': self.id,
            'x': mu[0],
            'y': mu[2],
            'v': speed,
            'phi': heading,
            'sigma_x': sigma[0][0],
            'sigma_y': sigma[2][2],
            'status': self.status,
        }

    def estimate_position_at(self, timestamp):
        """Extrapolate (x, y) at `timestamp` without mutating filter state."""
        elapsed = timestamp - self.last_update
        mean = self.estimator.estimate_at(elapsed)[0]
        return mean[0], mean[2]

    def predict(self, timestamp):
        """Advance the estimator to `timestamp` and record it."""
        self.estimator.predict(math.fabs(self.last_update - timestamp))
        self.last_update = timestamp

    def update(self, detection, confidence):
        """Fold a new detection into the estimate."""
        self.estimator.correct(detection, 1 - confidence)

    def similarity(self, coordinates, timestamp):
        """Distance between the predicted position and `coordinates`."""
        return euclidean(self.estimate_position_at(timestamp), coordinates)

    def confidence(self):
        """Product of the positional variances (lower = more confident)."""
        return self.estimator.sigma[0][0] * self.estimator.sigma[2][2]
Example #4
0
class FilterNode():
    """Kalman-filter node that fuses external pose measurements into a
    12-state estimate (position, velocity, attitude, angular rates)."""

    def __init__(self):
        # Number of states
        self.n = 12

        # System state (column vector)
        self.X = np.matrix(np.zeros((self.n, 1)))

        # Initial state transition matrix; velocity/rate blocks decay
        # slightly (0.975) per step.
        self.F = np.asmatrix(np.eye(self.n))
        self.F[3:6, 3:6] = 0.975 * np.matrix(np.eye(3))
        self.F[9:12, 9:12] = 0.975 * np.matrix(np.eye(3))

        # Initial process (covariance) matrix — large, i.e. very uncertain.
        self.P = np.asmatrix(1.0e3 * np.eye(self.n))

        # Process noise level
        self.N = 1.0e-3

        # Measurement matrix/covariance for the optitrack pose:
        # measured positions map onto states 0..2, attitude onto 6..8.
        self.Hopti = np.matrix(np.zeros((6, self.n)))
        self.Hopti[0:3, 0:3] = np.matrix(np.eye(3))
        self.Hopti[3:6, 6:9] = np.matrix(np.eye(3))

        self.Ropti = np.matrix(np.zeros((6, 6)))
        self.Ropti[0:3, 0:3] = 1.0 * 1.0e-3 * np.matrix(np.eye(3))
        self.Ropti[3:6, 3:6] = 1.0 * 1.0e-2 * np.matrix(np.eye(3))

        # Underlying Kalman filter — seeded by the first measurement.
        self.kalmanF = KalmanFilter()
        self.isInit = False
        self.lastTime = None

    def state_update(self, T, R, mTime):
        """Feed one pose measurement into the filter.

        T: translation (3 components), R: quaternion (4 components),
        mTime: measurement timestamp in seconds.
        """
        quat = Quaternion(R[0], R[1], R[2], R[3])
        euler = quat2rpy(quat)

        # 6x1 measurement: position followed by roll/pitch/yaw.
        measurement = np.matrix([[T[0]], [T[1]], [T[2]], [euler[0]],
                                 [euler[1]], [euler[2]]])

        if not self.isInit:
            # First measurement: seed position and attitude states.
            self.X[0] = measurement[0]
            self.X[1] = measurement[1]
            self.X[2] = measurement[2]
            self.X[6] = measurement[3]
            self.X[7] = measurement[4]
            self.X[8] = measurement[5]

            self.kalmanF.initialize(self.X, self.F, self.P, self.N)
            self.lastTime = mTime
            self.isInit = True
            return

        # Predict forward by the elapsed time, then correct.
        dt = mTime - self.lastTime
        self.kalmanF.updateF(dt)
        self.kalmanF.updateQ()
        self.kalmanF.predict()

        self.kalmanF.correct(measurement, self.Hopti, self.Ropti)

        self.X = self.kalmanF.getState()
        self.lastTime = mTime

    def get_state(self):
        """Predict up to the current wall-clock time and return the state
        vector, or None if no measurement has initialized the filter."""
        if not self.isInit:
            return None

        timeNow = time.time()
        dt = timeNow - self.lastTime
        self.lastTime = timeNow

        # Prediction only — no measurement available here.
        self.kalmanF.updateF(dt)
        self.kalmanF.updateQ()
        self.kalmanF.predict()

        self.X = self.kalmanF.getState()
        return self.X
Example #5
0
 def correct(self, y):
     """Correct the filter with measurement y.

     Delegates to KalmanFilter.correct, passing self.v.var as the third
     argument (presumably this sensor's measurement variance — confirm
     against KalmanFilter.correct's signature).
     """
     KalmanFilter.correct(self, y, self.v.var)
Example #6
0
        red_rgb = (255, 0, 0)
        # light_blue_rgb = (173,216,230)

        # Draw the current detection box on the RGB frame.
        draw_rectangle(rgb_frame, x1, y1, x2, y2, green_rgb, 1)

        # Record the box center to build a trail of tracked points.
        cx, cy = get_center_rect(x1, y1, x2, y2)
        tracked_points.append([cx, cy])

        # Bound the trail length: once more than nth_point_fade points have
        # accumulated, drop the oldest point every nth_point_fade frames.
        if len(tracked_points) % nth_point_fade == 1 and len(
                tracked_points) > nth_point_fade:
            tracked_points.popleft()

        # Replay the Kalman filter over the stored trail:
        # predict first, then fold in the measured point.
        for _point in tracked_points:
            current_state_measurement = np.array([[_point[0]], [_point[1]]])

            tracker_point = kf.predict()
            _ = kf.correct(current_state_measurement)

            # draw_circle(rgb_frame, _point[0], _point[1], 3, red_rgb)
            # Draw the predicted (smoothed) point rather than the raw one.
            draw_circle(rgb_frame, tracker_point[0], tracker_point[1], 3,
                        red_rgb)

        # OpenCV display expects BGR channel order.
        bgr_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)

        cv2.imshow('frame', bgr_frame)

        # 'q' quits the playback loop.
        if cv2.waitKey(28) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
class FusionEKF:
	"""
		Gets inputs from multiple sources and uses a Kalman Filter to estimate the state of the system

		The state of the system is: 
			X = {pD, dotpD, attD, dotattD, pJ, dotpJ, headJ, GPSoffsetJ, compassOffsetJ}
		
		The inputs are:
			- Drone's position in local ENU
			- Drone's velocity in local ENU
			- Drone's attitude
			- Drone's angular velocities
			- Jetyak's position in local ENU
			- Jetyak's heading
			- Jetyak's position in the Drone's body frame
	"""
	
	def __init__(self, F, P, N, rate):
		# F: state transition matrix, P: initial covariance matrix,
		# N: process noise level, rate: update rate (Hz).
		self.kalmanF   = KalmanFilter()
		self.F         = F
		self.P         = P
		self.N         = N
		self.n         = np.shape(F)[0]
		self.X         = np.matrix(np.zeros((self.n, 1)))
		self.dt        = 1 / rate
		self.isInit    = False		

		# NaN marks state entries that have not been observed yet.
		# NOTE(review): initialize() below writes the Jetyak position into
		# X[6..8] and gates on isnan(X[6]), but X[6..8] is zero-initialized
		# here (only X[14:17] is NaN'd) — that gate is therefore always
		# False-safe only by accident; confirm the intended index ranges.
		self.X[0:3]    = np.nan  # Drone's position
		self.X[14:17]  = np.nan  # Jetyak's position
		self.X[19:24]  = np.nan  # Jetyak's GPS and heading offset

	def initialize(self, dataPoint):
		# Seed the state from raw measurements: absolute drone GPS ('dgps'),
		# absolute Jetyak GPS ('jgps'), and finally a relative 'tag'
		# measurement that fixes the offsets and starts the filter.
		if dataPoint.getID() == 'dgps':
			self.X[0] = dataPoint.getZ().item(0)
			self.X[1] = dataPoint.getZ().item(1)
			self.X[2] = dataPoint.getZ().item(2)
		elif dataPoint.getID() == 'jgps':
			self.X[6] = dataPoint.getZ().item(0)
			self.X[7] = dataPoint.getZ().item(1)
			self.X[8] = dataPoint.getZ().item(2)
		elif dataPoint.getID() == 'tag':
			# Only start once both absolute positions have been seen.
			if (not (np.isnan(self.X[0]) or 
					 np.isnan(self.X[6]))):

				# Offset = Jetyak GPS - drone GPS - measured relative position.
				self.X[11] = self.X[6] - self.X[0] - dataPoint.getZ().item(0)
				self.X[12] = self.X[7] - self.X[1] - dataPoint.getZ().item(1)
				self.X[13] = self.X[8] - self.X[2] - dataPoint.getZ().item(2)
				self.X[14] = dataPoint.getZ().item(3)

				self.kalmanF.initialize(self.X, self.F, self.P, self.N)
				self.kalmanF.updateF(self.dt)
				self.kalmanF.updateQ()
				self.kalmanF.predict()
				self.isInit = True

				print 'Colocalization filter initialized'

	def process(self, dataPoint):
		# Feed one measurement through the filter. Returns the correction
		# residual and covariance (r, P), or (None, None) while the filter
		# is still collecting its initialization measurements.
		if not self.isInit:
			self.initialize(dataPoint)

			return None, None
		else:
			r = None
			P = None

			# KF Correction Step (r is None when the measurement is
			# rejected by the chi-squared gate).
			r, P = self.kalmanF.correct(dataPoint.getZ(), dataPoint.getH(), dataPoint.getR(), dataPoint.getChi())
			
			if r is None:
				print "A ", dataPoint.getID(), " measurement was rejected"

			self.X = self.kalmanF.getState()

			return r, P

	def getState(self):
		# Predict one step ahead and return the current state estimate,
		# or None if the filter is not initialized yet.
		if self.isInit:
			self.kalmanF.predict()
			return self.X
		else:
			return None

	def resetFilter(self):
		# Drop all estimates and re-mark unobserved entries as NaN,
		# mirroring __init__.
		self.isInit    = False

		self.X         = np.matrix(np.zeros((self.n, 1)))
		self.X[0:3]    = np.nan  # Drone's position
		self.X[14:17]  = np.nan  # Jetyak's position
		self.X[19:24]  = np.nan  # Jetyak's GPS and heading offset
def main():
    """Detect, track and Kalman-predict a face in a benchmark video.

    Per frame: background-subtraction movement check, Haar-cascade detection
    to (re)initialize tracking, MOSSE tracking with periodic detection-based
    correction, and a Kalman filter that bridges frames where the object is
    lost. Runs until the video ends or ESC is pressed.
    """

    # open vid file
    cap = cv.VideoCapture('resources/benchmarkV9.mp4')

    # calculate time between frames of video
    dt = 1 / cap.get(cv.CAP_PROP_FPS)

    # initialize necessary objects
    cascade = cv.CascadeClassifier()
    backSub = cv.createBackgroundSubtractorKNN()

    detection = HAAR_Detection(cascade, 'detection', COLOR.BLUE,
                               SHAPE.RECTANGLE)
    tracking = Tracking('tracking', COLOR.GREEN, SHAPE.RECTANGLE, TM.MOSSE)
    kalman_filter = KalmanFilter(dt, 'kalman_filter', COLOR.WHITE,
                                 SHAPE.RECTANGLE)

    # necessary variables
    tracking_counter = 0
    frame_counter = 0
    ticks = 0

    # flags
    is_object_moving = None
    was_object_moving = None
    is_tracking_active = False

    # variables to adjust program
    detection_frame_rate = 8      # run a detection every N tracked frames
    tracking_detection_diff = 10  # max tracked/detected distance before reset

    # check video
    vid_end, frame = cap.read()
    if not vid_end:
        print('Error during loading video')
        sys.exit(0)

    # define window name
    cv.namedWindow('vid_frame')

    # check if cascade is loading properly
    if not cascade.load(
            cv.samples.findFile(
                'resources/cascades/haarcascade_frontalface_alt.xml')):
        print('Error loading cascade')
        exit(0)

    # main program loop
    while True:

        # read vid frame
        vid_end, frame = cap.read()

        # Check for vid end
        if vid_end:
            frame_to_draw = frame.copy()

            # detect movement on frame for kalman filter purposes
            is_object_moving = detect_move(frame, backSub)
            if is_object_moving:
                cv.putText(frame_to_draw, "Object is moving", (20, 60),
                           cv.FONT_HERSHEY_COMPLEX_SMALL, 1, COLOR.RED)
            else:
                cv.putText(frame_to_draw, "Object is not moving", (20, 60),
                           cv.FONT_HERSHEY_COMPLEX_SMALL, 1, COLOR.RED)

            if not is_tracking_active:

                # activate tracking if there is something detected
                detection.detect_and_draw(frame, frame_to_draw)
                detected_objects = detection.get_detected_objects()
                if len(detected_objects) > 0:
                    roi_x, roi_y, roi_width, roi_height = detected_objects[0]
                    tracking.init_tracker(
                        frame, (roi_x, roi_y, roi_width, roi_height))
                    is_tracking_active = True
            else:
                # if tracking is active
                # performing kalman filter prediction
                kalman_filter.predict_and_draw(frame_to_draw)
                # performing tracking
                tracking.track_and_draw(frame, frame_to_draw)
                # performing kalman filter prediction update
                kalman_filter.correct(tracking.get_trackedROI(),
                                      is_object_moving)
                # if tracking is active for indicated amount of frames perform detection to correct tracking error
                # BUG FIX: was `is` — identity comparison of ints only works
                # by the CPython small-int caching accident; use `==`.
                if tracking_counter == detection_frame_rate - 1:
                    detected_ROIs = (detection.detect(frame))
                    # check if there is any object detected, if not pass
                    if len(detected_ROIs) > 0:
                        # to ensure that detected and tracked objects are the same object, choose detected
                        # ROI that is the nearest to the tracked object
                        nearest_ROI_dict = tracking.find_nearest(detected_ROIs)

                        # if error is larger than maximal value reinitialise tracker (because there is no
                        # other option to reset tracker)
                        if nearest_ROI_dict[
                                'distance'] > tracking_detection_diff:
                            tracking.init_tracker(frame,
                                                  tuple(detected_ROIs[0]))

                # count frames without detection
                tracking_counter = (tracking_counter +
                                    1) % detection_frame_rate

            # re-acquire tracking from the Kalman prediction on the frame
            # where the object starts moving again
            if is_object_moving and not was_object_moving:
                if check_roi(tuple(kalman_filter.get_predicted_bb())):
                    tracking.init_tracker(
                        frame, tuple(kalman_filter.get_predicted_bb()))
            was_object_moving = is_object_moving
            frame_counter += 1

            cv.imshow('vid_frame', frame_to_draw)
            key = cv.waitKey(30)
            # press esc to exit
            if key == 27:
                print("User ended program with ESC button press")
                break
        else:
            print("Video ended")
            break
    cap.release()
    cv.destroyAllWindows()
Example #9
0
			lastYTilt = evDevValue
		elif evDevCode == WACOM_EVCODE_DISTANCE:
			lastDistance = evDevValue
		elif evDevCode == WACOM_EVCODE_PRESSURE:
			# Translate pen pressure into a left-button press/release around
			# a threshold; skipped entirely in debug mode.
			if not ONLY_DEBUG:
				if not mouseButtonPressed and evDevValue > SCREEN_DRAW_BUTTON_PRESSURE:
					mouse.press(Button.left)
					mouseButtonPressed = True
				elif mouseButtonPressed and evDevValue <= SCREEN_DRAW_BUTTON_PRESSURE:
					mouse.release(Button.left)
					mouseButtonPressed = False

			lastPressure = evDevValue

		if ONLY_DEBUG:
			print('XPos: %5d | YPos: %5d | XTilt: %5d | YTilt: %5d | Distance: %3d | Pressure: %4d' % (lastXPos, lastYPos, lastXTilt, lastYTilt, lastDistance, lastPressure))
		else:
			# Map tablet coordinates to screen coordinates.
			# NOTE(review): the axes are swapped (tablet X feeds screenY and
			# tablet Y feeds screenX), and both are mirrored here despite the
			# trailing comments — presumably a rotated tablet orientation;
			# confirm against the device setup.
			screenY = SCREEN_DRAW_AREA_FROM_X + (WACOM_WIDTH - lastXPos) * ratioX                 	# (X doesn't need to invert)
			screenX = SCREEN_DRAW_AREA_FROM_Y + (WACOM_HEIGHT - lastYPos) * ratioY  # (Y has to be inverted)

			# Smooth the jittery pen position with a Kalman filter: move the
			# cursor to the prediction, then correct with the measurement.
			currentMeasurement = np.array([[np.float32(screenX)], [np.float32(screenY)]])
			currentPrediction = kalman.predict()

			cmx, cmy = currentMeasurement[0], currentMeasurement[1]
			cpx, cpy = currentPrediction[0], currentPrediction[1]

			kalman.correct(currentMeasurement)

			mouseMoveAbs(int(np.round(cpx)), int(np.round(cpy)))
Example #10
0
class FilterNode():
    """ROS node that Kalman-filters VRPN (optitrack) pose measurements into a
    12-state estimate and republishes the filtered pose and body rates at a
    fixed rate from a background thread."""

    def __init__(self, ):
        rp.init_node("filter")

        # Get rate of state publisher (Hz)
        self.rate = rp.get_param("rate", 100)

        # Get VRPN client topic
        self.vrpn_topic = rp.get_param("vrpn_topic")

        # Number of states
        self.n = 12

        # System state column vector; layout (from statePublisher below):
        # [position(0..2), linear vel(3..5), rpy(6..8), angular rates(9..11)]
        self.X = np.matrix(np.zeros((self.n, 1)))

        # Initial State Transition Matrix
        self.F = np.asmatrix(np.eye(self.n))

        # Initial Process Matrix (large values = very uncertain prior)
        self.P = np.asmatrix(1.0e3 * np.eye(self.n))

        # Process Noise Level
        self.N = 1.0e-3

        # Initialize H and R matrices for optitrack pose:
        # measured positions map onto states 0..2, attitude onto 6..8.
        self.Hopti = np.matrix(np.zeros((6, self.n)))
        self.Hopti[0:3, 0:3] = np.matrix(np.eye(3))
        self.Hopti[3:6, 6:9] = np.matrix(np.eye(3))

        self.Ropti = np.matrix(np.zeros((6, 6)))
        self.Ropti[0:3, 0:3] = 1.0e-3 * np.matrix(np.eye(3))
        self.Ropti[3:6, 3:6] = 1.0e-5 * np.matrix(np.eye(3))

        # Initialize Kalman Filter (seeded by the first measurement)
        self.kalmanF = KalmanFilter()
        self.isInit = False
        self.lastTime = None

        # Set up Subscriber
        self.vrpn_sub = rp.Subscriber(self.vrpn_topic,
                                      PoseStamped,
                                      self.state_callback,
                                      queue_size=1)

        # Set up Publisher
        self.state_pub = rp.Publisher("opti_state/pose",
                                      PoseStamped,
                                      queue_size=1)
        self.state_rate_pub = rp.Publisher("opti_state/rates",
                                           TwistStamped,
                                           queue_size=1)

        # Create thread for publisher.
        # NOTE(review): self.X is written by state_callback and read by this
        # thread without a lock — confirm this is acceptable here.
        t = threading.Thread(target=self.statePublisher)
        t.start()

        # Blocks: the constructor does not return until ROS shuts down.
        rp.spin()

    def state_callback(self, msg):
        """Handle one PoseStamped: predict to the message timestamp, correct
        with the measured pose, and cache the new estimate. The very first
        message only seeds the filter."""
        attiQ = Quaternion(msg.pose.orientation.x, msg.pose.orientation.y,
                           msg.pose.orientation.z, msg.pose.orientation.w)

        rpy = quat2rpy(attiQ)

        # 6x1 measurement: position followed by roll/pitch/yaw
        pose = np.matrix([[msg.pose.position.x], [msg.pose.position.y],
                          [msg.pose.position.z], [rpy[0]], [rpy[1]], [rpy[2]]])

        if self.isInit:
            # Message timestamp in seconds
            timeNow = msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs
            dt = timeNow - self.lastTime

            # Prediction
            self.kalmanF.updateF(dt)
            self.kalmanF.updateQ()
            self.kalmanF.predict()

            # Correction
            self.kalmanF.correct(pose, self.Hopti, self.Ropti)

            # Update state
            self.X = self.kalmanF.getState()

            self.lastTime = timeNow

        else:
            # Initialize state from the first measurement
            self.X[0] = pose[0]
            self.X[1] = pose[1]
            self.X[2] = pose[2]
            self.X[6] = pose[3]
            self.X[7] = pose[4]
            self.X[8] = pose[5]

            # Initialize filter
            self.kalmanF.initialize(self.X, self.F, self.P, self.N)
            self.lastTime = msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs
            self.isInit = True

    def statePublisher(self):
        """Publish the cached estimate as PoseStamped + TwistStamped at
        self.rate Hz until shutdown. Runs on a background thread."""
        r = rp.Rate(self.rate)

        while not rp.is_shutdown():
            if self.isInit:
                timeNow = rp.Time.now()
                stateMsg = PoseStamped()
                stateMsg.header.stamp = timeNow
                stateMsg.header.frame_id = 'optitrack'

                # States 0..2: position
                stateMsg.pose.position.x = self.X.item(0)
                stateMsg.pose.position.y = self.X.item(1)
                stateMsg.pose.position.z = self.X.item(2)

                # States 6..8: attitude (roll, pitch, yaw) -> quaternion
                q = rpy2quat(self.X.item(6), self.X.item(7), self.X.item(8))

                stateMsg.pose.orientation.x = q.x
                stateMsg.pose.orientation.y = q.y
                stateMsg.pose.orientation.z = q.z
                stateMsg.pose.orientation.w = q.w

                twistMsg = TwistStamped()
                twistMsg.header.stamp = timeNow
                twistMsg.header.frame_id = 'optitrack'

                # States 3..5: linear velocity
                twistMsg.twist.linear.x = self.X.item(3)
                twistMsg.twist.linear.y = self.X.item(4)
                twistMsg.twist.linear.z = self.X.item(5)

                # States 9..11: angular rates
                twistMsg.twist.angular.x = self.X.item(9)
                twistMsg.twist.angular.y = self.X.item(10)
                twistMsg.twist.angular.z = self.X.item(11)

                self.state_pub.publish(stateMsg)
                self.state_rate_pub.publish(twistMsg)

            r.sleep()