Code Example #1
File: sort2.py Project: chetanchawla/Sort-Tracker
    def __init__(self, bbox):
        """
        Initialises a tracker using the initial bounding box.
        """
        #define constant velocity model
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0],
                              [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 0, 0, 1]])
        self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0],
                              [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])

        self.kf.R[2:, 2:] *= 10.
        # give high uncertainty to the unobservable initial velocities
        self.kf.P[4:, 4:] *= 1000.
        self.kf.P *= 10.
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01

        self.kf.x[:4] = convert_bbox_to_z(bbox)
        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []
        self.hits = 0
        self.hit_streak = 0
        self.age = 0
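The tracker above relies on a convert_bbox_to_z helper that is not shown in this snippet. A minimal sketch following the usual SORT convention (a [x1, y1, x2, y2] box mapped to centre, scale/area, and aspect ratio) would be:

import numpy as np

def convert_bbox_to_z(bbox):
    # Map [x1, y1, x2, y2] to the measurement vector [cx, cy, s, r],
    # where s is the box area (scale) and r its aspect ratio.
    w = bbox[2] - bbox[0]
    h = bbox[3] - bbox[1]
    x = bbox[0] + w / 2.
    y = bbox[1] + h / 2.
    s = w * h
    r = w / float(h)
    return np.array([x, y, s, r]).reshape((4, 1))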
Code Example #2
def ball_kf_noay(x, y, omega, v0, dt, r=0.5, q=0.02):

    g = 9.8  # gravitational acceleration; enters the model as the ddy state below
    f1 = KalmanFilter(dim_x=5, dim_z=2)

    f1.F = np.mat([
        [1, dt, 0, 0, 0],  # x   = x0 + dx*dt
        [0, 1, 0, 0, 0],   # dx  = dx
        [0, 0, 1, dt, 0],  # y   = y0 + dy*dt
        [0, 0, 0, 1, dt],  # dy  = dy0 + ddy*dt
        [0, 0, 0, 0, 1]    # ddy = -g
    ])

    f1.H = np.mat([[1, 0, 0, 0, 0], [0, 0, 1, 0, 0]])

    f1.R *= r
    f1.Q *= q

    omega = radians(omega)
    vx = cos(omega) * v0
    vy = sin(omega) * v0

    f1.x = np.mat([x, vx, y, vy, -g]).T

    return f1
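Assuming the filterpy API used above, one hypothetical way to drive this filter against a sequence of noisy (x, y) observations:

f1 = ball_kf_noay(x=0., y=15., omega=60., v0=100., dt=1 / 30.)
for z in measurements:  # hypothetical iterable of (x, y) position readings
    f1.predict()        # propagate the ballistic model one step
    f1.update(z)        # fold in the new measurement
    print(f1.x[0, 0], f1.x[2, 0])  # filtered x and y positions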
Code Example #3
File: wheelVel_filter.py Project: ArmSorawis/A2DR
    def __init__(self):
        rospy.init_node('wheelVel_filter_node', anonymous=True)

        self.odomFreq = float(rospy.get_param("~odom_freq", "20"))

        self.sub_vw_r = rospy.Subscriber("v_wheel_right", Float32,
                                         self.updateVW_R)
        self.sub_vw_l = rospy.Subscriber("v_wheel_left", Float32,
                                         self.updateVW_L)
        # self.sub_imu = rospy.Subscriber("/imu", Float32, self.updateIMU)

        # self.pub_raw_vel = rospy.Publisher("/raw_vel", Float32MultiArray, queue_size=1)
        self.pub_filter_vel = rospy.Publisher("filter_vel",
                                              Float32MultiArray,
                                              queue_size=1)
        # self.pub_filter_yaw = rospy.Publisher("/filter_yaw", Float32, queue_size=1)

        self.timer_v_wheel = rospy.Timer(rospy.Duration(1.0 / self.odomFreq),
                                         self.timerVWheelCB)

        self.filterR = KalmanFilter(1, 30)
        self.filterL = KalmanFilter(1, 30)
        # self.filterYaw = KalmanFilter(0.01, 2)

        self.filter_yaw = 0.0
        self.yaw = 0.0
        self.vw_r = 0.0
        self.vw_l = 0.0
        self.filter_vw_r = 0.0
        self.filter_vw_l = 0.0
Code Example #4
File: IMU.py Project: alyyousuf7/RollE
	def __init__(self):
		threading.Thread.__init__(self)

		try:
			self.mpu6050 = MPU6050()
			self.hmc5883l = HMC5883L()

			# Original Values
			self.roll = 0
			self.pitch = 0
			self.yaw = 0

			# Kalman Filter
			self.kalmanX = KalmanFilter()
			self.kalmanY = KalmanFilter()
			self.kalmanZ = KalmanFilter()
			self.kalAngleX = 0
			self.kalAngleY = 0
			self.kalAngleZ = 0
			
			# Complementary Filter
			self.compAngleX = 0
			self.compAngleY = 0
			self.compAngleZ = 0
			
			# Only from Gyroscope
			self.gyroAngleX = 0
			self.gyroAngleY = 0
			self.gyroAngleZ = 0
		except:
			print "Unable to create an instance for IMU."
			exit()
Code Example #5
 def __init__(self, theta, emax, cachelen):
     self.cache = []
     self.readings = []
     self.totaltrans = 0
     self.cacheLen = cachelen
     self.theta = theta
     self.emax = emax
     self.counter = 0
     self.correcteddata = []
     self.totalTransferredData = 0
     r_std, q_std = 1., 0.003
     self.kf = KalmanFilter(dim_x=2, dim_z=1)
     self.kf.x = np.array([[19], [0]])  # position, velocity
     self.kf.F = np.array([[1, 1], [0, 1]])
     self.kf.R = np.array([[r_std**2]])  # measurement noise
     self.kf.H = np.array([[1., 0.]])
     self.kf.P = np.diag([0.001**2, 0])
     self.kf.Q = Q_discrete_white_noise(2, 1, q_std**2)
     self.est_list = []
     self.counter = 0
     self.batCap = 3.6
     self.variance = 0
     self.V = 3.6
     self.Itx = ((17.4 + 18.8 + 0.5 + 0.426) / 32768)
     self.DetaT = 5
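This snippet relies on filterpy; the imports it assumes are:

import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise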
Code Example #6
def main():

    # Create opencv video capture object
    VideoCap = cv2.VideoCapture('video/randomball.avi')

    #Variable used to control the speed of reading the video
    ControlSpeedVar = 100  #Lowest: 1 - Highest:100

    HiSpeed = 100

    #Create KalmanFilter object KF
    #KalmanFilter(dt, u_x, u_y, std_acc, x_std_meas, y_std_meas)

    KF = KalmanFilter(0.1, 1, 1, 1, 0.1, 0.1)

    debugMode = 1

    while True:
        # Read frame
        ret, frame = VideoCap.read()
        if not ret:  # stop at end of stream
            break

        # Detect object
        centers = detect(frame, debugMode)

        # If centroids are detected then track them
        if (len(centers) > 0):

            # Draw the detected circle
            cv2.circle(frame, (int(centers[0][0]), int(centers[0][1])), 10,
                       (0, 191, 255), 2)

            # Predict
            (x, y) = KF.predict()
            # Draw a rectangle as the predicted object position
            cv2.rectangle(frame, (int(x) - 15, int(y) - 15),
                          (int(x) + 15, int(y) + 15), (255, 0, 0), 2)

            # Update
            (x1, y1) = KF.update(centers[0])

            # Draw a rectangle as the estimated object position
            cv2.rectangle(frame, (int(x1) - 15, int(y1) - 15),
                          (int(x1) + 15, int(y1) + 15), (0, 0, 255), 2)

            cv2.putText(frame, "Estimated Position",
                        (int(x1) + 15, int(y1) + 10), 0, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, "Predicted Position", (int(x) + 15, int(y)), 0,
                        0.5, (255, 0, 0), 2)
            cv2.putText(frame, "Measured Position",
                        (int(centers[0][0]) + 15, int(centers[0][1]) - 15), 0,
                        0.5, (0, 191, 255), 2)

        cv2.imshow('image', frame)

        if cv2.waitKey(2) & 0xFF == ord('q'):
            VideoCap.release()
            cv2.destroyAllWindows()
            break

        cv2.waitKey(HiSpeed - ControlSpeedVar + 1)
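The comment above documents the constructor KalmanFilter(dt, u_x, u_y, std_acc, x_std_meas, y_std_meas). A minimal 2-D constant-acceleration implementation compatible with that signature (a sketch under assumptions, not this project's actual class) might look like:

import numpy as np

class KalmanFilter:
    def __init__(self, dt, u_x, u_y, std_acc, x_std_meas, y_std_meas):
        self.u = np.array([[u_x], [u_y]])  # control input (accelerations)
        self.x = np.zeros((4, 1))          # state: [x, y, vx, vy]
        self.F = np.array([[1, 0, dt, 0],
                           [0, 1, 0, dt],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        self.B = np.array([[dt**2 / 2, 0],
                           [0, dt**2 / 2],
                           [dt, 0],
                           [0, dt]])
        self.H = np.array([[1, 0, 0, 0],
                           [0, 1, 0, 0]])
        self.Q = std_acc**2 * np.array([[dt**4 / 4, 0, dt**3 / 2, 0],
                                        [0, dt**4 / 4, 0, dt**3 / 2],
                                        [dt**3 / 2, 0, dt**2, 0],
                                        [0, dt**3 / 2, 0, dt**2]])
        self.R = np.diag([x_std_meas**2, y_std_meas**2])
        self.P = np.eye(4)

    def predict(self):
        # Project the state and covariance forward one step.
        self.x = self.F @ self.x + self.B @ self.u
        self.P = self.F @ self.P @ self.F.T + self.Q
        return self.x[0:2]  # predicted (x, y)

    def update(self, z):
        # Fold in a position measurement z = (x, y).
        z = np.reshape(z, (2, 1))
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)
        self.x = self.x + K @ (z - self.H @ self.x)
        self.P = (np.eye(4) - K @ self.H) @ self.P
        return self.x[0:2]  # estimated (x, y)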
Code Example #7
def example1():
    """
    Test Kalman Filter implementation
    :return:
    """
    dt = 1.0 / 60
    F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
    H = np.array([1, 0, 0]).reshape(1, 3)
    Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]])
    R = np.array([0.5]).reshape(1, 1)

    x = np.linspace(-10, 10, 100)
    measurements = -(x**2 + 2 * x - 2) + np.random.normal(0, 2, 100)
    kf = KalmanFilter(F=F, H=H, Q=Q, R=R)
    predictions = []

    for z in measurements:
        predictions.append(np.dot(H, kf.predict())[0])
        kf.update(z)

    plt.plot(range(len(measurements)), measurements, label='Measurements')
    plt.plot(range(len(predictions)),
             np.array(predictions),
             label='Kalman Filter Prediction')
    plt.legend()
    plt.show()
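example1() drives a small bespoke KalmanFilter class. A minimal implementation consistent with the calls above (predict() returning the prior state, update(z) folding in a scalar measurement) could be, as an assumption:

import numpy as np

class KalmanFilter:
    def __init__(self, F, H, Q, R):
        self.F, self.H, self.Q, self.R = F, H, Q, R
        n = F.shape[0]
        self.x = np.zeros((n, 1))  # state estimate
        self.P = np.eye(n)         # estimate covariance

    def predict(self):
        # Project the state and covariance forward one step.
        self.x = self.F @ self.x
        self.P = self.F @ self.P @ self.F.T + self.Q
        return self.x

    def update(self, z):
        # Standard Kalman update with measurement z.
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)
        self.x = self.x + K @ (z - self.H @ self.x)
        self.P = (np.eye(self.P.shape[0]) - K @ self.H) @ self.P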
Code Example #8
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        self.pos_goal = pos_goal

        # YOUR CODE AFTER THIS
        #-------------------------------------------#
        self.time = rospy.get_time()
        self.controlOut = (0.0, 0.0, False)
        self.count_noMeasurement = 0

        #-------------------------------------------#
        self.markers = world_map
        self.idx_target_marker = 0

        # Calculate the optimal path
        # From pos_init to pos_goal
        self.path_2D = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                                 pos_goal)
        self.idx_path = 0
        self.size_path = self.path_2D.shape[0]
        print "path.shape[0]", self.size_path
        # Generate the 3D path (include "theta")
        self.path = np.zeros((self.size_path, 3))
        theta = 0.0
        for idx in range(self.size_path - 1):
            delta = self.path_2D[(idx + 1), :] - self.path_2D[idx, :]
            theta = atan2(delta[1], delta[0])
            if theta > np.pi:
                theta -= np.pi * 2
            elif theta < -np.pi:
                theta += np.pi * 2
            self.path[idx, :] = np.concatenate(
                (self.path_2D[idx, :], np.array([theta])), axis=1)
        self.path[self.size_path - 1, 0:2] = self.path_2D[self.size_path - 1,
                                                          0:2]
        self.path[self.size_path - 1, 2] = pos_goal[2]  # theta
        #
        self.path[0, 2] = pos_init[2]  # theta
        print "3D path:"
        print self.path

        # Uncomment as completed
        # Kalman filter
        self.kalman_filter = KalmanFilter(world_map)
        self.kalman_filter.mu_est = pos_init  # 3*pos_init # For test

        # Differential drive
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
        self.wayPointFollowing = wayPointFollowing(max_speed, max_omega)

        #
        self.task_done = False
Code Example #9
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # TODO for student: Comment this when running on the robot
        #self.robot_sim = RobotSim(world_map, occupancy_map, pos_init, pos_goal,
        #	                            max_speed, max_omega, x_spacing, y_spacing)
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS

        # speed control variables
        self.v = 0.1  # allows persistent cmds through detection misses
        self.omega = -0.1  # allows persistent cmds through detection misses
        self.last_detect_time = rospy.get_time()  #TODO on bot only
        self.missed_vision_debounce = 1

        self.start_time = 0

        # generate the path assuming we know our start location, goal, and environment
        self.path = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                              pos_goal)
        self.path_idx = 0
        self.mission_complete = False
        self.carrot_distance = 0.22

        # Uncomment as completed
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
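For reference, inputs matching the docstring format above could be constructed like this (values purely illustrative):

import numpy as np

world_map = np.array([[1.0, 0.5, 0.0, 1],       # one row per AprilTag: (x, y, theta, id)
                      [2.0, 1.5, np.pi / 2, 2]])
occupancy_map = np.array([[0, 0, 1],
                          [0, 0, 0]])            # 1 marks an obstacle cell
x_spacing, y_spacing = 0.3, 0.3                  # metres between grid columns/rows
pos_init = np.array([[0.5], [0.5], [0.0]])       # (x, y, theta) as a 3x1 array
pos_goal = np.array([[2.5], [1.5], [0.0]])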
Code Example #10
File: KFWolinFilter.py Project: next-exp/pykalman
    def __init__(self, name, P0=2.913527586963954):
        """
        inits the fitter
        """

        KalmanFilter.__init__(self, name)

        self.P = P0  # in MeV
        lgx.info("KFWolinFilter__init__ -> P0 ={0}".format(P0))
Code Example #11
 def run(self):
     obj_xyzvels = self.get_obj_list()
     for bd_id, xyzvel in obj_xyzvels.items():
         cur_pos = xyzvel[['center_x', 'center_y']].iloc[0].values
         cur_vel = xyzvel[['speed_x', 'speed_y']].iloc[0].values
         pred_duration = 5
         kf = KalmanFilter()
         kf.motion_model(cur_pos, cur_vel, pred_duration)
     return
Code Example #12
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # TODO for student: Comment this when running on the robot
        self.robot_sim = RobotSim(world_map, occupancy_map, pos_init, pos_goal,
                                  max_speed, max_omega, x_spacing, y_spacing)
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        #self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS
        self.pos_goal = pos_goal
        self.world_map = world_map
        self.vel = np.array([0., 0.])
        self.imu_meas = np.array([])
        self.meas = []
        self.max_speed = max_speed
        self.max_omega = max_omega
        self.goals = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                               pos_goal)
        # print(self.goals)
        self.total_goals = self.goals.shape[0]
        self.cur_goal = 2
        self.end_goal = self.goals.shape[0] - 1
        self.est_pose = None

        # Uncomment as completed
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
Code Example #13
 def __init__(self, detection, trackId):
     super(Tracks, self).__init__()
     self.KF = KalmanFilter()
     self.KF.predict()
     self.KF.correct(np.matrix(detection).reshape(2, 1))
     self.trace = deque(maxlen=500)
     self.nframe = deque(maxlen=500)
     self.prediction = detection.reshape(1, 2)
     self.trackId = trackId
     self.skipped_frames = 0
Code Example #14
    def __init__(self, initialPosition, initialWidth, initialHeight, frame,
                 parametersNew):

        #########################################

        #########################################
        self.initFrame = frame
        self.initPos = initialPosition
        self.initW = initialWidth
        self.initH = initialHeight
        self.KM = KalmanFilter()
        self.MF = MaskingFilter()
        self.KM.setStatePost(
            np.array([initialPosition[0], initialPosition[1], 0.,
                      0.]).reshape(4, 1))
        self.selectionWidth = initialWidth
        self.selectionHeight = initialHeight

        self.prevFrameGray = None
        self.frameCounter = 0  # Shape = {tuple}: (x, 1, 2)
        # For example: [[[x1 y1]]\n\n [[x2 y2]]\n\n [[x3 y3]]]
        # i.e. a matrix of x rows and 1 column, where each element
        # of the single column is an [x y] coordinate.
        if self.MF.mask is not self.MF.maskingType["FILTER_OFF"]:
            self.MF.calculateNewMask(
                frame, frame[int(initialPosition[1] -
                                 initialHeight / 2):int(initialPosition[1] +
                                                        initialHeight / 2),
                             int(initialPosition[0] -
                                 initialWidth / 2):int(initialPosition[0] +
                                                       initialWidth / 2)])
            frame = self.MF.filterFrame(frame)

        self.prevFrameGray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

        self.SC = Searcher(self.initFrame, initialHeight, initialWidth,
                           initialPosition[0], initialPosition[1],
                           cv.cvtColor(frame, cv.COLOR_BGR2GRAY))
        self.SC.features, self.SC.trackingError = self.SC.ST.recalculateFeatures(
            self.prevFrameGray[int(initialPosition[1] -
                                   initialHeight / 2):int(initialPosition[1] +
                                                          initialHeight / 2),
                               int(initialPosition[0] -
                                   initialWidth / 2):int(initialPosition[0] +
                                                         initialWidth / 2)])
        self.SC.features = self.SC.featureTranslate(
            initialPosition[0] - initialWidth / 2,
            initialPosition[1] - initialHeight / 2, self.SC.features)
        self.SC.LK.prevFeatures = self.SC.features

        # OPTIMIZATION
        self.bins_var = 1
        self.kernel_blur_var = 1
        self.mask_blur_var = 1
        self.low_pth_var = 200
Code Example #15
File: orbit_analysis.py Project: skyler237/ecen774
    def __init__(self, dt):
        self.filter = KalmanFilter()

        self.filter.F = np.kron(np.array([[1., dt], [0., 1.]]), np.eye(2))
        self.filter.H = np.kron(np.array([1., 0.]), np.eye(2))

        self.filter.P = np.diag([1., 1., 1e-9, 1e-9])
        self.filter.Q = np.kron(np.diag([1e-9] * 2), np.eye(2))
        self.filter.R = np.diag([5.0] * 2)

        self.filter.xhat = np.array([0., 0., 0., 0.])
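The np.kron calls above build a standard 2-D constant-velocity model; expanded, with the state ordered [x, y, vx, vy]:

import numpy as np

dt = 0.1  # example time step
F = np.kron(np.array([[1., dt], [0., 1.]]), np.eye(2))
# F == [[1, 0, dt,  0],
#       [0, 1,  0, dt],
#       [0, 0,  1,  0],
#       [0, 0,  0,  1]]
H = np.kron(np.array([1., 0.]), np.eye(2))
# H == [[1, 0, 0, 0],
#       [0, 1, 0, 0]]  (measure position only)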
Code Example #16
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # TODO for student: Comment this when running on the robot
        self.robot_sim = RobotSim(world_map, occupancy_map, pos_init, pos_goal,
                                  max_speed, max_omega, x_spacing, y_spacing)
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        #self.ros_interface = ROSInterface(t_cam_to_body)

        # Uncomment as completed
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
        plan = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                         pos_goal)
        self.state_tol = 0.1
        self.path = plan.tolist()
        print("Path: ", self.path, type(self.path))
        self.path.reverse()
        self.path.pop()
        self.state = pos_init
        self.goal = self.path.pop()
        self.x_offset = x_spacing
        self.vw = (0, 0, False)
        # self.goal[0] += self.x_offset/2
        # self.goal[1] += y_spacing
        print("INIT GOAL: ", self.goal)
Code Example #17
def main(args):
    assert args.path_to_video is not None
    KFilter = KalmanFilter(args.Q, args.R)
    MAnly = MotionAnalyzer(args.path_to_video)
    Vcorr = VideoCorrector(args.path_to_video)

    Tx, Ty, _ = MAnly.process()
    res_x, res_y = KFilter.filter(Tx), KFilter.filter(Ty)
    delta_x = np.array(Tx) - np.array(res_x)
    delta_y = np.array(Ty) - np.array(res_y)
    Vcorr.correct(zip(delta_x, delta_y))
Code Example #18
 def predict(self, obj_xyzvels):
     #TODO: Write a function that, given the data, predicts the position of an object over a given time period.
     res = {}
     for bd_id, xyzvel in obj_xyzvels.items():
         cur_pos = xyzvel[['center_x', 'center_y']].iloc[0].values
         cur_vel = xyzvel[['speed_x', 'speed_y']].iloc[0].values
         cur_time = xyzvel[['frame_id']].iloc[0].values[0]
         pred_duration = 5
         kf = KalmanFilter()
         pred_xys = kf.motion_model(cur_pos, cur_vel, pred_duration,
                                    cur_time)
         res[bd_id] = pred_xys
     return res
Code Example #19
class AmclAuxLocalization(object):
    """
    Class used to interface with the rover. Gets sensor measurements through ROS subscribers,
    and transforms them into the 2D plane, and publishes velocity commands.
    """
    def __init__(self, camera_frame_id, base_frame_id, odom_frame_id, map_frame_id, ns_tag_detector, tag_pose_id, pos_init):
        """
        Initialize the class
        """
        # Handles all the ROS related items
        self.ros_interface = ROSInterface(camera_frame_id, base_frame_id, odom_frame_id, map_frame_id, ns_tag_detector)
        self.time = rospy.get_time()
        # YOUR CODE AFTER THIS
        #-------------------------------------------#
        self.markers = tag_pose_id
        self.idx_target_marker = 0
        #-------------------------------------------#
        # Kalman filter
        self.kalman_filter = KalmanFilter(tag_pose_id)
        self.kalman_filter.mu_est = pos_init # 3*pos_init # For test


    def process_measurements(self):
        """
        YOUR CODE HERE
        This function is called at 10Hz
        """
        # tag_measurement = self.ros_interface.get_measurements()
        # tag_measurement = self.ros_interface.get_measurements_tf()
        tag_measurement = self.ros_interface.get_measurements_tf_directMethod()
        # amcl_pose = self.ros_interface.get_amcl_pose()

        print '-----------------'
        print 'tag:'
        print tag_measurement

        #----------------------------------------#
        # self.time = rospy.get_time()
        # print "self.time",self.time

        # Kalman filter
        # self.kalman_filter.step_filter(self.controlOut[0], (-1)*imu_meas, tag_measurement, rospy.get_time())
        self.kalman_filter.step_filter_by_amcl(self.ros_interface, tag_measurement, rospy.get_time())
        """
        print "After--"
        print "mu_est",self.kalman_filter.mu_est
        print "angle_est =", (self.kalman_filter.mu_est[2,0]*180.0/np.pi), "deg"
        """
        #
        return
Code Example #20
    def __init__(self, world_map,occupancy_map, pos_init, pos_goal, max_speed, max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS
        
        # Uncomment as completed
        self.markers = world_map
        self.vel = np.array([0, 0])
        self.imu_meas = np.array([])
        self.meas = []
        
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        #self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS
        
        # Uncomment as completed
        self.goals = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init, pos_goal)
        # self.total_goals = self.goals.shape[0]
        self.cur_goal = 2
        self.end_goal = self.goals.shape[0] - 1
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
Code Example #21
    def SetMeasurements(self, runs, hits):
        KalmanFilter.SetMeasurements(self, runs, hits)

        totalE = sum(hit.State[-1] for hit in hits)
        for i, hit in enumerate(hits):
            self.Track.GetNode(i).ParticleEnergy = totalE
            totalE -= hit.State[-1]
Code Example #22
def LimpiezaDatosCSV(ruta):
    i = 0

    valores_baliza1 = []
    valores_baliza2 = []
    valores_baliza3 = []
    with open(ruta, newline='') as File:
        reader = csv.reader(File)
        for row in reader:
            if i > 0:
                valores_baliza1 = valores_baliza1 + [float(row[6])]
                valores_baliza2 = valores_baliza2 + [float(row[7])]
                valores_baliza3 = valores_baliza3 + [float(row[8])]

            i = i + 1

    # Once we have all the power readings from every beacon, drop the zeros caused
    # by beacon disconnections, then apply a Kalman filter and an average to obtain
    # a single power value per beacon.

    Balizas = [valores_baliza1, valores_baliza2, valores_baliza3]

    RSSI_balizas = []

    for baliza in Balizas:
        repeticiones = baliza.count(0.0)

        for i in range(repeticiones):
            baliza.remove(0.0)

        valores_filtrados = KalmanFilter(baliza)

        RSSI_balizas = RSSI_balizas + [sum(valores_filtrados) / len(valores_filtrados)]

    return RSSI_balizas
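KalmanFilter here takes the raw RSSI list and returns a filtered list. A minimal scalar sketch of such a function, with assumed process/measurement variances (Q and R are illustrative, not this project's values):

def KalmanFilter(valores, Q=1e-3, R=4.0):
    # Scalar Kalman smoothing of an RSSI series under a random-walk model.
    x = valores[0]  # initial state estimate
    P = 1.0         # initial estimate variance
    salida = []
    for z in valores:
        P = P + Q            # predict
        K = P / (P + R)      # Kalman gain
        x = x + K * (z - x)  # update with the measurement z
        P = (1 - K) * P
        salida.append(x)
    return salida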
Code Example #23
def test_ball_path():

    y = 15
    x = 0
    omega = 0.
    noise = [1, 1]
    v0 = 100.
    ball = BallPath(x0=x, y0=y, omega_deg=omega, velocity=v0, noise=noise)
    dt = 1

    f1 = KalmanFilter(dim_x=6, dim_z=2)
    dt = 1 / 30.  # time step

    ay = -.5 * dt**2

    f1.F = np.mat([
        [1, dt, 0, 0, 0, 0],  # x=x0+dx*dt
        [0, 1, dt, 0, 0, 0],  # dx = dx
        [0, 0, 0, 0, 0, 0],  # ddx = 0
        [0, 0, 0, 1, dt, ay],  # y = y0 +dy*dt+1/2*g*dt^2
        [0, 0, 0, 0, 1, dt],  # dy = dy0 + ddy*dt
        [0, 0, 0, 0, 0, 1]
    ])  # ddy = -g

    f1.H = np.mat([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])

    f1.R = np.eye(2) * 5
    f1.Q = np.eye(6) * 0.

    omega = radians(omega)
    vx = cos(omega) * v0
    vy = sin(omega) * v0

    f1.x = np.mat([x, vx, 0, y, vy, -9.8]).T

    f1.P = np.eye(6) * 500.

    z = np.mat([[0, 0]]).T
    count = 0
    markers.MarkerStyle(fillstyle='none')

    np.set_printoptions(precision=4)
    while f1.x[3, 0] > 0:
        count += 1
        #f1.update (z)
        f1.predict()
        plt.scatter(f1.x[0, 0], f1.x[3, 0], color='green')
Code Example #24
 def __init__(self, tank_index, target_color, state):
     self.state = state
     self.target_color = target_color
     self.tank_index = tank_index
     self.pdc = PDController()
     self.timer_id = Timer.add_task(self.update)
     self.kvis = KalmanVisualizer(800, 800)
     self.kfilter = KalmanFilter()
Code Example #25
class RobotControl(object):
    """
    Class used to interface with the rover. Gets sensor measurements through ROS subscribers,
    and transforms them into the 2D plane, and publishes velocity commands.
    """
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        self.kalman_filter = KalmanFilter(world_map)
        print("INITSTATE", self.kalman_filter.state)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
        self.vel = 0
        self.omega = 0
        self.curInd = 0

        self.path = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                              pos_goal)
        print(self.path)
        self.curGoal = self.path[0]
        self.done = False

    def process_measurements(self):
        """ 
        This function is called at 60Hz
        """
        meas = self.ros_interface.get_measurements()
        print("Measurements", meas)
        imu_meas = self.ros_interface.get_imu()
        print(imu_meas)
        updatedPosition = self.kalman_filter.step_filter(
            self.vel, self.omega, imu_meas, meas)
        print(np.linalg.norm(self.curGoal - updatedPosition[0:2]))
        if ((np.abs(self.curGoal[0] - updatedPosition[0]) > 0.1)
                or (np.abs(self.curGoal[1] - updatedPosition[1]) > 0.1)):
            (v, omega, done) = self.diff_drive_controller.compute_vel(
                updatedPosition, self.curGoal)
            self.vel = v
            self.omega = omega
            print("commanded vel:", v, omega)
            self.ros_interface.command_velocity(v, omega)
        else:
            print("updating")
            self.curInd = self.curInd + 1
            if self.curInd < len(self.path):
                self.curGoal = self.path[self.curInd]
            else:
                self.done = True

        updatedPosition.shape = (3, 1)

        return
Code Example #26
 def __init__(self, **filter_kwargs):
     """Initialize the class with the Kalman filter kwargs.  Inherits the
     RaceTrack() class variables so that Kalman filter estimates and 
     uncertainty can be plotted on the race track.
     """
     super().__init__()  # Inherit RaceTrack class variables
     self.kalman_rotation = self.path_angles.copy()
     self.init_rotation()  # Calculate pixel-by-pixel car orientation
     self.kf = KalmanFilter(**filter_kwargs)  # Instantiate Kalman filter
     # Get initial estimate and estimate uncertainty values
     self.estimate = (self.kf.x[0, 0], self.kf.x[1, 0])
     self.estimate_uncertainty = (self.kf.P[0, 0], self.kf.P[1, 1],
                                  self.kf.P[2, 2], self.kf.P[3, 3])
     self.estimates = [self.estimate]
     self.uncertainties = [self.estimate_uncertainty]
     # Transparent surface used to blit estimate uncertainty circle
     self.surface = pygame.Surface((self.game_width, self.game_height),
                                   pygame.SRCALPHA)
Code Example #27
File: gps.py Project: mfkiwl/pyGpsReceiver-sdr
    def run(self):

        # Create the Kalman filter tracker
        A = np.eye(6)
        deltaT = np.array(2e-3)
        A2 = np.eye(6, k=3) * deltaT
        A = A + A2
        R = np.eye(3) * 100**2
        H = np.eye(3, 6)
        P = np.eye(6) * 100**2
        B = np.eye(6)

        kalman = KalmanFilter(A, R, H, P, B)

        first = 0
        oldDataNdx = np.zeros(1, dtype=np.uint64)
        tmp = np.array(np.zeros(6))

        while (1):

            navData = navQueue.get()
            dataNdx = navQueue.get()

            numMeas = np.size(navData, 1)

            if first == 0:
                kalman.initialize(np.hstack((navData[0:3, 0], 0, 0, 0)))

            # Loop over each position measurement
            for measNdx in range(0, numMeas):
                if measNdx == 0 and first != 0:
                    deltaT = np.array(float(dataNdx - oldDataNdx) * 1e-3)
                    A2 = np.eye(6, k=3) * deltaT
                    kalman.A = A + A2
                else:
                    deltaT = np.array(1e-3)
                    A2 = np.eye(6, k=3) * deltaT
                    kalman.A = A + A2

                kalman.run(navData[0:3, measNdx])
                tmp = np.vstack((tmp, kalman.x))

            print('%i' % (dataNdx - oldDataNdx))
            oldDataNdx = deepcopy(dataNdx)

            navQueue.task_done()
            navQueue.task_done()

            first = first + 1
            # Convert from ECEF to lat/lon
            (lat, lon, height) = ecef2lla(kalman.x[0], kalman.x[1],
                                          kalman.x[2])
            mapQueue.put((lat, lon))
Code Example #28
 def __init__(self):
     self.validSpeedUpdated = False
     self.lastPoint = 0.
     self.distanceThreshold = 0.05
     self.speedUpdateTime = []
     self.speedWindowSize = 10
     self.keyMeasPairs = deque(maxlen=80)
     self.speedWindow = []
     self.filtedRange = []
     self.curSpeed = 0.0
     self.speedRecord = []
     self.rangeKF = KalmanFilter(dim_x=1, dim_z=1)
     self.rangeKF.x = np.array([0.])
     self.rangeKF.F = np.array([[1.]])
     self.rangeKF.H = np.array([[1.]])
     self.rangeKF.P *= 100.
     self.rangeKF.R = 0.1
     self.rangeKF.Q = 0.001
     self.rangeKF.initialized = False
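A hypothetical per-measurement step for this 1-D range filter (measured_range being a new reading) would then be:

self.rangeKF.predict()
self.rangeKF.update(np.array([measured_range]))
self.filtedRange.append(self.rangeKF.x[0])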
Code Example #29
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        self.kalman_filter = KalmanFilter(world_map)
        print("INITSTATE", self.kalman_filter.state)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
        self.vel = 0
        self.omega = 0
        self.curInd = 0

        self.path = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init,
                              pos_goal)
        print(self.path)
        self.curGoal = self.path[0]
        self.done = False
Code Example #30
File: track.py Project: afcarl/pendulum_tracking
    def __init__(self):
        self.IMAGE_WIDTH = 320
        self.IMAGE_HEIGHT = 240

        # PID parameters
        self.KP = 0.035
        self.KI = 0.045
        self.KD = 0.005

        self.prev_errx = 0
        self.prev_erry = 0

        self.integral_x = 0
        self.integral_y = 0

        self.curr_yaw = 90
        self.curr_pitch = 90

        self.last_obs = time.time()

        # Open the camera
        self.capture = cv.CreateCameraCapture(0)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH,
                              self.IMAGE_WIDTH)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT,
                              self.IMAGE_HEIGHT)

        # Union-Find Connected Component Labeling
        self.detector = BlobDetector()

        # Kalman filter
        self.filter = KalmanFilter()

        # Open the serial port to the arduino
        print 'Opening serial port  ...'
        self.serial = serial.Serial('/dev/ttyACM0', 19200)
        time.sleep(2)
        print 'Moving servos to initial position ...'
        self.serial.write('90s90t')
        time.sleep(1)
Code Example #31
File: orbit_analysis.py Project: skyler237/ecen774
class VelocityFilter:
    def __init__(self, dt):
        self.filter = KalmanFilter()

        self.filter.F = np.kron(np.array([[1., dt], [0., 1.]]), np.eye(2))
        self.filter.H = np.kron(np.array([1., 0.]), np.eye(2))

        self.filter.P = np.diag([1., 1., 1e-9, 1e-9])
        self.filter.Q = np.kron(np.diag([1e-9] * 2), np.eye(2))
        self.filter.R = np.diag([5.0] * 2)

        self.filter.xhat = np.array([0., 0., 0., 0.])

    def predict(self):
        return self.filter.predict()

    def update(self, meas):
        return self.filter.update(meas)

    def run(self, meas):
        self.update(meas)
        return self.predict()
Code Example #32
    def __init__(self, markers, occupancy_map, pos_init, pos_goal, max_speed,
                 min_speed, max_omega, x_spacing, y_spacing, t_cam_to_body,
                 mode):
        """
        Initialize the class
        """
        # plan a path around obstacles using dijkstra's algorithm
        print('Planning path...')
        path = findShortestPath(occupancy_map,
                                x_spacing,
                                y_spacing,
                                pos_init[0:2],
                                pos_goal[0:2],
                                dilate=2)
        print('Done!')
        self.path_manager = PathManager(path)
        self.kalman_filter = KalmanFilter(markers, pos_init)
        self.diff_drive_controller = DiffDriveController(
            max_speed, min_speed, max_omega)

        if 'HARDWARE' in mode:
            # Handles all the ROS related items
            self.ros_interface = ROSInterface(t_cam_to_body)

        elif 'SIMULATE' in mode:
            self.robot_sim = RobotSim(markers, occupancy_map, pos_init,
                                      pos_goal, max_speed, max_omega,
                                      x_spacing, y_spacing,
                                      self.path_manager.path, mode)

        self.user_control = UserControl()
        self.vel = 0  # save velocity to use for kalman filter
        self.goal = self.path_manager.getNextWaypoint()  # get first waypoint

        # for logging position data to csv file
        self.stateSaved = []
        self.tagsSaved = []
        self.waypoints = []
Code Example #33
    def __init__(self, world_map, occupancy_map, pos_init, pos_goal, max_speed,
                 max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        self.kalman_filter = KalmanFilter(world_map)
        self.prev_v = 0
        self.est_pose = np.array([[0], [0], [0]])
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
        self.prev_imu_meas = np.array([[0], [0], [0], [0], [0]])
Code Example #34
    def initialise_filter(self):
        filter_ = KalmanFilter(
            dim_x=4,
            dim_z=2)  # need to instantiate every time to reset all fields
        filter_.F = self.matrix_a
        filter_.H = self.matrix_c
        filter_.B = self.matrix_g

        if KalmanParams.use_noise_in_kalman:
            u = np.random.normal(loc=0, scale=KalmanParams.var_kalman, size=2)
            filter_.u = u
        # u = Q_discrete_white_noise(dim=2, var=1)

        filter_.Q = self.q
        filter_.R = self.r
        return filter_
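A hypothetical stepping loop for a filter built this way (note that filterpy applies the control term only when u is passed to predict()):

kf = self.initialise_filter()
u = getattr(kf, 'u', None)  # control input; set above only when noise is enabled
kf.predict(u=u)             # x' = Fx + Bu when u is supplied
kf.update(z)                # z: 2-D measurement, e.g. np.array([px, py])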
Code Example #35
    def __init__(self, world_map,occupancy_map, pos_init, pos_goal, max_speed, max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # TODO for student: Comment this when running on the robot 
        self.markers = world_map
        self.robot_sim = RobotSim(world_map, occupancy_map, pos_init, pos_goal,
                                  max_speed, max_omega, x_spacing, y_spacing)
        self.vel = np.array([0, 0])
        self.imu_meas = np.array([])
        self.meas = []
        
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        #self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS
        
        # Uncomment as completed
        self.goals = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init, pos_goal)
        self.total_goals = self.goals.shape[0]
        self.cur_goal = 1
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)
Code Example #36
def kalman_test(vid_src):
    _, frame = vid_src.read()
    used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    model = KalmanFilter(used_frame, 60)

    # applying background detection
    while frame is not None:

        used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        fg = model.apply(used_frame)

        cv2.imshow('img', used_frame)
        cv2.imshow('fg', fg)

        prev_frame = np.copy(frame)
        _, frame = vid_src.read()

        # print model.get_background_model().get_density_range(used_frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    pass
Code Example #37
 def __init__(self, camera_frame_id, base_frame_id, odom_frame_id, map_frame_id, ns_tag_detector, tag_pose_id, pos_init):
     """
     Initialize the class
     """
     # Handles all the ROS related items
     self.ros_interface = ROSInterface(camera_frame_id, base_frame_id, odom_frame_id, map_frame_id, ns_tag_detector)
     self.time = rospy.get_time()
     # YOUR CODE AFTER THIS
     #-------------------------------------------#
     self.markers = tag_pose_id
     self.idx_target_marker = 0
     #-------------------------------------------#
     # Kalman filter
     self.kalman_filter = KalmanFilter(tag_pose_id)
     self.kalman_filter.mu_est = pos_init # 3*pos_init # For test
Code Example #38
class KalmanAgent(Agents.Agent):

    def __init__(self, tank_index, target_color, state):
        self.state = state
        self.target_color = target_color
        self.tank_index = tank_index
        self.pdc = PDController()
        self.timer_id = Timer.add_task(self.update)
        self.kvis = KalmanVisualizer(800, 800)
        self.kfilter = KalmanFilter()

    def update(self):
        # update state
        self.state.update_mytanks()
        self.state.update_othertanks()

        tank = self.state.mytanks['0']
        angvel = self.getAngvelToTarget(tank)

        tank.set_angvel(angvel)

    def getAngvelToTarget(self, tank):
        """
        Calculates the vector that we should be facing to kill the target, then uses the PDController to find the
        angular velocity required to face the target.
        This calculation is performed like this:
        1. Use the Kalman Filter to get the predicted location of the target.
        2. Add to that vector the distance that the target will travel while the bullet fires.
        3. Pass the vector into the PDController to find the angular velocity that we need to face the right way
        :return float: the angular velocity that we need in order to correctly face the target.
        """
        # get newest data about target
        target = self.state.othertanks[self.target_color][0]

        # get the angle to the target

        # step 1: Kalman Filter
        ut, width, height = self.kfilter.update(npy.array([[target.x], [target.y]]))
        xpos, xvel, xacc, ypos, yvel, yacc = ut

        # step 2: bullet travel time
        t = math.sqrt((tank.x - target.x) ** 2 + (tank.y - target.y) ** 2) / 100
        hitzone_x = xpos + xvel*t + 0.5  # * xacc * t**2 + 100 * math.cos(tank.x) * t
        hitzone_y = ypos + yvel*t + 0.5  # * yacc * t**2 + 100 * math.sin(tank.y) * t
        self.kvis.update(xpos, ypos, width, target.x, target.y, hitzone_x, hitzone_y)


        # step 3: PDController to find angular velocity
        ang = self.ang(hitzone_x, hitzone_y)
        # print ang
        if abs(math.pi + tank.angle - ang) < 0.1:
            tank.shoot()
        target_vec = [-math.cos(ang), -math.sin(ang)]
        next_action = self.pdc.get_next_action(self.state.mytanks[self.tank_index], target_vec)
        return next_action['angvel']

    def ang(self, x, y):
        tank = self.state.mytanks['0']
        angle = math.atan2(tank.y - y, tank.x - x)
        '''Make any angle be between +/- pi.'''
        angle -= 2 * math.pi * int(angle / (2 * math.pi))
        if angle <= -math.pi:
            angle += 2 * math.pi
        elif angle > math.pi:
            angle -= 2 * math.pi
        return angle
Code Example #39
class DroneVideoDisplay(QtGui.QMainWindow):
	StatusMessages = {
		DroneStatus.Emergency : 'Emergency',
		DroneStatus.Inited    : 'Initialized',
		DroneStatus.Landed    : 'Landed',
		DroneStatus.Flying    : 'Flying',
		DroneStatus.Hovering  : 'Hovering',
		DroneStatus.Test      : 'Test (?)',
		DroneStatus.TakingOff : 'Taking Off',
		DroneStatus.GotoHover : 'Going to Hover Mode',
		DroneStatus.Landing   : 'Landing',
		DroneStatus.Looping   : 'Looping (?)'
		}
	DisconnectedMessage = 'Disconnected'
	UnknownMessage = 'Unknown Status'
	
	def __init__(self):
		# Construct the parent class
		super(DroneVideoDisplay, self).__init__()

		# Setup our very basic GUI - a label which fills the whole window and holds our image
		self.setWindowTitle('AR.Drone Video Feed')
		self.imageBox = QtGui.QLabel(self)
		self.setCentralWidget(self.imageBox)

		self.controller = BasicDroneController()
		self.counter = 0
		# Subscribe to the /ardrone/navdata topic, of message type navdata, and call self.ReceiveNavdata when a message is received
		self.subNavdata = rospy.Subscriber('/ardrone/navdata',Navdata,self.ReceiveNavdata) 
		
		# Subscribe to the drone's video feed, calling self.ReceiveImage when a new frame is received
		self.subVideo   = rospy.Subscriber('/ardrone/image_raw',Image,self.ReceiveImage)

		'''BEGIN CHANGES'''
		#Define Kalman Filter constants
		time = GUI_UPDATE_PERIOD
		time2 = time*time

		#1D case for velocity in the x-direction
		dimension = 1
		A = np.identity(dimension)
		B = np.matrix(time)
		H = np.identity(dimension)
		P = np.identity(dimension)
		Q = np.identity(dimension)
		R = np.identity(dimension)

		#tweak covariance matrices
		Q = np.dot(1,Q)
		R = np.dot(0.1, R)

		#create the Kalman Filter instance
		self.kfilter = KalmanFilter(A, P, R, Q, H, B, dimension)
		#rospy.loginfo("INIT")

		#create empty array to house our estimates
		self.state_estimate = []
		self.state_real = []


		#### Computer vision code

		self.x_pix = 320
		self.y_pix = 240

		self.x_ang = np.radians(54.4)
		self.y_ang = np.radians(37.8)

		self.prev_frame = None
		self.prev_points = None
		self.prev_time = None
		self.vel = []

		plt.ion()

		'''END CHANGES'''
		
		# Holds the image frame received from the drone and later processed by the GUI
		self.image = None
		self.imageLock = Lock()

		self.tags = []
		self.tagLock = Lock()
		
		# Holds the status message to be displayed on the next GUI update
		self.statusMessage = ''

		# Tracks whether we have received data since the last connection check
		# This works because data comes in at 50Hz but we're checking for a connection at 4Hz
		self.communicationSinceTimer = False
		self.connected = False

		# A timer to check whether we're still connected
		self.connectionTimer = QtCore.QTimer(self)
		self.connectionTimer.timeout.connect(self.ConnectionCallback)
		self.connectionTimer.start(CONNECTION_CHECK_PERIOD)
		
		# A timer to redraw the GUI
		self.redrawTimer = QtCore.QTimer(self)
		self.redrawTimer.timeout.connect(self.RedrawCallback)
		self.redrawTimer.start(GUI_UPDATE_PERIOD)

	# Called every CONNECTION_CHECK_PERIOD ms, if we haven't received anything since the last callback, will assume we are having network troubles and display a message in the status bar
	def ConnectionCallback(self):
		self.connected = self.communicationSinceTimer
		self.communicationSinceTimer = False

	def RedrawCallback(self):
		if self.image is not None:
			# We have some issues with locking between the display thread and the ros messaging thread due to the size of the image, so we need to lock the resources
			self.imageLock.acquire()
			try:			

					''' 
					TODO:

					1. Create Kalman Filter instance in constructor.         DONE 
					2. Create optical flow instance in constructor. 	     DONE
					3. Retrieve controller navdata here.                     DONE 
					4. Retrieve image matrix here. Convert to cv matrix.     DONE
					5. Run optical flow alg. on image. 						 DONE
					6. Make prediction with controller data.                 DONE
					7. Update prediction with image data.                    DONE
					8. Plot estimate vs real continuously					 DONE
					9. GetHeight() function in drone_controller.py 			 INCOMPLETE
					10. GetTime() function in here						     INCOMPLETE
					'''

					'''BEGIN CHANGES'''
					#convert the ROS image to OpenCV and apply some processing. then convert back to ROS
					openimage = ToOpenCV(self.image)

					# make picture 2D
					frame = cv2.cvtColor(openimage, cv2.COLOR_BGR2GRAY)

					''' TODO: Implement GetHeight in drone_controller.py '''

					feature_params = dict( maxCorners = 100,
	                       qualityLevel = 0.3,
	                       minDistance = 7,
	                       blockSize = 7 )

					if self.prev_frame is None:
						self.prev_frame = frame 
						self.prev_points = cv2.goodFeaturesToTrack(self.prev_frame, mask=None, **feature_params)
						self.prev_time = GUI_UPDATE_PERIOD #self.GetTime() is set to a constant for now
					else:

						h = self.controller.GetHeight()
						xdist = (h * np.tan(self.x_ang / 2.0))
						ydist = (h * np.tan(self.y_ang / 2.0)) 

						pix2mx = self.x_pix / xdist
						pix2my = self.y_pix / ydist

						curr_frame = frame

						''' Implement GetTime() '''
						curr_time = GUI_UPDATE_PERIOD #self.GetTime() is set to constant for now
						new_points, status, error = cv2.calcOpticalFlowPyrLK(self.prev_frame, curr_frame, self.prev_points)

						good_new = new_points[status==1]
						good_old = self.prev_points[status==1]
						assert good_new.shape == good_old.shape


						### Calculate velocity components
						sum_x = 0.0
						sum_y = 0.0
						ptslen = len(good_new)
						xcomp = 0
						ycomp = 0
						for x in range(ptslen):
							xcomp = ((good_new[x][1] - good_old[x][1]) / self.x_pix) / (curr_time / 1000.0) #- self.prev_time
							ycomp = ((good_new[x][0] - good_old[x][0]) / self.y_pix) / (curr_time / 1000.0)
							sum_y += ycomp
							sum_x += xcomp

						# Average the per-feature velocity components
						avg_x = np.divide(sum_x, ptslen)
						avg_y = np.divide(sum_y, ptslen)

						self.vel.append(avg_x) #only x for now

						# iterate next frames
						self.prev_frame = curr_frame
						self.prev_points = new_points
						self.prev_time = curr_time


					#Convert to ROS
					ROSimage = ToRos(openimage)

					##############################################
					### Change velocity to get correct one here ## 
					##############################################
					estimated_velocity = self.vel[-1] if self.vel else 0
					##############################################
					#rospy.loginfo("estimated_velocity")
					#rospy.loginfo(estimated_velocity)

					u_k = 0
					real_velocity = 0
					#update the measured accelerations and velocities
					real_velocity = self.controller.GetVelocity()
					rospy.loginfo("real_velocity")
					rospy.loginfo(real_velocity)
					u_k = self.controller.GetAcceleration()
					z_k = estimated_velocity
					rospy.loginfo("z_k")
					rospy.loginfo(estimated_velocity)

					#Kalman Filter step
					self.kfilter.predictState(u_k)
					self.kfilter.getKalmanGain()
					#rospy.loginfo("kalman_gain")
					#rospy.loginfo(self.kfilter.kalmanGain)
					self.kfilter.update(estimated_velocity)

					self.state_estimate.append(self.kfilter.x_k.item(0))
					self.state_real.append(real_velocity)
					# Plot estimated vs. real velocity (windowed to the last 90 samples)
					if self.counter > 100:
						plt.plot(self.state_estimate[-90:], label = "estimated velocity", color = 'black')
						plt.plot(self.state_real[-90:], color = 'yellow')
					else: 
						plt.plot(self.state_estimate, label = "estimated velocity", color = 'black')
						plt.plot(self.state_real, color = 'yellow')
					self.counter += 1
					#rospy.loginfo("estimate")
					#rospy.loginfo(self.state_estimate)
					#rospy.loginfo("real state")
					#rospy.loginfo(self.state_real)
					plt.draw()

					'''END CHANGES'''

					# Convert the ROS image into a QImage which we can display
					image = QtGui.QPixmap.fromImage(QtGui.QImage(ROSimage.data, ROSimage.width, ROSimage.height, QtGui.QImage.Format_RGB888))
					if len(self.tags) > 0:
						self.tagLock.acquire()
						try:
							painter = QtGui.QPainter()
							painter.begin(image)
							painter.setPen(QtGui.QColor(0,255,0))
							painter.setBrush(QtGui.QColor(0,255,0))
							for (x,y,d) in self.tags:
								r = QtCore.QRectF((x*image.width())/1000-DETECT_RADIUS,(y*image.height())/1000-DETECT_RADIUS,DETECT_RADIUS*2,DETECT_RADIUS*2)
								painter.drawEllipse(r)
								painter.drawText((x*image.width())/1000+DETECT_RADIUS,(y*image.height())/1000-DETECT_RADIUS,str(d/100)[0:4]+'m')
							painter.end()
						finally:
							self.tagLock.release()
			finally:
				self.imageLock.release()

			# display the window.
			self.resize(image.width(),image.height())
			self.imageBox.setPixmap(image)

		# Update the status bar to show the current drone status & battery level
		self.statusBar().showMessage(self.statusMessage if self.connected else self.DisconnectedMessage)

	def ReceiveImage(self,data):
		# Indicate that new data has been received (thus we are connected)
		self.communicationSinceTimer = True

		# We have some issues with locking between the GUI update thread and the ROS messaging thread due to the size of the image, so we need to lock the resources
		self.imageLock.acquire()
		try:
			self.image = data # Save the ros image for processing by the display thread
		finally:
			self.imageLock.release()

	def ReceiveNavdata(self,navdata):
		# Indicate that new data has been received (thus we are connected)
		self.communicationSinceTimer = True

		# Update the message to be displayed
		msg = self.StatusMessages[navdata.state] if navdata.state in self.StatusMessages else self.UnknownMessage
		self.statusMessage = '{} (Battery: {}%)'.format(msg,int(navdata.batteryPercent))

		self.tagLock.acquire()
		try:
			if navdata.tags_count > 0:
				self.tags = [(navdata.tags_xc[i],navdata.tags_yc[i],navdata.tags_distance[i]) for i in range(0,navdata.tags_count)]
			else:
				self.tags = []
		finally:
			self.tagLock.release()
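The KalmanFilter constructed above with (A, P, R, Q, H, B, dimension) is a project-local class that is not included in this listing. A minimal 1-D sketch consistent with how it is called here -- predictState(u_k) with the measured acceleration as the control input, getKalmanGain(), then update(z_k) -- might look as follows; the method names and argument order mirror the calls above, but the body is an assumption, not the project's actual implementation:

import numpy as np

class KalmanFilter(object):
    """Illustrative 1-D filter matching the predictState/getKalmanGain/update pattern."""
    def __init__(self, A, P, R, Q, H, B, dimension):
        self.A, self.P, self.R, self.Q, self.H, self.B = A, P, R, Q, H, B
        self.x_k = np.zeros((dimension, 1))        # state estimate (velocity)
        self.kalmanGain = np.zeros((dimension, dimension))

    def predictState(self, u_k):
        # x = A x + B u : the acceleration drives the velocity prediction
        self.x_k = self.A.dot(self.x_k) + self.B * u_k
        self.P = self.A.dot(self.P).dot(self.A.T) + self.Q

    def getKalmanGain(self):
        # K = P H^T (H P H^T + R)^-1
        S = self.H.dot(self.P).dot(self.H.T) + self.R
        self.kalmanGain = self.P.dot(self.H.T).dot(np.linalg.inv(S))

    def update(self, z_k):
        # Correct the prediction with the optical-flow velocity measurement
        self.x_k = self.x_k + self.kalmanGain.dot(z_k - self.H.dot(self.x_k))
        self.P = (np.identity(self.P.shape[0]) - self.kalmanGain.dot(self.H)).dot(self.P)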
Code example #45
0
#Define the starting point of the robot
#start = (2,1)
start = (17,8)
#while wrapper.dest is -1:
#	time.sleep(1)
#print wrapper.dest
#finish = wrapper.dest
finish = (19,8)
#Use a*star to plan the path
arrows, cost = pathplan.a_star_search(grid,start,finish)
pathplan.print_cost_grid_float(width,height,cost,finish)
star_path = pathplan.get_star_path(grid,cost,start,finish)
pathplan.print_path(grid,star_path,width,height,start,finish)
waypoints = pathplan.set_waypoints(grid,star_path,start,finish)

kalfilter = KalmanFilter(start[0]*GRID_SIZE+(GRID_SIZE/2),start[1]*GRID_SIZE+(GRID_SIZE/2),math.pi/2)
kalfilter.GRID_SIZE = GRID_SIZE
pathplan.GRID_SIZE = GRID_SIZE
time.sleep(1) #Wait a second for everything to wake up
print 'Starting Up!!!'
prior_time = time.clock()
#kalfilter.plot_robot(grid.walls,waypoints)
delay_time = .001
first = True
second = True
third = False
old_finish = finish
while(True):
	old_finish = finish
	time.sleep(delay_time)
	marker_dist = -1
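For reference, the filter above is seeded at the metric center of the start cell: cell index times GRID_SIZE plus half a cell. A tiny helper making that conversion explicit (illustrative only, not part of the original script):

def cell_center(cell, grid_size):
    """Convert an (ix, iy) grid cell index to the metric coordinates of its center."""
    return (cell[0] * grid_size + grid_size / 2.0,
            cell[1] * grid_size + grid_size / 2.0)

# cell_center(start, GRID_SIZE) reproduces the (x, y) passed to KalmanFilter above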
Code example #47
0
class RobotControl(object):
    """
    Class used to interface with the rover. Gets sensor measurements through ROS subscribers,
    and transforms them into the 2D plane, and publishes velocity commands.
    """
    def __init__(self, world_map,occupancy_map, pos_init, pos_goal, max_speed, max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        """

        # Handles all the ROS related items
        self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS

        # Uncomment as completed
        self.markers = world_map
        self.vel = np.array([0, 0])
        self.imu_meas = np.array([])
        self.meas = []

        self.goals = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init, pos_goal)
        # self.total_goals = self.goals.shape[0]
        self.cur_goal = 2
        self.end_goal = self.goals.shape[0] - 1
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)

    def process_measurements(self):
        """ 
        YOUR CODE HERE
        This function is called at 60Hz
        """
        meas = self.ros_interface.get_measurements()
        self.meas = meas
        print 'tag output'
        print meas
        # imu_meas: measurement coming from the imu
        imu_meas = self.ros_interface.get_imu()
        self.imu_meas = imu_meas
        print 'imu measurement'
        print imu_meas
        pose = self.kalman_filter.step_filter(self.vel, self.imu_meas, np.asarray(self.meas))
        

        # Code to follow AprilTags
        '''
        if(meas != None and meas):
            cur_meas = meas[0]
            tag_robot_pose = cur_meas[0:3]
            tag_world_pose = self.tag_pos(cur_meas[3])
            state = self.robot_pos(tag_world_pose, tag_robot_pose)
            goal = tag_world_pose
            vel = self.diff_drive_controller.compute_vel(state, goal)        
            self.vel = vel[0:2];
            print vel
            if(not vel[2]):
                self.ros_interface.command_velocity(vel[0], vel[1])
            else:
                vel = (0.01, 0.1) 
                self.vel = vel
                self.ros_interface.command_velocity(vel[0], vel[1])
        '''


        # Code to move autonomously
        goal = self.goals[self.cur_goal]
        print 'pose'
        print pose
        print 'goal'
        print goal
        vel = self.diff_drive_controller.compute_vel(pose, goal)        
        self.vel = vel[0:2]
        print 'speed'
        print vel
        if(not vel[2]):
            self.ros_interface.command_velocity(vel[0], vel[1])
        else:
            vel = (0, 0) 
            if self.cur_goal < self.end_goal:
                self.cur_goal = self.cur_goal + 1
            self.ros_interface.command_velocity(vel[0], vel[1])
            self.vel = vel
        return

    def tag_pos(self, marker_id):
        for i in range(len(self.markers)):
            marker_i = np.copy(self.markers[i])
            if marker_i[3] == marker_id:
                return marker_i[0:3]
        return None

    def robot_pos(self, w_pos, r_pos):
        H_W = np.array([[math.cos(w_pos[2]), -math.sin(w_pos[2]), w_pos[0]],
                        [math.sin(w_pos[2]), math.cos(w_pos[2]), w_pos[1]],
                        [0, 0, 1]])
        H_R = np.array([[math.cos(r_pos[2]), -math.sin(r_pos[2]), r_pos[0]],
                        [math.sin(r_pos[2]), math.cos(r_pos[2]), r_pos[1]],
                        [0, 0, 1]])
        w_r = H_W.dot(inv(H_R))
        robot_pose =  np.array([[w_r[0,2]], [w_r[1,2]], [math.atan2(w_r[1,0], w_r[0, 0])]])
        return robot_pose
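robot_pos composes the tag's pose in the world frame with the inverse of the tag's pose in the robot frame (H_W dot inv(H_R)) to recover the robot's pose in the world. A quick sanity check with made-up numbers: if the tag sits at (2, 1, 0) in the world and the robot sees it 1 m straight ahead, the robot must be at (1, 1, 0).

import math
import numpy as np
from numpy.linalg import inv

w_pos = [2.0, 1.0, 0.0]   # tag pose in the world frame (x, y, theta)
r_pos = [1.0, 0.0, 0.0]   # tag pose in the robot frame: 1 m straight ahead

H_W = np.array([[math.cos(w_pos[2]), -math.sin(w_pos[2]), w_pos[0]],
                [math.sin(w_pos[2]),  math.cos(w_pos[2]), w_pos[1]],
                [0, 0, 1]])
H_R = np.array([[math.cos(r_pos[2]), -math.sin(r_pos[2]), r_pos[0]],
                [math.sin(r_pos[2]),  math.cos(r_pos[2]), r_pos[1]],
                [0, 0, 1]])
w_r = H_W.dot(inv(H_R))
print np.array([w_r[0, 2], w_r[1, 2], math.atan2(w_r[1, 0], w_r[0, 0])])   # -> [1. 1. 0.]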
Code example #48
0
    return np.mat([[-cos(theta_a), 0, -sin(theta_a), 0],
                   [-cos(theta_b), 0, -sin(theta_b), 0]])

    # equivalently we can do this...
    #dist_a = dist(pos, pos_A)
    #dist_b = dist(pos, pos_B)

    #return np.mat([[(pos[0]-pos_A[0])/dist_a, 0, (pos[1]-pos_A[1])/dist_a,0],
    #               [(pos[0]-pos_B[0])/dist_b, 0, (pos[1]-pos_B[1])/dist_b,0]])


pos_a = (100, -20)
pos_b = (-100, -20)

f1 = KalmanFilter(dim=4)
dt = 1.0  # time step
'''
f1.F = np.mat ([[1, dt, 0,  0],
                [0,  1, 0,  0],
                [0,  0, 1, dt],
                [0,  0, 0,  1]])

'''
f1.F = np.mat([[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]])

f1.B = 0.

f1.R = np.eye(2) * 1.
f1.Q = np.eye(4) * .1
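The fragment above is the tail of a measurement-Jacobian function for two sensors at pos_a and pos_b, and the commented-out block shows the equivalent range-only form. A self-contained sketch of that commented variant, assuming the state is ordered (x, vx, y, vy) as in the F matrix above:

import math
import numpy as np

def dist(p, q):
    return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)

def HJacobian_range(pos, pos_A, pos_B):
    # Linearized measurement matrix for two range-only sensors;
    # each row is the gradient of one range w.r.t. the state (x, vx, y, vy).
    dist_a = dist(pos, pos_A)
    dist_b = dist(pos, pos_B)
    return np.mat([[(pos[0] - pos_A[0]) / dist_a, 0, (pos[1] - pos_A[1]) / dist_a, 0],
                   [(pos[0] - pos_B[0]) / dist_b, 0, (pos[1] - pos_B[1]) / dist_b, 0]])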
Code example #49
0
File: KalmanNEXT.py Project: gonzaponte/PyKal
 def __init__( self, P = 15., T = 27. ):
     KalmanFilter.__init__( self, name = 'NEXT Kalman Filter' )
     self.gas = NEXT( P, T )
Code example #50
0
def test_kf_drag():
    
    y = 1
    x = 0
    omega = 35.
    noise = [0,0]
    v0 = 50.
    ball = BaseballPath (x0=x, y0=y, 
                         launch_angle_deg=omega, 
                         velocity_ms=v0, noise=noise)
    #ball = BallPath (x0=x, y0=y, omega_deg=omega, velocity=v0, noise=noise)

    
    f1 = KalmanFilter(dim_x=6, dim_z=2)
    dt = 1/30.   # time step
    
    ay = -.5*dt**2
    ax = .5*dt**2
    
    f1.F = np.mat ([[1, dt, ax,  0,  0,  0],   # x=x0+dx*dt
                    [0,  1, dt,  0,  0,  0],   # dx  = dx0 + ddx*dt
                    [0,  0,  1,  0,  0,  0],   # ddx = ddx
                    [0,  0,  0,  1, dt, ay],   # y = y0 +dy*dt+1/2*g*dt^2
                    [0,  0,  0,  0,  1, dt],   # dy = dy0 + ddy*dt
                    [0,  0,  0,  0,  0, 1]])  # ddy = -g
    
    # We will inject air drag using Bu
    f1.B = np.mat([[0., 0., 1., 0., 0., 0.],
                   [0., 0., 0., 0., 0., 1.]]).T
                   
    f1.u = np.mat([[0., 0.]]).T
    
    f1.H = np.mat([
                [1, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0]])
    
    f1.R = np.eye(2) * 5
    f1.Q = np.eye(6) * 0.
    
    omega = radians(omega)
    vx = cos(omega) * v0
    vy = sin(omega) * v0
    
    f1.x = np.mat([x,vx,0,y,vy,-9.8]).T
    
    f1.P = np.eye(6) * 500.
   
    z = np.mat([[0,0]]).T
    markers.MarkerStyle(fillstyle='none')
    
    np.set_printoptions(precision=4)
    
    t=0
    while f1.x[3,0] > 0:
        t+=dt

        #f1.update (z)
        x,y = ball.update(dt)
        #x,y = ball.pos_at_t(t)
        update_drag(f1, dt)
        f1.predict()
        print f1.x.T
        plt.scatter(f1.x[0,0],f1.x[3,0], color='red', alpha=0.5)
        plt.scatter (x,y)
    return f1
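update_drag is called in the loop above but is not defined in this listing. A plausible reconstruction, assuming it injects a speed-dependent drag deceleration through the control input u (which f1.B routes into the ddx and ddy states); the drag-coefficient formula is the usual empirical baseball model, so treat the exact constants as an assumption:

import math

def update_drag(f, dt):
    # Set the control input so B*u applies air drag opposite to the velocity.
    # (dt kept to match the call site; unused in this sketch)
    vx = f.x[1, 0]
    vy = f.x[4, 0]
    v = math.sqrt(vx**2 + vy**2)
    # Empirical drag coefficient for a baseball as a function of speed (m/s)
    B_d = 0.0039 + 0.0058 / (1. + math.exp((v - 35.) / 5.))
    f.u[0, 0] = -B_d * v * vx
    f.u[1, 0] = -B_d * v * vy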
Code example #51
0
File: IMU.py Project: alyyousuf7/RollE
class IMU (threading.Thread):
	def __init__(self):
		threading.Thread.__init__(self)

		try:
			self.mpu6050 = MPU6050()
			self.hmc5883l = HMC5883L()

			# Original Values
			self.roll = 0
			self.pitch = 0
			self.yaw = 0

			# Kalman Filter
			self.kalmanX = KalmanFilter()
			self.kalmanY = KalmanFilter()
			self.kalmanZ = KalmanFilter()
			self.kalAngleX = 0
			self.kalAngleY = 0
			self.kalAngleZ = 0
			
			# Complementary Filter
			self.compAngleX = 0
			self.compAngleY = 0
			self.compAngleZ = 0
			
			# Only from Gyroscope
			self.gyroAngleX = 0
			self.gyroAngleY = 0
			self.gyroAngleZ = 0
		except:
			print "Unable to create an instance for IMU."
			exit()
	
	def run(self):
		# Get the initial data
		[accX, accY, accZ, gyroX, gyroY, gyroZ, temperature] = self.mpu6050.read()
		[magX, magY, magZ] = self.hmc5883l.read()
		
		self.updatePitchRoll(accX, accY, accZ)
		self.updateYaw(magX, magY, magZ)
		
		# Set the initial angles (X: Roll; Y: Pitch, Z: Yaw)
		self.kalAngleX = self.kalmanX.setAngle(self.roll)
		self.kalAngleY = self.kalmanY.setAngle(self.pitch)
		self.kalAngleZ = self.kalmanZ.setAngle(self.yaw)
		self.compAngleX = self.roll
		self.compAngleY	= self.pitch
		self.compAngleZ = self.yaw
		self.gyroAngleX	= self.roll
		self.gyroAngleY	= self.pitch
		self.gyroAngleZ = self.yaw
		
		timer = datetime.now().microsecond
		
		while not config.exitStatus:
			IMULock.acquire()

			[accX, accY, accZ, gyroX, gyroY, gyroZ, temperature] = self.mpu6050.read()
			[magX, magY, magZ] = self.hmc5883l.read()

			dt = ( datetime.now().microsecond - timer ) / 1000000.0
			if dt < 0:
				dt = dt + 1
			timer = datetime.now().microsecond
			
			# PITCH AND ROLL
			self.updatePitchRoll(accX, accY, accZ)

			if (self.roll < -90 and self.kalAngleX > 90) or (self.roll > 90 and self.kalAngleX < -90):
				self.kalAngleX = self.kalmanX.setAngle(self.roll)
				self.compAngleX = self.roll
				self.gyroAngleX = self.roll
			else:
				self.kalAngleX = self.kalmanX.getAngle(self.roll, gyroX, dt)
			
			if math.fabs(self.kalAngleX) > 90:
				gyroY = -gyroY
			
			self.kalAngleY = self.kalmanY.getAngle(self.pitch, gyroY, dt)
			
			# YAW
			self.updateYaw(magX, magY, magZ)
			
			if (self.yaw < -90 and self.kalAngleZ > 90) or (self.yaw > 90 and self.kalAngleZ < -90):
				self.kalAngleZ = self.kalmanZ.setAngle(self.yaw)
				self.compAngleZ = self.yaw
				self.gyroAngleZ = self.yaw
			else:
				self.kalAngleZ = self.kalmanZ.getAngle(self.yaw, gyroZ, dt)

			self.gyroAngleX += gyroX * dt
			self.gyroAngleY += gyroY * dt
			self.gyroAngleZ += gyroZ * dt

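			# Complementary filter: blend the integrated gyro rate (fast but
			# drifting) with the accelerometer/magnetometer angles (noisy but
			# drift-free); 0.98 weights the gyro term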
			complementaryRate = 0.98
			self.compAngleX = complementaryRate * (self.compAngleX + gyroX * dt) + (1-complementaryRate) * self.roll
			self.compAngleY = complementaryRate * (self.compAngleY + gyroY * dt) + (1-complementaryRate) * self.pitch
			self.compAngleZ = complementaryRate * (self.compAngleZ + gyroZ * dt) + (1-complementaryRate) * self.yaw

			# Reset GyroAngle if its drifted too much
			if ((self.gyroAngleX < -180) or (self.gyroAngleX > 180)):
				self.gyroAngleX = self.kalAngleX
			if ((self.gyroAngleY < -180) or (self.gyroAngleY > 180)):
				self.gyroAngleY = self.kalAngleY
			if ((self.gyroAngleZ < -180) or (self.gyroAngleZ > 180)):
				self.gyroAngleZ = self.kalAngleZ
			
			IMULock.release()
			
			sleep(config.IMU_delay)

		print "IMU Thread: Shutting down."
	
	def updatePitchRoll(self, accX, accY, accZ):
		self.roll = math.degrees(math.atan2(accY, accZ))
		self.pitch = math.degrees(math.atan(-accX / math.sqrt(accY * accY + accZ * accZ)))
	
	def updateYaw(self, magX, magY, magZ):
		rollAngle = math.radians(self.kalAngleX)
		pitchAngle = math.radians(self.kalAngleY)
		
		Bfy = magZ * math.sin(rollAngle) - magY * math.cos(rollAngle)
		Bfx = magX * math.cos(pitchAngle) + magY * math.sin(pitchAngle) * math.sin(rollAngle) + magZ * math.sin(pitchAngle) * math.cos(rollAngle)
		
		self.yaw = math.degrees(math.atan2(-Bfy, Bfx))
	
	def getData(self):
		IMULock.acquire()
		data = {'originalX':self.roll, 'originalY':self.pitch, 'originalZ':self.yaw, 'kalmanX':self.kalAngleX, 'kalmanY':self.kalAngleY, 'kalmanZ':self.kalAngleZ, 'complementaryX':self.compAngleX, 'complementaryY':self.compAngleY, 'complementaryZ':self.compAngleZ}
		IMULock.release()
		return data
Code example #52
0
# -*- coding: utf-8 -*-
"""
Created on Sat May 24 14:42:55 2014

@author: rlabbe
"""

from KalmanFilter import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as random

f = KalmanFilter (dim=4)

dt = 1
f.F = np.mat ([[1, dt, 0,  0],
               [0,  1, 0,  0],
               [0,  0, 1, dt],
               [0,  0, 0,  1]])

f.H = np.mat ([[1, 0, 0, 0],
               [0, 0, 1, 0]])



f.Q *= 4.
f.R = np.mat([[50,0],
              [0, 50]])

f.x = np.mat([0,0,0,0]).T
f.P *= 100.
Code example #53
0
File: track.py Project: pierrelux/pendulum_tracking
class ColorBlobTracker: 
    def __init__(self):
        self.IMAGE_WIDTH = 320
        self.IMAGE_HEIGHT = 240
        
        # PID parameters
        self.KP = 0.035
        self.KI = 0.045
        self.KD = 0.005        
        
        self.prev_errx = 0
        self.prev_erry = 0

        self.integral_x = 0
        self.integral_y = 0
        
        self.curr_yaw = 90
        self.curr_pitch = 90
        
        self.last_obs = time.time()

        # Open the camera       
        self.capture = cv.CreateCameraCapture(0) 
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_WIDTH, self.IMAGE_WIDTH)
        cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_HEIGHT, self.IMAGE_HEIGHT)
        
        # Union-Find Connected Component Labeling
        self.detector = BlobDetector()
  
        # Kalman filter
        self.filter = KalmanFilter()
   
        # Open the serial port to the arduino
        print 'Opening serial port  ...'
        self.serial = serial.Serial('/dev/ttyACM0', 19200)
        time.sleep(2)
        print 'Moving servos to initial position ...'
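        # Command format for the Arduino: '<yaw>s<pitch>t' in degrees
        # (the same format string is used in _MoveCameraHead below)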
        self.serial.write('90s90t')
        time.sleep(1)      
    
    def _MoveCameraHead(self, ballx, bally, angle):
        # Apply PID control to keep ball center in middle
        err_x = int(self.IMAGE_WIDTH/2) - ballx
        err_y = int(self.IMAGE_HEIGHT/2) - bally
        
        curr_time = time.time()
        dt = curr_time - self.last_obs
        self.last_obs = curr_time

        self.integral_x = self.integral_x + (err_x * dt)
        self.integral_y = self.integral_y + (err_y * dt)
        
        derivative_x = (err_x - self.prev_errx)/dt
        derivative_y = (err_y - self.prev_erry)/dt
        
        correction_x = self.KP*err_x + self.KI*self.integral_x + self.KD*derivative_x      
        self.curr_yaw = int(self.curr_yaw + correction_x)
        if (self.curr_yaw > 135):
            self.curr_yaw = 135
        elif (self.curr_yaw < 45):
            self.curr_yaw = 45
            
        correction_y = self.KP*err_y + self.KI*self.integral_y + self.KD*derivative_y      
        self.curr_pitch = int(self.curr_pitch + correction_y)
        if (self.curr_pitch > 135):
            self.curr_pitch = 135
        elif (self.curr_pitch < 45):
            self.curr_pitch = 45            
        
        print 'Correction x %f for error %d, command %d dt %f'%(correction_x, err_x, self.curr_yaw, dt)
        print 'Correction y %f for error %d, command %d dt %f'%(correction_y, err_y, self.curr_pitch, dt)
                
        self.serial.write('%ds%dt'%(self.curr_yaw, self.curr_pitch))
        
        self.prev_errx = err_x        
        self.prev_erry = err_y        
        
    def run(self): 
        while True: 
            img = cv.QueryFrame( self.capture ) 

            img_undist = cv.CreateImage(cv.GetSize(img), 8, 3)
            self.filter.undistort(img, img_undist)
            img = img_undist

            # Convert the BGR image to HSV space for easier color thresholding
            img_hsv = cv.CreateImage(cv.GetSize(img), 8, 3) 
            cv.CvtColor(img, img_hsv, cv.CV_BGR2HSV) 

            # Smooth the image with a small box filter (cv.CV_BLUR) to suppress noise
            cv.Smooth(img_hsv, img_hsv, cv.CV_BLUR, 3)
            
            # Threshold the HSV image to find yellow objects
            img_th = cv.CreateImage(cv.GetSize(img_hsv), 8, 1) 
            cv.InRangeS(img_hsv, (20, 100, 100), (30, 255, 255), img_th)

            # Connected Component Analysis
            roi = self.detector.detect(cv.GetMat(img_th))
            if len(roi) == 0:
                continue
                
            # Only consider the biggest blob
            i = max(roi, key=lambda x:roi.get(x).count)
            blob = roi[i]

            # Only keep blobs larger than 50 labeled pixels
            if blob.count > 50:
                # Draw bounding box and center of object
                center_x = int(blob.x1 + (blob.x2 - blob.x1)/2.0)
                center_y = int(blob.y1 + (blob.y2 - blob.y1)/2.0)
                
                cv.Rectangle(img, (blob.x1, blob.y1), (blob.x2, blob.y2), cv.RGB(255, 0, 0), 2)
                cv.Circle(img, (center_x, center_y), 2, cv.RGB(255, 0, 0), -1)

                # Draw cross
                cv.Line(img, (int(img.width/2), 0), (int(img.width/2), img.height), cv.RGB(0, 0, 0))
                cv.Line(img, (0, int(img.height/2)), (img.width, int(img.height/2)), cv.RGB(0, 0, 0))
               
                # Apply Kalman filter
                xhat = self.filter.predictUpdate(self.curr_pitch, self.curr_yaw, center_x, center_y)
                print math.degrees(xhat[0])
                fpix = -1*self.filter.state2pixel(self.curr_pitch, self.curr_yaw)
                print fpix
                cv.Circle(img, (int(fpix[0]), int(fpix[1])), 2, cv.RGB(0, 255, 0), -1)    
                            
                #self._MoveCameraHead(center_x, center_y, angle)
                
                # Draw predicted object center
                                
            # Display the thresholded image 
            cv.ShowImage('Tracking', img_undist) 
                        
            if cv.WaitKey(10) == 27: 
                break 
                
        self.serial.close()                
Code example #54
0
def main():
   params = check_params(parse_args(sys.argv[1:]))
   kf = KalmanFilter()

   if params['file']:
      template = read_template(params)

   surf, flann = init_surf()
   kp, des = surf.detectAndCompute(template, None)

   #Start up Video processing thread
   vs = PiVideoStream(resolution=(params['xRes'], params['yRes'])).start()

   #Wait for camera to warm up
   time.sleep(3.0)

   #Used for calculating FPS
   startTime = time.time()
   if params['capTime'] == 0:
      endTime = startTime + 1000000000
   else:
      endTime = startTime + params['capTime']
   frames = 0

   # Open up serial port with Pixhawk
   if params['sendToPX4']:
      port = serial.Serial('/dev/ttyAMA0', 57600)
      mav = mavlink.MAVLink(port)

   # Typically only need to search within a small Y-range
   yRes = params['yRes']
   (cropYmin, cropYmax) = (int(yRes * .25), int(yRes * .70))

   # Start the not-found counter high so the filter stays reset (go straight)
   # until the pattern is first detected
   notFoundCount = 1000

   while time.time() < endTime:
      frames += 1
      frame = vs.read()
      frame = frame[cropYmin : cropYmax, 0:params['xRes']]

      found, [x,y], frame = FindingFuncs.findPatternSURF(frame, surf, kp, des, template, flann, params['preview'])

      # Count how many frames it has been since the RPi has not found anything
      if not found:
         notFoundCount += 1
      else:
         notFoundCount = 0
         kf.update(x)

      # How many frames until you assume to keep going straight.
      if notFoundCount > 100:
         kf.reset()
         x = params['xRes'] / 2
      else:      
         x = kf.predict()

      loc = boundFloat(2.0 * x / params['xRes'] - 1, -1., 1.)

      if params['sendToPX4']:
         message = mavlink.MAVLink_duck_leader_loc_message(loc, 5.0)
         mav.send(message)

      if params['debug'] :
         print str(time.time() - startTime) + ", " + str(loc)

      if params['preview']:
         # Draw a circle on frame where the Kalman filter predicts.
         cv2.circle(frame, (int(x), 10), 4, (0, 0, 255), 6)

         frame = imutils.resize(frame, width=1000)
         cv2.imshow("Preview", frame)

      #Check for keypress, ending if Q is entered
      key = cv2.waitKey(1) & 0xFF
      if (key == ord("q")):
         break
      
   totalTime = time.time() - startTime
   cv2.destroyAllWindows()

   vs.stop()

   print "Average main FPS: " + str(float(frames) / totalTime) + "fps"
Code example #55
0
class RobotControl(object):
    """
    Class used to interface with the rover. Gets sensor measurements through ROS subscribers,
    and transforms them into the 2D plane, and publishes velocity commands.
    """
    def __init__(self, world_map,occupancy_map, pos_init, pos_goal, max_speed, max_omega, x_spacing, y_spacing, t_cam_to_body):
        """
        Initialize the class
        Inputs: (all loaded from the parameter YAML file)
        world_map - a P by 4 numpy array specifying the location, orientation,
            and identification of all the markers/AprilTags in the world. The
            format of each row is (x,y,theta,id) with x,y giving 2D position,
            theta giving orientation, and id being an integer specifying the
            unique identifier of the tag.
        occupancy_map - an N by M numpy array of boolean values (represented as
            integers of either 0 or 1). This represents the parts of the map
            that have obstacles. It is mapped to metric coordinates via
            x_spacing and y_spacing
        pos_init - a 3 by 1 array specifying the initial position of the robot,
            formatted as usual as (x,y,theta)
        pos_goal - a 3 by 1 array specifying the final position of the robot,
            also formatted as (x,y,theta)
        max_speed - a parameter specifying the maximum forward speed the robot
            can go (i.e. maximum control signal for v)
        max_omega - a parameter specifying the maximum angular speed the robot
            can go (i.e. maximum control signal for omega)
        x_spacing - a parameter specifying the spacing between adjacent columns
            of occupancy_map
        y_spacing - a parameter specifying the spacing between adjacent rows
            of occupancy_map
        t_cam_to_body - numpy transformation between the camera and the robot
            (not used in simulation)
        """

        # TODO for student: Comment this when running on the robot 
        self.markers = world_map
        self.robot_sim = RobotSim(world_map, occupancy_map, pos_init, pos_goal,
                                  max_speed, max_omega, x_spacing, y_spacing)
        self.vel = np.array([0, 0])
        self.imu_meas = np.array([])
        self.meas = []
        
        # TODO for student: Use this when transferring code to robot
        # Handles all the ROS related items
        #self.ros_interface = ROSInterface(t_cam_to_body)

        # YOUR CODE AFTER THIS
        
        # Uncomment as completed
        self.goals = dijkstras(occupancy_map, x_spacing, y_spacing, pos_init, pos_goal)
        self.total_goals = self.goals.shape[0]
        self.cur_goal = 1
        self.kalman_filter = KalmanFilter(world_map)
        self.diff_drive_controller = DiffDriveController(max_speed, max_omega)

    def process_measurements(self):
        """ 
        YOUR CODE HERE
        Main loop of the robot - where all measurements, control, and estimation
        are done. This function is called at 60Hz
        """
        # TODO for student: Comment this when running on the robot 
        """
        measurements - a N by 5 list of visible tags or None. The tags are in
            the form in the form (x,y,theta,id,time) with x,y being the 2D
            position of the marker relative to the robot, theta being the
            relative orientation of the marker with respect to the robot, id
            being the identifier from the map, and time being the current time
            stamp. If no tags are seen, the function returns None.
        """
        # meas: measurements coming from tags
        meas = self.robot_sim.get_measurements()
        self.meas = meas
        # imu_meas: measurement coming from the imu
        imu_meas = self.robot_sim.get_imu()
        self.imu_meas = imu_meas


        pose = self.kalman_filter.step_filter(self.vel, self.imu_meas, np.asarray(self.meas))
        self.robot_sim.set_est_state(pose)
        # goal = self.goals[self.cur_goal]
        # vel = self.diff_drive_controller.compute_vel(pose, goal)        
        # self.vel = vel[0:2]
        # self.robot_sim.command_velocity(vel[0], vel[1])
        # close_enough = vel[2]
        # if close_enough:
        #     print 'goal reached' 
        #     if self.cur_goal < (self.total_goals - 1):
        #         self.cur_goal = self.cur_goal + 1

                # vel = (0, 0) 
                # self.vel = vel
                # self.robot_sim.command_velocity(vel[0], vel[1])
            # else:
                # vel = (0, 0) 
                # self.vel = vel
                # self.robot_sim.command_velocity(vel[0], vel[1])


        # Code to follow AprilTags
        if meas is not None and meas:
            cur_meas = meas[0]
            tag_robot_pose = cur_meas[0:3]
            tag_world_pose = self.tag_pos(cur_meas[3])
            state = self.robot_pos(tag_world_pose, tag_robot_pose)
            goal = tag_world_pose
            vel = self.diff_drive_controller.compute_vel(pose, goal)        
            self.vel = vel[0:2]
            if(not vel[2]):
                self.robot_sim.command_velocity(vel[0], vel[1])
            else:
                vel = (0.1, 0.1) 
                self.vel = vel
                self.robot_sim.command_velocity(vel[0], vel[1])
        else: 
            vel = (0.1, 0.1) 
            self.vel = vel
            self.robot_sim.command_velocity(vel[0], vel[1])



        # goal = [-0.5, 2.5]
        # goal = self.goals[self.cur_goal]
        # vel = self.diff_drive_controller.compute_vel(pose, goal)        
        # self.vel = vel[0:2];
        # if(not vel[2]):
        #     self.robot_sim.command_velocity(vel[0], vel[1])
        # else:
        #     vel = (0, 0) 
        #     if self.cur_goal < (self.total_goals - 1):
        #         self.cur_goal = self.cur_goal + 1
        #     self.vel = vel




        # TODO for student: Use this when transferring code to robot
        # meas = self.ros_interface.get_measurements()
        # imu_meas = self.ros_interface.get_imu()

        return

    def tag_pos(self, marker_id):
        for i in range(len(self.markers)):
            marker_i = np.copy(self.markers[i])
            if marker_i[3] == marker_id:
                return marker_i[0:3]
        return None

    def robot_pos(self, w_pos, r_pos):
        H_W = np.array([[math.cos(w_pos[2]), -math.sin(w_pos[2]), w_pos[0]],
                        [math.sin(w_pos[2]), math.cos(w_pos[2]), w_pos[1]],
                        [0, 0, 1]])
        H_R = np.array([[math.cos(r_pos[2]), -math.sin(r_pos[2]), r_pos[0]],
                        [math.sin(r_pos[2]), math.cos(r_pos[2]), r_pos[1]],
                        [0, 0, 1]])
        w_r = H_W.dot(inv(H_R))
        robot_pose =  np.array([[w_r[0,2]], [w_r[1,2]], [math.atan2(w_r[1,0], w_r[0, 0])]])
        return robot_pose
Code example #56
0
File: KalmanNEXT.py Project: gonzaponte/PyKal
 def __init__( self ):
     KalmanFilter.__init__( self, name = 'Trigo Kalman Filter' )
Code example #57
0
from KalmanFilter import KalmanFilter
import time
kf = KalmanFilter()
kf.update(5)
print kf.predict()
time.sleep(.2)
kf.update(5.5)
print kf.predict()

time.sleep(.2)
kf.update(6)
print kf.predict()

time.sleep(.2)
kf.update(6.5)
print kf.predict()

time.sleep(.2)
kf.update(7)
print kf.predict()

time.sleep(.2)
kf.update(15)
print kf.predict()

time.sleep(.2)
kf.update(8.5)
print kf.predict()

time.sleep(.2)
kf.update(7)