def test_straight_right(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     for _ in range(0, 100):
         odometry[0] += 1
         motion_model.update(self.particle, odometry, 0)
     self.assertEqual(self.particle.x, 100)
     self.assertEqual(self.particle.y, 40)
     self.assertEqual(self.particle.theta, 0)
 def test_turn_right(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     self.particle.x = 0
     self.particle.y = 40
     self.particle.theta = 0
     for _ in range(0, 10):
         odometry[2] -= math.pi/20
         motion_model.update(self.particle, odometry, 0)
     self.assertAlmostEqual(self.particle.x, 0)
     self.assertAlmostEqual(self.particle.y, 40)
     self.assertAlmostEqual(self.particle.theta, -math.pi/2)
 def test_sidestep_right(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     self.particle.x = 0
     self.particle.y = 40
     self.particle.theta = math.pi/2
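     # with heading +y (theta = pi/2), a negative body-frame lateral step
     # (odometry[1] -= 1) points toward +x in the world frame, which is why
     # x is expected to reach 100 below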
     for _ in range(0, 100):
         odometry[1] -= 1
         motion_model.update(self.particle, odometry, 0)
     self.assertAlmostEqual(self.particle.x, 100)
     self.assertAlmostEqual(self.particle.y, 40)
     self.assertAlmostEqual(self.particle.theta, math.pi/2)
 def test_straight_up(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     self.particle.x = 40
     self.particle.y = 0
     self.particle.theta = math.pi/2
     for _ in range(0, 100):
         odometry[0] += 1
         motion_model.update(self.particle, odometry, 0)
     self.assertAlmostEqual(self.particle.x, 40)
     self.assertAlmostEqual(self.particle.y, 100)
     self.assertAlmostEqual(self.particle.theta, math.pi/2)
 def test_distribution_line(self):
     motion_model = MotionModel(0.1, 0.1, 0.1, 0.1)
     odometry = [0, 0, 0]
     for _ in range(0, 3):
         odometry[0] += 1
         x = list()
         y = list()
         for particle in self.particles:
             motion_model.update(particle, odometry, 0)
             x.append(particle.x)
             y.append(particle.y)
         self.map.display(self.particles, title='Line w/ motion model')
 def test_distribution_circle(self):
     motion_model = MotionModel(0.01, 0.01, 0.01, 0.01)
     odometry = [0, 0, 0]
     for particle in self.particles:
         particle.x = 0
         particle.y = 40
         particle.theta = math.pi/2
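     # each iteration applies the body-frame displacement of a 45-degree arc
     # along a circle of radius 10: forward 10*sin(pi/4), lateral
     # -10*(1 - cos(pi/4)), heading change -pi/4; four steps sweep the upper
     # semicircle from (0, 40) to (20, 40)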
     for _ in range(0, 4):
         odometry[0] += 10*math.sqrt(2)/2
         odometry[1] -= 10*(1-math.sqrt(2)/2)
         odometry[2] -= math.pi/4
         for particle in self.particles:
             motion_model.update(particle, odometry, 0)
         self.map.display(self.particles, title='Upper Circle w/ Motion Model')
 def test_circle(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     self.particle.x = 0
     self.particle.y = 40
     self.particle.theta = math.pi/2
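     # same 45-degree chord increments as in test_distribution_circle: with
     # zero variance the particle should trace the radius-10 upper semicircle
     # exactly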
     for _ in range(0, 4):
         odometry[0] += 10*math.sqrt(2)/2
         odometry[1] -= 10*(1-math.sqrt(2)/2)
         odometry[2] -= math.pi/4
         motion_model.update(self.particle, odometry, 0)
         #self.map.display([self.particle], title='Zero Variance Upper Circle')
     self.assertAlmostEqual(self.particle.x, 20)
     self.assertAlmostEqual(self.particle.y, 40)
     self.assertAlmostEqual(self.particle.theta, -math.pi/2)
 def __init__(self, map_file='../data/map/wean.dat'):
     self.map = Map(map_file)
     #self.map.display_gaussian([], 'Gaussian Blurred Map')
     self._number_particles = 1000
     self.particles = list()
     self._particle_probabilities = []
     for i in range(0, self._number_particles):
         print('Initializing particle %d' % i)
         particle = Particle(self.map)
         #particle.x = 4080 + np.random.normal(scale=35)
         #particle.y = 3980 + np.random.normal(scale=15)
         #particle.theta = math.pi + 0.1 + np.random.normal(scale=.1)
         self.particles.append(particle)
         self._particle_probabilities.append(1)
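     # the four MotionModel arguments are noise parameters: the deterministic
     # tests above pass zeros for exact motion, while the distribution tests
     # pass small values (their exact semantics depend on MotionModel, which
     # is not shown here)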
     self._motion_model = MotionModel(0.001, 0.001, 0.1, 0.1)
     self._sensor_model = SensorModel(self.map)
     self._ranges = []
 def test_forward_back(self):
     motion_model = MotionModel(0, 0, 0, 0)
     odometry = [0, 0, 0]
     self.particle.x = 0
     self.particle.y = 40
     self.particle.theta = 0
     for _ in range(0, 10):
         odometry[0] += 1
         motion_model.update(self.particle, odometry, 0)
     for _ in range(0, 10):
         odometry[2] += math.pi/10
         motion_model.update(self.particle, odometry, 0)
     for _ in range(0, 10):
         odometry[0] += 1
         motion_model.update(self.particle, odometry, 0)
     self.assertAlmostEqual(self.particle.x, 0)
     self.assertAlmostEqual(self.particle.y, 40)
     self.assertAlmostEqual(self.particle.theta, math.pi)
Example #10
import numpy as np
import matplotlib.pyplot as plt
from motion_model import MotionModel
from regression import load_data, get_norm_emg
from activation import Activation

if __name__ == '__main__':
    # Constants: activation parameters
    frequency = np.arange(20, 55, 5)
    duty_cycle = np.arange(0, 1.0, 0.1)
    scaling = 1
    non_linearity = -1
    shape = "monophasic"

    # Motion Model
    motion_model = MotionModel(0.58, 1)

    # Tile for plotting
    independent_1 = np.tile(frequency, (len(duty_cycle), 1))
    independent_1 = np.transpose(independent_1)
    independent_2 = np.tile(duty_cycle, (len(frequency), 1))

    # Metrics
    rmse_ankle_angle = []
    above_0 = []
    rmse_toe_height = []

    for i in range(len(independent_1)):
        for j in range(len(independent_1[0])):
            motion_model.set_activation(frequency[i], duty_cycle[j], scaling,
                                        non_linearity, shape)
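            # presumably the loop body continues by simulating the motion
            # model with this activation and appending one entry per
            # (frequency, duty cycle) pair to rmse_ankle_angle, above_0 and
            # rmse_toe_height (an assumption; only the set_activation call
            # is shown in this example)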
Example #12
class ParticleFilter:
    '''
    Monte Carlo Localization based on odometry and a laser scanner.
    '''
    def __init__(self):

        # Get parameters
        self.particle_filter_frame = rospy.get_param("~particle_filter_frame")
        self.MAX_PARTICLES = int(rospy.get_param("~num_particles"))
        self.MAX_VIZ_PARTICLES = int(rospy.get_param("~max_viz_particles"))
        self.PUBLISH_ODOM = bool(rospy.get_param("~publish_odom", 1))  # int default: bool() of any non-empty string (even "0") would always be True
        self.ANGLE_STEP = int(rospy.get_param("~angle_step"))
        self.DO_VIZ = bool(rospy.get_param("~do_viz"))

        # MCL algorithm
        self.iters = 0
        self.lidar_initialized = False
        self.odom_initialized = False
        self.map_initialized = False
        self.last_odom_pose = None  # last received odom pose
        self.last_odom_msg = None
        self.laser_angles = None
        self.downsampled_angles = None
        self.state_lock = Lock()

        # particles
        self.inferred_pose = None
        self.particles = np.zeros((self.MAX_PARTICLES, 3))
        self.particle_indices = np.arange(self.MAX_PARTICLES)
        self.weights = np.ones(self.MAX_PARTICLES) / float(self.MAX_PARTICLES)

        # Initialize the models
        self.motion_model = MotionModel()
        self.sensor_model = SensorModel()
        # initialize the state
        self.smoothing = Utils.CircularArray(10)
        self.timer = Utils.Timer(10)
        self.initialize_global()
        # map
        self.permissible_region = None

        self.SHOW_FINE_TIMING = False

        # these topics are for visualization
        self.pose_pub = rospy.Publisher("/pf/viz/inferred_pose",
                                        PoseStamped,
                                        queue_size=1)
        self.particle_pub = rospy.Publisher("/pf/viz/particles",
                                            PoseArray,
                                            queue_size=1)
        self.pub_fake_scan = rospy.Publisher("/pf/viz/fake_scan",
                                             LaserScan,
                                             queue_size=1)
        self.path_pub = rospy.Publisher('/pf/viz/path', Path, queue_size=1)
        self.path = Path()

        if self.PUBLISH_ODOM:
            self.odom_pub = rospy.Publisher("/pf/pose/odom",
                                            Odometry,
                                            queue_size=1)

        # these topics are for coordinate space things
        self.pub_tf = tf.TransformBroadcaster()

        # these topics are to receive data from the racecar
        self.laser_sub = rospy.Subscriber(rospy.get_param(
            "~scan_topic", "/scan"),
                                          LaserScan,
                                          self.callback_lidar,
                                          queue_size=1)
        self.odom_sub = rospy.Subscriber(rospy.get_param(
            "~odom_topic", "/odom"),
                                         Odometry,
                                         self.callback_odom,
                                         queue_size=1)
        self.pose_sub = rospy.Subscriber("/initialpose",
                                         PoseWithCovarianceStamped,
                                         self.clicked_pose,
                                         queue_size=1)
        self.click_sub = rospy.Subscriber("/clicked_point",
                                          PointStamped,
                                          self.clicked_pose,
                                          queue_size=1)

        print "Finished initializing, waiting on messages..."

    def initialize_global(self):
        '''
        Spread the particle distribution over the permissible region of the state space.
        '''
        print("GLOBAL INITIALIZATION")
        # randomize over grid coordinate space
        self.state_lock.acquire()

        print('Waiting for map..')
        while not self.sensor_model.map_set:
            time.sleep(0.05)  # avoid pegging the CPU while waiting for the map

        self.map_initialized = True
        self.permissible_region = self.sensor_model.permissible_region
        self.map = self.sensor_model.map
        self.map_info = self.sensor_model.map_info

        permissible_x, permissible_y = np.where(self.permissible_region == 1)
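        # NOTE: np.where on the 2D grid returns (row, col) index arrays, so
        # permissible_x actually holds row indices; the components are swapped
        # into (x, y) order when building the states below, before converting
        # grid coordinates to world coordinates with Utils.map_to_world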
        indices = np.random.randint(0,
                                    len(permissible_x),
                                    size=self.MAX_PARTICLES)

        permissible_states = np.zeros((self.MAX_PARTICLES, 3))
        permissible_states[:, 0] = permissible_y[indices]
        permissible_states[:, 1] = permissible_x[indices]
        permissible_states[:, 2] = np.random.random(
            self.MAX_PARTICLES) * np.pi * 2.0

        Utils.map_to_world(permissible_states, self.map_info)
        self.particles = permissible_states
        self.weights[:] = 1.0 / self.MAX_PARTICLES
        self.state_lock.release()

    def initialize_particles_pose(self, pose):
        '''
        Initialize particles in the general region of the provided pose.
        '''
        print "SETTING POSE"
        print pose
        self.state_lock.acquire()
        self.weights = np.ones(self.MAX_PARTICLES) / float(self.MAX_PARTICLES)
        self.particles[:, 0] = pose.position.x + np.random.normal(
            loc=0.0, scale=0.5, size=self.MAX_PARTICLES)
        self.particles[:, 1] = pose.position.y + np.random.normal(
            loc=0.0, scale=0.5, size=self.MAX_PARTICLES)
        self.particles[:, 2] = Utils.quaternion_to_angle(
            pose.orientation) + np.random.normal(
                loc=0.0, scale=0.4, size=self.MAX_PARTICLES)

        # This is supposed to be used only when in simulation env (due to simulation bugs)
        # if isinstance(self.last_odom_pose, np.ndarray):
        #     self.last_odom_pose[0] = pose.position.x
        #     self.last_odom_pose[1] = pose.position.y
        #     self.last_odom_pose[2] = Utils.quaternion_to_angle(pose.orientation)

        self.state_lock.release()

    def clicked_pose(self, msg):
        '''
        Receive pose messages from RViz and initialize the particle distribution in response.
        '''
        print('clicked_pose')
        if isinstance(msg, PointStamped):
            #self.initialize_particles_pose(msg.pose.pose)
            self.initialize_global()
        elif isinstance(msg, PoseWithCovarianceStamped):
            self.initialize_particles_pose(msg.pose.pose)

    def publish_tf(self, pose, stamp=None):
        """ Publish a tf for the car. This tells ROS where the car is with respect to the map. """
        if stamp is None:
            stamp = rospy.Time.now()

        # this may cause issues with the TF tree. If so, see the below code.
        self.pub_tf.sendTransform(
            (pose[0], pose[1], 0),
            tf.transformations.quaternion_from_euler(0, 0, pose[2]), stamp,
            "/laser", "/map")

        # also publish odometry to facilitate getting the localization pose
        if self.PUBLISH_ODOM:
            odom = Odometry()
            odom.header = Utils.make_header("/map", stamp)
            odom.pose.pose.position.x = pose[0]
            odom.pose.pose.position.y = pose[1]
            odom.pose.pose.orientation = Utils.angle_to_quaternion(pose[2])
            self.odom_pub.publish(odom)
        """
        Our particle filter provides estimates for the "laser" frame
        since that is where our laser range estimates are measure from. Thus,
        We want to publish a "map" -> "laser" transform.
        However, the car's position is measured with respect to the "base_link"
        frame (it is the root of the TF tree). Thus, we should actually define
        a "map" -> "base_link" transform as to not break the TF tree.
        """

        # Get map -> laser transform.
        map_laser_pos = np.array((pose[0], pose[1], 0))
        map_laser_rotation = np.array(
            tf.transformations.quaternion_from_euler(0, 0, pose[2]))
        # Apply laser -> base_link transform to map -> laser transform
        # This gives a map -> base_link transform
        laser_base_link_offset = (0.265, 0, 0)
        map_laser_pos -= np.dot(
            tf.transformations.quaternion_matrix(
                tf.transformations.unit_vector(map_laser_rotation))[:3, :3],
            laser_base_link_offset).T

        # Publish transform
        self.pub_tf.sendTransform(map_laser_pos, map_laser_rotation, stamp,
                                  self.particle_filter_frame, "/map")

    def publish_particles(self, particles):
        pa = PoseArray()
        pa.header = Utils.make_header("map")
        pa.poses = Utils.particles_to_poses(particles)
        self.particle_pub.publish(pa)

    def publish_scan(self, angles, ranges):
        ls = LaserScan()
        ls.header = Utils.make_header("laser", stamp=self.last_stamp)
        ls.angle_min = np.min(angles)
        ls.angle_max = np.max(angles)
        ls.angle_increment = np.abs(angles[0] - angles[1])
        ls.range_min = 0
        ls.range_max = np.max(ranges)
        ls.ranges = ranges
        self.pub_fake_scan.publish(ls)

    def visualize(self):
        '''
        Publish various visualization messages.
        '''
        if not self.DO_VIZ:
            return

        if self.pose_pub.get_num_connections() > 0 and isinstance(
                self.inferred_pose, np.ndarray):
            ps = PoseStamped()
            ps.header = Utils.make_header("map")
            ps.pose.position.x = self.inferred_pose[0]
            ps.pose.position.y = self.inferred_pose[1]
            ps.pose.orientation = Utils.angle_to_quaternion(
                self.inferred_pose[2])
            self.pose_pub.publish(ps)
            if self.iters % 10 == 0:
                self.path.header = ps.header
                self.path.poses.append(ps)
                self.path_pub.publish(self.path)

        if self.particle_pub.get_num_connections() > 0:
            if self.MAX_PARTICLES > self.MAX_VIZ_PARTICLES:
                # randomly downsample particles
                proposal_indices = np.random.choice(self.particle_indices,
                                                    self.MAX_VIZ_PARTICLES,
                                                    p=self.weights)
                # proposal_indices = np.random.choice(self.particle_indices, self.MAX_VIZ_PARTICLES)
                self.publish_particles(self.particles[proposal_indices, :])
            else:
                self.publish_particles(self.particles)

        if self.pub_fake_scan.get_num_connections() > 0 and isinstance(
                self.sensor_model.particle_scans, np.ndarray):
            # generate the scan from the point of view of the inferred position for visualization
            inferred_scans = self.sensor_model.get_scans(
                self.inferred_pose[None, :])
            self.publish_scan(self.downsampled_angles, inferred_scans[0, :])

    def callback_lidar(self, msg):
        '''
        Initializes reused buffers, and stores the relevant laser scanner data for later use.
        '''
        if not isinstance(self.laser_angles, np.ndarray):
            print "...Received first LiDAR message"
            self.laser_angles = np.linspace(msg.angle_min, msg.angle_max,
                                            len(msg.ranges))
            self.downsampled_angles = np.copy(
                self.laser_angles[0::self.ANGLE_STEP]).astype(np.float32)
            #self.viz_queries = np.zeros((self.downsampled_angles.shape[0],3), dtype=np.float32)
            #self.viz_ranges = np.zeros(self.downsampled_angles.shape[0], dtype=np.float32)
            #print self.downsampled_angles.shape[0]
        self.downsampled_ranges = np.array(msg.ranges[::self.ANGLE_STEP])
        self.lidar_initialized = True

    def callback_odom(self, msg):
        '''
        Store deltas between consecutive odometry messages in the coordinate space of the car.
        Odometry data is accumulated via dead reckoning, so it is very inaccurate on its own.
        '''
        if self.last_odom_msg:
            last_time = self.last_odom_msg.header.stamp.to_sec()  # to_sec() keeps sub-second precision; .secs alone truncates to whole seconds
            this_time = msg.header.stamp.to_sec()
            dt = this_time - last_time
            self.odometry_data = np.array([
                msg.twist.twist.linear.x, msg.twist.twist.linear.y,
                msg.twist.twist.angular.z
            ]) * dt
            self.last_odom_msg = msg
            self.last_stamp = msg.header.stamp
            self.update()
        else:
            print "...Received first Odometry message"
            self.last_odom_msg = msg
            self.last_stamp = msg.header.stamp
            self.odom_initialized = True

    def expected_pose(self, particles):
        # returns the expected value of the pose given the particle distribution
        return np.dot(particles.transpose(), self.weights)
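        # i.e. the weighted mean sum_i w_i * particles[i] over the (N, 3)
        # particle array; note that linearly averaging the angle component is
        # an approximation: a proper circular mean would average unit vectors
        # (atan2 of the weighted means of sin(theta) and cos(theta))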

    def MCL(self, odom, scans):
        # Implement the MCL algorithm
        # using the sensor model and the motion model
        #
        # Make sure you include some way to initialize
        # your particles, ideally with some sort
        # of interactive interface in rviz
        if self.SHOW_FINE_TIMING:
            t = time.time()  # reference time for the timing breakdown below (was undefined)
        proposal_indices = np.random.choice(self.particle_indices,
                                            self.MAX_PARTICLES,
                                            p=self.weights)
        proposal_distribution = self.particles[proposal_indices, :]
        if self.SHOW_FINE_TIMING:
            t_propose = time.time()
        #print("resample",self.expected_pose(proposal_distribution))
        # compute the motion model to update the proposal distribution
        proposal_distribution = self.motion_model.evaluate(
            proposal_distribution, odom)
        if self.SHOW_FINE_TIMING:
            t_motion = time.time()
        #print("motion model:", self.expected_pose(proposal_distribution))

        # compute the sensor model
        self.weights = self.sensor_model.evaluate(proposal_distribution, scans)

        if self.SHOW_FINE_TIMING:
            t_sensor = time.time()

        # normalize importance weights
        self.weights /= np.sum(self.weights)
        #print("sensor model:", self.expected_pose(proposal_distribution))

        if self.SHOW_FINE_TIMING:
            t_norm = time.time()
            t_total = (t_norm - t) / 100.0

        if self.SHOW_FINE_TIMING and self.iters % 10 == 0:
            print "MCL: propose: ", np.round((t_propose-t)/t_total, 2), "motion:", np.round((t_motion-t_propose)/t_total, 2), \
                  "sensor:", np.round((t_sensor-t_motion)/t_total, 2), "norm:", np.round((t_norm-t_sensor)/t_total, 2)

        # save the particles
        self.particles = proposal_distribution

        #print(self.expected_pose(self.particles))

        # Publish a transformation frame between the map
        # and the particle_filter_frame.

    def update(self):
        '''
        Apply the MCL function to update particle filter state. 
        Ensures the state is correctly initialized, and acquires the state lock before proceeding.
        '''
        if self.lidar_initialized and self.odom_initialized and self.map_initialized:
            if self.state_lock.locked():
                print("Concurrency error avoided")
            else:
                self.state_lock.acquire()
                self.timer.tick()
                self.iters += 1

                t1 = time.time()
                scans = np.copy(self.downsampled_ranges).astype(np.float32)
                odom = np.copy(self.odometry_data)
                self.odometry_data = np.zeros(3)

                # run the MCL update algorithm
                self.MCL(odom, scans)

                # compute the expected value of the robot pose
                self.inferred_pose = self.expected_pose(self.particles)
                self.state_lock.release()
                t2 = time.time()

                # publish transformation frame based on inferred pose
                self.publish_tf(self.inferred_pose, self.last_stamp)

                # this is for tracking particle filter speed
                ips = 1.0 / (t2 - t1)
                self.smoothing.append(ips)
                #if self.iters % 10 == 0:
                #    print "iters per sec:", int(self.timer.fps()), " possible:", int(self.smoothing.mean())

                self.visualize()
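# A minimal way to run this node, as a sketch: the "particle_filter" node name
# and this main guard are assumptions, not part of the original source.
if __name__ == "__main__":
    rospy.init_node("particle_filter")
    pf = ParticleFilter()
    rospy.spin()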
Example #13
    def forward(self, param, p_c, p_e, priors, times):
        # param.view(param.size(0), -1, config["num_motion_model_param"]),  # parameter predictions
        # # self.softmax(p_c.view(p_c.size(0), -1, 2)),  # motion possibility
        # self.softmax(p_e.view(p_e.size(0), -1, self.num_classes)),  # classification possibility
        # self.priors  # default boxes

        loc_datas = MotionModel.get_bbox_by_frames_pytorch(param, times)
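        # loc_datas: boxes reconstructed from the motion-model parameters for
        # each frame; its frame dimension, size(1), must be an integer
        # multiple of p_e's frame dimension (checked below)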

        # find the times scale
        if loc_datas.size(1) % p_e.size(1) != 0:
            raise AssertionError(
                "time scale must be an integer in nms_with_frames")

        time_scales = loc_datas.size(1) // p_e.size(1)
        if time_scales > 1:
            p_e = p_e[:, :, None, :, :].repeat(1, 1, time_scales, 1, 1)\
                .view(p_e.size(0), -1, 1, p_e.size(2), p_e.size(3)).squeeze(2)

        num = loc_datas.size(0)  # batch size
        num_priors = priors.size(0)
        num_frames = times.size(1)
        param_shape_1 = param.size(2)
        param_shape_2 = param.size(3)

        output_boxes = torch.zeros(num, self.num_classes, num_frames,
                                   self.top_k, 4)
        output_p_e = torch.zeros(num, 2, num_frames, self.top_k)
        output_params = torch.zeros(num, self.num_classes, self.top_k,
                                    param_shape_1, param_shape_2)
        output_p_c = torch.zeros(num, self.num_classes, self.top_k)
        conf_preds = p_c.squeeze(1).view(num, num_priors,
                                         self.num_classes).transpose(2, 1)
        conf_exists = p_e.transpose(3, 2)

        # Decode predictions into bboxes.
        decoded_locs = decode_with_frames(loc_datas, priors, self.variance)
        for i in range(num):
            # For each class, perform nms
            conf_scores = conf_preds[i]
            decoded_boxes = decoded_locs[i, :]
            for cl in range(1, self.num_classes):
                # filter boxes by confidence
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                if c_mask.sum() == 0:
                    continue
                scores = conf_scores[cl][c_mask]
                exists = conf_exists[i, :, 1, c_mask]
                boxes = decoded_boxes[:, c_mask, :]

                # filter boxes by visibility
                v_mask = (exists > self.exist_thresh).sum(
                    dim=0) / exists.shape[0] >= self.min_valid_node_rate
                if v_mask.sum() == 0:
                    continue

                scores = scores[v_mask]
                exists = exists[:, v_mask]
                boxes = boxes[:, v_mask, :]

                # if reasonable boxes exist:
                # print(c_mask.sum().item())
                # if c_mask.sum().item() > 0:
                # idx of highest scoring and non-overlapping boxes per class
                ids, count = nms_with_frames(boxes, scores, exists,
                                             self.nms_thresh, self.top_k,
                                             self.exist_thresh)
                if count > 0:
                    output_boxes[i, cl, :, :count, :] = boxes[:, ids[:count]]
                    output_p_e[i, 1, :, :count] = exists[:, ids[:count]]
                    output_params[i, cl, :count, :] = param[i, c_mask, :][
                        v_mask, :][ids[:count], :]
                    output_p_c[i, cl, :count] = scores[ids[:count]]
        output_p_c_1 = output_p_c.contiguous().view(num, -1)
        _, idx = output_p_c_1.sort(1, descending=True)
        _, rank = idx.sort(1)
        mask = rank > self.top_k
        output_p_c_1.masked_fill_(mask, 0)

        return (output_params, output_p_c, output_p_e, output_boxes)
Example #15
class Tracking(object):
    def __init__(self, system):

        if kDebugDrawMatches:
            Frame.is_store_imgs = True

        self.system = system
        self.camera = system.camera
        self.map = system.map

        self.local_mapping = system.local_mapping

        self.intializer = Initializer()

        self.motion_model = MotionModel()  # motion model for current frame pose prediction without damping
        #self.motion_model = MotionModelDamping()  # motion model for current frame pose prediction with damping

        self.dyn_config = SLAMDynamicConfig()
        self.descriptor_distance_sigma = Parameters.kMaxDescriptorDistance
        self.reproj_err_frame_map_sigma = Parameters.kMaxReprojectionDistanceMap

        self.max_frames_between_kfs = int(system.camera.fps)
        self.min_frames_between_kfs = 0

        self.state = SlamState.NO_IMAGES_YET

        self.num_matched_kps = None  # current number of matched keypoints
        self.num_inliers = None  # current number of inlier matches
        self.num_matched_map_points = None  # current number of matched map points (matched and found valid in current pose optimization)
        self.num_kf_ref_tracked_points = None  # number of tracked points in k_ref (considering a minimum number of observations)

        self.mask_match = None

        self.pose_is_ok = False
        self.predicted_pose = None
        self.velocity = None

        self.f_cur = None
        self.idxs_cur = None
        self.f_ref = None
        self.idxs_ref = None

        self.kf_ref = None  # reference keyframe (in general, different from last keyframe depending on the used approach)
        self.kf_last = None  # last keyframe
        self.kid_last_BA = -1  # last keyframe id when performed BA

        self.local_keyframes = []  # local keyframes
        self.local_points = []  # local points

        self.tracking_history = TrackingHistory()

        self.timer_verbose = kTimerVerbose  # set this to True if you want to print timings
        self.timer_main_track = TimerFps('Track',
                                         is_verbose=self.timer_verbose)
        self.timer_pose_opt = TimerFps('Pose optimization',
                                       is_verbose=self.timer_verbose)
        self.timer_seach_frame_proj = TimerFps('Search frame by proj',
                                               is_verbose=self.timer_verbose)
        self.timer_match = TimerFps('Match', is_verbose=self.timer_verbose)
        self.timer_pose_est = TimerFps('Ess mat pose estimation',
                                       is_verbose=self.timer_verbose)
        self.timer_frame = TimerFps('Frame', is_verbose=self.timer_verbose)
        self.timer_seach_map = TimerFps('Search map',
                                        is_verbose=self.timer_verbose)

        self.init_history = True
        self.poses = []  # history of poses
        self.t0_est = None  # history of estimated translations
        self.t0_gt = None  # history of ground truth translations (if available)
        self.traj3d_est = []  # history of estimated translations centered w.r.t. first one
        self.traj3d_gt = []  # history of ground truth translations centered w.r.t. first one

        self.cur_R = None  # current rotation w.r.t. world frame
        self.cur_t = None  # current translation w.r.t. world frame
        self.trueX, self.trueY, self.trueZ = None, None, None
        self.groundtruth = system.groundtruth  # not actually used here; could be used for evaluating performances

        if kLogKFinfoToFile:
            self.kf_info_logger = Logging.setup_file_logger(
                'kf_info_logger',
                'kf_info.log',
                formatter=Logging.simple_log_formatter)

    # estimate a pose from a fitted essential matrix;
    # since we do not have an inter-frame translation scale, this fitting can be used to detect outliers and to estimate the inter-frame orientation and translation direction
    # N.B.: read the notes in the method estimate_pose_ess_mat(), where the limitations of this method are explained
    def estimate_pose_by_fitting_ess_mat(self, f_ref, f_cur, idxs_ref,
                                         idxs_cur):
        # N.B.: in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat()
        self.timer_pose_est.start()
        # estimate inter frame camera motion by using found keypoint matches
        # output of the following function is:  Trc = [Rrc, trc] with ||trc||=1  where c=cur, r=ref  and  pr = Trc * pc
        Mrc, self.mask_match = estimate_pose_ess_mat(
            f_ref.kpsn[idxs_ref],
            f_cur.kpsn[idxs_cur],
            method=cv2.RANSAC,
            prob=kRansacProb,
            threshold=kRansacThresholdNormalized)
        #Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))
        Mcr = inv_T(Mrc)
        estimated_Tcw = np.dot(Mcr, f_ref.pose)
        self.timer_pose_est.refresh()

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        #print('# inliers: ', self.num_inliers )
        idxs_ref = idxs_ref[mask_idxs]
        idxs_cur = idxs_cur[mask_idxs]

        # if there are not enough inliers, do not use the estimated pose
        if self.num_inliers < kNumMinInliersEssentialMat:
            #f_cur.update_pose(f_ref.pose) # reset estimated pose to previous frame
            Printer.red('Essential mat: not enough inliers!')
        else:
            # use the estimated pose as an initial guess for the subsequent pose optimization
            # set only the estimated rotation (essential mat computation does not provide a scale for the translation, see above)
            #f_cur.pose[:3,:3] = estimated_Tcw[:3,:3] # copy only the rotation
            #f_cur.pose[:,3] = f_ref.pose[:,3].copy() # override translation with ref frame translation
            Rcw = estimated_Tcw[:3, :3]  # copy only the rotation
            tcw = f_ref.pose[:3, 3]  # override translation with ref frame translation
            f_cur.update_rotation_and_translation(Rcw, tcw)
        return idxs_ref, idxs_cur

    def pose_optimization(self, f_cur, name=''):
        #print('pose opt %s ' % (name) )
        pose_before = f_cur.pose.copy()
        # f_cur pose optimization 1  (here we use f_cur pose as first guess and exploit the matched map points of f_ref )
        self.timer_pose_opt.start()
        pose_opt_error, self.pose_is_ok, self.num_matched_map_points = optimizer_g2o.pose_optimization(
            f_cur, verbose=False)
        self.timer_pose_opt.pause()
        #print('     error^2: %f,  ok: %d' % (pose_opt_error, int(self.pose_is_ok)) )

        if not self.pose_is_ok:
            # if current pose optimization failed, reset f_cur pose
            f_cur.update_pose(pose_before)

        return self.pose_is_ok

    # track camera motion of f_cur w.r.t. f_ref
    def track_previous_frame(self, f_ref, f_cur):
        print('>>>> tracking previous frame ...')
        is_search_frame_by_projection_failure = False
        use_search_frame_by_projection = self.motion_model.is_ok and kUseSearchFrameByProjection and kUseMotionModel

        if use_search_frame_by_projection:
            # search frame by projection: match map points observed in f_ref with keypoints of f_cur
            #print('search frame by projection')
            search_radius = Parameters.kMaxReprojectionDistanceFrame
            f_cur.reset_points()
            self.timer_seach_frame_proj.start()
            idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                f_ref,
                f_cur,
                max_reproj_distance=search_radius,
                max_descriptor_distance=self.descriptor_distance_sigma)
            self.timer_seach_frame_proj.refresh()
            self.num_matched_kps = len(idxs_cur)
            #print("# matched map points in prev frame: %d " % self.num_matched_kps)

            # if not enough map point matches consider a larger search radius
            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                idxs_ref, idxs_cur, num_found_map_pts = search_frame_by_projection(
                    f_ref,
                    f_cur,
                    max_reproj_distance=2 * search_radius,
                    max_descriptor_distance=0.5 *
                    self.descriptor_distance_sigma)
                self.num_matched_kps = len(idxs_cur)
                #Printer.orange("# matched map points in prev frame (wider search): %d " % self.num_matched_kps)

            if kDebugDrawMatches and True:
                img_matches = draw_feature_matches(f_ref.img,
                                                   f_cur.img,
                                                   f_ref.kps[idxs_ref],
                                                   f_cur.kps[idxs_cur],
                                                   f_ref.sizes[idxs_ref],
                                                   f_cur.sizes[idxs_cur],
                                                   horizontal=False)
                cv2.imshow('tracking frame by projection - matches',
                           img_matches)
                cv2.waitKey(1)

            if self.num_matched_kps < Parameters.kMinNumMatchedFeaturesSearchFrameByProjection:
                f_cur.remove_frame_views(idxs_cur)
                f_cur.reset_points()
                is_search_frame_by_projection_failure = True
                Printer.red(
                    'Not enough matches in search frame by projection: ',
                    self.num_matched_kps)
            else:
                # search frame by projection was successful
                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        f_ref, f_cur, idxs_ref, idxs_cur)

                # store tracking info (for possible reuse)
                self.idxs_ref = idxs_ref
                self.idxs_cur = idxs_cur

                # f_cur pose optimization 1:
                # here, we use f_cur pose as first guess and exploit the matched map point of f_ref
                self.pose_optimization(f_cur, 'proj-frame-frame')
                # update matched map points; discard outliers detected in last pose optimization
                num_matched_points = f_cur.clean_outlier_map_points()
                #print('     # num_matched_map_points: %d' % (self.num_matched_map_points) )
                #print('     # matched points: %d' % (num_matched_points) )

                if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
                    Printer.red(
                        'failure in tracking previous frame, # matched map points: ',
                        self.num_matched_map_points)
                    self.pose_is_ok = False

        if not use_search_frame_by_projection or is_search_frame_by_projection_failure:
            self.track_reference_frame(f_ref, f_cur, 'match-frame-frame')

    # track camera motion of f_cur w.r.t. f_ref
    # estimate motion by matching keypoint descriptors
    def track_reference_frame(self, f_ref, f_cur, name=''):
        print('>>>> tracking reference %d ...' % (f_ref.id))
        if f_ref is None:
            return
        # find keypoint matches between f_cur and kf_ref
        #print('matching keypoints with ', Frame.feature_matcher.type.name)
        self.timer_match.start()
        idxs_cur, idxs_ref = match_frames(f_cur, f_ref)
        self.timer_match.refresh()
        self.num_matched_kps = idxs_cur.shape[0]
        #print("# keypoints matched: %d " % self.num_matched_kps)
        if kUseEssentialMatrixFitting:
            # estimate camera orientation and inlier matches by fitting an essential matrix (see the limitations above)
            idxs_ref, idxs_cur = self.estimate_pose_by_fitting_ess_mat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        if kUseDynamicDesDistanceTh:
            self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                f_ref, f_cur, idxs_ref, idxs_cur)

        # propagate map point matches from kf_ref to f_cur  (do not override idxs_ref, idxs_cur)
        num_found_map_pts_inter_frame, idx_ref_prop, idx_cur_prop = propagate_map_point_matches(
            f_ref,
            f_cur,
            idxs_ref,
            idxs_cur,
            max_descriptor_distance=self.descriptor_distance_sigma)
        #print("# matched map points in prev frame: %d " % num_found_map_pts_inter_frame)

        if kDebugDrawMatches and True:
            img_matches = draw_feature_matches(f_ref.img,
                                               f_cur.img,
                                               f_ref.kps[idx_ref_prop],
                                               f_cur.kps[idx_cur_prop],
                                               f_ref.sizes[idx_ref_prop],
                                               f_cur.sizes[idx_cur_prop],
                                               horizontal=False)
            cv2.imshow('tracking frame (no projection) - matches', img_matches)
            cv2.waitKey(1)

        # store tracking info (for possible reuse)
        self.idxs_ref = idxs_ref
        self.idxs_cur = idxs_cur

        # f_cur pose optimization using last matches with kf_ref:
        # here, we use the first guess of f_cur pose and the propagated map point matches from f_ref (matched keypoints)
        self.pose_optimization(f_cur, name)
        # update matched map points; discard outliers detected in last pose optimization
        num_matched_points = f_cur.clean_outlier_map_points()
        #print('      # num_matched_map_points: %d' % (self.num_matched_map_points) )
        #print('     # matched points: %d' % (num_matched_points) )
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackFrame:
            f_cur.remove_frame_views(idxs_cur)
            f_cur.reset_points()
            #Printer.red('failure in tracking reference %d, # matched map points: %d' %(f_ref.id,self.num_matched_map_points))
            self.pose_is_ok = False

    # track camera motion of f_cur w.r.t. given keyframe
    # estimate motion by matching keypoint descriptors
    def track_keyframe(self, keyframe, f_cur, name='match-frame-keyframe'):
        f_cur.update_pose(self.f_ref.pose.copy())  # start pose optimization from last frame pose
        self.track_reference_frame(keyframe, f_cur, name)

    def update_local_map(self):
        self.f_cur.clean_bad_map_points()
        #self.local_points = self.map.local_map.get_points()
        self.kf_ref, self.local_keyframes, self.local_points = self.map.local_map.get_frame_covisibles(
            self.f_cur)
        self.f_cur.kf_ref = self.kf_ref

    # track camera motion of f_cur w.r.t. the built local map
    # find matches between {local map points} (points in the built local map) and {unmatched keypoints of f_cur}
    def track_local_map(self, f_cur):
        if self.map.local_map.is_empty():
            return
        print('>>>> tracking local map...')
        self.timer_seach_map.start()

        self.update_local_map()

        num_found_map_pts, reproj_err_frame_map_sigma, matched_points_frame_idxs = search_map_by_projection(
            self.local_points,
            f_cur,
            max_reproj_distance=self.reproj_err_frame_map_sigma,  # Parameters.kMaxReprojectionDistanceMap
            max_descriptor_distance=self.descriptor_distance_sigma,
            ratio_test=Parameters.kMatchRatioTestMap)  # use the updated local map
        self.timer_seach_map.refresh()
        #print('reproj_err_sigma: ', reproj_err_frame_map_sigma, ' used: ', self.reproj_err_frame_map_sigma)
        #print("# new matched map points in local map: %d " % num_found_map_pts)
        #print("# local map points ", self.map.local_map.num_points())

        if kDebugDrawMatches and True:
            img_matched_trails = f_cur.draw_feature_trails(
                f_cur.img.copy(),
                matched_points_frame_idxs,
                trail_max_length=3)
            cv2.imshow('tracking local map - matched trails',
                       img_matched_trails)
            cv2.waitKey(1)

        # f_cur pose optimization 2 with all the matched local map points
        self.pose_optimization(f_cur, 'proj-map-frame')
        f_cur.update_map_points_statistics()  # here we do not reset outliers; we let them reach keyframe generation,
        # where bundle adjustment will possibly decide whether to remove them;
        # outliers are cleaned only after keyframe generation!
        #print('     # num_matched_map_points: %d' % (self.num_matched_map_points) )
        if not self.pose_is_ok or self.num_matched_map_points < kNumMinInliersPoseOptimizationTrackLocalMap:
            Printer.red(
                'failure in tracking local map, # matched map points: ',
                self.num_matched_map_points)
            self.pose_is_ok = False

        #if kUseDynamicDesDistanceTh:
        #    self.reproj_err_frame_map_sigma = self.dyn_config.update_reproj_err_map_stat(reproj_err_frame_map_sigma)

    # store frame history in order to retrieve the complete camera trajectory
    def update_tracking_history(self):
        if self.state == SlamState.OK:
            isometry3d_Tcr = self.f_cur.isometry3d * self.f_cur.kf_ref.isometry3d.inverse()  # pose of current frame w.r.t. current reference keyframe kf_ref
            self.tracking_history.relative_frame_poses.append(isometry3d_Tcr)
            self.tracking_history.kf_references.append(self.kf_ref)
            self.tracking_history.timestamps.append(self.f_cur.timestamp)
        else:
            self.tracking_history.relative_frame_poses.append(
                self.tracking_history.relative_frame_poses[-1])
            self.tracking_history.kf_references.append(
                self.tracking_history.kf_references[-1])
            self.tracking_history.timestamps.append(
                self.tracking_history.timestamps[-1])
        self.tracking_history.slam_states.append(self.state)

    def need_new_keyframe(self, f_cur):
        num_keyframes = self.map.num_keyframes()
        nMinObs = kNumMinObsForKeyFrameDefault
        if num_keyframes <= 2:
            nMinObs = 2  # if just two keyframes then we can have just two observations
        num_kf_ref_tracked_points = self.kf_ref.num_tracked_points(nMinObs)  # number of tracked points in kf_ref
        num_f_cur_tracked_points = f_cur.num_matched_inlier_map_points()  # number of inliers in f_cur
        #Printer.purple('F(%d) #points: %d, KF(%d) #points: %d ' %(f_cur.id, num_f_cur_tracked_points, self.kf_ref.id, num_kf_ref_tracked_points))

        if kLogKFinfoToFile:
            self.kf_info_logger.info(
                'F(%d) #points: %d, KF(%d) #points: %d ' %
                (f_cur.id, num_f_cur_tracked_points, self.kf_ref.id,
                 num_kf_ref_tracked_points))

        self.num_kf_ref_tracked_points = num_kf_ref_tracked_points

        is_local_mapping_idle = self.local_mapping.is_idle()
        local_mapping_queue_size = self.local_mapping.queue_size()
        #print('is_local_mapping_idle: ', is_local_mapping_idle,', local_mapping_queue_size: ', local_mapping_queue_size)

        # condition 1: more than "max_frames_between_kfs" have passed from last keyframe insertion
        cond1 = f_cur.id >= (self.kf_last.id + self.max_frames_between_kfs)

        # condition 2: more than "min_frames_between_kfs" have passed and local mapping is idle
        cond2 = (f_cur.id >= (self.kf_last.id + self.min_frames_between_kfs)) and is_local_mapping_idle
        #cond2 = (f_cur.id >= (self.kf_last.id + self.min_frames_between_kfs))

        # condition 3: few tracked features compared to reference keyframe
        cond3 = (num_f_cur_tracked_points <
                 num_kf_ref_tracked_points * Parameters.kThNewKfRefRatio) and (
                     num_f_cur_tracked_points >
                     Parameters.kNumMinPointsForNewKf)

        #print('KF conditions: %d %d %d' % (cond1, cond2, cond3) )
        ret = (cond1 or cond2) and cond3

        if ret:
            if is_local_mapping_idle:
                return True
            else:
                self.local_mapping.interrupt_optimization()
                return local_mapping_queue_size <= 3
        else:
            return False

    # @ main track method @
    def track(self, img, frame_id, timestamp=None):
        Printer.cyan('@tracking')
        time_start = time.time()

        # check image size is coherent with camera params
        #print("img.shape: ", img.shape)
        #print("camera ", self.camera.height," x ", self.camera.width)
        assert img.shape[0:2] == (self.camera.height, self.camera.width)
        #if timestamp is not None:
        #print('timestamp: ', timestamp)

        self.timer_main_track.start()

        # build current frame
        self.timer_frame.start()
        f_cur = Frame(img, self.camera, timestamp=timestamp)
        self.f_cur = f_cur
        #print("frame: ", f_cur.id)
        self.timer_frame.refresh()

        # reset indexes of matches
        self.idxs_ref = []
        self.idxs_cur = []

        if self.state == SlamState.NO_IMAGES_YET:
            # push the first frame into the initializer
            self.intializer.init(f_cur)
            self.state = SlamState.NOT_INITIALIZED
            return  # EXIT (jump to second frame)

        if self.state == SlamState.NOT_INITIALIZED:
            # try to initialize
            initializer_output, intializer_is_ok = self.intializer.initialize(
                f_cur, img)
            if intializer_is_ok:
                kf_ref = initializer_output.kf_ref
                kf_cur = initializer_output.kf_cur
                # add the two initialized frames in the map
                self.map.add_frame(
                    kf_ref)  # add first frame in map and update its frame id
                self.map.add_frame(
                    kf_cur)  # add second frame in map and update its frame id
                # add the two initialized frames as keyframes in the map
                self.map.add_keyframe(
                    kf_ref)  # add first keyframe in map and update its kid
                self.map.add_keyframe(
                    kf_cur)  # add second keyframe in map and update its kid
                kf_ref.init_observations()
                kf_cur.init_observations()
                # add points in map
                new_pts_count, _, _ = self.map.add_points(
                    initializer_output.pts,
                    None,
                    kf_cur,
                    kf_ref,
                    initializer_output.idxs_cur,
                    initializer_output.idxs_ref,
                    img,
                    do_check=False)
                #Printer.green("map: initialized %d new points" % (new_pts_count))
                # update covisibility graph connections
                kf_ref.update_connections()
                kf_cur.update_connections()

                # update tracking info
                self.f_cur = kf_cur
                self.f_cur.kf_ref = kf_ref
                self.kf_ref = kf_cur  # set reference keyframe
                self.kf_last = kf_cur  # set last added keyframe
                self.map.local_map.update(self.kf_ref)
                self.state = SlamState.OK

                self.update_tracking_history()
                self.motion_model.update_pose(kf_cur.timestamp,
                                              kf_cur.position,
                                              kf_cur.quaternion)
                self.motion_model.is_ok = False  # after initialization you cannot use motion model for next frame pose prediction (time ids of initialized poses may not be consecutive)

                self.intializer.reset()

                if kUseDynamicDesDistanceTh:
                    self.descriptor_distance_sigma = self.dyn_config.update_descriptor_stat(
                        kf_ref, kf_cur, initializer_output.idxs_ref,
                        initializer_output.idxs_cur)
            return  # EXIT (jump to next frame)

        # get previous frame in map as reference
        f_ref = self.map.get_frame(-1)
        #f_ref_2 = self.map.get_frame(-2)
        self.f_ref = f_ref

        # add current frame f_cur to map
        self.map.add_frame(f_cur)
        self.f_cur.kf_ref = self.kf_ref

        # reset pose state flag
        self.pose_is_ok = False

        with self.map.update_lock:
            # check for map point replacements in previous frame f_ref (some points might have been replaced by local mapping during point fusion)
            self.f_ref.check_replaced_map_points()

            if kUseDynamicDesDistanceTh:
                #print('descriptor_distance_sigma: ', self.descriptor_distance_sigma)
                self.local_mapping.descriptor_distance_sigma = self.descriptor_distance_sigma

            # update (velocity) old motion model                                             # c1=ref_ref, c2=ref, c3=cur;  c=cur, r=ref
            #self.velocity = np.dot(f_ref.pose, inv_T(f_ref_2.pose))                          # Tc2c1 = Tc2w * Twc1   (predicted Tcr)
            #self.predicted_pose = g2o.Isometry3d(np.dot(self.velocity, f_ref.pose))          # Tc3w = Tc2c1 * Tc2w   (predicted Tcw)

            # set initial guess for current pose optimization
            if kUseMotionModel and self.motion_model.is_ok:
                #print('using motion model for next pose prediction')
                # update f_ref pose according to its reference keyframe (the pose of the reference keyframe could be updated by local mapping)
                self.f_ref.update_pose(
                    self.tracking_history.relative_frame_poses[-1] *
                    self.f_ref.kf_ref.isometry3d)
                # predict pose by using motion model
                self.predicted_pose, _ = self.motion_model.predict_pose(
                    timestamp, self.f_ref.position, self.f_ref.orientation)
                f_cur.update_pose(self.predicted_pose)
            else:
                #print('setting f_cur.pose <-- f_ref.pose')
                # use reference frame pose as initial guess
                f_cur.update_pose(f_ref.pose)

            # track camera motion from f_ref to f_cur
            self.track_previous_frame(f_ref, f_cur)

            if not self.pose_is_ok:
                # if previous track didn't go well then track the camera motion from kf_ref to f_cur
                self.track_keyframe(self.kf_ref, f_cur)

            # now, having a better estimate of f_cur pose, we can find more map point matches:
            # find matches between {local map points} (points in the local map) and {unmatched keypoints of f_cur}
            if self.pose_is_ok:
                self.track_local_map(f_cur)

        # end block {with self.map.update_lock:}

        # TODO: add relocalization

        # HACK: since local mapping is not fast enough in python (and tracking is not in real-time) => give local mapping more time to process stuff
        self.wait_for_local_mapping(
        )  # N.B.: this must be outside the `with self.map.update_lock:` block

        with self.map.update_lock:

            # update slam state
            if self.pose_is_ok:
                self.state = SlamState.OK
            else:
                self.state = SlamState.LOST
                Printer.red('tracking failure')

            # update motion model state
            self.motion_model.is_ok = self.pose_is_ok

            if self.pose_is_ok:  # if tracking was successful

                # update motion model
                self.motion_model.update_pose(timestamp, f_cur.position,
                                              f_cur.quaternion)

                f_cur.clean_vo_map_points()

                # do we need a new KeyFrame?
                need_new_kf = self.need_new_keyframe(f_cur)

                if need_new_kf:
                    #Printer.green('adding new KF with frame id % d: ' %(f_cur.id))
                    if kLogKFinfoToFile:
                        self.kf_info_logger.info(
                            'adding new KF with frame id % d: ' % (f_cur.id))
                    kf_new = KeyFrame(f_cur, img)
                    self.kf_last = kf_new
                    self.kf_ref = kf_new
                    f_cur.kf_ref = kf_new

                    self.local_mapping.push_keyframe(kf_new)
                    if not kLocalMappingOnSeparateThread:
                        self.local_mapping.do_local_mapping()
                #else:
                #Printer.yellow('NOT KF')

                # From ORBSLAM2:
                # Clean outliers once keyframe generation has been managed:
                # we allow points with high innovation (considered outliers by the Huber Function)
                # pass to the new keyframe, so that bundle adjustment will finally decide
                # if they are outliers or not. We don't want next frame to estimate its position
                # with those points so we discard them in the frame.
                f_cur.clean_outlier_map_points()

            if self.f_cur.kf_ref is None:
                self.f_cur.kf_ref = self.kf_ref

            self.update_tracking_history(
            )  # must stay after having updated slam state (self.state)

            #Printer.green("map: %d points, %d keyframes" % (self.map.num_points(), self.map.num_keyframes()))
            #self.update_history()

            self.timer_main_track.refresh()

            duration = time.time() - time_start
            #print('tracking duration: ', duration)

    # Since we do not have real-time performance, we can slow things down and make tracking wait until local mapping gets idle
    # N.B.: this function must be called outside 'with self.map.update_lock' blocks,
    #       since both self.track() and the local-mapping optimization use the RLock 'map.update_lock'
    #       => they cannot wait for each other once map.update_lock is locked (deadlock)
    def wait_for_local_mapping(self):
        if kTrackingWaitForLocalMappingToGetIdle:
            #while not self.local_mapping.is_idle() or self.local_mapping.queue_size()>0:
            if not self.local_mapping.is_idle():
                print('>>>> waiting for local mapping...')
                self.local_mapping.wait_idle()
        else:
            if not self.local_mapping.is_idle(
            ) and kTrackingWaitForLocalMappingSleepTime > 0:
                print('>>>> sleeping for local mapping...')
                time.sleep(kTrackingWaitForLocalMappingSleepTime)
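
# The keyframe decision above reduces to a small amount of control flow. A
# minimal, hedged sketch of that flow (the names and the queue threshold are
# illustrative stand-ins, not pySLAM's actual API):
def need_new_kf_sketch(cond_enough_frames_passed, cond_few_tracked_points,
                       mapping_idle, mapping_queue_size, interrupt_optimization):
    if not (cond_enough_frames_passed and cond_few_tracked_points):
        return False
    if mapping_idle:
        return True
    interrupt_optimization()            # ask local mapping to yield early
    return mapping_queue_size <= 3      # insert only if the queue is short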
# Example 16
class ParticleFilter:

    # toggle robot driving in a circle in the simulator
    DRIVING = True
    # set to True if working in the simulator, else False
    SIM = True
    """
    *** IF WORKING ON THE REAL ROBOT: ***
    - in params.yaml:
        - change particle_filter_frame from 'base_link_pf' to 'base_link'
        - change odom_topic from '/odom' to '/vesc/odom'
    - in localize.launch:
        - uncomment the region that initializes the map server to use the stata basement map
    - use SIM = False and DRIVING = False
    """
    def __init__(self):
        # using locks so threads can't access self.particles at the same time (avoiding race condition)
        self.lock = Lock()

        self.lock.acquire()

        # Get parameters
        self.particle_filter_frame = \
                rospy.get_param("~particle_filter_frame") # should be '/base_link_pf' in sim and '/base_link' on car

        # Initialize the models
        self.motion_model = MotionModel()
        self.sensor_model = SensorModel()

        # Topics
        self.odom_topic = rospy.get_param("~odom_topic")
        self.scan_topic = rospy.get_param("~scan_topic")
        self.particle_marker_topic = '/particle_marker'
        self.click_topic = '/move_base_simple/goal'
        self.pose_array_topic = '/pose_array'
        self.inferred_pose_topic = '/inferred_pose'

        self.error_x_topic = '/error_x'
        self.error_y_topic = '/error_y'
        self.error_t_topic = '/error_t'

        # Subscribers and Publishers
        rospy.Subscriber(
            self.odom_topic, Odometry,
            self.odom_callback)  # '/odom' for sim and 'vesc/odom' for car
        rospy.Subscriber(self.scan_topic, LaserScan, self.scan_callback)

        self.tf_broadcaster = TransformBroadcaster()

        # Listening to clicks in rviz and publishing particle markers to rviz
        rospy.Subscriber(self.click_topic, PoseStamped, self.click_callback)
        self.particle_marker_pub = rospy.Publisher(self.particle_marker_topic,
                                                   Marker,
                                                   queue_size=1)
        self.pose_array_pub = rospy.Publisher(self.pose_array_topic,
                                              PoseArray,
                                              queue_size=1)
        self.inferred_pose_pub = rospy.Publisher(self.inferred_pose_topic,
                                                 Marker,
                                                 queue_size=1)

        self.error_x_pub = rospy.Publisher(self.error_x_topic,
                                           Float32,
                                           queue_size=1)
        self.error_y_pub = rospy.Publisher(self.error_y_topic,
                                           Float32,
                                           queue_size=1)
        self.error_t_pub = rospy.Publisher(self.error_t_topic,
                                           Float32,
                                           queue_size=1)

        self.rate = rospy.Rate(30)  # should be greater than 20 Hz

        # Implement the MCL algorithm
        # using the sensor model and the motion model
        #
        # Make sure you include some way to initialize
        # your particles, ideally with some sort
        # of interactive interface in rviz
        #
        # Publish a transformation frame between the map
        # and the particle_filter_frame.

        # Initializing particles and weights
        self.num_particles = rospy.get_param('~num_particles')
        # currently initializing all particles at the origin
        self.particles = np.zeros((self.num_particles, 3))
        # all particles are initially equally likely
        self.weights = np.full((self.num_particles), 1. / self.num_particles)

        # for error calculations
        self.groundtruth = [0, 0, 0]

        # send drive commands to test in sim
        self.drive_pub = rospy.Publisher('/drive',
                                         AckermannDriveStamped,
                                         queue_size=1)
        self.drive_msg = AckermannDriveStamped()
        self.drive_msg.drive.steering_angle = 0
        self.drive_msg.drive.speed = 1

        if not self.SIM:  # don't drive autonomously when working on the real robot
            self.DRIVING = False

        self.publish_transform()

        self.lock.release()

    def odom_callback(self, odom_data):
        '''
        whenever you get odometry data, use the motion model to update the particle positions
        determine the "average" particle pose and publish that transform
        :param odom_data: odometry data (type - Odometry)
        :return: no return, but updates self.particles positions and publishes transform
        '''
        self.lock.acquire()
        #start = time()

        x = odom_data.pose.pose.position.x
        y = odom_data.pose.pose.position.y
        q_w = odom_data.pose.pose.orientation.w
        q_z = odom_data.pose.pose.orientation.z
        yaw = transformations.euler_from_quaternion(
            np.array([0, 0, q_z, q_w]))[2]
        self.groundtruth = [x, y, yaw]

        self.particles = self.motion_model.evaluate(
            self.particles,
            self.motion_model.get_delta_x(odom_data),
            noise=0.2)

        self.publish_transform()

        #end = time()
        #print('odom_callback time:', end-start)
        self.lock.release()

    def scan_callback(self, scan_data):
        '''
        whenever you get sensor data, use the sensor model to compute the particle probabilities. Then resample the particles based on these probabilities
        determine the "average" particle pose and publish that transform
        :param scan_data: scan data (type - LaserScan)
        :return: no return, but updates self.particles by resampling particles and publishes transform
        '''
        self.lock.acquire()

        probabilities = self.sensor_model.evaluate(self.particles,
                                                   scan_data,
                                                   downsample=20,
                                                   sim=self.SIM)

        # if the map wasn't initialized in sensor_model, probabilities is None
        if probabilities is None:
            self.lock.release()
            rospy.loginfo('no probabilities to use in scan_callback')
            return

        # temper the likelihoods (cube root) so a few sharply peaked
        # probabilities don't immediately dominate the weights
        smooth_p = np.power(probabilities, 1. / 3.)

        self.weights = smooth_p / np.sum(
            smooth_p)  # normalize weights so they sum to 1

        # resample particles based on weights
        self.particles = self.resample_particles()

        self.publish_transform()

        self.lock.release()

    def resample_particles(self):
        '''
        resample particles based on their probability weights
        allow repeats of particles; to disallow repeats, use replace=False
        '''
        # choose which indices in the particles array to keep, using the probabilities as weights
        indices = np.random.choice(self.num_particles,
                                   self.num_particles,
                                   p=self.weights)
        # return particles with chosen indices
        return self.particles[indices]

    def publish_transform(self):
        '''
        anytime the particles are updated, determine the 'average' particle pose and broadcast that transform
        :return: no return, but broadcast transform between 'map' frame and self.particle_filter_frame
        '''
        x_avg, y_avg, theta_avg = self.avg_pose()

        q = transformations.quaternion_from_euler(0, 0, theta_avg)

        # broadcast transformation
        self.tf_broadcaster.sendTransform(
            (x_avg, y_avg, 0), q, rospy.Time.now(), self.particle_filter_frame,
            'map')

        self.publish_pose_array()
        #self.publish_particle_markers()

        # Driving in the simulator
        if self.DRIVING:
            self.drive_pub.publish(self.drive_msg)

        # publish inferred position in rviz as arrow
        if self.inferred_pose_pub.get_num_connections() > 0:
            a = Marker()
            a.header.frame_id = 'map'
            a.type = Marker.ARROW
            a.color = ColorRGBA(0, 1, 1, 1)

            a.scale.x = 0.5
            a.scale.y = 0.1
            a.scale.z = 0.1

            q = transformations.quaternion_from_euler(0, 0, theta_avg)
            quat = Quaternion()
            quat.x = q[0]
            quat.y = q[1]
            quat.z = q[2]
            quat.w = q[3]

            a.pose.position.x = x_avg
            a.pose.position.y = y_avg
            a.pose.orientation = quat

            self.inferred_pose_pub.publish(a)

        # for error plots
        try:
            x, y, theta = self.groundtruth
            x_error = x_avg - x
            y_error = y_avg - y
            t_error = theta_avg - theta
            self.error_x_pub.publish(x_error)
            self.error_y_pub.publish(y_error)
            self.error_t_pub.publish(t_error)
        except Exception as e:
            rospy.loginfo(e)

    def avg_pose(self):
        '''
        determine the average particle pose
        ** have to use mean of circular quantities for theta
        ** distribution might be multi-modal
        :param: no params, but uses instance variable self.particles
        :return: average particle pose as [x, y, theta]
        '''
        x = np.mean(self.particles[:, 0])
        y = np.mean(self.particles[:, 1])
        # the circular mean is the angle of the vector whose components are the mean cosine and mean sine of the angle data
        theta = np.arctan2(np.mean(np.sin(self.particles[:, 2])),
                           np.mean(np.cos(self.particles[:, 2])))

        return [x, y, theta]

    def click_callback(self, pose_data):
        '''
        use click in rviz to initialize particles
        :param pose_data: pose of rviz click (type - PoseStamped)
        use 2D Nav Goal to guess car location
        :return: no return, but updates self.particles
        '''
        self.lock.acquire()

        x = pose_data.pose.position.x
        y = pose_data.pose.position.y
        q = pose_data.pose.orientation
        theta = transformations.euler_from_quaternion([q.x, q.y, q.z, q.w])[2]

        # random points from normal distribution centered at [x, y, theta]
        self.particles = np.random.normal([x, y, theta], 1,
                                          (self.num_particles, 3))
        self.weights = np.full((self.num_particles), 1. / self.num_particles)

        self.publish_pose_array()
        #self.publish_particle_markers()

        self.lock.release()

    def publish_particle_markers(self):
        '''
        publish particles as markers in rviz
        '''
        # if something is listening for marker publications
        if self.particle_marker_pub.get_num_connections() > 0:
            m = Marker()
            m.header.frame_id = 'map'
            m.type = Marker.POINTS
            #m.color = ColorRGBA(1, 0, 0, 1)

            m.scale.x = 0.2
            m.scale.y = 0.2
            m.scale.z = 0.2

            for i in range(len(self.particles)):
                p = self.particles[i]
                w = self.weights[i]
                m.points.append(Point(p[0], p[1], 0))
                m.colors.append(self.prob_to_color(w))

            self.particle_marker_pub.publish(m)

    def prob_to_color(self, prob):
        '''
        convert a probability to a color
        red is high probability
        orange
        yellow
        green
        cyan
        light blue
        blue
        purple
        magenta
        black is low
        :param prob: probability between 0 and 1
        :return: ColorRBGA corresponding to prob
        '''
        m = max(self.weights)  # max weight
        if prob > m * 0.9:
            return ColorRGBA(1, 0, 0, 1)
        if prob > m * 0.8:
            return ColorRGBA(1, 0.5, 0, 1)
        if prob > m * 0.7:
            return ColorRGBA(1, 1, 0, 1)
        if prob > m * 0.6:
            return ColorRGBA(0, 1, 0, 1)
        if prob > m * 0.5:
            return ColorRGBA(0, 1, 1, 1)
        if prob > m * 0.4:
            return ColorRGBA(0, 0.5, 1, 1)
        if prob > m * 0.3:
            return ColorRGBA(0, 0, 1, 1)
        if prob > m * 0.2:
            return ColorRGBA(0.5, 0, 1, 1)
        if prob > m * 0.1:
            return ColorRGBA(1, 0, 1, 1)
        else:
            return ColorRGBA(0, 0, 0, 1)

    def publish_pose_array(self):
        '''
        publish arrows in rviz to visualize particles
        '''
        particles = np.copy(self.particles)
        if self.pose_array_pub.get_num_connections() > 0:
            pa = PoseArray()
            pa.header.frame_id = 'map'
            for p in particles[:20]:
                pose = Pose()
                pose.position = Point(p[0], p[1], 0)
                q = transformations.quaternion_from_euler(0, 0, p[2])
                quat = Quaternion()
                quat.x = q[0]
                quat.y = q[1]
                quat.z = q[2]
                quat.w = q[3]
                pose.orientation = quat
                pa.poses.append(pose)
            self.pose_array_pub.publish(pa)
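
# Why avg_pose() above uses a circular mean for theta: a naive arithmetic
# mean of angles that straddle the +/-pi wrap-around gives a badly wrong
# answer. A quick self-contained demonstration:
import numpy as np

angles = np.array([np.pi - 0.1, -np.pi + 0.1])  # both point nearly "backwards"
naive_mean = np.mean(angles)                    # ~0.0, i.e. "forwards" (wrong)
circular_mean = np.arctan2(np.mean(np.sin(angles)),
                           np.mean(np.cos(angles)))  # ~pi (correct)
print(naive_mean, circular_mean)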
# Example 17
                        help='Debug mode, only send out 10 beams')
    parser.add_argument('--adaptive',
                        action='store_true',
                        help='Adaptively select particles or not')
    args = parser.parse_args()

    src_path_map = args.path_to_map
    src_path_log = args.path_to_log
    os.makedirs(args.output, exist_ok=True)

    map_obj = MapReader(src_path_map)
    occupancy_map = map_obj.get_map()
    with open(src_path_log, 'r') as f:
        logfile = f.readlines()

    motion_model = MotionModel()
    sensor_model = SensorModel(occupancy_map)
    resampler = Resampling()

    num_particles = args.num_particles
    X_bar = init_particles_freespace(num_particles, occupancy_map)

    num_beams = 10 if args.debug else 18
    """
    Monte Carlo Localization Algorithm : Main Loop
    """
    if args.visualize:
        visualize_map(occupancy_map)
        visualize_timestep(X_bar, 0, args.output)

    first_time_idx = True
# Example 18
    parser.add_argument('--path_to_map', default='../data/map/wean.dat')
    parser.add_argument('--path_to_log', default='../data/log/robotdata1.log')
    parser.add_argument('--output', default='results')
    parser.add_argument('--num_particles', default=500, type=int)
    parser.add_argument('--visualize', action='store_true')
    args = parser.parse_args()

    src_path_map = args.path_to_map
    src_path_log = args.path_to_log
    os.makedirs(args.output, exist_ok=True)

    map_obj = MapReader(src_path_map)
    occupancy_map = map_obj.get_map()
    logfile = open(src_path_log, 'r')

    motion_model = MotionModel()
    sensor_model = SensorModel(occupancy_map)
    resampler = Resampling()

    num_particles = args.num_particles
    X_bar = init_particles_random(num_particles, occupancy_map)
    # X_bar = init_particles_freespace(num_particles, occupancy_map)
    """
    Monte Carlo Localization Algorithm : Main Loop
    """
    if args.visualize:
        visualize_map(occupancy_map)

    first_time_idx = True
    for time_idx, line in enumerate(logfile):
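        # hedged sketch: the loop body is truncated in the snippet above; the
        # MotionModel/SensorModel/Resampling call signatures below are
        # assumptions about the skeleton's API, not verified code
        meas_type = line[0]                  # 'O' = odometry, 'L' = laser scan
        meas_vals = np.fromstring(line[2:], dtype=np.float64, sep=' ')
        odometry_robot = meas_vals[0:3]      # [x, y, theta] in the odometry frame
        if first_time_idx:
            u_t0 = odometry_robot
            first_time_idx = False
            continue
        u_t1 = odometry_robot
        for m in range(num_particles):
            x_t0 = X_bar[m, 0:3]
            X_bar[m, 0:3] = motion_model.update(u_t0, u_t1, x_t0)
        if meas_type == 'L':
            ranges = meas_vals[6:-1]         # beam ranges; last column is the time stamp
            for m in range(num_particles):
                X_bar[m, 3] = sensor_model.beam_range_finder_model(
                    ranges, X_bar[m, 0:3])
            X_bar = resampler.low_variance_sampler(X_bar)
        u_t0 = u_t1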
# Example 19
class MotionSub():
    def __init__(self):

        self.modelMotion = MotionModel()
        self.controlFilter = ColorFiltering()
        self.controlPath = ControllerPath()
        self.entityDocument = EntityDocument()

        self.listFile = []
        self.folder = "result_dir"
        self.burnin = 0
        self.scan = 0
        self.frame_rate = 0
        self.def_area = 'draw'
        self.setROI = False
        self.includedROI = None
        self.set_areaCounter = False
        self.fileDest = " "
        self.play = False
        #folder destination
        self.folder_dest = "hasilUji"
        self.file_dest = ''

        self.saveVid = "both"

        self.minVal = []
        self.maxVal = []
        self.path_time = "/home/pi/coding/fire_detect/nilai_HSV.csv"

    def askFile(self):
        self.answer = askquestion('choose file', 'Input Video ?')
        if self.answer == 'yes':
            self.frame_seq = True
            self.pathname = askopenfilename()

        else:
            self.frame_seq = False
            self.pathname = askdirectory()
            self.images = True

        return self.pathname

    def readVids(self):
        '''
        this method extracts frames from a video so that they can later be
        processed by the filtering and motion stages when using an
        image sequence
        '''
        #method to get input from user

        self.askFile()
        os.mkdir(self.folder)
        cap = cv2.VideoCapture(self.pathname)
        #cap.set(cv2.CAP_PROP_POS_MSEC,5000) #Skip to 5s
        self.count = 0
        while True:
            #start reading frame
            ret, frame = cap.read()
            if not ret:
                break
            #set the name of image
            names = os.path.join(self.folder, "img%d.jpg" % self.count)
            cv2.imwrite(names, frame)
            self.listFile.append(names)
            self.count += 1
        return self.listFile

    def preProc(self, lblImg):
        '''
        this method recognizes the input file, sets the ROI for image
        processing, sets the learning rate and threshold value, and
        initializes the background'''
        #set stamp list
        self.stamp = []

        #write header row for stamp
        self.stamp.append(("Time", "Frame", 'Position'))

        #capture output for area detection
        self.result_img = []

        #capture output
        self.frameHasil = 0

        ##No contours, there was not enough motion compared to background, did not meet threshold
        self.noCountr = 0

        #capture minimumbox size for plotting
        self.avg_box = []

        #Empty list for area counter
        self.areaCounter = []
        self.areaCounter.append(("Time", "Frame", "X", "Y"))

        #to set draw ROI
        self.top = 0
        self.bottom = 1
        self.left = 0
        self.right = 1

        #flag on
        self.noMotion = False

        #label Image
        self.labelImg = lblImg

        #################################
        #Begin video capture
        #################################

        #if self.frame_seq is True we are reading from a video file
        if self.frame_seq:

            ext = os.path.splitext(self.pathname)[1]

            #get from video capture
            self.cap = cv2.VideoCapture(self.pathname)

            #get global frame rate
            self.frame_rate = round(self.cap.get(5))
            #make sure extension is .avi
            if ext in ['.avi', '.AVI']:
                #self.frame_rate=1
                print "frame rate set to 1"
            else:
                showinfo(title="Info",
                         message="make sure your file is .avi extension")
                return 0
            #get frame related to time of begin video
            self.frame_time = self.cap.get(0)

            #get total number of frame
            self.total_frames = self.cap.get(7)
            #skip x frame according to user input
            for x in range(1,
                           int(float(self.burnin) * int(self.frame_rate) *
                               60)):
                self.cap.grab()

            #set frame skip counter
            self.frameSkip = 0

            #get the lower and upper mask of video
            self.low = self.controlFilter.getLower()
            self.up = self.controlFilter.getUpper()
            print('{0}-{1}'.format(self.low, self.up))

            #capture the first image
            self.cap.set(1, 1)
            ret, firstImage = self.cap.read()
            #if not read
            if not ret:
                raise ValueError("no file image capture")
            #convert background to be like mask filtering
            rgbImg = self.controlFilter.bgr2rgb(firstImage)
            hsvImg = self.controlFilter.rgb2hsv(rgbImg)

            firstImage = self.controlFilter.filterMask(hsvImg, self.low,
                                                       self.up)

            print "begin motion detection"
        else:
            #self.frame_seq is False: using an image sequence
            self.cap = cv2.VideoCapture(self.pathname)
            #begin read file image sequence
            image_ext = ["*.jpg", "*.jpeg", "*.png"]
            pathImg = [os.path.join(self.pathname, x) for x in image_ext]
            self.jpgs = []
            for extension in pathImg:
                ketemu = glob.glob(extension)
                self.jpgs.extend(ketemu)
            print("banyaknya image di folder : %d" % len(self.jpgs))

            #set the first image as initialize background
            firstImage = cv2.imread(self.jpgs[0])
            #convert to gray Img
            #self.showImg=cv2.cvtColor(firstImage,cv2.COLOR_BGR2GRAY)
            self.total_frames = len(self.jpgs)
            self.frame_rate = 1

        #make copy for markup
        imageCopy = firstImage.copy()

        #self.includeRoi = raw_input(str("include ROI?"))
        #set ROI
        if self.setROI:
            self.selectedRoi = draw_controller.Udef(imageCopy,
                                                    "Region of Interest")
            self.selectedRoi = self.selectedRoi[-4:]
            if len(self.selectedRoi) == 0:
                raise ValueError('Error:please select an area to be ROI')
            if self.includedROI == "include":
                print "cropping frame...complete"
                self.showImg = firstImage[
                    self.selectedRoi[1]:self.selectedRoi[1] +
                    self.selectedRoi[3],
                    self.selectedRoi[0]:self.selectedRoi[0] +
                    self.selectedRoi[2]]
                print(self.showImg.shape[0:2])
            else:
                firstImage[self.selectedRoi[1]:self.selectedRoi[3],
                           self.selectedRoi[0]:self.selectedRoi[2]] = 255
                self.showImg = firstImage
        else:
            self.showImg = firstImage

        #show image
        if self.setROI:
            cv2.imshow("ROI area", self.showImg)
            #cv2.imshow("user self area",self.cropImg)
            cv2.waitKey(5)
            cv2.destroyAllWindows()

        self.controlFilter.displayinTk(self.showImg, self.labelImg)

        self.width = np.size(self.showImg, 1)
        self.height = np.size(self.showImg, 0)
        frameSize = (self.width, self.height)
        print(frameSize)

        return self.showImg

    #=======================================#
    #compute the background while the video loops
    #=======================================#

    def running(self, pathname, board, alpha, thresh, minVal, maxVal, imgLabel,
                mask, cap, roiLabel):

        self.mask = mask
        self.learningRate = alpha
        self.threshVal = thresh
        self.cap = cap

        start = time.time()
        self.images = False

        self.minVal = minVal
        self.maxVal = maxVal

        self.pathname = pathname
        self.times = []

        self.backgroundInit = backSubs.Background(self.learningRate, self.mask,
                                                  self.threshVal)
        #print self.backgroundInit[10:15,25:30]
        #count the number of frames
        self.frame_total = 0
        self.total_frames = 0

        try:
            #read video input
            ret, oriFrame = self.cap.read()
            if not ret:
                print('========= last frame reached =======')

                return 0
            #convert current frame to be like mask filtering
            rgbImg_cf = self.controlFilter.bgr2rgb(oriFrame)
            hsvImg_cf = self.controlFilter.rgb2hsv(rgbImg_cf)

            #put mask filtering image  here
            currentFrame = self.controlFilter.filterMask(
                hsvImg_cf, self.minVal, self.maxVal)

            #if not the last frame, scan frames
            if self.scan != 0:
                if self.noMotion:
                    for x in range(1, self.scan):
                        if not self.images:
                            self.cap.grab()
                        else:
                            oriFrame = self.jpgs.pop()
                            self.frame_total += 1
                else:
                    print "theres no motion in video"
                    return 0
            else:
                #no-op: when self.scan == 0 the frame is simply read and processing continues
                pass

            #capture from video
            if self.images is False:
                #skip read frame every n second
                frameId = int(round(
                    self.cap.get(1)))  #current frame index (CAP_PROP_POS_FRAMES)
                t_frame = int(self.cap.get(0) / 1000)  #current frame time in seconds (CAP_PROP_POS_MSEC)

                #print "frame %d detik ke-%d" %(frameId,t_frame)
            else:
                currentFrame = cv2.imread(self.jpgs.pop())
                for count, item in enumerate(self.jpgs):
                    print('frame {0}'.format(count))

            self.frame_total += 1
            #frame_t0=time.time()
            #============================#
            #background subtraction
            #============================#
            #self.start = time.time()
            #do background subtraction here -> foreground identification
            grayImg = self.backgroundInit.getForeground(currentFrame)
            #print grayImg[10:15,25:30]

            #if set Roi, subset the image
            if self.setROI:
                if self.includedROI == "include":
                    mask = np.ones(grayImg.shape, bool)
                    mask[self.selectedRoi[1]:self.selectedRoi[1] +
                         self.selectedRoi[3],
                         self.selectedRoi[0]:self.selectedRoi[0] +
                         self.selectedRoi[2]] = False
                    grayImg[mask] = 0
                else:
                    mask = cv2.bitwise_and(currentFrame,
                                           currentFrame,
                                           mask=grayImg)
                    grayImg[mask] = 0

            #==========================#
            #contour analysis and post processing
            #==========================#
            coords = []
            bbox_list = []
            counter = []

            #Empty list for position stamp
            stamp_position = []

            imgCopy = grayImg.copy()
            #calculate white pixel as motion data
            _, contours, _ = cv2.findContours(imgCopy, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
            length = len(contours)
            maxArea = -1
            i = 0

            # define minimum area Rect and minimum Enclosing circle
            for c in contours:

                #get bounding Rect
                rect = cv2.boundingRect(c)
                #skip contours that are too small
                if rect[2] < 50 or rect[3] < 50:
                    continue
                point = rect
                #print "{0}x{1} + {2}x{3}".format(point[0],point[1],point[2],point[3])
                self.setRoi(point[0], point[1], point[2], point[3], rgbImg_cf)
                #get minimum area rect
                #rect=cv2.minAreaRect(c)
                #box=cv2.boxPoints(rect)
                #print type(box)

                #convert all coord floating point to int
                #box=np.int0(box)

                #write to file frame has detected contour
                self.modelMotion.writeToFile(self.pathname, rgbImg_cf, frameId)
            #end of contour loop
            imgContour = cv2.drawContours(imgCopy, contours, -1, (255, 0, 0),
                                          1)

            #display in Tkinter format
            self.controlFilter.displayinTk(rgbImg_cf, imgLabel)
            self.controlFilter.displayinTk(imgCopy, roiLabel)

            board.after(100,
                        func=lambda: self.
                        running(pathname, board, alpha, thresh, minVal, maxVal,
                                imgLabel, mask, self.cap, roiLabel))
            #set for time

        except Exception as e:
            print(str(e))

        end = time.time() - start

        self.times = [
            "alpha: %.4f, frame %d detik ke-%d, lama:%.2f s" %
            (self.learningRate, frameId, t_frame, end)
        ]
        self.entityDocument.csv_writer(self.times, self.path_time)
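
# A hedged, self-contained sketch of the running-average background
# subtraction that backSubs.Background appears to wrap above (alpha is the
# learning rate, thresh the foreground threshold; all names are illustrative):
import cv2
import numpy as np

def get_foreground(frame_gray, background, alpha, thresh):
    # blend the new frame into the float background estimate
    cv2.accumulateWeighted(frame_gray.astype(np.float32), background, alpha)
    # pixels that differ strongly from the background model are foreground
    diff = cv2.absdiff(frame_gray.astype(np.float32), background)
    _, fg = cv2.threshold(diff, thresh, 255, cv2.THRESH_BINARY)
    return fg.astype(np.uint8)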
# Example 20
class ParticleFilter(object):
    """
    This is the main particle filter object.
    """
    def __init__(self, map_file='../data/map/wean.dat'):
        self.map = Map(map_file)
        #self.map.display_gaussian([], 'Gaussian Blurred Map')
        self._number_particles = 1000
        self.particles = list()
        self._particle_probabilities = []
        for _ in range(0, self._number_particles):
            print('Initializing particle', _)
            particle = Particle(self.map)
            #particle.x = 4080 + np.random.normal(scale=35)
            #particle.y = 3980 + np.random.normal(scale=15)
            #particle.theta = math.pi + 0.1 + np.random.normal(scale=.1)
            self.particles.append(particle)
            self._particle_probabilities.append(1)
        self._motion_model = MotionModel(0.001, 0.001, 0.1, 0.1)
        self._sensor_model = SensorModel(self.map)
        self._ranges = []

    def log(self, file_handle):
        line = list()
        for particle in self.particles:
            loc = str(particle.x) + ',' + str(particle.y)
            line.append(loc)
        file_handle.write(';'.join(line))
        file_handle.write('\n')

    def display(self):
        self.map.display(self.particles, ranges=self._ranges)

    def update(self, line):
        """
        Update step.
        Reading is a single reading (i.e. line) from the log file
        Could be either an odometry or laser reading
        """
        measurement_type = line[0]  # O = odometry, L = laser scan
        measurements = np.fromstring(line[2:], sep=' ')
        odometry = measurements[0:3]
        time_stamp = measurements[-1]  # last variable
        for particle in self.particles:
            self._motion_model.update(particle, odometry, time_stamp)
        if measurement_type == "L":
            odometry_laser = measurements[3:6]  # coordinates of the laser in standard odometry frame
            self._ranges = measurements[6:-1]  # exclude last variable, the time stamp
            self._particle_probabilities = list()  # unnormalized sensor model probabilities of the particles
            for particle in self.particles:
                self._particle_probabilities.append(self._sensor_model.get_probability(particle, self._ranges))
            argsorted_probabilities = np.argsort(-np.asarray(self._particle_probabilities))
            self.particles[argsorted_probabilities[0]].debug = True
            self.particles[argsorted_probabilities[1]].debug = True
            self.particles[argsorted_probabilities[2]].debug = True

    def _resample(self):
        """
        Resamples the particles given unnormalized particle probabilities
        """
        particle_probabilities = np.asarray(self._particle_probabilities)/sum(self._particle_probabilities)  # normalize
        indices = np.random.choice(range(0, self._number_particles), size=self._number_particles, replace=True,
                                   p=particle_probabilities)
        indices.sort()
        previous_index = -1
        new_particles = list()
        for index in indices:
            if index != previous_index:
                new_particles.append(self.particles[index])
            else:
                new_particles.append(deepcopy(self.particles[index]))
            previous_index = index
        self.particles = new_particles
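
# Why _resample() above deepcopies only the repeated indices: without the
# copy, a particle drawn twice would be the same object, so any later motion
# update would move all of its "copies" in lockstep. A quick demonstration
# with a hypothetical stand-in class:
from copy import deepcopy

class P:  # stand-in for Particle
    def __init__(self, x):
        self.x = x

aliased = [P(1.0)] * 2          # same object twice
aliased[0].x += 5
assert aliased[1].x == 6.0      # both "copies" moved together

original = P(0.0)
safe = [original, deepcopy(original)]
safe[0].x += 5
assert safe[1].x == 0.0         # the deep copy stayed independent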
# Example 24
def verify_rotation_matrices(t_start=0, t_end=1):
    motion_model = MotionModel()

    # Get real ankle angle data
    ankle_data = load_data('./data/ankle_vs_gait.csv')
    ankle_data = np.array(ankle_data)
    ankle_data = get_regress_general(ankle_data)

    # Get real ankle height data
    ankle_height = load_data('./data/Foot-Centroid-Height_OG-vs-Gait.csv')
    ankle_height = np.array(ankle_height)
    ankle_height = np.transpose(ankle_height)
    ankle_height[0] = ankle_height[0] / 5
    ankle_height[1] = ankle_height[1] / 1000

    # Get real ankle horizontal data
    ankle_hor = load_data('./data/Foot-Centroid-Horizontal_OG-vs-Gait.csv')
    ankle_hor = np.array(ankle_hor)
    ankle_hor = np.transpose(ankle_hor)
    ankle_hor[0] = ankle_hor[0] / 5
    ankle_hor[1] = ankle_hor[1] / 1000

    x = np.arange(t_start, t_end, .01)

    position = [[], []]
    for ite in x:
        coord = motion_model.get_global(
            ankle_data.eval(ite * 100)[0] * np.pi / 180, 0.06674, -0.03581,
            ite)  #gets global coordinate of ankle
        position[0].append(coord[0])
        position[1].append(coord[1])

    # Plot ankle from literature
    plt.figure()
    plt.plot(x * 100, ankle_data.eval(x * 100) * np.pi / 180)
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Ankle Angle Literature (rad)")
    plt.show()

    # Plot global horizontal of centroid
    plt.figure()
    plt.plot(ankle_hor[0][:-5] * 100, ankle_hor[1][:-5])
    plt.plot(x * 100, position[0], '--')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Horizontal Position (m)")
    plt.title("Horizontal Position of the COM over the Gait Cycle")
    plt.show()

    # Plot global vertical of centroid
    plt.figure()
    plt.plot(ankle_height[0][:-5] * 100, ankle_height[1][:-5])
    plt.plot(x * 100, position[1], '--')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Vertical Position (m)")
    plt.title("Vertical Position of the COM over the Gait Cycle")
    plt.show()

    # Plot phase portraits of centroid
    plt.figure()
    plt.plot(ankle_hor[1][:-5], ankle_height[1][:-5])

    plt.plot(position[0], position[1], '--')
    # plt.scatter(position[0][0], position[1][0], marker='x', color='r')
    # plt.text(position[0][0], position[1][0], 'start')
    # plt.scatter(position[0][-1], position[1][-1], marker='x', color='g')
    # plt.text(position[0][-1], position[1][-1], 'end')

    #  plt.scatter(ankle_hor[1][0], ankle_height[1][0], marker='x', color='r')
    #  plt.text(ankle_hor[1][0], ankle_height[1][0], 'start')
    #  plt.scatter(ankle_hor[1][-1], ankle_height[1][-1], marker='x', color='g')
    #  plt.text(ankle_hor[1][-1], ankle_height[1][-1], 'end')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("Horizontal Position (m)")
    plt.ylabel("Vertical Position (m)")
    plt.title("Phase Portrait of Centroid Trajectory over the Gait Cycle")
    plt.show()
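
# A minimal sketch of the 2D rotation that get_global() presumably performs
# above: a point expressed in a limb-segment frame is rotated by the joint
# angle and translated by the frame origin (all names here are illustrative):
import numpy as np

def to_global(angle, x_local, y_local, origin=(0.0, 0.0)):
    R = np.array([[np.cos(angle), -np.sin(angle)],
                  [np.sin(angle),  np.cos(angle)]])
    return R @ np.array([x_local, y_local]) + np.asarray(origin)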
# Example 25
class ParticleFilter(object):
    '''Defines a utility for localising a robot using a particle filter.

    Author -- Aleksandar Mitrevski

    '''
    def __init__(self, motion_model_noise_params, measurement_model_params, filter_params, measurement_generator_callback):
        self.filter_params = copy(filter_params)
        self.motion_model = MotionModel(motion_model_noise_params)
        self.measurement_model = MeasurementModel(measurement_model_params, measurement_generator_callback)
        self.particles = list()
        self.generate_new_particle_set()

    def iterate_filter(self, motion_command, measurements):
        '''Performs an update of the filter with the given motion command
        and set of sensor measurements.

        Keyword arguments:
        motion_command -- A 'velocity.Velocity' object.
        measurements -- A dictionary where each key is a name of a sensor frame and the values are measurements obtained with the respective sensor.

        '''
        self.move_particles(motion_command)
        resample = self.update_particle_weights(measurements)
        if resample:
            self.particles = self.resample_particles()

    def move_particles(self, motion_command):
        '''Moves all particles based on the given motion command.

        Keyword arguments:
        motion_command -- A 'velocity.Velocity' object.

        '''
        for i in range(self.filter_params.number_of_particles):
            self.particles[i].pose = self.motion_model.sample_motion_model(self.particles[i].pose, motion_command)

    def update_particle_weights(self, measurements):
        '''Updates the particle weights based on the given sensor measurements.

        Keyword arguments:
        measurements -- A dictionary where each key is a name of a sensor frame and the values are measurements obtained with the respective sensor.

        '''
        weight_sum = 0.
        for i in range(self.filter_params.number_of_particles):
            self.particles[i].weight = self.measurement_model.calculate_measurement_likelihood(self.particles[i].pose, measurements)
            weight_sum = weight_sum + self.particles[i].weight

        resample = True
        if weight_sum < self.filter_params.weight_sum_tolerance:
            resample = False
            self.generate_new_particle_set()
        else:
            for i in range(self.filter_params.number_of_particles):
                self.particles[i].weight = self.particles[i].weight / weight_sum
        return resample

    def resample_particles(self):
        '''Performs an importance particle resampling step based on the current particle weights.
        '''
        number_resampled_particles = self.filter_params.number_of_particles - self.filter_params.number_of_random_particles
        step = 1. / number_resampled_particles
        start_weight = uniform(0, step)
        current_particle = 0
        current_weight = self.particles[0].weight
        weight_sum = 0.

        #we add random particles to the particle set
        new_particles = list()
        for i in range(self.filter_params.number_of_random_particles):
            particle = Particle.random_particle(self.filter_params.neg_x_limit, self.filter_params.x_limit, self.filter_params.neg_y_limit, self.filter_params.y_limit, 1. / self.filter_params.number_of_particles)
            weight_sum = weight_sum + particle.weight
            new_particles.append(particle)

        #we resample particles using low variance resampling
        for i in range(number_resampled_particles):
            weight = start_weight + i * step
            while weight > current_weight:
                current_particle = current_particle + 1
                current_weight = current_weight + self.particles[current_particle].weight
            particle = deepcopy(self.particles[current_particle])
            weight_sum = weight_sum + particle.weight
            new_particles.append(particle)

        for i in range(self.filter_params.number_of_particles):
            new_particles[i].weight = new_particles[i].weight / weight_sum

        return new_particles

    def generate_new_particle_set(self):
        '''Discards the current particle set and creates a new set of random particles.
        '''
        self.particles = list()
        for i in range(self.filter_params.number_of_particles):
            self.particles.append(Particle.random_particle(self.filter_params.neg_x_limit, self.filter_params.x_limit, self.filter_params.neg_y_limit, self.filter_params.y_limit, 1./self.filter_params.number_of_particles))

    def get_most_likely_pose(self):
        '''Returns the pose of the particle with the largest weight.
        '''
        max_weight_particle = 0
        max_weight = self.particles[0].weight

        for i,particle in enumerate(self.particles):
            if particle.weight > max_weight:
                max_weight_particle = i
                max_weight = particle.weight

        return copy(self.particles[max_weight_particle].pose)
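
# Compact numpy version of the low-variance (systematic) resampling step in
# resample_particles() above; weights must already be normalized. This is a
# hedged sketch, not part of the class's actual API:
import numpy as np

def low_variance_resample(particles, weights, rng=None):
    rng = rng or np.random.default_rng()
    n = len(particles)
    # one random offset, then n evenly spaced "comb teeth" over [0, 1)
    positions = rng.uniform(0.0, 1.0 / n) + np.arange(n) / n
    cumulative = np.cumsum(weights)
    indices = np.minimum(np.searchsorted(cumulative, positions), n - 1)
    return [particles[i] for i in indices]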
# Example 26
from resampling import Resampling
from readtable import ReadTable

from matplotlib import pyplot as plt
from matplotlib import figure as fig
import time

src_path_map = '../data/map/wean.dat'
src_path_log = '../data/log/robotdata1.log'
map_obj = MapReader(src_path_map)
occupancy_map = map_obj.get_map()
bool_occ_map = (occupancy_map < 0.35) & (occupancy_map >= 0)
print(bool_occ_map[500][400])
logfile = open(src_path_log, 'r')

motion_model = MotionModel()
sensor_model = SensorModel(occupancy_map)
resampler = Resampling()


def visualize_ray_cast(x_t1, bool_occ_map, tstep, output_path):
    walk_stride = 8
    angle_stride = 10
    x, y, theta = x_t1
    laser_x_t1 = x + 25 * math.cos(theta)
    laser_y_t1 = y + 25 * math.sin(theta)
    xout = laser_x_t1 / 10
    yout = laser_y_t1 / 10
    for deg in range(-90, 90, angle_stride):
        laser_theta = (deg + theta * 180 / math.pi) * (math.pi / 180)
        calc_distance, x_end, y_end = ray_cast_visual(laser_x_t1, laser_y_t1,