Example 1: update_particles
def update_particles(graph: MultiDiGraph,
                     particles: MMParticles,
                     new_observation: np.ndarray,
                     time_interval: float,
                     mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
                     proposal_func: Callable = optimal_proposal,
                     update: str = 'BSi',
                     lag: int = 3,
                     max_rejections: int = 20,
                     **kwargs) -> MMParticles:
    """
    Updates particle approximation on receipt of a new observation

    :param graph: encodes road network, simplified and projected to UTM
    :param particles: unweighted particle approximation up to the previous observation time
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param proposal_func: function to propagate and weight single particle
    :param update:
        * 'PF' for particle filter fixed-lag update
        * 'BSi' for backward simulation fixed-lag update

        must be consistent across updates
    :param lag: fixed lag for resampling/stitching
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
            0 will do full fixed-lag stitching and track ess_stitch
    :param kwargs: optional parameters to pass to proposal
            e.g. d_max, d_refine or var
            as well as ess_threshold for the backward simulation update
    :return: MMParticles object
    """

    if update == 'PF' or lag == 0:
        return update_particles_flpf(graph,
                                     particles,
                                     new_observation,
                                     time_interval,
                                     mm_model,
                                     proposal_func,
                                     lag,
                                     max_rejections,
                                     **kwargs)
    elif update == 'BSi':
        return update_particles_flbs(graph,
                                     particles,
                                     new_observation,
                                     time_interval,
                                     mm_model,
                                     proposal_func,
                                     lag,
                                     max_rejections,
                                     **kwargs)
    else:
        raise ValueError("update " + update + " not recognised, see bmm.updates for valid options")
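
For orientation, here is a minimal online-filtering loop built around update_particles. It assumes bmm exposes these functions at package level (as the bmm.updates error message suggests), that the graph is prepared with OSMnx, and that polyline_utm is an (m, 2) array of UTM observations received every 15 seconds; treat it as a sketch rather than the package's documented workflow.

# Sketch of an online run (assumptions: package-level bmm functions,
# OSMnx graph preparation, and a hypothetical polyline_utm array of UTM observations).
import osmnx as ox
import bmm

graph = ox.project_graph(ox.graph_from_place('Porto, Portugal'))  # simplified + projected to UTM

particles = bmm.initiate_particles(graph, polyline_utm[0], n_samps=100)
for observation in polyline_utm[1:]:
    # fixed-lag backward-simulation update on each newly received observation
    particles = bmm.update_particles(graph, particles, observation,
                                     time_interval=15., update='BSi', lag=3)
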
Example 2: test_propose
def test_propose(self):
    self.particles = smc.initiate_particles(
        self.graph, self.test_data['POLYLINE_UTM'][0][0], 10)
    proposed_particle, weight, prior_norm = proposal.optimal_proposal(
        self.graph, self.particles[0],
        self.test_data['POLYLINE_UTM'][0][1], 15,
        ExponentialMapMatchingModel())
    self.assertEqual(proposed_particle.shape[1], 8)
    self.assertIsInstance(weight, float)
    self.assertGreater(weight, 0.)
    self.assertGreaterEqual(proposed_particle.shape[0], 2)
    self.assertEqual(proposed_particle.shape[1], 8)
    self.assertEqual(np.isnan(proposed_particle).sum(), 0)
    self.assertEqual(proposed_particle[:, 0].sum(), 15.)
    self.assertEqual(proposed_particle[-1, 0], 15.)
    self.assertGreaterEqual(proposed_particle[-1, -1], 0.)
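
The repeated shape assertions above pin each particle row to eight columns; the layout sketched below is inferred from the concatenation in sample_route (Example 7) and should be treated as an assumption rather than documented API.

# Assumed column layout of a particle row, inferred from Example 7's
# np.concatenate([[0.], start_position, start_coords, [0.]]):
#   0    : observation time t
#   1-3  : edge identifier (u, v, k) in the OSMnx MultiDiGraph
#   4    : alpha, proportion of the way along the edge geometry
#   5-6  : cartesian x, y (UTM)
#   7    : distance travelled since the previous observation
t, u, v, k, alpha, x, y, d = proposed_particle[-1]
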
Example 3: test_update
def test_update(self):
    self.particles = smc.initiate_particles(
        self.graph,
        self.test_data['POLYLINE_UTM'][0][0],
        10,
        filter_store=True)
    updated_particles = smc.update_particles_flbs(
        self.graph, self.particles, self.test_data['POLYLINE_UTM'][0][1],
        15, ExponentialMapMatchingModel(), proposal.optimal_proposal)
    self.assertEqual(updated_particles.n, 10)
    self.assertEqual(len(updated_particles.particles), 10)
    for proposed_particle in updated_particles:
        self.assertEqual(proposed_particle.shape[1], 8)
        self.assertGreaterEqual(proposed_particle.shape[0], 2)
        self.assertEqual(proposed_particle.shape[1], 8)
        self.assertEqual(np.isnan(proposed_particle).sum(), 0)
        self.assertEqual(proposed_particle[:, 0].sum(), 15.)
        self.assertEqual(proposed_particle[-1, 0], 15.)
        self.assertGreaterEqual(proposed_particle[-1, -1], 0.)
    self.assertGreater(
        np.unique([pp[-1, 5] for pp in updated_particles]).size, 3)
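
A note on the filter_store=True argument above: the backward-simulation ('BSi') update relies on the stored filter particles and weights that initiate_particles attaches (Example 4), which is why Example 6 ties the flag to the update type. A minimal sketch of that pairing, with graph and polyline_utm assumed as in the sketch after Example 1:

# Hypothetical pairing: filter storage at initiation enables the 'BSi' update
# (graph and polyline_utm are assumed to exist, as in the earlier sketches).
update = 'BSi'
particles = smc.initiate_particles(graph, polyline_utm[0], 10,
                                   filter_store=(update == 'BSi'))
particles = smc.update_particles_flbs(graph, particles, polyline_utm[1], 15.,
                                      ExponentialMapMatchingModel(), proposal.optimal_proposal)
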
Example 4: initiate_particles
def initiate_particles(
        graph: MultiDiGraph,
        first_observation: np.ndarray,
        n_samps: int,
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        d_refine: float = 1,
        d_truncate: float = None,
        ess_all: bool = True,
        filter_store: bool = True) -> MMParticles:
    """
    Initiate start of a trajectory by sampling points around the first observation.
    Note that the coordinate system of all inputs must be the same, typically a UTM projection (not longitude-latitude!).
    :param graph: encodes road network, simplified and projected to UTM
    :param first_observation: cartesian coordinate in UTM
    :param n_samps: number of samples to generate
    :param mm_model: MapMatchingModel
    :param d_refine: metres, resolution of distance discretisation
    :param d_truncate: metres, distance beyond which to assume zero likelihood probability
        defaults to 5 * mm_model.gps_sd
    :param ess_all: if true initiate effective sample size for each particle for each observation
        otherwise initiate effective sample size only for each observation
    :param filter_store: whether to initiate storage of filter particles and weights
    :return: MMParticles object
    """
    gps_sd = mm_model.gps_sd

    if d_truncate is None:
        d_truncate = gps_sd * 5

    start = tm()

    # Discretize edges within truncation
    dis_points, dists_to_first_obs = edges.get_truncated_discrete_edges(
        graph, first_observation, d_refine, d_truncate, True)

    if dis_points.size == 0:
        raise ValueError(
            "No edges found near initial observation: try increasing the initial_truncation"
        )

    # Likelihood weights
    weights = np.exp(-0.5 / gps_sd**2 * dists_to_first_obs**2)
    weights /= np.sum(weights)

    # Sample indices according to weights
    sampled_indices = np.random.choice(len(weights),
                                       n_samps,
                                       replace=True,
                                       p=weights)

    # Output
    out_particles = MMParticles(dis_points[sampled_indices])

    # Initiate ESS
    if ess_all:
        out_particles.ess_stitch = np.ones(
            (1, out_particles.n)) * out_particles.n
    out_particles.ess_pf = np.array([out_particles.n])

    if filter_store:
        out_particles.filter_particles = [out_particles.copy()]
        out_particles.filter_weights = np.ones((1, n_samps)) / n_samps

    end = tm()
    out_particles.time += end - start

    return out_particles
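
The core of the initiation above is the Gaussian GPS likelihood weighting of the discretised candidate points followed by a weighted resample; the fragment below isolates that step with synthetic numbers (gps_sd and the distances are made up for illustration).

# Isolated sketch of the initial weighting/resampling step with synthetic values.
import numpy as np

gps_sd = 5.2                                              # metres, assumed GPS noise scale
dists_to_first_obs = np.array([1.0, 4.0, 9.0, 20.0])      # synthetic distances to the first observation
weights = np.exp(-0.5 / gps_sd ** 2 * dists_to_first_obs ** 2)
weights /= np.sum(weights)
sampled_indices = np.random.choice(len(weights), 100, replace=True, p=weights)
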
Example 5: offline_map_match
def offline_map_match(
        graph: MultiDiGraph,
        polyline: np.ndarray,
        n_samps: int,
        timestamps: Union[float, np.ndarray],
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        proposal: str = 'optimal',
        d_refine: int = 1,
        initial_d_truncate: float = None,
        max_rejections: int = 20,
        ess_threshold: float = 1,
        store_norm_quants: bool = False,
        **kwargs) -> MMParticles:
    """
    Runs offline map-matching, i.e. receives a full polyline and returns an equal-probability collection
    of trajectories.
    Forward-filtering backward-simulation implementation - no fixed-lag approximation needed for offline inference.
    :param graph: encodes road network, simplified and projected to UTM
    :param polyline: series of cartesian coordinates in UTM
    :param n_samps: int
        number of particles
    :param timestamps: seconds
        either a float, if all time intervals between observations are equal, or an array of timestamps in seconds/UNIX time
    :param mm_model: MapMatchingModel
    :param proposal: either 'optimal' or 'aux_dist'
        defaults to optimal (discretised) proposal
    :param d_refine: metres, resolution of distance discretisation
    :param initial_d_truncate: distance beyond which to assume zero likelihood probability at time zero
        defaults to 5 * mm_model.gps_sd
    :param max_rejections: number of rejections to attempt before doing a full backward-simulation sample
        0 will do full sampling throughout and track the backward ESS
    :param ess_threshold: in [0,1], particle filter resamples if ess < ess_threshold * n_samps
    :param store_norm_quants: if True, normalisation quantities (including gradient evals) are returned in out_particles
    :param kwargs: optional parameters to pass to proposal
        e.g. d_max, d_refine or var
    :return: MMParticles object
    """
    proposal_func = get_proposal(proposal)

    num_obs = len(polyline)

    ess_all = max_rejections == 0

    start = tm()

    filter_particles = [None] * num_obs
    filter_weights = np.zeros((num_obs, n_samps))

    # Initiate filter_particles
    filter_particles[0] = initiate_particles(graph,
                                             polyline[0],
                                             n_samps,
                                             mm_model=mm_model,
                                             d_refine=d_refine,
                                             d_truncate=initial_d_truncate,
                                             ess_all=ess_all)
    filter_weights[0] = 1 / n_samps
    live_weights = filter_weights[0].copy()

    ess_pf = np.zeros(num_obs)
    ess_pf[0] = n_samps

    print("0 PF ESS: " + str(ess_pf[0]))

    if 'd_refine' in inspect.getfullargspec(proposal_func)[0]:
        kwargs['d_refine'] = d_refine

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    # Forward filtering, storing x_{t-1}, x_t ~ p(x_{t-1:t} | y_{1:t})
    for i in range(num_obs - 1):
        resample = ess_pf[i] < ess_threshold * n_samps
        filter_particles[i + 1], temp_weights, temp_prior_norm = propose_particles(
            proposal_func,
            live_weights if resample else None,
            graph,
            filter_particles[i],
            polyline[i + 1],
            time_interval_arr[i],
            mm_model,
            full_smoothing=False,
            store_norm_quants=store_norm_quants,
            **kwargs)

        filter_particles[i].prior_norm = temp_prior_norm

        if not resample:
            temp_weights *= live_weights

        temp_weights /= np.sum(temp_weights)
        filter_weights[i + 1] = temp_weights.copy()
        live_weights = temp_weights.copy()
        ess_pf[i + 1] = 1 / np.sum(temp_weights**2)

        print(f"{filter_particles[i + 1].latest_observation_time} PF ESS: {ess_pf[i + 1]}")

    # Backward simulation
    out_particles = backward_simulate(graph,
                                      filter_particles,
                                      filter_weights,
                                      time_interval_arr,
                                      mm_model,
                                      max_rejections,
                                      verbose=True,
                                      store_norm_quants=store_norm_quants)
    out_particles.ess_pf = ess_pf

    end = tm()
    out_particles.time = end - start
    return out_particles
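
A hedged usage sketch for the offline routine, reusing the graph from the sketch after Example 1 and again assuming polyline_utm and package-level exposure of offline_map_match:

# Hypothetical offline run (graph and polyline_utm as in the sketch after Example 1).
import bmm

matched = bmm.offline_map_match(graph, polyline_utm, n_samps=100, timestamps=15.)
# matched is an MMParticles object holding 100 equally weighted trajectories;
# matched.ess_pf records the particle filter ESS at each observation time.
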
Example 6: _offline_map_match_fl
def _offline_map_match_fl(
        graph: MultiDiGraph,
        polyline: np.ndarray,
        n_samps: int,
        timestamps: Union[float, np.ndarray],
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        proposal: str = 'optimal',
        update: str = 'BSi',
        lag: int = 3,
        d_refine: int = 1,
        initial_d_truncate: float = None,
        max_rejections: int = 20,
        **kwargs) -> MMParticles:
    """
    Runs offline map-matching but uses online fixed-lag techniques.
    Only recommended for simulation purposes.
    :param graph: encodes road network, simplified and projected to UTM
    :param polyline: series of cartesian coordinates in UTM
    :param n_samps: int
        number of particles
    :param timestamps: seconds
        either a float, if all time intervals between observations are equal, or an array of timestamps in seconds/UNIX time
    :param mm_model: MapMatchingModel
    :param proposal: either 'optimal' or 'aux_dist'
        defaults to optimal (discretised) proposal
    :param update:
        'PF' for particle filter fixed-lag update
        'BSi' for backward simulation fixed-lag update
        must be consistent across updates
    :param lag: fixed lag for resampling/stitching
    :param d_refine: metres, resolution of distance discretisation
    :param initial_d_truncate: distance beyond which to assume zero likelihood probability at time zero
        defaults to 5 * mm_model.gps_sd
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :param kwargs: optional parameters to pass to proposal
        e.g. d_max, d_refine or var
        as well as ess_threshold for the backward simulation update
    :return: MMParticles object
    """
    proposal_func = get_proposal(proposal)

    num_obs = len(polyline)

    ess_all = max_rejections == 0

    # Initiate particles
    particles = initiate_particles(graph,
                                   polyline[0],
                                   n_samps,
                                   mm_model=mm_model,
                                   d_refine=d_refine,
                                   d_truncate=initial_d_truncate,
                                   ess_all=ess_all,
                                   filter_store=update == 'BSi')

    print(f"{particles.latest_observation_time} PF ESS: {np.mean(particles.ess_pf[-1])}")

    if 'd_refine' in inspect.getfullargspec(proposal_func)[0]:
        kwargs['d_refine'] = d_refine

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    if update == 'PF' or lag == 0:
        update_func = update_particles_flpf
    elif update == 'BSi':
        update_func = update_particles_flbs
    else:
        raise ValueError('Update of ' + str(update) + ' not understood')

    # Update particles
    for i in range(num_obs - 1):
        particles = update_func(graph,
                                particles,
                                polyline[1 + i],
                                time_interval=time_interval_arr[i],
                                mm_model=mm_model,
                                proposal_func=proposal_func,
                                lag=lag,
                                max_rejections=max_rejections,
                                **kwargs)

        print(f"{particles.latest_observation_time} PF ESS: {np.mean(particles.ess_pf[-1])}")

    return particles
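
Given the leading underscore and the "only recommended for simulation purposes" note, this function reads as an internal benchmark for comparing fixed-lag updates against the exact forward-filtering backward-simulation output of offline_map_match; a hedged sketch of such a comparison, with the same assumed graph and polyline_utm and assuming the function is importable:

# Hypothetical comparison of fixed-lag output against the exact offline smoother
# (graph and polyline_utm assumed as before; _offline_map_match_fl assumed importable).
fl_particles = _offline_map_match_fl(graph, polyline_utm, n_samps=100,
                                     timestamps=15., update='BSi', lag=3)
exact_particles = offline_map_match(graph, polyline_utm, n_samps=100, timestamps=15.)
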
Example 7: sample_route
def sample_route(
        graph: MultiDiGraph,
        timestamps: Union[float, np.ndarray],
        num_obs: int = None,
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        d_refine: float = 1.,
        start_position: np.ndarray = None,
        num_inter_cut_off: int = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Samples a route from the map-matching model's transition prior, along with a noisy polyline
    generated by adding Gaussian GPS noise to the route's positions at the observation times.

    :param graph: encodes road network, simplified and projected to UTM
    :param timestamps: seconds
        either a float, if all time intervals between observations are equal, or an array of timestamps in seconds/UNIX time
    :param num_obs: length of observed polyline to generate (inferred from timestamps if an array is given)
    :param mm_model: MapMatchingModel
    :param d_refine: metres, resolution of distance discretisation
    :param start_position: optional start position; array (u, v, k, alpha)
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :return: tuple with sampled route (array with same shape as a single MMParticles)
        and polyline (array with shape (num_obs, 2))
    """

    if isinstance(timestamps, np.ndarray):
        num_obs = len(timestamps) + 1

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    if start_position is None:
        start_position = random_positions(graph, 1)[0]

    start_geom = edges.get_geometry(graph, start_position)
    start_coords = edges.edge_interpolate(start_geom, start_position[-1])

    full_sampled_route = np.concatenate([[0.], start_position, start_coords,
                                         [0.]])[np.newaxis]

    for k in range(num_obs - 1):
        time_interval = time_interval_arr[k]
        d_max = mm_model.d_max(time_interval)

        num_inter_cut_off_i = (max(int(time_interval / 1.5), 10)
                               if num_inter_cut_off is None else num_inter_cut_off)

        prev_pos = full_sampled_route[-1:].copy()
        prev_pos[0, 0] = 0.
        prev_pos[0, -1] = 0.

        possible_routes = proposal.get_all_possible_routes_overshoot(
            graph, prev_pos, d_max, num_inter_cut_off=num_inter_cut_off_i)

        # Get all possible positions on each route
        discretised_routes_indices_list = []
        discretised_routes_list = []
        for i, route in enumerate(possible_routes):
            # All possible end positions of route
            discretised_edge_matrix = edges.discretise_edge(
                graph, route[-1, 1:4], d_refine)

            if route.shape[0] == 1:
                discretised_edge_matrix = discretised_edge_matrix[
                    discretised_edge_matrix[:, 0] >= full_sampled_route[-1, 4]]
                discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
            else:
                discretised_edge_matrix[:, -1] += route[-2, -1]

            discretised_edge_matrix = discretised_edge_matrix[
                discretised_edge_matrix[:, -1] < d_max + 1e-5]

            # Track route index and append to list
            if discretised_edge_matrix is not None and len(
                    discretised_edge_matrix) > 0:
                discretised_routes_indices_list += [
                    np.ones(discretised_edge_matrix.shape[0], dtype=int) * i
                ]
                discretised_routes_list += [discretised_edge_matrix]

        # Concatenate into numpy.ndarray
        discretised_routes_indices = np.concatenate(
            discretised_routes_indices_list)
        discretised_routes = np.concatenate(discretised_routes_list)

        if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and
                                            discretised_routes[0][-1] == 0):
            warnings.warn('sample_route exited prematurely')
            break

        # Distance prior evals
        distances = discretised_routes[:, -1]
        distance_prior_evals = mm_model.distance_prior_evaluate(
            distances, time_interval)

        # Deviation prior evals
        deviation_prior_evals = mm_model.deviation_prior_evaluate(
            full_sampled_route[-1, 5:7], discretised_routes[:, 1:3],
            discretised_routes[:, -1])

        # Normalise prior/transition probabilities
        prior_probs = distance_prior_evals * deviation_prior_evals
        prior_probs_norm_const = prior_probs.sum()

        sampled_dis_route_index = np.random.choice(
            len(prior_probs), 1, p=prior_probs / prior_probs_norm_const)[0]
        sampled_dis_route = discretised_routes[sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[
            discretised_routes_indices[sampled_dis_route_index]]

        full_sampled_route = proposal.process_proposal_output(
            full_sampled_route, sampled_route, sampled_dis_route,
            time_interval, True)

    obs_indices = edges.observation_time_indices(full_sampled_route[:, 0])
    polyline = full_sampled_route[obs_indices, 5:7] \
               + mm_model.gps_sd * np.random.normal(size=(obs_indices.sum(), 2))

    return full_sampled_route, polyline
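
sample_route pairs naturally with the map-matching functions above in simulation studies: draw a ground-truth route and noisy polyline, then check how well it can be recovered. A minimal sketch under the same assumptions about the prepared graph:

# Simulation-study sketch (graph assumed prepared as in the earlier sketches).
route, observed_polyline = sample_route(graph, timestamps=15., num_obs=20)
recovered = offline_map_match(graph, observed_polyline, n_samps=100, timestamps=15.)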