Example #1
def optimal_proposal(graph: MultiDiGraph,
                     particle: np.ndarray,
                     new_observation: Union[None, np.ndarray],
                     time_interval: float,
                     mm_model: MapMatchingModel,
                     full_smoothing: bool = True,
                     d_refine: float = 1.,
                     d_max: float = None,
                     d_max_fail_multiplier: float = 1.5,
                     d_max_threshold: tuple = (0.9, 0.1),
                     num_inter_cut_off: int = None,
                     only_norm_const: bool = False,
                     store_norm_quants: bool = False,
                     resample_fails: bool = True) -> Union[Tuple[Union[None, np.ndarray],
                                                                 float,
                                                                 Union[float, np.ndarray]], float]:
    """
    Samples a single particle from the (distance discretised) optimal proposal.
    :param graph: encodes road network, simplified and projected to UTM
    :param particle: single element of MMParticles.particles
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param full_smoothing: if True, returns the full trajectory;
        otherwise returns only the transition from x_{t-1} to x_t
    :param d_refine: metres, resolution of distance discretisation
    :param d_max: optional override of d_max = mm_model.d_max(time_interval)
    :param d_max_fail_multiplier: multiplier applied to d_max if the proposal fails (all probabilities 0)
    :param d_max_threshold: tuple defining when to extend d_max:
        extend if the proportion of sampling probability on distances greater than
        d_max * d_max_threshold[0] exceeds d_max_threshold[1]
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :param only_norm_const: if True, only return the prior normalising constant (don't sample)
    :param store_norm_quants: whether to additionally return quantities needed for the gradient EM step,
        assuming the deviation prior is used
    :param resample_fails: whether to return None (inducing later resampling of the whole trajectory)
        if the proposal fails to find a route with positive probability;
        if False, assume distance = 0
    :return: (particle, unnormalised weight, prior_norm) or (particle, unnormalised weight, dev_norm_quants)
    """
    if particle is None:
        return 0. if only_norm_const else (None, 0., 0.)

    if isinstance(new_observation, list):
        new_observation = np.array(new_observation)

    if num_inter_cut_off is None:
        num_inter_cut_off = max(int(time_interval / 1.5), 10)

    if d_max is None:
        d_max = mm_model.d_max(time_interval)

    # Extract all possible routes from previous position
    start_position = particle[-1:].copy()
    start_position[0, -1] = 0
    possible_routes = get_all_possible_routes_overshoot(graph, start_position, d_max,
                                                        num_inter_cut_off=num_inter_cut_off)

    # Get all possible positions on each route
    discretised_routes_indices_list = []
    discretised_routes_list = []
    for i, route in enumerate(possible_routes):
        # All possible end positions of route
        discretised_edge_matrix = discretise_edge(graph, route[-1, 1:4], d_refine)

        if route.shape[0] == 1:
            discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= particle[-1, 4]]
            discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
        else:
            discretised_edge_matrix[:, -1] += route[-2, -1]

        discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]

        # Track route index and append to list
        if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
            discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
            discretised_routes_list += [discretised_edge_matrix]

    # Concatenate into numpy.ndarray
    discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
    discretised_routes = np.concatenate(discretised_routes_list)

    if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and discretised_routes[0][-1] == 0):
        if only_norm_const:
            return 0.
        if resample_fails:
            return None, 0., 0.
        else:
            sampled_dis_route = discretised_routes[0]

            # Append sampled route to old particle
            sampled_route = possible_routes[0]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
            return proposal_out, 0., 0.

    # Distance prior evals
    distances = discretised_routes[:, -1]
    distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)

    # Deviation prior evals
    deviation_prior_evals = mm_model.deviation_prior_evaluate(particle[-1, 5:7],
                                                              discretised_routes[:, 1:3],
                                                              discretised_routes[:, -1])

    # Normalise prior/transition probabilities
    prior_probs = distance_prior_evals * deviation_prior_evals

    prior_probs_norm_const = prior_probs.sum()
    if only_norm_const:
        if store_norm_quants:
            deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
            deviations = np.abs(deviations - discretised_routes[:, -1])

            # Z, dZ/d(dist_params), dZ/d(deviation_beta)
            dev_norm_quants = np.array([prior_probs_norm_const,
                                        *np.sum(mm_model.distance_prior_gradient(distances, time_interval)
                                                .reshape(len(mm_model.distance_params), len(distances))
                                                * deviation_prior_evals, axis=-1),
                                        -np.sum(deviations
                                                * distance_prior_evals
                                                * deviation_prior_evals)
                                        ])
            return dev_norm_quants
        else:
            return prior_probs_norm_const
    prior_probs /= prior_probs_norm_const

    # Likelihood evaluations
    likelihood_evals = mm_model.likelihood_evaluate(discretised_routes[:, 1:3], new_observation)

    # Calculate sample probabilities
    sample_probs = prior_probs[likelihood_evals > 0] * likelihood_evals[likelihood_evals > 0]

    # p(y_m | x_m-1^j)
    prop_weight = sample_probs.sum()

    model_d_max = mm_model.d_max(time_interval)

    # Proposal failed (weight ~0) or too much sampling mass lies near d_max:
    # either retry with an extended d_max or flag the failure
    if prop_weight < 1e-100 \
            or (np.sum(sample_probs[distances[likelihood_evals > 0] > d_max * d_max_threshold[0]])
                / prop_weight > d_max_threshold[1]
                and d_max <= model_d_max):
        if (d_max - np.max(distances)) < d_refine + 1e-5 \
                and d_max_fail_multiplier > 1 and d_max <= model_d_max:
            return optimal_proposal(graph,
                                    particle,
                                    new_observation,
                                    time_interval,
                                    mm_model,
                                    full_smoothing,
                                    d_refine,
                                    d_max=d_max * d_max_fail_multiplier,
                                    num_inter_cut_off=num_inter_cut_off,
                                    only_norm_const=only_norm_const,
                                    store_norm_quants=store_norm_quants,
                                    resample_fails=resample_fails)
        if resample_fails:
            proposal_out = None
        else:
            sampled_dis_route_index = np.where(discretised_routes[:, -1] == 0)[0][0]
            sampled_dis_route = discretised_routes[sampled_dis_route_index]

            # Append sampled route to old particle
            sampled_route = possible_routes[discretised_routes_indices[sampled_dis_route_index]]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
        prop_weight = 0.
    else:
        # Sample an edge and distance
        sampled_dis_route_index = np.random.choice(len(sample_probs), 1, p=sample_probs / prop_weight)[0]
        sampled_dis_route = discretised_routes[likelihood_evals > 0][sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[discretised_routes_indices[likelihood_evals > 0][sampled_dis_route_index]]

        proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                               full_smoothing)

    if store_norm_quants:
        deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
        deviations = np.abs(deviations - discretised_routes[:, -1])

        # Z, dZ/d(dist_params), dZ/d(deviation_beta)
        dev_norm_quants = np.array([prior_probs_norm_const,
                                    *np.sum(mm_model.distance_prior_gradient(distances, time_interval)
                                            .reshape(len(mm_model.distance_params), len(distances))
                                            * deviation_prior_evals, axis=-1),
                                    -np.sum(deviations
                                            * distance_prior_evals
                                            * deviation_prior_evals)
                                    ])

        return proposal_out, prop_weight, dev_norm_quants
    else:
        return proposal_out, prop_weight, prior_probs_norm_const
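
The core of the proposal above is a categorical draw over a distance-discretised grid: prior (distance times deviation) multiplied by the observation likelihood, normalised, then sampled. A self-contained numpy sketch of that step, with toy stand-ins (candidate_coords, candidate_dists, dist_rate are illustrative, not bmm objects):

import numpy as np

rng = np.random.default_rng(0)

# Toy stand-ins for the discretised routes: positions (UTM-like) and distances travelled
candidate_coords = rng.uniform(0., 100., size=(50, 2))
candidate_dists = np.sort(rng.uniform(0., 80., size=50))
new_observation = np.array([40., 55.])
gps_sd, dist_rate = 7., 0.1

prior_probs = dist_rate * np.exp(-dist_rate * candidate_dists)  # toy exponential distance prior
prior_probs_norm_const = prior_probs.sum()
prior_probs /= prior_probs_norm_const

likelihood_evals = np.exp(-0.5 * np.sum((candidate_coords - new_observation) ** 2, axis=1)
                          / gps_sd ** 2)

sample_probs = prior_probs * likelihood_evals
prop_weight = sample_probs.sum()  # unnormalised weight, estimates p(y_t | x_{t-1})
sampled_index = rng.choice(len(sample_probs), p=sample_probs / prop_weight)
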
Example #2
def fixed_lag_stitch_post_split(graph: MultiDiGraph,
                                fixed_particles: MMParticles,
                                new_particles: MMParticles,
                                new_weights: np.ndarray,
                                mm_model: MapMatchingModel,
                                max_rejections: int) -> MMParticles:
    """
    Stitch together fixed_particles with samples from new_particles according to joint fixed-lag posterior
    :param graph: encodes road network, simplified and projected to UTM
    :param fixed_particles: trajectories before stitching time (won't be changed)
    :param new_particles: trajectories after stitching time (to be resampled)
        one observation time overlap with fixed_particles
    :param new_weights: weights applied to new_particles
    :param mm_model: MapMatchingModel
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :return: MMParticles object
    """

    n = len(fixed_particles)
    full_fixed_lag_resample = max_rejections == 0

    min_resample_time = new_particles.observation_times[1]
    min_resample_time_indices = [
        np.where(particle[:, 0] == min_resample_time)[0][0]
        if particle is not None else 0 for particle in new_particles
    ]
    original_stitching_distances = np.array([
        new_particles[j][min_resample_time_indices[j],
                         -1] if new_particles[j] is not None else 0
        for j in range(n)
    ])

    max_fixed_time = fixed_particles._first_non_none_particle[-1, 0]

    stitch_time_interval = min_resample_time - max_fixed_time

    distance_prior_evals = mm_model.distance_prior_evaluate(
        original_stitching_distances, stitch_time_interval)

    # new_particles overlap fixed_particles by one observation time,
    # so row 0 of each new particle gives the last fixed coordinates
    fixed_last_coords = np.array([
        part[0, 5:7] if part is not None else [0, 0] for part in new_particles
    ])
    new_coords = np.array([
        new_particles[j][min_resample_time_indices[j],
                         5:7] if new_particles[j] is not None else [0, 0]
        for j in range(n)
    ])
    deviation_prior_evals = mm_model.deviation_prior_evaluate(
        fixed_last_coords, new_coords, original_stitching_distances)

    original_prior_evals = np.zeros(n)
    pos_inds = new_particles.prior_norm > 1e-5
    original_prior_evals[pos_inds] = distance_prior_evals[pos_inds] \
                                     * deviation_prior_evals[pos_inds] \
                                     * new_particles.prior_norm[pos_inds]

    out_particles = fixed_particles

    # Initiate some required quantities depending on whether to do rejection sampling or not
    if full_fixed_lag_resample:
        ess_stitch_track = np.zeros(n)

    else:
        ess_stitch_track = None

        pos_prior_bound = mm_model.pos_distance_prior_bound(
            stitch_time_interval)
        prior_bound = mm_model.distance_prior_bound(stitch_time_interval)
        store_out_parts = fixed_particles.copy()

    adjusted_weights = new_weights.copy()
    adjusted_weights[original_prior_evals > 1e-5] /= original_prior_evals[
        original_prior_evals > 1e-5]
    adjusted_weights[original_prior_evals < 1e-5] = 0
    adjusted_weights /= np.sum(adjusted_weights)

    resort_to_full = False

    # Iterate through particles
    for j in range(n):
        fixed_particle = fixed_particles[j]

        # Check if particle is None
        # i.e. fixed lag approx has failed
        if fixed_particle is None:
            out_particles[j] = None
            if full_fixed_lag_resample:
                ess_stitch_track[j] = 0
            continue

        last_edge_fixed = fixed_particle[-1]
        last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
        last_edge_fixed_length = last_edge_fixed_geom.length

        if full_fixed_lag_resample:
            # Full resampling
            out_particles[j], ess_stitch_track[j] = full_fixed_lag_stitch(
                fixed_particle, last_edge_fixed, last_edge_fixed_length,
                new_particles, adjusted_weights, stitch_time_interval,
                min_resample_time_indices, mm_model, True)

        else:
            # Rejection sampling
            out_particles[j] = rejection_fixed_lag_stitch(
                fixed_particle,
                last_edge_fixed,
                last_edge_fixed_length,
                new_particles,
                adjusted_weights,
                stitch_time_interval,
                min_resample_time_indices,
                pos_prior_bound,
                mm_model,
                max_rejections,
                break_on_zero=True)
            if out_particles[j] is None:
                # Rejection sampling reached max_rejections -> try full resampling
                out_particles[j] = full_fixed_lag_stitch(
                    fixed_particle, last_edge_fixed, last_edge_fixed_length,
                    new_particles, adjusted_weights, stitch_time_interval,
                    min_resample_time_indices, mm_model, False)

            if isinstance(out_particles[j], int) and out_particles[j] == 0:
                resort_to_full = True
                break

    if resort_to_full:
        for j in range(n):
            fixed_particle = store_out_parts[j]

            # Check if particle is None
            # i.e. fixed lag approx has failed
            if fixed_particle is None:
                out_particles[j] = None
                if full_fixed_lag_resample:
                    ess_stitch_track[j] = 0
                continue

            last_edge_fixed = fixed_particle[-1]
            last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
            last_edge_fixed_length = last_edge_fixed_geom.length

            # Rejection sampling with full bound
            out_particles[j] = rejection_fixed_lag_stitch(
                fixed_particle, last_edge_fixed, last_edge_fixed_length,
                new_particles, adjusted_weights, stitch_time_interval,
                min_resample_time_indices, prior_bound, mm_model,
                max_rejections)
            if out_particles[j] is None:
                # Rejection sampling reached max_rejections -> try full resampling
                out_particles[j] = full_fixed_lag_stitch(
                    fixed_particle, last_edge_fixed, last_edge_fixed_length,
                    new_particles, adjusted_weights, stitch_time_interval,
                    min_resample_time_indices, mm_model, False)

    if full_fixed_lag_resample:
        out_particles.ess_stitch = np.append(out_particles.ess_stitch,
                                             np.atleast_2d(ess_stitch_track),
                                             axis=0)

    # Do full resampling where fixed lag approx broke
    none_inds = np.array([p is None for p in out_particles])
    good_inds = ~none_inds
    n_good = good_inds.sum()

    if n_good == 0:
        raise ValueError(
            "Map-matching failed: all stitching probabilities zero, "
            "try increasing the lag or number of particles")

    if n_good < n:
        none_inds_res_indices = np.random.choice(n,
                                                 n - n_good,
                                                 p=good_inds / n_good)
        for i, j in enumerate(np.where(none_inds)[0]):
            out_particles[j] = out_particles[none_inds_res_indices[i]]
        if full_fixed_lag_resample:
            out_particles.ess_stitch[-1,
                                     none_inds] = 1 / (new_weights**2).sum()

    return out_particles
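
The weight adjustment at the heart of the stitching above, in isolation: new-particle weights are divided by their stored prior normalising constants so that the stitched transition density enters exactly once. A minimal sketch with toy numbers (the arrays stand in for new_weights and new_particles.prior_norm):

import numpy as np

new_weights = np.array([0.2, 0.5, 0.3])
prior_norm = np.array([0.04, 0.0, 0.08])  # normalising constants of p(x_t | x_{t-1}) per particle

adjusted_weights = new_weights.copy()
pos = prior_norm > 1e-5
adjusted_weights[pos] /= prior_norm[pos]
adjusted_weights[~pos] = 0.  # particles with vanishing prior mass cannot be stitched
adjusted_weights /= adjusted_weights.sum()
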
Example #3
def full_fixed_lag_stitch(
    fixed_particle: np.ndarray,
    last_edge_fixed: np.ndarray,
    last_edge_fixed_length: float,
    new_particles: MMParticles,
    adjusted_weights: np.ndarray,
    stitch_time_interval: float,
    min_resample_time_indices: Union[list, np.ndarray],
    mm_model: MapMatchingModel,
    return_ess_stitch: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
    """
    Evaluate full interacting weights, normalise and sample (stitch) for a single fixed particle
    :param fixed_particle: trajectory prior to stitching time
    :param last_edge_fixed: row of last fixed particle
    :param last_edge_fixed_length: length of last fixed edge (avoids repeated get_geometry calls)
    :param new_particles: particles proposed for stitching
    :param adjusted_weights: non-interacting weights for new_particles
    :param stitch_time_interval: time between stitching observations
    :param min_resample_time_indices: indices for row of min_resample_time in new_particles
    :param mm_model: MapMatchingModel
    :param return_ess_stitch: whether to calculate and return the ESS of the full stitching weights
    :return: stitched particle (and ess_stitch if return_ess_stitch)
    """
    n = len(new_particles)

    # Possible particles to be resampled placeholder
    newer_particles_adjusted = [None] * n

    # Stitching distances
    new_stitching_distances = np.empty(n)
    new_stitching_distances[:] = np.nan

    new_cart_coords = np.empty((n, 2))

    for k in range(n):

        if new_particles[k] is None:
            continue

        new_particle = new_particles[k].copy()

        # Check both particles start from same edge
        if np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
            # Check that new edge overtakes fixed edge. i.e. distance isn't negative
            if np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
                    new_particle[1, 4] < (last_edge_fixed[4] - 1e-6):
                continue

            new_cart_coords[k] = new_particle[min_resample_time_indices[k],
                                              5:7]

            # Calculate distance modification
            first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]
                                     ) * last_edge_fixed_length
            first_distance_k = new_particle[1, -1]

            change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)

            new_particle[1:(min_resample_time_indices[k] + 1),
                         -1] += change_dist

            new_stitching_distances[k] = new_particle[
                min_resample_time_indices[k], -1]

            # Store adjusted particle
            newer_particles_adjusted[k] = new_particle[1:]

    # Calculate adjusted weight
    res_weights = np.zeros(n)
    possible_inds = ~np.isnan(new_stitching_distances)

    new_stitching_distances_trimmed = new_stitching_distances[possible_inds]
    new_cart_coords_trimmed = new_cart_coords[possible_inds]

    adjusted_weights_trimmed = adjusted_weights[possible_inds]
    if adjusted_weights_trimmed.sum() == 0:
        adjusted_weights_trimmed[:] = 1
    stitched_distance_prior_evals_trimmed = mm_model.distance_prior_evaluate(
        new_stitching_distances_trimmed, stitch_time_interval)

    stitched_deviation_prior_trimmed = mm_model.deviation_prior_evaluate(
        fixed_particle[-1, 5:7], new_cart_coords_trimmed,
        new_stitching_distances_trimmed)

    res_weights[possible_inds] = adjusted_weights_trimmed \
                                 * stitched_distance_prior_evals_trimmed \
                                 * stitched_deviation_prior_trimmed

    # Normalise adjusted resample weights
    with np.errstate(invalid='ignore'):
        res_weights /= res_weights.sum()

    # If no valid stitching particle, return None (caller resamples the full trajectory)
    if max(res_weights) == 0 or np.all(np.isnan(res_weights)):
        out_particle = None
        ess_stitch = 1 / np.sum(adjusted_weights**2)

    # Otherwise fixed-lag resample and stitch
    else:
        # Resample index
        res_index = np.random.choice(n, 1, p=res_weights)[0]

        # Update output
        out_particle = np.append(fixed_particle,
                                 newer_particles_adjusted[res_index],
                                 axis=0)

        # Track ESS
        ess_stitch = 1 / np.sum(res_weights**2)

    if return_ess_stitch:
        return out_particle, ess_stitch
    else:
        return out_particle
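
The ess_stitch tracked above is the standard effective-sample-size diagnostic, 1 / sum(w_i^2), on the normalised stitching weights. A tiny standalone illustration with made-up weights:

import numpy as np

res_weights = np.array([0.7, 0.2, 0.1, 0.0])  # normalised stitching weights
ess_stitch = 1 / np.sum(res_weights ** 2)     # ~1.85 out of n = 4: highly degenerate

uniform_weights = np.full(4, 0.25)
assert np.isclose(1 / np.sum(uniform_weights ** 2), 4.)  # uniform weights give ESS = n
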
Example #4
def gradient_em_step(mm_model: MapMatchingModel, map_matchings: list,
                     time_interval_arrs: list, polylines: list,
                     stepsize: float):
    """
    For given map-matching results, take gradient step on prior hyperparameters (but fully optimise gps_sd)
    Updates mm_model hyperparameters in place
    :param mm_model: MapMatchingModel
    :param map_matchings: list of MMParticles objects
    :param time_interval_arrs: time interval arrays for each route
    :param polylines: observations for each route
    :param stepsize: stepsize for gradient step (applied to each coord)
    """
    n_particles = map_matchings[0].n

    # Get key quantities
    distances = np.array([])
    time_interval_arrs_concat = np.array([])
    devs = np.array([])
    sq_obs_dists = np.array([])
    dev_norm_quants = []
    for map_matching, time_interval_arr, polyline in zip(
            map_matchings, time_interval_arrs, polylines):
        distances_single, devs_and_norms_single, sq_obs_dists_single = extract_mm_quantities(
            map_matching, polyline)
        distances = np.append(distances, distances_single)
        time_interval_arrs_concat = np.append(
            time_interval_arrs_concat,
            np.concatenate([time_interval_arr] * len(map_matching)))

        devs_single, dev_norm_quants_single = devs_and_norms_single
        devs = np.append(devs, devs_single)
        dev_norm_quants.append(dev_norm_quants_single)

        sq_obs_dists = np.append(sq_obs_dists, sq_obs_dists_single)

    # Z, *dZ/dalpha, dZ/dbeta where alpha = distance_params and beta = deviation_beta
    dev_norm_quants = np.concatenate(dev_norm_quants)

    # Zero-distance probability optimisation disabled: use all distances
    pos_distances = distances
    pos_time_interval_arrs_concat = time_interval_arrs_concat
    pos_dev_norm_quants = dev_norm_quants
    pos_devs = devs

    distance_gradient_evals = (mm_model.distance_prior_gradient(pos_distances, pos_time_interval_arrs_concat)
                               / mm_model.distance_prior_evaluate(pos_distances, pos_time_interval_arrs_concat)
                               - pos_dev_norm_quants[:, 1:-1].T / pos_dev_norm_quants[:, 0]).sum(axis=1) \
                              / n_particles

    deviation_beta_gradient_evals = (-pos_devs - pos_dev_norm_quants[:, -1] /
                                     pos_dev_norm_quants[:, 0]).sum() \
                                    / n_particles

    # Take gradient step in distance params
    for i, k in enumerate(mm_model.distance_params.keys()):
        bounds = mm_model.distance_params_bounds[k]
        mm_model.distance_params[k] = min(
            max(
                mm_model.distance_params[k] +
                stepsize * distance_gradient_evals[i], bounds[0]), bounds[1])

    # Take gradient step in deviation beta
    mm_model.deviation_beta = min(
        max(mm_model.deviation_beta + stepsize * deviation_beta_gradient_evals,
            mm_model.deviation_beta_bounds[0]),
        mm_model.deviation_beta_bounds[1])

    # Optimise GPS noise
    mm_model.gps_sd = min(
        max(np.sqrt(sq_obs_dists.mean() / 2), mm_model.gps_sd_bounds[0]),
        mm_model.gps_sd_bounds[1])
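
The two update rules above in miniature: a projected gradient step (move along the gradient, then clip back into the feasible interval) for the prior hyperparameters, and the closed-form Gaussian MLE for gps_sd. Toy numbers throughout:

import numpy as np

# Projected gradient step: take the step, then clip into the bounds
param, grad, stepsize, bounds = 0.15, -2.0, 0.01, (0.01, 0.5)
param = min(max(param + stepsize * grad, bounds[0]), bounds[1])  # -> 0.13

# gps_sd: for isotropic 2-D Gaussian noise, E[|y - x|^2] = 2 * sd^2,
# hence the sqrt(mean squared observation distance / 2) estimator above
sq_obs_dists = np.array([98., 45., 130., 77.])
gps_sd = np.sqrt(sq_obs_dists.mean() / 2)
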
Example #5
def rejection_fixed_lag_stitch(
        fixed_particle: np.ndarray,
        last_edge_fixed: np.ndarray,
        last_edge_fixed_length: float,
        new_particles: MMParticles,
        adjusted_weights: np.ndarray,
        stitch_time_interval: float,
        min_resample_time_indices: Union[list, np.ndarray],
        dist_prior_bound: float,
        mm_model: MapMatchingModel,
        max_rejections: int,
        break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
    """
    Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
    :param fixed_particle: trajectory prior to stitching time
    :param last_edge_fixed: row of last fixed particle
    :param last_edge_fixed_length: length of last fixed edge (avoids repeated get_geometry calls)
    :param new_particles: particles proposed for stitching
    :param adjusted_weights: non-interacting stitching weights
    :param stitch_time_interval: time between stitching observations
    :param min_resample_time_indices: indices for row of min_resample_time in new_particles
    :param dist_prior_bound: bound on distance transition density (given positive if break_on_zero)
    :param mm_model: MapMatchingModel
    :param max_rejections: number of rejections to attempt, if none succeed return None
    :param break_on_zero: whether to return 0 if new_stitching_distance=0
    :return: stitched particle
    """
    n = len(new_particles)

    for reject_ind in range(max_rejections):
        new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
        new_particle = new_particles[new_index].copy()

        # Reject if new_particle starts from different edge
        if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
            continue
        # Reject if new_particle doesn't overtake fixed_particle
        elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
                new_particle[1, 4] < last_edge_fixed[4]:
            continue

        # Calculate stitching distance
        first_distance_j_to_k = (new_particle[1, 4] -
                                 last_edge_fixed[4]) * last_edge_fixed_length
        first_distance_k = new_particle[1, -1]

        change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)

        new_particle[1:(min_resample_time_indices[new_index] + 1),
                     -1] += change_dist

        new_stitching_distance = new_particle[
            min_resample_time_indices[new_index], -1]

        if break_on_zero and new_stitching_distance < 1e-5:
            return 0

        # Evaluate distance prior
        new_stitching_distance_prior = mm_model.distance_prior_evaluate(
            new_stitching_distance, stitch_time_interval)
        new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(
            fixed_particle[-1, 5:7],
            new_particle[None, min_resample_time_indices[new_index],
                         5:7], new_stitching_distance)

        accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
        if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
            out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
            return out_particle
    return None
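
A stripped-down version of the rejection loop above: propose indices from the adjusted weights, accept with probability density / bound, and return None if nothing is accepted so the caller can fall back to full stitching. Toy densities stand in for the bmm prior evaluations:

import numpy as np

rng = np.random.default_rng(1)
adjusted_weights = np.array([0.25, 0.25, 0.5])
density_evals = np.array([0.02, 0.10, 0.06])  # toy transition density per particle
density_bound = 0.12                          # valid upper bound on the density
max_rejections = 100

accepted = None
for _ in range(max_rejections):
    k = rng.choice(len(adjusted_weights), p=adjusted_weights)
    if rng.uniform() < density_evals[k] / density_bound:
        accepted = k
        break
# accepted is None if every proposal was rejected -> caller does full stitching
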
Example #6
def optimise_hyperparameters(mm_model: MapMatchingModel, map_matchings: list,
                             time_interval_arrs: list, polylines: list):
    """
    For given map-matching results, optimise model hyperparameters.
    Updates mm_model hyperparameters in place
    :param mm_model: MapMatchingModel
    :param map_matchings: list of MMParticles objects
    :param time_interval_arrs: time interval arrays for each route
    :param polylines: observations for each route
    """
    # Get key quantities
    distances = np.array([])
    time_interval_arrs_concat = np.array([])
    sq_obs_dists = np.array([])
    for map_matching, time_interval_arr, polyline in zip(
            map_matchings, time_interval_arrs, polylines):
        distances_single, sq_obs_dists_single = extract_mm_quantities(
            map_matching, polyline, extract_devs=False)
        distances = np.append(distances, np.concatenate(distances_single))
        time_interval_arrs_concat = np.append(
            time_interval_arrs_concat,
            np.concatenate([time_interval_arr] * len(map_matching)))
        sq_obs_dists = np.append(sq_obs_dists, sq_obs_dists_single)

    # Zero-distance probability optimisation disabled: use all distances
    pos_distances = distances
    pos_time_interval_arrs_concat = time_interval_arrs_concat

    bounds = list(mm_model.distance_params_bounds.values())
    # Collapse degenerate (fixed) parameter bounds to a tiny interval for the optimiser
    bounds = [(a - 1e-5, a + 1e-5) if a == b else (a, b) for a, b in bounds]

    # Negative log-likelihood of the distance parameters
    def distance_minim_func(distance_params_vals: np.ndarray) -> float:
        for i, k in enumerate(mm_model.distance_params.keys()):
            mm_model.distance_params[k] = distance_params_vals[i]
        return -np.sum(
            np.log(
                mm_model.distance_prior_evaluate(
                    pos_distances, pos_time_interval_arrs_concat)))

    # Optimise distance params
    optim_dist_params = minimize(
        distance_minim_func,
        np.array(list(mm_model.distance_params.values())),
        bounds=bounds)

    for i, k in enumerate(mm_model.distance_params.keys()):
        mm_model.distance_params[k] = optim_dist_params.x[i]

    # Optimise GPS noise
    mm_model.gps_sd = min(
        max(np.sqrt(sq_obs_dists.mean() / 2), mm_model.gps_sd_bounds[0]),
        mm_model.gps_sd_bounds[1])
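
The shape of the bounded maximum-likelihood step above, with a toy exponential distance prior standing in for mm_model.distance_prior_evaluate so the snippet runs standalone:

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(2)
distances = rng.exponential(scale=12., size=200)  # synthetic distances, true rate 1/12

def neg_log_lik(params: np.ndarray) -> float:
    # Negative log-likelihood of an exponential distance prior with rate params[0]
    rate = params[0]
    return -np.sum(np.log(rate) - rate * distances)

optim = minimize(neg_log_lik, np.array([0.05]), bounds=[(1e-5, 10.)])
# optim.x[0] should land near the true rate 1/12 ~ 0.083
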
Example #7
def offline_em(graph: MultiDiGraph,
               mm_model: MapMatchingModel,
               timestamps: Union[list, float],
               polylines: list,
               save_path: str,
               n_ffbsi: int = 100,
               n_iter: int = 10,
               gradient_stepsize_scale: float = 1e-3,
               gradient_stepsize_neg_exp: float = 0.5,
               **kwargs):
    """
    Run expectation maximisation to optimise prior hyperparameters.
    Updates the hyperparameters of mm_model in place.
    :param graph: encodes road network, simplified and projected to UTM
    :param mm_model: MapMatchingModel - of which parameters will be updated
    :param timestamps: seconds
        either a float if all time intervals between observations are the same,
        or a list of timestamp arrays (seconds/UNIX time) matching the dimensions of polylines
    :param polylines: UTM polylines
    :param save_path: path to save learned parameters
    :param n_ffbsi: number of samples for FFBSi algorithm
    :param n_iter: number of EM iterations
    :param gradient_stepsize_scale: starting stepsize
    :param gradient_stepsize_neg_exp: rate of decay of stepsize, in [0.5, 1]
    :param kwargs: additional arguments for offline_map_match (FFBSi)
    :return: dict tracking hyperparameter values over EM iterations
    """

    params_track = {
        'distance_params': {
            key: np.asarray(value)
            for key, value in mm_model.distance_params.items()
        },
        'deviation_beta': np.asarray(mm_model.deviation_beta),
        'gps_sd': np.asarray(mm_model.gps_sd)
    }

    if isinstance(polylines, np.ndarray):
        polylines = [polylines]

    if isinstance(timestamps, (float, int)):
        timestamps = [timestamps] * len(polylines)

    # If no deviation prior - can optimise prior directly, otherwise can only take gradient step
    no_deviation_prior = mm_model.deviation_beta_bounds[1] == 0
    if no_deviation_prior:
        mm_model.deviation_beta = 0

    time_interval_arrs_full = [
        get_time_interval_array(timestamps_single, len(polyline))
        for timestamps_single, polyline in zip(timestamps, polylines)
    ]

    for k in range(n_iter):
        # Run FFBSi over all given polylines with latest hyperparameters
        map_matchings = []
        time_interval_arrs_int = []
        polylines_int = []
        for mm_ind, (time_ints_single, polyline) in enumerate(
                zip(time_interval_arrs_full, polylines)):
            print(f'Polyline {mm_ind}')
            try:
                mm = offline_map_match(
                    graph,
                    polyline,
                    n_ffbsi,
                    time_ints_single,
                    mm_model,
                    store_norm_quants=not no_deviation_prior,
                    **kwargs)
            except ValueError:
                print(f'Map-matching {mm_ind} failed')
            else:
                map_matchings.append(mm)
                time_interval_arrs_int.append(time_ints_single)
                polylines_int.append(polyline)

        if no_deviation_prior:
            # Optimise hyperparameters
            optimise_hyperparameters(mm_model, map_matchings,
                                     time_interval_arrs_int, polylines_int)
        else:
            # Take gradient step
            gradient_em_step(
                mm_model, map_matchings, time_interval_arrs_int, polylines_int,
                gradient_stepsize_scale / (k + 1)**gradient_stepsize_neg_exp)

        # Update tracking of hyperparameters
        params_track = update_params_track(params_track, mm_model)

        print(f'EM iter: {k}')
        print(params_track)
        with open(save_path, 'wb') as f:
            pickle.dump(params_track, f)

    return params_track
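
The stepsize passed to gradient_em_step above decays across EM iterations. A standalone view of the schedule (stochastic-approximation style: large early steps, shrinking later ones):

# stepsize_k = scale / (k + 1) ** gamma, gamma in [0.5, 1]
scale, gamma, n_iter = 1e-3, 0.5, 10
stepsizes = [scale / (k + 1) ** gamma for k in range(n_iter)]
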
Example #8
def backward_simulate(graph: MultiDiGraph,
                      filter_particles: MMParticles,
                      filter_weights: np.ndarray,
                      time_interval_arr: np.ndarray,
                      mm_model: MapMatchingModel,
                      max_rejections: int,
                      verbose: bool = False,
                      store_ess_back: bool = None,
                      store_norm_quants: bool = False) -> MMParticles:
    """
    Given particle filter output, run backwards simulation to output smoothed trajectories
    :param graph: encodes road network, simplified and projected to UTM
    :param filter_particles: marginal outputs from particle filter
    :param filter_weights: weights for filter_particles at each observation time
    :param time_interval_arr: times between observations, must be length one less than filter_particles
    :param mm_model: MapMatchingModel
    :param max_rejections: number of rejections to attempt before doing full backward sampling
        0 will do full backward simulation and track ess_back
    :param verbose: print ess_pf or ess_back
    :param store_ess_back: whether to store ess_back (if possible) in MMParticles object
    :param store_norm_quants: if True, normalisation quantities are returned in out_particles
    :return: MMParticles object
    """
    n_samps = filter_particles[-1].n
    num_obs = len(filter_particles)

    if len(time_interval_arr) + 1 != num_obs:
        raise ValueError(
            "time_interval_arr must be length one less than that of filter_particles"
        )

    full_sampling = max_rejections == 0
    if store_ess_back is None:
        store_ess_back = full_sampling

    # Multinomial resample end particles if weighted
    if np.all(filter_weights[-1] == filter_weights[-1][0]):
        out_particles = filter_particles[-1].copy()
    else:
        out_particles = multinomial(filter_particles[-1], filter_weights[-1])
    if full_sampling:
        ess_back = np.zeros((num_obs, n_samps))
        ess_back[0] = 1 / (filter_weights[-1]**2).sum()
    else:
        ess_back = None

    if num_obs < 2:
        return out_particles

    if store_norm_quants:
        norm_quants = np.zeros(
            (num_obs - 1, *filter_particles[0].prior_norm.shape))

    for i in range(num_obs - 2, -1, -1):
        next_time = filter_particles[i + 1].latest_observation_time

        if not full_sampling:
            pos_prior_bound = mm_model.pos_distance_prior_bound(
                time_interval_arr[i])
            prior_bound = mm_model.distance_prior_bound(time_interval_arr[i])
            store_out_parts = out_particles.copy()

        if filter_particles[i].prior_norm.ndim == 2:
            prior_norm = filter_particles[i].prior_norm[:, 0]
        else:
            prior_norm = filter_particles[i].prior_norm
        adjusted_weights = filter_weights[i].copy()
        good_inds = np.logical_and(adjusted_weights != 0, prior_norm != 0)
        adjusted_weights[good_inds] /= prior_norm[good_inds]
        adjusted_weights[~good_inds] = 0
        adjusted_weights /= adjusted_weights.sum()

        if store_norm_quants:
            sampled_inds = np.zeros(n_samps, dtype=int)

        resort_to_full = False
        for j in range(n_samps):
            fixed_particle = out_particles[j].copy()
            first_edge_fixed = fixed_particle[0]
            first_edge_fixed_geom = get_geometry(graph, first_edge_fixed[1:4])
            first_edge_fixed_length = first_edge_fixed_geom.length
            fixed_next_time_index = np.where(
                fixed_particle[:, 0] == next_time)[0][0]

            if full_sampling:
                back_output = full_backward_sample(
                    fixed_particle,
                    first_edge_fixed,
                    first_edge_fixed_length,
                    filter_particles[i],
                    adjusted_weights,
                    time_interval_arr[i],
                    fixed_next_time_index,
                    mm_model,
                    return_ess_back=True,
                    return_sampled_index=store_norm_quants)

                if store_norm_quants:
                    out_particles[j], ess_back[
                        i, j], sampled_inds[j] = back_output
                else:
                    out_particles[j], ess_back[i, j] = back_output

            else:
                back_output = rejection_backward_sample(
                    fixed_particle,
                    first_edge_fixed,
                    first_edge_fixed_length,
                    filter_particles[i],
                    adjusted_weights,
                    time_interval_arr[i],
                    fixed_next_time_index,
                    pos_prior_bound,
                    mm_model,
                    max_rejections,
                    return_sampled_index=store_norm_quants,
                    break_on_zero=True)

                first_back_output = back_output[
                    0] if store_norm_quants else back_output

                if first_back_output is None:
                    back_output = full_backward_sample(
                        fixed_particle,
                        first_edge_fixed,
                        first_edge_fixed_length,
                        filter_particles[i],
                        adjusted_weights,
                        time_interval_arr[i],
                        fixed_next_time_index,
                        mm_model,
                        return_ess_back=False,
                        return_sampled_index=store_norm_quants)

                if isinstance(first_back_output,
                              int) and first_back_output == 0:
                    resort_to_full = True
                    break

                if store_norm_quants:
                    out_particles[j], sampled_inds[j] = back_output
                else:
                    out_particles[j] = back_output

        if resort_to_full:
            if store_norm_quants:
                sampled_inds = np.zeros(n_samps, dtype=int)
            for j in range(n_samps):
                fixed_particle = store_out_parts[j]
                first_edge_fixed = fixed_particle[0]
                first_edge_fixed_geom = get_geometry(graph,
                                                     first_edge_fixed[1:4])
                first_edge_fixed_length = first_edge_fixed_geom.length
                fixed_next_time_index = np.where(
                    fixed_particle[:, 0] == next_time)[0][0]

                back_output = rejection_backward_sample(
                    fixed_particle,
                    first_edge_fixed,
                    first_edge_fixed_length,
                    filter_particles[i],
                    adjusted_weights,
                    time_interval_arr[i],
                    fixed_next_time_index,
                    prior_bound,
                    mm_model,
                    max_rejections,
                    return_sampled_index=store_norm_quants,
                    break_on_zero=False)

                first_back_output = back_output[
                    0] if store_norm_quants else back_output

                if first_back_output is None:
                    back_output = full_backward_sample(
                        fixed_particle,
                        first_edge_fixed,
                        first_edge_fixed_length,
                        filter_particles[i],
                        adjusted_weights,
                        time_interval_arr[i],
                        fixed_next_time_index,
                        mm_model,
                        return_ess_back=False,
                        return_sampled_index=store_norm_quants)

                if store_norm_quants:
                    out_particles[j], sampled_inds[j] = back_output
                else:
                    out_particles[j] = back_output

        if store_norm_quants:
            norm_quants[i] = filter_particles[i].prior_norm[sampled_inds]

        none_inds = np.array([p is None or None in p for p in out_particles])
        good_inds = ~none_inds
        n_good = good_inds.sum()
        if n_good < n_samps:
            none_inds_res_indices = np.random.choice(n_samps,
                                                     n_samps - n_good,
                                                     p=good_inds / n_good)
            for i_none, j_none in enumerate(np.where(none_inds)[0]):
                out_particles[j_none] = out_particles[
                    none_inds_res_indices[i_none]].copy()
                if store_norm_quants:
                    norm_quants[:, j_none] = norm_quants[:,
                                                         none_inds_res_indices[
                                                             i_none]]
            if store_ess_back:
                out_particles.ess_back[i, none_inds] = n_samps

        if verbose:
            if full_sampling:
                print(
                    str(filter_particles[i].latest_observation_time) +
                    " Av Backward ESS: " + str(np.mean(ess_back[i])))
            else:
                print(str(filter_particles[i].latest_observation_time))

        if store_ess_back:
            out_particles.ess_back = ess_back

    if store_norm_quants:
        out_particles.dev_norm_quants = norm_quants

    return out_particles
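
One backward-simulation step in miniature: filter weights are reweighted by the transition density to the already-fixed future state, normalised, and an ancestor index is drawn. Toy numbers stand in for the prior/deviation evaluations above:

import numpy as np

rng = np.random.default_rng(3)
filter_weights = np.array([0.4, 0.1, 0.5])
transition_dens = np.array([0.02, 0.07, 0.0])  # toy p(x_{t+1} | x_t^(i)) per filter particle

backward_weights = filter_weights * transition_dens
backward_weights /= backward_weights.sum()
ancestor = rng.choice(len(backward_weights), p=backward_weights)
ess_back = 1 / np.sum(backward_weights ** 2)
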
Example #9
def full_backward_sample(fixed_particle: np.ndarray,
                         first_edge_fixed: np.ndarray,
                         first_edge_fixed_length: float,
                         filter_particles: MMParticles,
                         adjusted_weights: Union[list, np.ndarray],
                         time_interval: float,
                         next_time_index: int,
                         mm_model: MapMatchingModel,
                         return_ess_back: bool = False,
                         return_sampled_index: bool = False) \
        -> Union[Optional[np.ndarray], tuple]:
    """
    Evaluate full interacting weights, normalise and backwards sample a past coordinate
    for a single fixed particle of future coordinates
    :param fixed_particle: trajectory post backwards sampling time
    :param first_edge_fixed: first row of fixed particle
    :param first_edge_fixed_length: metres
    :param filter_particles: proposal particles to be sampled
    :param adjusted_weights: non-interacting weights for filter_particles
    :param time_interval: time between observations at backwards sampling time
    :param next_time_index: index of second observation time in fixed_particle
    :param mm_model: MapMatchingModel
    :param return_ess_back: whether to calculate and return the ESS of the full interacting weights
    :param return_sampled_index: whether to return index of selected back sample
    :return: appended particle (and ess_back if return_ess_back)
    """
    n = filter_particles.n

    smoothing_distances = np.empty(n)
    smoothing_distances[:] = np.nan

    distances_j_to_k = np.empty(n)
    new_prev_cart_coords = np.empty((n, 2))

    for k in range(n):
        if adjusted_weights[k] == 0:
            continue

        filter_particle = filter_particles[k]

        # Check first fixed edge and last filter edge coincide
        if np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
            # Check that fixed edge overtakes filter edge. i.e. distance isn't negative
            if np.array_equal(filter_particle[-1, 1:4], fixed_particle[next_time_index, 1:4]) and \
                    filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
                continue

            distances_j_to_k[k] = np.round(
                (first_edge_fixed[4] - filter_particle[-1, 4]) *
                first_edge_fixed_length, 5)
            smoothing_distances[k] = fixed_particle[next_time_index,
                                                    -1] + distances_j_to_k[k]

            if smoothing_distances[k] < 0:
                raise ValueError('Negative smoothing distance')

            new_prev_cart_coords[k] = filter_particle[-1, 5:7]

    possible_inds = ~np.isnan(smoothing_distances)
    if not np.any(possible_inds):
        if return_ess_back:
            if return_sampled_index:
                return None, 0, 0
            else:
                return None, 0
        else:
            if return_sampled_index:
                return None, 0
            else:
                return None

    smoothing_weights = adjusted_weights[possible_inds] \
                        * mm_model.distance_prior_evaluate(smoothing_distances[possible_inds],
                                                           time_interval) \
                        * mm_model.deviation_prior_evaluate(new_prev_cart_coords[possible_inds],
                                                            fixed_particle[None, next_time_index, 5:7],
                                                            smoothing_distances[possible_inds])

    smoothing_weights /= smoothing_weights.sum()

    sampled_index = np.where(possible_inds)[0][np.random.choice(
        len(smoothing_weights), 1, p=smoothing_weights)[0]]

    fixed_particle[1:(next_time_index + 1),
                   -1] += distances_j_to_k[sampled_index]

    out_particle = np.append(filter_particles[sampled_index],
                             fixed_particle[1:],
                             axis=0)

    ess_back = 1 / (smoothing_weights**2).sum()

    if return_ess_back:
        if return_sampled_index:
            return out_particle, ess_back, sampled_index
        else:
            return out_particle, ess_back
    else:
        if return_sampled_index:
            return out_particle, sampled_index
        else:
            return out_particle
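
The distance bookkeeping above in isolation: when the sampled filter particle ends part-way along the same edge the fixed trajectory starts on, the travelled distance at the next observation is shifted by the gap along that shared edge. Toy numbers; alpha denotes the fractional position along an edge, as in the particle arrays:

import numpy as np

first_edge_fixed_length = 80.          # metres
alpha_filter, alpha_fixed = 0.3, 0.45  # fractional positions along the shared edge

# Gap between the filter particle's endpoint and the fixed particle's start point
distance_j_to_k = np.round((alpha_fixed - alpha_filter) * first_edge_fixed_length, 5)  # 12.0

# Fixed particle's stored distance at the next observation, shifted by the gap
smoothing_distance = 25. + distance_j_to_k  # 37.0 metres
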
Example #10
def rejection_backward_sample(
        fixed_particle: np.ndarray,
        first_edge_fixed: np.ndarray,
        first_edge_fixed_length: float,
        filter_particles: MMParticles,
        filter_weights: np.ndarray,
        time_interval: float,
        next_time_index: int,
        prior_bound: float,
        mm_model: MapMatchingModel,
        max_rejections: int,
        return_sampled_index: bool = False,
        break_on_zero: bool = False
) -> Union[Optional[np.ndarray], tuple, int]:
    """
    Attempt up to max_rejections of rejection sampling to backwards sample a single particle
    :param fixed_particle: trajectory post backwards sampling time
    :param first_edge_fixed: first row of fixed particle
    :param first_edge_fixed_length: metres
    :param filter_particles: proposal particles to be sampled
    :param filter_weights: weights for filter_particles
    :param time_interval: time between observations at backwards sampling time
    :param next_time_index: index of second observation time in fixed_particle
    :param prior_bound: bound on distance transition density (given positive if break_on_zero)
    :param mm_model: MapMatchingModel
    :param max_rejections: number of rejections to attempt, if none succeed return None
    :param return_sampled_index: whether to return index of selected back sample
    :param break_on_zero: whether to return 0 if smoothing_distance=0
    :return: appended particle
    """
    n = filter_particles.n

    for k in range(max_rejections):
        filter_index = np.random.choice(n, 1, p=filter_weights)[0]
        filter_particle = filter_particles[filter_index]

        if not np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
            continue
        elif np.array_equal(fixed_particle[next_time_index, 1:4], filter_particle[-1, 1:4]) and \
                filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
            continue

        distance_j_to_k = np.round(
            (first_edge_fixed[4] - filter_particle[-1, 4]) *
            first_edge_fixed_length, 5)

        smoothing_distance = fixed_particle[next_time_index,
                                            -1] + distance_j_to_k

        if break_on_zero and smoothing_distance < 1e-5:
            return (0, filter_index) if return_sampled_index else 0

        smoothing_distance_prior = mm_model.distance_prior_evaluate(
            smoothing_distance, time_interval)
        smoothing_deviation_prior = mm_model.deviation_prior_evaluate(
            filter_particle[-1, 5:7], fixed_particle[None, next_time_index,
                                                     5:7], smoothing_distance)
        accept_prob = smoothing_distance_prior * smoothing_deviation_prior / prior_bound
        if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
            fixed_particle[1:(next_time_index + 1), -1] += distance_j_to_k
            out_part = np.append(filter_particle, fixed_particle[1:], axis=0)
            if return_sampled_index:
                return out_part, filter_index
            else:
                return out_part

    return (None, 0) if return_sampled_index else None
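
Why the callers above switch between pos_distance_prior_bound and distance_prior_bound: with break_on_zero, the zero-distance case exits early, so only the positive part of the distance prior needs bounding, and a tighter bound means fewer rejections. A toy prior with an atom at zero (an assumed illustrative form; the actual prior lives in MapMatchingModel):

import numpy as np

p_zero, rate = 0.1, 0.1  # toy: probability mass at distance 0, exponential density on d > 0

def toy_distance_prior(d: float) -> float:
    return p_zero if d < 1e-5 else (1 - p_zero) * rate * np.exp(-rate * d)

full_bound = max(p_zero, (1 - p_zero) * rate)  # bounds the prior everywhere
pos_bound = (1 - p_zero) * rate                # tighter: bounds it on d > 0 only

d = 15.
accept_prob_pos = toy_distance_prior(d) / pos_bound  # > toy_distance_prior(d) / full_bound
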
Example #11
def sample_route(
        graph: MultiDiGraph,
        timestamps: Union[float, np.ndarray],
        num_obs: int = None,
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        d_refine: float = 1.,
        start_position: np.ndarray = None,
        num_inter_cut_off: int = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Samples a route from the prior/transition model and generates noisy observations along it.
    Useful for simulating synthetic polylines with a known ground-truth route.

    :param graph: encodes road network, simplified and projected to UTM
    :param timestamps: seconds
        either float if all times between observations are the same, or a series of timestamps in seconds/UNIX timestamp
    :param num_obs: length of observed polyline to generate (inferred from timestamps if an array is given)
    :param mm_model: MapMatchingModel
    :param d_refine: metres, resolution of distance discretisation
    :param start_position: optional start position; array (u, v, k, alpha)
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :return: tuple with sampled route (array with same shape as a single MMParticles)
        and polyline (array with shape (num_obs, 2))
    """

    if isinstance(timestamps, np.ndarray):
        num_obs = len(timestamps) + 1

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    if start_position is None:
        start_position = random_positions(graph, 1)[0]

    start_geom = edges.get_geometry(graph, start_position)
    start_coords = edges.edge_interpolate(start_geom, start_position[-1])

    full_sampled_route = np.concatenate([[0.], start_position, start_coords,
                                         [0.]])[np.newaxis]

    for k in range(num_obs - 1):
        time_interval = time_interval_arr[k]
        d_max = mm_model.d_max(time_interval)

        num_inter_cut_off_i = max(
            int(time_interval /
                1.5), 10) if num_inter_cut_off is None else num_inter_cut_off

        prev_pos = full_sampled_route[-1:].copy()
        prev_pos[0, 0] = 0.
        prev_pos[0, -1] = 0.

        possible_routes = proposal.get_all_possible_routes_overshoot(
            graph, prev_pos, d_max, num_inter_cut_off=num_inter_cut_off_i)

        # Get all possible positions on each route
        discretised_routes_indices_list = []
        discretised_routes_list = []
        for i, route in enumerate(possible_routes):
            # All possible end positions of route
            discretised_edge_matrix = edges.discretise_edge(
                graph, route[-1, 1:4], d_refine)

            if route.shape[0] == 1:
                discretised_edge_matrix = discretised_edge_matrix[
                    discretised_edge_matrix[:, 0] >= full_sampled_route[-1, 4]]
                discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1,
                                                                          -1]
            else:
                discretised_edge_matrix[:, -1] += route[-2, -1]

            discretised_edge_matrix = discretised_edge_matrix[
                discretised_edge_matrix[:, -1] < d_max + 1e-5]

            # Track route index and append to list
            if discretised_edge_matrix is not None and len(
                    discretised_edge_matrix) > 0:
                discretised_routes_indices_list += [
                    np.ones(discretised_edge_matrix.shape[0], dtype=int) * i
                ]
                discretised_routes_list += [discretised_edge_matrix]

        # Concatenate into numpy.ndarray
        discretised_routes_indices = np.concatenate(
            discretised_routes_indices_list)
        discretised_routes = np.concatenate(discretised_routes_list)

        if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and
                                            discretised_routes[0][-1] == 0):
            warnings.warn('sample_route exited prematurely')
            break

        # Distance prior evals
        distances = discretised_routes[:, -1]
        distance_prior_evals = mm_model.distance_prior_evaluate(
            distances, time_interval)

        # Deviation prior evals
        deviation_prior_evals = mm_model.deviation_prior_evaluate(
            full_sampled_route[-1, 5:7], discretised_routes[:, 1:3],
            discretised_routes[:, -1])

        # Normalise prior/transition probabilities
        prior_probs = distance_prior_evals * deviation_prior_evals
        prior_probs_norm_const = prior_probs.sum()

        sampled_dis_route_index = np.random.choice(len(prior_probs),
                                                   1,
                                                   p=prior_probs /
                                                   prior_probs_norm_const)[0]
        sampled_dis_route = discretised_routes[sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[
            discretised_routes_indices[sampled_dis_route_index]]

        full_sampled_route = proposal.process_proposal_output(
            full_sampled_route, sampled_route, sampled_dis_route,
            time_interval, True)

    obs_indices = edges.observation_time_indices(full_sampled_route[:, 0])
    polyline = full_sampled_route[obs_indices, 5:7] \
               + mm_model.gps_sd * np.random.normal(size=(obs_indices.sum(), 2))

    return full_sampled_route, polyline
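
The tail of the generative process above, standalone: read positions at the observation times and corrupt them with isotropic Gaussian GPS noise. Toy coordinates in place of the sampled route:

import numpy as np

rng = np.random.default_rng(4)
route_obs_coords = np.array([[0., 0.], [30., 5.], [55., 20.]])  # positions at observation times
gps_sd = 7.  # metres

polyline = route_obs_coords + gps_sd * rng.normal(size=route_obs_coords.shape)
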