Example No. 1
def full_fixed_lag_stitch(
    fixed_particle: np.ndarray,
    last_edge_fixed: np.ndarray,
    last_edge_fixed_length: float,
    new_particles: MMParticles,
    adjusted_weights: np.ndarray,
    stitch_time_interval: float,
    min_resample_time_indices: Union[list, np.ndarray],
    mm_model: MapMatchingModel,
    return_ess_stitch: bool = False
) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
    """
    Evaluate full interacting weights, normalise and sample (stitch) for a single fixed particle
    :param fixed_particle: trajectory prior to stitching time
    :param last_edge_fixed: last row of the fixed particle
    :param last_edge_fixed_length: length of the last fixed edge (avoids a repeat call to get_geometry)
    :param new_particles: particles proposed for stitching
    :param adjusted_weights: non-interacting weights for new_particles
    :param stitch_time_interval: time between stitching observations
    :param min_resample_time_indices: indices for row of min_resample_time in new_particles
    :param mm_model: MapMatchingModel
    :param return_ess_stitch: whether to calculate and return the ESS of the full stitching weights
    :return: stitched particle (and ess_stitch if return_ess_stitch)
    """
    n = len(new_particles)

    # Possible particles to be resampled placeholder
    newer_particles_adjusted = [None] * n

    # Stitching distances
    new_stitching_distances = np.empty(n)
    new_stitching_distances[:] = np.nan

    new_cart_coords = np.empty((n, 2))

    for k in range(n):
        if new_particles[k] is None:
            continue

        new_particle = new_particles[k].copy()

        # Check both particles start from same edge
        if np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
            # Check that the new edge overtakes the fixed edge, i.e. the distance isn't negative
            if np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
                    new_particle[1, 4] < (last_edge_fixed[4] - 1e-6):
                continue

            new_cart_coords[k] = new_particle[min_resample_time_indices[k],
                                              5:7]

            # Calculate distance modification
            first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]
                                     ) * last_edge_fixed_length
            first_distance_k = new_particle[1, -1]

            change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)

            new_particle[1:(min_resample_time_indices[k] + 1),
                         -1] += change_dist

            new_stitching_distances[k] = new_particle[
                min_resample_time_indices[k], -1]

            # Store adjusted particle
            newer_particles_adjusted[k] = new_particle[1:]

    # Calculate adjusted weight
    res_weights = np.zeros(n)
    possible_inds = ~np.isnan(new_stitching_distances)

    new_stitching_distances_trimmed = new_stitching_distances[possible_inds]
    new_cart_coords_trimmed = new_cart_coords[possible_inds]

    adjusted_weights_trimmed = adjusted_weights[possible_inds]
    if adjusted_weights_trimmed.sum() == 0:
        adjusted_weights_trimmed[:] = 1
    stitched_distance_prior_evals_trimmed = mm_model.distance_prior_evaluate(
        new_stitching_distances_trimmed, stitch_time_interval)

    stitched_deviation_prior_trimmed = mm_model.deviation_prior_evaluate(
        fixed_particle[-1, 5:7], new_cart_coords_trimmed,
        new_stitching_distances_trimmed)

    res_weights[possible_inds] = adjusted_weights_trimmed \
                                 * stitched_distance_prior_evals_trimmed \
                                 * stitched_deviation_prior_trimmed

    # Normalise adjusted resample weights
    with np.errstate(invalid='ignore'):
        res_weights /= res_weights.sum()

    # If no valid stitch was found (all weights zero or NaN), return None so the full trajectory is resampled
    if max(res_weights) == 0 or np.all(np.isnan(res_weights)):
        out_particle = None
        ess_stitch = 1 / np.sum(adjusted_weights**2)

    # Otherwise fixed-lag resample and stitch
    else:
        # Resample index
        res_index = np.random.choice(n, 1, p=res_weights)[0]

        # Update output
        out_particle = np.append(fixed_particle,
                                 newer_particles_adjusted[res_index],
                                 axis=0)

        # Track ESS
        ess_stitch = 1 / np.sum(res_weights**2)

    if return_ess_stitch:
        return out_particle, ess_stitch
    else:
        return out_particle
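
The ess_stitch value tracked here is the standard effective sample size, ESS = 1 / sum(w_k^2), of the normalised stitching weights. A minimal self-contained sketch of the diagnostic in plain NumPy, independent of the map-matching classes:

import numpy as np

def effective_sample_size(weights: np.ndarray) -> float:
    # Normalise, then apply ESS = 1 / sum(w_k^2); ranges from 1
    # (all weight on one particle) up to n (uniform weights).
    weights = weights / weights.sum()
    return 1.0 / np.sum(weights ** 2)

print(effective_sample_size(np.ones(100)))            # 100.0 -> healthy
print(effective_sample_size(np.array([1., 0., 0.])))  # 1.0   -> degenerate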
Example No. 2
def optimal_proposal(graph: MultiDiGraph,
                     particle: np.ndarray,
                     new_observation: Union[None, np.ndarray],
                     time_interval: float,
                     mm_model: MapMatchingModel,
                     full_smoothing: bool = True,
                     d_refine: float = 1.,
                     d_max: float = None,
                     d_max_fail_multiplier: float = 1.5,
                     d_max_threshold: tuple = (0.9, 0.1),
                     num_inter_cut_off: int = None,
                     only_norm_const: bool = False,
                     store_norm_quants: bool = False,
                     resample_fails: bool = True) -> Union[Tuple[Union[None, np.ndarray],
                                                                 float,
                                                                 Union[float, np.ndarray]], float]:
    """
    Samples a single particle from the (distance discretised) optimal proposal.
    :param graph: encodes road network, simplified and projected to UTM
    :param particle: single element of MMParticles.particles
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param full_smoothing: if True, returns the full trajectory;
        otherwise returns only the segment from x_{t-1} to x_t
    :param d_refine: metres, resolution of distance discretisation
    :param d_max: optional override of d_max = mm_model.d_max(time_interval)
    :param d_max_fail_multiplier: multiplier used to extend d_max when all probabilities are 0
    :param d_max_threshold: tuple defining when to extend d_max:
        extend if the total sample probability of distances greater than
        d_max * d_max_threshold[0] exceeds d_max_threshold[1]
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :param only_norm_const: if True, only return the prior normalising constant (don't sample)
    :param store_norm_quants: whether to additionally return the quantities needed for the
        gradient EM step (assuming the deviation prior is used)
    :param resample_fails: whether to return None (inducing a later resample of the whole
        trajectory) if the proposal fails to find a route with positive probability;
        if False, assume distance = 0
    :return: (particle, unnormalised weight, prior_norm) or (particle, unnormalised weight, dev_norm_quants)
    """
    if particle is None:
        return 0. if only_norm_const else (None, 0., 0.)

    if isinstance(new_observation, list):
        new_observation = np.array(new_observation)

    if num_inter_cut_off is None:
        num_inter_cut_off = max(int(time_interval / 1.5), 10)

    if d_max is None:
        d_max = mm_model.d_max(time_interval)

    # Extract all possible routes from previous position
    start_position = particle[-1:].copy()
    start_position[0, -1] = 0
    possible_routes = get_all_possible_routes_overshoot(graph, start_position, d_max,
                                                        num_inter_cut_off=num_inter_cut_off)

    # Get all possible positions on each route
    discretised_routes_indices_list = []
    discretised_routes_list = []
    for i, route in enumerate(possible_routes):
        # All possible end positions of route
        discretised_edge_matrix = discretise_edge(graph, route[-1, 1:4], d_refine)

        if route.shape[0] == 1:
            discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= particle[-1, 4]]
            discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
        else:
            discretised_edge_matrix[:, -1] += route[-2, -1]

        discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]

        # Track route index and append to list
        if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
            discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
            discretised_routes_list += [discretised_edge_matrix]

    # Concatenate into numpy.ndarray
    discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
    discretised_routes = np.concatenate(discretised_routes_list)

    if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and discretised_routes[0][-1] == 0):
        if only_norm_const:
            return 0
        if resample_fails:
            return None, 0., 0.
        else:
            sampled_dis_route = discretised_routes[0]

            # Append sampled route to old particle
            sampled_route = possible_routes[0]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
            return proposal_out, 0., 0.

    # Distance prior evals
    distances = discretised_routes[:, -1]
    distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)

    # Deviation prior evals
    deviation_prior_evals = mm_model.deviation_prior_evaluate(particle[-1, 5:7],
                                                              discretised_routes[:, 1:3],
                                                              discretised_routes[:, -1])

    # Normalise prior/transition probabilities
    prior_probs = distance_prior_evals * deviation_prior_evals

    prior_probs_norm_const = prior_probs.sum()
    if only_norm_const:
        if store_norm_quants:
            deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
            deviations = np.abs(deviations - discretised_routes[:, -1])

            # Z, dZ/d(dist_params), dZ/d(deviation_beta)
            dev_norm_quants = np.array([prior_probs_norm_const,
                                        *np.sum(mm_model.distance_prior_gradient(distances, time_interval)
                                                .reshape(len(mm_model.distance_params), len(distances))
                                                * deviation_prior_evals, axis=-1),
                                        -np.sum(deviations
                                                * distance_prior_evals
                                                * deviation_prior_evals)
                                        ])
            return dev_norm_quants
        else:
            return prior_probs_norm_const
    prior_probs /= prior_probs_norm_const

    # Likelihood evaluations
    likelihood_evals = mm_model.likelihood_evaluate(discretised_routes[:, 1:3], new_observation)

    # Calculate sample probabilities
    sample_probs = prior_probs[likelihood_evals > 0] * likelihood_evals[likelihood_evals > 0]

    # p(y_m | x_m-1^j)
    prop_weight = sample_probs.sum()

    model_d_max = mm_model.d_max(time_interval)

    if prop_weight < 1e-100 \
            or (np.sum(sample_probs[np.where(distances[likelihood_evals > 0]
                                             > (d_max * d_max_threshold[0]))[0]]) / prop_weight
                > d_max_threshold[1]
                and not d_max > model_d_max):
        if (d_max - np.max(distances)) < d_refine + 1e-5 \
                and d_max_fail_multiplier > 1 and (not d_max > model_d_max):
            return optimal_proposal(graph,
                                    particle,
                                    new_observation,
                                    time_interval,
                                    mm_model,
                                    full_smoothing,
                                    d_refine,
                                    d_max=d_max * d_max_fail_multiplier,
                                    num_inter_cut_off=num_inter_cut_off,
                                    only_norm_const=only_norm_const,
                                    store_norm_quants=store_norm_quants,
                                    resample_fails=resample_fails)
        if resample_fails:
            proposal_out = None
        else:
            sampled_dis_route_index = np.where(discretised_routes[:, -1] == 0)[0][0]
            sampled_dis_route = discretised_routes[sampled_dis_route_index]

            # Append sampled route to old particle
            sampled_route = possible_routes[discretised_routes_indices[sampled_dis_route_index]]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
        prop_weight = 0.
    else:
        # Sample an edge and distance
        sampled_dis_route_index = np.random.choice(len(sample_probs), 1, p=sample_probs / prop_weight)[0]
        sampled_dis_route = discretised_routes[likelihood_evals > 0][sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[discretised_routes_indices[likelihood_evals > 0][sampled_dis_route_index]]

        proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                               full_smoothing)

    if store_norm_quants:
        deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
        deviations = np.abs(deviations - discretised_routes[:, -1])

        # Z, dZ/d(dist_params), dZ/d(deviation_beta)
        dev_norm_quants = np.array([prior_probs_norm_const,
                                    *np.sum(mm_model.distance_prior_gradient(distances, time_interval)
                                            .reshape(len(mm_model.distance_params), len(distances))
                                            * deviation_prior_evals, axis=-1),
                                    -np.sum(deviations
                                            * distance_prior_evals
                                            * deviation_prior_evals)
                                    ])

        return proposal_out, prop_weight, dev_norm_quants
    else:
        return proposal_out, prop_weight, prior_probs_norm_const
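
At its core the proposal builds a discrete distribution over candidate positions (distance/deviation prior times observation likelihood), normalises it and samples. A toy stand-alone sketch of that mechanism; the exponential prior and Gaussian likelihood below are hypothetical stand-ins for mm_model.distance_prior_evaluate and mm_model.likelihood_evaluate:

import numpy as np

# Toy discretisation: candidate distances 0, 1, ..., d_max metres
d_refine, d_max, time_interval = 1.0, 50.0, 15.0
distances = np.arange(0.0, d_max, d_refine)

# Hypothetical exponential distance prior (stand-in for the model's prior)
prior = np.exp(-distances / (5.0 * time_interval))
prior /= prior.sum()

# Hypothetical Gaussian likelihood of the new observation (stand-in)
likelihood = np.exp(-0.5 * ((distances - 40.0) / 7.0) ** 2)

# The unnormalised weight p(y_t | x_{t-1}) is the normalising constant
sample_probs = prior * likelihood
prop_weight = sample_probs.sum()

sampled_index = np.random.choice(len(distances), p=sample_probs / prop_weight)
print(distances[sampled_index], prop_weight)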
Example No. 3
def rejection_fixed_lag_stitch(
        fixed_particle: np.ndarray,
        last_edge_fixed: np.ndarray,
        last_edge_fixed_length: float,
        new_particles: MMParticles,
        adjusted_weights: np.ndarray,
        stitch_time_interval: float,
        min_resample_time_indices: Union[list, np.ndarray],
        dist_prior_bound: float,
        mm_model: MapMatchingModel,
        max_rejections: int,
        break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
    """
    Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
    :param fixed_particle: trajectory prior to stitching time
    :param last_edge_fixed: last row of the fixed particle
    :param last_edge_fixed_length: length of the last fixed edge (avoids a repeat call to get_geometry)
    :param new_particles: particles proposed for stitching
    :param adjusted_weights: non-interacting stitching weights
    :param stitch_time_interval: time between stitching observations
    :param min_resample_time_indices: indices for the row of min_resample_time in new_particles
    :param dist_prior_bound: bound on the distance transition density
        (restricted to positive distances if break_on_zero)
    :param mm_model: MapMatchingModel
    :param max_rejections: maximum number of rejection attempts; if none succeed, return None
    :param break_on_zero: whether to return 0 if new_stitching_distance = 0
    :return: stitched particle
    """
    n = len(new_particles)

    for reject_ind in range(max_rejections):
        new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
        new_particle = new_particles[new_index].copy()

        # Reject if new_particle starts from different edge
        if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
            continue
        # Reject if new_particle doesn't overtake fixed_particle
        elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
                new_particle[1, 4] < last_edge_fixed[4]:
            continue

        # Calculate stitching distance
        first_distance_j_to_k = (new_particle[1, 4] -
                                 last_edge_fixed[4]) * last_edge_fixed_length
        first_distance_k = new_particle[1, -1]

        change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)

        new_particle[1:(min_resample_time_indices[new_index] + 1),
                     -1] += change_dist

        new_stitching_distance = new_particle[
            min_resample_time_indices[new_index], -1]

        if break_on_zero and new_stitching_distance < 1e-5:
            return 0

        # Evaluate distance prior
        new_stitching_distance_prior = mm_model.distance_prior_evaluate(
            new_stitching_distance, stitch_time_interval)
        new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(
            fixed_particle[-1, 5:7],
            new_particle[None, min_resample_time_indices[new_index],
                         5:7], new_stitching_distance)

        accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
        if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
            out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
            return out_particle
    return None
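
The loop above is bounded rejection sampling: draw a candidate index from the non-interacting weights, then accept with probability equal to the extra prior factor divided by its bound. A generic self-contained sketch of the pattern (all names and densities illustrative):

import numpy as np

def bounded_rejection_sample(draw, extra_factor, bound, max_rejections, rng):
    # Accepted draws follow proposal(x) * extra_factor(x); returns None if
    # max_rejections attempts all fail, so the caller can fall back to the
    # exact full-weight computation, as in the stitching code above.
    for _ in range(max_rejections):
        candidate = draw(rng)
        if rng.uniform() < extra_factor(candidate) / bound:
            return candidate
    return None

rng = np.random.default_rng(0)
draw = lambda r: r.exponential(10.0)           # toy proposal
factor = lambda x: np.exp(-x / 10.0) / 10.0    # toy density factor, <= 0.1
print(bounded_rejection_sample(draw, factor, 0.1, 20, rng))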
Example No. 4
def fixed_lag_stitch_post_split(graph: MultiDiGraph,
                                fixed_particles: MMParticles,
                                new_particles: MMParticles,
                                new_weights: np.ndarray,
                                mm_model: MapMatchingModel,
                                max_rejections: int) -> MMParticles:
    """
    Stitch together fixed_particles with samples from new_particles according to joint fixed-lag posterior
    :param graph: encodes road network, simplified and projected to UTM
    :param fixed_particles: trajectories before stitching time (won't be changed)
    :param new_particles: trajectories after stitching time (to be resampled);
        overlap fixed_particles by one observation time
    :param new_weights: weights applied to new_particles
    :param mm_model: MapMatchingModel
    :param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
        0 will do full fixed-lag stitching and track ess_stitch
    :return: MMParticles object
    """

    n = len(fixed_particles)
    full_fixed_lag_resample = max_rejections == 0

    min_resample_time = new_particles.observation_times[1]
    min_resample_time_indices = [
        np.where(particle[:, 0] == min_resample_time)[0][0]
        if particle is not None else 0 for particle in new_particles
    ]
    original_stitching_distances = np.array([
        new_particles[j][min_resample_time_indices[j],
                         -1] if new_particles[j] is not None else 0
        for j in range(n)
    ])

    max_fixed_time = fixed_particles._first_non_none_particle[-1, 0]

    stitch_time_interval = min_resample_time - max_fixed_time

    distance_prior_evals = mm_model.distance_prior_evaluate(
        original_stitching_distances, stitch_time_interval)

    fixed_last_coords = np.array([
        part[0, 5:7] if part is not None else [0, 0] for part in new_particles
    ])
    new_coords = np.array([
        new_particles[j][min_resample_time_indices[j],
                         5:7] if new_particles[j] is not None else [0, 0]
        for j in range(n)
    ])
    deviation_prior_evals = mm_model.deviation_prior_evaluate(
        fixed_last_coords, new_coords, original_stitching_distances)

    original_prior_evals = np.zeros(n)
    pos_inds = new_particles.prior_norm > 1e-5
    original_prior_evals[pos_inds] = distance_prior_evals[pos_inds] \
                                     * deviation_prior_evals[pos_inds] \
                                     * new_particles.prior_norm[pos_inds]

    out_particles = fixed_particles

    # Initiate some required quantities depending on whether to do rejection sampling or not
    if full_fixed_lag_resample:
        ess_stitch_track = np.zeros(n)
    else:
        ess_stitch_track = None

        pos_prior_bound = mm_model.pos_distance_prior_bound(
            stitch_time_interval)
        prior_bound = mm_model.distance_prior_bound(stitch_time_interval)
        store_out_parts = fixed_particles.copy()

    adjusted_weights = new_weights.copy()
    adjusted_weights[original_prior_evals > 1e-5] /= original_prior_evals[
        original_prior_evals > 1e-5]
    adjusted_weights[original_prior_evals < 1e-5] = 0
    adjusted_weights /= np.sum(adjusted_weights)

    resort_to_full = False

    # Iterate through particles
    for j in range(n):
        fixed_particle = fixed_particles[j]

        # Check if particle is None
        # i.e. fixed lag approx has failed
        if fixed_particle is None:
            out_particles[j] = None
            if full_fixed_lag_resample:
                ess_stitch_track[j] = 0
            continue

        last_edge_fixed = fixed_particle[-1]
        last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
        last_edge_fixed_length = last_edge_fixed_geom.length

        if full_fixed_lag_resample:
            # Full resampling
            out_particles[j], ess_stitch_track[j] = full_fixed_lag_stitch(
                fixed_particle, last_edge_fixed, last_edge_fixed_length,
                new_particles, adjusted_weights, stitch_time_interval,
                min_resample_time_indices, mm_model, True)

        else:
            # Rejection sampling
            out_particles[j] = rejection_fixed_lag_stitch(
                fixed_particle,
                last_edge_fixed,
                last_edge_fixed_length,
                new_particles,
                adjusted_weights,
                stitch_time_interval,
                min_resample_time_indices,
                pos_prior_bound,
                mm_model,
                max_rejections,
                break_on_zero=True)
            if out_particles[j] is None:
                # Rejection sampling reached max_rejections -> try full resampling
                out_particles[j] = full_fixed_lag_stitch(
                    fixed_particle, last_edge_fixed, last_edge_fixed_length,
                    new_particles, adjusted_weights, stitch_time_interval,
                    min_resample_time_indices, mm_model, False)

            if isinstance(out_particles[j], int) and out_particles[j] == 0:
                resort_to_full = True
                break

    if resort_to_full:
        for j in range(n):
            fixed_particle = store_out_parts[j]

            # Check if particle is None
            # i.e. fixed lag approx has failed
            if fixed_particle is None:
                out_particles[j] = None
                if full_fixed_lag_resample:
                    ess_stitch_track[j] = 0
                continue

            last_edge_fixed = fixed_particle[-1]
            last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
            last_edge_fixed_length = last_edge_fixed_geom.length

            # Rejection sampling with full bound
            out_particles[j] = rejection_fixed_lag_stitch(
                fixed_particle, last_edge_fixed, last_edge_fixed_length,
                new_particles, adjusted_weights, stitch_time_interval,
                min_resample_time_indices, prior_bound, mm_model,
                max_rejections)
            if out_particles[j] is None:
                # Rejection sampling reached max_rejections -> try full resampling
                out_particles[j] = full_fixed_lag_stitch(
                    fixed_particle, last_edge_fixed, last_edge_fixed_length,
                    new_particles, adjusted_weights, stitch_time_interval,
                    min_resample_time_indices, mm_model, False)

    if full_fixed_lag_resample:
        out_particles.ess_stitch = np.append(out_particles.ess_stitch,
                                             np.atleast_2d(ess_stitch_track),
                                             axis=0)

    # Do full resampling where fixed lag approx broke
    none_inds = np.array([p is None for p in out_particles])
    good_inds = ~none_inds
    n_good = good_inds.sum()

    if n_good == 0:
        raise ValueError(
            "Map-matching failed: all stitching probabilities zero,"
            "try increasing the lag or number of particles")

    if n_good < n:
        none_inds_res_indices = np.random.choice(n,
                                                 n - n_good,
                                                 p=good_inds / n_good)
        for i, j in enumerate(np.where(none_inds)[0]):
            out_particles[j] = out_particles[none_inds_res_indices[i]]
        if full_fixed_lag_resample:
            out_particles.ess_stitch[-1,
                                     none_inds] = 1 / (new_weights**2).sum()

    return out_particles
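
The final block replaces trajectories whose stitching failed (None) with uniform draws from the surviving ones. A stand-alone sketch of that fallback, with toy particles:

import numpy as np

rng = np.random.default_rng(1)
particles = [np.zeros((3, 8)), None, np.ones((3, 8)), None]  # toy particles

good_inds = np.array([p is not None for p in particles])
n, n_good = len(particles), good_inds.sum()

# p = good_inds / n_good puts uniform probability on surviving indices
replacement_inds = rng.choice(n, size=n - n_good, p=good_inds / n_good)
for i, j in enumerate(np.where(~good_inds)[0]):
    particles[j] = particles[replacement_inds[i]]

print([p[0, 0] for p in particles])  # every slot is now filled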
Example No. 5
def gradient_em_step(mm_model: MapMatchingModel, map_matchings: list,
                     time_interval_arrs: list, polylines: list,
                     stepsize: float):
    """
    For given map-matching results, take gradient step on prior hyperparameters (but fully optimise gps_sd)
    Updates mm_model hyperparameters in place
    :param mm_model: MapMatchingModel
    :param map_matchings: list of MMParticles objects
    :param time_interval_arrs: time interval arrays for each route
    :param polylines: observations for each route
    :param stepsize: stepsize for the gradient step (applied to each coordinate)
    """
    n_particles = map_matchings[0].n

    # Get key quantities
    distances = np.array([])
    time_interval_arrs_concat = np.array([])
    devs = np.array([])
    sq_obs_dists = np.array([])
    dev_norm_quants = []
    for map_matching, time_interval_arr, polyline in zip(
            map_matchings, time_interval_arrs, polylines):
        distances_single, devs_and_norms_single, sq_obs_dists_single = extract_mm_quantities(
            map_matching, polyline)
        distances = np.append(distances, distances_single)
        time_interval_arrs_concat = np.append(
            time_interval_arrs_concat,
            np.concatenate([time_interval_arr] * len(map_matching)))

        devs_single, dev_norm_quants_single = devs_and_norms_single
        devs = np.append(devs, devs_single)
        dev_norm_quants.append(dev_norm_quants_single)

        sq_obs_dists = np.append(sq_obs_dists, sq_obs_dists_single)

    # Z, *dZ/dalpha, dZ/dbeta where alpha = distance_params and beta = deviation_beta
    dev_norm_quants = np.concatenate(dev_norm_quants)

    pos_distances = distances
    pos_time_interval_arrs_concat = time_interval_arrs_concat
    pos_dev_norm_quants = dev_norm_quants
    pos_devs = devs

    distance_gradient_evals = (mm_model.distance_prior_gradient(pos_distances, pos_time_interval_arrs_concat)
                               / mm_model.distance_prior_evaluate(pos_distances, pos_time_interval_arrs_concat)
                               - pos_dev_norm_quants[:, 1:-1].T / pos_dev_norm_quants[:, 0]).sum(axis=1) \
                              / n_particles

    deviation_beta_gradient_evals = (-pos_devs - pos_dev_norm_quants[:, -1] /
                                     pos_dev_norm_quants[:, 0]).sum() \
                                    / n_particles

    # Take gradient step in distance params
    for i, k in enumerate(mm_model.distance_params.keys()):
        bounds = mm_model.distance_params_bounds[k]
        mm_model.distance_params[k] = min(
            max(
                mm_model.distance_params[k] +
                stepsize * distance_gradient_evals[i], bounds[0]), bounds[1])

    # Take gradient step in deviation beta
    mm_model.deviation_beta = min(
        max(mm_model.deviation_beta + stepsize * deviation_beta_gradient_evals,
            mm_model.deviation_beta_bounds[0]),
        mm_model.deviation_beta_bounds[1])

    # Optimise GPS noise
    mm_model.gps_sd = min(
        max(np.sqrt(sq_obs_dists.mean() / 2), mm_model.gps_sd_bounds[0]),
        mm_model.gps_sd_bounds[1])
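
Each hyperparameter update is a projected gradient step: move along the Monte Carlo gradient estimate, then clip back into the parameter's bounds; gps_sd instead gets a closed-form M-step, sqrt(mean(sq_obs_dists) / 2) being the maximum-likelihood standard deviation of isotropic 2-D Gaussian noise. A minimal stand-alone version of the projection:

def projected_step(value, gradient, stepsize, bounds):
    # Gradient ascent step followed by clipping into [bounds[0], bounds[1]]
    return min(max(value + stepsize * gradient, bounds[0]), bounds[1])

print(projected_step(1.0, gradient=4.0, stepsize=0.1, bounds=(0.0, 1.2)))   # 1.2 (clipped)
print(projected_step(1.0, gradient=-0.5, stepsize=0.1, bounds=(0.0, 1.2)))  # 0.95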
Example No. 6
def full_backward_sample(fixed_particle: np.ndarray,
                         first_edge_fixed: np.ndarray,
                         first_edge_fixed_length: float,
                         filter_particles: MMParticles,
                         adjusted_weights: Union[list, np.ndarray],
                         time_interval: float,
                         next_time_index: int,
                         mm_model: MapMatchingModel,
                         return_ess_back: bool = False,
                         return_sampled_index: bool = False) \
        -> Union[Optional[np.ndarray], tuple]:
    """
    Evaluate full interacting weights, normalise and backwards sample a past coordinate
    for a single fixed particle (a trajectory of future coordinates)
    :param fixed_particle: trajectory post backwards sampling time
    :param first_edge_fixed: first row of fixed particle
    :param first_edge_fixed_length: metres
    :param filter_particles: proposal particles to be sampled
    :param adjusted_weights: non-interacting weights for filter_particles
    :param time_interval: time between observations at backwards sampling time
    :param next_time_index: index of second observation time in fixed_particle
    :param mm_model: MapMatchingModel
    :param return_ess_back: whether to calculate and return the ESS of the full interacting weights
    :param return_sampled_index: whether to return index of selected back sample
    :return: appended particle (and ess_back if return_ess_back)
    """
    n = filter_particles.n

    smoothing_distances = np.empty(n)
    smoothing_distances[:] = np.nan

    distances_j_to_k = np.empty(n)
    new_prev_cart_coords = np.empty((n, 2))

    for k in range(n):
        if adjusted_weights[k] == 0:
            continue

        filter_particle = filter_particles[k]

        # Check first fixed edge and last filter edge coincide
        if np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
            # Check that the fixed edge overtakes the filter edge, i.e. the distance isn't negative
            if np.array_equal(filter_particle[-1, 1:4], fixed_particle[next_time_index, 1:4]) and \
                    filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
                continue

            distances_j_to_k[k] = np.round(
                (first_edge_fixed[4] - filter_particle[-1, 4]) *
                first_edge_fixed_length, 5)
            smoothing_distances[k] = fixed_particle[next_time_index,
                                                    -1] + distances_j_to_k[k]

            if smoothing_distances[k] < 0:
                raise ValueError('Negative smoothing distance')

            new_prev_cart_coords[k] = filter_particle[-1, 5:7]

    possible_inds = ~np.isnan(smoothing_distances)
    if not np.any(possible_inds):
        if return_ess_back:
            if return_sampled_index:
                return None, 0, 0
            else:
                return None, 0
        else:
            if return_sampled_index:
                return None, 0
            else:
                return None

    smoothing_weights = adjusted_weights[possible_inds] \
                        * mm_model.distance_prior_evaluate(smoothing_distances[possible_inds],
                                                           time_interval) \
                        * mm_model.deviation_prior_evaluate(new_prev_cart_coords[possible_inds],
                                                            fixed_particle[None, next_time_index, 5:7],
                                                            smoothing_distances[possible_inds])

    smoothing_weights /= smoothing_weights.sum()

    sampled_index = np.where(possible_inds)[0][np.random.choice(
        len(smoothing_weights), 1, p=smoothing_weights)[0]]

    fixed_particle[1:(next_time_index + 1),
                   -1] += distances_j_to_k[sampled_index]

    out_particle = np.append(filter_particles[sampled_index],
                             fixed_particle[1:],
                             axis=0)

    ess_back = 1 / (smoothing_weights**2).sum()

    if return_ess_back:
        if return_sampled_index:
            return out_particle, ess_back, sampled_index
        else:
            return out_particle, ess_back
    else:
        if return_sampled_index:
            return out_particle, sampled_index
        else:
            return out_particle
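
Backward simulation reweights the filtering particles by the transition density into the fixed future coordinate, normalises, and samples one index, exactly the structure above. A toy numeric version with made-up weights and densities:

import numpy as np

rng = np.random.default_rng(2)
filter_weights = np.array([0.25, 0.25, 0.25, 0.25])  # toy filtering weights
transition_dens = np.array([0.0, 0.3, 0.6, 0.1])     # toy p(x_{t+1} | x_t^k)

smoothing_weights = filter_weights * transition_dens
smoothing_weights /= smoothing_weights.sum()

sampled_index = rng.choice(len(smoothing_weights), p=smoothing_weights)
ess_back = 1 / np.sum(smoothing_weights ** 2)
print(sampled_index, ess_back)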
Example No. 7
def rejection_backward_sample(
        fixed_particle: np.ndarray,
        first_edge_fixed: np.ndarray,
        first_edge_fixed_length: float,
        filter_particles: MMParticles,
        filter_weights: np.ndarray,
        time_interval: float,
        next_time_index: int,
        prior_bound: float,
        mm_model: MapMatchingModel,
        max_rejections: int,
        return_sampled_index: bool = False,
        break_on_zero: bool = False
) -> Union[Optional[np.ndarray], tuple, int]:
    """
    Attempt up to max_rejections of rejection sampling to backwards sample a single particle
    :param fixed_particle: trajectory post backwards sampling time
    :param first_edge_fixed: first row of fixed particle
    :param first_edge_fixed_length: metres
    :param filter_particles: proposal particles to be sampled
    :param filter_weights: weights for filter_particles
    :param time_interval: time between observations at backwards sampling time
    :param next_time_index: index of second observation time in fixed_particle
    :param prior_bound: bound on the distance transition density
        (restricted to positive distances if break_on_zero)
    :param mm_model: MapMatchingModel
    :param max_rejections: maximum number of rejection attempts; if none succeed, return None
    :param return_sampled_index: whether to return index of selected back sample
    :param break_on_zero: whether to return 0 if smoothing_distance = 0
    :return: appended particle
    """
    n = filter_particles.n

    for k in range(max_rejections):
        filter_index = np.random.choice(n, 1, p=filter_weights)[0]
        filter_particle = filter_particles[filter_index]

        if not np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
            continue
        elif np.array_equal(fixed_particle[next_time_index, 1:4], filter_particle[-1, 1:4]) and \
                filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
            continue

        distance_j_to_k = np.round(
            (first_edge_fixed[4] - filter_particle[-1, 4]) *
            first_edge_fixed_length, 5)

        smoothing_distance = fixed_particle[next_time_index,
                                            -1] + distance_j_to_k

        if break_on_zero and smoothing_distance < 1e-5:
            return (0, filter_index) if return_sampled_index else 0

        smoothing_distance_prior = mm_model.distance_prior_evaluate(
            smoothing_distance, time_interval)
        smoothing_deviation_prior = mm_model.deviation_prior_evaluate(
            filter_particle[-1, 5:7], fixed_particle[None, next_time_index,
                                                     5:7], smoothing_distance)
        accept_prob = smoothing_distance_prior * smoothing_deviation_prior / prior_bound
        if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
            fixed_particle[1:(next_time_index + 1), -1] += distance_j_to_k
            out_part = np.append(filter_particle, fixed_particle[1:], axis=0)
            if return_sampled_index:
                return out_part, filter_index
            else:
                return out_part

    return (None, 0) if return_sampled_index else None
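
A caller must distinguish three outcomes: an accepted trajectory, None (attempts exhausted, fall back to the full interacting weights) and the integer sentinel 0 (zero smoothing distance, signalling that a positive-distance prior bound was invalid, as in the stitching code earlier). A runnable sketch with a hypothetical stub standing in for the real call:

import numpy as np

def stub_backward_sample(rng):
    # Hypothetical stand-in for rejection_backward_sample's three outcomes
    draw = rng.uniform()
    if draw < 0.1:
        return 0                      # zero-distance sentinel
    if draw < 0.3:
        return None                   # all attempts rejected
    return np.zeros((4, 8))           # accepted, appended trajectory

rng = np.random.default_rng(3)
result = stub_backward_sample(rng)
if isinstance(result, int) and result == 0:
    print("restart with the unrestricted distance prior bound")
elif result is None:
    print("fall back to full interacting-weight backward sampling")
else:
    print("accepted trajectory with", result.shape[0], "rows")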
Example No. 8
def sample_route(
        graph: MultiDiGraph,
        timestamps: Union[float, np.ndarray],
        num_obs: int = None,
        mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
        d_refine: float = 1.,
        start_position: np.ndarray = None,
        num_inter_cut_off: int = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Samples a route and a corresponding noisy polyline (GPS-style observations) from the map-matching model:
    the route is propagated through the prior/transition dynamics and the polyline adds Gaussian GPS noise.

    :param graph: encodes road network, simplified and projected to UTM
    :param timestamps: seconds
        either a float if all times between observations are the same, or a series of timestamps in seconds/UNIX time
    :param num_obs: int length of observed polyline to generate
    :param mm_model: MapMatchingModel
    :param d_refine: metres, resolution of distance discretisation
    :param start_position: optional start position; array (u, v, k, alpha)
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :return: tuple with sampled route (array with same shape as a single MMParticles)
        and polyline (array with shape (num_obs, 2))
    """

    if isinstance(timestamps, np.ndarray):
        num_obs = len(timestamps) + 1

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    if start_position is None:
        start_position = random_positions(graph, 1)[0]

    start_geom = edges.get_geometry(graph, start_position)
    start_coords = edges.edge_interpolate(start_geom, start_position[-1])

    full_sampled_route = np.concatenate([[0.], start_position, start_coords,
                                         [0.]])[np.newaxis]

    for k in range(num_obs - 1):
        time_interval = time_interval_arr[k]
        d_max = mm_model.d_max(time_interval)

        num_inter_cut_off_i = max(int(time_interval / 1.5), 10) \
            if num_inter_cut_off is None else num_inter_cut_off

        prev_pos = full_sampled_route[-1:].copy()
        prev_pos[0, 0] = 0.
        prev_pos[0, -1] = 0.

        possible_routes = proposal.get_all_possible_routes_overshoot(
            graph, prev_pos, d_max, num_inter_cut_off=num_inter_cut_off_i)

        # Get all possible positions on each route
        discretised_routes_indices_list = []
        discretised_routes_list = []
        for i, route in enumerate(possible_routes):
            # All possible end positions of route
            discretised_edge_matrix = edges.discretise_edge(
                graph, route[-1, 1:4], d_refine)

            if route.shape[0] == 1:
                discretised_edge_matrix = discretised_edge_matrix[
                    discretised_edge_matrix[:, 0] >= full_sampled_route[-1, 4]]
                discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1,
                                                                          -1]
            else:
                discretised_edge_matrix[:, -1] += route[-2, -1]

            discretised_edge_matrix = discretised_edge_matrix[
                discretised_edge_matrix[:, -1] < d_max + 1e-5]

            # Track route index and append to list
            if discretised_edge_matrix is not None and len(
                    discretised_edge_matrix) > 0:
                discretised_routes_indices_list += [
                    np.ones(discretised_edge_matrix.shape[0], dtype=int) * i
                ]
                discretised_routes_list += [discretised_edge_matrix]

        # Concatenate into numpy.ndarray
        discretised_routes_indices = np.concatenate(
            discretised_routes_indices_list)
        discretised_routes = np.concatenate(discretised_routes_list)

        if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and
                                            discretised_routes[0][-1] == 0):
            warnings.warn('sample_route exited prematurely')
            break

        # Distance prior evals
        distances = discretised_routes[:, -1]
        distance_prior_evals = mm_model.distance_prior_evaluate(
            distances, time_interval)

        # Deviation prior evals
        deviation_prior_evals = mm_model.deviation_prior_evaluate(
            full_sampled_route[-1, 5:7], discretised_routes[:, 1:3],
            discretised_routes[:, -1])

        # Normalise prior/transition probabilities
        prior_probs = distance_prior_evals * deviation_prior_evals
        prior_probs_norm_const = prior_probs.sum()

        sampled_dis_route_index = np.random.choice(len(prior_probs),
                                                   1,
                                                   p=prior_probs /
                                                   prior_probs_norm_const)[0]
        sampled_dis_route = discretised_routes[sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[
            discretised_routes_indices[sampled_dis_route_index]]

        full_sampled_route = proposal.process_proposal_output(
            full_sampled_route, sampled_route, sampled_dis_route,
            time_interval, True)

    obs_indices = edges.observation_time_indices(full_sampled_route[:, 0])
    polyline = full_sampled_route[obs_indices, 5:7] \
               + mm_model.gps_sd * np.random.normal(size=(obs_indices.sum(), 2))

    return full_sampled_route, polyline
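
The returned polyline is simply the route's cartesian position at each observation time plus isotropic Gaussian GPS noise with standard deviation mm_model.gps_sd. A stand-alone illustration with toy coordinates:

import numpy as np

rng = np.random.default_rng(4)
gps_sd = 7.0                                  # metres, assumed noise scale

route_coords = np.array([[0.0, 0.0],          # toy UTM coordinates at
                         [40.0, 5.0],         # observation times
                         [80.0, 12.0]])
polyline = route_coords + gps_sd * rng.normal(size=route_coords.shape)
print(polyline)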