Code example #1
0
File: proposal.py  Project: SamDuffield/bmm
def _deviation_norm_quants(mm_model: MapMatchingModel,
                           previous_cart_coords: np.ndarray,
                           discretised_routes: np.ndarray,
                           distances: np.ndarray,
                           distance_prior_evals: np.ndarray,
                           deviation_prior_evals: np.ndarray,
                           prior_probs_norm_const: float,
                           time_interval: float) -> np.ndarray:
    """
    Compute the quantities required for a gradient EM step, assuming the
    deviation prior is used.

    :param mm_model: MapMatchingModel
    :param previous_cart_coords: cartesian coordinates of previous position, shape (2,)
    :param discretised_routes: all candidate discretised positions
    :param distances: route distance of each candidate position
    :param distance_prior_evals: distance prior evaluated at each candidate
    :param deviation_prior_evals: deviation prior evaluated at each candidate
    :param prior_probs_norm_const: prior normalising constant Z
    :param time_interval: time between observations
    :return: array [Z, dZ/d(dist_params), dZ/d(deviation_beta)]
    """
    deviations = np.sqrt(np.sum((previous_cart_coords - discretised_routes[:, 1:3]) ** 2, axis=1))
    deviations = np.abs(deviations - discretised_routes[:, -1])

    return np.array([prior_probs_norm_const,
                     *np.sum(mm_model.distance_prior_gradient(distances, time_interval)
                             .reshape(len(mm_model.distance_params), len(distances))
                             * deviation_prior_evals, axis=-1),
                     -np.sum(deviations
                             * distance_prior_evals
                             * deviation_prior_evals)
                     ])


def optimal_proposal(graph: MultiDiGraph,
                     particle: np.ndarray,
                     new_observation: Union[None, np.ndarray],
                     time_interval: float,
                     mm_model: MapMatchingModel,
                     full_smoothing: bool = True,
                     d_refine: float = 1.,
                     d_max: float = None,
                     d_max_fail_multiplier: float = 1.5,
                     d_max_threshold: tuple = (0.9, 0.1),
                     num_inter_cut_off: int = None,
                     only_norm_const: bool = False,
                     store_norm_quants: bool = False,
                     resample_fails: bool = True) -> Union[Tuple[Union[None, np.ndarray],
                                                                 float,
                                                                 Union[float, np.ndarray]], float]:
    """
    Samples a single particle from the (distance discretised) optimal proposal.

    :param graph: encodes road network, simplified and projected to UTM
    :param particle: single element of MMParticles.particles
    :param new_observation: cartesian coordinate in UTM
    :param time_interval: time between last observation and newly received observation
    :param mm_model: MapMatchingModel
    :param full_smoothing: if True returns full trajectory
        otherwise returns only x_t-1 to x_t
    :param d_refine: metres, resolution of distance discretisation
    :param d_max: optional override of d_max = mm_model.d_max(time_interval)
    :param d_max_fail_multiplier: extension of d_max in case all probs are 0
    :param d_max_threshold: tuple defining when to extend d_max
        extend if total sample prob of distances > d_max * d_max_threshold[0] larger than d_max_threshold[1]
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :param only_norm_const: if true only return prior normalising constant (don't sample)
    :param store_norm_quants: whether to additionally return quantities needed for gradient EM step
        assuming deviation prior is used
    :param resample_fails: whether to return None (and induce later resampling of whole trajectory)
        if proposal fails to find route with positive probability
        if False assume distance=0
    :return: (particle, unnormalised weight, prior_norm) or (particle, unnormalised weight, dev_norm_quants)
    """
    if particle is None:
        return 0. if only_norm_const else (None, 0., 0.)

    if isinstance(new_observation, list):
        new_observation = np.array(new_observation)

    if num_inter_cut_off is None:
        num_inter_cut_off = max(int(time_interval / 1.5), 10)

    if d_max is None:
        d_max = mm_model.d_max(time_interval)

    # Extract all possible routes from previous position
    start_position = particle[-1:].copy()
    start_position[0, -1] = 0
    possible_routes = get_all_possible_routes_overshoot(graph, start_position, d_max,
                                                        num_inter_cut_off=num_inter_cut_off)

    # Get all possible positions on each route
    discretised_routes_indices_list = []
    discretised_routes_list = []
    for i, route in enumerate(possible_routes):
        # All possible end positions of route
        discretised_edge_matrix = discretise_edge(graph, route[-1, 1:4], d_refine)

        if route.shape[0] == 1:
            # Route stays on the starting edge: keep only positions at or beyond
            # the particle's current alpha, distances measured relative to it
            discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= particle[-1, 4]]
            discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
        else:
            # Add distance already travelled along the preceding edges of the route
            discretised_edge_matrix[:, -1] += route[-2, -1]

        # Truncate at maximum distance
        discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]

        # Track route index and append to list
        if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
            discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
            discretised_routes_list += [discretised_edge_matrix]

    # No candidate positions survived discretisation/truncation.
    # Guard required: np.concatenate raises ValueError on an empty sequence,
    # which previously made the post-concatenation emptiness check unreachable.
    if len(discretised_routes_list) == 0:
        return 0. if only_norm_const else (None, 0., 0.)

    # Concatenate into numpy.ndarray
    discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
    discretised_routes = np.concatenate(discretised_routes_list)

    # Only candidate is remaining exactly still - treat as failure
    if len(discretised_routes) == 1 and discretised_routes[0][-1] == 0:
        if only_norm_const:
            return 0
        if resample_fails:
            return None, 0., 0.
        else:
            sampled_dis_route = discretised_routes[0]

            # Append sampled route to old particle
            sampled_route = possible_routes[0]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
            return proposal_out, 0., 0.

    # Distance prior evals
    distances = discretised_routes[:, -1]
    distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)

    # Deviation prior evals
    deviation_prior_evals = mm_model.deviation_prior_evaluate(particle[-1, 5:7],
                                                              discretised_routes[:, 1:3],
                                                              discretised_routes[:, -1])

    # Normalise prior/transition probabilities
    prior_probs = distance_prior_evals * deviation_prior_evals

    prior_probs_norm_const = prior_probs.sum()
    if only_norm_const:
        if store_norm_quants:
            # Z, dZ/d(dist_params), dZ/d(deviation_beta)
            return _deviation_norm_quants(mm_model, particle[-1, 5:7], discretised_routes, distances,
                                          distance_prior_evals, deviation_prior_evals,
                                          prior_probs_norm_const, time_interval)
        else:
            return prior_probs_norm_const
    prior_probs /= prior_probs_norm_const

    # Likelihood evaluations
    likelihood_evals = mm_model.likelihood_evaluate(discretised_routes[:, 1:3], new_observation)

    # Calculate sample probabilities, restricted to positive likelihood support
    sample_probs = prior_probs[likelihood_evals > 0] * likelihood_evals[likelihood_evals > 0]

    # p(y_m | x_m-1^j)
    prop_weight = sample_probs.sum()

    model_d_max = mm_model.d_max(time_interval)

    if prop_weight < 1e-100 \
            or (np.sum(sample_probs[np.where(distances[likelihood_evals > 0]
                                             > (d_max * d_max_threshold[0]))[0]])/prop_weight > d_max_threshold[1]\
                and (not d_max > model_d_max)):
        # Either all probabilities are (numerically) zero, or too much probability
        # mass sits near the d_max truncation boundary -> extend d_max and retry
        if (d_max - np.max(distances)) < d_refine + 1e-5 \
                and d_max_fail_multiplier > 1 and (not d_max > model_d_max):
            # Forward d_max_fail_multiplier/d_max_threshold so non-default values
            # set by the caller are not silently reset on recursion
            return optimal_proposal(graph,
                                    particle,
                                    new_observation,
                                    time_interval,
                                    mm_model,
                                    full_smoothing,
                                    d_refine,
                                    d_max=d_max * d_max_fail_multiplier,
                                    d_max_fail_multiplier=d_max_fail_multiplier,
                                    d_max_threshold=d_max_threshold,
                                    num_inter_cut_off=num_inter_cut_off,
                                    only_norm_const=only_norm_const,
                                    store_norm_quants=store_norm_quants,
                                    resample_fails=resample_fails)
        if resample_fails:
            proposal_out = None
        else:
            # Fall back to the zero-distance position (stay still)
            sampled_dis_route_index = np.where(discretised_routes[:, -1] == 0)[0][0]
            sampled_dis_route = discretised_routes[sampled_dis_route_index]

            # Append sampled route to old particle
            sampled_route = possible_routes[discretised_routes_indices[sampled_dis_route_index]]

            proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                                   full_smoothing)
        prop_weight = 0.
    else:
        # Sample an edge and distance
        sampled_dis_route_index = np.random.choice(len(sample_probs), 1, p=sample_probs / prop_weight)[0]
        sampled_dis_route = discretised_routes[likelihood_evals > 0][sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[discretised_routes_indices[likelihood_evals > 0][sampled_dis_route_index]]

        proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
                                               full_smoothing)

    if store_norm_quants:
        # Z, dZ/d(dist_params), dZ/d(deviation_beta)
        dev_norm_quants = _deviation_norm_quants(mm_model, particle[-1, 5:7], discretised_routes, distances,
                                                 distance_prior_evals, deviation_prior_evals,
                                                 prior_probs_norm_const, time_interval)
        return proposal_out, prop_weight, dev_norm_quants
    else:
        return proposal_out, prop_weight, prior_probs_norm_const
Code example #2
0
def sample_route(
        graph: MultiDiGraph,
        timestamps: Union[float, np.ndarray],
        num_obs: int = None,
        mm_model: MapMatchingModel = None,
        d_refine: float = 1.,
        start_position: np.ndarray = None,
        num_inter_cut_off: int = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Samples a synthetic route from the map-matching transition (prior) model and
    generates a corresponding noisy observation polyline.

    A start position is sampled uniformly (unless given), then for each time
    interval a new position is sampled proportional to the distance and
    deviation priors; finally Gaussian GPS noise (sd = mm_model.gps_sd) is
    added to the route coordinates at observation times to form the polyline.

    :param graph: encodes road network, simplified and projected to UTM
    :param timestamps: seconds
        either float if all times between observations are the same, or a series of timestamps in seconds/UNIX timestamp
    :param num_obs: int length of observed polyline to generate
        required if timestamps is a float; inferred from timestamps otherwise
    :param mm_model: MapMatchingModel, defaults to a fresh ExponentialMapMatchingModel
    :param d_refine: metres, resolution of distance discretisation
    :param start_position: optional start position; array (u, v, k, alpha)
    :param num_inter_cut_off: maximum number of intersections to cross in the time interval
    :return: tuple with sampled route (array with same shape as a single MMParticles)
        and polyline (array with shape (num_obs, 2))
    """
    # Instantiate per call rather than as a mutable default argument
    # (a default instance would be shared across all calls)
    if mm_model is None:
        mm_model = ExponentialMapMatchingModel()

    if isinstance(timestamps, np.ndarray):
        num_obs = len(timestamps) + 1

    time_interval_arr = get_time_interval_array(timestamps, num_obs)

    if start_position is None:
        start_position = random_positions(graph, 1)[0]

    start_geom = edges.get_geometry(graph, start_position)
    start_coords = edges.edge_interpolate(start_geom, start_position[-1])

    # Route row layout: (time, u, v, k, alpha, x, y, distance)
    full_sampled_route = np.concatenate([[0.], start_position, start_coords,
                                         [0.]])[np.newaxis]

    for k in range(num_obs - 1):
        time_interval = time_interval_arr[k]
        d_max = mm_model.d_max(time_interval)

        num_inter_cut_off_i = max(
            int(time_interval /
                1.5), 10) if num_inter_cut_off is None else num_inter_cut_off

        prev_pos = full_sampled_route[-1:].copy()
        prev_pos[0, 0] = 0.
        prev_pos[0, -1] = 0.

        possible_routes = proposal.get_all_possible_routes_overshoot(
            graph, prev_pos, d_max, num_inter_cut_off=num_inter_cut_off_i)

        # Get all possible positions on each route
        discretised_routes_indices_list = []
        discretised_routes_list = []
        for i, route in enumerate(possible_routes):
            # All possible end positions of route
            discretised_edge_matrix = edges.discretise_edge(
                graph, route[-1, 1:4], d_refine)

            if route.shape[0] == 1:
                # Route stays on the current edge: keep positions at or beyond
                # the current alpha, distances measured relative to it
                discretised_edge_matrix = discretised_edge_matrix[
                    discretised_edge_matrix[:, 0] >= full_sampled_route[-1, 4]]
                discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1,
                                                                          -1]
            else:
                # Add distance already travelled along preceding edges
                discretised_edge_matrix[:, -1] += route[-2, -1]

            # Truncate at maximum distance
            discretised_edge_matrix = discretised_edge_matrix[
                discretised_edge_matrix[:, -1] < d_max + 1e-5]

            # Track route index and append to list
            if discretised_edge_matrix is not None and len(
                    discretised_edge_matrix) > 0:
                discretised_routes_indices_list += [
                    np.ones(discretised_edge_matrix.shape[0], dtype=int) * i
                ]
                discretised_routes_list += [discretised_edge_matrix]

        # Guard before concatenation: np.concatenate raises ValueError on an
        # empty sequence, which previously crashed before the emptiness check
        if len(discretised_routes_list) == 0:
            warnings.warn('sample_route exited prematurely')
            break

        # Concatenate into numpy.ndarray
        discretised_routes_indices = np.concatenate(
            discretised_routes_indices_list)
        discretised_routes = np.concatenate(discretised_routes_list)

        # Only candidate is remaining exactly still - stop early
        if len(discretised_routes) == 1 and discretised_routes[0][-1] == 0:
            warnings.warn('sample_route exited prematurely')
            break

        # Distance prior evals
        distances = discretised_routes[:, -1]
        distance_prior_evals = mm_model.distance_prior_evaluate(
            distances, time_interval)

        # Deviation prior evals
        deviation_prior_evals = mm_model.deviation_prior_evaluate(
            full_sampled_route[-1, 5:7], discretised_routes[:, 1:3],
            discretised_routes[:, -1])

        # Normalise prior/transition probabilities
        prior_probs = distance_prior_evals * deviation_prior_evals
        prior_probs_norm_const = prior_probs.sum()

        # Sample next position proportional to the prior
        sampled_dis_route_index = np.random.choice(len(prior_probs),
                                                   1,
                                                   p=prior_probs /
                                                   prior_probs_norm_const)[0]
        sampled_dis_route = discretised_routes[sampled_dis_route_index]

        # Append sampled route to old particle
        sampled_route = possible_routes[
            discretised_routes_indices[sampled_dis_route_index]]

        full_sampled_route = proposal.process_proposal_output(
            full_sampled_route, sampled_route, sampled_dis_route,
            time_interval, True)

    # Observation polyline = route coordinates at observation times + GPS noise
    obs_indices = edges.observation_time_indices(full_sampled_route[:, 0])
    polyline = full_sampled_route[obs_indices, 5:7] \
               + mm_model.gps_sd * np.random.normal(size=(obs_indices.sum(), 2))

    return full_sampled_route, polyline