Example #1
def predict_idm(df,
                lag_vid,
                lead_vid,
                t0,
                t1,
                n_steps,
                is_merge=True,
                onramp_vid=np.nan,
                **kwargs):
    front_vid = onramp_vid if is_merge else lead_vid
    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, front_vid, t0, t1)
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl_oracle = ut.get_velocities(df, front_vid, t1 - 1, t1 + n_steps - 1)
    if yl.size == 0:
        # front vehicle entered at t1: take its first position and pad the oracle velocities to n_steps
        yl = ut.get_positions(df, front_vid, t1, t1 + 1)
        vl_oracle = np.hstack((vl_oracle[0], vl_oracle))
        assert vl_oracle.size == n_steps
    # if is_merge:
    vehicle_length = ut.get_length(df, front_vid)
    g_0 = yl[-1] - y[-1] - vehicle_length
    # else:
    #     g_0 = ut.get_spacing(df, lag_vid, t0, t1)[-1]
    y_hat, _ = apply_model(y[-1], v[-1], g_0, yl[-1], vl_oracle)
    return (*ut.single_prediction2probabilistic_format(y_hat), {})
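apply_model itself is not shown in this listing. For reference, here is a minimal single-step sketch of the Intelligent Driver Model that a helper like it would iterate; the parameter values (v0, T, a_max, b_comf, s0) and the step dt are illustrative assumptions, not taken from the source.

import numpy as np

def idm_step(y, v, y_lead, v_lead, length, dt=0.1,
             v0=30.0, T=1.5, a_max=1.5, b_comf=2.0, s0=2.0):
    # One Euler step of the IDM car-following law (illustrative parameters).
    s = max(y_lead - y - length, 1e-3)  # net gap to the lead vehicle
    s_star = s0 + v * T + v * (v - v_lead) / (2 * np.sqrt(a_max * b_comf))
    a = a_max * (1 - (v / v0) ** 4 - (s_star / s) ** 2)
    v_next = max(v + a * dt, 0.0)
    y_next = y + v * dt
    return y_next, v_next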
Example #2
def predict_constant_velocity(df, lag_vid, lead_vid, t0, t1, n_steps,
                              **kwargs):
    y = ut.get_positions(df, lag_vid, t0, t1)
    vdt = y[1:] - y[:-1]
    mean_vdt = vdt.mean()
    y_hat = y[-1] + mean_vdt * np.arange(1, n_steps + 1)
    return (*ut.single_prediction2probabilistic_format(y_hat), {})
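ut.single_prediction2probabilistic_format is assumed to wrap a single deterministic trajectory into the (y_hats, p) sample format returned by the sampling-based predictors below; a minimal sketch under that assumption:

import numpy as np

def single_prediction2probabilistic_format(y_hat):
    # One trajectory as a single "sample" column with probability 1.
    return y_hat[:, None], np.array([1.0])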
Example #3
def predict_sampling_gprior(df, lag_vid, lead_vid, t0, t1, n_steps, **kwargs):
    n_samples = 1000
    alpha = 1.
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = (ut.get_positions(df, lead_vid, t0, t1) -
         ut.get_positions(df, lag_vid, t0, t1))
    g0 = g.mean()
    A = np.array([vl - v, g, 0 * g - 1]).T[:-1, :]  # k-1, 3
    Ap = np.vstack((np.hstack((A, 0 * A[:, [0]])),
                    np.array([0, 0, 0, np.sqrt(alpha)])))  # n, 4
    b = (v[1:] - v[:-1]) / DT
    bp = np.hstack((b, np.sqrt(alpha) * g0))

    # ------ CV trajectory regularization ------
    beta = 1.
    Ap = np.vstack((Ap, np.zeros((2, 4))))
    Ap[-1, 0] = np.sqrt(beta) * g0
    Ap[-1, 1] = np.sqrt(beta) * g0
    bp = np.hstack((bp, 0, 0))

    if np.linalg.matrix_rank(A) < A.shape[1]:
        # print('bad rank')
        params = np.array([0., 0, 0, g0])
    else:
        params = solve_sdp(Ap, bp)
    kv, kg, g_star = params[0], params[1], params[3]
    # print(kv, kg, g_star, np.linalg.cond(A))
    # print(g.mean())

    theta_hat = np.array([kv, kg, g_star])
    # Results are not very sensitive to these widths as long as the variance is
    # large enough (>= 1x the values below), i.e. the parameter space is sampled well.
    sigmas = np.array([.5, .5, 5]) * 2
    kv_kg_gs, p, w_f1 = importance_sample_thetas(theta_hat, sigmas, Ap, bp,
                                                 n_samples)
    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vehicle_length = yl[-1] - y[-1] - g[-1]
    xv = apply_model_v1(y[-1], v[-1], yl[-1], np.mean(vl), vehicle_length,
                        kv_kg_gs, n_steps)
    y_hats = xv[:, 0, :]
    p, w_f2 = post_weight1_samples(y_hats, p, v[-1])
    f_thetas = w_f1 + w_f2
    return y_hats, p, dict(f_thetas=f_thetas)
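The augmented system above encodes a Gaussian prior on g*: appending the row sqrt(alpha) * [0, 0, 0, 1] to A and sqrt(alpha) * g0 to b turns the least-squares objective into ||A theta[:3] - b||^2 + alpha * (g* - g0)^2. Below is a sketch of that augmentation solved with ordinary least squares; solve_sdp in the source presumably also couples the intercept to kg * g* and enforces sign constraints, which this sketch omits.

import numpy as np

def solve_regularized_ls(A, b, g0, alpha=1.0):
    # A: (m, 3) rows [vl - v, g, -1]; b: (m,) accelerations.
    # Append a prior row so the residual contributes alpha * (g_star - g0)**2.
    Ap = np.vstack((np.hstack((A, np.zeros((A.shape[0], 1)))),
                    np.array([[0.0, 0.0, 0.0, np.sqrt(alpha)]])))
    bp = np.hstack((b, np.sqrt(alpha) * g0))
    theta, *_ = np.linalg.lstsq(Ap, bp, rcond=None)
    return theta  # [kv, kg, intercept, g_star]; columns 2 and 3 are not coupled here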
Example #4
def predict_no_sampling(df, lag_vid, lead_vid, t0, t1, n_steps, **kwargs):
    # make A, 'b'
    # solve for parameters [kv, kg, kg*g*]
    # predict with parameters
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = ut.get_spacing(df, lag_vid, t0, t1)
    A = np.array([vl - v, g, 0 * g - 1]).T  # n, 3
    b = ut.get_accel(df, lag_vid, t0, t1)
    print(np.linalg.cond(A))
    params = solve_ls(A, b)
    kv, kg, g_star = params[0], params[1], params[2] / params[1]

    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vl_hat = np.zeros(n_steps) + np.mean(vl)
    y_hat, _ = apply_model_v0(y[-1], v[-1], yl[-1], vl_hat, kv, kg, g_star)
    return (*ut.single_prediction2probabilistic_format(y_hat), {})
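The regression above fits the linear car-following law a ≈ kv * (vl - v) + kg * g - kg * g*, so the fitted vector is [kv, kg, kg * g*], hence the division params[2] / params[1]. A plain least-squares solve_ls could look like the sketch below (illustrative; the helper in the source may additionally clip or constrain the gains):

import numpy as np

def solve_ls(A, b):
    # Ordinary least squares for [kv, kg, kg * g_star].
    params, *_ = np.linalg.lstsq(A, b, rcond=None)
    return params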
Example #5
def predict_mm(df, lag_vid, lead_vid, t0, t1, n_steps, difP_yield,
               difP_noyield, **kwargs):
    n_samples = 1000
    y = ut.get_positions(df, lag_vid, t0, t1)
    v = ut.get_velocities(df, lag_vid, t0, t1)
    v_hat, p = model.sample_predictions(difP_yield, difP_noyield, v, n_samples,
                                        n_steps)
    y_hat = y[-1] + model.DT * v_hat.cumsum(axis=0)
    return y_hat, p, {}
Example #6
def predict_sampling(df, lag_vid, lead_vid, t0, t1, n_steps, **kwargs):
    n_samples = 1000
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = ut.get_spacing(df, lag_vid, t0, t1)
    A = np.array([vl - v, g, 0 * g - 1]).T  # n, 3
    b = ut.get_accel(df, lag_vid, t0, t1)
    params = solve_ls(A, b)
    kv, kg, g_star = params[0], params[1], params[2] / params[1]
    theta_hat = np.array([kv, kg, g_star])
    sigmas = np.array([.1, .1, 1])
    kv_kg_gs, p, w_f = importance_sample_thetas_no_reg(theta_hat, sigmas, A, b,
                                                       n_samples)
    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vehicle_length = yl[-1] - y[-1] - g[-1]
    xv = apply_model_v1(y[-1], v[-1], yl[-1], np.mean(vl), vehicle_length,
                        kv_kg_gs, n_steps)
    y_hats = xv[:, 0, :]
    return y_hats, p, {}
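importance_sample_thetas_no_reg is assumed to draw parameters from a Gaussian proposal around theta_hat and weight each draw by how well it reproduces the observed accelerations. A self-normalized importance-sampling sketch under that assumption (the residual scale sigma_r and the flat prior are illustrative choices):

import numpy as np

def importance_sample_thetas_no_reg(theta_hat, sigmas, A, b, n_samples,
                                    sigma_r=1.0):
    # Gaussian proposal centred on the least-squares estimate [kv, kg, g_star].
    thetas = theta_hat + np.random.randn(n_samples, 3) * sigmas
    # Match A's parameterization: [kv, kg, g_star] -> [kv, kg, kg * g_star].
    coeffs = thetas.copy()
    coeffs[:, 2] *= coeffs[:, 1]
    r = A.dot(coeffs.T).T - b                      # (n_samples, b.size) residuals
    w_f = np.sqrt((r ** 2).sum(axis=1))            # residual norms, as in the ESS baseline
    log_lik = -0.5 * (r ** 2).sum(axis=1) / sigma_r ** 2
    log_prop = -0.5 * (((thetas - theta_hat) / sigmas) ** 2).sum(axis=1)
    log_w = log_lik - log_prop                     # likelihood / proposal (flat prior)
    p = np.exp(log_w - log_w.max())
    p /= p.sum()
    return thetas, p, w_f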
Example #7
def predict_sampling_ess_baseline(df,
                                  lag_vid,
                                  lead_vid,
                                  t0,
                                  t1,
                                  n_steps,
                                  n_samples=100,
                                  **kwargs):
    # n_samples = 10000
    alpha = 1. / 50**2
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = ut.get_spacing(df, lag_vid, t0, t1)
    g0 = g.mean()
    A = np.array([vl - v, g, 0 * g - 1]).T[:-1, :]  # k-1, 3
    b = (v[1:] - v[:-1]) / DT

    # baseline monte carlo sampler
    kv_kg_gs = np.zeros((n_samples, 3), dtype=float)
    kv_kg_gs[:, :2] = np.random.rand(n_samples, 2) * (K_UB - EPS) + EPS
    sigma_a = 1 / np.sqrt(2 * alpha)
    kv_kg_gs[:, 2] = ss.truncnorm.rvs(
        (EPS - g0) / sigma_a, np.inf, size=n_samples) * sigma_a + g0

    p = np.ones(n_samples) / n_samples
    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vehicle_length = yl[-1] - y[-1] - g[-1]
    xv = apply_model_v1(y[-1], v[-1], yl[-1], np.mean(vl), vehicle_length,
                        kv_kg_gs, n_steps)
    y_hats = xv[:, 0, :]

    thetas = kv_kg_gs.copy()
    thetas[:, 2] *= thetas[:, 1]
    r = (A.dot(thetas.T)).T - b  # n_samples, b.size
    w_f1 = np.sqrt((r**2).sum(axis=1))
    p, w_f2 = post_weight1_samples(y_hats, p, v[-1])
    f_thetas = w_f1 + w_f2
    return y_hats, p, dict(f_thetas=f_thetas)
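The "ess" in the function name presumably refers to effective sample size: given normalized weights p, ESS = 1 / sum(p_i^2) is the standard diagnostic for how degenerate a weighted sample set is (it equals n_samples for uniform weights and 1 when a single sample dominates). For example:

import numpy as np

def effective_sample_size(p):
    # ESS = 1 / sum(p_i**2) for normalized weights p.
    p = np.asarray(p, dtype=float)
    p = p / p.sum()
    return 1.0 / (p ** 2).sum()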
Example #8
def predict_sampling_propagate(df,
                               lag_vid,
                               lead_vid,
                               t0,
                               t1,
                               n_steps,
                               leadlead_vid=np.nan,
                               **kwargs):
    n_samples = 100
    if np.isnan(leadlead_vid):
        return predict_sampling(df, lag_vid, lead_vid, t0, t1, n_steps)
    y_hats_lead, p_lead = predict_sampling(df, lead_vid, leadlead_vid, t0, t1,
                                           n_steps)[:2]
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vl_hats = y_hats_lead[1:, :] - y_hats_lead[:-1, :]
    vl_hats = np.vstack((y_hats_lead[0, :] - yl[-1], vl_hats)) / DT

    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = ut.get_spacing(df, lag_vid, t0, t1)
    A = np.array([vl - v, g, 0 * g - 1]).T  # n, 3
    b = ut.get_accel(df, lag_vid, t0, t1)
    params = solve_ls(A, b)
    kv, kg, g_star = params[0], params[1], params[2] / params[1]

    theta_hat = np.array([kv, kg, g_star])
    sigmas = np.array([.1, .1, 1])
    kv_kg_gs, p, w_f1 = importance_sample_thetas_no_reg(
        theta_hat, sigmas, A, b, n_samples)
    p *= p_lead
    p /= p.sum()
    y_hats = np.zeros((n_steps, n_samples), dtype=float)
    y = ut.get_positions(df, lag_vid, t0, t1)
    for i in range(n_samples):
        y_hats[:, i], _ = apply_model_v0(y[-1], v[-1], yl[-1], vl_hats[:, i],
                                         *kv_kg_gs[i, :])
    return y_hats, p, {}
Example #9
def predict_sampling_gprior(df, lag_vid, lead_vid, t0, t1, n_steps, **kwargs):
    n_samples = 100
    alpha = 1. / 50**2
    v = ut.get_velocities(df, lag_vid, t0, t1)
    vl = ut.get_velocities(df, lead_vid, t0, t1)
    g = (ut.get_positions(df, lead_vid, t0, t1) -
         ut.get_positions(df, lag_vid, t0, t1))
    g0 = g.mean()
    A = np.array([vl - v, g, 0 * g - 1]).T[:-1, :]  # k-1, 3
    Ap = np.vstack((np.hstack((A, 0 * A[:, [0]])),
                    np.array([0, 0, 0, np.sqrt(alpha)])))  # n, 4
    b = (v[1:] - v[:-1]) / DT
    bp = np.hstack((b, np.sqrt(alpha) * g0))
    if np.linalg.matrix_rank(A) < A.shape[1]:
        print('bad rank')
        params = np.array([0., 0, 0, g0])
    else:
        params = solve_sdp(Ap, bp)
    kv, kg, g_star = params[0], params[1], params[3]
    print(kv, kg, g_star, np.linalg.cond(A))
    print(g.mean())
    # print(ut.get_time_spacing(df, lag_vid, t0, t1).mean())

    theta_hat = np.array([kv, kg, g_star])
    sigmas = np.array([.1, .1, 1])
    kv_kg_gs, p, w_f1 = importance_sample_thetas(theta_hat, sigmas, Ap, bp,
                                                 n_samples)
    y = ut.get_positions(df, lag_vid, t0, t1)
    yl = ut.get_positions(df, lead_vid, t0, t1)
    vehicle_length = yl[-1] - y[-1] - g[-1]
    xv = apply_model_v1(y[-1], v[-1], yl[-1], np.mean(vl), vehicle_length,
                        kv_kg_gs, n_steps)
    y_hats = xv[:, 0, :]
    p, w_f2 = post_weight1_samples(y_hats, p, v[-1])
    f_thetas = w_f1 + w_f2
    return y_hats, p, dict(f_thetas=f_thetas)