def particle_generator(x_hat, v_hat, t_final, N_steps, convolve=True):
    """
    a generator which gives particles and weights
    Takes:
    x_hat: np.array(2): position measurement
    v_hat: np.array(2): velocity measurement
    t_final: float
    dt: float
    Example:
    for x,w in particle_generator( scene, x_hat, v_hat, 3.0):
        plt.scatter(x[0], x[1]) #NOTE: This plots the points
        print w.sum() #This p(rints the total mass
    """
    num_nl_classes = len(scene.P_of_c) - 1

    #Initializes particles for nonlinear classes
    x_span = np.linspace(-3 * sigma_x, 3 * sigma_x, 5)
    dvol_nl = (x_span[1] - x_span[0])**2
    X, Y = np.meshgrid(x_span + x_hat[0], x_span + x_hat[1])
    x0 = np.vstack([X.flatten(), Y.flatten()])

    #Initializes a regular grid for evaluation of the linear class
    x_span = np.linspace(-scene.width / 2, scene.width / 2, 250)
    dx = x_span[1] - x_span[0]
    y_span = np.linspace(-scene.height / 2, scene.height / 2, 250)
    dy = y_span[1] - y_span[0]
    X, Y = np.meshgrid(x_span, y_span)
    x_lin = np.vstack([X.flatten(), Y.flatten()])

    N_ptcl = x0.shape[1]
    x_arr = np.zeros((num_nl_classes, 2 * N_steps + 1, 2, N_ptcl))
    for k in range(num_nl_classes):
        x_arr[k] = integrate_class(k, x0, t_final, N_steps)
    #At time t=0, rho(x, t=0) is just P(x0 | x0_hat), which by Bayes' rule
    # is proportional to P(x0_hat | x0) under a flat prior on x0.
    w_arr = posteriors.x_hat_given_x(x0, x_hat) * dvol_nl
    w_out = w_arr.flatten()
    x_out = x0
    yield (x_out, w_out), (x_lin, np.zeros_like(x_lin[0]))
    #For later times, the class of the agent matters.
    w_arr_base = np.zeros((num_nl_classes, 2 * N_steps + 1, N_ptcl))
    for k in range(num_nl_classes):
        #joint_k_x_x_hat_v_hat does not depend on the time index m, so
        # compute it once per class and broadcast across all time-steps.
        w_arr_base[k, :] = joint_k_x_x_hat_v_hat(k, x0, x_hat, v_hat)
    veloc = [
        scene.director_field_vectorized(k, x0) for k in range(num_nl_classes)
    ]

    from joblib import Parallel, delayed
    #NOTE: f is assumed to compute ((x_out, w_out), (x_lin, w_lin)) for
    # time-step n; it is not defined in this snippet. The results must be
    # captured and yielded, since Parallel does not run for side effects.
    results = Parallel(n_jobs=18)(
        delayed(f)(n, x_arr, x_lin, w_arr_base, veloc)
        for n in range(1, N_steps))
    for (x_out, w_out), (x_lin, w_lin) in results:
        prob_of_mu = w_out.sum() + w_lin.sum()
        yield (x_out, w_out / prob_of_mu), (x_lin, w_lin / prob_of_mu)
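
A minimal consumption sketch (assuming matplotlib is available; x_hat and v_hat are hypothetical measurements). Every yield after the first is normalized by prob_of_mu, so the combined mass of the two particle sets is 1:

import matplotlib.pyplot as plt

x_hat = np.array([0.0, 0.0])  #hypothetical position measurement
v_hat = np.array([1.0, 0.0])  #hypothetical velocity measurement
for (x, w), (x_lin, w_lin) in particle_generator(x_hat, v_hat, 3.0, 100):
    plt.scatter(x[0], x[1], s=1, c=w)              #nonlinear-class particles
    plt.scatter(x_lin[0], x_lin[1], s=1, c=w_lin)  #linear-class grid
    print(w.sum() + w_lin.sum())  #1.0 after the first (unnormalized) yield
    plt.pause(0.05)
    plt.cla()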
Example No. 2
def joint_k_s_x_x_hat_v_hat(k, s, x, x_hat, v_hat):
    """
    Returns the joint probability density P(k, s, x, x_hat, v_hat).

    args:
    k: int
    s: float
    x: numpy.ndarray, shape=(2,N)
    x_hat: numpy.ndarray, shape=(2,)
    v_hat: numpy.ndarray, shape=(2,)
    """
    r2 = (x[0] - x_hat[0])**2 + (x[1] - x_hat[1])**2
    out = np.exp(-r2 /
                 (2 * sigma_x**2)) / (2 * np.pi * sigma_x**2)  #P(x_hat|x)
    v = s * scene.director_field_vectorized(k, x)
    r2 = (v[0] - v_hat[0])**2 + (v[1] - v_hat[1])**2
    out *= np.exp(-r2 /
                  (2 * sigma_v**2)) / (2 * np.pi * sigma_v**2)  #P(v_hat|v)
    out *= posteriors.x_given_k(x, k)
    out *= scene.P_of_c[k]
    out *= 1.0 / (2 * s_max) * (s <= s_max) * (s >= -s_max)  #P(s)
    return out
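
For reference, the factors above multiply to the following joint density (here \phi_k denotes the class-k director field, scene.director_field_vectorized):

P(k, s, x, \hat{x}, \hat{v})
  = \frac{e^{-\|x - \hat{x}\|^2 / (2\sigma_x^2)}}{2\pi\sigma_x^2}
    \cdot \frac{e^{-\|s\,\phi_k(x) - \hat{v}\|^2 / (2\sigma_v^2)}}{2\pi\sigma_v^2}
    \cdot P(x \mid k)\, P(k)\, \frac{\mathbf{1}\{|s| \le s_{\max}\}}{2\, s_{\max}}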
Example No. 3

if __name__ == "__main__":
    print "Test: \int P(k,s,x,\hat{x},\hat{v}) d\hat{v} = P(k,s,x,\hat{x})."
    k = 0
    s = np.random.rand() * s_max

    #FIND A POINT WHERE P(x|k) is large
    x_arr = np.zeros((2, 40))
    x_arr[0, :20] = np.linspace(-scene.width / 2, scene.width / 2, 20)
    x_arr[1, 20:] = np.linspace(-scene.height / 2, scene.height / 2, 20)
    store = posteriors.x_given_k(x_arr, k)
    i_max = store.argmax()
    x = x_arr[:, i_max]
    x_hat = x + sigma_x * np.random.randn(2)
    v = s * scene.director_field_vectorized(k, x)

    #Now we integrate over v_hat
    u_arr = np.linspace(v[0] - 5 * sigma_v, v[0] + 5 * sigma_v, 100)
    v_arr = np.linspace(v[1] - 5 * sigma_v, v[1] + 5 * sigma_v, 100)
    dv_hat = (u_arr[1] - u_arr[0]) * (v_arr[1] - v_arr[0])
    Q = 0
    from itertools import product
    for vh_x, vh_y in product(u_arr, v_arr):  #renamed so v is not shadowed
        v_hat = np.array([vh_x, vh_y])
        Q += joint_k_s_x_x_hat_v_hat(k, s, x, x_hat, v_hat) * dv_hat

    print "computed answer = " + str(Q)
    answer = scene.P_of_c[k] / (2 * s_max) * posteriors.x_given_k(x, k)
    from scipy.stats import multivariate_normal
    answer *= multivariate_normal.pdf(x_hat, mean=x, cov=sigma_x**2)
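
    #Vectorized alternative to the double loop above (a sketch): only the
    # P(v_hat | v) factor depends on v_hat, so the v_hat integral factors
    # out of the joint. Reuses only names already defined in this test.
    U, V = np.meshgrid(u_arr, v_arr)
    r2 = (U - v[0])**2 + (V - v[1])**2
    #Riemann sum of P(v_hat | v) over the +/-5 sigma grid; close to 1.
    gauss_mass = (np.exp(-r2 / (2 * sigma_v**2)) /
                  (2 * np.pi * sigma_v**2)).sum() * dv_hat
    r2_x = (x[0] - x_hat[0])**2 + (x[1] - x_hat[1])**2
    Q_vec = (np.exp(-r2_x / (2 * sigma_x**2)) / (2 * np.pi * sigma_x**2) *
             posteriors.x_given_k(x, k) * scene.P_of_c[k] / (2 * s_max) *
             gauss_mass)  #the P(s) indicator is 1 since 0 <= s <= s_max
    print("vectorized answer = " + str(Q_vec))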
def f(x, t):
    x = x.reshape(2, len(x) // 2)  #integer division for Python 3
    return s_max * scene.director_field_vectorized(k, x).flatten()
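
integrate_class, called by particle_generator below, does not appear on this page. The following is a hypothetical sketch of a compatible implementation, not the original: it assumes the returned array holds positions after time m * t_final / N_steps at speed s_max, with negative indices m holding backward-in-time motion (matching the negative indexing used in particle_generator), and integrates the director field with scipy's odeint, much as f above suggests.

from scipy.integrate import odeint

def integrate_class(k, x0, t_final, N_steps):
    #Hypothetical sketch, not the original implementation.
    N_ptcl = x0.shape[1]
    t_span = np.linspace(0, t_final, N_steps + 1)

    def rhs(x, t):
        x = x.reshape(2, len(x) // 2)
        return s_max * scene.director_field_vectorized(k, x).flatten()

    fwd = odeint(rhs, x0.flatten(), t_span)                      #s = +s_max
    bwd = odeint(lambda x, t: -rhs(x, t), x0.flatten(), t_span)  #s = -s_max
    out = np.zeros((2 * N_steps + 1, 2, N_ptcl))
    for m in range(N_steps + 1):
        out[m] = fwd[m].reshape(2, N_ptcl)   #time-steps 0 .. N_steps
        out[-m] = bwd[m].reshape(2, N_ptcl)  #time-steps -1 .. -N_steps
    return out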
def particle_generator(x_hat, v_hat, t_final, N_steps, convolve=True):
    """
    a generator which gives particles and weights
    Takes:
    x_hat: np.array(2): position measurement
    v_hat: np.array(2): velocity measurement
    t_final: float
    dt: float

    Example:
    for x,w in particle_generator( scene, x_hat, v_hat, 3.0):
        plt.scatter(x[0], x[1]) #NOTE: This plots the points
        print w.sum() #This p(rints the total mass
    """
    num_nl_classes = len(scene.P_of_c) - 1

    #Initializes particles for nonlinear classes
    x_span = np.linspace(-3 * sigma_x, 3 * sigma_x, 5)
    dvol_nl = (x_span[1] - x_span[0])**2
    X, Y = np.meshgrid(x_span + x_hat[0], x_span + x_hat[1])
    x0 = np.vstack([X.flatten(), Y.flatten()])

    #Initializes a regular grid for evaluation of the linear class
    x_span = np.linspace(-scene.width / 2, scene.width / 2, 250)
    dx = x_span[1] - x_span[0]
    y_span = np.linspace(-scene.height / 2, scene.height / 2, 250)
    dy = y_span[1] - y_span[0]
    X, Y = np.meshgrid(x_span, y_span)
    x_lin = np.vstack([X.flatten(), Y.flatten()])

    N_ptcl = x0.shape[1]
    x_arr = np.zeros((num_nl_classes, 2 * N_steps + 1, 2, N_ptcl))
    for k in range(num_nl_classes):
        x_arr[k] = integrate_class(k, x0, t_final, N_steps)
    #At time t=0, rho(x, t=0) is just P(x0 | x0_hat), which by Bayes' rule
    # is proportional to P(x0_hat | x0) under a flat prior on x0.
    w_arr = posteriors.x_hat_given_x(x0, x_hat) * dvol_nl
    w_out = w_arr.flatten()
    x_out = x0
    yield (x_out, w_out), (x_lin, np.zeros_like(x_lin[0]))
    #For later times, the class of the agent matters.
    w_arr_base = np.zeros((num_nl_classes, 2 * N_steps + 1, N_ptcl))
    for k in range(num_nl_classes):
        #joint_k_x_x_hat_v_hat does not depend on the time index m, so
        # compute it once per class and broadcast across all time-steps.
        w_arr_base[k, :] = joint_k_x_x_hat_v_hat(k, x0, x_hat, v_hat)

    veloc = [
        scene.director_field_vectorized(k, x0) for k in range(num_nl_classes)
    ]
    for n in range(1, N_steps):
        #The following computations handle the nonlinear classes
        t = n * t_final / float(N_steps)
        ds = s_max / n
        w_arr = np.zeros((num_nl_classes, 2 * n + 1, N_ptcl))

        for k in range(num_nl_classes):
            for m in range(-n, n + 1):
                s = s_max * m / n
                v = s * veloc[k]
                r2 = (v[0] - v_hat[0])**2 + (v[1] - v_hat[1])**2
                w_arr[k, m] = w_arr_base[k, m]
                #P(v_hat | v)
                w_arr[k, m] *= (np.exp(-r2 / (2 * sigma_v**2)) /
                                (2 * np.pi * sigma_v**2))
                #P(s): uniform on [-s_max, s_max]
                w_arr[k, m] *= 1.0 / (2 * s_max) * (s <= s_max) * (s >= -s_max)
                #quadrature volume element ds * dx * dy
                w_arr[k, m] *= ds * dvol_nl
        #Select the trajectory slices for time-steps -n .. n from the
        # precomputed trajectories in x_arr.
        x_out = np.zeros((num_nl_classes, 2 * n + 1, 2, N_ptcl))
        x_out[:, -n:, :, :] = x_arr[:, -n:, :, :]
        x_out[:, :n + 1, :, :] = x_arr[:, :n + 1, :, :]
        w_out = w_arr.flatten()
        x_out = np.vstack(
            [x_out[:, :, 0, :].flatten(), x_out[:, :, 1, :].flatten()])
        if convolve:
            #BEGIN GAUSSIAN CONVOLVE
            #Smooth the point masses with a Gaussian kernel whose
            # bandwidth grows linearly with the elapsed time n * dt.
            from scipy.stats import multivariate_normal
            N_conv = 15
            bandwidth = kappa * t_final / float(N_steps) * n
            length = len(w_out) * N_conv
            gauss = np.vstack((np.random.normal(0, bandwidth, length),
                               np.random.normal(0, bandwidth, length)))
            #Replace each particle with N_conv jittered copies, reweighted
            # by the kernel density at the sampled offsets.
            positions = np.repeat(x_out, N_conv, axis=1) + gauss
            weights = multivariate_normal.pdf(
                gauss.transpose(), mean=np.array([0, 0]),
                cov=bandwidth**2) * np.repeat(w_out, N_conv) / N_conv
            x_out = positions
            w_out = weights
            #END GAUSSIAN CONVOLVE
        #The following computations handle the linear predictor class
        w_lin = joint_lin_x_t_x_hat_v_hat(t, x_lin, x_hat, v_hat) * dy * dx
        #TODO: append regular grid and weights to x_out, w_out
        #x_out = np.concatenate( [x_out, x_lin], axis=1)
        #w_out = np.concatenate( [w_out, w_lin])
        prob_of_mu = w_out.sum() + w_lin.sum()
        yield (x_out, w_out / prob_of_mu), (x_lin, w_lin / prob_of_mu)
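
For reference, the loop above appears to implement a Riemann-sum approximation of the predictive density; with \Phi_k^{st} denoting the time-st flow of the class-k director field (the quantity integrate_class tabulates),

\rho(x, t \mid \hat{x}, \hat{v}) \;\propto\; \sum_{k} \int_{-s_{\max}}^{s_{\max}} \!\! \int \delta\big(x - \Phi_k^{st}(x_0)\big)\, P(k, s, x_0, \hat{x}, \hat{v})\, dx_0\, ds,

where the speed integral is discretized at nodes s = m s_{\max}/n for m = -n, \dots, n (spacing ds = s_{\max}/n), the x_0 integral is taken over the initial particle grid with volume element dvol_nl, and the final division by prob_of_mu supplies the normalization by P(\hat{x}, \hat{v}).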