Example #1
File: test_conv.py Project: HIPS/DESI-MCMC
def test_im2col_convolve():
    """ compares my im2col based dot product convolve with scipy convolve """
    skip = 1
    block_size = (11, 11)
    img   = np.random.randn(227, 227, 3)
    filt  = np.dstack([cv.gauss_filt_2D(shape=block_size,sigma=2) for k in range(3)])

    # im2col the image and filter
    img_cols = cv.im2col(img, block_size=block_size, skip=skip)
    out      = cv.convolve_im2col(img_cols, filt, block_size, skip, img.shape)

    # check against scipy convolve
    outc = np.dstack([ sconvolve(img[:,:,k], filt[:,:,k], mode='valid') for k in range(3)])
    outc = np.sum(outc, axis=2)
    assert np.allclose(out, outc), "im2col skip 1 failed!"
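
The reference computation above can be reproduced with plain NumPy/SciPy alone; a minimal sketch, assuming sconvolve is scipy.signal.convolve and leaving out the project-specific cv helpers:

import numpy as np
from scipy.signal import convolve

img = np.random.randn(32, 32, 3)
filt = np.random.randn(5, 5, 3)

# per-channel 'valid' convolution, stacked along a third axis and summed,
# exactly as in the check above
ref = np.dstack([convolve(img[:, :, k], filt[:, :, k], mode='valid')
                 for k in range(img.shape[-1])])
ref = np.sum(ref, axis=2)
print(ref.shape)  # (28, 28): (32 - 5 + 1, 32 - 5 + 1)
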
Example #2
File: test_conv.py Project: HIPS/DESI-MCMC
def test_fast_conv():
    """ compares my fast_conv to scipy convolve """
    skip = 1
    block_size = (11, 11)
    depth = 5
    img   = np.random.randn(51, 51, depth)
    filt  = np.dstack([cv.gauss_filt_2D(shape=block_size,sigma=2) for k in range(depth)])

    # im2col the image and filter
    out = fc.convolve(filt, img)

    # check against scipy convolve
    outc = np.dstack([ auto_convolve(img[:,:,k], filt[:,:,k], mode='valid') for k in range(depth)])
    outc = np.sum(outc, axis=2)
    assert np.allclose(out, outc), "fast_conv (cythonized) failed!"
Example #3
File: plot.py Project: mackelab/sbibm
def box_meshgrid(func, xbound, ybound, nx=50, ny=50):
    """
    Form a meshed grid (to be used with a contour plot) on a box
    specified by xbound, ybound. Evaluate the grid with [func]: (n x 2) -> n.

    - xbound: a tuple (xmin, xmax)
    - ybound: a tuple (ymin, ymax)
    - nx: number of points to evaluate in the x direction
    - ny: number of points to evaluate in the y direction

    return XX, YY, ZZ where each is a 2D ndarray of shape (ny, nx)
    """

    # form a test location grid to try
    minx, maxx = xbound
    miny, maxy = ybound
    loc0_cands = np.linspace(minx, maxx, nx)
    loc1_cands = np.linspace(miny, maxy, ny)
    lloc0, lloc1 = np.meshgrid(loc0_cands, loc1_cands)
    # nd1 x nd0 x 2
    loc3d = np.dstack((lloc0, lloc1))
    # #candidates x 2
    all_loc2s = np.reshape(loc3d, (-1, 2))
    # evaluate the function
    func_grid = func(all_loc2s)
    func_grid = np.reshape(func_grid, (ny, nx))

    assert lloc0.shape[0] == ny
    assert lloc0.shape[1] == nx
    assert np.all(lloc0.shape == lloc1.shape)

    return lloc0, lloc1, func_grid
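
A hypothetical usage of box_meshgrid with a simple quadratic bowl (the function name `bowl` is illustrative only):

import numpy as np
import matplotlib.pyplot as plt

def bowl(X):
    # X has shape (n, 2); return one value per row
    return np.sum(X**2, axis=1)

XX, YY, ZZ = box_meshgrid(bowl, xbound=(-2, 2), ybound=(-1, 1), nx=60, ny=40)
assert ZZ.shape == (40, 60)  # (ny, nx), matching the meshgrid layout

plt.contourf(XX, YY, ZZ, levels=20)
plt.colorbar()
plt.show()
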
Example #4
def location_mixture_logpdf(samps, locations, location_weights, distr_at_origin,
                            contr_var=False, variant=1):
    # log-density of a location mixture: distr_at_origin shifted to each of the
    # `locations` and weighted by `location_weights`; the contr_var branches
    # apply control variates built from cyclic shifts of the log terms
    diff = samps - locations[:, np.newaxis, :]
    lpdfs = distr_at_origin.logpdf(
        diff.reshape([np.prod(diff.shape[:2]), diff.shape[-1]])
    ).reshape(diff.shape[:2])
    logprop_weights = log(location_weights / location_weights.sum())[:, np.newaxis]
    if not contr_var:
        return logsumexp(lpdfs + logprop_weights, 0)
    else:
        time0 = lpdfs + logprop_weights + log(len(location_weights))

        if variant == 1:
            # one control variate: the difference between time0 and its cyclic shift
            time1 = np.hstack([time0[:, 1:], time0[:, :1]])
            cov = np.mean(time0**2 - time0 * time1)
            var = np.mean((time0 - time1)**2)
            lpdfs = lpdfs - cov / var * (time0 - time1)
            return logsumexp(lpdfs - log(len(location_weights)), 0)
        elif variant == 2:
            # two control variates: differences to the forward and backward cyclic shifts
            cvar = (time0[:, :, np.newaxis] -
                    np.dstack([np.hstack([time0[:, 1:], time0[:, :1]]),
                               np.hstack([time0[:, -1:], time0[:, :-1]])]))

            ## self-covariance matrix of the control variates
            K_cvar = np.diag(np.mean(cvar**2, (0, 1)))
            # add the off-diagonal term
            K_cvar = K_cvar + (1. - np.eye(2)) * np.mean(cvar[:, :, 0] * cvar[:, :, 1])

            ## covariance of the control variates with the random variable
            cov = np.mean(time0[:, :, np.newaxis] * cvar, 0).mean(0)

            optimal_comb = np.linalg.inv(K_cvar) @ cov
            lpdfs = lpdfs - cvar @ optimal_comb
            return logsumexp(lpdfs - log(len(location_weights)), 0)
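
A sketch of how location_mixture_logpdf might be called, assuming `log` is np.log, `logsumexp` is scipy.special.logsumexp, and distr_at_origin is any object whose .logpdf accepts an (n, d) array, e.g. a frozen scipy multivariate normal centred at the origin:

import numpy as np
from scipy.stats import multivariate_normal

d = 2
locations = np.array([[0.0, 0.0], [3.0, 3.0]])   # mixture component centres
location_weights = np.array([0.7, 0.3])
samps = np.random.randn(5, d)                    # 5 query points

kernel = multivariate_normal(mean=np.zeros(d), cov=np.eye(d))
lp = location_mixture_logpdf(samps, locations, location_weights, kernel)
print(lp.shape)  # (5,): one mixture log-density per sample
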
Example #5
    def _forward(self, g, beta, initval, ifx):
        """
        Applies the forward iteration of the Picard series
        """
        g = g.reshape((self.dim.R, self.dim.N)).T

        struct_mats = np.array([
            sum(brd * Ld for brd, Ld in zip(br, self.basis_mats))
            for br in beta
        ])

        # construct the sets of A(t_n) shape: (N, K, K)
        A = np.array([
            sum(gnr * Ar
                for gnr, Ar in zip(gn, struct_mats[1:])) + struct_mats[0]
            for gn in g
        ])

        # initial layer shape (N, K, N_samples)
        layer = np.dstack([np.row_stack([m] * self.dim.N) for m in initval])

        # weight matrix
        weights = self._get_weight_matrix(self.ttc, ifx)

        for m in range(self.order):
            layer = get_next_layer(layer, initval, A, weights)

        return layer
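
The assembly of the A(t_n) tensor above can be checked in isolation. A standalone sketch with made-up shapes, where struct_mats is (R+1, K, K) and g is (N, R), also showing the equivalent single einsum:

import numpy as np

N, R, K = 4, 3, 2
g = np.random.randn(N, R)
struct_mats = np.random.randn(R + 1, K, K)

# loop form, as in _forward above
A_loop = np.array([
    sum(gnr * Ar for gnr, Ar in zip(gn, struct_mats[1:])) + struct_mats[0]
    for gn in g
])
# equivalent einsum
A_einsum = struct_mats[0] + np.einsum('nr,rij->nij', g, struct_mats[1:])
assert np.allclose(A_loop, A_einsum)
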
Example #6
File: test_conv.py Project: HIPS/DESI-MCMC
def test_fast_conv_grad():
    skip = 1
    block_size = (11, 11)
    depth = 1
    img   = np.random.randn(51, 51, depth)
    filt  = np.dstack([cv.gauss_filt_2D(shape=block_size,sigma=2) for k in range(depth)])
    filt = cv.gauss_filt_2D(shape=block_size, sigma=2)
    def loss_fun(filt):
        out = fc.convolve(filt, img)
        return np.sum(np.sin(out) + out**2)
    loss_fun(filt)
    loss_grad = grad(loss_fun)

    def loss_fun_slow(filt):
        out = auto_convolve(img.squeeze(), filt, mode='valid') 
        return np.sum(np.sin(out) + out**2)
    loss_fun_slow(filt)
    loss_grad_slow = grad(loss_fun_slow)

    # compare gradient timing
    loss_grad_slow(filt)
    loss_grad(filt)

    ## check numerical gradients
    num_grad = np.zeros(filt.shape)
    for i in range(filt.shape[0]):
        for j in range(filt.shape[1]):
            de = np.zeros(filt.shape)
            de[i, j] = 1e-4
            num_grad[i,j] = (loss_fun(filt + de) - loss_fun(filt - de)) / (2*de[i,j])

    assert np.allclose(loss_grad(filt), num_grad), "convolution gradient failed!"
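
The same finite-difference check, stripped down to a toy loss so it runs without the project-specific convolution wrappers; `grad` is assumed to come from the autograd package:

import autograd.numpy as np
from autograd import grad

def toy_loss(w):
    return np.sum(np.sin(w) + w**2)

w = np.random.randn(4, 4)
g_auto = grad(toy_loss)(w)

# central differences, as in the loop above
num_grad = np.zeros(w.shape)
for i in range(w.shape[0]):
    for j in range(w.shape[1]):
        de = np.zeros(w.shape)
        de[i, j] = 1e-4
        num_grad[i, j] = (toy_loss(w + de) - toy_loss(w - de)) / (2 * de[i, j])

assert np.allclose(g_auto, num_grad)
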
Example #7
    def _forward(self, g, beta, initval, ifx):
        """
        Applies the forward iteration of the Picard series
        """
        g = g.reshape((self.dim.R, self.dim.N)).T

        struct_mats = np.array([sum(brd * Ld
                                    for brd, Ld in zip(br, self.basis_mats))
                                for br in beta])

        # construct the sets of A(t_n) shape: (N, K, K)
        A = np.array([sum(gnr * Ar
                          for gnr, Ar in zip(gn, struct_mats[1:])) +
                      struct_mats[0]
                      for gn in g])

        # initial layer shape (N, K, N_samples)
        layer = np.dstack([np.row_stack([m]*self.dim.N)
                           for m in initval])

        # weight matrix
        weights = self._get_weight_matrix(self.ttc, ifx)

        for m in range(self.order):
            layer = get_next_layer(layer,
                                   initval,
                                   A,
                                   weights)

        return layer
Example #8
def create_job(kwargs):
    import warnings
    warnings.filterwarnings("ignore")

    # pendulum env
    env = gym.make('Pendulum-TO-v0')
    env._max_episode_steps = 10000
    env.unwrapped.dt = 0.02
    env.unwrapped.umax = np.array([2.5])
    env.unwrapped.periodic = False

    dm_state = env.observation_space.shape[0]
    dm_act = env.action_space.shape[0]

    state = env.reset()
    init_state = tuple([state, 1e-4 * np.eye(dm_state)])
    solver = MBGPS(env,
                   init_state=init_state,
                   init_action_sigma=25.,
                   nb_steps=300,
                   kl_bound=.1,
                   action_penalty=1e-3,
                   activation={
                       'shift': 250,
                       'mult': 0.5
                   })

    solver.run(nb_iter=100, verbose=False)

    solver.ctl.sigma = np.dstack([1e-1 * np.eye(dm_act)] * 300)
    data = solver.rollout(nb_episodes=1, stoch=True, init=state)

    obs = np.squeeze(data['x'], axis=-1).T
    act = np.squeeze(data['u'], axis=-1).T
    return obs, act
Example #9
def test_im2col_convolve():
    """ compares my im2col based dot product convolve with scipy convolve """
    skip = 1
    block_size = (11, 11)
    img = np.random.randn(227, 227, 3)
    filt = np.dstack(
        [cv.gauss_filt_2D(shape=block_size, sigma=2) for k in range(3)])

    # im2col the image and filter
    img_cols = cv.im2col(img, block_size=block_size, skip=skip)
    out = cv.convolve_im2col(img_cols, filt, block_size, skip, img.shape)

    # check against scipy convolve
    outc = np.dstack([
        sconvolve(img[:, :, k], filt[:, :, k], mode='valid') for k in range(3)
    ])
    outc = np.sum(outc, axis=2)
    assert np.allclose(out, outc), "im2col skip 1 failed!"
Example #10
def test_fast_conv():
    """ compares my fast_conv to scipy convolve """
    skip = 1
    block_size = (11, 11)
    depth = 5
    img = np.random.randn(51, 51, depth)
    filt = np.dstack(
        [cv.gauss_filt_2D(shape=block_size, sigma=2) for k in range(depth)])

    # im2col the image and filter
    out = fc.convolve(filt, img)

    # check against scipy convolve
    outc = np.dstack([
        auto_convolve(img[:, :, k], filt[:, :, k], mode='valid')
        for k in range(depth)
    ])
    outc = np.sum(outc, axis=2)
    assert np.allclose(out, outc), "fast_conv (cythonized) failed!"
Example #11
File: 123.py Project: yanzhizhang/CSC_412
    def log_density(w, t):
        w_reshape = w.T.reshape(784, 10, samples)
        w_squared = ((w_reshape / sigma_prior)**2) / 2.0

        z = np.tensordot(train_images, w_reshape, axes=1)
        sf_sum = logsumexp(z, axis=1, keepdims=True)
        # should be positive
        log_softmax = z - np.hstack([sf_sum for i in range(10)])
        expected = np.dstack([train_labels for i in range(samples)])
        thing = expected * log_softmax
        return thing.sum(axis=0).mean(axis=0) - w_squared.sum(axis=0).sum(
            axis=0)
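
A small aside on the log-softmax computed above: with keepdims=True the subtraction broadcasts on its own, so the np.hstack duplication of sf_sum is not strictly needed. A minimal 2-D sketch using scipy.special.logsumexp (the snippet above presumably uses autograd's logsumexp):

import numpy as np
from scipy.special import logsumexp

z = np.random.randn(6, 10)  # 6 samples, 10 classes
log_softmax = z - logsumexp(z, axis=1, keepdims=True)
assert np.allclose(np.exp(log_softmax).sum(axis=1), 1.0)
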
Example #12
def setup_plot(u_func):
    '''
    Set up a plot of the target density; returns the axis object for
    additional plotting.
    '''
    try:
        X, Y = numpy.mgrid[-4:4:0.05, -4:4:0.05]
        dat = np.dstack((X, Y))
        U_z1 = u_func(dat)

        fig, ax = plt.subplots()
        ax.contourf(X, Y, U_z1, cmap='Reds', levels=15)
    except (TypeError, ValueError):
        plt.close()
        x = np.linspace(-8, 8, 1000)
        fig, ax = plt.subplots()
        ax.plot(x, u_func(x), label="Target Distribution")
    return ax
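
Hypothetical usage of setup_plot with a 2-D energy function that accepts the (grid x grid x 2) stack produced by np.dstack and returns one value per grid point (the function `u_ring` is illustrative only):

import numpy as np
import matplotlib.pyplot as plt

def u_ring(dat):
    # dat has shape (..., 2); a ring-shaped potential
    r = np.linalg.norm(dat, axis=-1)
    return 0.5 * ((r - 2.0) / 0.4) ** 2

ax = setup_plot(u_ring)
ax.set_title("Target density")
plt.show()
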
Example #13
def test_fast_conv_grad():
    skip = 1
    block_size = (11, 11)
    depth = 1
    img = np.random.randn(51, 51, depth)
    filt = np.dstack(
        [cv.gauss_filt_2D(shape=block_size, sigma=2) for k in range(depth)])
    filt = cv.gauss_filt_2D(shape=block_size, sigma=2)

    def loss_fun(filt):
        out = fc.convolve(filt, img)
        return np.sum(np.sin(out) + out**2)

    loss_fun(filt)
    loss_grad = grad(loss_fun)

    def loss_fun_slow(filt):
        out = auto_convolve(img.squeeze(), filt, mode='valid')
        return np.sum(np.sin(out) + out**2)

    loss_fun_slow(filt)
    loss_grad_slow = grad(loss_fun_slow)

    # compare gradient timing
    loss_grad_slow(filt)
    loss_grad(filt)

    ## check numerical gradients
    num_grad = np.zeros(filt.shape)
    for i in range(filt.shape[0]):
        for j in range(filt.shape[1]):
            de = np.zeros(filt.shape)
            de[i, j] = 1e-4
            num_grad[i, j] = (loss_fun(filt + de) -
                              loss_fun(filt - de)) / (2 * de[i, j])

    assert np.allclose(loss_grad(filt),
                       num_grad), "convolution gradient failed!"
Example #14
def create_job(kwargs):
    import warnings
    warnings.filterwarnings("ignore")

    # pendulum env
    env = gym.make('Pendulum-TO-v0')
    env._max_episode_steps = 10000
    env.unwrapped.dt = 0.02
    env.unwrapped.umax = np.array([2.5])
    env.unwrapped.periodic = True

    dm_state = env.observation_space.shape[0]
    dm_act = env.action_space.shape[0]

    horizon, nb_steps = 15, 100

    state = np.zeros((dm_state, nb_steps + 1))
    action = np.zeros((dm_act, nb_steps))

    state[:, 0] = env.reset()
    for t in range(nb_steps):
        init_state = tuple([state[:, t], 1e-4 * np.eye(dm_state)])
        solver = MBGPS(env,
                       init_state=init_state,
                       init_action_sigma=2.5,
                       nb_steps=horizon,
                       kl_bound=1.,
                       action_penalty=1e-3)
        trace = solver.run(nb_iter=5, verbose=False)

        solver.ctl.sigma = np.dstack([1e-2 * np.eye(dm_act)] * horizon)
        u = solver.ctl.sample(state[:, t], 0, stoch=True)
        action[:, t] = np.clip(u, -env.ulim, env.ulim)
        state[:, t + 1], _, _, _ = env.step(action[:, t])

        # print('Time Step:', t, 'Cost:', trace[-1])

    return state[:, :-1].T, action.T
Example #15
n_samples = 2000

R = 2
d = 2
ms = np.array([[0, 0], [R, R], [-R, -R], [-R, R], [R, -R]])
k = len(ms)
ps = np.ones(k) / k
ts = 0.5 * np.ones(k)

zs = np.array([rng.multinomial(1, ps) for _ in range(n_samples)]).T
xs = [
    z[:, np.newaxis] *
    rng.multivariate_normal(m, t * np.eye(2), size=n_samples)
    for z, m, t in zip(zs, ms, ts)
]
data = np.sum(np.dstack(xs), axis=2)

n_test = 100
test_zs = np.array([rng.multinomial(1, ps) for _ in range(n_test)]).T
test_xs = [
    z[:, np.newaxis] * rng.multivariate_normal(m, t * np.eye(2), size=n_test)
    for z, m, t in zip(test_zs, ms, ts)
]
test_data = np.sum(np.dstack(test_xs), axis=2)

T = 1000
C = 1.0
q = 0.003
sigma = 2.74
k = 5
l = 2 * k - 1 + k * d
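
The np.dstack/sum construction of `data` above is a one-hot selection trick: each column of zs is one-hot over the k components, so multiplying every component's draws by its indicator and summing the stacked result keeps exactly one draw per row. An equivalent, index-based sketch (rng is assumed to be a NumPy Generator such as np.random.default_rng()):

import numpy as np

rng = np.random.default_rng(0)
n, k, d, R = 6, 5, 2, 2
ms = np.array([[0, 0], [R, R], [-R, -R], [-R, R], [R, -R]])
ts = 0.5 * np.ones(k)

zs = np.array([rng.multinomial(1, np.ones(k) / k) for _ in range(n)]).T  # (k, n)
comp = zs.argmax(axis=0)                                                 # chosen component per sample
draws = np.stack([rng.multivariate_normal(m, t * np.eye(d), size=n)
                  for m, t in zip(ms, ts)])                              # (k, n, d)
data = draws[comp, np.arange(n)]                                         # (n, d)
print(data.shape)
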
Example #16
File: utils.py Project: ecat/adbs
def calculate_crb_for_tissue(J_n_tuple):

    J_n = np.dstack(J_n_tuple)

    N, xy_comps, p = J_n.shape

    assert (xy_comps == 2)

    #I_n = np.matmul(np.transpose(J_n, (0, 2, 1)), J_n) # I_n is size (N x p x p) # ideally would use this

    # we loop over N because matmul is not supported for nested object arrays when differentiating the trace of the CRB
    I_n = []

    for ii in range(0, N):
        I_n.append(np.dot(np.transpose(J_n[ii, :, :]), J_n[ii, :, :]))

    I = np.sum(np.array(I_n), axis=0)  # sum over echoes

    #def matrix_inv_fun(A): # this won't work for nested derivatives
    #    return np.linalg.inv(A)

    def matrix_inv_fun_1x1(A):
        return 1. / A

    def matrix_inv_fun_2x2(A):  # this is the analytical solution for 2x2
        # np.linalg does not support inverting I when it is full of autograd boxes,
        # so we resort to the analytical inverse
        a, b, c, d = (A[0, 0], A[0, 1], A[1, 0], A[1, 1])
        det_A = a * d - b * c
        return (1. / det_A) * np.array([[d, -b], [-c, a]])

    def matrix_inv_fun_3x3(A):
        # analytical solution for 3x3; only the diagonal elements are computed,
        # which saves some computation
        # https://ardoris.wordpress.com/2008/07/18/general-formula-for-the-inverse-of-a-3x3-matrix/
        #a, b, c, d, e, f, g, h, i = A[:]
        a, b, c, d, e, f, g, h, i = (A[0, 0], A[0, 1], A[0, 2],
                                     A[1, 0], A[1, 1], A[1, 2],
                                     A[2, 0], A[2, 1], A[2, 2])
        det_A = a * (e * i - f * h) - b * (d * i - f * g) + c * (d * h - e * g)
        #mat = np.array([[e * i - f * h, c * h - b * i, b * f - c * e],
        #               [f * g - d * i, a * i - c * g, c * d - a * f],
        #               [d * h - e * g, b * g - a * h, a * e - b * d]])

        # have to be careful to wrap inside np array to maintain autograd status
        mat = np.diag(np.array([e * i - f * h, a * i - c * g, a * e - b * d]))

        return (1. / det_A) * mat

    # http://www.cs.nthu.edu.tw/~jang/book/addenda/matinv/matinv/
    def matrix_inv_fun_4x4(A_in):
        # could get away with only calculating the diagonal elements...

        A = A_in[0:3, 0:3]
        c = A_in[3, 3]
        b = A_in[0:3, 3][:, np.newaxis]

        k = c - np.dot(np.dot(np.transpose(b), matrix_inv_fun_3x3(A)), b)
        A_inv_00 = matrix_inv_fun_3x3(A - np.dot(b, np.transpose(b)) / c)
        A_inv_01 = -1 / k * np.dot(matrix_inv_fun_3x3(A), b)
        A_inv_11 = 1 / k

        A_inv_tmp_1 = np.concatenate((A_inv_00, A_inv_01), axis=1)
        A_inv_tmp_2 = np.concatenate((np.transpose(A_inv_01), A_inv_11),
                                     axis=1)
        A_inv = np.concatenate((A_inv_tmp_1, A_inv_tmp_2), axis=0)

        return A_inv

    if (p == 2):
        matrix_inv_fun = matrix_inv_fun_2x2
    elif (p == 3):
        matrix_inv_fun = matrix_inv_fun_3x3
    elif (p == 4):
        matrix_inv_fun = matrix_inv_fun_4x4
    else:
        matrix_inv_fun = matrix_inv_fun_1x1

    crb = matrix_inv_fun(I)

    return crb
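
A quick standalone check of the closed-form 2x2 inverse used above against np.linalg.inv (outside autograd, where np.linalg.inv is available):

import numpy as np

def inv_2x2(A):
    a, b, c, d = A[0, 0], A[0, 1], A[1, 0], A[1, 1]
    det = a * d - b * c
    return (1.0 / det) * np.array([[d, -b], [-c, a]])

A = np.random.randn(2, 2) + 3.0 * np.eye(2)  # keep it well conditioned
assert np.allclose(inv_2x2(A), np.linalg.inv(A))
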
Example #17
def take_rollouts(policy, env, nrollouts=1, trajectory_len=100):
    rollouts = [
        take_samples(generate_trajectory(policy, *env), trajectory_len)
        for _ in range(nrollouts)
    ]
    return np.dstack(rollouts)
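
For reference, assuming each rollout returned by take_samples is a 2-D array of shape (trajectory_len, state_dim), the np.dstack at the end stacks them along a new third axis:

import numpy as np

trajectory_len, state_dim, nrollouts = 100, 4, 3
rollouts = [np.random.randn(trajectory_len, state_dim) for _ in range(nrollouts)]
stacked = np.dstack(rollouts)
print(stacked.shape)  # (100, 4, 3)
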