Example #1
 def mapping(self, s, t, s_new, k, c):
     """ Map s_new to t_new based on known mapping of
         s (source) to t (target),
         with s original/intrinsic coordinates
         and t intrinsic/original coordinates """
     n, s_dim = s.shape
     t_dim = t.shape[1]
     n_new = s_new.shape[0]
     # 1. determine nearest neighbors
     dist = np.sum((s[np.newaxis] - s_new[:, np.newaxis])**2, -1)
     nn_ids = np.argsort(dist)[:, :k]  # indices of the k nearest neighbors
     nns = np.row_stack([s[nn_ids[:, ki]] for ki in range(k)])
     nns = nns.reshape((n_new, k, s_dim), order='F')
     # 2. determine Gram matrices
     dif = s_new[:, np.newaxis] - nns
     G = np.tensordot(dif, dif, axes=([2], [2]))
     G = G[np.arange(n_new), :, np.arange(n_new)]
     # 3. determine weights (not worth vectorizing this loop)
     weights = np.zeros((n_new, k))
     for i_n in range(n_new):
         weights[i_n] = np.linalg.inv(G[i_n] + c * np.eye(k)).dot(
             np.ones((k, )))
     weights /= np.sum(weights, -1, keepdims=True)
     # 4. compute coordinates
     t_nns = np.row_stack([t[nn_ids[:, ki]] for ki in range(k)])
     t_nns = t_nns.reshape((n_new, k, t_dim), order='F')
     t_new = np.dot(weights, t_nns)
     t_new = t_new[np.arange(n_new), np.arange(n_new)]
     return t_new
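A minimal smoke test for the mapping above (a sketch, not part of the source; it assumes the method is available as a plain function, with the unused `self` passed as None, and uses purely synthetic data):

import numpy as np

rng = np.random.default_rng(0)
s = rng.normal(size=(100, 3))    # source coordinates
t = s[:, :2]                     # a known 2-D embedding of the source points
s_new = rng.normal(size=(5, 3))  # out-of-sample points to map
t_new = mapping(None, s, t, s_new, k=10, c=1e-3)
print(t_new.shape)               # expected: (5, 2)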
Example #2
    def _forward(self, g, beta, initval, ifx):
        """
        Applies the forward iteration of the Picard series
        """
        g = g.reshape((self.dim.R, self.dim.N)).T

        struct_mats = np.array([
            sum(brd * Ld for brd, Ld in zip(br, self.basis_mats))
            for br in beta
        ])

        # construct the sets of A(t_n) shape: (N, K, K)
        A = np.array([
            sum(gnr * Ar
                for gnr, Ar in zip(gn, struct_mats[1:])) + struct_mats[0]
            for gn in g
        ])

        # initial layer shape (N, K, N_samples)
        layer = np.dstack([np.row_stack([m] * self.dim.N) for m in initval])

        # weight matrix
        weights = self._get_weight_matrix(self.ttc, ifx)

        for m in range(self.order):
            layer = get_next_layer(layer, initval, A, weights)

        return layer
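get_next_layer is not shown in this snippet; a plausible stand-in (purely an assumption: one Picard step x_{m+1}(t_n) = x_0 + sum_j w_{nj} A(t_j) x_m(t_j), with `weights` discretising the integral operator) might look like:

import numpy as np

def get_next_layer(layer, initval, A, weights):
    # layer: (N, K, S), A: (N, K, K), weights: (N, N)
    Ax = np.einsum('nkj,njs->nks', A, layer)           # A(t_n) x_m(t_n)
    x0 = np.dstack([np.row_stack([m] * A.shape[0]) for m in initval])
    return x0 + np.einsum('nm,mks->nks', weights, Ax)  # quadrature over t_j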
Example #3
    def _forward(self, g, beta, initval, ifx):
        """
        Applies the forward iteration of the Picard series
        """
        g = g.reshape((self.dim.R, self.dim.N)).T

        struct_mats = np.array([sum(brd * Ld
                                    for brd, Ld in zip(br, self.basis_mats))
                                for br in beta])

        # construct the sets of A(t_n) shape: (N, K, K)
        A = np.array([sum(gnr * Ar
                          for gnr, Ar in zip(gn, struct_mats[1:])) +
                      struct_mats[0]
                      for gn in g])

        # initial layer shape (N, K, N_samples)
        layer = np.dstack([np.row_stack([m]*self.dim.N)
                           for m in initval])

        # weight matrix
        weights = self._get_weight_matrix(self.ttc, ifx)

        for m in range(self.order):
            layer = get_next_layer(layer,
                                   initval,
                                   A,
                                   weights)

        return layer
Example #4
def plot_posterior_spikes(q, model, ys, us, tr=0):

    q_lem_x = q.mean_continuous_states[tr]
    J_diag = q._params[tr]["J_diag"]
    J_lower_diag = q._params[tr]["J_lower_diag"]
    J = blocks_to_full(J_diag, J_lower_diag)
    Jinv = np.linalg.inv(J)
    q_lem_std = np.sqrt(np.diag(Jinv))

    q_lem_z = model.most_likely_states(q_lem_x, ys[tr])

    f, (a0, a1, a2) = plt.subplots(3,
                                   1,
                                   gridspec_kw={'height_ratios': [1, 3.5, 1]},
                                   figsize=[8, 6])

    yhat = model.smooth(q_lem_x, ys[tr], input=us[tr])
    # zhat = model.most_likely_states(q_lem_x, ys[tr], input=us[tr])
    zhat = np.argmax(q.mean_discrete_states[tr][0], axis=1)
    a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=1)
    a0.set_xticks([])
    a0.set_yticks([0, 1])
    a0.set_yticklabels(["$z$", r"$\hat{z}$"])
    a0.set_xlim([0, ys[tr].shape[0] - 1])
    # a0.axis("off")
    a1.plot(xs[tr], 'b', label="true")
    a1.plot(q_lem_x, 'k', label="inferred")
    a1.fill_between(np.arange(np.shape(ys[tr])[0]),
                    (q_lem_x - q_lem_std * 2.0)[:, 0],
                    (q_lem_x + q_lem_std * 2.0)[:, 0],
                    facecolor='k',
                    alpha=0.3)
    for j in range(5):
        x_sample = q.sample_continuous_states()[tr]
        a1.plot(x_sample, 'k', alpha=0.3)
    a1.plot(np.array([0, np.shape(ys[tr])[0]]),
            np.array([1.0, 1.0]),
            'k--',
            linewidth=1)
    a1.set_ylim([-0.1, 1.1])
    a1.set_ylabel("$x$")
    a1.set_xlim([0, ys[tr].shape[0] - 1])
    a1.legend()
    a2.set_ylabel("$y$")
    for n in range(ys[tr].shape[1]):
        a2.eventplot(np.where(ys[tr][:, n] > 0)[0],
                     linelengths=0.5,
                     lineoffsets=1 + n,
                     color='k')
    sns.despine()
    a2.set_yticks([])
    a2.set_xlim([0, ys[tr].shape[0] - 1])
    plt.tight_layout()
    plt.show()
Example #5
    def _evaluate_elementwise(self, X, calc_gradient, out, *args, **kwargs):
        # NOTE: uses a self-calculated dF (gradient) rather than autograd's numpy, which pymoo does not support
        ret = []

        def func(_x):
            _out = {}
            self._evaluate(_x,
                           _out,
                           *args,
                           calc_gradient=calc_gradient,
                           **kwargs)
            return _out

        parallelization = self.parallelization
        if not isinstance(parallelization, (list, tuple)):
            parallelization = [parallelization]
        _type = parallelization[0]
        _params = parallelization[1:]
        # just serialize evaluation
        if _type is None:
            ret = [func(x) for x in X]
        elif _type == "threads":
            if len(_params) == 0:
                n_threads = cpu_count() - 1
            else:
                n_threads = _params[0]
            with ThreadPool(n_threads) as pool:
                params = []
                for k in range(len(X)):
                    params.append(
                        [X[k], calc_gradient, self._evaluate, args, kwargs])
                ret = np.array(pool.starmap(evaluate_in_parallel, params))
        elif _type == "dask":
            if len(_params) != 2:
                raise Exception(
                    "A distributed client object is needed for dask: "
                    "parallelization=(\"dask\", <client>, <function>).")
            else:
                client, fun = _params
            jobs = []
            for k in range(len(X)):
                jobs.append(client.submit(fun, X[k]))
            ret = [job.result() for job in jobs]
        else:
            raise Exception(
                "Unknown parallelization method: %s (None, threads, dask)" %
                self.parallelization)
        # stack all the single outputs together
        for key in ret[0].keys():
            out[key] = row_stack([ret[i][key] for i in range(len(ret))])
        return out
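For reference, the dispatch above accepts these forms of self.parallelization (inferred from the code itself, not from pymoo's documentation):

problem.parallelization = None                   # plain serial loop
problem.parallelization = ("threads", 4)         # thread pool with 4 workers
problem.parallelization = ("dask", client, fun)  # dask client plus an evaluation function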
Example #6
def plot_trial(tr=0, legend=False):

	q_x = q.mean_continuous_states[tr]
	J_diag = q._params[tr]["J_diag"]
	J_lower_diag = q._params[tr]["J_lower_diag"]
	J = blocks_to_full(J_diag, J_lower_diag)
	Jinv = np.linalg.inv(J)
	q_lem_std = np.sqrt(np.diag(Jinv))
	q_lem_std = q_lem_std.reshape((T,D))
	q_std_1 = q_lem_std[:,0]
	q_std_2 = q_lem_std[:,1]

	yhat = test_acc.smooth(q_x, ys[tr], input=us[tr])
	zhat = test_acc.most_likely_states(q_x, ys[tr], input=us[tr])

	f, (a0, a1, a2, a3) = plt.subplots(4, 1, gridspec_kw={'height_ratios': [0.5, 0.5, 3, 1]})
	a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=2)
	a0.set_xticks([])
	a0.set_yticks([0, 1], ["$z_{\\mathrm{true}}$", "$z_{\\mathrm{inf}}$"])
	a0.axis("off")
	a2.plot(xs[tr][:,0],color=[1.0,0.0,0.0],label="$x_1$",alpha=0.9)
	a2.plot(xs[tr][:,1],color=[0.0,0.0,1.0],label="$x_2$",alpha=0.9)
	a2.plot(q_x[:,0],color=[1.0,0.3,0.3],linestyle='--',label=r"$\hat{x}_1$",alpha=0.9)
	a2.plot(q_x[:,1],color=[0.3,0.3,1.0],linestyle='--',label=r"$\hat{x}_2$",alpha=0.9)

	a2.fill_between(np.arange(T),q_x[:,0]-q_std_1*2.0, q_x[:,0]+q_std_1*2.0, facecolor='r', alpha=0.3)
	a2.fill_between(np.arange(T),q_x[:,1]-q_std_2*2.0, q_x[:,1]+q_std_2*2.0, facecolor='b', alpha=0.3)
	a2.plot(np.array([0,100]),np.array([1,1]),'k--',linewidth=1.0,label=None)
	a2.set_ylim([-0.4,1.4])
	a2.set_xlim([-1,101])
	a2.set_xticks([])
	a2.set_yticks([0,1])
	a2.set_ylabel("x")
	if legend:
		a2.legend()
	sns.despine()
	for n in range(10):
		a3.eventplot(np.where(ys[tr][:,n]>0)[0], linelengths=0.5, lineoffsets=1+n,color='k')
	sns.despine()
	a3.set_yticks([])
	a3.set_xlim([-1,101])

	a1.plot(0.2*us[tr][:,0],color=[1.0,0.5,0.5], label=None,alpha=0.9)
	a1.plot(0.2*us[tr][:,1],color=[0.5,0.5,1.0], label=None,alpha=0.9)
	a1.set_yticks([])
	a1.set_xticks([])
	a1.axes.get_yaxis().set_visible(False)
	plt.tight_layout()

	return
Example #7
def block_diag(arrs):
    arrs = [np.atleast_2d(a) for a in arrs]

    bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2]
    if bad_args:
        raise ValueError("arguments in the following positions have dimension "
                         "greater than 2: %s" % bad_args)

    rows = []
    for ix, a in enumerate(arrs):
        # widths of the zero blocks are the column counts of the other blocks
        left_cols = sum(b.shape[1] for b in arrs[:ix])
        right_cols = sum(b.shape[1] for b in arrs[ix + 1:])
        left_mat = np.zeros((a.shape[0], left_cols))
        right_mat = np.zeros((a.shape[0], right_cols))
        rows.append(np.column_stack([left_mat, a, right_mat]))

    return np.row_stack(rows)
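A quick check of block_diag on two rectangular blocks (the output dtype is float because of the zero padding):

import numpy as np

A = np.array([[1, 2, 3],
              [4, 5, 6]])
B = np.array([[7]])
print(block_diag([A, B]))
# [[1. 2. 3. 0.]
#  [4. 5. 6. 0.]
#  [0. 0. 0. 7.]]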
Example #8
def make_nascar_model():
    As = [
        random_rotation(D_latent, np.pi / 24.),
        random_rotation(D_latent, np.pi / 48.)
    ]

    # Set the center points for each system
    centers = [np.array([+2.0, 0.]), np.array([-2.0, 0.])]
    bs = [
        -(A - np.eye(D_latent)).dot(center) for A, center in zip(As, centers)
    ]

    # Add a "right" state
    As.append(np.eye(D_latent))
    bs.append(np.array([+0.1, 0.]))

    # Add a "left" state
    As.append(np.eye(D_latent))
    bs.append(np.array([-0.25, 0.]))

    # Construct multinomial regression to divvy up the space
    w1, b1 = np.array([+1.0, 0.0]), np.array([-2.0])  # x + b > 0 -> x > -b
    w2, b2 = np.array([-1.0, 0.0]), np.array([-2.0])  # -x + b > 0 -> x < b
    w3, b3 = np.array([0.0, +1.0]), np.array([0.0])  # y > 0
    w4, b4 = np.array([0.0, -1.0]), np.array([0.0])  # y < 0
    Rs = np.row_stack((100 * w1, 100 * w2, 10 * w3, 10 * w4))
    r = np.concatenate((100 * b1, 100 * b2, 10 * b3, 10 * b4))

    true_rslds = SLDS(D_obs,
                      K,
                      D_latent,
                      transitions="recurrent_only",
                      dynamics="diagonal_gaussian",
                      emissions="gaussian_orthog",
                      single_subspace=True)
    true_rslds.dynamics.mu_init = np.tile(np.array([[0, 1]]), (K, 1))
    true_rslds.dynamics.sigmasq_init = 1e-4 * np.ones((K, D_latent))
    true_rslds.dynamics.As = np.array(As)
    true_rslds.dynamics.bs = np.array(bs)
    true_rslds.dynamics.sigmasq = 1e-4 * np.ones((K, D_latent))

    true_rslds.transitions.Rs = Rs
    true_rslds.transitions.r = r

    true_rslds.emissions.inv_etas = np.log(1e-2) * np.ones((1, D_obs))
    return true_rslds
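A sketch of exercising the model above (assuming ssm's SLDS.sample API and the globals D_obs, K, D_latent used in the snippet):

z_true, x_true, y = true_rslds.sample(T=1000)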
Example #9
def plot_trial_particles(tr=0, legend=False):

	idx = np.where(trials==tr)[0][0]
	q_x = np.mean(new_particles2[idx],axis=0)
	q_std = np.std(new_particles2[idx], axis=0)
	q_std_1 = q_std[:,0]
	q_std_2 = q_std[:,1]

	yhat = test_acc_pem.smooth(q_x, ys[tr], input=us[tr])
	zhat = test_acc_pem.most_likely_states(q_x, ys[tr], input=us[tr])

	f, (a0, a1, a2, a3) = plt.subplots(4, 1, gridspec_kw={'height_ratios': [0.5, 0.5, 3, 1]})
	a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=2)
	a0.set_xticks([])
	a0.set_yticks([0, 1], ["$z_{\\mathrm{true}}$", "$z_{\\mathrm{inf}}$"])
	a0.axis("off")
	a2.plot(xs[tr][:,0],color=[1.0,0.0,0.0],label="$x_1$",alpha=0.9)
	a2.plot(xs[tr][:,1],color=[0.0,0.0,1.0],label="$x_2$",alpha=0.9)
	a2.plot(q_x[:,0],color=[1.0,0.3,0.3],linestyle='--',label=r"$\hat{x}_1$",alpha=0.9)
	a2.plot(q_x[:,1],color=[0.3,0.3,1.0],linestyle='--',label=r"$\hat{x}_2$",alpha=0.9)
	a2.fill_between(np.arange(T),q_x[:,0]-q_std_1*2.0, q_x[:,0]+q_std_1*2.0, facecolor='r', alpha=0.3)
	a2.fill_between(np.arange(T),q_x[:,1]-q_std_2*2.0, q_x[:,1]+q_std_2*2.0, facecolor='b', alpha=0.3)

	a2.plot(np.array([0,100]),np.array([1,1]),'k--',linewidth=1.0,label=None)
	a2.set_ylim([-0.4,1.4])
	a2.set_xlim([-1,101])
	a2.set_xticks([])
	a2.set_yticks([0,1])
	a2.set_ylabel("x")
	if legend:
		a2.legend()
	sns.despine()
	for n in range(10):
		a3.eventplot(np.where(ys[tr][:,n]>0)[0], linelengths=0.5, lineoffsets=1+n,color='k')
	sns.despine()
	a3.set_yticks([])
	a3.set_xlim([-1,101])

	a1.plot(0.2*us[tr][:,0],color=[1.0,0.5,0.5], label=None,alpha=0.9)
	a1.plot(0.2*us[tr][:,1],color=[0.5,0.5,1.0], label=None,alpha=0.9)
	a1.set_yticks([])
	a1.set_xticks([])
	a1.axes.get_yaxis().set_visible(False)
	plt.tight_layout()

	return
Example #10
    def _calc_pareto_front(self, n_points=100, flatten=True):
        regions = [[0, 0.0830015349], [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041], [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pf = []

        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pf.append(anp.array([x1, x2]).T)

        if not flatten:
            pf = anp.concatenate([p[None, ...] for p in pf])
        else:
            pf = anp.row_stack(pf)

        return pf
Example #11
    def _fit_init(self, is_fixed_vars, **kwargs):
        """
        Handles model initialisation.
        """
        init_strategies = {
            'g': lambda: np.zeros(self.dim.N * self.dim.R),
            'beta': lambda: np.row_stack((np.zeros(self.dim.D),
                                          np.eye(self.dim.R, self.dim.D))).ravel(),
            'mu_ivp': lambda: _mu_ivp_init(),
        }

        def _mu_ivp_init():
            mu_ivp = np.zeros(
                (len(self._ifix), self.dim.K, len(self.Y_train_)))

            for q, Y in enumerate(self.Y_train_):
                for k in range(self.dim.K):
                    u = interp1d(self.x_train_[q], Y[:, k])
                    mu_ivp[:, k, q] = u(self.ttc[self._ifix])

            return mu_ivp

        var_names = ['g', 'beta', 'mu_ivp']
        full_init = [
            kwargs.pop(''.join((vn, '0')), init_strategies[vn]()).ravel()
            for vn in var_names
        ]

        free_vars, fixed_vars = ([], [])
        for item, boolean in zip(full_init, is_fixed_vars):
            if boolean:
                fixed_vars.append(item)
            else:
                free_vars.append(item)
        free_vars_shape = [item.size for item in free_vars]

        return np.concatenate(free_vars), free_vars_shape, fixed_vars
Example #12
def reproj_error(normal_factor, img_coor, world_coor, camMatrix, rotMatrixCam,
                 rotMatrixBoard, transVecCam, transVecBoard):
    """
	Parameters
	----------
	normal_factor   normalized area in pixels of a checkerboard tile
	img_coor        image coordinate, from umeas
	world_coor      world coordinate
	camMatrix       3 x 3: camera matrix for this camera
	rotMatrixCam    3 x 3: rotation matrix for this camera
	rotMatrixBoard  3 x 3: rotation matrix for this image
	transVecCam     3: translational vector for this camera
	transVecBoard   3: translational vector for this image

	Returns
	-------
	the reprojection error
	"""

    # TODO scaling parameter k for principal optical axis assumed here.

    # [R_c t_c].
    transMatCam = np.column_stack((np.transpose(rotMatrixCam), transVecCam))
    # transMatCam = np.column_stack((rotMatrixCam, -transVecCam))
    # Corresponds to eq(6) in Muller paper. 4x4 matrix with image rot matrices and trans vectors
    transMatImg = np.column_stack((rotMatrixBoard, transVecBoard))
    rowToAdd = np.zeros(4)
    rowToAdd[3] = 1
    transMatImg = np.row_stack((transMatImg, rowToAdd))
    aug_world_coor = np.append(world_coor, 1)

    # Compute matrix multiplication
    product = np.matmul(
        camMatrix,
        np.matmul(transMatCam, np.matmul(transMatImg, aug_world_coor)))

    # Compute reprojection error term.
    # TODO revisit normal factor from Muller paper. Setting to 1 because for our purposes, volume depth is shallow.
    return 1 / np.sqrt(normal_factor) * np.linalg.norm(img_coor -
                                                       proj_red(product))
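The helper proj_red is not shown; a plausible stand-in (an assumption: it reduces the homogeneous image vector to pixel coordinates by perspective division) would be:

def proj_red(p):
    # homogeneous (x, y, w) -> (x / w, y / w)
    return p[:2] / p[2]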
Example #13
def hmm_filter(log_pi0, log_Ps, ll):
    T, K = ll.shape

    # Make sure everything is C contiguous
    log_pi0 = to_c(log_pi0)
    log_Ps = to_c(log_Ps)
    ll = to_c(ll)

    # Forward pass gets the predicted state at time t given
    # observations up to and including those from time t
    alphas = np.zeros((T, K))
    forward_pass(log_pi0, log_Ps, ll, alphas)

    # Predict forward with the transition matrix
    pz_tt = np.exp(alphas - logsumexp(alphas, axis=1, keepdims=True))
    pz_tp1t = np.matmul(pz_tt[:-1,None,:], np.exp(log_Ps))[:,0,:]

    # Include the initial state distribution
    pz_tp1t = np.row_stack((np.exp(log_pi0 - logsumexp(log_pi0)), pz_tp1t))

    assert np.allclose(np.sum(pz_tp1t, axis=1), 1.0)
    return pz_tp1t
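forward_pass is a compiled helper from ssm; a pure-NumPy stand-in (an assumption: it follows the standard log-space HMM filtering recursion, with log_Ps of shape (T-1, K, K) or (1, K, K)) would be:

import numpy as np
from scipy.special import logsumexp

def forward_pass(log_pi0, log_Ps, ll, alphas):
    T, K = ll.shape
    alphas[0] = log_pi0 + ll[0]
    for t in range(T - 1):
        P = log_Ps[t] if log_Ps.shape[0] == T - 1 else log_Ps[0]
        alphas[t + 1] = logsumexp(alphas[t][:, None] + P, axis=0) + ll[t + 1]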
Example #14
    def _fit_init(self, is_fixed_vars, **kwargs):
        """
        Handles model initialisation.
        """
        init_strategies = {
            'g': lambda: np.zeros(self.dim.N * self.dim.R),
            'beta': lambda: np.row_stack((np.zeros(self.dim.D),
                                          np.eye(self.dim.R, self.dim.D))).ravel(),
            'mu_ivp': lambda: _mu_ivp_init(),
        }

        def _mu_ivp_init():
            mu_ivp = np.zeros((len(self._ifix),
                               self.dim.K,
                               len(self.Y_train_)))

            for q, Y in enumerate(self.Y_train_):
                for k in range(self.dim.K):
                    u = interp1d(self.x_train_[q], Y[:, k])
                    mu_ivp[:, k, q] = u(self.ttc[self._ifix])

            return mu_ivp

        var_names = ['g', 'beta', 'mu_ivp']
        full_init = [kwargs.pop(''.join((vn, '0')),
                                init_strategies[vn]()).ravel()
                     for vn in var_names]

        free_vars, fixed_vars = ([], [])
        for item, boolean in zip(full_init, is_fixed_vars):
            if boolean:
                fixed_vars.append(item)
            else:
                free_vars.append(item)
        free_vars_shape = [item.size for item in free_vars]

        return np.concatenate(free_vars), free_vars_shape, fixed_vars
Example #15
    def do(self, X, out, *args, **kwargs):

        # do an elementwise evaluation and return the results
        ret = self.func_eval(self.func_elementwise_eval, self, X, out, *args, **kwargs)

        # the first element decides what keys will be set
        keys = list(ret[0].keys())

        # now stack all the results for each of them together
        for key in keys:
            assert all([key in _out for _out in ret]), f"For some elements the {key} value has not been set."

            vals = []
            for elem in ret:
                val = elem[key]

                if val is not None:

                    # lists, tuples, and bare scalars are converted to arrays
                    if isinstance(val, (list, tuple)):
                        val = np.array(val)
                    elif not isinstance(val, np.ndarray):
                        val = np.full(1, val)

                    # extend the dimensions so the values can be stacked row-wise
                    val = at_least_2d_array(val, extend_as="row")

                vals.append(val)

            # that means the key has never been set at all
            if all([val is None for val in vals]):
                out[key] = None
            else:
                out[key] = np.row_stack(vals)

        return out
Example #16
q_struct_z = slds.most_likely_states(q_struct_x, y)

# Plot the ELBOS
plt.figure()
plt.plot(q_mf_elbos, label="MF")
plt.plot(q_struct_elbos, label="LDS")
plt.xlabel("Iteration")
plt.ylabel("ELBO")
plt.legend()

# Plot the true and inferred states
plt.figure(figsize=(8,6))
xlim = (0, 1000)

plt.subplot(311)
plt.imshow(np.row_stack((z, q_mf_z, q_struct_z)), aspect="auto")
plt.yticks([0, 1, 2], ["$z_{{\\mathrm{{true}}}}$", "$z_{{\\mathrm{{mf}}}}$", "$z_{{\\mathrm{{lds}}}}$"])
plt.xlim(xlim)

plt.subplot(312)
plt.plot(x, '-k', label="True")
plt.plot(q_mf_x, '--b', label="$q_{\\text{MF}}$")
plt.plot(q_struct_x, ':r', label="$q_{\\text{LDS}}$")
plt.ylabel("$x$")
plt.xlim(xlim)

plt.subplot(313)
for n in range(N):
    plt.plot(y[:, n] + 4 * n, '-k', label="True" if n == 0 else None)
    plt.plot(q_mf_y[:, n] + 4 * n, '--b', label="MF" if n == 0 else None)
    plt.plot(q_struct_y[:, n] + 4 * n, ':r', label="LDS" if n == 0 else None)
Example #17
# Plot the true states
plt.sca(axs[0])
plt.imshow(z[None, :], aspect="auto", cmap="jet")
plt.title("true")
plt.xticks([])

# Plot the inferred states
for i, obs in enumerate(observations):
    zs = []
    for method, ls in zip(methods, ['-', ':']):
        _, _, _, smoothed_z, _ = results[(obs, method)]
        zs.append(smoothed_z)

    plt.sca(axs[i + 1])
    plt.imshow(np.row_stack(zs), aspect="auto", cmap="jet")
    plt.yticks([0, 1], methods)
    if i != len(observations) - 1:
        plt.xticks([])
    else:
        plt.xlabel("time")
    plt.title(obs)

plt.tight_layout()

# Plot smoothed observations
fig, axs = plt.subplots(D, 1, figsize=(12, 8))

# Plot the true data
for d in range(D):
    plt.sca(axs[d])
Example #18
	J_diag = q._params[tr]["J_diag"]
	J_lower_diag = q._params[tr]["J_lower_diag"]
	J = blocks_to_full(J_diag, J_lower_diag)
	Jinv = np.linalg.inv(J)
	q_lem_std = np.sqrt(np.diag(Jinv))

	q_z = q_lem.mean_discrete_states[tr][0]
	q_lem_z = np.argmax(q_z, axis=1)
	# q_lem_z = model.most_likely_states(q_lem_x, ys[tr])

	f, (a0, a1, a2) = plt.subplots(3, 1, gridspec_kw={'height_ratios': [1, 3.5, 1]}, figsize=[8,6])

	yhat = model.smooth(q_lem_x, ys[tr], input=us[tr])
	# zhat = model.most_likely_states(q_lem_x, ys[tr], input=us[tr])
	zhat = np.argmax(q.mean_discrete_states[tr][0],axis=1)
	a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=1)
	a0.set_xticks([])
	a0.set_yticks([0,1])
	a0.set_yticklabels(["$z$", r"$\hat{z}$"])
	a0.set_xlim([0,ys[tr].shape[0]-1])
	# a0.axis("off")
	a1.plot(xs[tr],'b',label="true")
	a1.plot(q_lem_x,'k',label="inferred")
	a1.fill_between(np.arange(np.shape(ys[tr])[0]),(q_lem_x-q_lem_std*2.0)[:,0], (q_lem_x+q_lem_std*2.0)[:,0], facecolor='k', alpha=0.3)
	for j in range(5):
		x_sample = q.sample_continuous_states()[tr]
		a1.plot(x_sample, 'k', alpha=0.3)
	a1.plot(np.array([0,np.shape(ys[tr])[0]]),np.array([1.0,1.0]),'k--', linewidth=1)
	a1.set_ylim([-0.1,1.1])
	a1.set_ylabel("$x$")
	a1.set_xlim([0,ys[tr].shape[0]-1])
Example #19
    with open('gal_fluxes_mog.pkl', 'wb') as f:
        pickle.dump(gal_flux_mog, f)

    with open('star_fluxes_mog.pkl', 'wb') as f:
        pickle.dump(star_flux_mog, f)

    #####################################################################
    # fit model to galaxy shape parameters
    # 
    #   re  - [0, infty], transformation log
    #   ab  - [0, 1], transformation log (ab / (1 - ab))
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    print "fitting galaxy shape"
    shape_df = np.row_stack([ coadd_df[['expRad_r', 'expAB_r', 'expPhi_r']].values,
                              coadd_df[['deVRad_r', 'deVAB_r', 'deVPhi_r']].values ])[::3,:]
    shape_df[:,0] = np.log(shape_df[:,0])
    shape_df[:,1] = np.log(shape_df[:,1]) - np.log(1.-shape_df[:,1])
    shape_df[:,2] = shape_df[:,2] * (np.pi / 180.)

    bad_idx = np.any(np.isinf(shape_df), axis=1)
    shape_df = shape_df[~bad_idx,:]
    gal_re_mog = fit_mog(shape_df[:,0], mog_class = GalRadiusMoG, max_comps=50)
    gal_ab_mog = fit_mog(shape_df[:,1], mog_class = GalAbMoG, max_comps=50)

    with open('gal_re_mog.pkl', 'wb') as f:
        pickle.dump(gal_re_mog, f)

    with open('gal_ab_mog.pkl', 'wb') as f:
        pickle.dump(gal_ab_mog, f)
Example #20
def plot_trial(model, q, posterior, tr=0, legend=False):

    if posterior == "laplace_em":
        q_x = q.mean_continuous_states[tr]
        q_std = np.sqrt(q._continuous_expectations[tr][2][:, 0, 0])
    elif posterior == "mf":
        q_x = q_mf.mean[tr]
        q_std = np.sqrt(np.exp(q_mf.params[tr][1])[:, 0])

    yhat = model.smooth(q_x, ys[tr], input=us[tr])
    zhat = model.most_likely_states(q_x, ys[tr], input=us[tr])

    f, (a0, a1, a2,
        a3) = plt.subplots(4,
                           1,
                           gridspec_kw={'height_ratios': [0.5, 0.5, 3, 1]})
    # f, (a0, a1, a2) = plt.subplots(3, 1, gridspec_kw={'height_ratios': [0.5, 0.5, 3]})
    a0.imshow(np.row_stack((zs[tr], zhat)), aspect="auto", vmin=0, vmax=2)
    a0.set_xticks([])
    a0.set_yticks([0, 1], ["$z_{\\mathrm{true}}$", "$z_{\\mathrm{inf}}$"])
    a0.axis("off")
    a2.plot(xs[tr][:, 0], color='k', label="$x_1$", alpha=0.9)
    a2.plot(q_x[:, 0],
            color=[0.3, 0.3, 0.3],
            linestyle='--',
            label="$\hat{x}_1$",
            alpha=0.9)
    # for i in range(5):
    # 	a2.plot(q_lem.sample_continuous_states()[tr],'k',alpha=0.5)

    a2.fill_between(np.arange(T),
                    q_x[:, 0] - q_std * 2.0,
                    q_x[:, 0] + q_std * 2.0,
                    facecolor='k',
                    alpha=0.3)
    ub = bound_func(np.arange(T), 1.0, latent_acc.transitions.ap,
                    latent_acc.transitions.lamb, 3.0)
    a2.plot(np.arange(T), ub, 'b--')
    a2.plot(np.arange(T), -1.0 * ub, 'r--')
    a2.set_ylim([-1.2, 1.2])
    a2.set_xlim([-1, 101])
    a2.set_xticks([])
    a2.set_yticks([-1, 0, 1])
    a2.set_ylabel("x")
    if legend:
        a2.legend()
    sns.despine()
    for n in range(N):
        a3.eventplot(np.where(ys[tr][:, n] > 0)[0],
                     linelengths=0.5,
                     lineoffsets=1 + n,
                     color='k')
    sns.despine()
    a3.set_yticks([])
    a3.set_xlim([-1, 101])

    a1.plot(0.2 * us[tr][:, 0], color=[1.0, 0.5, 0.5], label=None, alpha=0.9)
    a1.set_yticks([])
    a1.set_xticks([])
    a1.axes.get_yaxis().set_visible(False)
    plt.tight_layout()

    return
Example #21
max_ll = -np.inf
for n in range(N):
    test_hmm_temp = HMM(K, D, observations="poisson") 
    poiss_lls_temp = test_hmm_temp.fit(y, num_iters=20)
    if poiss_lls_temp[-1] > max_ll:
        max_ll = poiss_lls_temp[-1]
        poiss_lls = poiss_lls_temp 
        test_hmm = test_hmm_temp
# test_hmm = HMM(K, D, observations="poisson") 
# poiss_lls = test_hmm.fit(y, num_iters=20)
test_hmm.permute(find_permutation(z, test_hmm.most_likely_states(y)))
smoothed_z = test_hmm.most_likely_states(y)

plt.figure()
plt.subplot(211)
plt.imshow(np.row_stack((z, smoothed_z)), aspect="auto")
plt.xlim([0,T_plot])
plt.subplot(212)
# plt.plot(y)
for n in range(D):
    plt.eventplot(np.where(y[:,n]>0)[0]+1, linelengths=0.5, lineoffsets=D-n,color='k')
plt.xlim([0,T_plot])


As = np.clip(0.8 + 0.1 * npr.randn(D), 0.6, 0.95)
betas = 1.0 * np.ones(D)
inv_etas = np.log(1e-2 * np.ones(D))
etas = np.exp(inv_etas)
mus = np.zeros_like(y) # start with zero mean
y_ca = np.zeros((T, D))
y_ca_test = np.zeros_like(y_test)
Example #22
 def build_thmat(mu_n):
     """Replace row n of mus with mu_n, then re-append the lns columns."""
     mumat = np.row_stack([mus[:n, :], mu_n, mus[n + 1:, :]])
     return np.column_stack([mumat, lns])
Example #23
def get_points(X, scalings):
    vals = []
    for i in range(len(X)):
        vals.append(scale_reference_directions(X[i], scalings[i]))
    X = anp.row_stack(vals)
    return X
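scale_reference_directions comes from pymoo's reference-direction utilities; a stand-in under the assumption that it shrinks each set of directions toward the simplex centroid would be:

def scale_reference_directions(ref_dirs, scaling):
    return ref_dirs * scaling + ((1 - scaling) / ref_dirs.shape[1])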
Example #24
        pickle.dump(gal_flux_mog, f)

    with open('star_fluxes_mog.pkl', 'wb') as f:
        pickle.dump(star_flux_mog, f)

    #####################################################################
    # fit model to galaxy shape parameters
    #
    #   re  - [0, infty], transformation log
    #   ab  - [0, 1], transformation log (ab / (1 - ab))
    #   phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    print "fitting galaxy shape"
    shape_df = np.row_stack([
        coadd_df[['expRad_r', 'expAB_r', 'expPhi_r']].values,
        coadd_df[['deVRad_r', 'deVAB_r', 'deVPhi_r']].values
    ])[::3, :]
    shape_df[:, 0] = np.log(shape_df[:, 0])
    shape_df[:, 1] = np.log(shape_df[:, 1]) - np.log(1. - shape_df[:, 1])
    shape_df[:, 2] = shape_df[:, 2] * (np.pi / 180.)

    bad_idx = np.any(np.isinf(shape_df), axis=1)
    shape_df = shape_df[~bad_idx, :]
    gal_re_mog = fit_mog(shape_df[:, 0], mog_class=GalRadiusMoG, max_comps=50)
    gal_ab_mog = fit_mog(shape_df[:, 1], mog_class=GalAbMoG, max_comps=50)

    with open('gal_re_mog.pkl', 'wb') as f:
        pickle.dump(gal_re_mog, f)

    with open('gal_ab_mog.pkl', 'wb') as f:
        pickle.dump(gal_ab_mog, f)
Example #25
    def _evaluate_elementwise(self, X, calc_gradient, *args, **kwargs):
        ret = []

        def func(_x):
            _out = {}
            if calc_gradient:
                _out["dF"], _ = run_and_trace(self._evaluate, _x, *[_out])
            else:
                self._evaluate(_x, _out, *args, **kwargs)
            return _out

        parallelization = self.parallelization
        if not isinstance(parallelization, (list, tuple)):
            parallelization = [parallelization]

        _type = parallelization[0]
        _params = parallelization[1:]

        # just serialize evaluation
        if _type is None:
            ret = [func(x) for x in X]

        elif _type == "threads":

            if len(_params) == 0:
                n_threads = multiprocessing.cpu_count() - 1
            else:
                n_threads = _params[0]

            with multiprocessing.Pool(n_threads) as pool:
                params = []
                for k in range(len(X)):
                    params.append(
                        [X[k], calc_gradient, self._evaluate, args, kwargs])

                ret = np.array(pool.starmap(evaluate_in_parallel, params))

        elif _type == "dask":

            if len(_params) != 2:
                raise Exception(
                    "A distributed client object is needed for dask: "
                    "parallelization=(\"dask\", <client>, <function>).")
            else:
                client, fun = _params

            jobs = []
            for k in range(len(X)):
                jobs.append(client.submit(fun, X[k]))

            ret = [job.result() for job in jobs]

        else:
            raise Exception(
                "Unknown parallelization method: %s (None, threads, dask)" %
                self.parallelization)

        # stack all the single outputs together
        out = {}
        for key in ret[0].keys():
            out[key] = anp.row_stack([ret[i][key] for i in range(len(ret))])

        return out
Example #26
def fit_mixture_jointly(num_comps,
                        lnpdf,
                        D,
                        num_iters=1000,
                        step_size=.2,
                        num_samps_per_component=100,
                        fix_samples=True,
                        init_comp_list=None,
                        ax=None,
                        xlim=None,
                        ylim=None):

    # define the mixture elbo as a function of only mixing weights.
    # to do this, we take L samples from each component, and note that
    # the ELBO decomposes into the sum of expectations wrt each component
    #   ELBO(rho) = Eq[lnpi(x) - ln q(x; rho)]
    #             = sum_c rho_c \int q_c(x; rho) [lnpi(x) - ln q(x; rho)]
    C = num_comps
    L = num_samps_per_component

    from autil.util.misc import WeightsParser
    parser = WeightsParser()
    parser.add_shape("ln_weights", (C - 1, ))
    parser.add_shape("means", (C, D))
    parser.add_shape("lnstds", (C, D))

    init_rhos = simplex_to_unconstrained(np.ones(C) * (1. / C))
    init_vars = -2 * np.ones((C, D))
    init_means = .001 * np.random.randn(C, D)
    if init_comp_list is not None:
        assert len(init_comp_list) == C
        pis = np.array([c[0] for c in init_comp_list])
        init_rhos = simplex_to_unconstrained(pis)
        init_means = np.row_stack([c[1][:D] for c in init_comp_list])
        init_vars = np.row_stack([c[1][D:] for c in init_comp_list])
    init_params = np.zeros(parser.num_weights)
    init_params = parser.set(init_params, "ln_weights", init_rhos)
    init_params = parser.set(init_params, "means", init_means)
    init_params = parser.set(init_params, "lnstds", init_vars)

    def joint_elbo(params, i, eps_tens=None):
        # sample from each cluster's normal --- transform into
        if eps_tens is None:
            eps_tens = np.random.randn(C, L, D)
        lnstds = parser.get(params, "lnstds")
        means = parser.get(params, "means")
        Csamps = eps_tens * np.exp(lnstds)[:, None, :] + means[:, None, :]

        # make qln pdf for params
        icovs = np.array(
            [np.diag(np.exp(-2 * lnstds[c])) for c in range(lnstds.shape[0])])
        dets = np.exp(np.sum(2 * lnstds, 1))
        lnws = parser.get(params, "ln_weights")
        pis = unconstrained_to_simplex(lnws)
        qlogprob = lambda x: mog.mog_logprob(x, means, icovs, dets, pis)

        # compute E_q_c[ lnq(x) ] for each component
        lnq_terms = np.reshape(qlogprob(np.reshape(Csamps, (-1, D))), (C, L))
        lnq_means = np.mean(lnq_terms, 1)

        # compute E[pi(x)] for each component
        pi_lls = np.array([lnpdf(c, 0) for c in Csamps])
        pi_lls_mean = np.mean(pi_lls, 1)
        return np.sum(pis * (pi_lls_mean - lnq_means))

    # first fit a single gaussian using BBVI
    def callback(params, i, g):
        if i % 2 == 0:
            print "weight opt iter %d, lower bound %2.4f" % (
                i, joint_elbo(params, i))
            #print "  weights    = ", unconstrained_to_simplex(params)
            #print "  gmag, grad = ", np.sqrt(np.sum(g**2)), g

            if ax is not None:
                import matplotlib.pyplot as plt
                plt.ion()
                import seaborn as sns
                sns.set_style('white')
                import autil.util.plots as pu
                ax.cla()
                # background isocontours (target) + foreground isocontours (approx)
                pu.plot_isocontours(ax,
                                    lambda x: np.exp(lnpdf(x, i)),
                                    xlim=xlim,
                                    ylim=ylim,
                                    fill=True)
                # qlogprob is local to joint_elbo, so rebuild the mixture
                # log-density here from the current parameters
                pis = unconstrained_to_simplex(parser.get(params, "ln_weights"))
                lnstds = parser.get(params, "lnstds")
                means = parser.get(params, "means")
                icovs = np.array([np.diag(np.exp(-2 * lnstds[c])) for c in range(C)])
                dets = np.exp(np.sum(2 * lnstds, 1))
                pu.plot_isocontours(ax,
                                    lambda x: np.exp(mog.mog_logprob(x, means, icovs, dets, pis)),
                                    xlim=xlim,
                                    ylim=ylim,
                                    colors='darkred')
                plt.draw()
                plt.pause(1. / 30.)

    def break_cond(x, i, g):
        gmag = np.sqrt(np.sum(g**2))
        #if gmag < 1e-4:
        #    return True
        return False

    if fix_samples:
        eps_tens = np.random.randn(C, L, D)
        var_obj = lambda x, t: -1. * joint_elbo(x, t, eps_tens=eps_tens)
    else:
        var_obj = lambda x, t: -1. * joint_elbo(x, t)

    # optimize component
    var_obj_grad = grad(var_obj)
    #fit_params = adam(var_obj_grad, init_params, num_iters=num_iters,
    #                  step_size=step_size, callback=callback,
    #                  break_cond=break_cond)
    fit_params = sgd(var_obj_grad,
                     init_params,
                     num_iters=num_iters,
                     step_size=step_size,
                     callback=callback,
                     break_cond=break_cond,
                     mass=.01)

    # unpack new var params --- compute normalized rho's
    pis_new = unconstrained_to_simplex(parser.get(fit_params, "ln_weights"))
    means_new = parser.get(fit_params, "means")
    stds_new = parser.get(fit_params, "lnstds")
    lams_new = np.column_stack([means_new, stds_new])
    comp_list_new = [(p, l) for p, l in zip(pis_new, lams_new)]
    return comp_list_new
Example #27
 def convex_combine(mogs, mixing_weights):
     return MixtureOfGaussians(
         means = np.row_stack([mog.means for mog in mogs]),
         covs  = np.row_stack([mog.covs for mog in mogs]),
         pis   = np.concatenate([ w*mog.pis for w, mog in zip(mixing_weights, mogs)])
     )
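A sketch of calling convex_combine (assuming MixtureOfGaussians stores means of shape (C, D), covs of shape (C, D, D) and pis of shape (C,); the mixing weights should sum to one):

combined = convex_combine([mog_a, mog_b], mixing_weights=[0.3, 0.7])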
Example #28
plt.plot(q_lem_elbos, label="Laplace EM")
plt.plot(q_struct_elbos, label="LDS")
plt.plot(q_mf_elbos, label="MF")
plt.xlabel("Iteration")
plt.ylabel("ELBO")
plt.legend()
plt.tight_layout()

# Plot the true and inferred states
plt.figure(figsize=(8,9))
xlim = (0, 1000)

plt.subplot(411)
# plt.imshow(np.row_stack((z, q_mf_z, q_struct_z)), aspect="auto")
plt.imshow(np.row_stack((z, q_lem_z)), aspect="auto")
# plt.yticks([0, 1, 2], ["$z_{{\\mathrm{{true}}}}$", "$z_{{\\mathrm{{mf}}}}$", "$z_{{\\mathrm{{lds}}}}$"])
plt.yticks([0, 1], ["$z_{{\\mathrm{{true}}}}$", "$z_{{\\mathrm{{L. EM}}}}$"])
plt.xlim(xlim)
plt.title("True and Most Likely Inferred States")

plt.subplot(412)
plt.imshow(q_lem_Ez[0].T, aspect="auto", cmap="Greys")
plt.xlim(xlim)
plt.title("Inferred State Probability")

plt.subplot(413)
plt.plot(x, '-k', label="True")
# plt.plot(q_mf_x, '--b', label="$q_{\\text{MF}}$")
# plt.plot(q_struct_x, ':r', label="$q_{\\text{LDS}}$")
plt.plot(q_lem_x_trans, ':r', label="$q_{\\text{Laplace}}$")
Example #29
    elbo_val = vboost.elbo_mc(comp_list, n_samps=Nsamp)
    return elbo_val


#################################################################
# create table of the following shape of ELBO values            #
#                                                               #
#          |  1-comp | 2-comp | 5-comp | 10-comp | 20-comp      #
#  vboost  |         |        |        |         |              #
#  npvi    |         |        |        |         |              #
#                                                               #
#################################################################

model = "frisk"
ncomps = [1, 2, 5, 10, 15, 20]

# compute elbos
vboost_elbos = np.array([vboost_elbo(model, c, 0) for c in ncomps])
npvi_elbos = np.array([npvi_elbo(model, c) for c in ncomps])

elbo_df = pd.DataFrame(np.row_stack([vboost_elbos, npvi_elbos]),
                       index=["vboost", "npvi"],
                       columns=["%d" % c for c in ncomps])

print(elbo_df)

tstr = elbo_df.to_latex(float_format="%2.3f")
with open(model + "_output/npvi_vboost_table.tex", 'w') as f:
    f.write(tstr)