Example No. 1
# Assumed imports for this snippet (aliases inferred from usage, not shown in the original):
import theano.tensor as tt
import pymc3.math as pmm
from numpy import pi


def transfer(fs, ts, khat, xyz, L=5e9):
    khat = tt.reshape(khat, (1, 3))

    fstar = 2.99792e8 / (2 * pi * L)

    tfs_re = tt.zeros((3, 3, ts.shape[0]))
    tfs_im = tt.zeros((3, 3, ts.shape[0]))
    for i in range(3):
        for j in range(i + 1, 3):
            # j starts at i + 1, so no i == j terms arise here
            rij = xyz[j, :, :] - xyz[i, :, :]
            r2 = tt.reshape(pmm.sum(rij * rij, axis=1), (-1, 1))
            rij = rij / pmm.sqrt(r2)

            rk = pmm.sum(rij * khat, axis=1)
            w = fs / (2 * fstar) * (1 - rk)
            wp = fs / (2 * fstar) * (1 + rk)  # Reverse direction, get a plus
            sc = pmm.sin(w) / w
            scp = pmm.sin(wp) / wp

            tfs_re = tt.set_subtensor(tfs_re[i, j, :], sc * pmm.cos(w))
            tfs_im = tt.set_subtensor(tfs_im[i, j, :], sc * pmm.sin(w))

            tfs_re = tt.set_subtensor(tfs_re[j, i, :], scp * pmm.cos(wp))
            tfs_im = tt.set_subtensor(tfs_im[j, i, :], scp * pmm.sin(wp))

    return tfs_re, tfs_im
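A minimal usage sketch (not part of the original source): it assumes the aliases added above (`theano.tensor` as `tt`, `pymc3.math` as `pmm`) and uses illustrative shapes with 3 spacecraft and N samples.

import numpy as np
import theano
import theano.tensor as tt

N = 64
fs = tt.vector('fs')      # signal frequency at each sample, shape (N,)
ts = tt.vector('ts')      # sample times, shape (N,)
khat = tt.vector('khat')  # unit propagation direction, shape (3,)
xyz = tt.tensor3('xyz')   # spacecraft positions, shape (3, N, 3)

re, im = transfer(fs, ts, khat, xyz)
evaluate = theano.function([fs, ts, khat, xyz], [re, im])

tfs_re, tfs_im = evaluate(np.full(N, 1e-3), np.linspace(0.0, 1e5, N),
                          np.array([0.0, 0.0, 1.0]),
                          1e9 * np.random.randn(3, N, 3))
print(tfs_re.shape)  # (3, 3, 64)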
Example No. 2
def uvk(nhat):
    phi = tt.arctan2(nhat[1], nhat[0])
    cos_theta = nhat[2]
    sin_theta = pmm.sqrt(1 - cos_theta * cos_theta)

    cphi = pmm.cos(phi)
    sphi = pmm.sin(phi)

    u = tt.as_tensor_variable([cos_theta * cphi, cos_theta * sphi, -sin_theta])
    v = tt.as_tensor_variable([sphi, -cphi, 0.0])
    k = tt.as_tensor_variable(
        [-sin_theta * cphi, -sin_theta * sphi, -cos_theta])

    return tt.stack((u, v, k), axis=0)
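A quick sanity check, as a sketch (assuming theano is installed and the same tt/pmm aliases as in Example No. 1): for a unit-length nhat, the rows u, v, k returned by uvk form an orthonormal triad.

import numpy as np
import theano
import theano.tensor as tt

nhat = tt.vector('nhat')
basis = theano.function([nhat], uvk(nhat))

n = np.array([0.3, 0.4, np.sqrt(1.0 - 0.25)])  # a unit vector
M = basis(n)
print(np.allclose(M @ M.T, np.eye(3)))  # True: rows are orthonormal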
Example No. 3
def dp_dc(xyz, uvk):
    dps = tt.zeros((3, 3, xyz.shape[1]))
    dcs = tt.zeros((3, 3, xyz.shape[1]))
    for i in range(3):
        for j in range(i + 1, 3):
            rij = xyz[j, :, :] - xyz[i, :, :]
            r2 = pmm.sum(rij * rij, axis=1)
            rij = rij / tt.reshape(pmm.sqrt(r2), (-1, 1))

            ru = pmm.sum(rij * uvk[0, :], axis=1)
            rv = pmm.sum(rij * uvk[1, :], axis=1)

            dps = tt.set_subtensor(dps[i, j, :], ru * ru - rv * rv)
            dcs = tt.set_subtensor(dcs[i, j, :], 2 * ru * rv)
            dps = tt.set_subtensor(dps[j, i, :], dps[i, j, :])
            dcs = tt.set_subtensor(dcs[j, i, :], dcs[i, j, :])

    return dps, dcs
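An illustrative sketch combining uvk and dp_dc (same assumptions as above; the spacecraft positions below are random placeholders with shape (3, N, 3)):

import numpy as np
import theano
import theano.tensor as tt

xyz = tt.tensor3('xyz')   # spacecraft positions, shape (3, N, 3)
nhat = tt.vector('nhat')  # sky-location unit vector
dps, dcs = dp_dc(xyz, uvk(nhat))
pattern = theano.function([xyz, nhat], [dps, dcs])

dplus, dcross = pattern(np.random.randn(3, 16, 3), np.array([0.0, 0.0, 1.0]))
print(dplus.shape, dcross.shape)  # (3, 3, 16) (3, 3, 16)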
Example No. 4
def Bcdf(x0, mu, sigma, skew):
    A = 6.5
    G = Ghat(-abs_(skew))
    BM = Mhat(G)
    BV = Vhat(G)

    # Standardize, then shift/scale to the reference distribution's mean (BM) and variance (BV)
    xs = (x0 - mu) / sigma
    x = xs * sqrt(BV) + BM

    if le(skew, 0.0):
        xc = clip(x, 0, 5)
        y = 1 - (1 + xc ** G) ** -A
    else:
        # Mirror x about BM and use the complementary form for positive skew
        x = -x + 2 * BM
        xc = clip(x, 0, 5)
        y = (1 + xc ** G) ** -A
    return y
Example No. 5
def cumulative_normal(x):
    """Cummulative normal distribution"""
    return 0.5 + 0.5 * math.erf(x / math.sqrt(2))
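A quick numerical check (assuming math here is pymc3.math, as the tensor usage in Example No. 8 suggests); the expected values are the standard normal CDF at -1, 0 and 1.

import numpy as np
import theano.tensor as tt

x = tt.as_tensor_variable(np.array([-1.0, 0.0, 1.0]))
print(cumulative_normal(x).eval())  # approx [0.1587, 0.5, 0.8413]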
Example No. 6
def make_model(A_re_data,
               A_im_data,
               E_re_data,
               E_im_data,
               Tobs,
               f0,
               fdot,
               fddot,
               sigma,
               hbin,
               lnAlow,
               lnAhigh,
               N,
               start_pt={}):
    f0_mean = f0
    fdot_mean = fdot
    fddot_mean = fddot

    with pm.Model() as model:
        _ = pm.Data('sigma', sigma)
        _ = pm.Data('hbin', hbin)
        _ = pm.Data('Tobs', Tobs)
        _ = pm.Data('N', N)
        A_re_data = pm.Data('A_re_data', A_re_data)
        A_im_data = pm.Data('A_im_data', A_im_data)
        E_re_data = pm.Data('E_re_data', E_re_data)
        E_im_data = pm.Data('E_im_data', E_im_data)

        n_phi = pm.Normal('n_phi',
                          mu=zeros(2),
                          sigma=ones(2),
                          shape=(2, ),
                          testval=start_pt.get('n_phi', randn(2)))
        phi0 = pm.Deterministic('phi0', tt.arctan2(n_phi[1], n_phi[0]))

        dphi_f0 = pm.Normal('dphi_f0', mu=0, sigma=pi, testval=0)
        dphi_fdot = pm.Normal('dphi_fdot', mu=0, sigma=pi, testval=0)
        dphi_fddot = pm.Normal('dphi_fddot', mu=0, sigma=pi, testval=0)

        f0 = pm.Deterministic('f0', f0_mean + dphi_f0 / (2 * pi * Tobs))
        fdot = pm.Deterministic('fdot',
                                fdot_mean + dphi_fdot / (pi * Tobs * Tobs))
        fddot = pm.Deterministic(
            'fddot', fddot_mean + 3.0 * dphi_fddot / (pi * Tobs * Tobs * Tobs))

        cos_iota = pm.Uniform('cos_iota',
                              lower=-1,
                              upper=1,
                              testval=start_pt.get(
                                  'cos_iota', np.random.uniform(low=-1,
                                                                high=1)))
        iota = pm.Deterministic('iota', tt.arccos(cos_iota))

        # This 2-vector gives 2*psi
        n_2psi = pm.Normal('n_2psi',
                           mu=zeros(2),
                           sigma=ones(2),
                           shape=(2, ),
                           testval=start_pt.get('n_2psi', randn(2)))
        psi = pm.Deterministic('psi', tt.arctan2(n_2psi[1], n_2psi[0]) / 2)

        n_ra_dec = pm.Normal('n_ra_dec',
                             mu=zeros(3),
                             sigma=ones(3),
                             shape=(3, ),
                             testval=start_pt.get('nhat', randn(3)))
        nhat = pm.Deterministic(
            'nhat',
            n_ra_dec / pmm.sqrt(tt.tensordot(n_ra_dec, n_ra_dec, axes=1)))
        _ = pm.Deterministic('phi', tt.arctan2(n_ra_dec[1], n_ra_dec[0]))
        _ = pm.Deterministic('theta', tt.arccos(nhat[2]))

        lnA = pm.Uniform('lnA',
                         lower=lnAlow,
                         upper=lnAhigh,
                         testval=start_pt.get(
                             'lnA', np.random.uniform(low=lnAlow,
                                                      high=lnAhigh)))
        A = pm.Deterministic('A', pmm.exp(lnA))

        y_re, y_im = y_fd(Tobs, f0, fdot, fddot, phi0, nhat, cos_iota, psi,
                          hbin, N)
        ((X_re, X_im), (Y_re, Y_im),
         (Z_re, Z_im)) = XYZ_freq(y_re, y_im, Tobs, hbin, N)
        ((A_re, A_im), (E_re, E_im),
         (T_re, T_im)) = AET_XYZ(X_re, X_im, Y_re, Y_im, Z_re, Z_im)

        A_re = pm.Deterministic('A_re', A * A_re)
        A_im = pm.Deterministic('A_im', A * A_im)
        E_re = pm.Deterministic('E_re', A * E_re)
        E_im = pm.Deterministic('E_im', A * E_im)

        snr = pm.Deterministic(
            'SNR',
            tt.sqrt(
                tt.sum(tt.square(A_re / sigma)) +
                tt.sum(tt.square(A_im / sigma)) +
                tt.sum(tt.square(E_re / sigma)) +
                tt.sum(tt.square(E_im / sigma))))

        _ = pm.Normal('A_re_obs', mu=A_re, sigma=sigma, observed=A_re_data)
        _ = pm.Normal('A_im_obs', mu=A_im, sigma=sigma, observed=A_im_data)
        _ = pm.Normal('E_re_obs', mu=E_re, sigma=sigma, observed=E_re_data)
        _ = pm.Normal('E_im_obs', mu=E_im, sigma=sigma, observed=E_im_data)

    return model
Example No. 7
def distance_f():
    # Horizontal range of a projectile launched from a height:
    # R = v*cos(theta) * (v*sin(theta) + sqrt((v*sin(theta))**2 + 2*g*h)) / g
    v_sin = D.d('v_mean') * sin(D.d('theta'))
    return D.d('v_mean') * cos(D.d('theta')) * \
           (v_sin + sqrt(v_sin ** 2 + 2 * g * D.d('height'))) / g
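The same range formula with plain numbers, as a standalone sketch; v, theta and h below are illustrative stand-ins for D.d('v_mean'), D.d('theta') and D.d('height'), and g is 9.81 m/s^2.

from math import cos, sin, sqrt

g = 9.81                      # gravitational acceleration, m/s^2
v, theta, h = 10.0, 0.7, 2.0  # launch speed (m/s), angle (rad), height (m)
v_sin = v * sin(theta)
print(v * cos(theta) * (v_sin + sqrt(v_sin ** 2 + 2 * g * h)) / g)  # range in metres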
Example No. 8
def hmetad_rm1way(data: dict, sample_model: bool = True, **kwargs: int):
    """Compute hierachical meta-d' at the subject level.

    This is an internal function. The repeated measures model must be
    called using :py:func:`metadPy.hierarchical.hmetad`.

    Parameters
    ----------
    data : dict
        Response data.
    sample_model : boolean
        If `False`, only the model is returned without sampling.
    **kwargs : keyword arguments
        All keyword arguments are passed to :py:func:`pymc3.sampling.sample`.

    Returns
    -------
    model : :py:class:`pymc3.Model` instance
        The pymc3 model. Encapsulates the variables and likelihood factors.
    trace : :py:class:`pymc3.backends.base.MultiTrace` or
        :py:class:`arviz.InferenceData`
        A `MultiTrace` or `ArviZ InferenceData` object that contains the
        samples.

    References
    ----------
    .. [#] Fleming, S.M. (2017) HMeta-d: hierarchical Bayesian estimation
       of metacognitive efficiency from confidence ratings, Neuroscience of
       Consciousness, 3(1) nix007, https://doi.org/10.1093/nc/nix007
    """
    nSubj = data["nSubj"]
    nCond = data["nCond"]
    nRatings = data["nRatings"]
    hits = data["hits"].reshape(nSubj, 2)
    falsealarms = data["falsealarms"].reshape(nSubj, 2)
    counts = data["counts"]
    Tol = data["Tol"]
    cr = data["cr"].reshape(nSubj, 2)
    m = data["m"].reshape(nSubj, 2)
    c1 = data["c1"].reshape(nSubj, 2, 1)
    d1 = data["d1"].reshape(nSubj, 2, 1)

    with Model() as model:

        #############
        # Hyperpriors
        #############
        mu_c2 = Normal("mu_c2",
                       tau=0.01,
                       shape=(1, ),
                       testval=np.random.rand() * 0.1)
        sigma_c2 = HalfNormal("sigma_c2",
                              tau=0.01,
                              shape=(1, ),
                              testval=np.random.rand() * 0.1)

        mu_D = Normal("mu_D",
                      tau=0.001,
                      shape=(1),
                      testval=np.random.rand() * 0.1)
        sigma_D = HalfNormal("sigma_D",
                             tau=0.1,
                             shape=(1),
                             testval=np.random.rand() * 0.1)

        mu_Cond1 = Normal("mu_Cond1",
                          mu=0,
                          tau=0.001,
                          shape=(1),
                          testval=np.random.rand() * 0.1)
        sigma_Cond1 = HalfNormal("sigma_Cond1",
                                 tau=0.1,
                                 shape=(1),
                                 testval=np.random.rand() * 0.1)

        #############################
        # Hyperpriors - Subject level
        #############################
        dbase_tilde = Normal(
            "dbase_tilde",
            mu=0,
            sigma=1,
            shape=(nSubj, 1, 1),
        )
        dbase = Deterministic("dbase", mu_D + sigma_D * dbase_tilde)

        Bd_Cond1_tilde = Normal(
            "Bd_Cond1_tilde",
            mu=0,
            sigma=1,
            shape=(nSubj, 1, 1),
        )

        Bd_Cond1 = Deterministic(
            "Bd_Cond1",
            mu_Cond1 + sigma_Cond1 * Bd_Cond1_tilde,
        )

        lambda_logMratio = Gamma(
            "lambda_logMratio",
            alpha=0.001,
            beta=0.001,
            shape=(nSubj, 1, 1),
        )
        sigma_logMratio = Deterministic("sigma_logMratio",
                                        1 / math.sqrt(lambda_logMratio))

        ###############################
        # Hyperprior - Condition level
        ###############################
        mu_regression = [dbase + (Bd_Cond1 * c) for c in range(nCond)]

        log_mRatio_tilde = Normal("log_mRatio_tilde",
                                  mu=0,
                                  sigma=1,
                                  shape=(nSubj, 1, 1))
        log_mRatio = Deterministic(
            "log_mRatio",
            tt.stack(mu_regression, axis=1)[:, :, :, 0] +
            tt.tile(log_mRatio_tilde,
                    (1, 2, 1)) * tt.tile(sigma_logMratio, (1, 2, 1)),
        )

        mRatio = Deterministic("mRatio", tt.exp(log_mRatio))

        # Means of SDT distributions
        metad = Deterministic("metad", mRatio * d1)
        S2mu = Deterministic("S2mu", metad / 2)
        S1mu = Deterministic("S1mu", -metad / 2)

        # TYPE 2 SDT MODEL (META-D)
        # Multinomial likelihood for response counts
        # Specify ordered prior on criteria
        # bounded above and below by Type 1 c
        cS1_hn = Normal(
            "cS1_hn",
            mu=0,
            sigma=1,
            shape=(nSubj, nCond, nRatings - 1),
            testval=np.linspace(-1.5, -0.5, nRatings - 1).reshape(
                1, 1, nRatings - 1).repeat(nSubj, axis=0).repeat(nCond,
                                                                 axis=1),
        )
        cS1 = Deterministic("cS1", -mu_c2 + (cS1_hn * sigma_c2))

        cS2_hn = Normal(
            "cS2_hn",
            mu=0,
            sigma=1,
            shape=(nSubj, nCond, nRatings - 1),
            testval=np.linspace(0.5, 1.5, nRatings - 1).reshape(
                1, 1, nRatings - 1).repeat(nSubj, axis=0).repeat(nCond,
                                                                 axis=1),
        )
        cS2 = Deterministic("cS2", mu_c2 + (cS2_hn * sigma_c2))

        # Calculate normalisation constants
        C_area_rS1 = cumulative_normal(c1 - S1mu)
        I_area_rS1 = cumulative_normal(c1 - S2mu)
        C_area_rS2 = 1 - cumulative_normal(c1 - S2mu)
        I_area_rS2 = 1 - cumulative_normal(c1 - S1mu)

        # Get nC_rS1 probs
        nC_rS1 = cumulative_normal(cS1 - S1mu) / C_area_rS1
        nC_rS1 = Deterministic(
            "nC_rS1",
            math.concatenate(
                ([
                    cumulative_normal(cS1[:, :, 0].reshape((nSubj, 2, 1)) -
                                      S1mu) / C_area_rS1,
                    nC_rS1[:, :, 1:] - nC_rS1[:, :, :-1],
                    ((cumulative_normal(c1 - S1mu) -
                      cumulative_normal(cS1[:, :, (nRatings - 2)].reshape(
                          (nSubj, 2, 1)) - S1mu)) / C_area_rS1),
                ]),
                axis=2,
            ),
        )

        # Get nI_rS2 probs
        nI_rS2 = (1 - cumulative_normal(cS2 - S1mu)) / I_area_rS2
        nI_rS2 = Deterministic(
            "nI_rS2",
            math.concatenate(
                ([
                    ((1 - cumulative_normal(c1 - S1mu)) -
                     (1 - cumulative_normal(cS2[:, :, 0].reshape(
                         (nSubj, nCond, 1)) - S1mu))) / I_area_rS2,
                    nI_rS2[:, :, :-1] -
                    (1 - cumulative_normal(cS2[:, :, 1:] - S1mu)) / I_area_rS2,
                    (1 - cumulative_normal(cS2[:, :, nRatings - 2].reshape(
                        (nSubj, nCond, 1)) - S1mu)) / I_area_rS2,
                ]),
                axis=2,
            ),
        )

        # Get nI_rS1 probs
        nI_rS1 = (-cumulative_normal(cS1 - S2mu)) / I_area_rS1
        nI_rS1 = Deterministic(
            "nI_rS1",
            math.concatenate(
                ([
                    cumulative_normal(cS1[:, :, 0].reshape((nSubj, nCond, 1)) -
                                      S2mu) / I_area_rS1,
                    nI_rS1[:, :, :-1] +
                    (cumulative_normal(cS1[:, :, 1:] - S2mu)) / I_area_rS1,
                    (cumulative_normal(c1 - S2mu) -
                     cumulative_normal(cS1[:, :, nRatings - 2].reshape(
                         (nSubj, nCond, 1)) - S2mu)) / I_area_rS1,
                ]),
                axis=2,
            ),
        )

        # Get nC_rS2 probs
        nC_rS2 = (1 - cumulative_normal(cS2 - S2mu)) / C_area_rS2
        nC_rS2 = Deterministic(
            "nC_rS2",
            math.concatenate(
                ([
                    ((1 - cumulative_normal(c1 - S2mu)) -
                     (1 - cumulative_normal(cS2[:, :, 0].reshape(
                         (nSubj, nCond, 1)) - S2mu))) / C_area_rS2,
                    nC_rS2[:, :, :-1] -
                    ((1 - cumulative_normal(cS2[:, :, 1:] - S2mu)) /
                     C_area_rS2),
                    (1 - cumulative_normal(cS2[:, :, nRatings - 2].reshape(
                        (nSubj, nCond, 1)) - S2mu)) / C_area_rS2,
                ]),
                axis=2,
            ),
        )

        # Avoid underflow of probabilities
        nC_rS1 = math.switch(nC_rS1 < Tol, Tol, nC_rS1)
        nI_rS2 = math.switch(nI_rS2 < Tol, Tol, nI_rS2)
        nI_rS1 = math.switch(nI_rS1 < Tol, Tol, nI_rS1)
        nC_rS2 = math.switch(nC_rS2 < Tol, Tol, nC_rS2)

        for c in range(nCond):
            Multinomial(
                f"CR_counts_{c}",
                n=cr[:, c],
                p=nC_rS1[:, c, :],
                observed=counts[:, c, :nRatings],
                shape=(nSubj, nRatings),
            )
            Multinomial(
                f"H_counts_{c}",
                n=hits[:, c],
                p=nC_rS2[:, c, :],
                observed=counts[:, c, nRatings * 3:nRatings * 4],
                shape=(nSubj, nRatings),
            )
            Multinomial(
                f"FA_counts_{c}",
                n=falsealarms[:, c],
                p=nI_rS2[:, c, :],
                observed=counts[:, c, nRatings:nRatings * 2],
                shape=(nSubj, nRatings),
            )
            Multinomial(
                f"M_counts_{c}",
                n=m[:, c],
                p=nI_rS1[:, c, :],
                observed=counts[:, c, nRatings * 2:nRatings * 3],
                shape=(nSubj, nRatings),
            )

        if sample_model is True:

            trace = sample(return_inferencedata=True, **kwargs)

            return model, trace

        else:
            return model
Example No. 9
def n_star_inference(n_stars,
                     iteration,
                     elem_err=False,
                     n_init=20000,
                     n_samples=1000,
                     max_stars=100):
    ## Define which stars to use
    these_stars = np.arange(max_stars)[iteration * n_stars:(iteration + 1) *
                                       n_stars]

    ## Load in mock dataset
    mock_data = np.load(mock_data_file)  # dataset
    mu_times = mock_data.f.obs_time[these_stars]  # time of birth
    sigma_times = mock_data.f.obs_time_err[these_stars]  # error on age
    all_els = mock_data.f.elements

    full_abundances = mock_data.f.abundances[
        these_stars]  # chemical element abundances for data
    full_errors = mock_data.f.abundance_errs[
        these_stars]  # error on abundances

    # Filter out correct elements:
    els = ['C', 'Fe', 'He', 'Mg', 'N', 'Ne', 'O', 'Si']  # TNG elements
    n_els = len(els)
    el_indices = np.zeros(len(els), dtype=int)
    for e, el in enumerate(els):
        for j in range(len(all_els)):
            if els[e] == str(all_els[j]):
                el_indices[e] = j
                break
            if j == len(all_els) - 1:
                print("Failed to find element %s" % el)
    obs_abundances = full_abundances[:, el_indices]
    obs_errors = full_errors[:, el_indices]

    # Now standardize dataset
    norm_data = (obs_abundances - output_mean) / output_std
    norm_sd = obs_errors / output_std

    data_obs = norm_data.ravel()
    data_sd = np.asarray(norm_sd).ravel()

    std_times_mean = (mu_times - input_mean[-1]) / input_std[-1]
    std_times_width = sigma_times / input_std[-1]

    # Define stacked local priors
    Local_prior_mean = np.vstack([
        np.hstack([std_Theta_prior_mean, std_times_mean[i]])
        for i in range(n_stars)
    ])
    Local_prior_sigma = np.vstack([
        np.hstack([std_Theta_prior_width, std_times_width[i]])
        for i in range(n_stars)
    ])

    # Bound variables to ensure they don't exit the training parameter space
    lowBound = tt._shared(np.asarray([-5, std_log_SFR_crit, -5, std_min_time]))
    upBound = tt._shared(np.asarray([5, 5, 5, std_max_time]))

    # Create stacked mean and variances
    loc_mean = np.hstack([
        np.asarray(std_Theta_prior_mean).reshape(1, -1) *
        np.ones([n_stars, 1]),
        std_times_mean.reshape(-1, 1)
    ])
    loc_std = np.hstack([
        np.asarray(std_Theta_prior_width).reshape(1, -1) *
        np.ones([n_stars, 1]),
        std_times_width.reshape(-1, 1)
    ])

    # Share theano variables
    w0 = tt._shared(w_array_0)
    b0 = tt._shared(b_array_0)
    w1 = tt._shared(w_array_1)
    b1 = tt._shared(b_array_1)
    ones_tensor = tt.ones([n_stars, 1])
    b0_all = ma.matrix_dot(ones_tensor, b0)
    b1_all = ma.matrix_dot(ones_tensor, b1)

    # Define PyMC3 Model
    simple_model = pm.Model()

    with simple_model:
        # Define priors
        Lambda = pm.Normal('Std-Lambda',
                           mu=std_Lambda_prior_mean,
                           sd=std_Lambda_prior_width,
                           shape=(1, len(std_Lambda_prior_mean)))

        Locals = pm.Normal(
            'Std-Local',
            mu=loc_mean,
            sd=loc_std,
            shape=loc_mean.shape,
            transform=pm.distributions.transforms.Interval(lowBound, upBound),
        )
        TimeSq = tt.reshape(Locals[:, -1]**2., (n_stars, 1))

        TruLa = pm.Deterministic('Lambda',
                                 Lambda * input_std[:2] + input_mean[:2])
        TruTh = pm.Deterministic(
            'Thetas', Locals[:, :3] * input_std[2:5] + input_mean[2:5])
        TruTi = pm.Deterministic(
            'Times', Locals[:, -1] * input_std[-1] + input_mean[-1])

        ## NEURAL NET
        Lambda_all = ma.matrix_dot(ones_tensor, Lambda)
        InputVariables = ma.concatenate([Lambda_all, Locals, TimeSq], axis=1)

        layer1 = ma.matrix_dot(InputVariables, w0) + b0_all
        output = ma.matrix_dot(ma.tanh(layer1), w1) + b1_all

        if elem_err:
            # ERRORS
            #element_error = pm.Normal('Element-Error',mu=-2,sd=1,shape=(1,n_els))
            element_error = pm.HalfCauchy('Std-Element-Error',
                                          beta=0.01 / output_std,
                                          shape=(1, n_els))
            TruErr = pm.Deterministic('Element-Error',
                                      element_error * output_std)
            stacked_error = ma.matrix_dot(ones_tensor, element_error)
            tot_error = ma.sqrt(
                stacked_error**2. +
                norm_sd**2.)  # NB this is all standardized by output_std here
        else:
            tot_error = norm_sd  # NB: all quantities are standardized here

        predictions = pm.Deterministic("Predicted-Abundances",
                                       output * output_std + output_mean)

        # Define likelihood function (unravelling output to make a multivariate gaussian)
        likelihood = pm.Normal('likelihood',
                               mu=output.ravel(),
                               sd=tot_error.ravel(),
                               observed=norm_data.ravel())

    # Now sample
    init_time = ttime.time()
    with simple_model:
        samples = pm.sample(draws=n_samples,
                            chains=chains,
                            cores=cores,
                            tune=tune,
                            nuts_kwargs={'target_accept': 0.9},
                            init='advi+adapt_diag',
                            n_init=n_init)
    end_time = ttime.time() - init_time

    def construct_output(samples):
        Lambda = samples.get_values('Lambda')[:, 0, :]
        Thetas = samples.get_values('Thetas')[:, :, :]
        Times = samples.get_values('Times')[:, :]

        predictions = samples.get_values('Predicted-Abundances')[:, :, :]

        if elem_err:
            Errs = samples.get_values('Element-Error')[:, 0, :]
            return Lambda, Thetas, Times, Errs, predictions
        else:
            return Lambda, Thetas, Times, predictions

    print("Finished after %.2f seconds" % end_time)

    if elem_err:
        Lambda, Thetas, Times, Errs, predictions = construct_output(samples)
        return Lambda, Thetas, Times, end_time, Errs, predictions
    else:
        Lambda, Thetas, Times, predictions = construct_output(samples)
        return Lambda, Thetas, Times, end_time, predictions