def fit(self, X, y):
        self.model = pm.Model()

        with self.model:
            xi = pm.Bernoulli(
                'xi', .05,
                shape=X.shape[1])  # inclusion probability for each variable
            alpha = pm.Cauchy('alpha', alpha=0, beta=2.0)  # Intercept
            coeff_shape = pm.Exponential("coeff_shape", lam=0.05)
            beta = pm.Cauchy(
                'beta', alpha=0, beta=coeff_shape,
                shape=X.shape[1])  # Prior for the non-zero coefficients
            p = pm.math.dot(
                X, xi * beta
            )  # Deterministic function to map the stochastics to the output
            y_obs = pm.Bernoulli('y_obs', invlogit(p + alpha),
                                 observed=y)  # Data likelihood

        with self.model:
            self.trace = pm.sample(4000,
                                   random_seed=4816,
                                   cores=1,
                                   progressbar=False,
                                   chains=1)

        return None
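A hedged follow-up sketch (not part of the original class): with the trace produced by fit() above, the posterior inclusion probability of each predictor is just the posterior mean of the Bernoulli indicator xi; the clf instance name is hypothetical.

import numpy as np

clf.fit(X, y)                                    # clf: instance of the class defining fit() above
inclusion_prob = clf.trace['xi'].mean(axis=0)    # shape: (n_features,)
selected = np.where(inclusion_prob > 0.5)[0]     # simple median-probability rule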
Example 2
    def _setup_y(self, y_data, ar, by_run):
        ''' Sets up y to be a theano shared variable. '''
        if 'y' not in self.shared_params:
            self.shared_params['y'] = shared(y_data)

            with self.model:

                n_vols = self.dataset.n_vols
                n_runs = int(len(y_data) / n_vols)

                for i in range(1, ar + 1):

                    _pad = shared(np.zeros((i, )))
                    _trunc = self.shared_params['y'][:-i]
                    y_shifted = T.concatenate((_pad, _trunc))
                    weights = np.r_[np.zeros(i), np.ones(n_vols - i)]

                    # Model an AR term for each run or use just one for all runs
                    if by_run:
                        # one AR coefficient per run, repeated across that
                        # run's volumes and masked like the shared case below
                        smoother = pm.Cauchy('AR(%d)' % i, alpha=0, beta=1,
                                             shape=n_runs)
                        _ar = (shared(np.tile(weights, n_runs)) * y_shifted *
                               T.repeat(smoother, n_vols))
                    else:
                        smoother = pm.Cauchy('AR(%d)' % i, alpha=0, beta=1)
                        weights = np.tile(weights, n_runs)
                        _ar = shared(weights) * y_shifted * smoother

                    self.mu += _ar

                sigma = pm.HalfCauchy('sigma_y_obs', beta=2)
                y_obs = pm.Normal('Y_obs',
                                  mu=self.mu,
                                  sd=sigma,
                                  observed=self.shared_params['y'])
        else:
            self.shared_params['y'].set_value(y_data)
Example 3
    def _model_setup(self):
        with self._model:
            # COSMOLOGY


            omega_m = pm.Uniform("OmegaM", lower=0, upper=1.)

            # dark energy EOS
            w = pm.Normal("w", mu=-1, sd=1)

            # My custom distance mod. function to enable
            # ADVI and HMC sampling.

            dm = distmod_w_flat(omega_m, self._h0, w, self._zcmb)

            # PHILLIPS PARAMETERS

            # M0 is the location parameter for the distribution
            # sys_scat is the scale parameter for the M0 distribution
            # rather than "unexplained variance"
            M0 = pm.Normal("M0", mu=-19.3, sd=2.)
            sys_scat = pm.HalfCauchy('sys_scat', beta=2.5)  # Gelman recommendation for variance parameter
            M_true = pm.Normal('M_true', M0, sys_scat, shape=self._n_SN)

            # following Rubin's Unity model... best idea? not sure
            taninv_alpha = pm.Uniform("taninv_alpha", lower=-.2, upper=.3)
            taninv_beta = pm.Uniform("taninv_beta", lower=-1.4, upper=1.4)

            # Transform variables
            alpha = pm.Deterministic('alpha', T.tan(taninv_alpha))
            beta = pm.Deterministic('beta', T.tan(taninv_beta))

            # Again using Rubin's Unity model.
            # After discussion with Rubin, the idea is that
            # these parameters are ideally sampled from a Gaussian,
            # but we know they are not entirely correct. So instead,
            # the Cauchy is less informative around the mean, while
            # still having informative tails.

            xm = pm.Cauchy('xm', alpha=0, beta=1)
            cm = pm.Cauchy('cm', alpha=0, beta=1)

            Rx_log = pm.Uniform('Rx_log', lower=-0.5, upper=0.5)
            Rc_log = pm.Uniform('Rc_log', lower=-1.5, upper=1.5)

            # Transformed variables
            Rx = pm.Deterministic("Rx", T.pow(10., Rx_log))
            Rc = pm.Deterministic("Rc", T.pow(10., Rc_log))

            x_true = pm.Normal('x_true', mu=xm, sd=Rx, shape=self._n_SN)
            c_true = pm.Normal('c_true', mu=cm, sd=Rc, shape=self._n_SN)

            # Do the correction
            mb = pm.Deterministic("mb", M_true + dm - alpha * x_true + beta * c_true)

            # Likelihood and measurement error

            obsc = pm.Normal("obsc", mu=c_true, sd=self._dcolor, observed=self._color)
            obsx = pm.Normal("obsx", mu=x_true, sd=self._dx1, observed=self._x1)
            obsm = pm.Normal("obsm", mu=mb, sd=self._dmb_obs, observed=self._mb_obs)
Example 4
    def create_pymc_model(self, min_observations=0, observer_bias=False):
        """Returns a PyMC3 model."""
        dfobs = self.obs.data[self.obs.data.observations > min_observations]

        with pm.Model() as model:
            delta = self.earth_distance_func(dfobs.time.values)
            r = self.sun_distance_func(dfobs.time.values)
            n = pm.Normal('n', mu=3.49, sigma=1.36)  # activity parameter
            h = pm.Normal('h', mu=6.66, sigma=1.98)  # absolute magnitude

            model_mag = comet_magnitude_power_law(h=h, n=n, delta=delta, r=r)

            if observer_bias:
                observers = dfobs.observer.unique()
                for obs in observers:
                    mask = np.array(dfobs.observer.values == obs)
                    beta = pm.HalfNormal('beta_' + obs, sigma=0.5)
                    bias = pm.Normal('bias_' + obs, mu=0., sigma=.5)
                    obsmag = pm.Cauchy('obsmag_' + obs,
                                       alpha=model_mag[mask] + bias,
                                       beta=beta,
                                       observed=dfobs.magnitude[mask])
            else:
                beta = 0.47 + pm.HalfNormal('beta', sigma=0.02)
                obsmag = pm.Cauchy('obsmag',
                                   alpha=model_mag,
                                   beta=beta,
                                   observed=dfobs.magnitude)

        self.model = model
        return self.model
Example 5
def fit0(p0, q0):
    with pm.Model() as m:
        loga = pm.Cauchy('loga', 0, 5)
        c = pm.Cauchy('c', 0, 5, testval=-5)
        μ0 = pm.Deterministic('μ0', np.exp(loga + c * np.log(p0)))
        qval = pm.Poisson('q', μ0, observed=q0)
        t = pm.sample()
    return t
Example 6
def fitrep(p0, μ0, n):
    qobs = np.random.poisson(np.outer(np.ones(n), μ0))
    with pm.Model() as m:
        α = pm.Cauchy('α', 0, 5, shape=n)
        β = pm.Cauchy('β', 0, 5, shape=n)
        μ0rep = np.exp(α + β * (np.log(p0) - logp0mean).reshape(-1, 1)).T
        qval = pm.Poisson('q0', μ0rep, observed=qobs)
        t = pm.sample()
    return t
Example 7
def fit(p0, q0):
    with pm.Model() as m:
        α = pm.Cauchy('α', 0, 5)
        β = pm.Cauchy('β', 0, 5)
        logμ0 = α + β * (np.log(p0) - logp0mean)
        μ0 = pm.Deterministic('μ0', np.exp(logμ0))
        qval = pm.Poisson('q0', μ0, observed=q0)
        t = pm.sample()
    return t
Example 8
def run_breakpoint_model(prices):
    with pm.Model() as trend_change_model:
        scaling_factor = 10
        relative_changes = list(map(lambda p: p.value * scaling_factor,
                                    prices))
        dates = list(map(lambda p: p.date, prices))

        relative_changes_np = np.array(relative_changes)
        date_indexes = np.arange(0, len(prices))

        # Prior distributions
        trend_change_point = pm.Cauchy('changepoint', alpha=100, beta=2)
        early_trend = pm.Normal('early_trend', mu=0.1, sd=0.1)
        late_trend = pm.Normal('late_trend', mu=-0.05, sd=0.1)

        # Transformed variable
        trend = pm.math.switch(trend_change_point >= date_indexes, early_trend,
                               late_trend)

        # Likelihood
        price_change = pm.Normal('price_change',
                                 trend,
                                 observed=relative_changes_np)

        samples = pm.sample(draws=1000, tune=1000, cores=1)

        # az.plot_trace(samples, var_names=['changepoint', 'early_trend', 'late_trend'])
        # trend_change_model.early_trend.summary()#
        # trend_change_model.trace['early_trend']

        pm.traceplot(samples)
        plt.show()
        print(pm.summary(samples, kind="stats"))
Example 9
File: crv.py Project: weakit/sympy
    def _sample_pymc3(cls, dist, size):
        """Sample from PyMC3."""

        import pymc3
        pymc3_rv_map = {
            'BetaDistribution': lambda dist:
                pymc3.Beta('X', alpha=float(dist.alpha), beta=float(dist.beta)),
            'CauchyDistribution': lambda dist:
                pymc3.Cauchy('X', alpha=float(dist.x0), beta=float(dist.gamma)),
            'ChiSquaredDistribution': lambda dist:
                pymc3.ChiSquared('X', nu=float(dist.k)),
            'ExponentialDistribution': lambda dist:
                pymc3.Exponential('X', lam=float(dist.rate)),
            'GammaDistribution': lambda dist:
                pymc3.Gamma('X', alpha=float(dist.k), beta=1/float(dist.theta)),
            'LogNormalDistribution': lambda dist:
                pymc3.Lognormal('X', mu=float(dist.mean), sigma=float(dist.std)),
            'NormalDistribution': lambda dist:
                pymc3.Normal('X', float(dist.mean), float(dist.std)),
            'GaussianInverseDistribution': lambda dist:
                pymc3.Wald('X', mu=float(dist.mean), lam=float(dist.shape)),
            'ParetoDistribution': lambda dist:
                pymc3.Pareto('X', alpha=float(dist.alpha), m=float(dist.xm)),
            'UniformDistribution': lambda dist:
                pymc3.Uniform('X', lower=float(dist.left), upper=float(dist.right))
        }

        dist_list = pymc3_rv_map.keys()

        if dist.__class__.__name__ not in dist_list:
            return None

        with pymc3.Model():
            pymc3_rv_map[dist.__class__.__name__](dist)
            return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
Example 10
def test_pymc3_convert_dists():
    """Just a basic check that all PyMC3 RVs will convert to and from Theano RVs."""
    tt.config.compute_test_value = "ignore"
    theano.config.cxx = ""

    with pm.Model() as model:
        norm_rv = pm.Normal("norm_rv", 0.0, 1.0, observed=1.0)
        mvnorm_rv = pm.MvNormal("mvnorm_rv",
                                np.r_[0.0],
                                np.c_[1.0],
                                shape=1,
                                observed=np.r_[1.0])
        cauchy_rv = pm.Cauchy("cauchy_rv", 0.0, 1.0, observed=1.0)
        halfcauchy_rv = pm.HalfCauchy("halfcauchy_rv", 1.0, observed=1.0)
        uniform_rv = pm.Uniform("uniform_rv", observed=1.0)
        gamma_rv = pm.Gamma("gamma_rv", 1.0, 1.0, observed=1.0)
        invgamma_rv = pm.InverseGamma("invgamma_rv", 1.0, 1.0, observed=1.0)
        exp_rv = pm.Exponential("exp_rv", 1.0, observed=1.0)
        halfnormal_rv = pm.HalfNormal("halfnormal_rv", 1.0, observed=1.0)
        beta_rv = pm.Beta("beta_rv", 2.0, 2.0, observed=1.0)
        binomial_rv = pm.Binomial("binomial_rv", 10, 0.5, observed=5)
        dirichlet_rv = pm.Dirichlet("dirichlet_rv",
                                    np.r_[0.1, 0.1],
                                    observed=np.r_[0.1, 0.1])
        poisson_rv = pm.Poisson("poisson_rv", 10, observed=5)
        bernoulli_rv = pm.Bernoulli("bernoulli_rv", 0.5, observed=0)
        betabinomial_rv = pm.BetaBinomial("betabinomial_rv",
                                          0.1,
                                          0.1,
                                          10,
                                          observed=5)
        categorical_rv = pm.Categorical("categorical_rv",
                                        np.r_[0.5, 0.5],
                                        observed=1)
        multinomial_rv = pm.Multinomial("multinomial_rv",
                                        5,
                                        np.r_[0.5, 0.5],
                                        observed=np.r_[2])

    # Convert to a Theano `FunctionGraph`
    fgraph = model_graph(model)

    rvs_by_name = {
        n.owner.inputs[1].name: n.owner.inputs[1]
        for n in fgraph.outputs
    }

    pymc_rv_names = {n.name for n in model.observed_RVs}
    assert all(
        isinstance(rvs_by_name[n].owner.op, RandomVariable)
        for n in pymc_rv_names)

    # Now, convert back to a PyMC3 model
    pymc_model = graph_model(fgraph)

    new_pymc_rv_names = {n.name for n in pymc_model.observed_RVs}
    assert pymc_rv_names == new_pymc_rv_names
 def build_model(self, name='normal_model'):
     # Define Stochastic variables
     with pm.Model(name=name) as self.model:
         # Global mean pitch angle
         self.mu_phi = pm.Uniform('mu_phi', lower=0, upper=90)
         self.sigma_phi = pm.InverseGamma('sigma_phi',
                                          alpha=2,
                                          beta=15,
                                          testval=8)
         self.sigma_gal = pm.InverseGamma('sigma_gal',
                                          alpha=2,
                                          beta=15,
                                          testval=8)
         # define a mean galaxy pitch angle
         self.phi_gal = pm.TruncatedNormal(
             'phi_gal',
             mu=self.mu_phi,
             sd=self.sigma_phi,
             lower=0,
             upper=90,
             shape=len(self.galaxies),
         )
         # draw arm pitch angles centred around this mean
         self.phi_arm = pm.TruncatedNormal(
             'phi_arm',
             mu=self.phi_gal[self.gal_arm_map],
             sd=self.sigma_gal,
             lower=0,
             upper=90,
             shape=len(self.gal_arm_map),
         )
         # convert to a gradient for a linear fit
         self.b = tt.tan(np.pi / 180 * self.phi_arm)
         # arm offset parameter
         self.c = pm.Cauchy('c',
                            alpha=0,
                            beta=10,
                            shape=self.n_arms,
                            testval=np.tile(0, self.n_arms))
         # radial noise
         self.sigma_r = pm.InverseGamma('sigma_r', alpha=2, beta=0.5)
         r = pm.Deterministic(
             'r',
             tt.exp(self.b[self.point_arm_map] * self.data['theta'] +
                    self.c[self.point_arm_map]))
         # likelihood function
         self.likelihood = pm.Normal(
             'Likelihood',
             mu=r,
             sigma=self.sigma_r,
             observed=self.data['r'],
         )
    def build_model(self, name=''):
        # Define Stochastic variables
        with pm.Model(name=name) as self.model:
            # Global mean pitch angle
            self.phi_gal = pm.Uniform('phi_gal',
                                      lower=0,
                                      upper=90,
                                      shape=len(self.galaxies))
            # note we don't model inter-galaxy dispersion here
            # intra-galaxy dispersion
            self.sigma_gal = pm.InverseGamma('sigma_gal',
                                             alpha=2,
                                             beta=20,
                                             testval=5)
            # arm offset parameter
            self.c = pm.Cauchy('c',
                               alpha=0,
                               beta=10,
                               shape=self.n_arms,
                               testval=np.tile(0, self.n_arms))

            # radial noise
            self.sigma_r = pm.InverseGamma('sigma_r', alpha=2, beta=0.5)

            # define prior for Student T degrees of freedom
            # self.nu = pm.Uniform('nu', lower=1, upper=100)

            # Define Dependent variables
            self.phi_arm = pm.TruncatedNormal(
                'phi_arm',
                mu=self.phi_gal[self.gal_arm_map],
                sd=self.sigma_gal,
                lower=0,
                upper=90,
                shape=self.n_arms)

            # convert to a gradient for a linear fit
            self.b = tt.tan(np.pi / 180 * self.phi_arm)
            r = pm.Deterministic(
                'r',
                tt.exp(self.b[self.data['arm_index'].values] *
                       self.data['theta'] +
                       self.c[self.data['arm_index'].values]))

            # likelihood function
            self.likelihood = pm.StudentT(
                'Likelihood',
                mu=r,
                sigma=self.sigma_r,
                nu=1,  #self.nu,
                observed=self.data['r'],
            )
def model(data):
    J = data["J"]  # number of schools
    y_obs = np.array(data["y"])  # estimated treatment
    sigma = np.array(data["sigma"])  # std of estimated effect
    with pm3.Model() as pymc_model:

        mu = pm3.Normal("mu", mu=0,
                        sd=5)  # hyper-parameter of mean, non-informative prior
        tau = pm3.Cauchy("tau", alpha=0, beta=5)  # hyper-parameter of sd
        theta_trans = pm3.Normal("theta_trans", mu=0, sd=1, shape=J)
        theta = mu + tau * theta_trans
        y = pm3.Normal("y", mu=theta, sd=sigma, observed=y_obs)
    return pymc_model
def bayesianCenter(data):

    with pm.Model():
        loc = pm.Uniform('location', lower=-1000., upper=1000.)
        scale = pm.Uniform('scale', lower=0.01, upper=1000.)

        pm.Cauchy('y', alpha=loc, beta=scale, observed=data)

        trace = pm.sample(3000, tune=3000, target_accept=0.92)
        pm.traceplot(trace)
        plt.show()

    return np.mean(trace['location'])
Example 15
    def build_model(self, name=''):
        # Define Stochastic variables
        with pm.Model(name=name) as self.model:
            # Global mean pitch angle
            self.phi_gal = pm.Uniform('phi_gal',
                                      lower=0,
                                      upper=90,
                                      shape=len(self.galaxies))
            # note we don't model inter-galaxy dispersion here
            # intra-galaxy dispersion
            self.sigma_gal = pm.InverseGamma('sigma_gal',
                                             alpha=2,
                                             beta=20,
                                             testval=5)
            # arm offset parameter
            self.c = pm.Cauchy('c',
                               alpha=0,
                               beta=10,
                               shape=self.n_arms,
                               testval=np.tile(0, self.n_arms))

            # radial noise
            self.sigma_r = pm.InverseGamma('sigma_r', alpha=2, beta=0.5)

            # ----- Define Dependent variables -----

            # Phi arm is drawn from a truncated normal centred on phi_gal with
            # spread sigma_gal
            gal_idx = self.gal_arm_map.astype('int32')
            self.phi_arm = pm.TruncatedNormal('phi_arm',
                                              mu=self.phi_gal[gal_idx],
                                              sd=self.sigma_gal,
                                              lower=0,
                                              upper=90,
                                              shape=self.n_arms)

            # transform to gradient for fitting
            self.b = tt.tan(np.pi / 180 * self.phi_arm)

            # r = exp(theta * tan(phi) + c)
            # do not track this as it uses a lot of memory
            arm_idx = self.data['arm_index'].values.astype('int32')
            r = tt.exp(self.b[arm_idx] * self.data['theta'] + self.c[arm_idx])

            # likelihood function (Normal likelihood assumed here)
            self.likelihood = pm.Normal(
                'Likelihood',
                mu=r,
                sigma=self.sigma_r,
                observed=self.data['r'],
            )
Example 16
    def fit(self, X_train, y_train):
        # compute the mean and standard deviation for feature preprocessing (Gelman, 2008)
        #print(X_train.shape)
        if X_train.ndim == 1:
            X_train = X_train.reshape(-1, 1)
        self.scaler = StandardScaler()
        self.scaler.fit(X_train)
        #to transform data to have standard deviation of 0.5 (Gelman, 2008)
        self.scaler.var_ *= 4
        X_train = self.transform_data(X_train)

        samples = 1000
        with pm.Model() as _:
            # betas
            alpha = pm.Cauchy('alpha', 0., 10)
            betas = []
            for i in range(X_train.shape[1]):
                betas.append(pm.Cauchy('beta' + str(i), 0., 2.5))
            # beta = pm.Cauchy('beta', 0., 2.5)
            # logit
            logit_p = alpha  # + beta * X_train
            for i in range(X_train.shape[1]):
                logit_p += betas[i] * X_train[:, i]
            p = Tht.exp(logit_p) / (1 + Tht.exp(logit_p))
            # likelihood
            _ = pm.Binomial('likelihood', n=1, p=p, observed=y_train)
            # inference
            start = pm.find_MAP()
            #step  = pm.NUTS(scaling = start)
            #trace = pm.sample(samples, step, progressbar=False, chains=2, cores=1)
            #summary = pm.summary(trace)['mean']
        self.b_map = start['alpha']
        self.a_map = []
        for i in range(X_train.shape[1]):
            self.a_map.append(start['beta' + str(i)])
        self.a_map = np.array(self.a_map)
Example 17
 def __init__(self,
              wave,
              flux,
              templates,
              adegree=None,
              mdegree=None,
              reddening=False):
     with pm.Model() as hierarchical_model:
         # Hyperpriors
         mu_age = pm.Normal("Age", mu=9, sd=1)
         mu_metal = pm.Normal("Metal", mu=0, sd=0.1)
         mu_alpha = pm.Normal("Alpha", mu=0.2, sd=.1)
         sigma_age = pm.Exponential('SAge', lam=1)
         sigma_metal = pm.Exponential('SMetal', lam=.1)
         sigma_alpha = pm.Exponential('SAlpha', lam=.1)
         ages = pm.Normal("age", mu=mu_age, sd=sigma_age, shape=N)
         metals = pm.Normal("metal", mu=mu_metal, sd=sigma_metal, shape=N)
         alphas = pm.Normal("alpha", mu=mu_alpha, sd=sigma_alpha, shape=N)
         eps = pm.Exponential("eps", lam=1, shape=N)
         w = [pm.Deterministic("w_{}".format(i), \
                               T.exp(-0.5 * T.pow(
                                   (metals[i] - ssps.metals1D) / sigma_metal,
                                   2)) *
                               T.exp(-0.5 * T.pow(
                                   (ages[i] - ssps.ages1D) / sigma_age, 2)) *
                               T.exp(
                                   -0.5 * T.pow((alphas[i] - ssps.alphas1D) /
                                                sigma_alpha, 2))) for i in
              range(N)]
         bestfit = [
             pm.math.dot(w[i].T / T.sum(w[i]), ssps.flux) for i in range(N)
         ]
         like = [
             pm.Cauchy('like_{}'.format(i),
                       alpha=bestfit[i],
                       beta=eps[i],
                       observed=obs[i]) for i in range(N)
         ]
     with hierarchical_model:
         trace = pm.sample(500, tune=500)
     vars = [
         "Age", "Metal", "Alpha", "SAge", "SMetal", "SAlpha", "age",
         "metal", "alpha", "eps"
     ]
     d = dict([(v, trace[v]) for v in vars])
     with open(dbname, 'wb') as f:
         pickle.dump(d, f)
def mc_estimate_distr(x_obs):
    x_obs_p = np.nanpercentile(x_obs, 99)
    x_obs_filt = [v for v in x_obs if abs(v) < x_obs_p]

    rlim = x_obs_p

    with pm.Model() as model:
        b = pm.Uniform('b', 0, 5000)
        a = pm.Uniform('a', -5000, 5000)
        cte = pm.Cauchy('cte', alpha=a, beta=b, observed=x_obs)

        x_trace = pm.sample(500, tune=2000)

    a_est = np.mean(x_trace['a'])
    b_est = np.mean(x_trace['b'])

    return a_est, b_est
Example 19
def test_pymc_convert_dists():
    """Just a basic check that all PyMC3 RVs will convert to and from Theano RVs."""
    tt.config.compute_test_value = 'ignore'
    theano.config.cxx = ''

    with pm.Model() as model:
        norm_rv = pm.Normal('norm_rv', 0.0, 1.0, observed=1.0)
        mvnorm_rv = pm.MvNormal('mvnorm_rv',
                                np.r_[0.0],
                                np.c_[1.0],
                                shape=1,
                                observed=np.r_[1.0])
        cauchy_rv = pm.Cauchy('cauchy_rv', 0.0, 1.0, observed=1.0)
        halfcauchy_rv = pm.HalfCauchy('halfcauchy_rv', 1.0, observed=1.0)
        uniform_rv = pm.Uniform('uniform_rv', observed=1.0)
        gamma_rv = pm.Gamma('gamma_rv', 1.0, 1.0, observed=1.0)
        invgamma_rv = pm.InverseGamma('invgamma_rv', 1.0, 1.0, observed=1.0)
        exp_rv = pm.Exponential('exp_rv', 1.0, observed=1.0)

    # Convert to a Theano `FunctionGraph`
    fgraph = model_graph(model)

    rvs_by_name = {
        n.owner.inputs[1].name: n.owner.inputs[1]
        for n in fgraph.outputs
    }

    pymc_rv_names = {n.name for n in model.observed_RVs}
    assert all(
        isinstance(rvs_by_name[n].owner.op, RandomVariable)
        for n in pymc_rv_names)

    # Now, convert back to a PyMC3 model
    pymc_model = graph_model(fgraph)

    new_pymc_rv_names = {n.name for n in pymc_model.observed_RVs}
    assert pymc_rv_names == new_pymc_rv_names
Example 20
def ReparametrizedCauchy(name, alpha=None, beta=None, shape=1):
    """
    Create a reparametrized Cauchy distributed random variable.

    Parameters
    ----------
    name : str
        Name of the variable.
    alpha : float
        Mode of Cauchy distribution.
    beta : float, > 0
        Scale parameter of Cauchy distribution
    shape: int or tuple of ints, default 1
        Shape of array of variables. If 1, then a single scalar.

    Returns
    -------
    output : pymc3 distribution
        Reparametrized Cauchy distribution.

    Notes
    -----
    .. The reparametrization procedure allows the sampler to sample
       a Cauchy distribution with alpha = 0 and beta = 1, and then do a
       deterministic reparametrization to achieve sampling of the 
       original desired Cauchy distribution.
    """
    # Check inputs
    if not isinstance(name, str):
        raise RuntimeError('`name` must be a string.')
    if alpha is None or beta is None:
        raise RuntimeError('`alpha` and `beta` must be provided.')

    var_reparam = pm.Cauchy(name + '_reparam', alpha=0, beta=1, shape=shape)
    var = pm.Deterministic(name, alpha + var_reparam * beta)

    return var
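A minimal usage sketch (assumed, not from the original source): the helper is called inside a model context like any built-in PyMC3 distribution, here giving a non-centered Cauchy(0, 2.5) prior on a location parameter.

import numpy as np
import pymc3 as pm

y_data = np.random.randn(50)
with pm.Model():
    # hypothetical use of the reparametrized helper defined above
    mu = ReparametrizedCauchy('mu', alpha=0.0, beta=2.5)
    sigma = pm.HalfNormal('sigma', sd=1.0)
    pm.Normal('obs', mu=mu, sd=sigma, observed=y_data)
    trace = pm.sample(1000, tune=1000, cores=1)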
Example 21
    def _model_setup(self):
        with self._model:
            # COSMOLOGY


            omega_m = pm.Uniform("OmegaM", lower=0., upper=1.)
            omega_k = pm.Uniform("Omegak", lower=-1., upper=1.)

            # My custom distance mod. function to enable
            # ADVI and HMC sampling.



            #  We are going to have to break this into
            #  four likelihoods

            dm_0 = distmod_constant_curve(omega_m, omega_k, self._h0, self._zcmb_survey[0])
            dm_1 = distmod_constant_curve(omega_m, omega_k, self._h0, self._zcmb_survey[1])
            dm_2 = distmod_constant_curve(omega_m, omega_k, self._h0, self._zcmb_survey[2])
            dm_3 = distmod_constant_curve(omega_m, omega_k, self._h0, self._zcmb_survey[3])

            # PHILLIPS PARAMETERS

            # M0 is the location parameter for the distribution
            # sys_scat is the scale parameter for the M0 distribution
            # rather than "unexplained variance"
            M0 = pm.Uniform("M0", lower=-20., upper=-18.)
            sys_scat = pm.HalfCauchy('sys_scat', beta=2.5)  # Gelman recommendation for variance parameter

            M_true_0 = pm.Normal('M_true_0', M0, sys_scat, shape=self._n_SN_survey[0])
            M_true_1 = pm.Normal('M_true_1', M0, sys_scat, shape=self._n_SN_survey[1])
            M_true_2 = pm.Normal('M_true_2', M0, sys_scat, shape=self._n_SN_survey[2])
            M_true_3 = pm.Normal('M_true_3', M0, sys_scat, shape=self._n_SN_survey[3])

            # following Rubin's Unity model... best idea? not sure
            taninv_alpha = pm.Uniform("taninv_alpha", lower=-.2, upper=.3)
            taninv_beta = pm.Uniform("taninv_beta", lower=-1.4, upper=1.4)

            # Transform variables
            alpha = pm.Deterministic('alpha', T.tan(taninv_alpha))
            beta = pm.Deterministic('beta', T.tan(taninv_beta))

            # Again using Rubin's Unity model.
            # After discussion with Rubin, the idea is that
            # these parameters are ideally sampled from a Gaussian,
            # but we know they are not entirely correct. So instead,
            # the Cauchy is less informative around the mean, while
            # still having informative tails.

            xm = pm.Cauchy('xm', alpha=0, beta=1)

            cm = pm.Cauchy('cm', alpha=0, beta=1, shape=4)

            s = pm.Uniform('s', lower=-2, upper=2, shape=4)

            c_shift_0 = cm[0] + s[0] * self._zcmb_survey[0]
            c_shift_1 = cm[1] + s[1] * self._zcmb_survey[1]
            c_shift_2 = cm[2] + s[2] * self._zcmb_survey[2]
            c_shift_3 = cm[3] + s[3] * self._zcmb_survey[3]

            Rx_log = pm.Uniform('Rx_log', lower=-0.5, upper=0.5)
            Rc_log = pm.Uniform('Rc_log', lower=-1.5, upper=1.5, shape=4)

            # Transformed variables
            Rx = pm.Deterministic("Rx", T.pow(10., Rx_log))

            Rc = pm.Deterministic("Rc", T.pow(10., Rc_log))

            x_true_0 = pm.Normal('x_true_0', mu=xm, sd=Rx, shape=self._n_SN_survey[0])
            c_true_0 = pm.Normal('c_true_0', mu=c_shift_0, sd=Rc[0], shape=self._n_SN_survey[0])
            x_true_1 = pm.Normal('x_true_1', mu=xm, sd=Rx, shape=self._n_SN_survey[1])
            c_true_1 = pm.Normal('c_true_1', mu=c_shift_1, sd=Rc[1], shape=self._n_SN_survey[1])
            x_true_2 = pm.Normal('x_true_2', mu=xm, sd=Rx, shape=self._n_SN_survey[2])
            c_true_2 = pm.Normal('c_true_2', mu=c_shift_2, sd=Rc[2], shape=self._n_SN_survey[2])
            x_true_3 = pm.Normal('x_true_3', mu=xm, sd=Rx, shape=self._n_SN_survey[3])
            c_true_3 = pm.Normal('c_true_3', mu=c_shift_3, sd=Rc[3], shape=self._n_SN_survey[3])

            # Do the correction
            mb_0 = pm.Deterministic("mb_0", M_true_0 + dm_0 - alpha * x_true_0 + beta * c_true_0)
            mb_1 = pm.Deterministic("mb_1", M_true_1 + dm_1 - alpha * x_true_1 + beta * c_true_1)
            mb_2 = pm.Deterministic("mb_2", M_true_2 + dm_2 - alpha * x_true_2 + beta * c_true_2)
            mb_3 = pm.Deterministic("mb_3", M_true_3 + dm_3 - alpha * x_true_3 + beta * c_true_3)

            # Likelihood and measurement error

            obsc_0 = pm.Normal("obsc_0", mu=c_true_0, sd=self._dcolor_survey[0], observed=self._color_survey[0])
            obsx_0 = pm.Normal("obsx_0", mu=x_true_0, sd=self._dx1_survey[0], observed=self._x1_survey[0])
            obsm_0 = pm.Normal("obsm_0", mu=mb_0, sd=self._dmbObs_survey[0], observed=self._mbObs_survey[0])

            obsc_1 = pm.Normal("obsc_1", mu=c_true_1, sd=self._dcolor_survey[1], observed=self._color_survey[1])
            obsx_1 = pm.Normal("obsx_1", mu=x_true_1, sd=self._dx1_survey[1], observed=self._x1_survey[1])
            obsm_1 = pm.Normal("obsm_1", mu=mb_1, sd=self._dmbObs_survey[1], observed=self._mbObs_survey[1])

            obsc_2 = pm.Normal("obsc_2", mu=c_true_2, sd=self._dcolor_survey[2], observed=self._color_survey[2])
            obsx_2 = pm.Normal("obsx_2", mu=x_true_2, sd=self._dx1_survey[2], observed=self._x1_survey[2])
            obsm_2 = pm.Normal("obsm_2", mu=mb_2, sd=self._dmbObs_survey[2], observed=self._mbObs_survey[2])

            obsc_3 = pm.Normal("obsc_3", mu=c_true_3, sd=self._dcolor_survey[3], observed=self._color_survey[3])
            obsx_3 = pm.Normal("obsx_3", mu=x_true_3, sd=self._dx1_survey[3], observed=self._x1_survey[3])
            obsm_3 = pm.Normal("obsm_3", mu=mb_3, sd=self._dmbObs_survey[3], observed=self._mbObs_survey[3])
Example 22
plt.plot(x, y, 'o')
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$f(x)$', fontsize=16, rotation=0)
plt.savefig('img801.png')
"""


def gauss_kernel(x, n_knots):
    knots = np.linspace(x.min(), x.max(), n_knots)
    w = 2
    return np.array([np.exp(-(x - k)**2 / w) for k in knots])


n_knots = 5
with pm.Model() as kernel_model:
    gamma = pm.Cauchy('gamma', alpha=0, beta=1, shape=n_knots)
    sd = pm.Uniform('sd', 0, 10)
    mu = pm.math.dot(gamma, gauss_kernel(x, n_knots))
    yl = pm.Normal('yl', mu=mu, sd=sd, observed=y)

    kernel_trace = pm.sample(5000, njobs=1)

#pm.traceplot(kernel_trace)
#plt.savefig('img802.png')

ppc = pm.sample_ppc(kernel_trace, model=kernel_model, samples=100)
plt.plot(x, ppc['yl'].T, 'ro', alpha=0.1)

plt.plot(x, y, 'bo')
plt.xlabel('$x$', fontsize=16)
plt.ylabel('f(x)', fontsize=16, rotation=0)
Example 23
fig, (ax1, ax2) = plt.subplots(1, 2)

ax1.hist(y)
ax1.set_title('Normal distribution returns')
ax2.hist(returns)
ax2.set_title('Real returns')

plt.show()

#%%
# 3. now let's relax the normal distribution assumption: let's fit a Cauchy distribution.
with pm.Model() as model2:

    beta = pm.HalfNormal('beta', sd=10.)

    pm.Cauchy('returns', alpha=0.0, beta=beta, observed=returns)

    mean_field = pm.fit(n=150000,
                        method='advi',
                        obj_optimizer=pm.adam(learning_rate=.001))

    trace2 = mean_field.sample(draws=10000)

preds2 = pm.sample_ppc(trace2, samples=10000, model=model2)
y2 = np.reshape(np.mean(preds2['returns'], axis=0), [-1])

fig, (ax1, ax2) = plt.subplots(1, 2)

ax1.hist(y2)
ax1.set_title('Cauchy distribution returns')
ax2.hist(returns)
Example 24
def cauchyvar(data):
    with pm.Model() as model:
        alfa = pm.Uniform('alfa', lower=-1.0, upper=1.0)
        beta = pm.Uniform('beta', lower=0.0, upper=200.0)
        cauchy = pm.Cauchy('cauchy', alpha=alfa, beta=beta, observed=data)
    return model
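A brief usage sketch (assumed): cauchyvar only builds the model, so sampling happens in the caller; the synthetic data below is purely illustrative.

import numpy as np
import pymc3 as pm

synthetic = np.random.standard_cauchy(200)
with cauchyvar(synthetic):
    trace = pm.sample(1000, tune=1000, cores=1)
print(pm.summary(trace))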
Example 25
def main():
    """Download the Rouder et al. (2008) data set, organize it, fit the model, and
    plot the traces.

    """
    # load the data
    a = "https://raw.githubusercontent.com/PerceptionCognitionLab/"
    b = "data0/master/wmPNAS2008/lk2clean.csv"
    df = pd.read_csv(urlopen(a + b), index_col=0)

    # compress into "binomial" format
    data = []
    for (subj, N), _df in df.groupby(["sub", "N"]):

        data.append({
            "subj": subj,
            "M": N,
            "H": _df[_df.ischange.astype(bool)].resp.sum(),
            "D": _df.ischange.sum(),
            "F": _df[(1 - _df.ischange).astype(bool)].resp.sum(),
            "S": (1 - _df.ischange).sum(),
        })
    data = pd.DataFrame(data)
    subjects = data.subj.unique()

    # create a design matrix to map subjects to rows in data
    X = np.asarray(dmatrix("0 + C(subj)", data))

    # create model

    with pm.Model():

        # capacity
        mu = pm.Cauchy(name=r"$\mu_{(\kappa)}$", alpha=0, beta=5)
        de = pm.Normal(name=r"$\delta_{(\kappa)}$",
                       mu=0,
                       sigma=1,
                       shape=len(subjects))
        si = pm.HalfCauchy(name=r"$\sigma_{(\kappa)}$", beta=5)
        x = pm.Deterministic(r"$\kappa$", mu + de * si)
        x = pm.Deterministic(r"$k$", tt.largest(x, tt.zeros(len(subjects))))
        k = pm.math.dot(X, x)

        # guesses "same"
        mu = pm.Cauchy(name=r"$\mu_{(\gamma)}$", alpha=0, beta=5)
        de = pm.Normal(name=r"$\delta_{(\gamma)}$",
                       mu=0,
                       sigma=1,
                       shape=len(subjects))
        si = pm.HalfCauchy(name=r"$\sigma_{(\gamma)}$", beta=5)
        x = pm.Deterministic(r"$\gamma$", mu + de * si)
        x = pm.Deterministic(r"$g$", pm.math.sigmoid(x))
        g = pm.math.dot(X, x)

        # does not lapse
        mu = pm.Cauchy(name=r"$\mu_{(\zeta)}$", alpha=0, beta=5)
        de = pm.Normal(name=r"$\delta_{(\zeta)}$",
                       mu=0,
                       sigma=1,
                       shape=len(subjects))
        si = pm.HalfCauchy(name=r"$\sigma_{(\zeta)}$", beta=5)
        x = pm.Deterministic(r"$\zeta$", mu + de * si)
        x = pm.Deterministic(r"$z$", pm.math.sigmoid(x))
        z = pm.math.dot(X, x)

        # probabilities
        q = tt.smallest(k / data.M, tt.ones(len(data)))
        h = (1 - z) * g + z * q + z * (1 - q) * g
        f = (1 - z) * g + z * (1 - q) * g

        # responses
        pm.Binomial(name="$H$", p=h, n=data.D, observed=data.H)
        pm.Binomial(name="$F$", p=f, n=data.S, observed=data.F)

        # sample and plot
        trace = pm.sample(draws=5000, tune=2000, chains=2)
        pm.traceplot(trace, compact=True)
        plt.savefig("../../assets/images/wm-cap.png",
                    bbox_inches=0,
                    transparent=True)
Example 26
data = pd.read_csv('{}_r{}_{}C_{}_{}_growth.csv'.format(
    DATE, RUN_NUMBER, TEMP, CARBON, OPERATOR))
data = data[(data['absorbance'] >= 0.1) & (data['absorbance'] <= 0.8)]

with pm.Model() as model:
    a0 = pm.HalfNormal('a0', sd=1)
    lam = pm.HalfFlat('lambda')
    gamma = mwc.bayes.Jeffreys('gamma', lower=1E-9, upper=100)

    # Compute the expected value.
    time = data['elapsed_time_min'].values
    mu = np.log(a0) + time * lam

    # Define the likelihood and sample.
    like = pm.Cauchy('like',
                     mu,
                     gamma,
                     observed=np.log(data['absorbance'].values))
    trace = pm.sample(tune=5000, draws=5000)
    trace_df = mwc.stats.trace_to_dataframe(trace, model)
    stats = mwc.stats.compute_statistics(trace_df)

# %% Compute the best fit and credible region
modes = {}
hpds = {}
grouped = stats.groupby('parameter')
for g, d in grouped:
    modes[g] = d['mode'].values[0]
    hpds[g] = [d[['hpd_min', 'hpd_max']].values]

time_range = np.linspace(data['elapsed_time_min'].min(),
                         data['elapsed_time_min'].max(), 500)
Example 27
# ----------------------------------------------------------------------
# Draw the sample from a Cauchy distribution
np.random.seed(44)
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(10)

# ----------------------------------------------------------------------
# Set up and run MCMC:
with pm.Model():
    mu = pm.Uniform('mu', -5, 5)
    log_gamma = pm.Uniform('log_gamma', -10, 10)

    # set up our observed variable x
    x = pm.Cauchy('x', mu, np.exp(log_gamma), observed=xi)

    trace = pm.sample(draws=12000, tune=1000, cores=1)

# compute histogram of results to plot below
L_MCMC, mu_bins, gamma_bins = np.histogram2d(trace['mu'],
                                             np.exp(trace['log_gamma']),
                                             bins=(np.linspace(-5, 5, 41),
                                                   np.linspace(0, 5, 41)))
L_MCMC[L_MCMC == 0] = 1E-16  # prevents zero-division errors

# ----------------------------------------------------------------------
# Compute likelihood analytically for comparison
mu = np.linspace(-5, 5, 70)
gamma = np.linspace(0.1, 5, 70)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
Example 28

# In[2]:

# http://permalink.lanl.gov/object/tr?what=info:lanl-repo/lareport/LA-UR-93-1179


# * Shots are fired isotropically from a point and hit a position sensitive detector
# * There is no scattering
# * y is fixed to be 1 away

# In[3]:

# generate some data
with pm.Model() as model:
    x = pm.Cauchy(name='x', alpha=0, beta=1)
    trace = pm.sample(20000)
    pm.traceplot(trace)
sampledat = trace['x']


# In[4]:

trace.varnames, trace['x']
plt.hist(sampledat, 200, normed=True);
plt.yscale('log');


# In[5]:

np.random.randint(0, len(sampledat), 10)
Example 29
# Load and trim the data to start at OD = 0.1
data = pd.read_csv('{}_growth.csv'.format(BASE_NAME))
data = data[(data['OD_600'] >= 0.1) & (data['OD_600'] <= 0.8)]

with pm.Model() as model:
    a0 = pm.HalfNormal('a0', sd=1)
    lam = pm.HalfFlat('lambda')
    gamma = mwc.bayes.Jeffreys('gamma', lower=1E-9, upper=100)

    # Compute the expected value.
    time = data['elapsed_time_min'].values
    mu = np.log(a0) + time * lam

    # Define the likelihood and sample.
    like = pm.Cauchy('like', mu, gamma, observed=np.log(data['OD_600'].values))
    trace = pm.sample(tune=5000, draws=5000)
    trace_df = mwc.stats.trace_to_dataframe(trace, model)
    stats = mwc.stats.compute_statistics(trace_df)

# %% Compute the best fit and credible region
modes = {}
hpds = {}
grouped = stats.groupby('parameter')
for g, d in grouped:
    modes[g] = d['mode'].values[0]
    hpds[g] = [d[['hpd_min', 'hpd_max']].values]

time_range = np.linspace(data['elapsed_time_min'].min(),
                         data['elapsed_time_min'].max(), 500)
best_fit = modes['a0'] * np.exp(modes['lambda'] * time_range)
Example 30
def rwfmm(functional_data,
          static_data,
          Y,
          func_coef_sd='prior',
          method='nuts',
          robust=False,
          func_coef_sd_hypersd=0.1,
          coefficient_prior='flat',
          include_random_effect=True,
          variable_func_scale=True,
          time_rescale_func=False,
          sampler_kwargs={
              'init': 'adapt_diag',
              'chains': 1,
              'tune': 500,
              'draws': 500
          },
          return_model_only=False,
          n_spline_knots=20,
          func_coef_type='random_walk',
          spline_degree=3,
          spline_coef_sd='prior',
          spline_coef_hyper_sd=2.,
          spline_coef_prior='random_walk',
          spline_rw_sd=1.,
          average_every_n=1,
          spline_rw_hyper_sd=1.,
          poly_order=4):
    '''
    Fits a functional mixed model with a random-walk model of
    the functional coefficient. A range of different priors is available for
    the model coefficients.

    Parameters
    ----------
    functional_data : 4D Numpy array
        Data inputs for functional covariates with expected shape (S,V,T,F)
        where S denotes the number of subjects, V denotes the number of
        visits or repeated observations for each subject, T denotes the
        dimension of the functional data (i.e. number of timesteps)
        and F denotes the number of functional coefficients.
    static_data: 3D Numpy array
        Data inputs for static (i.e. non-functional) covariates which are
        constant for each subject/visits combination.
        This array is expected to have the shape (S,V,C) where
        C denotes the number of static covariates.
    Y: 3D Numpy array
        Responses for the functional regression. This array is expected to
        have the same dimensions as static_data.
    tune: int
        Number of tuning steps used in MCMC
    draws: int
        Number of post-tuning draws sampled.
    chains: int
        Number of MCMC chains used.
    func_coef_sd: float or string
        The standard deviation of the Gaussian random walk for all
        functional coefficients. If set to "prior", then this quantity
        will also be treated as a parameter that needs to be estimated.
    method: string
        Designates the method to be used to fit the model.
        This must be one of "nuts", "mh" or one of the approximate inference
        methods at https://docs.pymc.io/api/inference.html#variational.
    n_iter_approx: int
        Number of optimization iterations to be used if the model fitting
        method is an approximate inference method.
    robust: bool
        Determines whether a normal error model or a robust Student-T error
        model is assumed for the residuals.
    func_coef_sd_hypersd: float
        If func_coef_sd is set to "prior", then this parameter sets the
        standard deviation of the half-normal distribution over the
        functional coefficient standard deviation (func_coef_sd). Note that
        in this case, each functional coefficient gets its own standard
        deviation drawn from the same prior defined by this parameter.
    coefficient_prior: string
        Determines the prior placed on the static covariate coefficients as
        well as the mean (a.k.a. the level) of the functional coefficient.
        The options are "flat","normal","horseshoe","finnish_horseshoe".
    include_random_effect: bool
        Determines whether or not a per-subject random intercept is included.
    variable_func_scale : bool
        Determines whether or not to allow the functional coefficients be
        multiplied by a positive number. This can lead to identifiability issues
        if a weak prior is specified on the functional coefficient evolution
        variance.
    time_rescale_func : bool
        If true, divides the functional coefficient by T. This can help make
        the coefficient more interpretable.
    sampler_kwargs: dict
        Any additional arguments to be passed to pm.sample().
    return_model_only: bool
        If true, returns only the model object without sampling. This can be
        helpful for debugging.
    func_coef_type : string
        One of 'constant','random_walk', 'bspline_recursive', 'natural_spline',
        'linear','bspline_design' or 'polynomial'.
        This determines how the functional coefficient will be parameterized. If it
        is 'random_walk', then the coefficient will be computed as the cumulative
        sum of many small normally-distributed jumps whose standard deviation
        is controlled by 'func_coef_sd'. Alternatively, if one of the bspline
        options is used, then the functional coefficient will be a bspline. The option
        'bspline_recursive' builds the coefficient using the de Boor algorithm
        while the options 'bspline_design' and 'natural_spline' build a design
        matrix using patsy's functionality and then estimates the coefficients
        linking that matrix to the functional coefficients. Using 'polynomial'
        specifies the functional coefficient as a polynomial of order 'poly_order'.
        'linear' makes the functional coefficient a linear function of the function
        domain.
    poly_order : int
        The degree of the polynomial used if the functional coefficient type is
        set to 'polynomial'.
    n_spline_knots : int
        In the event that the functional coefficient is one of the bspline choices,
        then this controls how many knots or breakpoints the spline has. In general,
        higher numbers for this value are required for higher spline orders.
    spline_degree : int
        The order of the spline if the functional coefficient is parameterized as a
        bspline. This is also the order of the polynomial for each spline section
        plus 1. Set this equal to 4 for cubic polynomial approximations in the spline.
    spline_coef_sd : float
        The standard deviation of the normal prior on the spline coefficients.
    spline_coef_prior : string
        One of 'normal', 'flat', or 'random_walk'. This controls how the
        bspline coefficients are smoothed.
    spline_rw_sd : string or float
        Either 'prior' or a float. This controls how much the spline coefficients
        are allowed to jump when using a random walk for the spline coefficient
        prior.
    spline_rw_hyper_sd : float
        If 'spline_rw_sd' is set to 'prior', this is the standard deviation
        of the half-Normal prior on the spline random walk jump standard
        deviation.
    average_every_n : int
        This is used to average every n measurements of the functional data
        together. For example, if the functional data corresponds to 96 hourly
        timesteps' worth of data, setting this to 4 would take the 24 hour average
        and reduce the size of T from 96 to 24. The default setting of 1 leaves
        the data unchanged.

    Returns
    -------
    trace: pymc3 Trace
        Samples produced either via MCMC or approximate inference during
        fitting.
    model: pymc3 Model
        The model object describing the RWFMM.
    '''

    with pm.Model() as model:
        S, V, T, F = functional_data.shape
        _, _, C = static_data.shape

        #functional_data = np.mean(functional_data.reshape(-1, average_every_n), axis=1)

        # We want to make sure the two data arrays agree in the number of
        # subjects (S) and visits (V).
        assert static_data.shape[0:2] == functional_data.shape[0:2]

        # Total number of functional and static coefficients.
        # This does not include the random-walk jumps.
        n_covariates = F + C

        if include_random_effect:
            random_effect_mean = pm.Flat('random_effect_mean')
            random_effect_sd = pm.HalfCauchy('random_effect_sd', beta=1.)
            random_effect_unscaled = pm.Normal('random_effect_unscaled',
                                               shape=[S, 1])
            random_effect = pm.Deterministic(
                'random_effect',
                random_effect_unscaled * random_effect_sd + random_effect_mean)
        else:
            random_effect = 0.

        if coefficient_prior == 'flat':
            coef = pm.Flat('coef', shape=n_covariates)

        elif coefficient_prior == 'normal':
            coef_sd = pm.HalfCauchy('coef_sd', beta=1.)
            coef = pm.Normal('coef', sd=coef_sd, shape=[n_covariates])

        elif coefficient_prior == 'cauchy':
            coef_sd = pm.HalfCauchy('coef_sd', beta=1.0)
            coef = pm.Cauchy('coef',
                             alpha=0.,
                             beta=coef_sd,
                             shape=[n_covariates])

        elif coefficient_prior == 'horseshoe':
            loc_shrink = pm.HalfCauchy('loc_shrink',
                                       beta=1,
                                       shape=[n_covariates])
            glob_shrink = pm.HalfCauchy('glob_shrink', beta=1)
            coef = pm.Normal('coef',
                             sd=(loc_shrink * glob_shrink),
                             shape=[n_covariates])

        # Implemented per Piironen and Vehtari '18
        elif coefficient_prior == 'finnish_horseshoe':

            loc_shrink = pm.HalfCauchy('loc_shrink',
                                       beta=1,
                                       shape=[n_covariates])
            glob_shrink = pm.HalfCauchy('glob_shrink', beta=1)

            # In order to get some of the values within the prior calculations,
            # we need to know the variance of the predictors.
            static_var = np.var(static_data, axis=(0, 1))
            func_var = np.var(functional_data, axis=(0, 1, 2))
            variances = np.concatenate([static_var, func_var])

            nu_c = pm.Gamma('nu_c', alpha=2.0, beta=0.1)
            c = pm.InverseGamma('c',
                                alpha=nu_c / 2,
                                beta=nu_c * variances / 2,
                                shape=[n_covariates])

            regularized_loc_shrink = c * loc_shrink**2 / (
                c + glob_shrink**2 * loc_shrink**2)

            coef = pm.Normal('coef',
                             sd=(regularized_loc_shrink * glob_shrink**2)**0.5,
                             shape=[n_covariates])

        if func_coef_type == 'constant':
            func_coef = pm.Deterministic('func_coef',
                                         tt.zeros([T, F]) + coef[C:])

        elif func_coef_type == 'random_walk':

            if func_coef_sd == 'prior':
                func_coef_sd = pm.HalfNormal('func_coef_sd',
                                             sd=func_coef_sd_hypersd,
                                             shape=F)

            # The 'jumps' are the small deviations about the mean of the functional
            # coefficient.
            if variable_func_scale:
                log_scale = pm.Normal('log_scale', shape=F)
            else:
                log_scale = 0.0

            jumps = pm.Normal('jumps', sd=func_coef_sd, shape=(T, F))
            random_walks = tt.cumsum(jumps,
                                     axis=0) * tt.exp(log_scale) + coef[C:]
            func_coef = pm.Deterministic('func_coef', random_walks)

        elif (func_coef_type == 'natural_spline'
              or func_coef_type == 'bspline_design'):

            x = np.arange(T)

            # The -1 in the design matrix creation is to make sure that there
            # is no constant term which would be made superfluous by 'coef'
            # which is added to the functional coefficient later.
            if func_coef_type == 'natural_spline':
                spline_basis = p.dmatrix(
                    "cr(x, df = {0}) - 1".format(n_spline_knots), {
                        "x": x
                    },
                    return_type='dataframe').values
            elif func_coef_type == 'bspline_design':
                spline_basis = p.dmatrix(
                    "bs(x, df = {0}) - 1".format(n_spline_knots), {
                        "x": x
                    },
                    return_type='dataframe').values

            # If this produces a curve which is too spiky or rapidly-varying,
            # then a smoothing prior such as a Gaussian random walk could
            # instead be used here.
            if spline_coef_prior == 'normal':
                spline_coef = pm.Normal('spline_coef',
                                        sd=spline_coef_sd,
                                        shape=[n_spline_knots, F])
            elif spline_coef_prior == 'flat':
                spline_coef = pm.Flat('spline_coef', shape=[n_spline_knots, F])
            elif spline_coef_prior == 'random_walk':
                if spline_rw_sd == 'prior':
                    spline_rw_sd = pm.HalfNormal('spline_rw_sd',
                                                 sd=spline_rw_hyper_sd,
                                                 shape=F)
                spline_jumps = pm.Normal('spline_jumps',
                                         shape=[n_spline_knots, F])

                spline_coef = pm.Deterministic(
                    'spline_coef',
                    tt.cumsum(spline_jumps * spline_rw_sd, axis=0))

            # This inner product sums over the spline coefficients
            func_coef = pm.Deterministic(
                'func_coef',
                (tt.tensordot(spline_basis, spline_coef, axes=[[1], [0]]) +
                 coef[C:]))

        # This is deprecated - it is missing some priors.
        elif func_coef_type == 'bspline_recursive':
            n_spline_coefficients = spline_degree + n_spline_knots + 1
            spline_coef = pm.Normal('spline_coef',
                                    sd=spline_coef_sd,
                                    shape=[n_spline_coefficients, F])
            x = np.linspace(-4, 4, T)

            func_coefs = []
            for f in range(F):
                func_coefs.append(
                    utilities.bspline(spline_coef[:, f], spline_degree,
                                      n_spline_knots, x))
            func_coef = pm.Deterministic('func_coef',
                                         (tt.stack(func_coefs, axis=1)))

        elif func_coef_type == 'polynomial':
            poly_basis = np.zeros([T, poly_order])
            for i in range(1, poly_order + 1):
                poly_basis[:, i - 1] = np.arange(T)**i

            poly_coef = pm.Flat('poly_coef', shape=[poly_order, F])
            func_coef = pm.Deterministic(
                'func_coef',
                tt.tensordot(poly_basis, poly_coef, axes=[[1], [0]]) +
                coef[C:])

        elif func_coef_type == 'linear':
            linear_basis = np.zeros([T, F])
            for i in range(F):
                linear_basis[:, i] = np.arange(T)

            linear_coef = pm.Flat('linear_coef', [F])
            func_coef = pm.Deterministic('func_coef',
                                         linear_basis * linear_coef + coef[C:])

        else:
            raise ValueError('Functional coefficient type not recognized.')

        # This is the additive term in y_hat that comes from the functional
        # part of the model.
        func_contrib = tt.tensordot(functional_data,
                                    func_coef,
                                    axes=[[2, 3], [0, 1]])
        if time_rescale_func:
            func_contrib = func_contrib / T
        # The part of y_hat that comes from the static covariates
        static_contrib = tt.tensordot(static_data, coef[0:C], axes=[2, 0])

        noise_sd = pm.HalfCauchy('noise_sd', beta=1.0)

        # y_hat is the predictive mean.
        y_hat = pm.Deterministic('y_hat',
                                 static_contrib + func_contrib + random_effect)
        #y_hat = pm.Deterministic('y_hat', static_contrib +func_contrib )

        # If the robust error option is used, then a gamma-Student-T distribution
        # is placed on the residuals.
        if robust:
            DOF = pm.Gamma('DOF', alpha=2, beta=0.1)
            response = pm.StudentT('response',
                                   mu=y_hat,
                                   sd=noise_sd,
                                   nu=DOF,
                                   observed=Y)
        else:
            response = pm.Normal('response', mu=y_hat, sd=noise_sd, observed=Y)

        if return_model_only:
            return model

        # NUTS is the default PyMC3 sampler and is what we recommend for fitting.
        if method == 'nuts':
            trace = pm.sample(**sampler_kwargs)

        # Metropolis-Hastings does poorly with lots of correlated parameters,
        # so this fitting method should only be used if T is small or you are
        # fitting a scalarized model.
        elif method == 'mh':
            trace = pm.sample(step=pm.Metropolis(), **sampler_kwargs)

        # There are a number of approximate inference methods available, but
        # none of them gave results that were close to what we got with MCMC.
        else:
            approx = pm.fit(method=method, **sampler_kwargs)
            # `draws` is not in scope here; take it from sampler_kwargs
            trace = approx.sample(sampler_kwargs.get('draws', 500))

    return trace, model
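A minimal invocation sketch under assumed shapes (S subjects, V visits, T functional timesteps, F functional covariates, C static covariates); the response is taken here as one value per subject/visit pair, which is what the likelihood above broadcasts against.

import numpy as np

S, V, T, F, C = 8, 2, 24, 1, 3
functional_data = np.random.randn(S, V, T, F)
static_data = np.random.randn(S, V, C)
Y = np.random.randn(S, V)

trace, model = rwfmm(
    functional_data, static_data, Y,
    coefficient_prior='normal',
    func_coef_type='random_walk',
    sampler_kwargs={'init': 'adapt_diag', 'chains': 1, 'tune': 200, 'draws': 200},
)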