Exemplo n.º 1
0
def _noise_variance(hparams, tau_cmmn, verbose=0):
    """Create the two noise-scale random variables ``h1`` and ``h2``.

    The prior family is selected by ``hparams.dist_std_noise``:
    ``'tr_normal'`` (half-normal), ``'log_normal'`` or ``'uniform'``.
    The scale parameter of h-i is built from ``1 / tau_cmmn[i]``.

    Raises
    ------
    ValueError
        For any unrecognized ``dist_std_noise`` value.
    """
    choice = hparams.dist_std_noise

    # Dispatch: distribution class, the keyword it takes for the scale,
    # and the label used in the verbose message.
    if choice == 'tr_normal':
        dist, scale_kw, label = HalfNormal, 'tau', 'Truncated normal'
    elif choice == 'log_normal':
        dist, scale_kw, label = Lognormal, 'tau', 'Log normal'
    elif choice == 'uniform':
        dist, scale_kw, label = Uniform, 'upper', 'Uniform'
    else:
        raise ValueError("Invalid value of dist_std_noise: %s" % choice)

    h1 = dist('h1', dtype=floatX, **{scale_kw: np.float32(1 / tau_cmmn[0])})
    h2 = dist('h2', dtype=floatX, **{scale_kw: np.float32(1 / tau_cmmn[1])})

    if 10 <= verbose:
        print('%s for prior scales' % label)

    return h1, h2
Exemplo n.º 2
0
def _noise_variance(
    hparams, tau_cmmn, HalfNormal, Lognormal, Uniform, floatX, verbose):
    if hparams['prior_scale'] == 'tr_normal':
        h1 = HalfNormal('h1', tau=np.float32(1 / tau_cmmn[0]), dtype=floatX)
        h2 = HalfNormal('h2', tau=np.float32(1 / tau_cmmn[1]), dtype=floatX)

        if 10 <= verbose:
            print('Truncated normal for prior scales')

    elif hparams['prior_scale'] == 'log_normal':
        h1 = Lognormal('h1', tau=np.float32(1 / tau_cmmn[0]), dtype=floatX)
        h2 = Lognormal('h2', tau=np.float32(1 / tau_cmmn[1]), dtype=floatX)

        if 10 <= verbose:
            print('Log normal for prior scales')

    elif hparams['prior_scale'] == 'uniform':
        h1 = Uniform('h1', upper=np.float32(1 / tau_cmmn[0]), dtype=floatX)
        h2 = Uniform('h2', upper=np.float32(1 / tau_cmmn[1]), dtype=floatX)

        if 10 <= verbose:
            print('Uniform for prior scales')

    else:
        raise ValueError("Invalid value of prior_scale: %s" % 
            hparams['prior_scale'])

    return h1, h2
Exemplo n.º 3
0
    def init_hierarchicals(self, problem_config):
        """
        Initialize hierarchical parameters.
        Ramp estimation in azimuth and range direction of a radar scene.

        Parameters
        ----------
        problem_config : configuration object exposing the
            ``hierarchicals`` mapping of parameter name -> prior.

        Raises
        ------
        ConfigInconsistentError
            If plane fitting is enabled but a required ramp parameter is
            missing from ``problem_config.hierarchicals``.
        """
        hierarchicals = problem_config.hierarchicals
        if self.config.fit_plane:
            logger.info('Estimating ramp for each dataset...')
            for data in self.datasets:
                # Ramps only apply to interferogram datasets.
                if isinstance(data, heart.DiffIFG):
                    for hierarchical_name in data.plane_names():

                        hierarchical_keys = utility.list2string(
                            hierarchicals.keys())
                        # BUG FIX: a second check that raised when plane
                        # removal was *disabled* used to live here; it could
                        # never trigger inside this ``fit_plane`` guard and
                        # has been removed as dead code.
                        if hierarchical_name not in hierarchicals:
                            raise ConfigInconsistentError(
                                'Plane corrections enabled, but they are'
                                ' not defined in the problem configuration!'
                                ' (hierarchicals). Looking for: %s \n'
                                ' Got: %s' %
                                (hierarchical_name, hierarchical_keys))

                        param = hierarchicals[hierarchical_name]
                        # Equal bounds mean the parameter is fixed, not
                        # sampled.
                        if not num.array_equal(param.lower, param.upper):
                            kwargs = dict(name=param.name,
                                          shape=param.dimension,
                                          lower=param.lower,
                                          upper=param.upper,
                                          testval=param.testvalue,
                                          transform=None,
                                          dtype=tconfig.floatX)
                            # Fall back to Uniform.dist when the named
                            # constructor raises TypeError for these kwargs.
                            try:
                                self.hierarchicals[
                                    hierarchical_name] = Uniform(**kwargs)
                            except TypeError:
                                kwargs.pop('name')
                                self.hierarchicals[hierarchical_name] = \
                                    Uniform.dist(**kwargs)
                        else:
                            logger.info(
                                'not solving for %s, got fixed at %s' %
                                (param.name,
                                 utility.list2string(param.lower.flatten())))
                            self.hierarchicals[hierarchical_name] = param.lower
                else:
                    logger.info('No plane for GNSS data.')

        logger.info('Initialized %i hierarchical parameters '
                    '(ramps).' % len(self.hierarchicals))
Exemplo n.º 4
0
 def test_sample(self):
     """Smoke test: a GP model with uniform priors can be sampled."""
     # 100 inputs on [0, 1] with random observations.
     X = np.linspace(0,1,100)[:,None]
     Y = np.random.randn(100,1)
     with Model() as model:
         M = gp.mean.Zero()
         # Uniform priors on the Matern length scale and the noise scale.
         l = Uniform('l', 0, 5)
         K = gp.cov.Matern32(1, l)
         sigma = Uniform('sigma', 0, 10)
         # make a Gaussian model
         random_test = gp.GP('random_test', mean_func=M, cov_func=K, sigma=sigma, observed={'X':X, 'Y':Y})
         # Draw 500 samples; the test passes if sampling completes.
         tr = sample(500, init=None, progressbar=False, random_seed=self.random_seed)
Exemplo n.º 5
0
def get_garch_model():
    """Build a GARCH(1, 1) model for a small toy return series.

    Priors: ``alpha1`` and ``beta1`` uniform, with ``beta1`` bounded by
    ``1 - alpha1`` (keeping alpha1 + beta1 below 1), and a wide normal on
    the mean. The observations are modeled as Normal(mu, theta) where
    theta follows the GARCH volatility recursion.

    Returns
    -------
    The constructed (unsampled) pymc3 Model.
    """
    returns = np.array([28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float64)
    prev_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float64)
    base_var = np.array([10, 10, 16, 8, 9, 11, 12, 18], dtype=np.float64)

    with Model() as model:
        alpha1 = Uniform("alpha1", 0.0, 1.0, shape=returns.shape)
        beta1 = Uniform("beta1", 0.0, 1 - alpha1, shape=returns.shape)
        mu = Normal("mu", mu=0.0, sigma=100.0, shape=returns.shape)
        # Conditional standard deviation from the GARCH(1, 1) recursion.
        variance = (base_var
                    + alpha1 * tt.pow(returns - mu, 2)
                    + beta1 * tt.pow(prev_sigma, 2))
        theta = tt.sqrt(variance)
        Normal("obs", mu, sigma=theta, observed=returns)
    return model
Exemplo n.º 6
0
    def test_linear_component(self):
        """Fit a named LinearComponent, check posterior means recover the
        generating parameters, and verify exactly which model variables
        were created."""
        expected_vars = {
            'sigma', 'sigma_interval__', 'y_obs', 'lm_x0', 'lm_Intercept'
        }
        with Model() as model:
            linear = LinearComponent(
                self.data_linear['x'],
                self.data_linear['y'],
                name='lm')  # yields lm_x0, lm_Intercept
            sigma = Uniform('sigma', 0, 20)  # yields sigma_interval__
            Normal('y_obs', mu=linear.y_est, sigma=sigma,
                   observed=self.y_linear)  # yields y_obs
            # Start from the MAP of sigma and slice-sample all variables.
            map_start = find_MAP(vars=[sigma])
            slice_step = Slice(model.vars)
            trace = sample(500, tune=0, step=slice_step, start=map_start,
                           progressbar=False,
                           random_seed=self.random_seed)

            # Posterior means should match the generating parameters to
            # one decimal place.
            assert round(
                abs(np.mean(trace['lm_Intercept']) - self.intercept), 1) == 0
            assert round(abs(np.mean(trace['lm_x0']) - self.slope), 1) == 0
            assert round(abs(np.mean(trace['sigma']) - self.sd), 1) == 0
        assert expected_vars == set(model.named_vars.keys())
Exemplo n.º 7
0
    def test_linear_component(self):
        """Fit a named LinearComponent, check posterior means recover the
        generating parameters, and verify exactly which model variables
        were created."""
        vars_to_create = {
            "sigma", "sigma_interval__", "y_obs", "lm_x0", "lm_Intercept"
        }
        with Model() as model:
            lm = LinearComponent(self.data_linear["x"],
                                 self.data_linear["y"],
                                 name="lm")  # yields lm_x0, lm_Intercept
            sigma = Uniform("sigma", 0, 20)  # yields sigma_interval__
            Normal("y_obs", mu=lm.y_est, sigma=sigma,
                   observed=self.y_linear)  # yields y_obs
            # Start from the MAP of sigma and slice-sample all variables.
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(500,
                           tune=0,
                           step=step,
                           start=start,
                           progressbar=False,
                           random_seed=self.random_seed)

            # Posterior means should match the generating parameters to
            # one decimal place.
            assert round(abs(np.mean(trace["lm_Intercept"]) - self.intercept),
                         1) == 0
            assert round(abs(np.mean(trace["lm_x0"]) - self.slope), 1) == 0
            assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
        assert vars_to_create == set(model.named_vars.keys())
Exemplo n.º 8
0
    def get_random_variables(self):
        """
        Evaluate problem setup and return random variables dictionary.
        Has to be executed in a "with model context"!

        Returns
        -------
        rvs : dict
            variable random variables
        fixed_params : dict
            fixed random parameters
        """
        pc = self.config.problem_config

        logger.debug('Optimization for %i sources', pc.n_sources)

        rvs = dict()
        fixed_params = dict()
        for param in pc.priors.values():
            # A prior whose lower and upper bounds coincide everywhere is
            # treated as a fixed parameter rather than a random variable.
            if not num.array_equal(param.lower, param.upper):

                shape = bconfig.get_parameter_shape(param, pc)

                kwargs = dict(
                    name=param.name,
                    shape=shape,
                    lower=param.lower,
                    upper=param.upper,
                    testval=param.testvalue,
                    transform=None,
                    dtype=tconfig.floatX)

                # Fall back to Uniform.dist when the named constructor
                # raises TypeError for these keywords.
                try:
                    rvs[param.name] = Uniform(**kwargs)

                except TypeError:
                    kwargs.pop('name')
                    rvs[param.name] = Uniform.dist(**kwargs)

            else:
                logger.info(
                    'not solving for %s, got fixed at %s' % (
                        param.name,
                        list2string(param.lower.flatten())))
                fixed_params[param.name] = param.lower

        return rvs, fixed_params
Exemplo n.º 9
0
def _get_L_cov(L_cov_21, floatX, Uniform, tt):
    if type(L_cov_21) in (float, int):
        return np.array([[1.0, L_cov_21], 
                         [L_cov_21, 1.0]]).astype(floatX)
    elif _is_uniform(L_cov_21):
        r = parse('U({:f},{:f})', L_cov_21.replace(' ', ''))
        L_cov_21_ = Uniform('L_cov_21_', lower=r[0], upper=r[1])
        return tt.stack([1.0, L_cov_21_, L_cov_21_, 1.0]).reshape((2, 2))
Exemplo n.º 10
0
    def test_sample(self):
        """Sample a small GP model, then draw GP predictions at new
        inputs from the tail of the trace."""
        # 10 inputs on [0, 1] with random observations.
        X = np.linspace(0, 1, 10)[:, None]
        Y = np.random.randn(10)
        with Model() as model:
            M = gp.mean.Zero()
            # Uniform priors on the Matern length scale and noise scale.
            l = Uniform('l', 0, 5)
            K = gp.cov.Matern32(1, l)
            sigma = Uniform('sigma', 0, 10)
            # make a Gaussian model
            random_test = gp.GP('random_test', mean_func=M, cov_func=K, sigma=sigma, observed={'X':X, 'Y':Y})
            tr = sample(20, init=None, progressbar=False, random_seed=self.random_seed)

        # test prediction
        Z = np.linspace(0, 1, 5)[:, None]
        with model:
            # Predict (noise-free) at Z using the last 3 posterior draws.
            out = gp.sample_gp(tr[-3:], gp=random_test, X_values=Z, obs_noise=False,
                               random_seed=self.random_seed, progressbar=False, chol_const=True)
Exemplo n.º 11
0
    def init_hierarchicals(self, problem_config):
        """
        Initialize hierarchical parameters.
        Ramp estimation in azimuth and range direction of a radar scene.

        Raises
        ------
        ConfigInconsistentError
            If plane corrections are enabled but the ramp parameter for a
            dataset is missing from ``problem_config.hierarchicals``.
        """
        hierarchicals = problem_config.hierarchicals
        if self.config.fit_plane:
            logger.info('Estimating ramp for each dataset...')
            for data in self.datasets:
                hierarchical_name = data.name + '_ramp'
                # BUG FIX: a check that raised when plane removal was
                # *disabled* used to live here, but it could never trigger
                # inside this ``fit_plane`` guard and has been removed.
                if isinstance(data, heart.DiffIFG):
                    if hierarchical_name not in hierarchicals:
                        raise ConfigInconsistentError(
                            'Plane corrections enabled, but they are'
                            ' not defined in the problem configuration!'
                            ' (hierarchicals)')

                    param = hierarchicals[hierarchical_name]
                    kwargs = dict(name=param.name,
                                  shape=param.dimension,
                                  lower=param.lower,
                                  upper=param.upper,
                                  testval=param.testvalue,
                                  transform=None,
                                  dtype=tconfig.floatX)
                    # Fall back to Uniform.dist when the named constructor
                    # raises TypeError for these keywords.
                    try:
                        self.hierarchicals[data.name] = Uniform(**kwargs)

                    except TypeError:
                        kwargs.pop('name')
                        self.hierarchicals[data.name] = \
                            Uniform.dist(**kwargs)

        logger.info('Initialized %i hierarchical parameters '
                    '(ramps).' % len(self.hierarchicals))
Exemplo n.º 12
0
    def init_hierarchicals(self, problem_config):
        """
        Initialise random variables for temporal station corrections.

        Raises
        ------
        ConfigInconsistentError
            If the station-corrections switch disagrees with whether the
            correction parameter is present in the problem configuration.
        """
        if not self.config.station_corrections and \
                self.correction_name in problem_config.hierarchicals:
            raise ConfigInconsistentError(
                'Station corrections disabled, but they are defined'
                ' in the problem configuration!')

        if self.config.station_corrections and \
                self.correction_name not in problem_config.hierarchicals:
            raise ConfigInconsistentError(
                'Station corrections enabled, but they are not defined'
                ' in the problem configuration!')

        if self.correction_name in problem_config.hierarchicals:
            logger.info(
                'Estimating time shift for each station and waveform map...')
            for wmap in self.wavemaps:
                # One time-shift parameter per station in this waveform map.
                nhierarchs = len(wmap.get_station_names())

                logger.info('For %s with %i shifts' %
                            (wmap.time_shifts_id, nhierarchs))
                param = problem_config.hierarchicals[self.correction_name]
                # Broadcast the scalar prior bounds to one entry per station.
                kwargs = dict(name=wmap.time_shifts_id,
                              shape=nhierarchs,
                              lower=num.repeat(param.lower, nhierarchs),
                              upper=num.repeat(param.upper, nhierarchs),
                              testval=num.repeat(param.testvalue, nhierarchs),
                              transform=None,
                              dtype=tconfig.floatX)

                # Fall back to Uniform.dist when the named constructor
                # raises TypeError for these keywords.
                try:
                    station_corrs_rv = Uniform(**kwargs)

                except TypeError:
                    kwargs.pop('name')
                    station_corrs_rv = Uniform.dist(**kwargs)

                self.hierarchicals[wmap.time_shifts_id] = station_corrs_rv
        else:
            # NOTE(review): assigned but never used after this point in the
            # visible code.
            nhierarchs = 0
Exemplo n.º 13
0
    def test_linear_component_from_formula(self):
        """Build the linear model from a formula string and check the
        posterior means recover intercept, slope and noise sd."""
        with Model() as model:
            lm = LinearComponent.from_formula('y ~ x', self.data_linear)
            sigma = Uniform('sigma', 0, 20)
            Normal('y_obs', mu=lm.y_est, sd=sigma, observed=self.y_linear)
            # Start from the MAP of sigma and slice-sample all variables.
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(500, step=step, start=start, progressbar=False, random_seed=self.random_seed)

            # Means should match the generating values to one decimal place.
            self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1)
            self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1)
            self.assertAlmostEqual(np.mean(trace['sigma']), self.sd, 1)
Exemplo n.º 14
0
def test_find_MAP():
    """find_MAP should locate (mu, sigma) close to (0, 1) on standardized
    data, with both gradient-based and Powell optimization."""
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # Standardize so the true optimum sits very near mu=0, sigma=1.
    data = (data - np.mean(data)) / np.std(data)

    with Model():
        mu = Uniform("mu", -1, 1)
        sigma = Uniform("sigma", 0.5, 1.5)
        Normal("y", mu=mu, tau=sigma**-2, observed=data)

        # Gradient-based minimization, then a non-gradient method.
        gradient_est = starting.find_MAP(progressbar=False)
        powell_est = starting.find_MAP(progressbar=False, method="Powell")

    for estimate in (gradient_est, powell_est):
        close_to(estimate["mu"], 0, tol)
        close_to(estimate["sigma"], 1, tol)
Exemplo n.º 15
0
def test_find_MAP():
    """find_MAP should recover (mu, sigma) close to (0, 1) on standardized
    data, using the default optimizer and the legacy ``fmin=`` interface."""
    tol = 2.0**-11  # 16 bit machine epsilon, a low bar
    data = np.random.randn(100)
    # data should be roughly mean 0, std 1, but let's
    # normalize anyway to get it really close
    data = (data - np.mean(data)) / np.std(data)

    with Model() as model:
        # transform=None keeps the variables on their natural scale.
        mu = Uniform('mu', -1, 1, transform=None)
        sigma = Uniform('sigma', .5, 1.5, transform=None)
        y = Normal('y', mu=mu, tau=sigma**-2, observed=data)

        # Test gradient minimization
        map_est1 = starting.find_MAP()
        # Test non-gradient minimization
        map_est2 = starting.find_MAP(fmin=starting.optimize.fmin_powell)

    close_to(map_est1['mu'], 0, tol)
    close_to(map_est1['sigma'], 1, tol)

    close_to(map_est2['mu'], 0, tol)
    close_to(map_est2['sigma'], 1, tol)
Exemplo n.º 16
0
def createSignalModelExponential(data):
    """
    Toy model that treats the first ~10% of the waveform as an exponential.
    Does a good job of finding the start time (t_0).
    Since I made this as a toy, its super brittle. Waveform must be normalized.

    Parameters
    ----------
    data : 1-D array-like of normalized waveform samples.

    Returns
    -------
    The constructed (unsampled) pymc3 Model.
    """
    with Model() as signal_model:
        # Change point: the sample index where the exponential turns on.
        switchpoint = Uniform('switchpoint', lower=0, upper=len(data),
                              testval=len(data)/2)

        noise_sigma = HalfNormal('noise_sigma', sd=1.)

        # Modeling these parameters this way is why wf needs to be normalized
        exp_rate = Uniform('exp_rate', lower=0, upper=.5, testval=0.05)
        exp_scale = Uniform('exp_scale', lower=0, upper=.5, testval=0.1)

        # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``float`` is the documented replacement.
        timestamp = np.arange(0, len(data), dtype=float)

        # Rate is zero before the switchpoint, exp_rate after it.
        rate = switch(switchpoint >= timestamp, 0, exp_rate)

        baseline_model = Deterministic(
            'baseline_model',
            exp_scale * (exp((timestamp - switchpoint) * rate) - 1.))

        baseline_observed = Normal("baseline_observed", mu=baseline_model,
                                   sd=noise_sigma, observed=data)
    return signal_model
Exemplo n.º 17
0
    def test_linear_component(self):
        """Build the linear model from a formula string and check the
        posterior means recover intercept, slope and noise sd."""
        with Model() as model:
            lm = LinearComponent.from_formula("y ~ x", self.data_linear)
            sigma = Uniform("sigma", 0, 20)
            Normal("y_obs", mu=lm.y_est, sigma=sigma, observed=self.y_linear)
            # Start from the MAP of sigma and slice-sample all variables.
            start = find_MAP(vars=[sigma])
            step = Slice(model.vars)
            trace = sample(
                500, tune=0, step=step, start=start, progressbar=False, random_seed=self.random_seed
            )

            # Means should match the generating values to one decimal place.
            assert round(abs(np.mean(trace["Intercept"]) - self.intercept), 1) == 0
            assert round(abs(np.mean(trace["x"]) - self.slope), 1) == 0
            assert round(abs(np.mean(trace["sigma"]) - self.sd), 1) == 0
Exemplo n.º 18
0
def _dist_from_str(name, dist_params_):
    if type(dist_params_) is str:
        dist_params = dist_params_.split(',')

        if dist_params[0].strip(' ') == 'uniform':
            rv = Uniform(name,
                         lower=float(dist_params[1]),
                         upper=float(dist_params[2]))
        else:
            raise ValueError("Invalid value of dist_params: %s" % dist_params_)

    elif type(dist_params_) is float:
        rv = dist_params_

    else:
        raise ValueError("Invalid value of dist_params: %s" % dist_params_)

    return rv
Exemplo n.º 19
0
# Per-generation observed case counts pulled out of the Iobs array.
Iobs1 = Iobs[0]
Iobs2 = Iobs[1]
Iobs3 = Iobs[2]
Iobs4 = Iobs[3]
Iobs5 = Iobs[4]
Iobs6 = Iobs[5]
Iobs7 = Iobs[6]
Iobs8 = Iobs[7]
Iobs9 = Iobs[8]
Iobs10 = Iobs[9]

basic_model = Model()

with basic_model:
    # Priors for unknown model parameters
    effprop = Uniform("effprop", lower=0, upper=1)
    beta = Uniform("beta", lower=0, upper=0.02)
    reporting = Uniform("reporting", lower=0, upper=1)

    # Initial susceptible pool: a binomial thinning of N by effprop,
    # minus the single index case I1.
    N0 = Binomial("N0", n=N, p=effprop)
    I1 = 1
    S1 = N0 - I1
    # Infection probability for the next generation given I1 infectives.
    Psi12 = 1 - (1 - beta)**I1
    # Likelihood (sampling distribution) of observations unfolding the for loop
    obs1 = Binomial("obs1", n=I1, p=reporting, observed=Iobs1)

    I2 = Binomial("I2", n=S1, p=Psi12)
    # NOTE(review): S2 subtracts I1 rather than the newly infected I2;
    # a chain-binomial model would usually have S2 = S1 - I2 -- confirm.
    S2 = S1 - I1
    Psi23 = 1 - (1 - beta)**I2
    obs2 = Binomial("obs2", n=I2, p=reporting, observed=Iobs2)
0
## Plot histograms of the measurements before and after the drug.

sns.distplot(beat_carotene_levels_before_drug).set_title(
    "beat_carotene_levels_before_drug")

sns.distplot(beat_carotene_levels_after_drug).set_title(
    "beat_carotene_levels_after_drug")

# NOTE(review): this bare expression only shows the shape in an
# interactive session; it has no effect as a script statement.
beat_carotene_levels_before_drug.shape
## The data look roughly normal, so use a normal likelihood with the
## priors mean ~ U(0, 500) and sigma ~ U(0, 100) defined below.

from pymc3 import Model, Uniform, Normal

# Priors for the unknown mean and standard deviation.
with Model() as beta_carotene_model:
    mean = Uniform('mean', lower=0, upper=500)
    sigma = Uniform('sigma', 0, 100)

## Likelihood: the pre-drug measurements are modeled as normal.

with beta_carotene_model:
    print(beat_carotene_levels_before_drug)
    y = Normal('beat_carotene_levels_before_drug',
               mu=mean,
               sd=sigma,
               observed=beat_carotene_levels_before_drug)

## Approximate the posterior using variational inference.

from pymc3 import fit
T= T.flatten()


size= len(Y)
#TRANSFORM FROM DATAFRAME TO NP ARRAY FORMAT


# Specifing the parameters that control the MCMC (these will be used throughout the code). 

basic_model = Model()


with basic_model: 
	#priors for unknown model parameters
	c = Gamma('c',10,1)
	Tm= Uniform('Tm',31,45)
	T0= Uniform('T0',5,24)
	tau= Gamma('tau',0.0001, 0.0001)

	mu_temp= c*T*((T-T0)*(T0<T))*np.sqrt((Tm-T)*(Tm>T))
	mu= 0*(mu_temp<0) + mu_temp*(mu_temp>0)

	Y_obs = Normal('Y_obs',mu=mu, sd=tau, observed= Y)


from pymc3 import Metropolis, sample, find_MAP
from scipy import optimize
trace_copy= {}
with basic_model:  

    # obtain starting values via MAP
Exemplo n.º 22
0
    def init_hierarchicals(self, problem_config):
        """
        Initialise random variables for temporal station corrections.

        Raises
        ------
        ConfigInconsistentError
            If the station-corrections switch disagrees with whether the
            correction parameter is present in the problem configuration.
        """
        hierarchicals = problem_config.hierarchicals
        self._hierarchicalnames = []
        if not self.config.station_corrections and \
                self.correction_name in hierarchicals:
            raise ConfigInconsistentError(
                'Station corrections disabled, but they are defined'
                ' in the problem configuration!')

        if self.config.station_corrections and \
                self.correction_name not in hierarchicals:
            raise ConfigInconsistentError(
                'Station corrections enabled, but they are not defined'
                ' in the problem configuration!')

        if self.correction_name in hierarchicals:
            logger.info(
                'Estimating time shift for each station and waveform map...')
            for wmap in self.wavemaps:
                hierarchical_name = wmap.time_shifts_id
                # One time-shift parameter per station in this waveform map.
                nhierarchs = len(wmap.get_station_names())

                logger.info('For %s with %i shifts' %
                            (hierarchical_name, nhierarchs))

                # Prefer a wavemap-specific imported prior; otherwise
                # broadcast the global correction prior per station.
                if hierarchical_name in hierarchicals:
                    logger.info('Using wavemap specific imported:'
                                ' %s ' % hierarchical_name)
                    param = hierarchicals[hierarchical_name]
                else:
                    logger.info('Using global %s' % self.correction_name)
                    # Deep-copy so broadcasting below does not mutate the
                    # shared global prior definition.
                    param = copy.deepcopy(
                        problem_config.hierarchicals[self.correction_name])
                    param.lower = num.repeat(param.lower, nhierarchs)
                    param.upper = num.repeat(param.upper, nhierarchs)
                    param.testvalue = num.repeat(param.testvalue, nhierarchs)

                if hierarchical_name not in self.hierarchicals:
                    # Equal bounds mean the parameter is fixed, not sampled.
                    if not num.array_equal(param.lower, param.upper):
                        kwargs = dict(name=hierarchical_name,
                                      shape=param.dimension,
                                      lower=param.lower,
                                      upper=param.upper,
                                      testval=param.testvalue,
                                      transform=None,
                                      dtype=tconfig.floatX)

                        # Fall back to Uniform.dist when the named
                        # constructor raises TypeError for these keywords.
                        try:
                            self.hierarchicals[hierarchical_name] = Uniform(
                                **kwargs)
                        except TypeError:
                            kwargs.pop('name')
                            self.hierarchicals[hierarchical_name] = \
                                Uniform.dist(**kwargs)

                        self._hierarchicalnames.append(hierarchical_name)
                    else:
                        logger.info(
                            'not solving for %s, got fixed at %s' %
                            (param.name,
                             utility.list2string(param.lower.flatten())))
                        self.hierarchicals[hierarchical_name] = param.lower
Exemplo n.º 23
0
# partial pooling: varying intercept and slope model
with Model() as varying_intercept_slope:
    # Priors: hyperpriors for the county-level intercepts and slopes.
    mu_a = Normal('mu_a', mu=0., sd=10)
    sigma_a = HalfCauchy('sigma_a', beta=5)
    mu_beta = Normal('mu_beta', mu=0., sd=10)
    sigma_beta = HalfCauchy('sigma_beta', beta=2.5)
    # Random intercepts - one adaptive prior for each county
    a_county = Normal('a_county', mu=mu_a, sd=sigma_a, shape=counties)
    # Random slopes - one adaptive prior for each county
    beta_county = Normal('beta_county',
                         mu=mu_beta,
                         sd=sigma_beta,
                         shape=counties)
    # Model error
    sigma_y = Uniform('sigma_y', lower=0, upper=100)
    # linear model - specifying a diff "a" and diff "beta" for ea county
    y_hat = a_county[county] + beta_county[county] * floor_measure
    # Data likelihood
    y_like = Normal('y_like', mu=y_hat, sd=sigma_y, observed=log_radon)

with varying_intercept_slope:
    # 500 tuning steps, then 500 kept draws.
    varying_intercept_slope_trace = sample(500, tune=500)

# Trace diagnostics and per-county forest plots.
pm.traceplot(varying_intercept_slope_trace[:])
pm.forestplot(varying_intercept_slope_trace, varnames=['beta_county'])
pm.forestplot(varying_intercept_slope_trace, varnames=['mu_beta'])

# Posterior-mean intercept and slope per county (used for plotting).
xvals = np.arange(2)
b = varying_intercept_slope_trace['a_county'].mean(axis=0)
m = varying_intercept_slope_trace['beta_county'].mean(axis=0)
Exemplo n.º 24
0
    def init_hierarchicals(self, problem_config):
        """
        Initialize hierarchical parameters.
        Ramp estimation in azimuth and range direction of a radar scene and/or
        Rotation of GNSS stations around an Euler pole

        Raises
        ------
        ConfigInconsistentError
            If an enabled correction has no matching entry in
            ``problem_config.hierarchicals``.
        """
        hierarchicals = problem_config.hierarchicals
        self._hierarchicalnames = []
        for number, corr in enumerate(
                self.config.corrections_config.iter_corrections()):
            logger.info('Evaluating config for %s corrections '
                        'for datasets...' % corr.feature)
            if corr.enabled:
                for data in self.datasets:
                    if data.name in corr.dataset_names:
                        hierarchical_names = corr.get_hierarchical_names(
                            name=data.name, number=number)
                    else:
                        hierarchical_names = []

                    for hierarchical_name in hierarchical_names:
                        # NOTE(review): this branch can never trigger -- it
                        # requires ``not corr.enabled`` inside an
                        # ``if corr.enabled:`` block. Dead code.
                        if not corr.enabled and hierarchical_name in hierarchicals:

                            raise ConfigInconsistentError(
                                '%s %s disabled, but they are defined'
                                ' in the problem configuration'
                                ' (hierarchicals)!' %
                                (corr.feature, data.name))

                        if corr.enabled and hierarchical_name not in hierarchicals \
                                and data.name in corr.dataset_names:
                            raise ConfigInconsistentError(
                                '%s %s corrections enabled, but they are'
                                ' not defined in the problem configuration!'
                                ' (hierarchicals)' % (corr.feature, data.name))

                        param = hierarchicals[hierarchical_name]
                        if hierarchical_name not in self.hierarchicals:
                            # Equal bounds mean the parameter is fixed,
                            # not sampled.
                            if not num.array_equal(param.lower, param.upper):
                                kwargs = dict(name=param.name,
                                              shape=param.dimension,
                                              lower=param.lower,
                                              upper=param.upper,
                                              testval=param.testvalue,
                                              transform=None,
                                              dtype=tconfig.floatX)

                                # Fall back to Uniform.dist when the named
                                # constructor raises TypeError for these
                                # keywords.
                                try:
                                    self.hierarchicals[
                                        hierarchical_name] = Uniform(**kwargs)
                                except TypeError:
                                    kwargs.pop('name')
                                    self.hierarchicals[hierarchical_name] = \
                                        Uniform.dist(**kwargs)

                                self._hierarchicalnames.append(
                                    hierarchical_name)
                            else:
                                logger.info(
                                    'not solving for %s, got fixed at %s' %
                                    (param.name,
                                     utility.list2string(
                                         param.lower.flatten())))
                                self.hierarchicals[
                                    hierarchical_name] = param.lower
            else:
                logger.info('No %s correction!' % corr.feature)

        logger.info('Initialized %i hierarchical parameters.' %
                    len(self.hierarchicals))
Exemplo n.º 25
0
Arquivo: utils.py Projeto: mpcoll/DMpy
def generate_pymc_distribution(p,
                               n_subjects=None,
                               hierarchical=False,
                               mle=False):
    """
    Turns parameters into pymc3 parameter distributions for model fitting.

    Parameters
    ----------
    p :
        Parameter object; its ``pymc_distribution`` attribute (and, for
        transformed/bounded distributions, ``backward``/``forward``) are
        set in place.
    n_subjects : int, optional
        Number of subjects being fit; used as the distribution shape when
        fitting more than one subject.
    hierarchical : bool
        If True, draw per-subject parameters from group-level priors
        (requires more than one subject).
    mle : bool
        If True, replace informative priors with uniform/flat ones so the
        fit reduces to maximum likelihood estimation.

    Returns
    -------
    The parameter ``p`` with its ``pymc_distribution`` attribute set.
    """
    # Extra keyword arguments the user attached to the parameter; forwarded
    # verbatim to the pymc3 distribution constructor.
    if hasattr(p, '_Parameter__pymc_kwargs'):
        kwargs = p._Parameter__pymc_kwargs
    else:
        kwargs = {}

    if mle and p.distribution not in ('uniform', 'flat', 'fixed'):
        # MLE: swap informative priors for non-informative ones -- uniform
        # when the parameter has bounds, flat otherwise.
        if p.upper_bound is not None and p.lower_bound is not None:
            print(
                "\nParameter {0} distribution is {1}, converting to uniform with bounds ({2}, {3}) for MLE"
                .format(p.name, p.distribution, p.lower_bound, p.upper_bound))
            p.distribution = 'uniform'
        elif p.upper_bound is not None:
            # Fixed a stray ')' in the original message.
            print(
                "\nParameter {0} distribution is {1}, converting to uniform with upper bound {2} for MLE"
                .format(p.name, p.distribution, p.upper_bound))
            p.distribution = 'uniform'
        elif p.lower_bound is not None:
            # Fixed a stray ')' in the original message.
            print(
                "\nParameter {0} distribution is {1}, converting to uniform with lower bound {2} for MLE"
                .format(p.name, p.distribution, p.lower_bound))
            p.distribution = 'uniform'
        else:
            print(
                "\nParameter {0} distribution is {1}, converting to flat for MLE\n"
                .format(p.name, p.distribution))
            p.distribution = 'flat'

    if p.fixed:
        # Fixed parameters become a constant vector (one value per subject),
        # not a free random variable.
        p.pymc_distribution = T.ones(n_subjects) * p.mean

    else:  # there's probably a cleaner way to do this

        if hierarchical and n_subjects < 2:
            raise ValueError(
                "Hierarchical parameters only possible with > 1 subject")

        if p.distribution == 'normal' and p.lower_bound is not None:
            # Bounded normal: always bounded below here, and bounded above
            # too when an upper bound was given. (Collapses the two nearly
            # identical branches of the original implementation.)
            if p.upper_bound is not None:
                BoundedNormal = Bound(Normal,
                                      lower=p.lower_bound,
                                      upper=p.upper_bound)
            else:
                BoundedNormal = Bound(Normal, lower=p.lower_bound)

            if hierarchical:
                # Per-subject values drawn around a group-level mean with a
                # weakly informative group-level spread.
                p.pymc_distribution = BoundedNormal(
                    p.name,
                    mu=BoundedNormal(p.name + '_group_mu',
                                     mu=p.mean,
                                     sd=p.variance),
                    sd=Uniform(
                        p.name + '_group_sd', lower=0, upper=100
                    ),  # TODO need to allow adjustment of these values somehow
                    shape=n_subjects,
                    **kwargs)
            elif n_subjects > 1:
                p.pymc_distribution = BoundedNormal(p.name,
                                                    mu=p.mean,
                                                    sd=p.variance,
                                                    shape=n_subjects,
                                                    **kwargs)
            else:  # is this necessary?
                p.pymc_distribution = BoundedNormal(p.name,
                                                    mu=p.mean,
                                                    sd=p.variance,
                                                    **kwargs)
            p.backward, p.forward = get_transforms(p)

        elif p.distribution == 'normal':
            if hierarchical:
                p.pymc_distribution = Normal(p.name,
                                             mu=Normal(p.name + '_group_mu',
                                                       mu=p.mean,
                                                       sd=p.variance),
                                             sd=Uniform(p.name + '_group_sd',
                                                        lower=0,
                                                        upper=100),
                                             shape=n_subjects,
                                             transform=p.transform_method,
                                             **kwargs)
            elif n_subjects > 1:
                p.pymc_distribution = Normal(p.name,
                                             mu=p.mean,
                                             sd=p.variance,
                                             shape=n_subjects,
                                             transform=p.transform_method,
                                             **kwargs)
            else:
                p.pymc_distribution = Normal(p.name,
                                             mu=p.mean,
                                             sd=p.variance,
                                             transform=p.transform_method,
                                             **kwargs)
            # Only transformed distributions expose backward/forward maps.
            if hasattr(p.pymc_distribution, "transformation"):
                p.backward, p.forward = get_transforms(p)

        elif p.distribution == 'uniform':
            # BUG FIX: the original tested ``T.gt(n_subjects, 1)`` here,
            # which builds a symbolic Theano tensor -- truth-testing it in a
            # plain Python ``elif`` fails. Use an integer comparison. After
            # the fix, the hierarchical and multi-subject branches were
            # identical, so they are merged.
            if hierarchical or n_subjects > 1:
                p.pymc_distribution = Uniform(p.name,
                                              lower=p.lower_bound,
                                              upper=p.upper_bound,
                                              shape=n_subjects,
                                              **kwargs)
            else:
                p.pymc_distribution = Uniform(p.name,
                                              lower=p.lower_bound,
                                              upper=p.upper_bound,
                                              **kwargs)
            if hasattr(p.pymc_distribution, "transformation"):
                p.backward, p.forward = get_transforms(p)

        elif p.distribution == 'flat':
            # Hierarchical and multi-subject branches were identical in the
            # original; merged.
            if hierarchical or n_subjects > 1:
                p.pymc_distribution = Flat(p.name,
                                           shape=n_subjects,
                                           transform=p.transform_method,
                                           **kwargs)
            else:
                p.pymc_distribution = Flat(p.name, **kwargs)
            if hasattr(p.pymc_distribution, "transformation"):
                p.backward, p.forward = get_transforms(p)

    return p
Exemplo n.º 26
0
from pymc3 import Model, Normal, HalfNormal, Uniform, Bernoulli, find_MAP, NUTS, sample, Slice, Deterministic
from scipy import optimize
import pymc3 as pm
# Number of survey respondents.
N = 100

# Randomized-response style model: the reported answer is the truthful one
# when the first coin comes up heads, otherwise the outcome of a second
# coin flip -- visible directly in the Deterministic expression below.
basic_model = Model()
with basic_model:
    # Prior on the population frequency of cheating.
    p = Uniform("freq_cheating", 0, 1)
    # Latent truthful answer (1 = cheated).
    true_answers = Bernoulli("truths", p)
    # The two privacy coins.
    # NOTE(review): none of these Bernoullis specify shape=N, so they are
    # scalar variables; the division by float(N) below suggests shape=N
    # (one draw per respondent) was intended -- confirm.
    first_coin_flips = Bernoulli("first_flips", 0.5)
    second_coin_flips = Bernoulli("second_flips", 0.5)

    # Reported "yes": truth if first coin is heads, else the second coin.
    determin_val1 = Deterministic(
        'determin_val1', first_coin_flips * true_answers +
        (1 - first_coin_flips) * second_coin_flips)
    # Proportion of "yes" answers.
    # NOTE(review): this value is never used afterwards and no observed
    # data is attached to the model -- verify against the source example.
    determin_val = determin_val1.sum() / float(N)

    # Initialize the chains at the MAP estimate (Powell's method).
    start = find_MAP(fmin=optimize.fmin_powell)

    # Slice-sample one variable at a time; note that ``trace`` is
    # overwritten by each call, so only the last run survives.
    step = Slice(vars=[true_answers])
    # draw 100 posterior samples (original comment said 5000)
    trace = sample(100, step=step, start=start)

    step = Slice(vars=[first_coin_flips])
    # draw 100 posterior samples (original comment said 5000)
    trace = sample(100, step=step, start=start)

    step = Slice(vars=[second_coin_flips])
    # draw 100 posterior samples (original comment said 5000)
    trace = sample(100, step=step, start=start)
Exemplo n.º 27
0
    def init_hyperparams(self):
        """
        Evaluate the problem setup and initialize ``self.hyperparams``.

        For every hypername reported by each composite, either create a
        Uniform random variable (when lower != upper bounds) or pin the
        hyperparameter at its fixed lower-bound value.

        Raises
        ------
        InconsistentNumberHyperparametersError
            If the configuration and the datasets/datatypes disagree on the
            set of required hyperparameters.
        """
        pc = self.config.problem_config
        # Work on a copy: entries are popped as they are consumed, and any
        # leftovers signal a configuration mismatch at the end.
        hyperparameters = copy.deepcopy(pc.hyperparameters)

        hyperparams = {}
        n_hyp = 0
        # Flips to False when RVs are built outside a pymc model context
        # (see the TypeError fallback below).
        modelinit = True
        self._hypernames = []
        for datatype, composite in self.composites.items():
            hypernames = composite.get_hypernames()

            for hp_name in hypernames:
                if hp_name in hyperparameters.keys():
                    hyperpar = hyperparameters.pop(hp_name)
                    if composite.config:  # only data composites
                        if composite.config.dataset_specific_residual_noise_estimation:
                            # One hyperparameter entry per dataset: wavemap
                            # size for seismic data, station count otherwise.
                            if datatype == 'seismic':
                                wmap = composite.hyper2wavemap(hp_name)
                                ndata = wmap.hypersize
                            else:
                                ndata = len(composite.get_all_station_names())
                        else:
                            ndata = 1
                    else:
                        ndata = 1
                else:
                    raise InconsistentNumberHyperparametersError(
                        'Datasets and -types require additional '
                        ' hyperparameter(s): %s!' % hp_name)

                # Equal lower and upper bounds mark a fixed hyperparameter.
                if not num.array_equal(hyperpar.lower, hyperpar.upper):
                    dimension = hyperpar.dimension * ndata

                    kwargs = dict(name=hyperpar.name,
                                  shape=dimension,
                                  lower=num.repeat(hyperpar.lower, ndata),
                                  upper=num.repeat(hyperpar.upper, ndata),
                                  testval=num.repeat(hyperpar.testvalue,
                                                     ndata),
                                  dtype=tconfig.floatX,
                                  transform=None)

                    try:
                        # Inside a pymc model context this registers a named
                        # random variable.
                        hyperparams[hp_name] = Uniform(**kwargs)

                    except TypeError:
                        # No model context: fall back to an unnamed
                        # distribution object instead.
                        kwargs.pop('name')
                        hyperparams[hp_name] = Uniform.dist(**kwargs)
                        modelinit = False

                    n_hyp += dimension
                    self._hypernames.append(hyperpar.name)
                else:
                    logger.info(
                        'not solving for %s, got fixed at %s' %
                        (hyperpar.name, list2string(hyperpar.lower.flatten())))
                    # NOTE(review): keyed by hyperpar.name here but by
                    # hp_name above -- presumably identical; confirm.
                    hyperparams[hyperpar.name] = hyperpar.lower

        # Anything left was configured but never requested by a composite.
        if len(hyperparameters) > 0:
            raise InconsistentNumberHyperparametersError(
                'There are hyperparameters in config file, which are not'
                ' covered by datasets/datatypes.')

        if modelinit:
            logger.info('Optimization for %i hyperparameters in total!', n_hyp)

        self.hyperparams = hyperparams
Exemplo n.º 28
0
# Get the temperatures (column 'T' of the input dataframe).
T = data['T']
# NOTE(review): DataFrame/Series.as_matrix() was removed in modern pandas;
# the replacement is .to_numpy() -- confirm the pandas version in use.
T = T.as_matrix()
T = T.flatten()

size = len(Y)
# TRANSFORM FROM DATAFRAME TO NP ARRAY FORMAT

# Specifying the parameters that control the MCMC (these will be used throughout the code).

basic_model_GCR = Model()

with basic_model_GCR:
    # Priors for unknown model parameters: scale c, upper/lower thermal
    # limits Tm and T0, and noise parameter tau.
    c = Gamma('c', 1, 10)
    Tm = Uniform('Tm', 25, 45)
    T0 = Uniform('T0', 0, 24)
    # NOTE(review): Gamma(1e-4, 1e-4) is the classic *precision* prior,
    # yet below it is passed as ``sd=tau`` -- confirm whether ``tau=tau``
    # was intended.
    tau = Gamma('tau', 0.0001, 0.0001)

    # Temperature response: zero outside (T0, Tm); in between, presumably
    # a Briere-type curve c*T*(T-T0)*sqrt(Tm-T) -- TODO confirm intent.
    mu_temp = c * T * ((T - T0) * (T0 < T)) * np.sqrt((Tm - T) * (Tm > T))
    # Clamp negative response values to zero.
    mu = 0 * (mu_temp < 0) + mu_temp * (mu_temp > 0)

    Y_obs = Normal('Y_obs', mu=mu, sd=tau, observed=Y)

from pymc3 import Metropolis, sample, find_MAP
from scipy import optimize

with basic_model_GCR:

    # Obtain starting values via MAP (Powell's method).
    start = find_MAP(fmin=optimize.fmin_powell)
Exemplo n.º 29
0
# Visualize the distribution of the observations before fitting.
plt.hist(Y, 100, alpha=0.7, label='Distribution of observable Y')
plt.xlabel('Y')
plt.ylabel('Counts')
plt.show()


# Linear model Y ~ alpha + beta[0]*X1 + beta[1]*X2 with Gaussian noise.
basic_model = Model()

with basic_model:

    # Priors for unknown model parameters
    alpha = Normal('alpha', mu=0, sd=10)
    beta = Normal('beta', mu=0, sd=10, shape=2)
    sigma = HalfNormal('sigma', sd=1)

    # NOTE(review): the four variables below never appear in mu or the
    # likelihood -- presumably intended to model X1/X2 as latent; confirm.
    x1_mean = Uniform('x1_mean', lower=5, upper=15)
    x1_sigma = Uniform('x1_sigma', lower=0.5, upper=1.5 )
    x2_mean = Uniform('x2_mean', lower=10, upper =20)
    x2_sigma = Uniform('x2_sigma', lower=1, upper=6)


    # Expected value of outcome
    mu = alpha + beta[0]*X1 + beta[1]*X2

    # Likelihood (sampling distribution) of observations
    Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)

    # Start sampling from the MAP estimate.
    map_estimate = find_MAP(model=basic_model)
#    map_estimate = find_MAP(model=basic_model, fmin=optimize.fmin_powell)

    trace=sample( 2000, start=map_estimate, njobs=4)
Exemplo n.º 30
0
    lex_decision.model_parameters["latency_factor"] = lf
    lex_decision.model_parameters["latency_exponent"] = le
    lex_decision.model_parameters["decay"] = decay
    activation_dict = {
        x[0]: x[1]
        for x in zip(LEMMA_CHUNKS, activation_from_time)
    }
    lex_decision.decmem.activations.update(activation_dict)
    sample = run_lex_decision_task()
    return np.array(sample)


lex_decision_with_bayes = pm.Model()
with lex_decision_with_bayes:
    # prior for activation
    decay = Uniform('decay', lower=0, upper=1)
    # priors for accuracy
    noise = Uniform('noise', lower=0, upper=5)
    threshold = Normal('threshold', mu=0, sd=10)
    # priors for latency
    lf = HalfNormal('lf', sd=1)
    le = HalfNormal('le', sd=1)
    # compute activation
    scaled_time = time**(-decay)

    def compute_activation(scaled_time_vector):
        # Drop infinite scaled times, then return the log of the sum of the
        # remaining (finite) entries.
        finite_mask = 1 - tt.isinf(scaled_time_vector)
        finite_entries = scaled_time_vector[finite_mask.nonzero()]
        return tt.log(finite_entries.sum())