def DiasModel():
    # Relies on the enclosing scope for w, seigle_m, Dias_cyth and self.data.
    # Initial guesses (value=None lets PyMC draw a starting value from the prior)
    p0 = {'R0': 1.0,
          'm': seigle_m,
          'log_tau': None,
          'eta': None,
          'delta': None,
          }
    # Stochastics
    R0 = pymc.Uniform('R0', lower=0.9, upper=1.1, value=1)
    m = pymc.Uniform('m', lower=0.0, upper=1.0, value=p0['m'])
    log_tau = pymc.Uniform('log_tau', lower=-7.0, upper=0.0, value=p0['log_tau'])
    eta = pymc.Uniform('eta', lower=0.0, upper=50.0, value=p0['eta'])
    delta = pymc.Uniform('delta', lower=0.0, upper=1.0, value=p0['delta'])

    # Deterministics
    @pymc.deterministic(plot=False)
    def zmod(R0=R0, m=m, lt=log_tau, eta=eta, delta=delta):
        return Dias_cyth(w, R0, m, lt, eta, delta)

    # Likelihood (plain float division replaces the old past.utils.old_div)
    obs = pymc.Normal('obs', mu=zmod,
                      tau=1.0 / (self.data["zn_err"] ** 2),
                      value=self.data["zn"],
                      size=(2, len(w)),
                      observed=True)
    return locals()
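# Editor's usage sketch (not part of the original source): PyMC2 factories that
# return locals() are typically handed straight to pymc.MCMC, which accepts a
# dict of nodes and ignores non-node entries. A self-contained toy with the
# same pattern; the names _toy_locals_model and mu are illustrative only.
def _toy_locals_model(data):
    import pymc
    mu = pymc.Uniform('mu', lower=-10.0, upper=10.0)
    obs = pymc.Normal('obs', mu=mu, tau=1.0, value=data, observed=True)
    return locals()

# import numpy as np, pymc
# M = pymc.MCMC(_toy_locals_model(np.random.normal(3.0, 1.0, size=50)))
# M.sample(iter=5000, burn=1000)
# print(M.stats()['mu']['mean'])  # should land near 3.0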
def test_beta_binom_2():
    """Fit a fast beta binomial model to a single row of data, confirm the
    mean and std dev are as expected."""
    pi = mc.Uniform('pi', lower=0, upper=1, size=2)
    obs = dismod_mr.model.likelihood.beta_binom_2(
        'prevalence', pi, np.array([1, 1]), np.array([.5, .5]), np.array([10, 10]))
    mc.MAP([pi, obs]).fit()
    assert np.allclose(pi.value, .5, rtol=.01)
def test_initval_resizing(self):
    with pm.Model() as pmodel:
        data = aesara.shared(np.arange(4))
        rv = pm.Uniform("u", lower=data, upper=10, initval="prior")

        ip = pmodel.compute_initial_point(seed=0)
        assert np.shape(ip["u_interval__"]) == (4,)

        data.set_value(np.arange(5))
        ip = pmodel.compute_initial_point(seed=0)
        assert np.shape(ip["u_interval__"]) == (5,)
def model(x, f):
    """PyMC model to fit two Gaussian components."""
    # Priors are uniform
    sigma1 = pymc.Uniform('sigma1', 0.01, 5.0, value=1.0)
    mu1 = pymc.Uniform('mu1', -1.0, 1.0, value=0.0)
    A1 = pymc.Uniform('A1', 0.0, 5.0, value=1.0)
    sigma2 = pymc.Uniform('sigma2', 0.01, 5.0, value=1.0)
    mu2 = pymc.Uniform('mu2', -1.0, 1.0, value=0.0)
    A2 = pymc.Uniform('A2', 0.0, 5.0, value=1.0)

    # Model: sum of two Gaussians
    @pymc.deterministic(plot=False)
    def gauss(x=x, mu1=mu1, mu2=mu2, sigma1=sigma1, sigma2=sigma2, A1=A1, A2=A2):
        return gauss_fit(mu1, sigma1, A1, x) + gauss_fit(mu2, sigma2, A2, x)

    # Weights = 1 / error**2 (`error` comes from the enclosing scope)
    y = pymc.Normal('y', mu=gauss, tau=(1.0 / error ** 2), value=f, observed=True)
    return locals()
def model(x, y):
    c1 = pymc.Uniform('c1', lower=0, upper=100000)    # prior for c1 (uniform from lower to upper)
    c2 = pymc.Uniform('c2', lower=0, upper=100000)    # prior for c2 (uniform from lower to upper)
    c3 = pymc.Uniform('c3', lower=0, upper=100000)    # prior for c3 (uniform from lower to upper)
    c4 = pymc.Uniform('c4', lower=0, upper=10000000)  # prior for c4 (uniform from lower to upper)
    c5 = pymc.Uniform('c5', lower=0, upper=100000)    # prior for c5 (uniform from lower to upper)
    eps = pymc.Uniform('eps', lower=0, upper=0.5)     # prior for the error parameter eps

    @pymc.deterministic
    def function(x=x, c1=c1, c2=c2, c3=c3, c4=c4, c5=c5):
        # `l` is a flag from the enclosing scope selecting between two
        # parameterizations; note the two branches use different c3 and c5 terms.
        if l:
            x_list = []
            for i in range(len(x)):
                if x[i] > 100:
                    term5 = 0.0
                else:
                    term5 = (c5 * x[i]) / np.exp(0.5 * x[i])
                x_list.append(np.log((c4 / np.exp(2.0 * x[i]))
                                     + (c1 / np.exp(x[i]))
                                     + c2 + (c3 * x[i]) + term5))
            return x_list
        else:
            return np.log((c4 / np.exp(2.0 * x)) + (c1 / np.exp(x)) + c2
                          + (c3 * np.exp(x)) + c5 * (np.log(x) / np.sqrt(x)))

    @pymc.deterministic
    def tau(eps=eps):
        return np.power(eps, -2)

    y = pymc.Normal('y', mu=function, tau=tau, value=y, observed=True)
    return locals()
def test_simple(self):
    """We create a variable whose initial value creates a ZeroProbability
    error in its children, and check that robust_init can randomly sample
    the variable until it finds a suitable value.
    """
    lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
    pymc.robust_init(pymc.Uniform, 100, 'data',
                     lower=lower, upper=5, value=[1, 2, 3, 4], observed=True)
def model(x, f):
    """PyMC model to fit a single Gaussian component."""
    # Priors are uniform
    sigma = pymc.Uniform('sigma', 0.01, 5.0, value=1.0)
    mu = pymc.Uniform('mu', -1.0, 1.0, value=0.0)
    A = pymc.Uniform('A', 0.0, 5.0, value=1.0)

    # Model (gauss)
    @pymc.deterministic(plot=False)
    def gauss(x=x, mu=mu, sigma=sigma, A=A):
        return gauss_fit(mu, sigma, A, x)

    # Weights = 1 / error**2 (`error` comes from the enclosing scope)
    y = pymc.Normal('y', mu=gauss, tau=(1.0 / error ** 2), value=f, observed=True)
    return locals()
def Log_splwc(analysis_frequencies, analysis_power, sigma, init=None):
    """Set up a PyMC model: power law for the power spectrum."""
    # Define data and stochastics
    if init is None:
        power_law_index = pymc.Uniform('power_law_index',
                                       lower=-1.0, upper=6.0,
                                       doc='power law index')
        power_law_norm = pymc.Uniform('power_law_norm',
                                      lower=-10.0, upper=10.0,
                                      doc='power law normalization')
        background = pymc.Uniform('background',
                                  lower=-20.0, upper=10.0,
                                  doc='background')
    else:
        power_law_index = pymc.Uniform('power_law_index', value=init[0],
                                       lower=-1.0, upper=6.0,
                                       doc='power law index')
        power_law_norm = pymc.Uniform('power_law_norm', value=init[1],
                                      lower=-10.0, upper=10.0,
                                      doc='power law normalization')
        background = pymc.Uniform('background', value=init[2],
                                  lower=-20.0, upper=10.0,
                                  doc='background')

    # Model for the power law spectrum
    @pymc.deterministic(plot=False)
    def fourier_power_spectrum(p=power_law_index, a=power_law_norm,
                               b=background, f=analysis_frequencies):
        """A pure and simple power law model."""
        return rnspectralmodels.Log_splwc(f, [a, p, b])

    spectrum = pymc.Normal('spectrum',
                           tau=1.0 / (sigma ** 2),
                           mu=fourier_power_spectrum,
                           value=analysis_power,
                           observed=True)

    # Unobserved twin of `spectrum`, usable for posterior predictive checks
    predictive = pymc.Normal('predictive',
                             tau=1.0 / (sigma ** 2),
                             mu=fourier_power_spectrum)

    # MCMC model
    return locals()
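# Editor's note (hedged): the unobserved `predictive` node above shares its mu
# with the observed `spectrum` node, so its trace provides posterior-predictive
# draws of the spectrum. Illustrative consumption, assuming the factory's
# arguments are in scope:
# M = pymc.MCMC(Log_splwc(analysis_frequencies, analysis_power, sigma))
# M.sample(iter=20000, burn=5000)
# ppc_draws = M.trace('predictive')[:]  # one simulated spectrum per retained sample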
def model_factory():
    """Build a PyMC model and return it as a dict."""
    x = pymc.Uniform("x", value=S0[0], lower=XMIN, upper=XMAX)
    y = pymc.Uniform("y", value=S0[1], lower=YMIN, upper=YMAX)
    I = pymc.Uniform("I", value=I0, lower=IMIN, upper=IMAX)

    @pymc.deterministic(plot=False)
    def model_pred(x=x, y=y, I=I):
        return P([x, y], I)

    detector_response = pymc.Poisson(
        "d",
        data,
        value=data,
        observed=True,
        plot=False,
    )
    background = pymc.Poisson(
        "background",
        DWELL * BG,
        value=DWELL * BG,
        observed=True,
        plot=False,
    )
    observed_response = model_pred + background

    # return locals()  # the lazy way
    return {
        "x": x,
        "y": y,
        "I": I,
        "detector_response": detector_response,
        "background": background,
        "observed_response": observed_response,
    }
def run_DP():
    aD = [-10, -9, 10, 11, 20, 21, 42, 43]  # data points
    # nA, nC = 3, 3  # alpha & max no. of clusters
    nC = 5
    nPts = len(aD) + 1  # one extra, unobserved point (Nz below)

    # Clusters
    aUh = [pm.Uniform('UnifH' + str(i), lower=-50, upper=50)
           for i in range(nC)]  # @UndefinedVariable
    # Uh = pm.Uniform('UnifH', lower=-50, upper=60)  # @UndefinedVariable
    aNc = [pm.Normal('NormC' + str(i), mu=aUh[i], tau=1)
           for i in range(nC)]  # @UndefinedVariable

    # Dirichlet & categorical nodes
    Gam = pm.Uniform('UnifG', lower=0, upper=15)  # @UndefinedVariable
    # Gam = pm.Gamma('Gamma', alpha=2.5, beta=2)  # @UndefinedVariable
    Dir = pm.Dirichlet('Dirichlet', theta=[Gam / nC] * nC)  # @UndefinedVariable
    aC = [pm.Categorical('Cat' + str(i), Dir) for i in range(nPts)]  # @UndefinedVariable
    # default arguments bind each categorical and the cluster means at definition time
    aL = [pm.Lambda('p_Norm' + str(i), lambda k=aC[i], aNcl=aNc: aNcl[int(k)])
          for i in range(nPts)]  # @UndefinedVariable

    # Points
    aN = [pm.Normal('NormX' + str(i), mu=aL[i], tau=1, observed=True, value=aD[i])
          for i in range(nPts - 1)]  # @UndefinedVariable
    Nz = pm.Normal('NormZ', mu=aL[-1], tau=1)  # @UndefinedVariable
    return np.concatenate([[Nz, Dir, Gam], aUh, aNc, aC, aN])
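# Editor's sketch (hedged): unlike the locals()-returning factories elsewhere
# in this file, run_DP returns an array of nodes; pymc's MCMC accepts such a
# container directly. Illustrative only:
# M = pm.MCMC(list(run_DP()))
# M.sample(iter=5000, burn=1000)
# first_assignment = M.trace('Cat0')[:]  # sampled cluster index for the first point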
def makeShapePrior(self, data, parts):
    inputcat = data.inputcat
    psfSize = data.psfsize

    pivot, m_slope, m_b, m_cov, c = self.psfDependence(data.options.steppsf, psfSize)

    parts.step_m_prior = pymc.MvNormalCov('step_m_prior', [pivot, m_b, m_slope], m_cov)

    @pymc.deterministic(trace=False)
    def shearcal_m(size=inputcat['size'], mprior=parts.step_m_prior):
        pivot = mprior[0]
        m_b = mprior[1]
        m_slope = mprior[2]
        m = np.zeros_like(size)
        m[size >= pivot] = m_b
        m[size < pivot] = m_slope * (size[size < pivot] - pivot) + m_b
        return np.ascontiguousarray(m.astype(np.float64))

    parts.shearcal_m = shearcal_m

    parts.step_c_prior = pymc.Normal('step_c_prior', c, 1. / (0.0004 ** 2))

    @pymc.deterministic(trace=False)
    def shearcal_c(size=inputcat['size'], cprior=parts.step_c_prior):
        c = cprior * np.ones_like(size)
        return np.ascontiguousarray(c.astype(np.float64))

    parts.shearcal_c = shearcal_c

    parts.sigma = pymc.Uniform('sigma', 0.15, 0.5)  # sigma
    parts.gamma = pymc.Uniform('gamma', 0.003, 0.1)  # gamma
def FIT(x, y):
    # Modified IVIM: dfast is fitted inversely to get low numbers when SNR is bad
    a_init = np.max(y)
    Dwatershed = 4e-3  # 2x free water (2e-3)

    # Priors
    d = {}
    sig = pymc.Uniform('sig', 0, int(a_init / 2), value=int(a_init / 5))
    d['sig'] = sig
    a = pymc.Uniform('a', 0, a_init * 10, value=a_init)
    d['a'] = a
    f = pymc.Uniform('f', 0, 1, value=0)
    d['f'] = f
    dfast = pymc.Uniform('dfast', 0.1, 2. / Dwatershed, value=2. / Dwatershed)
    d['dfast'] = dfast  # dfast is fitted inversely
    dslow = pymc.Uniform('dslow', 0, Dwatershed, value=Dwatershed)
    d['dslow'] = dslow

    # Model
    @pymc.deterministic(plot=False)
    def mod_IVIM(x=x, a=a, f=f, dfast=dfast, dslow=dslow):
        # dfast is fitted inversely
        return a * ((1 - f) * np.exp(-x * dslow)
                    + f * np.exp(-x * (dslow + 1. / dfast)))

    # Likelihood
    d['y'] = pymc.Normal('y', mu=mod_IVIM, tau=sig ** -2, value=y,
                         observed=True, verbose=-1)

    # Run
    R = pymc.MCMC(d)  # build the model
    R.sample(iter=800, burn=100, thin=1, tune_interval=1, verbose=-1)
    a_result = R.stats()['a']['mean']
    f_result = np.median(f.trace())  # the perfusion fraction looks better as a median
    dfast_result = R.stats()['dfast']['mean']
    dslow_result = R.stats()['dslow']['mean']
    return a_result, f_result, 1. / dfast_result, dslow_result  # invert dfast back
def getModel():
    D = pm.Dirichlet('1-Dirichlet', theta=[3, 2, 4])  #@UndefinedVariable
    C1 = pm.Categorical('2-Cat', D)  #@UndefinedVariable
    C2 = pm.Categorical('10-Cat', D)  #@UndefinedVariable
    C3 = pm.Categorical('11-Cat', D)  #@UndefinedVariable
    G0_0 = pm.Gamma('4-Gamma0_1', alpha=1, beta=1.5)  #@UndefinedVariable
    U1 = pm.Uniform('12-Unif', lower=-100, upper=500)  #@UndefinedVariable
    U2 = pm.Uniform('13-Unif', lower=-100, upper=500)  #@UndefinedVariable
    U3 = pm.Uniform('14-Unif', lower=-100, upper=500)  #@UndefinedVariable
    N0_1 = pm.Normal('5-Norm0_1', mu=U1, tau=1)  #@UndefinedVariable
    N0_2 = pm.Normal('6-Norm0_2', mu=U2, tau=1)  #@UndefinedVariable
    N0_3 = pm.Normal('7-Norm0_3', mu=U3, tau=1)  #@UndefinedVariable

    # Bind the component means as default arguments so the Lambdas track the
    # stochastics during sampling (using N0_x.value would freeze the means at
    # model-build time).
    fL1 = lambda n=C1, m1=N0_1, m2=N0_2, m3=N0_3: np.select([n == 0, n == 1, n == 2], [m1, m2, m3])
    fL2 = lambda n=C2, m1=N0_1, m2=N0_2, m3=N0_3: np.select([n == 0, n == 1, n == 2], [m1, m2, m3])
    fL3 = lambda n=C3, m1=N0_1, m2=N0_2, m3=N0_3: np.select([n == 0, n == 1, n == 2], [m1, m2, m3])
    p_N1 = pm.Lambda('p_Norm1', fL1, doc='Pr[Norm|Cat]')
    p_N2 = pm.Lambda('p_Norm2', fL2, doc='Pr[Norm|Cat]')
    p_N3 = pm.Lambda('p_Norm3', fL3, doc='Pr[Norm|Cat]')

    N = pm.Normal('3-Norm', mu=p_N1, tau=1)  #@UndefinedVariable
    obsN1 = pm.Normal('8-Norm', mu=p_N2, tau=1, observed=True, value=0)  #@UndefinedVariable @UnusedVariable
    obsN2 = pm.Normal('9-Norm', mu=p_N3, tau=1, observed=True, value=150)  #@UndefinedVariable @UnusedVariable
    return pm.Model([D, C1, C2, C3, N, G0_0, N0_1, N0_2, N0_3, obsN1, obsN2])
def model(x_obs, y_obs):
    m = pm.Uniform('m', 0, 10, value=0.15)
    n = pm.Uniform('n', -10, 10, value=1.0)
    # this allows error in x_obs (`x_error`/`y_error` come from the enclosing scope)
    x_pred = pm.Normal('x_true', mu=x_obs, tau=(x_error) ** -2)

    # Theoretical values
    @pm.deterministic(plot=False)
    def linearD(x_true=x_pred, m=m, n=n):
        return m * x_true + n

    @pm.deterministic
    def random_operation_on_observable(y_obs=y_obs):
        return y_obs + 0

    # Likelihood
    y = pm.Normal('y', mu=linearD, tau=1.0 / y_error ** 2,
                  value=random_operation_on_observable.value, observed=True)
    return locals()
def pymc_model(data, uvwb_noisy, noise_amp, N, U):
    sig = noise_amp
    phi_0 = pymc.Uniform('phi_0', 0, 50., value=4.)
    X = pymc.Uniform('X', -100000., 100000., value=-5000.)
    Y = pymc.Uniform('Y', -100000., 100000., value=-1000.)
    Z = pymc.Uniform('Z', -100000., 100000., value=-10000.)
    phase_0 = pymc.Uniform('phase_0', -100., 100., value=0.)

    @pymc.deterministic()
    def mmodel(phi_0=phi_0, X=X, Y=Y, Z=Z, phase_0=phase_0):
        k = np.pi * 2. / X
        l = np.pi * 2. / Y
        m = np.pi * 2. / Z
        return model(data, phi_0, k, l, m, phase_0, N, U)

    # Likelihood
    uvwb_fit = pymc.Normal('uvwb_fit', mu=mmodel, tau=1. / sig ** 2,
                           value=uvwb_noisy, observed=True)
    return locals()
def fitMCMC(self, vel_min, vel_max, vel_disp_min, vel_disp_max,
            A_V_min, A_V_max, inputSpec):
    valid_pix = numpy.logical_not(inputSpec._mask)
    wave = inputSpec._wave[valid_pix]
    vel = pymc.Uniform('vel', lower=vel_min, upper=vel_max)
    disp = pymc.Uniform('disp', lower=vel_disp_min, upper=vel_disp_max)
    a = pymc.Uniform('a', lower=A_V_min, upper=A_V_max)

    @pymc.deterministic(plot=False)
    def m(vel=vel, disp=disp, a=a):
        return self.modelSpec(vel, disp, a, wave)[1]

    # tau is a precision, so convert the per-pixel errors to 1/sigma**2
    d = pymc.Normal('d', mu=m, tau=1.0 / inputSpec._error[valid_pix] ** 2,
                    value=inputSpec._data[valid_pix], observed=True)
    M = pymc.MCMC([vel, disp, a, m, d])
    # NB: with iter == burn no post-burn-in samples are retained
    M.sample(burn=1000, iter=1000, thin=10)
    return M
def __setup_sigma_star(self):
    # Jeffreys prior for sigma_star:
    # self.log_sigma_star = pymc.Uniform('log_sigma_star',
    #                                    lower=-3.0,
    #                                    upper=1.0,
    #                                    value=-1.0)
    # self.sigma_star = pymc.Lambda('sigma_star',
    #                               lambda s=self.log_sigma_star: numpy.exp(s))
    self.sigma_star = pymc.Uniform('sigma_star',
                                   lower=0.0,
                                   upper=2.0,
                                   value=1.0)
def test_errors_and_warnings(self):
    with pm.Model():
        A = pm.Normal("A")
        B = pm.Uniform("B")
        strace = pm.sampling.NDArray(vars=[A, B])
        strace.setup(10, 0)

        with pytest.raises(ValueError, match="from existing MultiTrace"):
            pm.sampling._choose_backend(trace=MultiTrace([strace]))

        strace.record({"A": 2, "B_interval__": 0.1})
        assert len(strace) == 1
        with pytest.raises(ValueError, match="Continuation of traces"):
            pm.sampling._choose_backend(trace=strace)
def model():
    varlist = []
    sd = pymc.Uniform('sd', lower=5, upper=100)  # pymc.Gamma("sd", 60, beta=2.0)
    varlist.append(sd)
    a = pymc.Uniform('a', lower=0, upper=100)  # pymc.Normal('a', mu=10, tau=5**-2)
    b = pymc.Uniform('b', lower=.05, upper=2.0)
    varlist.append(a)
    varlist.append(b)
    # `ReData` and `measured` come from the enclosing scope
    nonlinear = a * ReData ** b
    precision = sd ** -2
    results = pymc.Normal('results', mu=nonlinear, tau=precision,
                          value=measured, observed=True)
    return varlist
def model(a_matrix, b_vector):
    # `x_true` and `err` come from the enclosing scope
    x_coeffs = [pm.Uniform('x_coeffs_%i' % i, 0.0, 5.00)
                for i in range(len(x_true))]

    @pm.deterministic(plot=False)
    def linear_solver(x_coeffs=x_coeffs, a_matrix=a_matrix):
        solver_solution = a_matrix.dot(x_coeffs)
        return solver_solution

    @pm.stochastic(observed=True)
    def likelihood(value=b_vector, fit_results=linear_solver, sigma=err):
        chiSq = np.sum(np.square(fit_results - value) / np.square(sigma))
        return -chiSq / 2

    return locals()
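# Editor's note (hedged): the @pm.stochastic likelihood above returns -chi^2/2,
# which equals the Gaussian log-likelihood up to an additive constant that does
# not depend on the parameters, so MCMC explores the same posterior as a
# pymc.Normal likelihood would. A self-contained check against PyMC2's
# normal_like; the helper name is illustrative only.
def _chi2_matches_normal_like():
    import numpy as np
    import pymc
    value = np.array([1.0, 2.0])
    mu = np.array([0.5, 2.5])
    sigma = 0.3
    chi2_term = -np.sum(np.square(value - mu) / np.square(sigma)) / 2
    full_logp = pymc.normal_like(value, mu, 1.0 / sigma ** 2)
    # the difference is the constant n/2 * log(tau / (2*pi)), independent of mu
    return full_logp - chi2_term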
def run_HDP():
    nC = 3  # max no. of clusters
    Gam = pm.Uniform('Gamma0', lower=0, upper=15)  # @UndefinedVariable
    aDir = [Gam / nC] * nC
    Dir0 = pm.Dirichlet('Dirichlet0', theta=aDir)  # @UndefinedVariable
    # PyMC2's Dirichlet carries only the first k-1 components; append the remainder
    lDir0 = pm.Lambda('p_Dir0',
                      lambda d=Dir0: np.concatenate([d, [1 - sum(d)]]))  # @UndefinedVariable

    aNodes1 = get_DP('1', lDir0, [0, 1, 20, 21])
    aNodes2 = get_DP('2', lDir0, [50, 51, 70, 71, 72])
    return np.concatenate([[Dir0], aNodes1, aNodes2])
def test_model_253(xp, xm, y):
    # Simplified model
    import pymc
    import numpy as np
    c1 = pymc.Uniform('c1', lower=0, upper=100000)  # prior for c1 (uniform from lower to upper)
    c2 = pymc.Uniform('c2', lower=0, upper=1000)    # prior for c2 (uniform from lower to upper)
    # c3 = pymc.Uniform('c30', lower=0, upper=100000)  # prior for c3 (uniform from lower to upper)
    eps = pymc.Uniform('eps', lower=0, upper=0.5)   # prior for the error parameter eps

    @pymc.deterministic
    def function(xp=xp, xm=xm, c1=c1, c2=c2):
        return ((c1 * np.power(xm, 2)) / xp
                + (c2 * np.power(xm, 2)) * np.log(xp))

    @pymc.deterministic
    def tau(eps=eps):
        return np.power(eps, -2)

    y = pymc.Normal('y', mu=function, tau=tau, value=y, observed=True)
    return locals()
def simple_normal(bounded_prior=False):
    """Simple normal for testing MLE / MAP; probes issue #2482."""
    x0 = 10.0
    sd = 1.0
    a, b = (9, 12)  # bounds for the uniform RV; need to be asymmetric to reproduce the issue

    with pm.Model(rng_seeder=2482) as model:
        if bounded_prior:
            mu_i = pm.Uniform("mu_i", a, b)
        else:
            mu_i = pm.Flat("mu_i")
        pm.Normal("X_obs", mu=mu_i, sigma=sd, observed=x0)

    return model.initial_point, model, None
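# Editor's usage sketch (hedged): how a fixture like simple_normal is consumed
# in a MAP test; pm.find_MAP is the standard optimizer entry point in this API.
# The helper name is illustrative only.
def _example_map_on_simple_normal():
    _, model, _ = simple_normal(bounded_prior=True)
    with model:
        map_est = pm.find_MAP()
    # with the single observation at x0 = 10.0 inside the (9, 12) bounds,
    # map_est["mu_i"] should sit near 10.0
    return map_est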
def test_model_133(x, y):
    # import pymc
    # import numpy as np
    c1 = pymc.Uniform('c1', lower=-100000, upper=100000)  # prior for c1 (uniform from lower to upper)
    c2 = pymc.Uniform('c2', lower=-100000, upper=100000)  # prior for c2 (uniform from lower to upper)
    c3 = pymc.Uniform('c3', lower=-100000, upper=100000)  # prior for c3 (uniform from lower to upper)
    eps = pymc.Uniform('eps', lower=0, upper=0.00001)     # prior for the error parameter eps

    @pymc.deterministic
    def function(x=x, c1=c1, c2=c2, c3=c3):
        return (c1 / np.exp(x)) + c2 + (c3 * np.exp(x))

    @pymc.deterministic
    def tau(eps=eps):
        return np.power(eps, -2)

    y = pymc.Normal('y', mu=function, tau=tau, value=y, observed=True)
    return locals()
def lognormIS_chainerr(x, y, parts, logsigmarange=(np.log(1e-4), np.log(1.0))):
    logsigma = pymc.Uniform('logsigma', logsigmarange[0], logsigmarange[1])
    parts['logsigma'] = logsigma

    logy = [np.log(yi) for yi in y]

    @pymc.deterministic(trace=False)
    def logmodel(model=parts['model']):
        try:
            return np.log(model)
        except FloatingPointError as e:
            print(model)
            raise e
def run_Bernoulli_Normal():
    aD = [0, 1, 2, 8, 9]
    nPts = len(aD) + 1  # one extra, unobserved point (Nz below)

    # Cluster 1
    Uh1 = pm.Uniform('UnifH1', lower=-50, upper=50)  # @UndefinedVariable
    Nc1 = pm.Normal('NormC1', mu=Uh1, tau=1)  # , observed=True, value=10  # @UndefinedVariable
    # Cluster 2
    Uh2 = pm.Uniform('UnifH2', lower=-50, upper=50)  # @UndefinedVariable
    Nc2 = pm.Normal('NormC2', mu=Uh2, tau=1)  # , observed=True, value=10  # @UndefinedVariable

    # Beta & Bernoulli nodes
    Bet = pm.Beta('Beta', alpha=1, beta=1)  # @UndefinedVariable
    aB = [pm.Bernoulli('Bern' + str(i), Bet) for i in range(nPts)]  # @UndefinedVariable
    aL = [pm.Lambda('p_Norm1' + str(i),
                    lambda k=aB[i], c1=Nc1, c2=Nc2: [c1, c2][int(k)])
          for i in range(nPts)]  # @UndefinedVariable

    # Points
    aN = [pm.Normal('NormX' + str(i), mu=aL[i], tau=1, observed=True, value=aD[i])
          for i in range(nPts - 1)]  # @UndefinedVariable
    Nz = pm.Normal('NormZ', mu=aL[-1], tau=1)  # @UndefinedVariable
    return np.concatenate([[Nz, Nc1, Nc2, Uh1, Uh2, Bet], aB, aN])
def test_deterministic_samples():
    aesara.config.on_opt_error = "raise"
    np.random.seed(13244)

    obs = np.random.normal(10, 2, size=100)
    obs_at = aesara.shared(obs, borrow=True, name="obs")
    with pm.Model() as model:
        a = pm.Uniform("a", -20, 20)
        b = pm.Deterministic("b", a / 2.0)
        c = pm.Normal("c", a, sigma=1.0, observed=obs_at)

        trace = sample_numpyro_nuts(chains=2, random_seed=1322, keep_untransformed=True)

    assert 8 < trace.posterior["a"].mean() < 11
    assert np.allclose(trace.posterior["b"].values, trace.posterior["a"].values / 2)
def test_gaussianrandomwalk_inference(self):
    mu, sigma, steps = 2, 1, 1000
    obs = np.concatenate([[0], np.random.normal(mu, sigma, size=steps)]).cumsum()

    with pm.Model():
        _mu = pm.Uniform("mu", -10, 10)
        _sigma = pm.Uniform("sigma", 0, 10)

        obs_data = pm.MutableData("obs_data", obs)
        grw = GaussianRandomWalk("grw", _mu, _sigma, steps=steps, observed=obs_data)

        trace = pm.sample(chains=1)

    recovered_mu = trace.posterior["mu"].mean()
    recovered_sigma = trace.posterior["sigma"].mean()
    np.testing.assert_allclose([mu, sigma], [recovered_mu, recovered_sigma], atol=0.2)
def linearmodel(x, parts, thetarange=(-np.pi / 2., np.pi / 2.),
                offset=(-1., 1.), forcePositive=True):
    # theta = pymc.Uniform('theta', thetarange[0], thetarange[1])
    # parts['theta'] = theta
    #
    # @pymc.deterministic
    # def slope(theta=theta):
    #     return np.tan(theta)
    # parts['slope'] = slope

    slope = pymc.Uniform('slope', -10., 10.)
    parts['slope'] = slope

    try:
        offset = pymc.Uniform('offset', offset[0], offset[1])
    except TypeError:
        # `offset` was passed as a fixed scalar; use it as-is
        pass
    parts['offset'] = offset

    @pymc.deterministic(trace=False)
    def model(x=x, m=slope, b=offset):
        return m * x + b

    parts['model'] = model

    if forcePositive is True:
        @pymc.potential
        def positive(model=model):
            if (model <= 0).any():
                raise pymc.ZeroProbability
            return 0.
        parts['positive'] = positive
def get_modelsC(q0, q1):
    # `x`, `y` and `fC` come from the enclosing scope
    c0 = pymc.Normal("c0", q0, 1e3)
    c1 = pymc.Normal("c1", q1, 1e3)
    c2 = pymc.Uniform("c2", x.min(), 1.5)
    err = pymc.Normal("err", 1e4, 0.1)

    @pymc.deterministic
    def observation(c0=c0, c1=c1, c2=c2):
        return fC(x, c0, c1, c2)

    # observation model
    obsmodel = pymc.Normal("yC", observation, err, value=y, observed=True)
    # full Bayesian model
    fullmodel = pymc.Model([observation, c0, c1, c2, obsmodel, err])
    return obsmodel, fullmodel