"""Fit pyrrole torsion parameters with torsionfit and sample them with PyMC.

Reads Gaussian torsion-scan logs, builds a TorsionFitModel for the CGenFF
parameter set and writes the MCMC traces to an sqlite database.
"""
from torsionfit import TorsionScanSet as ScanSet
import torsionfit.TorsionFitModel as Model
from torsionfit import sqlite_plus
from pymc import MCMC  # (was imported twice in the original)
from parmed.charmm import CharmmParameterSet
import glob

# Input files: CHARMM psf/str pair and the QM torsion-scan logs.
structure = 'pyrrol.psf'
stream = 'pyrrol.str'
scan = glob.glob('torsion-scan/*.log')

pyrrol_scan = ScanSet.parse_gauss(scan, structure)
# Keep only the geometry-optimized points of the scan.
pyrrol_opt = pyrrol_scan.extract_geom_opt()

# set up torsionfit model over the CGenFF force field.
param = CharmmParameterSet('../charmm_ff/top_all36_cgenff.rtf',
                           '../charmm_ff/par_all36_cgenff.prm')
model = Model.TorsionFitModelEliminatePhase(param, pyrrol_opt,
                                            decouple_n=True,
                                            sample_n5=True,
                                            stream=stream)

# Sample the posterior; traces are persisted via the sqlite_plus backend.
sampler = MCMC(model.pymc_parameters, db=sqlite_plus, dbname='pyrrol_all_2.db')
sampler.sample(iter=10000)
"""Reproduce Figure 3 of:

Kruschke, J. (2012) Bayesian estimation supersedes the t test.
Journal of Experimental Psychology: General.

According to the article, the data were generated from t distributions
of known values.
"""
import best
import best.plot
from pymc import MCMC

drug = (101, 100, 102, 104, 102, 97, 105, 105, 98, 101, 100, 123, 105, 103,
        100, 95, 102, 106, 109, 102, 82, 102, 100, 102, 102, 101, 102, 102,
        103, 103, 97, 97, 103, 101, 97, 104, 96, 103, 124, 101, 101, 100,
        101, 101, 104, 100, 101)
placebo = (99, 101, 100, 101, 102, 100, 97, 101, 104, 101, 102, 102, 100,
           105, 88, 101, 100, 104, 100, 100, 100, 101, 102, 103, 97, 101,
           101, 100, 101, 99, 101, 100, 100, 101, 100, 99, 101, 100, 102,
           99, 100, 99)

# Two-group BEST model: treatment vs. control.
observations = {'drug': drug, 'placebo': placebo}
mcmc = MCMC(best.make_model(observations))
mcmc.sample(iter=110000, burn=10000)

figure = best.plot.make_figure(mcmc)
figure.savefig('smart_drug.png', dpi=70)
def test_regression_155():
    """Regression test for issue #155: sampling must not fail when the
    thinning interval exceeds the number of iterations (thin > iter)."""
    sampler = MCMC(disaster_model, db='ram')
    # 10 iterations, no burn-in, thin every 100 draws -- deliberately
    # thin > iter.
    sampler.sample(iter=10, burn=0, thin=100, progress_bar=0)
def run_model(mtype, msubtype, model_params, desc, niter=20000, nburnin=15000,
              nthin=5, nchains=3, am_delay=5000, am_interval=1000,
              burn_till_tuned=True):
    """
    Run PYMC model.

    Builds the spatial negative-binomial N-mixture model, samples ``nchains``
    chains with adaptive-Metropolis step methods, and stores the traces in an
    HDF5 database under ``run/<mtype>/<msubtype>/<desc>/<desc>.hdf5``.

    Parameters:
        mtype, msubtype, desc: path components for the output directory.
        model_params: kwargs passed to ``model.build_model``; the init entries
            are resampled in place if the initial state has zero probability.
        niter, nburnin, nthin, nchains: sampling schedule.
        am_delay, am_interval: AdaptiveMetropolis tuning schedule.
        burn_till_tuned: passed to the first chain's ``sample`` call.

    Returns:
        0 on success.

    Raises:
        ZeroProbability: if no valid starting state is found within the
            retry budget.
    """
    model = spat_nmixture_nb
    curr_dir = os.getcwd()
    output_dir = os.path.join(curr_dir, 'run', mtype, msubtype, desc)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    ##### ---- RUN MODEL ---- #####
    os.chdir(output_dir)
    try:
        trace_file = os.path.join(output_dir, desc + '.hdf5')
        if os.path.exists(trace_file):
            os.remove(trace_file)

        # Retry model construction with fresh random inits whenever the
        # initial state has zero probability.  Bounded so a mis-specified
        # model cannot spin forever (the original `while 1` loop could).
        max_init_attempts = 100
        for attempt in range(max_init_attempts):
            try:
                M = MCMC(model.build_model(**model_params), db='hdf5',
                         dbname=trace_file)
                break
            except ZeroProbability:
                if attempt == max_init_attempts - 1:
                    raise
                model_params.update({
                    'ls_beta_inits': np.random.normal(
                        0, 1, model_params['ls_dmat'].shape[1]),
                    'beta_inits': np.random.normal(
                        0, 1, model_params['dmat'].shape[1])
                })

        # Adaptive Metropolis on each correlated parameter block.
        for block in (M.ls_beta, M.beta, M.eps, M.alpha):
            M.use_step_method(AdaptiveMetropolis, block,
                              delay=am_delay, interval=am_interval)

        # First chain may extend burn-in until tuning converges.
        M.sample(niter, burn=nburnin, thin=nthin,
                 burn_till_tuned=burn_till_tuned)
        for i in range(nchains - 1):
            M.sample(niter, burn=nburnin, thin=nthin)
        M.db.close()
    finally:
        # Always restore the caller's working directory, even on failure.
        os.chdir(curr_dir)
    return 0
def detect( self, filtered_sig, fs, step=2, iter_count=4000, burn_count=2000, ): '''Detect and returns P wave detection results. Note: This function returns None when detection fails. ''' # r_list = self.r_list raw_sig = filtered_sig sig_seg = raw_sig max_hermit_level = 8 p_model = P_model_Gaussian.MakeModel(sig_seg, max_hermit_level=max_hermit_level) M = MCMC(p_model) M.sample(iter=iter_count, burn=burn_count, thin=10) # retrieve parameters hermit_coefs = list() for h_ind in xrange(0, P_model_Gaussian.HermitFunction_max_level): hermit_value = np.mean(M.trace('hc%d' % h_ind)[:]) hermit_coefs.append(hermit_value) fitting_curve = np.zeros(len(sig_seg), ) for level, coef in zip(xrange(0, max_hermit_level), hermit_coefs): fitting_curve += P_model_Gaussian.HermitFunction( level, len(sig_seg)) * coef plt.figure(1) plt.clf() plt.plot(sig_seg, label='ECG') plt.plot(fitting_curve, label='fitting curve') # Hermit coef vis plt.bar(xrange(0, len(hermit_coefs)), [ 0.12, ] * len(hermit_coefs), width=0.5, alpha=0.3, color='grey') plt.bar(xrange(0, len(hermit_coefs)), [ -0.12, ] * len(hermit_coefs), width=0.5, alpha=0.3, color='grey') plt.bar(xrange(0, len(hermit_coefs)), np.array(hermit_coefs) * 0.2, width=0.5, color='r') plt.legend() plt.grid(True) plt.show(block=False) # plt.savefig('./results/tmp/%d.png' % int(time.time())) results = dict() return results
DisasterModel.l.logp #-2.6491936762267811 @deterministic(plot=False) def r(s=s, e=e, l=l): """ Concatenate Poisson means """ out = np.empty(len(disasters_array)) out[:s] = e out[s:] = l return out from pymc.examples import DisasterModel from pymc import MCMC M = MCMC(DisasterModel) M.isample(iter=10000, burn=1000, thin=10) M.trace('s')[:] #array([41, 40, 40, ..., 43, 44, 44]) from pylab import hist, show hist(M.trace('l')[:]) #(array([ 8, 52, 565, 1624, 2563, 2105, 1292, 488, 258, 45]), #array([ 0.52721865, 0.60788251, 0.68854637, 0.76921023, 0.84987409, # 0.93053795, 1.01120181, 1.09186567, 1.17252953, 1.25319339]), #<a list of 10 Patch objects>) show() from pymc.Matplot import plot
#Averaged experiments @stochastic def vars(x_plus_gen=x_plus_gen, y_plus_gen=y_plus_gen, x_minus_gen=x_minus_gen, y_minus_gen=y_minus_gen, value=0): return pymc.mv_normal_like( [x_plus_gen, y_plus_gen, x_minus_gen, y_minus_gen], experiment['y_exp'], experiment['y_covar_inv']) # Model if load_last_model: mcmc = pymc.database.pickle.load('mcmc-{}.pickle'.format(name)) else: mcmc = MCMC([vars, gamma, deltaB, rB], db='pickle', dbmode='w', dbname='mcmc-{}.pickle'.format(name)) mcmc.sample(iter=N, burn=min(5000, int(N / 10)), thin=1) for v in var_list: commons.plot( mcmc.trace(v.__name__)[:], v.__doc__, "pics/{}_{}.png".format(name, v.__name__)) for x, y in combinations(var_list, 2): commons.plot_2d_hist( mcmc.trace(x.__name__)[:], mcmc.trace(y.__name__)[:], "pics/{}_{}-{}_hist.png".format(name, x.__name__, y.__name__))
def test_stats_after_reload(self):
    """Statistics can still be computed after reloading a pickled database."""
    reloaded = database.pickle.load('MCMC.pickle')
    sampler = MCMC(disaster_model, db=reloaded)
    sampler.stats()
    reloaded.close()
    os.remove('MCMC.pickle')
"""Sample a truncated-normal model of NIIRS image-quality scores."""
#NIIRS has a scale of 0-8
lower, upper = 0, 10
mu, sigma = 4.5, 1.5

#use a truncated normal random variable
###this normalizes our bounds- but i don't think we want this###
#[a,b] =(lower - mu) / sigma, (upper - mu) / sigma
###this may not be what we actually want to use as upper and lower
from pymc import TruncatedNormal, HalfNormal, Normal, Model, MCMC, Metropolis, Uniform

# PyMC 2 parameterizes the normal family with a PRECISION tau (= 1/sd**2),
# not a standard deviation -- the original passed sigma directly as tau.
mu_dist = TruncatedNormal('mu_dist', mu=mu, tau=1.0 / sigma ** 2,
                          a=lower, b=upper)
# Spread prior, truncated at zero since an sd is always positive.  tau=1.0
# matches the "had sd=1" note in the original (tau was missing entirely,
# which TruncatedNormal requires).
sigma_dist = TruncatedNormal('sigma_dist', mu=0.2, tau=1.0, a=0, b=10)
# sigma_dist is a standard deviation, so convert it to a precision as well.
Y_obs = TruncatedNormal('Y_obs', mu=mu_dist, tau=sigma_dist ** -2,
                        a=lower, b=upper)
#, observed=True) this was giving error- must have an initial value if observed=True

sim = MCMC([mu_dist, sigma_dist, Y_obs])
# 50000 iterations, 10000 burn-in, no thinning.
sim.sample(50000, 10000, 1)

# Posterior histograms.  (normed= is the pre-matplotlib-3.1 spelling of
# density=; kept for compatibility with the environment this ran in.)
y_samples = sim.trace("Y_obs")[:]
fig = plt.figure(figsize=(5, 5))
axes = fig.add_subplot(111)
axes.hist(y_samples, bins=50, normed=True, color="blue")
fig.show()

mu_samples = sim.trace("mu_dist")[:]
fig = plt.figure(figsize=(5, 5))
axes = fig.add_subplot(111)
axes.hist(mu_samples, bins=50, normed=True, color="green")
fig.show()

sig_samples = sim.trace("sigma_dist")[:]
"""Sample the disaster model and histogram the late-mean posterior."""
from pymc.examples import disaster_model
from pymc import MCMC
from pylab import hist, show, rcParams

rcParams['figure.figsize'] = 10, 10

sampler = MCMC(disaster_model)
sampler.sample(iter=65536, burn=8000, thin=16)

# Dark-red histogram of the post-switchpoint disaster rate.
hist(sampler.trace('late_mean')[:], color='#b02a2a')
show()
import interloper from pymc import MCMC from pymc.Matplot import plot from matplotlib.pyplot import hist2d import matplotlib.pyplot as plt M = MCMC(interloper) M.sample(iter=500000, burn=1000, thin=5) print plot(M) M.alpha.summary() M.beta.summary() alpha_arr = M.trace('alpha')[:] beta_arr = M.trace('beta')[:] H, xedges, yedges, img = hist2d(alpha_arr, beta_arr, 250) extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]] fig = plt.figure() ax = fig.add_subplot(1, 1, 1) im = ax.imshow(H, cmap=plt.cm.hot, extent=extent) fig.colorbar(im, ax=ax) plt.axes().set_aspect(0.67) plt.tight_layout(pad=0.3) plt.savefig('interloper_gaussian_distant_high_iteration.png') plt.close()
"""Sample the NOBA model and plot/save posterior histograms."""
import numpy as np
from matplotlib import pylab as plt
import noba_model
import pymc
from pymc import MCMC
from pymc.Matplot import plot as mcplot

sampler = MCMC(noba_model)
sampler.sample(iter=2000000, burn=0, thin=10, verbose=0)
mcplot(sampler)

# Intrinsic vs. social event rates, overlaid on one axis.
plt.hist([sampler.trace('intrinsic_rate')[:]], 500, label='intrinsic')
plt.hist([sampler.trace('social_rate')[:]], 500, label='social')
plt.legend(loc='upper left')
plt.xlim(0, 0.2)
plt.show()

# Lag posterior.
plt.hist([sampler.trace('lag')[:]])
plt.legend(loc='upper left')
plt.xlim(0, 5)
plt.show()

# Distance posterior.
plt.hist([sampler.trace('dist')[:]], 100)
plt.legend(loc='upper left')
plt.xlim(0, 200)
plt.show()

# Persist the raw traces for later analysis.
np.savetxt('distNOBA.txt', sampler.trace('dist')[:])
np.savetxt('lagNOBA.txt', sampler.trace('lag')[:])
def detectGaussian(
        self,
        filtered_sig,
        annots,
        fs,
        step=2,
        iter_count=4000,
        burn_count=2000,
        max_hermit_level=4,
        savefig_path=None,
        figID=None,
):
    '''Detect and returns P wave detection results.

    Fits a Gaussian P wave plus a Hermite-basis baseline to the segment by
    MCMC, refines Ponset/Poffset from the fit, and plots the result.

    Note:
        * Input is a segment contains only P wave.
        * This function returns None when detection fails.
    '''
    filtered_sig = np.array(filtered_sig)
    # Normalization: rescale the segment into [0, 1] unless it is near-flat.
    max_val = np.max(filtered_sig)
    min_val = np.min(filtered_sig)
    if (max_val - min_val) > 1e-6:
        filtered_sig = (filtered_sig - min_val) / (max_val - min_val)
    raw_sig = filtered_sig
    sig_seg = raw_sig
    p_model = P_model_Gaussian.MakeModel(sig_seg,
                                         annots,
                                         max_hermit_level=max_hermit_level)
    M = MCMC(p_model)
    M.sample(iter=iter_count, burn=burn_count, thin=10)

    # retrieve parameters: posterior mean of each Hermite coefficient trace.
    hermit_coefs = list()
    for h_ind in xrange(0, P_model_Gaussian.HermitFunction_max_level):
        hermit_value = np.mean(M.trace('hc%d' % h_ind)[:])
        hermit_coefs.append(hermit_value)
    # Hermite-basis baseline; zip() truncates to max_hermit_level terms.
    fitting_curve = np.zeros(len(sig_seg), )
    for level, coef in zip(xrange(0, max_hermit_level), hermit_coefs):
        fitting_curve += P_model_Gaussian.HermitFunction(
            level, len(sig_seg)) * coef

    # Gaussian: read the reference annotation positions from annots.
    pos_ponset = None
    pos_p = None
    pos_poffset = None
    for pos, label in annots:
        if label == 'Ponset':
            pos_ponset = pos
        elif label == 'P':
            pos_p = pos
        elif label == 'Poffset':
            pos_poffset = pos
    gaussian_curve = np.zeros(len(sig_seg), )
    common_length = len(sig_seg)
    g_amp = np.mean(M.trace('g_amp')[:])
    g_sigma = np.mean(M.trace('g_sigma')[:])
    g_dc = np.mean(M.trace('dc')[:])
    # Double-length Gaussian wave; the slice below aligns it with the
    # annotated P peak position.
    gaussian_curve = P_model_Gaussian.GetGaussianPwave(
        len(sig_seg) * 2, g_amp, g_sigma / 3, g_dc)
    gaussian_segment = gaussian_curve[common_length - pos_p:
                                      2 * common_length - pos_p]
    baseline_curve = fitting_curve + g_dc
    baseline_curve = baseline_curve.tolist()
    fitting_curve += gaussian_segment

    # Compute results: scan for where the full fit departs from the baseline
    # by >= meet_threshold.  The first crossing becomes Ponset; every later
    # crossing overwrites Poffset, so Poffset ends at the LAST crossing.
    results = dict(P=pos_p)
    meet_threshold = 0.07
    for ind in xrange(0, len(fitting_curve)):
        if ('Ponset' not in results and
                abs(fitting_curve[ind] - baseline_curve[ind]) >=
                meet_threshold):
            results['Ponset'] = ind
        elif ('Ponset' in results and
                abs(fitting_curve[ind] - baseline_curve[ind]) >=
                meet_threshold):
            results['Poffset'] = ind
    # If not found: fall back to the reference annotations.
    if 'Ponset' not in results:
        results['Ponset'] = pos_ponset
        results['Poffset'] = pos_poffset
    elif 'Poffset' not in results:
        results['Poffset'] = pos_poffset

    # Plot figure
    plt.figure(1, figsize=(12, 7))
    plt.clf()
    plt.plot(sig_seg, label='ECG')
    plt.plot(fitting_curve, label='fitting curve')
    plt.plot(baseline_curve, 'r', alpha=0.5, lw=5, label='Baseline curve')
    # P wave annotations (reference positions from annots).
    plt.plot(pos_ponset, sig_seg[pos_ponset], '<', markersize=12, alpha=0.7,
             label='Ponset')
    plt.plot(pos_p, sig_seg[pos_p], 'o', markersize=12, alpha=0.7, label='P')
    plt.plot(pos_poffset, sig_seg[pos_poffset], '>', markersize=12, alpha=0.7,
             label='Poffset')
    # P wave enhancement: overlay the MCMC-refined onset/offset.
    pos_ponset = results['Ponset']
    plt.plot(pos_ponset, sig_seg[pos_ponset], '<', markeredgecolor='black',
             markersize=12, alpha=0.7, label='Ponset enhanced')
    pos_poffset = results['Poffset']
    plt.plot(pos_poffset, sig_seg[pos_poffset], '>', markeredgecolor='black',
             markersize=12, alpha=0.7, label='Poffset enhanced')
    # Hermit coef vis: grey bars mark a +/-0.12 reference band, red bars are
    # the fitted coefficients scaled by 0.2 for display.
    plt.bar(xrange(0, len(hermit_coefs)), [
        0.12,
    ] * len(hermit_coefs),
            width=0.5,
            alpha=0.3,
            color='grey')
    plt.bar(xrange(0, len(hermit_coefs)), [
        -0.12,
    ] * len(hermit_coefs),
            width=0.5,
            alpha=0.3,
            color='grey')
    plt.bar(xrange(0, len(hermit_coefs)),
            np.array(hermit_coefs) * 0.2,
            width=0.5,
            color='r')
    plt.legend()
    plt.grid(True)
    plt.title(figID)
    plt.ylim((-0.2, 1.2))
    plt.show(block=False)
    if savefig_path is not None:
        plt.savefig(savefig_path)
    return results
def run(self, debug_info=dict()): '''Run delineation process for each R-R interval.''' r_list = self.r_list result_dict = self.result_dict fs = self.fs raw_sig = self.raw_sig p_wave_length = self.p_wave_length r_detector = DPI_QRS_Detector() for ind in xrange(0, len(r_list) - 1): print 'Progress: %d R-R intervals left.' % (len(r_list) - 1 - ind) if ind > 1: print 'Debug break.' break region_left = r_list[ind] region_right = r_list[ind + 1] QR_length = fs / 46.0 PR_length = 0.5 * (region_right - region_left) cut_x_list = [ int(region_right - PR_length), int(region_right - QR_length) ] sig_seg = raw_sig[cut_x_list[0]:cut_x_list[1]] sig_seg = r_detector.HPF(sig_seg, fs=fs, fc=3.0) sig_seg = sig_seg[:cut_x_list[1] - cut_x_list[0]] if len(sig_seg) <= 75: print 'R-R interval %d is too short!' % len(sig_seg) continue p_model = P_model_Gaussian.MakeModel(sig_seg, p_wave_length, fs=fs) M = MCMC(p_model) M.sample(iter=2000, burn=1000, thin=10) # retrieve parameters hermit_coefs = list() for h_ind in xrange(0, P_model_Gaussian.HermitFunction_max_level): hermit_value = np.mean(M.trace('hc%d' % h_ind)[:]) hermit_coefs.append(hermit_value) # P wave shape parameters gpos = np.mean(M.trace('gaussian_start_position')[:]) gsigma = np.mean(M.trace('gaussian_sigma')[:]) gamp = np.mean(M.trace('gaussian_amplitude')[:]) print 'Results:' print 'gpos = ', gpos print 'gsigma = ', gsigma print 'gamp = ', gamp len_sig = cut_x_list[1] - cut_x_list[0] fitting_curve = P_model_Gaussian.GetFittingCurve( len_sig, gpos, gsigma, gamp, hermit_coefs) baseline_curve = P_model_Gaussian.GetFittingCurve( len_sig, gpos, gsigma, 0, hermit_coefs) plt.figure(1) plt.clf() plt.plot(sig_seg, label='ECG') plt.plot(fitting_curve, label='fitting curve') plt.plot(baseline_curve, linestyle='--', alpha=0.3, lw=2, label='baseline curve') plt.plot(gpos, fitting_curve[gpos], 'r^', markersize=12) plt.legend() plt.grid(True) if 'plot_title' in debug_info: plt.title(debug_info['plot_title'], fontproperties=zn_font) if 
'plot_xlabel' in debug_info: plt.xlabel(debug_info['plot_xlabel']) plt.show(block=False) plt.savefig('./results/tmp/%d.png' % int(time.time())) peak_pos = int(gpos + gsigma / 2.0) peak_global_pos = peak_pos + cut_x_list[0] # Save to result dict result_dict['gamp'].append(gamp) result_dict['gsigma'].append(gsigma) result_dict['gpos'].append(gpos) result_dict['hermit_coefs'].append(hermit_coefs) result_dict['segment_range'].append(cut_x_list) result_dict['peak_global_pos'].append(peak_global_pos) continue return result_dict
# vn E = l * u / (n * Z) * kxf(px, kxmax, p50) * (psi - px) / 1000 s[i] = min(sp - E + Rfi / 1000 / n / Z, 1) sapflow_modeled.append(E / alpha) else: print('gs = 0') s[i] = min(sp + Rfi / 1000 / n / Z, 1) sapflow_modeled.append(0) return sapflow_modeled '''data likelihoods''' np.random.seed(1) Y_obs = Normal('Y_obs', mu=muf, tau=sigma, value=vn, observed=True) ''' posterior sampling ''' M = MCMC([alpha, c, g1, kxmax, Lamp, Lave, LTf, p50, Z, sigma]) M.use_step_method(AdaptiveMetropolis, [alpha, c, g1, kxmax, Lamp, Lave, LTf, p50, Z, sigma]) M.sample(iter=1000000, burn=500000, thin=40) # Save trace ensure_dir(species) traces = { 'alpha': M.trace('alpha')[:], 'c': M.trace('c')[:], 'g1': M.trace('g1')[:], 'kxmax': M.trace('kxmax')[:], 'Lamp': M.trace('Lamp')[:], 'Lave': M.trace('Lave')[:], 'LTf': M.trace('LTf')[:], 'p50': M.trace('p50')[:],
# NOTE(review): fragment -- this chunk opens part-way through an
# argparse add_argument(...) call whose earlier arguments are outside the view.
default=1.0, help='tau value for gaussian prior on k')
args = parser.parse_args()
print(args)

# Torsion types whose parameters will be sampled.
param_to_opt = [('CG331', 'CG321', 'CG321', 'CG331'),
                ('HGA2', 'CG321', 'CG321', 'HGA2'),
                ('CG331', 'CG321', 'CG321', 'HGA2')]

# CGenFF topology/parameter files and the butane structure.
param = CharmmParameterSet(
    '../../../../../../data/charmm_ff/top_all36_cgenff.rtf',
    '../../../../../../data/charmm_ff/par_all36_cgenff.prm')
structure = '../../../../../structure/butane.psf'
scan = '../../../../../torsion_scans/MP2_torsion_scan/'

# Parse the QM torsion scan and drop non-converged geometries.
butane_scan = ScanSet.parse_psi4_out(scan, structure)
optimized = butane_scan.remove_nonoptimized()

# Reversible-jump (rj=True) torsion model, also sampling multiplicity-5 terms.
model = Model.TorsionFitModel(param=param, frags=optimized, init_random=True,
                              param_to_opt=param_to_opt, rj=True,
                              sample_n5=True)
sampler = MCMC(model.pymc_parameters, db=sqlite_plus, dbname=args.db_name,
               verbose=5)
sampler.sample(args.iterations)
"""Sample model3 with PyMC and dump the thinned traces to CSV."""
import model3
import pymc
#import networkx as nx
import matplotlib.pyplot as plt
from pymc import MCMC
from pymc.Matplot import plot

sampler = MCMC(model3)
#MAP = pymc.MAP(model3)
#MAP.fit()


def get_coeffs(map_):
    """Collect {name: value} for every variable whose name starts with
    'p', 'b' or 'e' in a fitted MAP object."""
    return [{str(v): v.value}
            for v in map_.variables
            if str(v).startswith(('p', 'b', 'e'))]


#print get_coeffs(MAP)

sampler.sample(iter=10000, burn=250, thin=10)
#plot(sampler, path='./plots')
sampler.write_csv('shortout.csv')

#pathways = model.pathways
#traces = {}
#for p in pathways:
#    traces[p] = sampler.trace(p)[:]
#plt.hist(traces[pathways[0]], bins='auto')
#plt.show()
#t = sampler.trace(pathways[0])[:]
#import networkx as nx
# Compute MM energies for the full scan with the current parameter set.
butane_scan.compute_energy(param)
optimized = butane_scan.remove_nonoptimized()
optimized.compute_energy(param)

# Turn off torsion: zero the phi_k of every dihedral type along the butane
# backbone so the sampled torsion terms start from a blank force field.
param.dihedral_types[('CG331', 'CG321', 'CG321', 'CG331')][1].phi_k = 0
param.dihedral_types[('CG331', 'CG321', 'CG321', 'CG331')][0].phi_k = 0
param.dihedral_types[('HGA3', 'CG331', 'CG321', 'HGA2')][0].phi_k = 0
param.dihedral_types[('HGA2', 'CG321', 'CG331', 'HGA3')][0].phi_k = 0
param.dihedral_types[('HGA3', 'CG331', 'CG321', 'CG321')][0].phi_k = 0
param.dihedral_types[('CG321', 'CG321', 'CG331', 'HGA3')][0].phi_k = 0
param.dihedral_types[('HGA2', 'CG321', 'CG321', 'HGA2')][0].phi_k = 0
param.dihedral_types[('CG331', 'CG321', 'CG321', 'HGA2')][0].phi_k = 0
param.dihedral_types[('HGA2', 'CG321', 'CG321', 'CG331')][0].phi_k = 0

# Create butane scan with torsions off and re-evaluate the MM energies.
optimized_0 = butane_scan.remove_nonoptimized()
optimized_0.compute_energy(param)

# CPU reference platform keeps the energy evaluation deterministic.
platform = mm.Platform.getPlatformByName('Reference')
model = Model.TorsionFitModelEliminatePhase(param, optimized_0,
                                            platform=platform,
                                            param_to_opt=param_to_opt,
                                            decouple_n=False,
                                            sample_n5=False)
sampler = MCMC(model.pymc_parameters, db=sqlite_plus,
               dbname='butane_all_n_10000.db', verbose=5)
sampler.sample(10000)
# Ph21 Set 5 # Aritra Biswas # coin_mcmc.py # Run MCMC on coin_model.py import lh2d_model from pymc import MCMC from pymc.Matplot import plot M = MCMC(lh2d_model) M.sample(iter=100, burn=0, thin=1) print plot(M) M.alpha.summary() M.beta.summary()
if is_mcmc:
    # Declare vars: one Uniform prior per desired variable, bounded by the
    # matching entries of lower_bounds/upper_bounds.
    var_dict = {}
    for i, p in enumerate(desired_variables):
        var_dict[p] = Uniform(p, doc="{}".format(p), lower=lower_bounds[i],
                              upper=upper_bounds[i])
    # Dynamically create PyMC sampling function
    # NOTE(review): exec builds the @stochastic at runtime so its signature
    # names the variables; safe only because the names come from the local
    # desired_variables list, not user input.
    stochastic_args = ','.join(
        ["{}=var_dict['{}']".format(k, k) for k in var_dict.keys()])
    exec("@stochastic\n"
         "def combi({}, value=0):\n"
         "\tfor p in desired_variables:\n"
         "\t\tparameters.setRealValue(p, var_dict[p])\n"
         "\treturn pdf.getLogVal()\n".format(stochastic_args))
    # Define and start sampling
    mcmc = MCMC([combi] + list(var_dict.values()), db='pickle', dbmode='w',
                dbname='mcmc-{}_combiner.pickle'.format(combination))
    mcmc.sample(iter=N, burn=burnout, thin=1)
    # Output: either binned histograms or the raw traces, gzip-pickled.
    data = {v: mcmc.trace(v)[:] for v in desired_variables}
    if bins:
        with gzip.open('output/bins.dat.gz', 'w') as file:
            data_to_save = {}
            for v in data:
                data_to_save[v] = np.histogram(data[v], bins=edges[v])
            pickle.dump({'data': data_to_save}, file, protocol=2)
    else:
        with gzip.open('output/raw.dat.gz', 'w') as file:
            pickle.dump({'data': data}, file, protocol=2)
else:  #not MCMC
    # NOTE(review): the non-MCMC branch is truncated in this chunk.
    var_dict = {}
@observed(dtype=int, plot=False)
def zip(value=data, mu=mu, psi=psi):
    """ Zero-inflated Poisson likelihood

    With probability (1 - psi) an observation is a structural zero;
    otherwise it is drawn from Poisson(mu).
    (Note: the name shadows the builtin zip() in this module.)
    """
    log_like = 0.0
    for obs in value:
        if obs:
            # Non-zero counts can only come from the Poisson component.
            log_like += np.log(psi) + poisson_like(obs, mu)
        else:
            # Zeros: either structural or a Poisson-generated zero.
            log_like += np.log((1. - psi) + psi * np.exp(-mu))
    return log_like


if __name__ == "__main__":
    from pymc import MCMC, Matplot

    # Run model and plot posteriors
    sampler = MCMC(locals())
    sampler.sample(100000, 50000)
    Matplot.plot(sampler)
# Define data and stochastics switchpoint = DiscreteUniform( 'switchpoint', lower=0, upper=110, doc='Switchpoint[year]') early_mean = Exponential('early_mean', beta=1.) late_mean = Exponential('late_mean', beta=1.) @deterministic(plot=False) def rate(s=switchpoint, e=early_mean, l=late_mean): ''' Concatenate Poisson means ''' out = empty(len(disasters_array)) out[:s] = e out[s:] = l return out disasters = Poisson('disasters', mu=rate, value=disasters_array, observed=True) # import disaster_model from pymc import MCMC # M = MCMC(disaster_model) M = MCMC([switchpoint,early_mean,late_mean,rate,disasters]) M.sample(iter=10000, burn=1000, thin=10) print switchpoint.value print rate.value print M.trace('switchpoint')[:] # from pymc.Matplot import plot # plot(M)
# NOTE(review): fragment -- the @deterministic def line for this body lies
# outside the chunk; indentation below is reconstructed.
    ''' Concatenate Normal means '''
    # Early mean e before the changepoint s, late mean l afterwards.
    out = np.empty(len(dataSet))
    out[:s] = e
    out[s:] = l
    return out


# Observed data with fixed precision tau=.1 around the piecewise mean.
datapoints = Normal('datapoints', mu=rate, tau=.1, value=dataSet,
                    observed=True)

vars = [changepoint, early_mean, late_mean, datapoints]
M = MCMC(vars)
M.sample(iter=100000, burn=1000, thin=10)
#%%
# Posterior histograms of the three sampled quantities.
hist(M.trace('late_mean')[:], 100)
hist(M.trace('early_mean')[:], 100)
hist(M.trace('changepoint')[:], 100)
#
##%%
#
#switchpoint = DiscreteUniform('switchpoint', lower=0, upper=110, doc='Switchpoint[year]')
#
#early_mean = Exponential('early_mean', beta=1.)
#late_mean = Exponential('late_mean', beta=1.)
#
#@deterministic(plot=False)
def p_segment_mcmc(
        filtered_sig,
        annots,
        fs,
        step=2,
        iter_count=4000,
        burn_count=2000,
        max_hermit_level=4,
        savefig_path=None,
        figID=None,
):
    '''Detect and returns P wave detection results.

    Fits a Gaussian P wave plus a Hermite-basis baseline to the segment by
    MCMC and refines the Ponset/Poffset positions from the fit.

    Note:
        * Input is a segment contains only P wave.
        * This function returns None when detection fails.
    '''
    filtered_sig = np.array(filtered_sig)
    # Normalization: rescale the segment into [0, 1] unless it is near-flat.
    max_val = np.max(filtered_sig)
    min_val = np.min(filtered_sig)
    if (max_val - min_val) > 1e-6:
        filtered_sig = (filtered_sig - min_val) / (max_val - min_val)
    raw_sig = filtered_sig
    sig_seg = raw_sig
    p_model = P_model_Gaussian.MakeModel(sig_seg,
                                         annots,
                                         max_hermit_level=max_hermit_level)
    M = MCMC(p_model)
    M.sample(iter=iter_count, burn=burn_count, thin=10)

    # retrieve parameters: posterior mean of each Hermite coefficient trace.
    hermit_coefs = list()
    for h_ind in xrange(0, P_model_Gaussian.HermitFunction_max_level):
        hermit_value = np.mean(M.trace('hc%d' % h_ind)[:])
        hermit_coefs.append(hermit_value)
    # Hermite-basis baseline; zip() truncates to max_hermit_level terms.
    fitting_curve = np.zeros(len(sig_seg), )
    for level, coef in zip(xrange(0, max_hermit_level), hermit_coefs):
        fitting_curve += P_model_Gaussian.HermitFunction(
            level, len(sig_seg)) * coef

    # Gaussian: read the reference annotation positions from annots.
    pos_ponset = None
    pos_p = None
    pos_poffset = None
    for pos, label in annots:
        if label == 'Ponset':
            pos_ponset = pos
        elif label == 'P':
            pos_p = pos
        elif label == 'Poffset':
            pos_poffset = pos
    gaussian_curve = np.zeros(len(sig_seg), )
    common_length = len(sig_seg)
    g_amp = np.mean(M.trace('g_amp')[:])
    g_sigma = np.mean(M.trace('g_sigma')[:])
    g_dc = np.mean(M.trace('dc')[:])
    # Double-length Gaussian wave; the slice below aligns it with the
    # annotated P peak position.
    gaussian_curve = P_model_Gaussian.GetGaussianPwave(
        len(sig_seg) * 2, g_amp, g_sigma / 3, g_dc)
    gaussian_segment = gaussian_curve[common_length - pos_p:
                                      2 * common_length - pos_p]
    baseline_curve = fitting_curve + g_dc
    baseline_curve = baseline_curve.tolist()
    fitting_curve += gaussian_segment

    # Compute results: scan for where the full fit departs from the baseline
    # by >= meet_threshold.  The first crossing becomes Ponset; every later
    # crossing overwrites Poffset, so Poffset ends at the LAST crossing.
    results = dict(P=pos_p)
    meet_threshold = 0.07
    for ind in xrange(0, len(fitting_curve)):
        if ('Ponset' not in results and
                abs(fitting_curve[ind] - baseline_curve[ind]) >=
                meet_threshold):
            results['Ponset'] = ind
        elif ('Ponset' in results and
                abs(fitting_curve[ind] - baseline_curve[ind]) >=
                meet_threshold):
            results['Poffset'] = ind
    # If not found: fall back to the reference annotations.
    if 'Ponset' not in results:
        results['Ponset'] = pos_ponset
        results['Poffset'] = pos_poffset
    elif 'Poffset' not in results:
        results['Poffset'] = pos_poffset
    return results