Example #1
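# Loop body from the dispersion-prior exploration (Example #6 shows the enclosing
# loop over mu_log_10_delta): put a normal prior on log_10(delta), map it to
# delta = 5 + 10**log_10_delta, fit by MCMC, and store the posterior summaries.
# Assumes PyMC2 (import pymc as mc) and that r, n, n_pred, iter, burn, thin,
# results, and model_keys are defined earlier in the source script.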
    ### @export 'negative-binomial_dispersion-alt_prior'
    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
    delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r * n, pi * n, delta)

    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi * n_pred, delta) / float(n_pred)

    ### @export 'negative-binomial_dispersion-fit_alt_prior'
    mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(iter,
                                                         burn,
                                                         thin,
                                                         verbose=False,
                                                         progress_bar=False)

    # raw string so the LaTeX backslashes are kept literal
    key = r'Neg. Binom., $\mu_{\log\delta}=%d$' % mu_log_10_delta
    results[key] = dict(pred=pred.stats(), pi=pi.stats())
    model_keys.append(key)

    #mc.Matplot.plot(pi)
    #mc.Matplot.plot(delta)

book_graphics.save_json('poisson_model.json', vars())
book_graphics.forest_plot(fname='neg_binom_priors.pdf', **vars())

pl.show()
Example #2
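# Forest plot of VZV seroprevalence at age 5 for Western European studies with
# Year End between 1997 and 2005: plot each study's rate r and effective sample
# size n with book_graphics.forest_plot, then add a sample-size-weighted pooled
# estimate as a filled diamond (width = 95% CI) and a dashed reference line.
# Assumes import pandas, import pylab as pl, and the project's book_graphics module.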
pl.savefig('vzv_data.pdf')

### @export 'forest-plot-age-5'
df = pandas.read_csv('vzv.csv', index_col=None)
df = df[df['Region'] == 'Europe, Western']
df = df[(df['Year End'] >= 1997) & (df['Year End'] <= 2005)]

df5 = df[(df['Age Start'] <= 5) & (df['Age End'] >= 5)]
r = df5['Parameter Value']
n = df5['effective_sample_size']
l = [a.split(',')[0] + ' et al' for a in df5['authors']]
xmax = 1.0
book_graphics.forest_plot(r,
                          n,
                          data_labels=l,
                          xmax=xmax,
                          subplot_params=dict(bottom=.1,
                                              right=.99,
                                              top=.9,
                                              left=.3))

pooled = (r * n).sum() / n.sum()
se = pl.sqrt(pooled * (1 - pooled) / n.sum())
# make diamond width of uncertainty
pl.fill([pooled - 1.96 * se, pooled, pooled + 1.96 * se, pooled],
        [-.5, -.5 + .125 / 2, -.5, -.5 - .125 / 2],
        color='k')
#pl.errorbar(pooled, -.5, xerr=[1.96*se], fmt='kd', mew=1, mec='white', ms=15)

pl.vlines([pooled], -.75, len(n), linewidth=1, linestyle='dashed', color='k')
pl.axis([-.01, 1.05, -.75, .25 + .5 * len(n)])
pl.text(-2 * xmax / 50, -.5, 'Pooled Estimate', ha='right', va='center')
Example #3
File: vzv.py  Project: aflaxman/gbd
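# The same age-5 forest plot as Example #2, shown with more of its context in
# vzv.py: axis labels and savefig for the raw VZV data plot, the pooled-estimate
# diamond and dashed reference line, a 'Europe, Western' title, and a savefig
# to vzv_forest_europe.pdf; the CSV is re-read at the end for the next section
# of the script, which is truncated here.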
pl.xlabel('Age (Years)')
pl.ylabel('VZV Seroprevalence (Per 1)')
pl.savefig('vzv_data.pdf')


### @export 'forest-plot-age-5'
df = pandas.read_csv('vzv.csv', index_col=None)
df = df[df['Region']=='Europe, Western']
df = df[(df['Year End']>=1997) & (df['Year End']<=2005)]

df5 = df[(df['Age Start'] <= 5) & (df['Age End'] >= 5)]
r = df5['Parameter Value']
n = df5['effective_sample_size']
l = [a.split(',')[0] + ' et al' for a in df5['authors']]
xmax=1.0
book_graphics.forest_plot(r, n, data_labels=l, xmax=xmax,
                          subplot_params=dict(bottom=.1, right=.99,
                                              top=.9, left=.3))

pooled = (r*n).sum() / n.sum()
se = pl.sqrt(pooled*(1-pooled)/n.sum())
# make diamond width of uncertainty
pl.fill([pooled - 1.96*se, pooled, pooled + 1.96*se, pooled],
        [-.5, -.5 + .125/2, -.5, -.5 - .125/2],
        color='k')
#pl.errorbar(pooled, -.5, xerr=[1.96*se], fmt='kd', mew=1, mec='white', ms=15)

pl.vlines([pooled], -.75, len(n), linewidth=1, linestyle='dashed', color='k')
pl.axis([-.01, 1.05, -.75, .25+.5*len(n)])
pl.text(-2*xmax/50, -.5, 'Pooled Estimate', ha='right', va='center')
pl.title('Europe, Western')
pl.savefig('vzv_forest_europe.pdf')


df = pandas.read_csv('vzv.csv', index_col=None)
Example #4
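# For each fitted rate model, record the posterior median of pi and the width of
# its 95% HPD interval, save everything with book_graphics.save_json, and draw
# two forest plots of the schizophrenia prevalence data: a data-only version and
# a version comparing the Binomial, Poisson, Beta binomial, Negative binomial,
# Normal, Lognormal, and Offset lognormal fits. Assumes r, n, cy, and results
# are defined earlier in the source script.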
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1] -
                     results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)


book_graphics.save_json('schiz_forest.json', vars())

### data only plot, for computational infrastructure appendix
book_graphics.forest_plot(r, n, data_labels=cy,
                          xmax=.0115,
                          subplot_params=dict(bottom=.1, right=.99, top=.95, left=.15),
                          figparams=book_graphics.quarter_page_params,
                          fname='book/graphics/ci-prev_meta_analysis-schiz_data.png')


### master graphic of data and models, for rate model section of stats chapter
book_graphics.forest_plot(r, n, data_labels=cy,
                          xmax=.0115,
                          model_keys=['Binomial', 'Poisson', 'Beta binomial', 'Negative binomial', 'Normal', 'Lognormal',  'Offset lognormal'],
                          results=results,
                          #subplot_params=dict(bottom=.1, right=.99, top=.95, left=.15),
                          fig_params=dict(figsize=(11, 8.5), dpi=120),
                          fname='book/graphics/schiz_forest.pdf')

pl.show()
Example #5
File: zero_forest.py  Project: aflaxman/gbd
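# Fit the offset lognormal model in zero_forest.py, summarize every model's
# posterior for pi, and draw the zero-inflation comparison forest plot. The
# first two lines are the tail of a truncated expression: the precision term
# and the "- zeta" offset back to the rate scale at the end of the
# offset-lognormal prediction, whose enclosing definition is cut off above.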
                    1./((s_pred/(pi+zeta))**2 + sigma**2))) \
                - zeta

### @export 'offset-log-normal-fit-and-store'
mc.MCMC([pi, zeta, sigma, obs, pred]).sample(iter, burn, thin)

results['Offset lognormal'] = dict(pi=pi.stats(), pred=pred.stats())


### @export 'save'
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1] -
                     results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)


book_graphics.save_json('zero_forest.json', vars())

### master graphic of data and models, for zeros subsection of rate models section of stats chapter
book_graphics.forest_plot(r, n,
                          xmax=.0005,
                          model_keys=['Zero-inflated beta binomial', 'Beta binomial', 'Negative binomial', 'Lognormal',  'Offset lognormal'],
                          results=results,
                          pi_true=pi_true,
                          fname='zero_forest.pdf')
Example #6
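# Full dispersion-prior exploration: for each prior mean mu_log_10_delta in
# [1, 2, 3], rebuild the negative binomial model of Example #1, fit it by MCMC,
# store the posterior for pi and the posterior predictive pred under a labeled
# key, and finally save the results and draw the comparison forest plot.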
model_keys = ['Poisson', 'Negative Binomial']
### @export 'negative-binomial_dispersion-prior-exploration'
for mu_log_10_delta in [1, 2, 3]:
    ### @export 'negative-binomial_dispersion-alt_prior'
    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
    delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r*n, pi*n, delta)

    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

    ### @export 'negative-binomial_dispersion-fit_alt_prior'
    mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(
        iter, burn, thin, verbose=False, progress_bar=False)

    # raw string so the LaTeX backslashes are kept literal
    key = r'Neg. Binom., $\mu_{\log\delta}=%d$' % mu_log_10_delta
    results[key] = dict(pred=pred.stats(), pi=pi.stats())
    model_keys.append(key)

    #mc.Matplot.plot(pi)
    #mc.Matplot.plot(delta)


book_graphics.save_json('poisson_model.json', vars())
book_graphics.forest_plot(fname='neg_binom_priors.pdf', **vars())

pl.show()
Example #7
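# Formatted excerpt matching the end of Example #5: fit the offset lognormal
# model, record its posterior summaries, compute the posterior median and 95%
# HPD width of pi for every model in results, save them to zero_forest.json,
# and draw the zero-inflation forest plot (passing the reference value pi_true).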
mc.MCMC([pi, zeta, sigma, obs, pred]).sample(iter, burn, thin)

results['Offset lognormal'] = dict(pi=pi.stats(), pred=pred.stats())

### @export 'save'
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1] -
                     results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)

book_graphics.save_json('zero_forest.json', vars())

### master graphic of data and models, for zeros subsection of rate models section of stats chapter
book_graphics.forest_plot(r,
                          n,
                          xmax=.0005,
                          model_keys=[
                              'Zero-inflated beta binomial', 'Beta binomial',
                              'Negative binomial', 'Lognormal',
                              'Offset lognormal'
                          ],
                          results=results,
                          pi_true=pi_true,
                          fname='zero_forest.pdf')