Example #1
results['Offset lognormal'] = dict(pi=pi.stats(), pred=pred.stats())

### @export 'save'
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1]-results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)


book_graphics.save_json('schiz_forest.json', vars())

### data only plot, for computational infrastructure appendix
book_graphics.forest_plot(r, n, data_labels=cy,
                          xmax=.0115,
                          subplot_params=dict(bottom=.1, right=.99, top=.95, left=.15),
                          figparams=book_graphics.quarter_page_params,
                          fname='book/graphics/ci-prev_meta_analysis-schiz_data.png')


### master graphic of data and models, for rate model section of stats chapter
book_graphics.forest_plot(r, n, data_labels=cy,
                          xmax=.0115,
                          model_keys=['Binomial', 'Poisson', 'Beta binomial', 'Negative binomial', 'Normal', 'Lognormal',  'Offset lognormal'],
                          results=results,
                          #subplot_params=dict(bottom=.1, right=.99, top=.95, left=.15),
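The summary block in this example reads PyMC 2 `stats()` dictionaries out of `results`. A minimal sketch of the structure that `results[k]['pi']` is assumed to have (the numbers are placeholders, not values from the fitted models):

# Hypothetical illustration of the PyMC 2 stats() dict consumed by the loop above;
# only the keys actually used by the snippets are shown, and the values are made up.
example_pi_stats = {
    'mean': 0.004,
    '95% HPD interval': [0.003, 0.005],   # [lower, upper]
    'quantiles': {2.5: 0.003, 25: 0.0037, 50: 0.004, 75: 0.0043, 97.5: 0.005},
}
median = example_pi_stats['quantiles'][50]
spread = example_pi_stats['95% HPD interval'][1] - example_pi_stats['95% HPD interval'][0]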
Example #2
            linewidth=1,
            label='Uncertainty interval')
    pl.errorbar(data[:, 0],
                data[:, 1],
                yerr=data[:, 2] * 1.96,
                fmt='gs',
                mec='white',
                mew=0,
                ms=10)

    pl.axis([0, 100, 0, 10])
    pl.text(10,
            9,
            '%s ($\\rho = %d$)' % (smoothness, scale[smoothness]),
            ha='left',
            va='top')
    if col == 0:
        pl.ylabel('Rate (per 1000 PY)')
    else:
        pl.yticks([])
    pl.xticks([25, 50, 75])
    pl.xlabel('Age (Years)')

pl.subplots_adjust(left=.1, bottom=.2, top=.95, right=.95, wspace=0)
pl.savefig('smoothness_priors.png')
pl.savefig('smoothness_priors.pdf')

### @export 'store-results'

book_graphics.save_json('age_patterns.json', vars())
Example #3
    dm.params['covariates']['Study_level']['bias']['rate']['value'] = 0
    for cv in dm.params['covariates']['Country_level']:
        dm.params['covariates']['Country_level'][cv]['rate']['value'] = 0

    # TODO: set bounds on remission and excess-mortality in the second time through

    ### @export 'initialize model data'
    region = 'north_america_high_income'
    year = 1990
    dm.data = [d for d in dm.data if dm.relevant_to(d, 'prevalence', region, year, 'all')]

    # fit model
    dm.clear_fit()
    dm.clear_empirical_prior()
    dismod3.neg_binom_model.covariate_hash = {}

    import fit_world
    fit_world.fit_world(dm)
    models[ii] = dm
    results[ii] = dict(rate_stoch=dm.vars['prevalence+world+all+all']['rate_stoch'].stats(), dispersion=dm.vars['prevalence+world+all+all']['dispersion'].stats())

    ### @export 'save'
    for d in dm.data:
        d['sex'] = 'male'  # otherwise tile plot shows it twice

    dismod3.plotting.tile_plot_disease_model(dm, dismod3.utils.gbd_keys(['incidence', 'remission', 'excess-mortality', 'prevalence'], ['world'], ['all'], ['all']),
                                             plot_prior=False, print_sample_size=False)
pl.show()

book_graphics.save_json('hep_c.json', vars())
Example #4
#    pl.plot([-100], [0], marker[j], label=label[j]) # HACK: off-screen point with label to appear in legend

for j in range(2):
    vars = results[j].vars[dismod3.utils.gbd_key_for('prevalence', region,
                                                     year, sex)]
    stats = vars['predicted_rates'].stats()
    y = stats['quantiles'][50]
    yerr = [y - stats['quantiles'][2.5], stats['quantiles'][97.5] - y]
    pl.errorbar(sorted_indices + .25 * (j + 1),
                y,
                yerr,
                mew=1,
                ms=5,
                mec='white',
                fmt=marker[j],
                label=label[j])

    results['posterior_dispersion_%d' % j] = vars['dispersion'].stats()
    results['posterior_beta_%d' % j] = vars['study_coeffs'].stats()

pl.xlabel('PMS prevalence data from systematic review')
pl.xticks([])
pl.yticks([0, .2, .4, .6, .8, 1])
pl.ylabel('Prevalence (per 1)')
pl.axis([-.5, 11.5, -.01, 1.])
pl.legend(loc='upper left', fancybox=True, shadow=True, numpoints=1)

pl.savefig('pms_ppc.pdf')

book_graphics.save_json('pms_ppc.json', vars())
Example #5
                    zorder=1)
            pl.plot(mesh, f_n,
                    '-', color='black', linewidth=1,
                    zorder=2)

    #pl.plot(mesh, sm.f_eval.stats()['quantiles'][50],
    #        'k-', linewidth=3, label='Median')
    pl.plot(mesh, sm.f_eval.stats()['95% HPD interval'],
            'k--', linewidth=1, label='Uncertainty interval')
    pl.errorbar(data[:,0], data[:,1], yerr=data[:,2]*1.96,
                fmt='gs',
                mec='white', mew=0, ms=10)

    pl.axis([0, 100, 0, 10])
    pl.text(10, 9, '%s ($\\rho = %d$)' % (smoothness, scale[smoothness]), ha='left', va='top')
    if col == 0:
        pl.ylabel('Rate (per 1000 PY)')
    else:
        pl.yticks([])
    pl.xticks([25,50,75])
    pl.xlabel('Age (Years)')

pl.subplots_adjust(left=.1, bottom=.2, top=.95, right=.95, wspace=0)
pl.savefig('smoothness_priors.png')
pl.savefig('smoothness_priors.pdf')

### @export 'store-results'

book_graphics.save_json('age_patterns.json', vars())
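The plotting code in this example reads `sm.f_eval`, a Gaussian-process node built earlier in the script with PyMC 2's `gp` module. A sketch of one way such a submodel could be constructed; the mean function, amplitude, and scale below are assumptions, not the book's settings:

import pylab as pl
import pymc as mc

# Assumed construction of the GP submodel behind sm.f_eval; amp/scale are placeholders.
mesh = pl.arange(0., 101., 5.)
M = mc.gp.Mean(lambda x: pl.zeros(len(x)))                   # flat prior mean
C = mc.gp.Covariance(mc.gp.matern.euclidean, amp=1.,
                     scale=25., diff_degree=2)               # scale plays the role of rho
sm = mc.gp.GPSubmodel('sm', M, C, mesh)                      # exposes sm.f_eval on the mesh

# After MCMC sampling (with data likelihoods attached), sm.f_eval.stats() supplies the
# median and '95% HPD interval' bands that the plotting code above draws against mesh.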

Example #6
    results[grid] = dm
    try:
        results['dic_%s'%grid] = dm.mcmc.dic
    except Exception as e:
        print e
        results['dic_%s'%grid] = 'TK'


pl.figure(**book_graphics.quarter_page_params)
for ii, grid in enumerate('abcd'):
    dm = results[grid]
    pl.subplot(1,4,ii+1)
    dismod3.plotting.plot_intervals(dm, [d for d in dm.data if dm.relevant_to(d, 'prevalence', region, year, sex)],
                                    color='black', print_sample_size=False, alpha=1., plot_error_bars=False,
                                    linewidth=2)
    book_graphics.plot_rate(dm, dismod3.utils.gbd_key_for('prevalence', region, year, sex), linestyle='-')
    pl.axis([10, 60, -.05, .8])
    pl.xlabel('Age (Years)')
    pl.xticks([15,35,55])
    if ii == 0:
        pl.yticks([0, .2, .4, .6], [0, 20, 40, 60])
        pl.ylabel('Prevalence (per 100)')
    else:
        pl.yticks([0, .2, .4, .6], ['', '', '', '']) 
    pl.text(12, .75, '(%s)'% grid, va='top', ha='left')


pl.subplots_adjust(wspace=0, bottom=.15, left=.1, right=.99, top=.97)
pl.savefig('pms_grids.pdf')
book_graphics.save_json('pms_grids.json', vars())
Example #7
    ### @export 'negative-binomial_dispersion-alt_prior'
    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
    delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r * n, pi * n, delta)

    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi * n_pred, delta) / float(n_pred)

    ### @export 'negative-binomial_dispersion-fit_alt_prior'
    mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(iter,
                                                         burn,
                                                         thin,
                                                         verbose=False,
                                                         progress_bar=False)

    key = r'Neg. Binom., $\mu_{\log\delta}=%d$' % mu_log_10_delta
    results[key] = dict(pred=pred.stats(), pi=pi.stats())
    model_keys.append(key)

    #mc.Matplot.plot(pi)
    #mc.Matplot.plot(delta)

book_graphics.save_json('poisson_model.json', vars())
book_graphics.forest_plot(fname='neg_binom_priors.pdf', **vars())

pl.show()
Example #8
model_keys = ['Poisson', 'Negative Binomial']
### @export 'negative-binomial_dispersion-prior-exploration'
for mu_log_10_delta in [1,2,3]:
    ### @export 'negative-binomial_dispersion-alt_prior'
    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
    delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r*n, pi*n, delta)

    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

    ### @export 'negative-binomial_dispersion-fit_alt_prior'
    mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(iter, burn, thin, verbose=False, progress_bar=False)

    key = r'Neg. Binom., $\mu_{\log\delta}=%d$' % mu_log_10_delta
    results[key] = dict(pred=pred.stats(), pi=pi.stats())
    model_keys.append(key)

    #mc.Matplot.plot(pi)
    #mc.Matplot.plot(delta)


book_graphics.save_json('poisson_model.json', vars())
book_graphics.forest_plot(fname='neg_binom_priors.pdf', **vars())

pl.show()
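The loop above assumes that `r`, `n`, `n_pred`, `iter`, `burn`, `thin`, and a `results` dict were defined earlier in the script. A self-contained sketch of one pass, with simulated data standing in for the real rates (all inputs here are illustrative assumptions):

import pylab as pl
import pymc as mc

# Illustrative stand-ins for objects defined earlier in the script.
n = pl.array([1000., 2000., 3000., 5000.])                   # effective sample sizes
pi_true = .005
r = mc.rnegative_binomial(pi_true*n, 50.) / n                # simulated observed rates
n_pred = 10000.
iter, burn, thin = 20000, 10000, 10
mu_log_10_delta = 2

pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
log_10_delta = mc.Normal('log_10_delta', mu=mu_log_10_delta, tau=.25**-2)
delta = mc.Lambda('delta', lambda x=log_10_delta: 5 + 10**x)

@mc.potential
def obs(pi=pi, delta=delta):
    return mc.negative_binomial_like(r*n, pi*n, delta)

@mc.deterministic
def pred(pi=pi, delta=delta):
    return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

mc.MCMC([pi, log_10_delta, delta, obs, pred]).sample(iter, burn, thin,
                                                     verbose=False, progress_bar=False)
print(pi.stats()['quantiles'][50])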
Example #9
#             print e
#             pl.plot(i+.25*(j+1), vars['predicted_rates'].value[i], marker[j], label=label[j])
#             results['posterior_dispersion_%d'%j] = 'TK'

#for j in range(2):
#    pl.plot([-100], [0], marker[j], label=label[j]) # HACK: off-screen point with label to appear in legend

for j in range(2):
    vars = results[j].vars[dismod3.utils.gbd_key_for('prevalence', region, year, sex)]
    stats = vars['predicted_rates'].stats()
    y = stats['quantiles'][50]
    yerr = [y-stats['quantiles'][2.5], stats['quantiles'][97.5]-y]
    pl.errorbar(sorted_indices+.25*(j+1), y, yerr,
                mew=1, ms=5, mec='white',
                fmt=marker[j], label=label[j])
    
    results['posterior_dispersion_%d'%j] = vars['dispersion'].stats()
    results['posterior_beta_%d'%j] = vars['study_coeffs'].stats()

pl.xlabel('PMS prevalence data from systematic review')
pl.xticks([])
pl.yticks([0, .2, .4, .6, .8, 1])
pl.ylabel('Prevalence (per 1)')
pl.axis([-.5, 11.5,-.01,1.])
pl.legend(loc='upper left', fancybox=True, shadow=True, numpoints=1)

pl.savefig('pms_ppc.pdf')


book_graphics.save_json('pms_ppc.json', vars())
Example #10
                mec='k',
                mew=1,
                label='%d Observation' % v)
    else:
        pl.plot([-100], [1],
                'o',
                color='none',
                ms=pl.sqrt(v) * 5 + 2,
                mec='k',
                mew=1,
                label='%d Observations' % v)

pl.legend(loc='lower left', fancybox=True, shadow=True, numpoints=1)

pl.xticks()
pl.yticks()

pl.xlabel('Mean of age group (years)')
pl.ylabel('Width of age group (years)')
pl.axis([-5, 110., .6, 500.])

pl.subplots_adjust(left=.1, right=.99, bottom=.15, top=.95)
pl.savefig('book/graphics/af_age_groups_scatter.pdf')

### @export 'save-results'
book_graphics.save_json('af_age_groups.json', {
    'most_freq_cnt': most_freq_cnt,
    'rows_total': rows_total
})

pl.show()
Example #11
mc.MCMC([pi, obs, pred]).sample(20000, 10000, 10, verbose=False, progress_bar=False)

pl.figure(**book_graphics.quarter_page_params)
sorted_indices = r.argsort().argsort()
jitter = mc.rnormal(0, 0.1 ** -2, len(pred.trace()))
for i, s_i in enumerate(sorted_indices):
    if i == 0:
        label = "Predicted distribution"
    else:
        label = "_nolabel_"
    pl.plot(s_i + jitter, pred.trace()[:, i] / float(n[i]), "ko", mew=0, alpha=0.25, zorder=-100, label=label)

pl.errorbar(
    sorted_indices, r, yerr=1.96 * pl.sqrt(r * (1 - r) / n), fmt="ks", mew=1, ms=5, mec="white", label="Observed value"
)

pl.xticks([])
pl.yticks([0, 0.002, 0.004, 0.006, 0.008, 0.01])
pl.ylabel("Rate ($r$)")
pl.axis([-0.5, 15.5, -0.0001, 0.0121])
pl.legend(loc="upper center", numpoints=1, fancybox=True, shadow=True)
pl.savefig("book/graphics/binomial-model-ppc.pdf")
pl.savefig("book/graphics/binomial-model-ppc.png")


### @export 'save-vars'
book_graphics.save_json("binomial_model.json", vars())

pl.show()
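The `mc.MCMC([pi, obs, pred])` call at the top of this example relies on nodes defined before the excerpt starts. A sketch of a binomial rate model consistent with how `pred`, `r`, and `n` are used in the plotting code; the data are simulated stand-ins, not the book's values:

import pylab as pl
import pymc as mc

# Illustrative stand-ins for the data defined earlier in the script.
n = pl.array(16*[5000])                     # effective sample sizes (assumed)
k = mc.rbinomial(n, .005)                   # simulated observed counts
r = k / pl.array(n, dtype=float)            # observed rates

pi = mc.Uniform('pi', lower=0, upper=1, value=.5)

@mc.potential
def obs(pi=pi):
    return mc.binomial_like(r*n, n, pi)

@mc.deterministic
def pred(pi=pi):
    return mc.rbinomial(n, pi)              # posterior-predicted counts, one per row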
Example #12
pl.figure(**book_graphics.quarter_page_params)
for ii, grid in enumerate('abcd'):
    dm = results[grid]
    pl.subplot(1, 4, ii + 1)
    dismod3.plotting.plot_intervals(dm, [
        d
        for d in dm.data if dm.relevant_to(d, 'prevalence', region, year, sex)
    ],
                                    color='black',
                                    print_sample_size=False,
                                    alpha=1.,
                                    plot_error_bars=False,
                                    linewidth=2)
    book_graphics.plot_rate(dm,
                            dismod3.utils.gbd_key_for('prevalence', region,
                                                      year, sex),
                            linestyle='-')
    pl.axis([10, 60, -.05, .8])
    pl.xlabel('Age (Years)')
    pl.xticks([15, 35, 55])
    if ii == 0:
        pl.yticks([0, .2, .4, .6], [0, 20, 40, 60])
        pl.ylabel('Prevalence (per 100)')
    else:
        pl.yticks([0, .2, .4, .6], ['', '', '', ''])
    pl.text(12, .75, '(%s)' % grid, va='top', ha='left')

pl.subplots_adjust(wspace=0, bottom=.15, left=.1, right=.99, top=.97)
pl.savefig('pms_grids.pdf')
book_graphics.save_json('pms_grids.json', vars())
Example #13
beta = mc.Uninformative('beta', value=10 * (1 - pop_A_prev))
pi = mc.Beta('pi', alpha, beta, value=[pop_A_prev, pop_B_prev, .02])


@mc.potential
def obs(pi=pi):
    return pop_A_prev*pop_A_N*pl.log(pi[0]) + (1-pop_A_prev)*pop_A_N*pl.log(1-pi[0]) \
        + pop_B_prev*pop_B_N*pl.log(pi[1]) + (1-pop_B_prev)*pop_B_N*pl.log(1-pi[1])


pop_C_N = 50000
pop_C_k = mc.Binomial('pop_C_k', pop_C_N, pi[2])
mc.MCMC([alpha, beta, pi, obs, pop_C_k]).sample(200000,
                                                100000,
                                                20,
                                                verbose=False,
                                                progress_bar=False)

pop_C_prev = pop_C_k.stats()['quantiles'][50] / float(pop_C_N)
pop_C_prev_per_1000 = '%.0f' % (pop_C_prev * 1000)
print pop_C_prev_per_1000

pop_C_ui = pop_C_k.stats()['95% HPD interval'] / float(pop_C_N)
pop_C_ui_per_1000 = '[%.0f, %.0f]' % tuple(pop_C_ui * 1000)
print pop_C_ui_per_1000

### @export 'save-vars'
book_graphics.save_json('beta_binomial_model.json', vars())

pl.show()
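This snippet begins mid-script: `alpha`, `pop_A_prev`, and the imports come from earlier lines (Example #21 below shows a longer cut of the same script). A sketch of the assumed preamble, with an illustrative prevalence value for population A:

import pylab as pl
import pymc as mc

# Assumed preamble; pop_A_prev is an illustrative value, not the book's.
pop_A_prev = .01
pop_A_N = 50000
pop_B_prev = .006
pop_B_N = 50000

alpha = mc.Uninformative('alpha', value=10*pop_A_prev)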
Example #14
mc.MCMC([pi, zeta, sigma, obs, pred]).sample(iter, burn, thin)

results['Offset lognormal'] = dict(pi=pi.stats(), pred=pred.stats())

### @export 'save'
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1] -
                     results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)

book_graphics.save_json('zero_forest.json', vars())

### master graphic of data and models, for zeros subsection of rate models section of stats chapter
book_graphics.forest_plot(r,
                          n,
                          xmax=.0005,
                          model_keys=[
                              'Zero-inflated beta binomial', 'Beta binomial',
                              'Negative binomial', 'Lognormal',
                              'Offset lognormal'
                          ],
                          results=results,
                          pi_true=pi_true,
                          fname='zero_forest.pdf')
Example #15
# set expert priors and other model parameters
dm.params['global_priors']['level_value']['incidence']['age_before'] = 10
dm.params['global_priors']['level_value']['incidence']['age_after'] = 99
#dm.params['global_priors']['smoothness']['incidence']['age_start'] = 10

dm.params['global_priors']['level_bounds']['remission']['upper'] = .05

dm.params['global_priors']['level_value']['prevalence']['age_before'] = 10

dm.params['global_priors']['smoothness']['relative_risk']['amount'] = 'Very'

dm.params['covariates']['Country_level']['LDI_id_Updated_7July2011']['rate'][
    'value'] = 0
dm.params['covariates']['Study_level']['cv_past_year']['rate']['value'] = 1

# clear any fit and priors
dm.clear_fit()
dm.clear_empirical_prior()
dismod3.neg_binom_model.covariate_hash = {}

# initialize model data
#dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence')
import fit_world
fit_world.fit_world(dm)

import fit_posterior
fit_posterior.fit_posterior(dm, 'north_america_high_income', 'female', '2005')

### @export 'save'
book_graphics.save_json('bipolar.json', vars())
Example #16
    ### @export 'save'
    for d in dm.data:
        d['sex'] = 'male'  # otherwise tile plot shows it twice

    reload(book_graphics)
    book_graphics.plot_age_patterns(
        dm,
        region='world',
        year='all',
        sex='all',
        rate_types='remission incidence prevalence'.split())
    pl.subplot(1, 3, 1)
    pl.yticks([0, .04, .08], [0, 4, 8])
    pl.ylabel('Rate (Per 100 PY)')

    #pl.subplot(1,4,2)
    #pl.yticks([0, .04, .08], [0,4,8])

    pl.subplot(1, 3, 2)
    pl.yticks([0, .0025, .0051], [0, .25, .5])

    pl.subplot(1, 3, 3)
    pl.yticks([0, .025, .05], [0, 2.5, 5])

    pl.savefig('hep_c-consistent%d.pdf' % ii)

pl.show()

book_graphics.save_json('hep_c.json', results)
Example #17
    @mc.deterministic
    def pred(pi=pi, delta=delta):
        return mc.rnegative_binomial(pi*n_pred, delta) / float(n_pred)

    ## fit model
    mc.MCMC([pi, delta, obs, pred]).sample(iter, burn, thin)


    ## record results
    for i, stoch in enumerate([pi, pred]):
        median = stoch.stats()['quantiles'][50]
        residuals[i].append(pi_true - median)
        lb, ub = stoch.stats()['95% HPD interval']
        coverage[i].append(lb <= pi_true <= ub)

### @export 'summarize-results'
bias = {}
rmse = {}
percent_coverage = {}

for i, s in enumerate(['pi', 'pred']):
    bias[s] = '%.5f' % pl.mean(residuals[i])
    rmse[s] = '%.3f' % pl.rms_flat(residuals[i])
    percent_coverage[s] = '%.2f' % pl.mean(coverage[i])

print 'bias', bias
print 'rmse', rmse
print 'percent coverage', percent_coverage

book_graphics.save_json('neg_binom_sim.json', vars())
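The fragment above sits inside a simulation loop: `residuals`, `coverage`, `pi_true`, and the model nodes `pi`, `delta`, and `obs` are created outside the excerpt. One plausible shape for that loop, under assumed true values and priors (not the book's exact script):

import pylab as pl
import pymc as mc

# Assumed simulation setup (values are illustrative).
pi_true = .025
delta_true = 5.
n = pl.array(16*[1000.])
n_pred = 1000.
iter, burn, thin = 20000, 10000, 10

residuals = [[], []]      # index 0: pi, index 1: pred
coverage = [[], []]

for rep in range(100):
    r = mc.rnegative_binomial(pi_true*n, delta_true) / n     # simulated data

    pi = mc.Uniform('pi', lower=0, upper=1, value=.5)
    delta = mc.Uniform('delta', lower=0, upper=100, value=5.)

    @mc.potential
    def obs(pi=pi, delta=delta):
        return mc.negative_binomial_like(r*n, pi*n, delta)

    # ... then the pred node, fit, and bookkeeping from the excerpt above.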
Example #18
# set expert priors and other model parameters
dm.params['global_priors']['level_value']['incidence']['age_before'] = 10
dm.params['global_priors']['level_value']['incidence']['age_after'] = 99
#dm.params['global_priors']['smoothness']['incidence']['age_start'] = 10

dm.params['global_priors']['level_bounds']['remission']['upper'] = .05

dm.params['global_priors']['level_value']['prevalence']['age_before'] = 10

dm.params['global_priors']['smoothness']['relative_risk']['amount'] = 'Very'

dm.params['covariates']['Country_level']['LDI_id_Updated_7July2011']['rate']['value'] = 0
dm.params['covariates']['Study_level']['cv_past_year']['rate']['value'] = 1

# clear any fit and priors
dm.clear_fit()
dm.clear_empirical_prior()
dismod3.neg_binom_model.covariate_hash = {}

# initialize model data
#dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence')
import fit_world
fit_world.fit_world(dm)

import fit_posterior
fit_posterior.fit_posterior(dm, 'north_america_high_income', 'female', '2005')

### @export 'save'
book_graphics.save_json('bipolar.json', vars())
Example #19
linestyle = dict(slightly='solid', very='dashed', moderately='dotted')
for col, smoothness in enumerate(['slightly', 'moderately', 'very']):
    mesh = pl.arange(-101, 101, .1)
    C = mc.gp.FullRankCovariance(mc.gp.matern.euclidean,
                                 amp=1.,
                                 scale=rho[smoothness],
                                 diff_degree=2)

    pl.plot(mesh, C(mesh, [0]),
            marker='', color='black', linewidth=1,
            linestyle=linestyle[smoothness],
            zorder=1,
            label='%s ($\\rho = %d$)' % (smoothness.capitalize(), rho[smoothness]))
pl.xticks([-25, 0, 25,50,75])
pl.xlabel('$\\Delta$ Age (Years)')
pl.yticks([0, .25, .5, .75, 1.0])
pl.ylabel('Autocovariance')

pl.axis([-30, 90, -.1, 1.1])

pl.legend(loc='upper right', fancybox=True, shadow=True)

pl.subplots_adjust(left=.1, bottom=.2, top=.95, right=.95, wspace=0)
pl.savefig('smoothness_covariance.png')
pl.savefig('smoothness_covariance.pdf')

### @export 'store-results'

book_graphics.save_json('age_pattern_covariance.json', vars())

Example #20
    C = mc.gp.FullRankCovariance(mc.gp.matern.euclidean,
                                 amp=1.,
                                 scale=rho[smoothness],
                                 diff_degree=2)

    pl.plot(mesh,
            C(mesh, [0]),
            marker='',
            color='black',
            linewidth=1,
            linestyle=linestyle[smoothness],
            zorder=1,
            label='%s ($\\rho = %d$)' %
            (smoothness.capitalize(), rho[smoothness]))
pl.xticks([-25, 0, 25, 50, 75])
pl.xlabel('$\\Delta$ Age (Years)')
pl.yticks([0, .25, .5, .75, 1.0])
pl.ylabel('Autocovariance')

pl.axis([-30, 90, -.1, 1.1])

pl.legend(loc='upper right', fancybox=True, shadow=True)

pl.subplots_adjust(left=.1, bottom=.2, top=.95, right=.95, wspace=0)
pl.savefig('smoothness_covariance.png')
pl.savefig('smoothness_covariance.pdf')

### @export 'store-results'

book_graphics.save_json('age_pattern_covariance.json', vars())
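This snippet relies on a `rho` dict, a `linestyle` dict, and an enclosing loop over smoothness levels defined earlier (Example #19 above shows part of that context). A sketch of the assumed preamble; the rho values below are placeholders, not the book's settings:

import pylab as pl
import pymc as mc

# Placeholder smoothing scales in years; the book defines its own values.
rho = dict(slightly=25., moderately=50., very=100.)
linestyle = dict(slightly='solid', moderately='dotted', very='dashed')

pl.figure()
for col, smoothness in enumerate(['slightly', 'moderately', 'very']):
    mesh = pl.arange(-101, 101, .1)
    # ... then build C and plot as in the snippet above.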
Example #21
pop_A_N = 50000
pop_B_prev = .006
pop_B_N = 50000

alpha = mc.Uninformative('alpha', value=10*pop_A_prev)
beta = mc.Uninformative('beta', value=10*(1-pop_A_prev))
pi = mc.Beta('pi', alpha, beta, value=[pop_A_prev, pop_B_prev, .02])
@mc.potential
def obs(pi=pi):
    return pop_A_prev*pop_A_N*pl.log(pi[0]) + (1-pop_A_prev)*pop_A_N*pl.log(1-pi[0]) \
        + pop_B_prev*pop_B_N*pl.log(pi[1]) + (1-pop_B_prev)*pop_B_N*pl.log(1-pi[1])
pop_C_N = 50000
pop_C_k = mc.Binomial('pop_C_k', pop_C_N, pi[2])
mc.MCMC([alpha, beta, pi, obs, pop_C_k]).sample(200000,100000,20, verbose=False, progress_bar=False)

pop_C_prev = pop_C_k.stats()['quantiles'][50] / float(pop_C_N)
pop_C_prev_per_1000 = '%.0f' % (pop_C_prev*1000)
print pop_C_prev_per_1000

pop_C_ui = pop_C_k.stats()['95% HPD interval'] / float(pop_C_N)
pop_C_ui_per_1000 = '[%.0f, %.0f]' % tuple(pop_C_ui*1000)
print pop_C_ui_per_1000


### @export 'save-vars'
book_graphics.save_json('beta_binomial_model.json', vars())

pl.show()


Example #22
                    1./((s_pred/(pi+zeta))**2 + sigma**2))) \
                - zeta

### @export 'offset-log-normal-fit-and-store'
mc.MCMC([pi, zeta, sigma, obs, pred]).sample(iter, burn, thin)

results['Offset lognormal'] = dict(pi=pi.stats(), pred=pred.stats())


### @export 'save'
pi_median = []
pi_spread = []
for i, k in enumerate(results):
    pi_median.append(results[k]['pi']['quantiles'][50])
    pi_spread.append(results[k]['pi']['95% HPD interval'][1]-results[k]['pi']['95% HPD interval'][0])
min_est = min(pi_median).round(4)
max_est = max(pi_median).round(4)
min_spread = min(pi_spread).round(4)
max_spread = max(pi_spread).round(4)


book_graphics.save_json('zero_forest.json', vars())

### master graphic of data and models, for zeros subsection of rate models section of stats chapter
book_graphics.forest_plot(r, n,
                          xmax=.0005,
                          model_keys=['Zero-inflated beta binomial', 'Beta binomial', 'Negative binomial', 'Lognormal',  'Offset lognormal'],
                          results=results,
                          pi_true=pi_true,
                          fname='zero_forest.pdf')
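The excerpt opens midway through the `pred` deterministic. A sketch of an offset lognormal rate model consistent with the visible tail of that expression and with the nodes passed to `mc.MCMC`; the data stand-ins `r`, `n`, `s` and the priors are assumptions, not the book's exact code:

import pylab as pl
import pymc as mc

# Illustrative data: observed rates, effective sample sizes, standard errors.
n = pl.array(16*[10000.])
r = mc.rbinomial(pl.array(16*[10000]), .0001) / n
s = pl.sqrt(r*(1-r)/n + 1e-8)
s_pred = .0001
iter, burn, thin = 20000, 10000, 10

pi = mc.Uniform('pi', lower=0, upper=1, value=.001)
zeta = mc.Uniform('zeta', lower=0, upper=.001, value=.0005)   # offset for zero rates
sigma = mc.Uniform('sigma', lower=0, upper=10, value=.01)     # extra log-scale spread

@mc.potential
def obs(pi=pi, zeta=zeta, sigma=sigma):
    # normal likelihood on the log of the offset rates
    return mc.normal_like(pl.log(r+zeta), pl.log(pi+zeta),
                          1./((s/(r+zeta))**2 + sigma**2))

@mc.deterministic
def pred(pi=pi, zeta=zeta, sigma=sigma):
    return pl.exp(mc.rnormal(pl.log(pi+zeta),
                             1./((s_pred/(pi+zeta))**2 + sigma**2))) - zeta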
Example #23
    fit_world.fit_world(dm, generate_diagnostic_plots=False, store_results=False, map_only=False)
    models[ii] = dm
    #results[ii] = dict(rate_stoch=dm.vars['prevalence+world+all+all']['rate_stoch'].stats(), dispersion=dm.vars['prevalence+world+all+all']['dispersion'].stats())

    ### @export 'save'
    for d in dm.data:
        d['sex'] = 'male'  # otherwise tile plot shows it twice


    reload(book_graphics)
    book_graphics.plot_age_patterns(dm, region='world', year='all', sex='all',
                                    rate_types='remission incidence prevalence'.split())   
    pl.subplot(1,3,1)
    pl.yticks([0, .04, .08], [0,4,8])
    pl.ylabel('Rate (Per 100 PY)')

    #pl.subplot(1,4,2)
    #pl.yticks([0, .04, .08], [0,4,8])

    pl.subplot(1,3,2)
    pl.yticks([0, .0025, .0051], [0,.25,.5])

    pl.subplot(1,3,3)
    pl.yticks([0, .025, .05], [0,2.5,5])

    pl.savefig('hep_c-consistent%d.pdf' % ii)

pl.show()

book_graphics.save_json('hep_c.json', results)
Example #24
for a_0 in range(101):
    for a_1 in range(101):
        if hist[a_0, a_1] <= 0.:
            continue

        x_i = .5 * (a_0 + a_1)
        y_i = a_1 - a_0 + 1  # epidemiologist convention: the age group 0-4 spans five years

        pl.semilogy([x_i], [y_i], 'o', color='none', ms=pl.sqrt(hist[a_0,a_1])*5+2, mec='k', mew=1)
        #pl.text(x_i, y_i, '%d'%hist[a_0, a_1], ha='center', va='center')

for v in [1, 5, 10]:
    if v == 1: pl.plot([-100], [1], 'o', color='none', ms=pl.sqrt(v)*5+2, mec='k', mew=1, label='%d Observation'%v)
    else: pl.plot([-100], [1], 'o', color='none', ms=pl.sqrt(v)*5+2, mec='k', mew=1, label='%d Observations'%v)

pl.legend(loc='lower left', fancybox=True, shadow=True, numpoints=1)

pl.xticks()
pl.yticks()

pl.xlabel('Mean of age group (years)')
pl.ylabel('Width of age group (years)')
pl.axis([-5, 110., .6, 500.])

pl.subplots_adjust(left=.1, right=.99, bottom=.15, top=.95)
pl.savefig('book/graphics/af_age_groups_scatter.pdf')

### @export 'save-results'
book_graphics.save_json('af_age_groups.json', {'most_freq_cnt': most_freq_cnt, 'rows_total': rows_total})

pl.show()
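The plotting loop above reads a 101x101 matrix `hist` that counts how many data rows use each (age_start, age_end) pair; `most_freq_cnt` and `rows_total` also come from that tabulation. A sketch of how such a matrix could be built from rows with integer age fields; the `rows` list and its field names are an assumption about the earlier part of the script:

import pylab as pl

# Hypothetical input: a list of dicts with integer age_start / age_end in [0, 100].
rows = [dict(age_start=0, age_end=4), dict(age_start=0, age_end=4),
        dict(age_start=30, age_end=45), dict(age_start=0, age_end=100)]

hist = pl.zeros((101, 101))
for row in rows:
    hist[row['age_start'], row['age_end']] += 1

rows_total = len(rows)
most_freq_cnt = int(hist.max())      # count of the most common age group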