# use_flag = False # else: # use_flag = ii # ii = 0 -> no covariates # dm.params['covariates']['Study_level'][cv]['rate']['value'] = use_flag # clear any fit and priors dm.clear_fit() dm.clear_empirical_prior() dismod3.neg_binom_model.covariate_hash = {} import fit_posterior data = dm.data fit_posterior.fit_posterior(dm, region, sex, year, map_only=False, store_results=False) dm.data = data # put data back in results[ii] = dm pl.figure(**book_graphics.quarter_page_params) r = pl.array([ dm.value_per_1(d) for d in data if dm.relevant_to(d, 'prevalence', region, year, sex) ]) n = [ d['effective_sample_size'] for d in data if dm.relevant_to(d, 'prevalence', region, year, sex) ] sorted_indices = r.argsort().argsort()
# NOTE(review): the first four statements are the tail of a model-setup helper
# whose `def` line is above this chunk (the `return dm` implies function
# scope); indentation reconstructed -- confirm against the full script.
    # discard any cached fit/priors before handing the model back
    dm.clear_fit()
    dm.clear_empirical_prior()
    dismod3.neg_binom_model.covariate_hash = {}
    return dm

# Fit a sequence of model variants on GBR-only data and collect them for
# side-by-side comparison.
models = []

# Variant 1: data as loaded (GBR only), with an increasing prior on
# excess-mortality for ages 25-100.
dm = initialize_model()
dm.description = 'As loaded, but only GBR data, no covariates'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
    [d for d in dm.data if d.get('country_iso3_code') == 'GBR'] + \
    [d for d in dm.data if dm.relevant_to(d, 'excess-mortality', 'all', 'all', 'all')]
dm.params['global_priors']['increasing']['excess_mortality'] = dict(age_start=25, age_end=100)
fit_posterior.fit_posterior(dm, region, sex, year, map_only=True, store_results=False)
models.append(dm)

# Variant 2: same data, but without the increasing prior.
dm = initialize_model()
dm.description = 'Without increasing prior on excess-mortality'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
    [d for d in dm.data if d.get('country_iso3_code') == 'GBR'] + \
    [d for d in dm.data if dm.relevant_to(d, 'excess-mortality', 'all', 'all', 'all')]
fit_posterior.fit_posterior(dm, region, sex, year, map_only=True, store_results=False)
models.append(dm)

# Variant 3: lower bound on excess-mortality.  (The data-selection statement
# is truncated here -- it continues in the next chunk of the file.)
dm = initialize_model()
dm.description = 'With lower bound of .2 on excess-mortality (to encourage good convergence)'
dm.data = [d for d in dm.data if dm.relevant_to(d, 'all-cause_mortality', region, year, sex)] + \
    [d for d in dm.data if dm.relevant_to(d, 'prevalence_x_excess-mortality', region, year, sex)] + \
pl.semilogy([1], [1]) Z = Y[Y["Rate type"] == "prevalence"].groupby("Age").apply(weighted_age) pl.plot(Z.mean(1).__array__(), color="red", linewidth=3, alpha=0.5, label="Inconsistent NA/ME") pl.legend() pl.axis([-5, 130, 1e-6, 2]) import dismod3 dm = dismod3.load_disease_model(19807) import fit_posterior fit_posterior.fit_posterior(dm, "north_africa_middle_east", "male", "2005", map_only=True) X = pandas.read_csv( "/var/tmp/dismod_working/test/dm-19807/posterior/dm-19807-north_africa_middle_east-male-2005.csv", index_col=None ) pl.figure() for iso in list(pl.unique(X["Iso3"])): pl.plot(X[(X["Iso3"] == iso)].filter(like="Draw").mean(1).__array__(), label=iso) pl.semilogy([1], [1]) Z = X.groupby("Age").apply(weighted_age) plot(Z.mean(1).__array__(), color="red", linewidth=3, alpha=0.5, label="Inconsistent NA/ME") plot( dm.vars["prevalence+north_africa_middle_east+2005+male"]["rate_stoch"].stats()["mean"],
dm.params['global_priors']['level_value']['prevalence']['age_before'] = 15 dm.params['global_priors']['level_value']['prevalence']['age_after'] = 50 dm.params['global_priors']['smoothness']['prevalence']['age_start'] = 15 dm.params['covariates']['Country_level']['LDI_id_Updated_7July2011']['rate']['value'] = 0 # clear any fit and priors dm.clear_fit() dm.clear_empirical_prior() dismod3.neg_binom_model.covariate_hash = {} import fit_posterior data = dm.data fit_posterior.fit_posterior(dm, region, sex, year, map_only=faster_run_flag, store_results=False) dm.data = data # put data back in results[grid] = dm try: results['dic_%s'%grid] = dm.mcmc.dic except Exception, e: print e results['dic_%s'%grid] = 'TK' pl.figure(**book_graphics.quarter_page_params) for ii, grid in enumerate('abcd'): dm = results[grid] pl.subplot(1,4,ii+1) dismod3.plotting.plot_intervals(dm, [d for d in dm.data if dm.relevant_to(d, 'prevalence', region, year, sex)], color='black', print_sample_size=False, alpha=1., plot_error_bars=False,
# --- expert priors and other model settings ---------------------------------
# (assignments below are independent dict writes; grouped by rate type)
dm.params['global_priors']['level_value']['prevalence']['age_before'] = 10
dm.params['global_priors']['level_value']['incidence']['age_before'] = 10
dm.params['global_priors']['level_value']['incidence']['age_after'] = 99
#dm.params['global_priors']['smoothness']['incidence']['age_start'] = 10
dm.params['global_priors']['level_bounds']['remission']['upper'] = .05
dm.params['global_priors']['smoothness']['relative_risk']['amount'] = 'Very'

# covariate toggles: study-level cv_past_year on, country-level LDI off
dm.params['covariates']['Study_level']['cv_past_year']['rate']['value'] = 1
dm.params['covariates']['Country_level']['LDI_id_Updated_7July2011']['rate']['value'] = 0

# --- drop any cached fit and priors before refitting ------------------------
dm.clear_fit()
dm.clear_empirical_prior()
dismod3.neg_binom_model.covariate_hash = {}

# --- fit: world model first, then one region-sex-year posterior -------------
#dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence')
import fit_world
fit_world.fit_world(dm)
import fit_posterior
fit_posterior.fit_posterior(dm, 'north_america_high_income', 'female', '2005')

### @export 'save'
book_graphics.save_json('bipolar.json', vars())
# Summary statistics of the prevalence data, quoted per 100 in the text.
r = pl.array([dm.value_per_1(s) for s in prev_data])
median_rate_per_100 = '%d' % round(pl.median(r*100))
min_rate_per_100 = '%d' % round(r.min()*100)
max_rate_per_100 = '%d' % round(r.max()*100)

# How many distinct GBD regions contribute data.
regions = pl.array([d['gbd_region'] for d in prev_data])
num_regions = len(pl.unique(regions))

import fit_world
#fit_world.fit_world(dm)
#dm.data = prev_data # put data back in

# Fit the posterior for a single region-sex-year; the fit mutates dm.data,
# so restore it afterwards.
import fit_posterior
region = 'north_america_high_income'
sex = 'female'
year = '2005'
fit_posterior.fit_posterior(dm, region, sex, year, map_only=faster_run_flag, store_results=False)
dm.data = prev_data  # put data back in

# Panel (a): every prevalence interval, all regions/sexes/years.
pl.figure(**book_graphics.quarter_page_params)
pl.subplot(1, 2, 1)
relevant = [d for d in dm.data if dm.relevant_to(d, 'prevalence', 'all', 'all', 'all')]
dismod3.plotting.plot_intervals(dm, relevant,
                                color='black', print_sample_size=False,
                                alpha=.75, plot_error_bars=False, linewidth=1)
pl.axis([10, 60, -.01, 1])
pl.yticks([0, .25, .5, .75])
pl.ylabel('Prevalence (per 1)')
pl.xlabel('Age (years)')
pl.title('a) All data')

pl.subplot(1, 2, 2)
# Compare inconsistent vs consistent fits for North Africa / Middle East,
# male, 2005 (model 19807): country draws plus the weighted age pattern,
# on a semilog axis.
pl.semilogy([1],[1])
Z = Y[Y['Rate type'] == 'prevalence'].groupby('Age').apply(weighted_age)
pl.plot(Z.mean(1).__array__(), color='red', linewidth=3, alpha=.5, label='Inconsistent NA/ME')
pl.legend()
pl.axis([-5,130,1e-6,2])

import dismod3
dm = dismod3.load_disease_model(19807)
import fit_posterior
fit_posterior.fit_posterior(dm, 'north_africa_middle_east', 'male', '2005', map_only=True)

X = pandas.read_csv('/var/tmp/dismod_working/test/dm-19807/posterior/dm-19807-north_africa_middle_east-male-2005.csv', index_col=None)

pl.figure()
# one curve per country: mean across posterior draws
for iso in list(pl.unique(X['Iso3'])):
    pl.plot(X[(X['Iso3']==iso)].filter(like='Draw').mean(1).__array__(), label=iso)
pl.semilogy([1],[1])

Z = X.groupby('Age').apply(weighted_age)
# FIX: bare `plot` would raise NameError unless pylab was star-imported;
# use the `pl.` prefix like every other plotting call in this script.
pl.plot(Z.mean(1).__array__(), color='red', linewidth=3, alpha=.5, label='Inconsistent NA/ME')
pl.plot(dm.vars['prevalence+north_africa_middle_east+2005+male']['rate_stoch'].stats()['mean'],
        color='red', linewidth=3, alpha=.5, label='Mean of Consistent NA/ME')
pl.legend()
# Expert priors and other model parameters for this example.
# Level-value windows for incidence and prevalence:
dm.params['global_priors']['level_value']['incidence']['age_before'] = 10
dm.params['global_priors']['level_value']['incidence']['age_after'] = 99
dm.params['global_priors']['level_value']['prevalence']['age_before'] = 10
#dm.params['global_priors']['smoothness']['incidence']['age_start'] = 10
# Bound remission above; smooth relative risk heavily.
dm.params['global_priors']['level_bounds']['remission']['upper'] = .05
dm.params['global_priors']['smoothness']['relative_risk']['amount'] = 'Very'
# Covariates: LDI country-level covariate off, cv_past_year study-level on.
dm.params['covariates']['Country_level']['LDI_id_Updated_7July2011']['rate']['value'] = 0
dm.params['covariates']['Study_level']['cv_past_year']['rate']['value'] = 1

# Clear any cached fit and priors before refitting.
dm.clear_fit()
dm.clear_empirical_prior()
dismod3.neg_binom_model.covariate_hash = {}

# World fit, then the posterior for one region-sex-year.
#dismod3.neg_binom_model.fit_emp_prior(dm, 'prevalence')
import fit_world
fit_world.fit_world(dm)
import fit_posterior
fit_posterior.fit_posterior(dm, 'north_america_high_income', 'female', '2005')

### @export 'save'
book_graphics.save_json('bipolar.json', vars())