# assumed module-level imports for this excerpt; project helpers
# (get_pop, relative_rate_split, etc.) are imported elsewhere
from datetime import datetime

import db_queries
import numpy as np


def get_computed_dataframe(self, df, location_meta_df):
    """Split value_column into detailed age and sex groups.

    Applies a relative rate splitting algorithm with a K-multiplier
    that adjusts for the specific population that the data to be split
    applies to.

    Arguments and Attributes:
        df (pandas.DataFrame): must contain all columns needed to merge
            on population: ['location_id', 'age_group_id', 'sex_id',
            'year_id']. Must be unique on id_cols.
        id_cols (list): list of columns that must exist in df and
            identify observations. Used to preserve df in every way
            except for splitting value_column, age_group_id, and sex_id.
        pop_run_id (int): which population version to use
        cause_set_version_id (int): which cause set version id to use
        value_column (str): must be a column in df that contains values
            to be split
        gbd_round_id (int): which GBD round to use
        gbd_team_for_ages (str): which GBD team to use when calling the
            shared function db_queries.get_demographics

    Returns:
        split_df (pandas.DataFrame): contains all the columns passed in
            df, but all age_group_id values will be detailed, all
            sex_ids will be detailed (1, 2), and val will be split into
            these detailed ids.
    """
    # set cache options
    standard_cache_options = {
        'force_rerun': False,
        'block_rerun': True,
        'cache_dir': "standard",
        'cache_results': False
    }
    verbose = self.verbose
    value_column = self.value_column
    pop_run_id = self.pop_run_id
    cause_set_version_id = self.cause_set_version_id
    gbd_round_id = self.conf.get_id('gbd_round')
    id_cols = self.id_cols
    gbd_team_for_ages = self.gbd_team_for_ages

    orig_val_sum = df[self.value_column].sum()

    # pull in relevant populations
    if verbose:
        print("[{}] Prepping population".format(str(datetime.now())))
    locations_in_data = list(set(df.location_id))
    mapping_to_country_location_id = get_country_level_location_id(
        locations_in_data, location_meta_df)
    # map each subnational location to its country
    df = df.merge(mapping_to_country_location_id, how='left',
                  on='location_id')
    df.rename(columns={'location_id': 'orig_location_id'}, inplace=True)
    df['location_id'] = df['country_location_id']
    df.drop('country_location_id', axis=1, inplace=True)
    country_locations_in_data = list(df['location_id'].unique())
    years_in_data = list(set(df.year_id))
    pop_df = get_pop(pop_run_id=pop_run_id, **standard_cache_options)
    pop_df = pop_df.loc[
        (pop_df['location_id'].isin(country_locations_in_data)) &
        (pop_df['year_id'].isin(years_in_data))]
    # what columns identify population data
    pop_id_cols = ['location_id', 'age_group_id', 'sex_id', 'year_id']
    assert not pop_df[pop_id_cols].duplicated().any()

    # pull causes table
    if verbose:
        print("[{}] Prepping cause metadata".format(str(datetime.now())))
    cause_meta_df = get_current_cause_hierarchy(
        cause_set_version_id=cause_set_version_id,
        **standard_cache_options)

    # pull age sex weights
    if verbose:
        print("[{}] Prepping age sex weights".format(str(datetime.now())))
    dist_df = get_cause_age_sex_distributions(
        distribution_set_version_id=self.distribution_set_version_id,
        **standard_cache_options)
    keep_cols = ['cause_id', 'age_group_id', 'sex_id', 'weight']
    dist_df = dist_df[keep_cols]

    # pull age detail map
    if verbose:
        print("[{}] Prepping age agg to detail "
              "map".format(str(datetime.now())))
    age_detail_map = getcache_age_aggregate_to_detail_map(
        gbd_round_id=gbd_round_id, **standard_cache_options)

    # create map from aggregate sex ids to detail sex ids
    if verbose:
        print("[{}] Prepping sex detail map".format(str(datetime.now())))
    sex_detail_map = AgeSexSplitter.prep_sex_aggregate_to_detail_map()

    detail_maps = {
        'age_group_id': age_detail_map,
        'sex_id': sex_detail_map
    }

    dist_causes = dist_df.cause_id.unique()
    if verbose:
        print("[{}] Prepping cause_id to weight cause "
              "map".format(str(datetime.now())))
    cause_to_weight_cause_map = \
        AgeSexSplitter.prep_cause_to_weight_cause_map(
            cause_meta_df, dist_causes)
    val_to_dist_maps = {'cause_id': cause_to_weight_cause_map}

    # which columns are to be split
    split_cols = ['age_group_id', 'sex_id']
    split_inform_cols = ['cause_id']
    value_cols = [value_column]

    if verbose:
        print("[{}] Running RR splitting "
              "algorithm".format(str(datetime.now())))
    split_df = relative_rate_split(df, pop_df, dist_df, detail_maps,
                                   split_cols, split_inform_cols,
                                   pop_id_cols, value_cols,
                                   pop_val_name='population',
                                   val_to_dist_map_dict=val_to_dist_maps,
                                   verbose=verbose)
    df.drop('location_id', axis=1, inplace=True)
    df.rename(columns={'orig_location_id': 'location_id'}, inplace=True)

    if self.collect_diagnostics:
        # making this optional because of memory usage
        self.diag_df = split_df.copy()

    group_columns = list(df.columns)
    group_columns.remove(value_column)
    if verbose:
        print("[{}] Collapsing result".format(str(datetime.now())))
    split_df = split_df.groupby(group_columns,
                                as_index=False)[value_column].sum()

    if verbose:
        print("[{}] Asserting valid results".format(str(datetime.now())))
    val_diff = abs(split_df[value_column].sum() - orig_val_sum)
    if not np.allclose(split_df[value_column].sum(), orig_val_sum):
        text = "Difference of {} {} from age sex " \
               "splitting".format(val_diff, value_column)
        raise AssertionError(text)
    # check that all age group ids are detailed
    good_age_group_ids = db_queries.get_demographics(
        gbd_team_for_ages, gbd_round_id=gbd_round_id)['age_group_id']
    bad = set(split_df.age_group_id) - set(good_age_group_ids)
    if len(bad) > 0:
        text = "Some age group ids still aggregate: {}".format(bad)
        raise AssertionError(text)
    # should be the same set of cause ids
    assert set(split_df.cause_id) == set(df.cause_id)

    return split_df
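# ----------------------------------------------------------------------
# A minimal, self-contained sketch of the relative-rate idea described in
# the docstring above: detailed expected values are weight * population,
# and a K-multiplier rescales them so the split preserves the aggregate
# total for the specific population being split. The column names and toy
# numbers are illustrative assumptions, not the actual relative_rate_split
# implementation.
import pandas as pd

# one aggregate observation: 100 deaths reported for both sexes combined
agg_value = 100.0

# the detailed groups the aggregate covers: a global rate pattern
# (weights) and the population the data actually applies to
detail = pd.DataFrame({
    'sex_id': [1, 2],
    'weight': [0.6, 0.4],         # global age-sex rate pattern
    'population': [4000, 6000],   # population of this location-year
})

# expected deaths under the global pattern applied to this population
detail['expected'] = detail['weight'] * detail['population']

# K rescales expectations so the detailed values sum to the aggregate
k = agg_value / detail['expected'].sum()
detail['split_value'] = k * detail['expected']

# here the larger female population exactly offsets the lower female
# rate, so the 100 deaths split 50/50
assert abs(detail['split_value'].sum() - agg_value) < 1e-9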
def run_phase(df, csvid, nid, extract_type_id, lsvid, pop_run_id,
              cmvid, launch_set_id, remove_decimal,
              write_diagnostics=True):
    """String together processes for redistribution."""
    # what to do about caching throughout the phase
    read_file_cache_options = {
        'block_rerun': True,
        'cache_dir': CACHE_DIR,
        'force_rerun': False,
        'cache_results': False
    }
    # the iso3 of this data
    iso3 = get_value_from_nid(nid, 'iso3', extract_type_id=extract_type_id,
                              location_set_version_id=lsvid)
    # the code system id
    code_system_id = int(
        get_value_from_nid(nid, 'code_system_id',
                           extract_type_id=extract_type_id))
    # the data type
    data_type_id = get_value_from_nid(nid, 'data_type_id',
                                      extract_type_id=extract_type_id)
    # cause map
    cause_map = get_cause_map(code_map_version_id=cmvid,
                              **read_file_cache_options)

    orig_deaths_sum = int(df['deaths'].sum())

    if remove_decimal:
        print_log_message("Removing decimal from code map")
        cause_map['value'] = cause_map['value'].apply(
            lambda x: x.replace(".", ""))

    if needs_garbage_correction(iso3, data_type_id):
        print_log_message("Correcting Garbage for {}".format(iso3))
        orig_gc_sum = int(df.query('cause_id == 743')['deaths'].sum())

        cause_meta_df = get_current_cause_hierarchy(
            cause_set_version_id=csvid, **read_file_cache_options)
        # get age group ids
        age_meta_df = get_ages(**read_file_cache_options)
        loc_meta_df = get_current_location_hierarchy(
            location_set_version_id=lsvid, **read_file_cache_options)
        pop_meta_df = get_pop(pop_run_id=pop_run_id,
                              **read_file_cache_options)

        # move garbage to hiv first
        hiv_corrector = HIVCorrector(df, iso3, code_system_id, pop_meta_df,
                                     cause_meta_df, loc_meta_df, age_meta_df,
                                     correct_garbage=True)
        df = hiv_corrector.get_computed_dataframe()

        after_gc_sum = int(df.query('cause_id == 743')['deaths'].sum())
        after_deaths_sum = int(df['deaths'].sum())
        print_log_message("""
            Stage [gc deaths / total deaths]
            Before GC correction [{gco} / {to}]
            After GC correction [{gca} / {ta}]
        """.format(gco=orig_gc_sum, to=orig_deaths_sum,
                   gca=after_gc_sum, ta=after_deaths_sum))

    df = add_code_metadata(df, ['value', 'code_system_id'],
                           code_map=cause_map, **read_file_cache_options)
    # recognizing that it is weird for code_system_id to come from two
    # places, make sure they are consistent
    assert (df['code_system_id'] == code_system_id).all(), "Variable code " \
        "system id {} did not agree with all values of df code " \
        "system id: \n{}".format(
            code_system_id, df.loc[df['code_system_id'] != code_system_id])

    print_log_message("Formatting data for redistribution")
    # do we have all the packages we need?
    # verify_packages(df)
    # format age groups to match package parameters
    df = format_age_groups(df)
    # drop observations with 0 deaths
    df = drop_zero_deaths(df)
    # merge on redistribution location hierarchy
    df = add_rd_locations(df, lsvid)
    # fill in any missing stuff that may have come from rd hierarchy
    df = fill_missing_df(df, verify_all=True)
    # create split groups
    # NO SPLIT GROUP NEEDED
    df = add_split_group_id_column(df)
    # final check to make sure we have all the necessary columns
    df = format_columns_for_rd(df, code_system_id)

    split_groups = list(df.split_group.unique())
    parallel = len(split_groups) > 1

    print_log_message("Submitting/Running split groups")
    for split_group in split_groups:
        # remove intermediate files from previous run
        delete_split_group_output(nid, extract_type_id, split_group)
        # save to file
        split_df = df.loc[df['split_group'] == split_group]
        write_split_group_input(split_df, nid, extract_type_id, split_group)
        # submit jobs or just run them here
        if parallel:
            submit_split_group(nid, extract_type_id, split_group,
                               code_system_id, launch_set_id)
        else:
            worker_main(nid, extract_type_id, split_group, code_system_id)

    if parallel:
        print_log_message("Waiting for splits to complete...")
        # wait until all jobs for a given nid have completed
        # eventually need logic for files not being present
        wait('claude_redistributionworker_{}'.format(nid), 30)
        # this seems to be necessary to wait for files
        print_log_message("Done waiting. Appending them together")

    # append split groups together
    df = read_append_split_groups(split_groups, nid, extract_type_id,
                                  cause_map)
    print_log_message("Done appending files - {} rows assembled".format(
        len(df)))
    df = revert_variables(df)

    after_deaths_sum = int(df['deaths'].sum())
    before_after_text = """
        Before GC redistribution: {a}
        After GC redistribution: {b}
    """.format(a=orig_deaths_sum, b=after_deaths_sum)
    diff = abs(orig_deaths_sum - after_deaths_sum)
    # bad if change 2% or 5 deaths, whichever is greater
    # (somewhat arbitrary, just trying to avoid annoying/non-issue failures)
    diff_threshold = max(.02 * orig_deaths_sum, 5)
    if not diff < diff_threshold:
        raise AssertionError("Deaths not close.\n" + before_after_text)
    else:
        print_log_message(before_after_text)

    return df
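# ----------------------------------------------------------------------
# The closing check above tolerates a drift of at most 2% of the original
# deaths or 5 deaths, whichever is greater. A sketch of that same check
# as a standalone helper (the pipeline does this inline; the function
# name here is hypothetical):
def check_deaths_close(before, after, rel_tol=0.02, abs_tol=5.0):
    """Raise if a deaths total drifted more than the allowed tolerance."""
    threshold = max(rel_tol * before, abs_tol)
    diff = abs(before - after)
    if not diff < threshold:
        raise AssertionError(
            "Deaths not close: before={}, after={}, diff={}, "
            "threshold={}".format(before, after, diff, threshold))

# a 1.5% drift on 1000 deaths passes (15 < 20) ...
check_deaths_close(1000, 985)
# ... while a 3% drift would raise (30 >= 20)
# check_deaths_close(1000, 970)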
def run_phase(df, nid, extract_type_id, env_run_id, pop_run_id,
              location_set_version_id, cause_set_version_id):
    cache_dir = CONF.get_directory('db_cache')
    source = get_value_from_nid(
        nid, 'source', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id)
    data_type_id = get_value_from_nid(
        nid, 'data_type_id', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id)
    iso3 = get_value_from_nid(
        nid, 'iso3', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id)
    standard_cache_options = {
        'force_rerun': False,
        'block_rerun': True,
        'cache_dir': cache_dir,
        'cache_results': False
    }

    # ************************************************************
    # Get cached metadata
    # ************************************************************
    print_log_message("Getting cached db resources")
    location_hierarchy = get_current_location_hierarchy(
        location_set_version_id=location_set_version_id,
        **standard_cache_options)
    pop_df = get_pop(pop_run_id=pop_run_id, **standard_cache_options)
    env_df = get_env(env_run_id=env_run_id, **standard_cache_options)
    age_weight_df = get_age_weights(**standard_cache_options)
    cause_meta_df = get_current_cause_hierarchy(
        cause_set_version_id=cause_set_version_id, **standard_cache_options)
    age_meta_df = get_ages(**standard_cache_options)

    # ************************************************************
    # RAKING
    # ************************************************************
    # rake if appropriate based on this logic
    if ((data_type_id in [8, 9, 10] and (source != 'Other_Maternal'))
            or source in MATERNAL_NR_SOURCES):
        if source not in NOT_RAKED_SOURCES:
            print_log_message("Raking sub national estimates")
            raker = Raker(df, source)
            df = raker.get_computed_dataframe(location_hierarchy)
    # for the Other_Maternal source we only rake household surveys
    elif source == "Other_Maternal":
        model_groups = get_datasets(
            nid, extract_type_id, block_rerun=True,
            force_rerun=False).model_group.unique()
        assert len(model_groups) == 1
        model_group = model_groups[0]

        if "HH_SURVEYS" in model_group:
            if model_group == 'MATERNAL-HH_SURVEYS-IND':
                print_log_message("Raking sub national estimates, applying "
                                  "double raking for India Maternal")
                raker = Raker(df, source, double=True)
                df = raker.get_computed_dataframe(location_hierarchy)
            else:
                print_log_message("Raking sub national estimates")
                raker = Raker(df, source)
                df = raker.get_computed_dataframe(location_hierarchy)

    # ************************************************************
    # DROP ZERO SAMPLE SIZE AND RESTRICTED AGE/SEX DATA
    # ************************************************************
    # data with zero sample size is almost certainly some anomalous result
    # of a program generating data it shouldn't have, and it shouldn't be
    # included in codem models. Was probably already dropped, anyway,
    # before running noise reduction.
    df = df.query('sample_size != 0')
    # uploading data before 1980 is a waste of space because neither codem
    # nor codviz use it
    df = df.loc[df['year_id'] >= 1980]

    print_log_message("Enforcing age sex restrictions")
    # this actually drops data from the dataframe if it violates age/sex
    # restrictions (e.g. male maternity disorders)
    df = enforce_asr(df, cause_meta_df, age_meta_df)

    # ************************************************************
    # FIT EACH DRAW TO NON-ZERO FLOOR
    # ************************************************************
    print_log_message("Fitting to non-zero floor...")
    nonzero_floorer = NonZeroFloorer(df)
    df = nonzero_floorer.get_computed_dataframe(pop_df, env_df,
                                                cause_meta_df)

    # ************************************************************
    # AGE AGGREGATION
    # ************************************************************
    print_log_message("Creating age standardized and all ages groups")
    age_aggregator = AgeAggregator(df, pop_df, env_df, age_weight_df)
    df = age_aggregator.get_computed_dataframe()

    # ************************************************************
    # Make CODEm and CoDViz metrics for uncertainty
    # ************************************************************
    # columns that should be present in the phase output
    final_cols = [
        'age_group_id', 'cause_id', 'cf_corr', 'cf_final', 'cf_raw',
        'cf_rd', 'extract_type_id', 'location_id', 'nid', 'sample_size',
        'sex_id', 'site_id', 'year_id'
    ]
    # use draws to make metrics for uncertainty to be used by CODEm and
    # CoDViz; also creates cf_final from mean of draws
    print_log_message("Making metrics for CODEm and CoDViz")
    if dataset_has_redistribution_variance(data_type_id, source):
        df = RedistributionVarianceEstimator.make_codem_codviz_metrics(
            df, pop_df)
        final_cols += [
            'cf_final_high_rd', 'cf_final_low_rd',
            'variance_rd_log_dr', 'variance_rd_logit_cf'
        ]

    # we did this in the old code: no cfs over 1 or below 0
    for cf_col in ['cf_final', 'cf_rd', 'cf_raw', 'cf_corr']:
        df.loc[df[cf_col] > 1, cf_col] = 1
        df.loc[df[cf_col] < 0, cf_col] = 0

    df = df[final_cols]
    return df
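# ----------------------------------------------------------------------
# A toy illustration of what raking does conceptually: scale subnational
# values so they sum to the national total. This is a sketch only; the
# actual Raker operates per cause/age/sex/year against the location
# hierarchy, and the column names below are illustrative.
import pandas as pd

national_deaths = 120.0
subnat = pd.DataFrame({
    'location_id': [101, 102, 103],
    'deaths': [30.0, 50.0, 60.0],   # sums to 140, not 120
})

# one multiplicative factor aligns the subnationals with the national
factor = national_deaths / subnat['deaths'].sum()
subnat['deaths_raked'] = subnat['deaths'] * factor

assert abs(subnat['deaths_raked'].sum() - national_deaths) < 1e-9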
def run_phase(df, nid, extract_type_id, pop_run_id, cause_set_version_id,
              location_set_version_id):
    """Run the full phase, chaining together computational elements."""
    # get filepaths
    cache_dir = CONF.get_directory('db_cache')
    orig_deaths = df['deaths'].sum()

    standard_cache_options = {
        'force_rerun': False,
        'block_rerun': True,
        'cache_dir': cache_dir,
        'cache_results': False
    }

    code_system_id = get_value_from_nid(nid, 'code_system_id',
                                        extract_type_id=extract_type_id)
    code_system = get_code_system_from_id(code_system_id,
                                          **standard_cache_options)
    source = get_value_from_nid(nid, 'source',
                                extract_type_id=extract_type_id)
    data_type_id = get_value_from_nid(
        nid, 'data_type_id', extract_type_id=extract_type_id,
        location_set_version_id=location_set_version_id)

    # get cause hierarchy
    cause_meta_df = get_current_cause_hierarchy(
        cause_set_version_id=cause_set_version_id, **standard_cache_options)

    is_vr = data_type_id in [9, 10]

    # run hiv correction on VR, but not Other_Maternal; countries to
    # correct will be further pruned by the master cause selections csv
    # in the hiv corrector class
    if not skip_hiv_correction(source) and is_vr:
        # get location hierarchy
        loc_meta_df = get_current_location_hierarchy(
            location_set_version_id=location_set_version_id,
            **standard_cache_options)
        # get population
        pop_meta_df = get_pop(pop_run_id=pop_run_id,
                              **standard_cache_options)
        # get age metadata
        age_meta_df = get_ages(**standard_cache_options)
        # get the country
        iso3 = get_value_from_nid(
            nid, 'iso3', extract_type_id=extract_type_id,
            location_set_version_id=location_set_version_id)
        assert pd.notnull(iso3), "Could not find iso3 for nid {}, " \
            "extract_type_id {}".format(nid, extract_type_id)

        hiv_corrector = HIVCorrector(df, iso3, code_system_id, pop_meta_df,
                                     cause_meta_df, loc_meta_df, age_meta_df,
                                     correct_garbage=False)
        print_log_message("Running hiv correction for iso3 {}".format(iso3))
        df = hiv_corrector.get_computed_dataframe()

    if needs_injury_redistribution(source):
        print_log_message("Correcting injuries")
        if 'loc_meta_df' not in vars():
            # get location hierarchy
            loc_meta_df = get_current_location_hierarchy(
                location_set_version_id=location_set_version_id,
                **standard_cache_options)
        injury_redistributor = InjuryRedistributor(df, loc_meta_df,
                                                   cause_meta_df)
        df = injury_redistributor.get_computed_dataframe()

    # apply redistribution of LRI to tb in under 15, non-neonatal ages
    # based on location/year specific proportions
    print_log_message(
        "Applying special redistribution of LRI to TB in under 15")
    lri_tb_redistributor = LRIRedistributor(df, cause_meta_df)
    df = lri_tb_redistributor.get_computed_dataframe()

    # merge in raw and rd here because recodes and bridge mapping should
    # also apply to the causes that are in previous phases (raw deaths for
    # secret codes need to be moved up to their parent cause, for example)
    df = combine_with_rd_raw(df, nid, extract_type_id,
                             location_set_version_id)

    val_cols = ['deaths', 'deaths_raw', 'deaths_corr', 'deaths_rd']

    # run china VR rescaling
    if needs_subnational_rescale(source):
        china_rescaler = ChinaHospitalUrbanicityRescaler()
        df = china_rescaler.get_computed_dataframe(df)

    if needs_strata_collapse(source):
        # set site id to blank site id and collapse
        df['site_id'] = 2
        group_cols = list(set(df.columns) - set(val_cols))
        df = df.groupby(group_cols, as_index=False)[val_cols].sum()

    if is_vr:
        # drop if deaths are 0 across all current deaths columns
        df = df.loc[df[val_cols].sum(axis=1) != 0]

    # restrict causes based on code system
    print_log_message("Running bridge mapper")
    bridge_mapper = BridgeMapper(source, cause_meta_df, code_system)
    df = bridge_mapper.get_computed_dataframe(df)

    # run recodes based on expert opinion
    print_log_message("Enforcing some very hard priors (expert opinion)")
    expert_opinion_recoder = Recoder(cause_meta_df, source, code_system_id,
                                     data_type_id)
    df = expert_opinion_recoder.get_computed_dataframe(df)

    end_deaths = df['deaths'].sum()

    print_log_message("Checking no large loss or gain of deaths")
    if abs(orig_deaths - end_deaths) >= (.1 * end_deaths):
        diff = round(abs(orig_deaths - end_deaths), 2)
        old = round(abs(orig_deaths))
        new = round(abs(end_deaths))
        raise AssertionError("Change of {} deaths [{}] to [{}]".format(
            diff, old, new))

    return df
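# ----------------------------------------------------------------------
# The strata collapse above (needs_strata_collapse) groups on every
# column except the value columns and sums. A toy version with made-up
# rows, showing two site strata merging into the blank site id:
import pandas as pd

toy = pd.DataFrame({
    'location_id': [6, 6],
    'cause_id': [302, 302],
    'site_id': [5, 7],          # two strata to be merged
    'deaths': [3.0, 4.0],
    'deaths_raw': [3.5, 4.5],
})
val_cols = ['deaths', 'deaths_raw']
toy['site_id'] = 2  # blank site id
group_cols = list(set(toy.columns) - set(val_cols))
collapsed = toy.groupby(group_cols, as_index=False)[val_cols].sum()

# one row remains, with deaths summed across the former strata
assert len(collapsed) == 1 and collapsed['deaths'].iloc[0] == 7.0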