def run_world(year, cascade, drop_emr=False, reimport=False):
    cl = Cascade_loc(1, 0, year, cascade, timespan=50, reimport=reimport)
    if drop_emr:
        cl.gen_data(1, 0, drop_emr=True)
    cl.run_dismod()
    cl.summarize_posterior()
    cl.draw()
    cl.predict()
    return cascade


def run_loc(args):
    '''Run dismod for a single location. Meant to be called in parallel using
    multiprocessing.

    Args:
        args (Tuple[int, int, int, bool, bool]): tuple of
            (location_id, sex_id, year_id, full_timespan, debug)

    Returns:
        Tuple of location_id and either a string error message or the integer
        0, representing no error
    '''
    gc.collect()
    loc_id, sex_id, year, full_timespan, debug = args
    if debug:
        if full_timespan:
            cl = Cascade_loc(loc_id, sex_id, year, cascade, timespan=50,
                             parent_loc=cl_parent)
        else:
            cl = Cascade_loc(loc_id, sex_id, year, cascade,
                             parent_loc=cl_parent)
        cl.run_dismod()
        cl.summarize_posterior()
        cl.draw()
        cl.predict()
        return loc_id, 0
    else:
        try:
            if full_timespan:
                cl = Cascade_loc(loc_id, sex_id, year, cascade, timespan=50,
                                 parent_loc=cl_parent)
            else:
                cl = Cascade_loc(loc_id, sex_id, year, cascade,
                                 parent_loc=cl_parent)
            cl.run_dismod()
            cl.summarize_posterior()
            cl.draw()
            cl.predict()
            return loc_id, 0
        except Exception as e:
            logging.exception("Failure running location {}".format(loc_id))
            return loc_id, str(e)


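# Hypothetical dispatch sketch (an assumption, not part of the source):
# run_loc reads the module-level `cascade` and `cl_parent` objects, so each
# arg tuple only carries the per-location parameters. The helper name
# `run_children` and its arguments are illustrative only.
import multiprocessing as mp


def run_children(child_ids, sex_id, year, full_timespan=False, debug=False):
    args = [(loc, sex_id, year, full_timespan, debug) for loc in child_ids]
    with mp.Pool(min(mp.cpu_count(), 8)) as pool:
        results = pool.map(run_loc, args)
    # Locations that returned a string error message instead of the 0 success code
    return {loc: err for loc, err in results if err != 0}

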
def run_loc(args):
    try:
        loc_id, parent_loc, sex_id, year = args
        cl = Cascade_loc(loc_id, sex_id, year, c, parent_loc=parent_loc)
        cl.run_dismod()
        cl.summarize_posterior()
        cl.draw()
        return loc_id, cl
    except Exception as e:
        print(loc_id, e)
        return loc_id, None


def run_world(year, cascade, drop_emr=False, reimport=False):
    '''Instantiate a global Cascade_loc object and run dismod. Because it runs
    dismod, this function reads from and writes to the file system.

    Args:
        year (int): Year to run dismod for
        cascade (drill.cascade): Cascade object of the model_version_id we're
            running dismod for
        drop_emr (bool, False): If True, exclude excess mortality data before
            running dismod
        reimport (bool, False): If True, read input data from the database via
            an importer.Importer object. If False, rely on cached csv files
            written during a previous cascade instantiation (if a file is
            missing, it will automatically be read from the db)

    Returns:
        The cascade object that was passed in
    '''
    log = logging.getLogger(__name__)

    cl = Cascade_loc(1, 0, year, cascade, timespan=50, reimport=reimport)
    if drop_emr:
        cl.gen_data(1, 0, drop_emr=True)

    log.info("Starting dismod for year {} loc {}".format(year, cl.loc))
    cl.run_dismod()
    log.info("dismod finished")

    log.info("summarizing posterior")
    cl.summarize_posterior()
    log.info("summarizing posterior finished")

    log.info("begin draw")
    cl.draw()
    log.info("draw finished")

    log.info("beginning predict")
    cl.predict()
    log.info("predict finished")

    return cascade


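# Minimal usage sketch (assumed driver code, not from the source). It assumes
# `cascade` is an already-built drill.cascade object for the model version
# being run; the year and flag values below are arbitrary examples.
import logging

logging.basicConfig(level=logging.INFO)
cascade = run_world(2005, cascade, drop_emr=False, reimport=True)

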
def run_loc(args):
    gc.collect()
    loc_id, sex_id, year, full_timespan, debug = args
    if debug:
        if full_timespan:
            cl = Cascade_loc(loc_id, sex_id, year, c, timespan=50,
                             parent_loc=cl_parent)
        else:
            cl = Cascade_loc(loc_id, sex_id, year, c, parent_loc=cl_parent)
        cl.run_dismod()
        cl.summarize_posterior()
        cl.draw()
        cl.predict()
        return loc_id, 0
    else:
        try:
            if full_timespan:
                cl = Cascade_loc(loc_id, sex_id, year, c, timespan=50,
                                 parent_loc=cl_parent)
            else:
                cl = Cascade_loc(loc_id, sex_id, year, c,
                                 parent_loc=cl_parent)
            cl.run_dismod()
            cl.summarize_posterior()
            cl.draw()
            cl.predict()
            return loc_id, 0
        except Exception as e:
            logging.exception("Failure running location {}".format(loc_id))
            return loc_id, str(e)


cl_world = Cascade_loc(1, 0, y, c, reimport=False)
cl_worlds[y] = cl_world

num_cpus = mp.cpu_count()
pool = mp.Pool(min(num_cpus, 8))

cl_world = cl_worlds[y]
cl_super = Cascade_loc(super_id, sex_id, y, c, parent_loc=cl_world,
                       reimport=False)
cl_super.run_dismod()
cl_super.summarize_posterior()
cl_super.draw()

# Run sub-locations
lvl = 1
completed_locs = {lvl: {super_id: cl_super}}
desc_at_lvl = True
while desc_at_lvl:
    desc = lt.get_node_by_id(super_id).level_n_descendants(lvl)
    if len(desc) == 0:
        desc_at_lvl = False
        break
    else:
        arglist = []
        for child_loc in desc:
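            # Hypothetical continuation (an assumption; the source snippet is
            # truncated here). It assumes the run_loc variant that takes
            # (loc_id, parent_loc, sex_id, year) and returns the fitted
            # Cascade_loc: pair each child with its parent's finished fit so
            # the parent can seed the child's priors, then fan the level out
            # through the pool. The tree accessor `.parent.id` is assumed.
            parent_id = lt.get_node_by_id(child_loc).parent.id
            parent_cl = completed_locs[lvl].get(parent_id)
            if parent_cl is None:
                continue  # parent failed upstream; skip its children
            arglist.append((child_loc, parent_cl, sex_id, y))
        results = pool.map(run_loc, arglist)
        lvl += 1
        completed_locs[lvl] = {loc: cl for loc, cl in results
                               if cl is not None}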