def go_stage_test(infections, **kwargs):
    old_num_in_red = sum(infections.subinfs[0].work[2]) + \
        sum(infections.subinfs[0].play[2])
    old_num_in_blue = sum(infections.subinfs[1].work[1]) + \
        sum(infections.subinfs[1].play[1])

    go_stage(go_from="red", go_to="blue",
             from_stage=2, to_stage=1,
             infections=infections, **kwargs)

    new_num_in_red = sum(infections.subinfs[0].work[2]) + \
        sum(infections.subinfs[0].play[2])
    new_num_in_blue = sum(infections.subinfs[1].work[1]) + \
        sum(infections.subinfs[1].play[1])

    from metawards.utils import Console
    Console.print(f"{old_num_in_blue} -> {new_num_in_blue}, "
                  f"{old_num_in_red} -> {new_num_in_red}")

    assert new_num_in_red == 0
    assert old_num_in_red - new_num_in_red == new_num_in_blue - old_num_in_blue
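# A minimal sketch (hypothetical wiring, not part of the original test) of
# how go_stage_test could run as a custom mover: a move function passed via
# metawards' --mover flag returns the list of go-functions to apply each
# step, so returning go_stage_test both performs the go_stage move and runs
# the assertions above. The function and file names here are assumptions;
# check the movers documentation for the exact name resolution rules.
def move_test(**kwargs):
    return [go_stage_test]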
def write_setup_entries(network: metawards.Network, design_index: int,
                        run_ident: str, **kwargs):
    global _sql_file_name
    global _run_index

    database: Union[sql.Connection, None] = None
    try:
        database = sql.connect(_sql_file_name)

        # Prepare database entries
        c: sql.Cursor = database.cursor()

        # Write the hypercube point, ignoring repeats. Repeat runs can
        # create duplicate keys; ideally this would be filtered before
        # entering here, so use 'insert or ignore' instead of 'insert'
        hypercube_keys = [var[1:] for var in get_input_header_names()[:-1]
                          if var[0] == '.']
        hypercube_values = tuple(
            [network.params.user_params[x] for x in hypercube_keys])
        i_str = f"insert or ignore into design_table" \
                f"({','.join(hypercube_keys)}) " \
                f"values ({','.join(['?'] * len(hypercube_keys))})"
        c.execute(i_str, hypercube_values)
        database.commit()

        # Write this run
        i_str = "insert into run_table(design_index,end_day,mw_folder) " \
                "values (?,?,?)"
        vals = (design_index, -1, run_ident)
        c.execute(i_str, vals)
        _run_index = c.lastrowid
        Console.print("This is run: " + str(_run_index))
        database.commit()
    finally:
        # guard in case sql.connect itself raised
        if database:
            database.close()
def output_db(population: Population, network: Networks,
              workspace: Workspace, output_dir: OutputFiles, **kwargs):
    Console.print(f"Calling output_db for a {network.__class__} object")

    # open a database to hold the data - call the 'create_tables'
    # function on this database when it is first opened
    conn = output_dir.open_db("stages.db",
                              initialise=create_tables(network))
    conn2 = output_dir.open_db("stages2.db",
                               initialise=create_tables_2(network))
    conn3 = output_dir.open_db("stages3.db",
                               initialise=create_tables_3(network))
    c = conn.cursor()
    c2 = conn2.cursor()
    c3 = conn3.cursor()

    # get each demographic's data
    for i, subnet in enumerate(network.subnets):
        name = subnet.name
        ward_inf_tot = workspace.subspaces[i].ward_inf_tot
        N_INF_CLASSES = workspace.subspaces[i].n_inf_classes

        col_names = ["day", "ward"] + \
            [f"{subnet.name}_{j}" for j in range(0, N_INF_CLASSES)]
        col2_names = ["day", "ward"] + \
            [f"{subnet.name}_{j}" for j in _out_channels[subnet.name]]

        for k in range(1, workspace.subspaces[i].nnodes + 1):
            vals = [population.day, k]

            for j in range(0, N_INF_CLASSES):
                # TODO: What is this?? Why are some classes deltas?
                if j == 1 or j == 3:
                    vals.append(ward_inf_tot[j - 1][k] + ward_inf_tot[j][k])
                else:
                    vals.append(ward_inf_tot[j][k])

            vals_str = ",".join([str(v) for v in vals])

            # Technically this is open to SQL injection, perhaps let CW know?
            c.execute(f"insert into {name}_totals VALUES ({vals_str})")

            col_str = ','.join(col_names)
            update_cols = col_names[2:]
            update_str = ','.join(
                [f"{col} = {val}" for col, val in zip(update_cols, vals[2:])])
            qstring = f"insert into results ({col_str}) " \
                      f"values ({vals_str}) " \
                      f"on conflict(day, ward) do update set {update_str}"
            c2.execute(qstring)

            col2_str = ','.join(col2_names)
            update_cols = col2_names[2:]
            keeps = [vals[x + 2] for x in range(N_INF_CLASSES)
                     if x in _out_channels[subnet.name]]
            keeps_str = ",".join(
                [str(v) for v in [population.day, k] + keeps])
            update_str = ','.join(
                [f"{col} = {val}" for col, val in zip(update_cols, keeps)])
            qstring = f"insert into compact ({col2_str}) " \
                      f"values ({keeps_str}) " \
                      f"on conflict(day, ward) do update set {update_str}"
            c3.execute(qstring)

    conn.commit()
    conn2.commit()
    conn3.commit()
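# The TODO above is real: vals_str is string-interpolated straight into the
# SQL. A minimal sketch (hypothetical helper, not part of the original
# extractor) of the parameterized alternative - '?' placeholders can bind
# the values safely, though the table name itself still has to come from a
# trusted list, since placeholders cannot bind identifiers:
def insert_totals_safely(cursor, table_name, vals):
    placeholders = ",".join(["?"] * len(vals))
    cursor.execute(f"insert into {table_name} VALUES ({placeholders})",
                   vals)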
def output_skip(workspace, **kwargs):
    totals = [inf + pinf
              for inf, pinf in zip(workspace.inf_tot, workspace.pinf_tot)]

    Console.print(str(totals))

    # should have moved from stage[1] to stage[3], so totals[2] is 0
    assert totals[2] == 0
def extractor_setup(network: metawards.Network, **kwargs):
    # Globals
    global _created_file_flag
    global _sql_file_name
    global _run_ident

    # Get the unique output directory for this run
    out_object: metawards.OutputFiles = kwargs["output_dir"]
    run_ident = os.path.basename(out_object.get_path())
    out_folder = os.path.abspath(
        os.path.join(out_object.get_path(), os.path.pardir))

    # Identify the design point
    design_index = int(network.params.user_params["design_index"])

    # Create a valid URI to use the SQLite access rights
    data_base_file_name: str = os.path.join(out_folder, "sql_wards.dat")
    _sql_file_name = data_base_file_name
    fixed_path = os.path.abspath(data_base_file_name).replace("\\", "/")
    db_uri = f"file:{fixed_path}?mode=rw"

    # Concurrency guard: multiple processes can connect, but only one
    # should create the database.
    # NOTE: Connections only fail if the file doesn't exist with mode "rw"
    test_connection: Union[sql.Connection, None] = None
    create_connection: Union[sql.Connection, None] = None

    # One at a time here.
    # NOTE: a freshly created Lock only serialises threads within this
    # process; the real cross-process guard is the "rw" vs "rwc" URI dance
    mutex = multiprocessing.Lock()
    with mutex:
        try:
            test_connection = sql.connect(db_uri, uri=True)
            # TODO: What do we do if there is an old database in there?
            # FIXME: For re-running partial ensembles this needs handling
        except sql.OperationalError:
            # This process is the first one in, so it creates the database
            try:
                # Append "c" to make the connection create a blank database
                create_connection = sql.connect(db_uri + "c", uri=True)
                _created_file_flag = True
            except sql.OperationalError as err:
                # An actual problem happened if we get here
                Console.print("SQL Error: " + str(err))
                raise

            # Drop repeats
            u_vars = get_input_header_names()[:-1]
            design_vars = [var[1:] for var in u_vars if var[0] == '.']
            make_sql_template(design_vars, create_connection)
        finally:
            if test_connection:
                test_connection.close()
            if create_connection:
                create_connection.close()

    # Do first commits for setting up the run
    write_setup_entries(network, design_index, run_ident, **kwargs)
def initialise(conn, network):
    c = conn.cursor()

    values: List[str] = []
    for i, subnet in enumerate(network.subnets):
        values += [f"{subnet.name}_{j} int"
                   for j in _out_channels[subnet.name]]

    qstring = f"CREATE TABLE IF NOT EXISTS {table_name}(" \
              f"design INT NOT NULL, repeat INT NOT NULL, " \
              f"day INT NOT NULL, ward INT NOT NULL, {','.join(values)}, " \
              f"PRIMARY KEY (design, repeat, day, ward));"
    Console.print(f"POSTGRESQL Exec: \n{qstring}")
    c.execute(qstring)
    conn.commit()
def write_setup_entries(database: Connection, design_index: int,
                        run_ident: str):
    global _run_index

    c: Cursor = database.cursor()

    # Write this run
    i_str = "insert into run_table(design_index,end_day,mw_folder) " \
            "values (?,?,?)"
    vals = (design_index, -1, run_ident)
    c.execute(i_str, vals)
    _run_index = c.lastrowid
    Console.print("This is run: " + str(_run_index))
    database.commit()
def iterate_custom(network, population, **kwargs):
    from metawards.iterators import iterate_working_week

    state, rate, can_work = get_lock_down_vars(network=network,
                                               population=population)
    Console.print(f"state {state}: scale_rate = {rate}, can_work = {can_work}")

    if state > 0:
        return [advance_lock_down]
    else:
        return iterate_working_week(network=network,
                                    population=population, **kwargs)
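# Usage sketch (hypothetical file name): if this iterator lives in
# lockdown.py together with get_lock_down_vars and advance_lock_down, it
# would be enabled with something like
#
#   metawards -d lurgy --iterator lockdown
#
# so that on days where state > 0 the usual working-week advance functions
# are replaced wholesale by advance_lock_down.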
def output_wards_serial(network: metawards.Network,
                        population: metawards.Population,
                        workspace: metawards.Workspace,
                        out_dir: metawards.OutputFiles, **kwargs):
    global _sql_file_name
    global _run_index

    connection = out_dir.open_db("rundata.dat", auto_bzip=False,
                                 initialise=None)

    # Potential problem: what if we accidentally hit a real attribute?
    if not hasattr(network.params, "_uq4covid_setup"):
        network.params._uq4covid_setup = True
        extractor_setup(network, **kwargs)

    # Prepare database entries
    database = connect(_sql_file_name)
    c: Cursor = database.cursor()

    # Write the current day, some may be longer / shorter, so ignore
    # duplicate entries
    c.execute("insert or ignore into day_table(day,date) values (?,?)",
              (int(population.day), str(population.date)))
    database.commit()

    # Write results for infections and removed
    # NOTE: Don't re-use time_index as if there is a duplicate then the
    # rowid will be zero
    # TODO: List comprehension is fine, but consider numpy (or equivalent)
    # for more speed
    output_src = 0
    mode_str = "normal write"

    # The index has already been sent to the database, so reuse it here
    # safely
    for channel_index, channel_name in enumerate(_output_channels_list.keys()):
        Console.print(
            f"Writing channel {channel_name} to database src = "
            f"{_output_channels_list[channel_name][output_src]}: {mode_str}")
        data_source = eval(_output_channels_list[channel_name][output_src])
        values = [(i, channel_index, x, int(population.day), _run_index)
                  for i, x in enumerate(data_source) if i != 0]
        c.executemany(
            "insert into results_table"
            "(ward_id,output_channel,sim_out,sim_time,run_id) "
            "values (?,?,?,?,?)", values)

    # Write last day into run table
    c.execute("update run_table set end_day = ? where run_index = ?",
              (int(population.day), _run_index))
    database.commit()
    database.close()
def output_db(population: Population, network: Networks,
              workspace: Workspace, output_dir: OutputFiles, **kwargs):
    Console.print(f"Calling output_db for a {network.__class__} object")

    postgres_connection = psg.connect(host="localhost",
                                      user="******",
                                      password="******",
                                      database=database_name)

    # Make sure init is only called once - re-running it wouldn't actually
    # matter, but skipping it saves time
    if not hasattr(network, "init_guard"):
        initialise(postgres_connection, network)
        setattr(network, "init_guard", False)

    cur = postgres_connection.cursor()

    run_ident = path.basename(output_dir.get_path())
    repeat_ident = int(run_ident[-3:])
    design_index = int(network.params.user_params["design_index"])

    # get each demographic's data
    for i, subnet in enumerate(network.subnets):
        ward_inf_tot = workspace.subspaces[i].ward_inf_tot
        N_INF_CLASSES = workspace.subspaces[i].n_inf_classes

        col2_names = ["design", "repeat", "day", "ward"] + \
            [f"{subnet.name}_{j}" for j in _out_channels[subnet.name]]

        for k in range(1, workspace.subspaces[i].nnodes + 1):
            if k not in _zero_crossings:
                _zero_crossings[k] = False

            vals = [design_index, repeat_ident, population.day, k]

            for j in range(0, N_INF_CLASSES):
                # Try to fudge a marker for first infections
                if ward_inf_tot[0][k] != 0 and _zero_crossings[k] is False:
                    _zero_crossings[k] = True
                    Console.print(f"Got first infection in ward {k}")

                # TODO: What is this?? Why are some classes deltas?
                if j == 1 or j == 3:
                    vals.append(ward_inf_tot[j - 1][k] + ward_inf_tot[j][k])
                else:
                    vals.append(ward_inf_tot[j][k])

            col2_str = ','.join(col2_names)
            update_cols = col2_names[4:]
            keeps = [vals[x + 4] for x in range(N_INF_CLASSES)
                     if x in _out_channels[subnet.name]]
            keeps_str = ",".join(
                [str(v) for v in
                 [design_index, repeat_ident, population.day, k] + keeps])
            update_str = ','.join(
                [f"{col} = {val}" for col, val in zip(update_cols, keeps)])
            qstring = f"INSERT INTO {table_name} ({col2_str}) " \
                      f"VALUES ({keeps_str}) " \
                      f"ON CONFLICT (design, repeat, day, ward) " \
                      f"DO UPDATE SET {update_str};"

            if _zero_crossings[k] is True:
                cur.execute(qstring)

    postgres_connection.commit()
    postgres_connection.close()
def output_db(population: Population, network: Networks,
              workspace: Workspace, output_dir: OutputFiles, **kwargs):
    Console.print(f"Calling output_db for a {network.__class__} object")

    conn3 = output_dir.open_db("stages3.db",
                               initialise=create_tables_3(network))
    c3 = conn3.cursor()

    # get each demographic's data
    for i, subnet in enumerate(network.subnets):
        ward_inf_tot = workspace.subspaces[i].ward_inf_tot
        N_INF_CLASSES = workspace.subspaces[i].n_inf_classes

        col2_names = ["day", "ward"] + \
            [f"{subnet.name}_{j}" for j in _out_channels[subnet.name]]

        for k in range(1, workspace.subspaces[i].nnodes + 1):
            if k not in _zero_crossings:
                _zero_crossings[k] = False

            vals = [population.day, k]

            for j in range(0, N_INF_CLASSES):
                # Try to fudge a marker for first infections
                if ward_inf_tot[0][k] != 0 and _zero_crossings[k] is False:
                    _zero_crossings[k] = True
                    Console.print(f"Got first infection in ward {k}")

                # TODO: What is this?? Why are some classes deltas?
                if j == 1 or j == 3:
                    vals.append(ward_inf_tot[j - 1][k] + ward_inf_tot[j][k])
                else:
                    vals.append(ward_inf_tot[j][k])

            col2_str = ','.join(col2_names)
            update_cols = col2_names[2:]
            keeps = [vals[x + 2] for x in range(N_INF_CLASSES)
                     if x in _out_channels[subnet.name]]
            keeps_str = ",".join(
                [str(v) for v in [population.day, k] + keeps])
            update_str = ','.join(
                [f"{col} = {val}" for col, val in zip(update_cols, keeps)])
            qstring = f"insert into compact ({col2_str}) " \
                      f"values ({keeps_str}) " \
                      f"on conflict(day, ward) do update set {update_str}"

            if _zero_crossings[k] is True:
                c3.execute(qstring)

    conn3.commit()
def output_wards_ir_serial(network: metawards.Network,
                           population: metawards.Population,
                           workspace: metawards.Workspace, **kwargs):
    global _sql_file_name
    global _run_index

    # Potential problem: what if we accidentally hit a real attribute?
    if not hasattr(network.params, "_uq4covid_setup"):
        network.params._uq4covid_setup = True
        Console.print("First output")
        extractor_setup(network, **kwargs)

    # Prepare database entries
    database = sql.connect(_sql_file_name)
    c: sql.Cursor = database.cursor()

    # Write the current day, some may be longer / shorter, so ignore
    # duplicate entries
    c.execute("insert or ignore into day_table(day,date) values (?,?)",
              (int(population.day), str(population.date)))
    # NOTE: with 'insert or ignore', lastrowid is stale if the day was a
    # duplicate and the insert was skipped
    time_index = c.lastrowid
    database.commit()

    # Write results for infections and removed
    v = []
    for i, x in enumerate(workspace.I_in_wards):
        if i == 0:
            continue
        v.append((i, _output_channels["infected"], x, time_index,
                  _run_index))

    for i, x in enumerate(workspace.R_in_wards):
        if i == 0:
            continue
        v.append((i, _output_channels["removed"], x, time_index,
                  _run_index))

    c.executemany(
        "insert into results_table"
        "(ward_id,output_channel,sim_out,sim_time,run_id) "
        "values (?,?,?,?,?)", v)
    database.commit()
    database.close()
def test_ward_conversion():
    # load all of the parameters
    try:
        params = Parameters.load(parameters="march29")
    except Exception as e:
        print(f"Unable to load parameter files. Make sure that you have "
              f"cloned the MetaWardsData repository and have set the "
              f"environment variable METAWARDSDATA to point to the "
              f"local directory containing the repository, e.g. the "
              f"default is $HOME/GitHub/MetaWardsData")
        raise e

    params.set_input_files("2011Data")

    print("Building the network...")
    network = Network.build(params=params)

    profiler = Profiler()
    profiler = profiler.start("to_json")

    wards = network.to_wards(profiler=profiler)

    print(f"{wards.num_workers()} / {wards.num_players()}")
    _assert_equal(wards.num_workers(), network.work_population)
    _assert_equal(wards.num_players(), network.play_population)

    print(f"{wards.num_work_links()} / {wards.num_play_links()}")
    _assert_equal(wards.num_work_links(), network.nlinks)
    _assert_equal(wards.num_play_links(), network.nplay)

    print("Converting to data...")
    data = wards.to_data(profiler=profiler)

    print("Converting to json...")
    profiler = profiler.start("Convert to JSON")
    s = json.dumps(data)
    profiler = profiler.stop()

    profiler = profiler.stop()  # end to_json

    print(f"Done - {len(s)/(1024*1024.0)} MB : {s[0:1024]}...")

    print("Converting from json...")
    profiler = profiler.start("Convert from JSON")
    profiler = profiler.start("from_json")
    data = json.loads(s)
    profiler = profiler.stop()

    wards2 = Wards.from_data(data, profiler=profiler)
    assert wards2 == wards

    network2 = Network.from_wards(wards2, profiler=profiler)
    profiler = profiler.stop()

    Console.print(profiler)

    Console.print("Validating equality - may take some time...")

    _assert_equal(network2.nnodes, network.nnodes)
    _assert_equal(network2.nlinks, network.nlinks)
    _assert_equal(network2.nplay, network.nplay)

    if network.info is None:
        assert network2.info is None

    _assert_equal(len(network.info), len(network2.info))

    Console.print(f"{len(network.info)}, {network.nnodes}")

    with Console.progress() as progress:
        task1 = progress.add_task("Validating info", total=len(network.info))
        task2 = progress.add_task("Validating nodes", total=network.nnodes)
        task3 = progress.add_task("Validating work", total=network.nlinks)
        task4 = progress.add_task("Validating play", total=network.nplay)

        for i in range(0, len(network.info)):
            assert network.info[i] == network2.info[i]
            progress.update(task1, advance=1)

        progress.update(task1, completed=len(network.info),
                        force_update=True)

        for i in range(1, network.nnodes + 1):
            _assert_equal(network.nodes.label[i], network2.nodes.label[i])
            _assert_equal(network.nodes.begin_to[i],
                          network2.nodes.begin_to[i])
            _assert_equal(network.nodes.end_to[i], network2.nodes.end_to[i])
            _assert_equal(network.nodes.self_w[i], network2.nodes.self_w[i])
            _assert_equal(network.nodes.begin_p[i], network2.nodes.begin_p[i])
            _assert_equal(network.nodes.end_p[i], network2.nodes.end_p[i])
            _assert_equal(network.nodes.self_p[i], network2.nodes.self_p[i])
            _assert_equal(network.nodes.x[i], network2.nodes.x[i])
            _assert_equal(network.nodes.y[i], network2.nodes.y[i])
            progress.update(task2, advance=1)

        progress.update(task2, completed=network.nnodes, force_update=True)

        for i in range(1, network.nlinks + 1):
            _assert_equal(network.links.ifrom[i], network2.links.ifrom[i])
            _assert_equal(network.links.ito[i], network2.links.ito[i])
            _assert_equal(network.links.weight[i], network2.links.weight[i])
            _assert_equal(network.links.suscept[i], network2.links.suscept[i])
            progress.update(task3, advance=1)

        progress.update(task3, completed=network.nlinks, force_update=True)

        for i in range(1, network.nplay + 1):
            _assert_equal(network.play.ifrom[i], network2.play.ifrom[i])
            _assert_equal(network.play.ito[i], network2.play.ito[i])
            _assert_equal(network.play.weight[i], network2.play.weight[i])
            _assert_equal(network.play.suscept[i], network2.play.suscept[i])
            progress.update(task4, advance=1)

        progress.update(task4, completed=network.nplay, force_update=True)
def cli():
    """Main function for the command line interface. This does one of
       three things:

       1. If this is the main process, then it parses the arguments
          and runs and manages the jobs

       2. If this is a worker process, then it starts up and waits for work

       3. If this is a supervisor process, then it queries the job
          scheduling system for information about the compute nodes to
          use, and will then set up and run a manager (main) process
          that will use those nodes to run the jobs
    """
    from metawards.utils import Console

    # get the parallel scheme now before we import any other modules
    # so that it is clear if mpi4py or scoop (or another parallel module)
    # has been imported via the required "-m module" syntax
    parallel_scheme = get_parallel_scheme()

    if parallel_scheme == "mpi4py":
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        nprocs = comm.Get_size()
        rank = comm.Get_rank()

        if rank != 0:
            # this is a worker process, so should not do anything
            # more until it is given work in the pool
            Console.print(f"Starting worker process {rank+1} "
                          f"of {nprocs-1}...")
            return
        else:
            Console.print("Starting main process...")

    elif parallel_scheme == "scoop":
        Console.print("STARTING SCOOP PROCESS")

    import sys

    args, parser = parse_args()

    if not args.already_supervised:
        hostfile = get_hostfile(args)
        if hostfile:
            # The user has asked to run a parallel job - this means that
            # this process is the parallel supervisor
            if args.mpi:
                mpi_supervisor(hostfile, args)
                return
            elif args.scoop:
                scoop_supervisor(hostfile, args)
                return

            # neither is preferred - if scoop is installed then use that
            try:
                import scoop  # noqa - disable unused warning
                have_scoop = True
            except Exception:
                have_scoop = False

            if have_scoop:
                scoop_supervisor(hostfile, args)
                return

            # do we have MPI?
            try:
                import mpi4py  # noqa - disable unused warning
                have_mpi4py = True
            except Exception:
                have_mpi4py = False

            if have_mpi4py:
                mpi_supervisor(hostfile, args)
                return

            # we don't have any other option, just keep going and
            # use multiprocessing - in this case we don't need a
            # supervisor and this is the main process

    # This is now the code for the main process

    # We need at least one of these listed options
    should_run = False

    for arg in [args.input, args.repeats, args.disease, args.additional,
                args.model, args.iterator, args.extractor,
                args.demographics, args.mixer, args.mover]:
        if arg is not None:
            should_run = True
            break

    if not should_run:
        parser.print_help(sys.stdout)
        sys.exit(0)

    if args.repeats is None:
        args.repeats = [1]

    # import the parameters here to speed up the display of help
    from metawards import Parameters, Network, Population, \
        print_version_string

    # print the version information first, so that there is enough
    # information to enable someone to reproduce this run
    print_version_string()

    Console.rule("Initialise")

    if args.input:
        # get the line numbers of the input file to read
        if args.line is None or len(args.line) == 0:
            linenums = None
            Console.print(f"* Using parameters from all lines of "
                          f"{args.input}", markdown=True)
        else:
            from metawards.utils import string_to_ints
            linenums = string_to_ints(args.line)

            if len(linenums) == 0:
                Console.error(f"Cannot read zero lines from {args.input}?")
                sys.exit(-1)
            elif len(linenums) == 1:
                Console.print(f"* Using parameters from line {linenums[0]} "
                              f"of {args.input}", markdown=True)
            else:
                Console.print(f"* Using parameters from lines {linenums} "
                              f"of {args.input}", markdown=True)

        from metawards import VariableSets, VariableSet
        variables = VariableSets.read(filename=args.input,
                                      line_numbers=linenums)
    else:
        from metawards import VariableSets, VariableSet
        # create a VariableSets with one null VariableSet
        variables = VariableSets()
        variables.append(VariableSet())

    nrepeats = args.repeats

    if nrepeats is None or len(nrepeats) < 1:
        nrepeats = [1]

    if len(nrepeats) > 1 and len(variables) != len(nrepeats):
        Console.error(f"The number of repeats {len(nrepeats)} must equal "
                      f"the number of adjustable variable lines "
                      f"{len(variables)}")
        raise ValueError("Disagreement in the number of repeats and "
                         "adjustable variables")

    # ensure that all repeats are >= 0
    nrepeats = [0 if int(x) < 0 else int(x) for x in nrepeats]

    if sum(nrepeats) == 0:
        Console.error("The total number of repeats is 0. Are you sure "
                      "that you don't want to run anything?")
        raise ValueError("Cannot run nothing")

    if len(nrepeats) == 1 and nrepeats[0] == 1:
        Console.print("* Performing a single run of each set of parameters",
                      markdown=True)
    elif len(nrepeats) == 1:
        Console.print(f"* Performing {nrepeats[0]} runs of each set of "
                      f"parameters", markdown=True)
    else:
        Console.print(f"* Performing {nrepeats} runs applied to the "
                      f"parameters", markdown=True)

    variables = variables.repeat(nrepeats)

    # working out the number of processes and threads...
    from metawards.utils import guess_num_threads_and_procs
    (nthreads, nprocs) = guess_num_threads_and_procs(
        njobs=len(variables),
        nthreads=args.nthreads,
        nprocs=args.nprocs,
        parallel_scheme=parallel_scheme)

    Console.print(f"\n* Number of threads to use for each model run "
                  f"is {nthreads}", markdown=True)

    if nprocs > 1:
        Console.print(f"* Number of processes used to parallelise model "
                      f"runs is {nprocs}", markdown=True)
        Console.print(f"* Parallelisation will be achieved using "
                      f"{parallel_scheme}", markdown=True)

    # sort out the random number seed
    seed = args.seed

    if seed is None:
        import random
        seed = random.randint(10000, 99999999)

    if seed == 0:
        # this is a special mode that a developer can use to force
        # all jobs to use the same random number seed (15324) that
        # is used for comparing outputs. This should NEVER be used
        # for production code
        Console.warning("Using special mode to fix all random number "
                        "seeds to 15324. DO NOT USE IN PRODUCTION!!!")
    else:
        Console.print(f"* Using random number seed {seed}", markdown=True)

    # get the starting day and date
    start_day = args.start_day

    if start_day < 0:
        raise ValueError(f"You cannot use a start day {start_day} that is "
                         f"less than zero!")

    start_date = None

    if args.start_date:
        try:
            from dateparser import parse
            start_date = parse(args.start_date).date()
        except Exception:
            pass

        if start_date is None:
            from datetime import date
            try:
                start_date = date.fromisoformat(args.start_date)
            except Exception as e:
                raise ValueError(f"Cannot interpret a valid date from "
                                 f"'{args.start_date}'. Error is "
                                 f"{e.__class__} {e}")

    if start_date is None:
        from datetime import date
        start_date = date.today()

    Console.print(f"* Day zero is {start_date.strftime('%A %B %d %Y')}",
                  markdown=True)

    if start_day != 0:
        from datetime import timedelta
        start_day_date = start_date + timedelta(days=start_day)
        Console.print(f"Starting on day {start_day}, which is "
                      f"{start_day_date.strftime('%A %B %d %Y')}")
    else:
        start_day_date = start_date

    # now find the MetaWardsData repository as this will be needed
    # for the repeat command line too
    (repository, repository_version) = \
        Parameters.get_repository(args.repository)

    Console.print(f"* Using MetaWardsData at {repository}", markdown=True)

    if repository_version["is_dirty"]:
        Console.warning("This repository is dirty, meaning that the data "
                        "has not been committed to git. This may make "
                        "this calculation very difficult to reproduce")

    # now work out the minimum command line needed to repeat this job
    args.seed = seed
    args.nprocs = nprocs
    args.nthreads = nthreads
    args.start_date = start_date.isoformat()
    args.repository = repository

    # also print the source of all inputs
    import configargparse
    Console.rule("Source of inputs")
    p = configargparse.get_argument_parser("main")
    Console.print(p.format_values())

    # print out the command used to repeat this job
    repeat_cmd = "metawards"

    for key, value in vars(args).items():
        if value is not None:
            k = key.replace("_", "-")

            if isinstance(value, bool):
                if value:
                    repeat_cmd += f" --{k}"
            elif isinstance(value, list):
                repeat_cmd += f" --{k}"
                for val in value:
                    v = str(val)
                    if " " in v:
                        repeat_cmd += f" '{v}'"
                    else:
                        repeat_cmd += f" {v}"
            else:
                v = str(value)
                if " " in v:
                    repeat_cmd += f" --{k} '{v}'"
                else:
                    repeat_cmd += f" --{k} {v}"

    Console.rule("Repeating this run")
    Console.print("To repeat this job use the command;")
    Console.command(repeat_cmd)
    Console.print("Or alternatively use the config.yaml file that will be "
                  "written to the output directory and use the command;")
    Console.command("metawards -c config.yaml")

    # load all of the parameters
    try:
        params = Parameters.load(parameters=args.parameters)
    except Exception as e:
        Console.warning(f"Unable to load parameter files. Make sure that "
                        f"you have cloned the MetaWardsData repository and "
                        f"have set the environment variable METAWARDSDATA "
                        f"to point to the local directory containing the "
                        f"repository, e.g. the default is "
                        f"$HOME/GitHub/MetaWardsData")
        raise e

    # should we profile the code? (default no as it prints a lot)
    profiler = None

    if args.no_profile:
        profiler = None
    elif args.profile:
        from metawards.utils import Profiler
        profiler = Profiler()

    # load the disease and starting-point input files
    Console.rule("Disease")
    if args.disease:
        params.set_disease(args.disease)
    else:
        params.set_disease("ncov")

    Console.rule("Model data")
    if args.model:
        params.set_input_files(args.model)
    else:
        params.set_input_files("2011Data")

    # load the user-defined custom parameters
    Console.rule("Custom parameters and seeds")
    if args.user_variables:
        custom = VariableSet.read(args.user_variables)
        Console.print(f"Adjusting variables to {custom}")
        custom.adjust(params)
    else:
        Console.print("Not adjusting any parameters...")

    # read the additional seeds
    if args.additional is None or len(args.additional) == 0:
        Console.print("Not using any additional seeds...")
    else:
        for additional in args.additional:
            Console.print(f"Loading additional seeds from {additional}")
            params.add_seeds(additional)

    # what to do with the 0 state?
    stage_0 = "R"

    if args.disable_star:
        Console.print("Disabling the * state. Stage 0 is the one and "
                      "only E state.")
        stage_0 = "disable"
    elif args.star_is_E:
        Console.print("Setting the * state as an additional E state.")
        stage_0 = "E"
    else:
        Console.print("Setting the * state as an additional R state.")
        stage_0 = "R"

    params.stage_0 = stage_0

    # extra parameters that are set
    params.UV = args.UV

    # set these extra parameters to 0
    params.static_play_at_home = 0
    params.play_to_work = 0
    params.work_to_play = 0
    params.daily_imports = 0.0

    Console.rule("Parameters")
    Console.print(params, markdown=True)

    # the size of the starting population
    population = Population(initial=args.population,
                            date=start_day_date,
                            day=start_day)

    Console.rule("Building the network")
    network = Network.build(params=params,
                            population=population,
                            max_nodes=args.max_nodes,
                            max_links=args.max_links,
                            profiler=profiler)

    if args.demographics:
        from metawards import Demographics
        Console.rule("Specialising into demographics")
        demographics = Demographics.load(args.demographics)
        Console.print(demographics)

        network = network.specialise(demographics,
                                     profiler=profiler,
                                     nthreads=nthreads)

    Console.rule("Preparing to run")
    from metawards import OutputFiles
    from metawards.utils import run_models

    outdir = args.output
    if outdir is None:
        outdir = "output"

    if args.force_overwrite_output:
        prompt = None
    else:
        from metawards import input

        def prompt(x):
            return input(x, default="y")

    auto_bzip = True

    if args.auto_bzip:
        auto_bzip = True
    elif args.no_auto_bzip:
        auto_bzip = False

    if args.iterator:
        iterator = args.iterator
    else:
        iterator = None

    if args.extractor:
        extractor = args.extractor
    else:
        extractor = None

    if args.mixer:
        mixer = args.mixer
    else:
        mixer = None

    if args.mover:
        mover = args.mover
    else:
        mover = None

    with OutputFiles(outdir, force_empty=args.force_overwrite_output,
                     auto_bzip=auto_bzip, prompt=prompt) as output_dir:
        # write the config file for this job to output/config.yaml
        Console.rule("Running the model")
        CONSOLE = output_dir.open("console.log")
        Console.save(CONSOLE)

        lines = []
        max_keysize = None

        for key, value in vars(args).items():
            if max_keysize is None:
                max_keysize = len(key)
            elif len(key) > max_keysize:
                max_keysize = len(key)

        for key, value in vars(args).items():
            if value is not None:
                key = key.replace("_", "-")
                spaces = " " * (max_keysize - len(key))

                if isinstance(value, bool):
                    if value:
                        lines.append(f"{key}:{spaces} true")
                    else:
                        lines.append(f"{key}:{spaces} false")
                elif isinstance(value, list):
                    s_value = [str(x) for x in value]
                    lines.append(f"{key}:{spaces} [ {', '.join(s_value)} ]")
                else:
                    lines.append(f"{key}:{spaces} {value}")

        CONFIG = output_dir.open("config.yaml", auto_bzip=False)
        lines.sort(key=str.swapcase)
        CONFIG.write("\n".join(lines))
        CONFIG.write("\n")
        CONFIG.flush()
        CONFIG.close()
        lines = None

        result = run_models(network=network, variables=variables,
                            population=population, nprocs=nprocs,
                            nthreads=nthreads, seed=seed,
                            nsteps=args.nsteps,
                            output_dir=output_dir,
                            iterator=iterator, extractor=extractor,
                            mixer=mixer, mover=mover,
                            profiler=profiler,
                            parallel_scheme=parallel_scheme)

        if result is None or len(result) == 0:
            Console.print("No output - end of run")
            return 0

        Console.rule("End of the run", style="finish")

        Console.save(CONSOLE)

    return 0
def mpi_supervisor(hostfile, args):
    """Function used by the MPI supervisor to get the information needed
       to form the mpiexec call to run an MPI version of the program
    """
    import os
    import sys
    from metawards.utils import Console

    Console.print("RUNNING AN MPI PROGRAM")

    cores_per_node = get_cores_per_node(args)

    Console.print(
        f"Will run jobs assuming {cores_per_node} cores per compute node")

    # based on the number of threads requested and the number of cores
    # per node, we can work out the number of mpi processes to start,
    # and can write a hostfile that will create the right layout
    nthreads = get_threads_per_task(args)

    Console.print(f"Will use {nthreads} OpenMP threads per model run...")

    tasks_per_node = int(cores_per_node / nthreads)

    Console.print(f"...meaning that the number of model runs per node "
                  f"will be {tasks_per_node}")

    # Next, read the hostfile to get a unique list of hostnames
    hostnames = {}

    with open(hostfile, "r") as FILE:
        line = FILE.readline()
        while line:
            hostname = line.strip()
            if len(hostname) > 0:
                hostnames[hostname] = 1
            line = FILE.readline()

    hostnames = list(hostnames.keys())
    hostnames.sort()

    Console.print(f"Number of compute nodes equals {len(hostnames)}")
    Console.print(", ".join(hostnames))

    # how many tasks can we perform in parallel?
    nprocs = tasks_per_node * len(hostnames)

    if args.nprocs:
        if nprocs != args.nprocs:
            Console.print(f"WARNING: You are using an unrecommended number "
                          f"of processes {args.nprocs} for the cluster "
                          f"{nprocs}.")
        nprocs = args.nprocs

    Console.print(
        f"Total number of parallel processes to run will be {nprocs}")
    Console.print(f"Total number of cores in use will be {nprocs*nthreads}")

    # Now write a new hostfile that round-robins the MPI tasks over
    # the nodes for 'tasks_per_node' runs
    hostfile = f"_metawards_hostfile_{os.getpid()}"
    Console.print(f"Writing hostfile to {hostfile}")

    with open(hostfile, "w") as FILE:
        i = 0
        while i < nprocs:
            for hostname in hostnames:
                FILE.write(hostname + "\n")
                i += 1
                if i == nprocs:
                    break

    # now craft the mpiexec command that will use this hostfile to
    # run the job - remember to pass the option to stop the main process
    # attempting to become a supervisor itself...
    mpiexec = os.getenv("MPIEXEC")

    if mpiexec is None:
        mpiexec = "mpiexec"

    # check for weird mpiexecs...
    import subprocess
    import shlex

    try:
        args_v = shlex.split(f"{mpiexec} -v")
        p = subprocess.run(args_v, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
        v = p.stdout.decode("utf-8").strip()
        Console.print(f"{mpiexec} -v => {v}")

        if v.find("HPE HMPT") != -1:
            raise ValueError(
                "metawards needs a more modern MPI library than HPE's, "
                "so please compile to another MPI and use that.")
    except Exception as e:
        Console.error(f"[ERROR] {e.__class__} {e}")

    pyexe = sys.executable
    script = os.path.abspath(sys.argv[0])
    args = " ".join(sys.argv[1:])

    cmd = f"{mpiexec} -np {nprocs} -hostfile {hostfile} " \
          f"{pyexe} -m mpi4py {script} --already-supervised {args} " \
          f"--nprocs {nprocs}"

    Console.print("Executing MPI job using")
    Console.command(cmd)

    try:
        args = shlex.split(cmd)
        subprocess.run(args).check_returncode()
    except Exception as e:
        Console.error("ERROR: Something went wrong!")
        Console.error(f"{e.__class__}: {e}")
        sys.exit(-1)

    # clean up the hostfile afterwards... (we leave it if something
    # went wrong as it may help debugging)
    os.unlink(hostfile)

    Console.print("MPI processes completed successfully")
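# For reference, the hostfile read above is just one hostname per line,
# with duplicate lines collapsed. A hypothetical two-node example:
#
#   node001.cluster
#   node001.cluster
#   node002.cluster
#
# With 16 cores per node and 8 OpenMP threads per model run this gives
# tasks_per_node = 16 / 8 = 2, so nprocs = 2 * 2 = 4, and the rewritten
# hostfile round-robins those four MPI ranks across the two nodes.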
def scoop_supervisor(hostfile, args):
    """Function used by the scoop supervisor to get the information needed
       to form the scoop call to run a scoop version of the program
    """
    import os
    import sys
    from metawards.utils import Console

    Console.print("RUNNING A SCOOP PROGRAM")

    cores_per_node = get_cores_per_node(args)

    Console.print(
        f"Will run jobs assuming {cores_per_node} cores per compute node")

    # based on the number of threads requested and the number of cores
    # per node, we can work out the number of scoop processes to start,
    # and can write a hostfile that will create the right layout
    nthreads = get_threads_per_task(args)

    Console.print(f"Will use {nthreads} OpenMP threads per model run...")

    tasks_per_node = int(cores_per_node / nthreads)

    Console.print(f"...meaning that the number of model runs per node "
                  f"will be {tasks_per_node}")

    # Next, read the hostfile to get a unique list of hostnames
    hostnames = {}

    with open(hostfile, "r") as FILE:
        line = FILE.readline()
        while line:
            hostname = line.strip()
            if len(hostname) > 0:
                hostnames[hostname] = 1
            line = FILE.readline()

    hostnames = list(hostnames.keys())
    hostnames.sort()

    Console.print(f"Number of compute nodes equals {len(hostnames)}")
    Console.print(", ".join(hostnames))

    # how many tasks can we perform in parallel?
    nprocs = tasks_per_node * len(hostnames)

    if args.nprocs:
        if nprocs != args.nprocs:
            Console.warning(f"You are using a not-recommended number of "
                            f"processes {args.nprocs} for the cluster "
                            f"{nprocs}.")
        nprocs = args.nprocs

    Console.print(
        f"Total number of parallel processes to run will be {nprocs}")
    Console.print(f"Total number of cores in use will be {nprocs*nthreads}")

    # Now write a new hostfile that round-robins the tasks over
    # the nodes for 'tasks_per_node' runs
    hostfile = f"_metawards_hostfile_{os.getpid()}"
    Console.print(f"Writing hostfile to {hostfile}")

    with open(hostfile, "w") as FILE:
        i = 0
        while i < nprocs:
            for hostname in hostnames:
                FILE.write(hostname + "\n")
                i += 1
                if i == nprocs:
                    break

    # now craft the scoop command that will use this hostfile to
    # run the job - remember to pass the option to stop the main process
    # attempting to become a supervisor itself...
    import subprocess
    import shlex

    pyexe = sys.executable
    script = os.path.abspath(sys.argv[0])
    args = " ".join(sys.argv[1:])

    # also need to tell the main program the number of processes
    # as it can't work it out itself
    cmd = f"{pyexe} -m scoop --hostfile {hostfile} -n {nprocs} " \
          f"{script} --already-supervised {args} --nprocs {nprocs}"

    Console.print("Executing scoop job using")
    Console.command(cmd)

    try:
        args = shlex.split(cmd)
        subprocess.run(args).check_returncode()
    except Exception as e:
        Console.error("ERROR: Something went wrong!")
        Console.error(f"{e.__class__}: {e}")
        sys.exit(-1)

    # clean up the hostfile afterwards... (we leave it if something
    # went wrong as it may help debugging)
    os.unlink(hostfile)

    Console.print("Scoop processes completed successfully")
def extract_only_i_per_ward(**kwargs):
    Console.print("Sending I per ward to the output stream")
    return [output_wards_i]
def extract_only_i_per_ward(**kwargs):
    from metawards.utils import Console
    Console.print("Sending I per ward to the output stream")
    return [output_wards_i]
def extract(network: metawards.Network,
            **kwargs) -> List[metawards.utils.MetaFunction]:
    Console.print("Sending I and R per ward to the output stream")
    return [output_wards_i]
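# Usage sketch (hypothetical file name): saved as only_i_per_ward.py, this
# extractor would be enabled with
#
#   metawards --extractor only_i_per_ward
#
# Note that the message above says "I and R" but only output_wards_i is
# returned; a companion output_wards_r (hypothetical name) would need to be
# appended to the returned list for the message to be accurate.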
def output_db(population: Population, network: Networks,
              workspace: Workspace, output_dir: OutputFiles, **kwargs):
    Console.print(f"Calling output_db for a {network.__class__} object")

    conn3 = output_dir.open_db("stages.db",
                               initialise=create_tables(network))
    c3 = conn3.cursor()

    ## set up the marker for the previous day
    for i, subnet in enumerate(network.subnets):
        ## if first day, then create a zeroed copy of the ward data;
        ## these should all be zero at day 0 and so not affect incidence
        if not hasattr(workspace.subspaces[i], "output_previous"):
            workspace.subspaces[i].output_previous = deepcopy(
                workspace.subspaces[i].ward_inf_tot)
            for row in workspace.subspaces[i].output_previous:
                for k in range(len(row)):
                    row[k] = 0

    ## extract Rprime and Dprime in each demographic
    Rprime = [[] for _ in range(4)]
    Dprime = [[] for _ in range(4)]
    for i, subnet in enumerate(network.subnets):
        ## get yesterday's data
        ward_inf_previous = workspace.subspaces[i].output_previous
        ## get today's data
        ward_inf_tot = workspace.subspaces[i].ward_inf_tot

        ## new deaths across wards
        if subnet.name != "asymp":
            for old, new in zip(ward_inf_previous[3], ward_inf_tot[3]):
                Dprime[i].append(new - old)

        ## new removals across wards
        for old, new in zip(ward_inf_previous[2], ward_inf_tot[2]):
            Rprime[i].append(new - old)

    ## calculate Iprime in each demographic
    Iprime = [[] for _ in range(4)]

    ## NEED TO DO THE FOLLOWING CALCULATIONS IN ORDER

    ## extract subnet names
    sub_names = [network.subnets[i].name for i in range(4)]

    ## ASYMPTOMATICS
    ia = sub_names.index("asymp")
    ## get data
    ward_inf_previous = workspace.subspaces[ia].output_previous
    ward_inf_tot = workspace.subspaces[ia].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc in zip(ward_inf_previous[1], ward_inf_tot[1],
                              Rprime[ia]):
        Iprime[ia].append(new - old + Rinc)

    ## CRITICAL
    ic = sub_names.index("critical")
    ## get data
    ward_inf_previous = workspace.subspaces[ic].output_previous
    ward_inf_tot = workspace.subspaces[ic].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc in zip(ward_inf_previous[1], ward_inf_tot[1],
                                    Rprime[ic], Dprime[ic]):
        Iprime[ic].append(new - old + Rinc + Dinc)

    ## HOSPITAL
    ih = sub_names.index("hospital")
    ## get data
    ward_inf_previous = workspace.subspaces[ih].output_previous
    ward_inf_tot = workspace.subspaces[ih].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc, Cinc in zip(ward_inf_previous[1],
                                          ward_inf_tot[1], Rprime[ih],
                                          Dprime[ih], Iprime[ic]):
        Iprime[ih].append(new - old + Rinc + Dinc + Cinc)

    ## GENPOP
    ig = sub_names.index("genpop")
    ## get data
    ward_inf_previous = workspace.subspaces[ig].output_previous
    ward_inf_tot = workspace.subspaces[ig].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc, Hinc in zip(ward_inf_previous[1],
                                          ward_inf_tot[1], Rprime[ig],
                                          Dprime[ig], Iprime[ih]):
        Iprime[ig].append(new - old + Rinc + Dinc + Hinc)

    ## calculate Eprime in the GENPOP demographic
    Eprime = []
    for old, new, Iinc, Ainc in zip(ward_inf_previous[0], ward_inf_tot[0],
                                    Iprime[ig], Iprime[ia]):
        Eprime.append(new - old + Iinc + Ainc)

    ## loop over wards and write to file
    wards = range(0, workspace.subspaces[0].nnodes + 1)
    days = [population.day] * len(wards)

    ## set column names
    col_names = ["day", "ward", "Einc", "E", "Iinc", "I", "RI", "DI",
                 "Ainc", "A", "RA", "Hinc", "H", "RH", "DH",
                 "Cinc", "C", "RC", "DC"]
    col_str = ','.join(col_names)

    ## extract demographics
    asymp_ward = workspace.subspaces[ia].ward_inf_tot
    genpop_ward = workspace.subspaces[ig].ward_inf_tot
    hospital_ward = workspace.subspaces[ih].ward_inf_tot
    critical_ward = workspace.subspaces[ic].ward_inf_tot

    ## write to file
    for day, ward, Einc, E, Iinc, I, RI, DI, Ainc, A, RA, Hinc, H, RH, DH, \
            Cinc, C, RC, DC in \
            zip(days, wards, Eprime, genpop_ward[0], Iprime[ig],
                genpop_ward[1], genpop_ward[2], genpop_ward[3],
                Iprime[ia], asymp_ward[1], asymp_ward[2],
                Iprime[ih], hospital_ward[1], hospital_ward[2],
                hospital_ward[3], Iprime[ic], critical_ward[1],
                critical_ward[2], critical_ward[3]):
        if ward not in _zero_crossings:
            _zero_crossings[ward] = False

        ## try to fudge a marker for first infections
        if Einc != 0 and _zero_crossings[ward] is False and ward != 0:
            _zero_crossings[ward] = True
            Console.print(f"Got first infection in ward {ward}")

        val = [day, ward, Einc, E, Iinc, I, RI, DI, Ainc, A, RA,
               Hinc, H, RH, DH, Cinc, C, RC, DC]
        keeps_str = ",".join([str(v) for v in val])
        qstring = f"insert into compact ({col_str}) values ({keeps_str})"

        if _zero_crossings[ward] is True:
            c3.execute(qstring)

    conn3.commit()

    ## save today's data so that it can be used tomorrow
    for i, subnet in enumerate(network.subnets):
        workspace.subspaces[i].output_previous = deepcopy(
            workspace.subspaces[i].ward_inf_tot)
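## The chained incidence calculations above all apply one conservation
## identity: daily arrivals into a compartment equal the change in its
## occupancy plus every recorded outflow from it that day. A hypothetical
## helper (not part of the original extractor) that captures the pattern:
def incidence(today, yesterday, *outflows):
    return [new - old + sum(outs)
            for new, old, *outs in zip(today, yesterday, *outflows)]

## e.g. Iprime[ic] would equal
## incidence(ward_inf_tot[1], ward_inf_previous[1], Rprime[ic], Dprime[ic])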
def output_wards_ir_serial(network: metawards.Network,
                           population: metawards.Population,
                           workspace: metawards.Workspace, **kwargs):
    global _sql_file_name
    global _run_index
    global _prev_s
    global _prev_e
    global _prev_i
    global _prev_r

    # Potential problem: what if we accidentally hit a real attribute?
    if not hasattr(network.params, "_uq4covid_setup"):
        network.params._uq4covid_setup = True
        extractor_setup(network, **kwargs)

    # Prepare database entries
    database = connect(_sql_file_name)
    c: Cursor = database.cursor()

    # Write the current day, some may be longer / shorter, so ignore
    # duplicate entries
    c.execute("insert or ignore into day_table(day,date) values (?,?)",
              (int(population.day), str(population.date)))
    database.commit()

    # Write results for infections and removed
    # NOTE: Don't re-use time_index as if there is a duplicate then the
    # rowid will be zero
    # TODO: List comprehension is fine, but consider numpy (or equivalent)
    # for more speed
    output_src = 0
    mode_str = "normal write"
    deltas = network.params.user_params["deltas"]
    if deltas:
        # Write deltas - this saves about ~6% total space and increases
        # the compression factor by 150%
        # NOTE: Assumption - if one is None, the rest of them are
        # (this isn't good!!)
        if _prev_i is None:
            deltas_s = workspace.S_in_wards
            deltas_e = workspace.E_in_wards
            deltas_i = workspace.I_in_wards
            deltas_r = workspace.R_in_wards
        else:
            deltas_s = [a - b for a, b in zip(workspace.S_in_wards, _prev_s)]
            deltas_e = [a - b for a, b in zip(workspace.E_in_wards, _prev_e)]
            deltas_i = [a - b for a, b in zip(workspace.I_in_wards, _prev_i)]
            deltas_r = [a - b for a, b in zip(workspace.R_in_wards, _prev_r)]

        _prev_s = workspace.S_in_wards
        _prev_e = workspace.E_in_wards
        _prev_i = workspace.I_in_wards
        _prev_r = workspace.R_in_wards

        # Point eval() at the deltas
        output_src = 1
        mode_str = "delta write"

    # The index has already been sent to the database, so reuse it here
    # safely
    for channel_index, channel_name in enumerate(_output_channels_list.keys()):
        Console.print(
            f"Writing channel {channel_name} to database src = "
            f"{_output_channels_list[channel_name][output_src]}: {mode_str}")
        data_source = eval(_output_channels_list[channel_name][output_src])
        values = [(i, channel_index, x, int(population.day), _run_index)
                  for i, x in enumerate(data_source) if i != 0]
        c.executemany(
            "insert into results_table"
            "(ward_id,output_channel,sim_out,sim_time,run_id) "
            "values (?,?,?,?,?)", values)

    # Write last day into run table
    c.execute("update run_table set end_day = ? where run_index = ?",
              (int(population.day), _run_index))
    database.commit()
    database.close()
def advance_initial_seeds(network, population, infections, profiler,
                          rngs, **kwargs):
    # extract user parameters
    params = network.params

    # extract file names for initial seeding probabilities
    ward_seed_filename = params.user_params["ward_seed_filename"]
    age_seed_filename = params.user_params["age_seed_filename"]
    time_seed_filename = params.user_params["time_seed_filename"]

    # start profiler
    p = profiler.start("additional_seeds")

    # set up lookups or read from cache
    age_probs_ind, age_probs = read_age_file(age_seed_filename)
    ward_probs_ind, ward_probs_trust, ward_probs = read_seed_file(
        ward_seed_filename)
    time_seed_date, time_seed_trust, time_seed_count = read_time_file(
        time_seed_filename)

    # extract current date
    date = population.date

    # filter to extract number of seeds
    filter_time_seed = [i == date for i in time_seed_date]
    time_seed_count = time_seed_count[filter_time_seed]

    if len(time_seed_count) > 0:
        # loop over trusts
        time_seed_trust = time_seed_trust[filter_time_seed]
        for j in range(len(time_seed_trust)):
            # extract trust
            trust = time_seed_trust[j]

            # extract wards
            filter_wards = [i == trust for i in ward_probs_trust]
            tward_probs = ward_probs[filter_wards]
            tward_probs_ind = ward_probs_ind[filter_wards]

            # extract number of seeds
            nseeds = time_seed_count[j]

            # select seeds in age-classes at random according to the
            # initial probabilities
            age_seeds = np.random.multinomial(nseeds, age_probs)

            # run over each age demographic
            for demographic in range(len(age_seeds)):
                # check if any seeding is to be done in this demographic
                if age_seeds[demographic] > 0:
                    # select seeds in wards at random according to the
                    # initial probabilities
                    seeds = np.random.multinomial(age_seeds[demographic],
                                                  tward_probs)

                    # now seed the infections
                    for i in range(len(seeds)):
                        ward = tward_probs_ind[i]
                        num = seeds[i]
                        if num > 0:
                            seed_network = network.subnets[demographic]
                            seed_wards = seed_network.nodes
                            seed_infections = \
                                infections.subinfs[demographic].play
                            try:
                                ward = seed_network.get_node_index(ward)

                                if seed_wards.play_suscept[ward] == 0:
                                    Console.warning(
                                        f"Cannot seed {num} infection(s) "
                                        f"in ward {ward} as there are no "
                                        f"susceptibles remaining")
                                    continue
                                elif seed_wards.play_suscept[ward] < num:
                                    Console.warning(
                                        f"Not enough susceptibles in ward "
                                        f"to seed all {num}")
                                    num = seed_wards.play_suscept[ward]

                                seed_wards.play_suscept[ward] -= num

                                if demographic is not None:
                                    Console.print(
                                        f"seeding demographic {demographic} "
                                        f"play_infections[0][{ward}] += {num}")
                                else:
                                    Console.print(
                                        f"seeding play_infections[0][{ward}] "
                                        f"+= {num}")

                                seed_infections[0][ward] += num

                            except Exception as e:
                                Console.error(
                                    f"Unable to seed {num} infection(s) in "
                                    f"ward {ward}. The error was "
                                    f"{e.__class__}: {e}. Please double-check "
                                    f"that you are trying to seed a node "
                                    f"that exists in this network.")
                                raise e

    # end profiler
    p.stop()