def output_db(population: Population, network: Networks, workspace: Workspace,
              output_dir: OutputFiles, **kwargs):
    """Write per-ward infection-stage counts for every demographic subnet.

    Three SQLite databases are written each day:
      * stages.db  - one ``<subnet>_totals`` row per ward per day
      * stages2.db - a wide ``results`` table upserted on (day, ward)
      * stages3.db - a ``compact`` table holding only the channels listed
        in ``_out_channels`` for each subnet, upserted on (day, ward)

    Args:
        population: current day's population state (supplies ``day``)
        network: the demographic Networks object being simulated
        workspace: per-subnet workspaces with ``ward_inf_tot`` counts
        output_dir: OutputFiles used to open/create the databases
    """
    Console.print(f"Calling output_db for a {network.__class__} object")

    # open a database to hold the data - call the 'create_tables'
    # function on this database when it is first opened
    conn = output_dir.open_db("stages.db", initialise=create_tables(network))
    conn2 = output_dir.open_db("stages2.db", initialise=create_tables_2(network))
    conn3 = output_dir.open_db("stages3.db", initialise=create_tables_3(network))
    c = conn.cursor()
    c2 = conn2.cursor()
    c3 = conn3.cursor()

    # get each demographic's data
    for i, subnet in enumerate(network.subnets):
        name = subnet.name
        subspace = workspace.subspaces[i]
        ward_inf_tot = subspace.ward_inf_tot
        n_inf_classes = subspace.n_inf_classes
        keep_channels = _out_channels[name]

        col_names = ["day", "ward"] + [f"{name}_{j}" for j in range(n_inf_classes)]
        col2_names = ["day", "ward"] + [f"{name}_{j}" for j in keep_channels]

        # Build each query once per subnet instead of once per ward.
        # Identifiers (table/column names) cannot be bound as parameters but
        # are generated purely from code; all *values* now go through '?'
        # placeholders, closing the SQL-injection hole the old string-built
        # queries had.  'excluded.<col>' refers to the values of the row that
        # failed to insert, which is exactly what the old literal-valued
        # 'do update set' clause produced.
        totals_sql = (f"insert into {name}_totals values "
                      f"({','.join('?' * len(col_names))})")

        col_str = ','.join(col_names)
        update_str = ','.join(f"{cn} = excluded.{cn}" for cn in col_names[2:])
        results_sql = (f"insert into results ({col_str}) values "
                       f"({','.join('?' * len(col_names))}) "
                       f"on conflict(day, ward) do update set {update_str}")

        col2_str = ','.join(col2_names)
        update2_str = ','.join(f"{cn} = excluded.{cn}" for cn in col2_names[2:])
        compact_sql = (f"insert into compact ({col2_str}) values "
                       f"({','.join('?' * len(col2_names))}) "
                       f"on conflict(day, ward) do update set {update2_str}")

        # ward 0 is skipped (loop starts at 1)
        for k in range(1, subspace.nnodes + 1):
            vals = [population.day, k]
            for j in range(n_inf_classes):
                # TODO: What is this?? Why are some classes deltas?
                if j == 1 or j == 3:
                    vals.append(ward_inf_tot[j - 1][k] + ward_inf_tot[j][k])
                else:
                    vals.append(ward_inf_tot[j][k])

            c.execute(totals_sql, vals)
            c2.execute(results_sql, vals)

            # keep only the output channels selected for this subnet
            keeps = [vals[x + 2] for x in range(n_inf_classes)
                     if x in keep_channels]
            c3.execute(compact_sql, [population.day, k] + keeps)

    conn.commit()
    conn2.commit()
    conn3.commit()
def output_wards_serial(network: metawards.Network, population: metawards.Population,
                        workspace: metawards.Workspace, out_dir: metawards.OutputFiles,
                        **kwargs):
    """Append one day's per-ward channel outputs to the shared run database.

    Performs one-off extractor setup on first call (flagged on
    ``network.params``), records the current day in ``day_table``, bulk-inserts
    every configured output channel into ``results_table`` and updates the
    run's ``end_day``.

    Args:
        network: the simulated network (params carries the setup flag)
        population: supplies the current ``day`` and ``date``
        workspace: referenced by the configured channel expressions
        out_dir: OutputFiles used to register/create ``rundata.dat``
    """
    global _sql_file_name
    global _run_index

    # Opened so the output directory creates/registers the file; the actual
    # writes below go through a direct sqlite connection to _sql_file_name.
    connection = out_dir.open_db("rundata.dat", auto_bzip=False, initialise=None)

    # One-shot setup, flagged on the params object.
    # Potential problem: what if we accidentally hit a real attribute?
    if not hasattr(network.params, "_uq4covid_setup"):
        network.params._uq4covid_setup = True
        extractor_setup(network, **kwargs)

    # Prepare database entries
    database = connect(_sql_file_name)
    c: Cursor = database.cursor()

    # Write the current day, some may be longer / shorter, so ignore duplicate entries
    c.execute("insert or ignore into day_table(day,date) values (?,?)",
              (int(population.day), str(population.date)))
    database.commit()

    # Write results for infections and removed
    # NOTE: Don't re-use time_index as if there is a duplicate then the rowid will be zero
    # TODO: List comprehension is fine, but consider numpy (or equivalent) for more speed
    output_src = 0
    mode_str = "normal write"

    # The index has already been sent to the database, so reuse it here safely
    for channel_index, channel_name in enumerate(_output_channels_list):
        Console.print(
            f"Writing channel {channel_name} to database src = "
            f"{_output_channels_list[channel_name][output_src]}: {mode_str}")

        # SECURITY NOTE(review): eval() of a configured expression string.
        # This is only safe while _output_channels_list is trusted, in-code
        # configuration — it must never be populated from external input.
        data_source = eval(_output_channels_list[channel_name][output_src])

        # index 0 is skipped (presumably the null/overall ward — confirm)
        values = [(i, channel_index, x, int(population.day), _run_index)
                  for i, x in enumerate(data_source) if i != 0]
        c.executemany(
            "insert into results_table(ward_id,output_channel,sim_out,sim_time,run_id) "
            "values (?,?,?,?,?)", values)

    # Write last day into run table
    c.execute("update run_table set end_day = ? where run_index = ?",
              (int(population.day), _run_index))
    database.commit()
    database.close()
def output_db(population: Population, network: Networks, workspace: Workspace,
              output_dir: OutputFiles, **kwargs):
    """Write the compact per-ward stage counts, starting at first infection.

    For each demographic subnet, upserts the channels listed in
    ``_out_channels`` into the ``compact`` table of stages3.db, keyed on
    (day, ward).  A ward is only written once its first infection has been
    seen (tracked globally in ``_zero_crossings``).

    Args:
        population: supplies the current ``day``
        network: the demographic Networks object
        workspace: per-subnet workspaces with ``ward_inf_tot`` counts
        output_dir: OutputFiles used to open/create the database
    """
    Console.print(f"Calling output_db for a {network.__class__} object")

    conn3 = output_dir.open_db("stages3.db", initialise=create_tables_3(network))
    c3 = conn3.cursor()

    # get each demographic's data
    for i, subnet in enumerate(network.subnets):
        subspace = workspace.subspaces[i]
        ward_inf_tot = subspace.ward_inf_tot
        n_inf_classes = subspace.n_inf_classes
        keep_channels = _out_channels[subnet.name]

        col2_names = ["day", "ward"] + [f"{subnet.name}_{j}" for j in keep_channels]

        # Build the upsert once per subnet.  All values are bound via '?'
        # placeholders (the old version interpolated them into the SQL text);
        # 'excluded.<col>' yields the same new values the old literal
        # 'do update set' clause used.
        col2_str = ','.join(col2_names)
        update_str = ','.join(f"{cn} = excluded.{cn}" for cn in col2_names[2:])
        qstring = (f"insert into compact ({col2_str}) "
                   f"values ({','.join('?' * len(col2_names))}) "
                   f"on conflict(day, ward) do update set {update_str}")

        for k in range(1, subspace.nnodes + 1):
            if k not in _zero_crossings:
                _zero_crossings[k] = False

            # Try to fudge a marker for first infections.  This test does not
            # depend on the stage index, so run it once per ward (the old code
            # repeated it inside the stage loop with identical effect).
            if ward_inf_tot[0][k] != 0 and _zero_crossings[k] is False:
                _zero_crossings[k] = True
                Console.print(f"Got first infection in ward {k}")

            vals = [population.day, k]
            for j in range(n_inf_classes):
                # TODO: What is this?? Why are some classes deltas?
                if j == 1 or j == 3:
                    vals.append(ward_inf_tot[j - 1][k] + ward_inf_tot[j][k])
                else:
                    vals.append(ward_inf_tot[j][k])

            # only write wards that have seen at least one infection
            if _zero_crossings[k] is True:
                keeps = [vals[x + 2] for x in range(n_inf_classes)
                         if x in keep_channels]
                c3.execute(qstring, [population.day, k] + keeps)

    conn3.commit()
def output_db(population: Population, network: Networks, workspace: Workspace,
              output_dir: OutputFiles, **kwargs):
    """Compute daily incidences per ward and write them to stages.db.

    Derives new-event counts (incidences) for each demographic by differencing
    today's cumulative ``ward_inf_tot`` against yesterday's snapshot (cached on
    each subspace as ``output_previous``), then inserts one row per ward into
    the ``compact`` table — but only once a ward has seen its first infection
    (tracked globally in ``_zero_crossings``).

    The incidence chain MUST be evaluated in order, as each demographic's
    incidence feeds the next: asymp -> critical -> hospital -> genpop -> E.

    Args:
        population: supplies the current ``day``
        network: Networks holding the four demographic subnets
        workspace: per-subnet workspaces with ``ward_inf_tot`` counts
        output_dir: OutputFiles used to open/create the database
    """
    Console.print(f"Calling output_db for a {network.__class__} object")

    conn3 = output_dir.open_db("stages.db", initialise=create_tables(network))
    c3 = conn3.cursor()

    ## setup marker for previous day
    for i, subnet in enumerate(network.subnets):
        ## if first day, then create a zeroed copy of the ward data
        ## these should all be zero at day 0 and so not affect incidence
        if not hasattr(workspace.subspaces[i], "output_previous"):
            workspace.subspaces[i].output_previous = deepcopy(
                workspace.subspaces[i].ward_inf_tot)
            for stage_row in workspace.subspaces[i].output_previous:
                for w in range(len(stage_row)):
                    stage_row[w] = 0

    ## extract Rprime (new removals) and Dprime (new deaths) per demographic
    Rprime = [[] for _ in range(4)]
    Dprime = [[] for _ in range(4)]
    for i, subnet in enumerate(network.subnets):
        ## get yesterday's data
        ward_inf_previous = workspace.subspaces[i].output_previous
        ## get today's data
        ward_inf_tot = workspace.subspaces[i].ward_inf_tot
        ## new deaths across wards (asymp has no death channel)
        if subnet.name != "asymp":
            for old, new in zip(ward_inf_previous[3], ward_inf_tot[3]):
                Dprime[i].append(new - old)
        ## new removals across wards
        for old, new in zip(ward_inf_previous[2], ward_inf_tot[2]):
            Rprime[i].append(new - old)

    ## calculate Iprime in each demographic
    Iprime = [[] for _ in range(4)]

    ## NEED TO DO FOLLOWING CALCULATIONS IN ORDER
    ## extract subnets names
    sub_names = [network.subnets[i].name for i in range(4)]

    ## ASYMPTOMATICS
    ia = sub_names.index("asymp")
    ward_inf_previous = workspace.subspaces[ia].output_previous
    ward_inf_tot = workspace.subspaces[ia].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc in zip(ward_inf_previous[1], ward_inf_tot[1],
                              Rprime[ia]):
        Iprime[ia].append(new - old + Rinc)

    ## CRITICAL
    ic = sub_names.index("critical")
    ward_inf_previous = workspace.subspaces[ic].output_previous
    ward_inf_tot = workspace.subspaces[ic].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc in zip(ward_inf_previous[1], ward_inf_tot[1],
                                    Rprime[ic], Dprime[ic]):
        Iprime[ic].append(new - old + Rinc + Dinc)

    ## HOSPITAL (critical incidence counts as outflow from hospital)
    ih = sub_names.index("hospital")
    ward_inf_previous = workspace.subspaces[ih].output_previous
    ward_inf_tot = workspace.subspaces[ih].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc, Cinc in zip(ward_inf_previous[1],
                                          ward_inf_tot[1], Rprime[ih],
                                          Dprime[ih], Iprime[ic]):
        Iprime[ih].append(new - old + Rinc + Dinc + Cinc)

    ## GENPOP (hospital incidence counts as outflow from genpop)
    ig = sub_names.index("genpop")
    ward_inf_previous = workspace.subspaces[ig].output_previous
    ward_inf_tot = workspace.subspaces[ig].ward_inf_tot
    ## calculate incidence
    for old, new, Rinc, Dinc, Hinc in zip(ward_inf_previous[1],
                                          ward_inf_tot[1], Rprime[ig],
                                          Dprime[ig], Iprime[ih]):
        Iprime[ig].append(new - old + Rinc + Dinc + Hinc)

    ## calculate Eprime in GENPOP demographic
    ## (ward_inf_previous / ward_inf_tot still refer to genpop here)
    Eprime = []
    for old, new, Iinc, Ainc in zip(ward_inf_previous[0], ward_inf_tot[0],
                                    Iprime[ig], Iprime[ia]):
        Eprime.append(new - old + Iinc + Ainc)

    ## loop over wards and write to the database
    wards = range(0, workspace.subspaces[0].nnodes + 1)
    ## renamed from 'day' so the write-loop variable no longer shadows it
    days = [population.day] * len(wards)

    ## set column names
    col_names = ["day", "ward", "Einc", "E", "Iinc", "I", "RI", "DI", "Ainc",
                 "A", "RA", "Hinc", "H", "RH", "DH", "Cinc", "C", "RC", "DC"]
    col_str = ','.join(col_names)

    ## values are bound via '?' placeholders rather than interpolated into
    ## the SQL text (the old version was open to SQL injection in principle)
    qstring = (f"insert into compact ({col_str}) "
               f"values ({','.join('?' * len(col_names))})")

    ## extract demographics
    asymp_ward = workspace.subspaces[ia].ward_inf_tot
    genpop_ward = workspace.subspaces[ig].ward_inf_tot
    hospital_ward = workspace.subspaces[ih].ward_inf_tot
    critical_ward = workspace.subspaces[ic].ward_inf_tot

    ## write to the database; the unpacked names now match col_names
    ## positionally (the old code unpacked hospital deaths as 'RD' although
    ## the column is 'DH' — same position, misleading name)
    for day, ward, Einc, E, Iinc, I, RI, DI, Ainc, A, RA, Hinc, H, RH, DH, \
            Cinc, C, RC, DC in zip(
                days, wards, Eprime, genpop_ward[0], Iprime[ig],
                genpop_ward[1], genpop_ward[2], genpop_ward[3], Iprime[ia],
                asymp_ward[1], asymp_ward[2], Iprime[ih], hospital_ward[1],
                hospital_ward[2], hospital_ward[3], Iprime[ic],
                critical_ward[1], critical_ward[2], critical_ward[3]):
        if ward not in _zero_crossings:
            _zero_crossings[ward] = False

        ## try to fudge a marker for first infections (ward 0 never written)
        if Einc != 0 and _zero_crossings[ward] is False and ward != 0:
            _zero_crossings[ward] = True
            Console.print(f"Got first infection in ward {ward}")

        if _zero_crossings[ward] is True:
            c3.execute(qstring, [day, ward, Einc, E, Iinc, I, RI, DI, Ainc,
                                 A, RA, Hinc, H, RH, DH, Cinc, C, RC, DC])

    conn3.commit()

    ## save today's data so that it can be used tomorrow
    for i, subnet in enumerate(network.subnets):
        workspace.subspaces[i].output_previous = deepcopy(
            workspace.subspaces[i].ward_inf_tot)