Example #1
def prevalence(predicted_state, population_size, name=None):
    """Computes prevalence of E and I individuals

    :param predicted_state: the state at a particular timepoint [batch, M, S]
    :param population_size: the size of the population
    :returns: a dict of mean and 95% credibility intervals for prevalence
              in units of infections per person
    """
    prev = tf.reduce_sum(predicted_state[:, :, 1:3],
                         axis=-1) / tf.squeeze(population_size)
    return mean_and_ci(prev, name=name)
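
All of the examples on this page call a mean_and_ci helper that is not shown here. Below is a minimal sketch of what such a helper could look like, assuming it reduces over the leading sample axis and uses a 95% interval; the actual implementation in the source project may differ in signature and key names.

import numpy as np

def mean_and_ci(samples, q=(2.5, 97.5), name=None):
    """Posterior mean and 95% credible interval over the leading sample axis.

    Sketch for illustration only; not the source project's implementation.
    """
    samples = np.asarray(samples)
    name = name or "value"
    mean = np.mean(samples, axis=0)
    lo, hi = np.percentile(samples, q, axis=0)
    return {name: mean, f"{name}_lo": lo, f"{name}_hi": hi}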
Example #2
def rt(input_file, output_file):
    """Reads an array of next generation matrices and
       outputs mean (ci) local Rt values.

    :param input_file: a pickled xarray of NGMs
    :param output_file: a .csv of mean (ci) values
    """

    with open(input_file, "rb") as f:
        ngm = pkl.load(f)

    rt = np.sum(ngm, axis=-2)
    rt_summary = mean_and_ci(rt, name="Rt")
    exceed = np.mean(rt > 1.0, axis=0)

    rt_summary = pd.DataFrame(rt_summary,
                              index=pd.Index(ngm.coords["dest"],
                                             name="location"))
    rt_summary['Rt_exceed'] = exceed
    rt_summary.to_csv(output_file)
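
For context, here is a toy illustration of the aggregation rt performs: summing each posterior sample of the next generation matrix over its source axis gives a per-location Rt, and averaging the indicator Rt > 1 over samples gives an exceedance probability. The data below are made up; the real input is a pickled xarray of NGMs, as the docstring states.

import numpy as np

# Toy posterior samples of a next generation matrix: [sample, src, dest].
ngm = np.random.gamma(shape=2.0, scale=0.4, size=(1000, 3, 3))

rt = ngm.sum(axis=-2)                # sum over the source axis, as in rt() above
rt_mean = rt.mean(axis=0)            # posterior mean Rt per location
rt_exceed = (rt > 1.0).mean(axis=0)  # proportion of samples with Rt > 1 per location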
Example #3
def predicted_events(events, name=None):
    # Sum events over the trailing axis, then summarise as mean and 95% CI.
    num_events = tf.reduce_sum(events, axis=-1)
    return mean_and_ci(num_events, name=name)
Example #4
            for k, v in summary.items():
                arr = v
                if isinstance(v, tf.Tensor):
                    arr = v.numpy()
                geodata[k] = arr

    ## GIS here
    ltla = gp.read_file(GIS_TEMPLATE, layer="UK2019mod_pop_xgen")
    ltla = ltla[ltla["lad19cd"].str.startswith("E")]  # England only, for now.
    ltla = ltla.sort_values("lad19cd")
    rti = tf.reduce_sum(ngms, axis=-2)

    geosummary(
        ltla,
        (
            mean_and_ci(rti, name="Rt"),
            prev_now,
            cases_now,
            prev_7,
            prev_14,
            prev_21,
            prev_28,
            prev_56,
            cases_7,
            cases_14,
            cases_21,
            cases_28,
            cases_56,
        ),
    )
Example #5
def makeGeopackage(pipelineData):
    """Build the output geopackage from the summary data."""
    config = pipelineData['config']
    settings = config['GeoSummary']

    GIS_TEMPLATE = settings['template']
    GIS_OUTPUT = settings['address']

    # use the pipelineData if available
    # otherwise try to load the processed output
    if 'summary' in pipelineData:
        summaryData = pipelineData['summary']
    else:
        print('SummaryData not present in pipelineData')
        print('Trying to load processed file instead')
        print('Looking for file at', config['SummaryData']['address'])
        config['SummaryData']['input'] = 'processed'
        summaryData = GetData.SummaryData.process(config)

    try:
        ltla = gp.read_file(GIS_TEMPLATE, layer="UK2019mod_pop_xgen")
    except Exception:
        print("Layer UK2019mod_pop_xgen doesn't exist in", GIS_TEMPLATE)
        ltla = gp.read_file(GIS_TEMPLATE)
    ltla = ltla[ltla["lad19cd"].str.startswith("E")]  # England only, for now.
    ltla = ltla.sort_values("lad19cd")

    # FNC: These likely need to be downselected to just the LADs in the summary data
    # do some data checks and throw a warning for now if they're not the same
    # we didn't have an example file with more than 43 LADs
    geo_lads = ltla.lad19cd.values
    config_lads = np.array(summaryData['LADs'])
    if geo_lads.size != config_lads.size:
        print(
            'GEOSUMMARY: Different number of LADs in GIS_TEMPLATE vs summaryData'
        )
        print('GIS_Template:', geo_lads.size)
        print('summaryData:', config_lads.size)

    con_in_geo = np.array([x in geo_lads for x in config_lads]).sum()
    geo_in_con = np.array([x in config_lads for x in geo_lads]).sum()
    if con_in_geo != geo_lads.size:
        print('GEOSUMMARY:', con_in_geo, '/', geo_lads.size,
              'summary data LADs in GIS Template')
    if geo_in_con != config_lads.size:
        print('GEOSUMMARY:', geo_in_con, '/', config_lads.size,
              'GIS Template LADs in summary data')

    rti = tf.reduce_sum(summaryData['metrics']['ngms'], axis=-2)

    geosummary(
        ltla,
        (
            mean_and_ci(rti, name="Rt"),
            summaryData['prev']['now'],
            summaryData['cases']['now'],
            summaryData['prev']['7'],
            summaryData['prev']['14'],
            summaryData['prev']['21'],
            summaryData['prev']['28'],
            summaryData['prev']['56'],
            summaryData['cases']['7'],
            summaryData['cases']['14'],
            summaryData['cases']['21'],
            summaryData['cases']['28'],
            summaryData['cases']['56'],
        ),
    )

    ltla["Rt_exceed"] = np.mean(rti > 1.0, axis=0)
    ltla = ltla.loc[
        :,
        ltla.columns.str.contains(
            r"(?:lad19cd|lad19nm$|prev|cases|Rt|popsize|geometry)", regex=True
        ),
    ]
    ltla.to_file(
        GIS_OUTPUT,
        driver="GPKG",
    )
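
makeGeopackage reads its inputs from nested dicts. The hypothetical pipelineData below only illustrates the keys the function accesses (GeoSummary template/address, SummaryData address, and the summary's LADs, metrics, prev and cases entries); the paths, LAD codes, and placeholder values are assumptions, and the real pipeline populates these structures itself.

import numpy as np

# Hypothetical input, for illustration of the keys makeGeopackage reads.
pipelineData = {
    "config": {
        "GeoSummary": {
            "template": "data/UK2019mod_pop.gpkg",  # GIS_TEMPLATE (assumed path)
            "address": "output/summary.gpkg",       # GIS_OUTPUT (assumed path)
        },
        "SummaryData": {"address": "output/summary.pkl"},  # fallback source
    },
    "summary": {
        "LADs": ["E06000001", "E06000002", "E06000003"],   # assumed LAD codes
        "metrics": {"ngms": np.ones((500, 3, 3))},         # [sample, src, dest]
        # Placeholder per-horizon summaries; in practice these hold
        # mean/CI dicts as produced by mean_and_ci.
        "prev": {k: {} for k in ["now", "7", "14", "21", "28", "56"]},
        "cases": {k: {} for k in ["now", "7", "14", "21", "28", "56"]},
    },
}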
Example #6
def calc_prev(state, name=None):
    # data["N"] (population sizes, cf. Example #1) comes from the enclosing scope.
    prev = np.sum(state[..., 1:3], axis=-1) / np.squeeze(data["N"])
    return mean_and_ci(prev, name=name)
Example #7
def pred_events(events, name=None):
    num_events = np.sum(events, axis=-1)
    return mean_and_ci(num_events, name=name)