示例#1
0
    .xlabel("date").ylabel("cases")\
    .show()

# now, do district-level estimation
smoothing = 10
state_cases["geo_reported"] = state_cases.geo_reported.str.strip()
# daily case counts per (district, date reported)
district_time_series = state_cases.groupby(
    ["geo_reported", "date_reported"])["date_reported"].count().sort_index()
migration = np.zeros((len(district_names), len(district_names)))
estimates = []
# widest district name + 1; used to pad the progress-bar label
max_len = 1 + max(map(len, district_names))
with tqdm(etl.normalize(district_names)) as districts:
    for district in districts:
        # fixed: ":<13,760" was an invalid format specifier and raised
        # ValueError at runtime; pad to max_len so labels stay aligned
        districts.set_description(f"{district:<{max_len}}")
        try:
            (dates, Rt_pred, Rt_CI_upper, Rt_CI_lower,
             *_) = analytical_MPVS(district_time_series.loc[district],
                                   CI=CI,
                                   smoothing=convolution(window=smoothing),
                                   totals=False)
            estimates.append(
                (district, Rt_pred[-1], Rt_CI_lower[-1], Rt_CI_upper[-1],
                 linear_projection(dates, Rt_pred, smoothing)))
        except (IndexError, ValueError):
            # best-effort: districts whose series cannot be fit get NaN
            # placeholders instead of aborting the whole loop
            estimates.append((district, np.nan, np.nan, np.nan, np.nan))
estimates = pd.DataFrame(estimates)
estimates.columns = ["district", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
estimates.set_index("district", inplace=True)
estimates.to_csv(data / "Rt_estimates_private.csv")
print(estimates)
示例#2
0
    ])\
    .title("\nDKI Jakarta: Daily Cases")\
    .xlabel("\ndate")\
    .ylabel("cases\n")\
    .annotate("\nBayesian training process on empirical data, with anomalies identified")\
    .show()

logger.info("district-level projections")
# daily case counts per (district, date of positive test result)
district_cases = dkij.groupby(["district", "date_positiveresult"])["id"].count().sort_index()
districts = dkij.district.unique()
migration = np.zeros((len(districts), len(districts)))
estimates = []
# widest district name + 1; used to pad the progress-bar label
max_len = 1 + max(map(len, districts))
with tqdm(districts) as progress:
    for district in districts:
        # fixed: ":<13,760" was an invalid format specifier and raised
        # ValueError at runtime; pad to max_len so labels stay aligned
        progress.set_description(f"{district:<{max_len}}")
        (dates, RR_pred, RR_CI_upper, RR_CI_lower, *_) = analytical_MPVS(district_cases.loc[district], CI = CI, smoothing = smoothing, totals=False)
        estimates.append((district, RR_pred[-1], RR_CI_lower[-1], RR_CI_upper[-1], linear_projection(dates, RR_pred, window)))
estimates = pd.DataFrame(estimates)
estimates.columns = ["district", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
estimates.set_index("district", inplace=True)
estimates.to_csv(data/"Jakarta_Rt_projections.csv")
print(estimates)

logger.info("generating choropleths")
# join the Rt estimates onto the district geometries, then render the map
gdf = gdf.merge(estimates, left_on="NAME_2", right_on="district")
chart = plt.choropleth(gdf, lambda row: row["NAME_2"]+"\n")
chart = chart.adjust(left=0.06)
chart = chart.title("\nDKI Jakarta: $R_t$ by District")
chart.show()
示例#3
0
logger.info("province-level projections")
migration = np.zeros((len(provinces), len(provinces)))
estimates = []
# widest province name + 1; used to pad the progress-bar label
max_len = 1 + max(map(len, provinces))
with tqdm(provinces) as progress:
    for (province, cases) in province_cases.items():
        # fixed: ":<13,760" was an invalid format specifier and raised
        # ValueError at runtime; pad to max_len so labels stay aligned
        progress.set_description(f"{province:<{max_len}}")
        (dates, Rt_pred, Rt_CI_upper, Rt_CI_lower,
         *_) = analytical_MPVS(cases, CI=CI, smoothing=smoothing)
        # locate the peak Rt within April 2020 (exclusive of 31 Mar, before 1 May)
        apr_idx = np.argmax(dates > "31 Mar, 2020")
        may_idx = np.argmax(dates >= "01 May, 2020")
        max_idx = np.argmax(Rt_pred[apr_idx:may_idx])
        apr_max_idx = apr_idx + max_idx
        estimates.append(
            (province, Rt_pred[-1], Rt_CI_lower[-1], Rt_CI_upper[-1],
             max(0, linear_projection(dates, Rt_pred, window,
                                      period=2 * weeks)), Rt_pred[apr_max_idx],
             Rt_CI_lower[apr_max_idx], Rt_CI_upper[apr_max_idx],
             dates[apr_max_idx], cases.iloc[-1][0]))
        # tqdm wraps `provinces` but the loop iterates province_cases.items(),
        # so the bar has to be advanced manually
        progress.update()
estimates = pd.DataFrame(estimates)
estimates.columns = [
    "province", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj", "Rt_max",
    "Rt_CI_lower_at_max", "Rt_CI_upper_at_max", "date_at_max_Rt", "total_cases"
]
estimates.set_index("province", inplace=True)
estimates.to_csv(data / "IDN_only_apr_Rt_max_filtered.csv")
print(estimates)

# choropleths
logger.info("generating choropleths")
gdf = gpd.read_file("data/gadm36_IDN_shp/gadm36_IDN_1.shp").drop([
示例#4
0
    ["regency",
     "confirmed"]).size().sort_index().unstack(fill_value=0).stack()
migration = np.zeros((len(regencies), len(regencies)))
estimates = []
# widest regency name + 1; used to pad the progress-bar label
max_len = 1 + max(map(len, regencies))
with tqdm(regencies) as progress:
    for regency in regencies:
        # fixed: ":<13,760" was an invalid format specifier and raised
        # ValueError at runtime; pad to max_len so labels stay aligned
        progress.set_description(f"{regency:<{max_len}}")
        (dates, Rt_pred, Rt_CI_upper, Rt_CI_lower,
         *_) = analytical_MPVS(regency_cases.loc[regency],
                               CI=CI,
                               smoothing=smoothing,
                               totals=False)
        estimates.append(
            (regency, Rt_pred[-1], Rt_CI_lower[-1], Rt_CI_upper[-1],
             linear_projection(dates, Rt_pred, 7)))
estimates = pd.DataFrame(estimates)
estimates.columns = ["regency", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"]
estimates.set_index("regency", inplace=True)
estimates.to_csv("data/SULSEL_Rt_projections.csv")
print(estimates)

# load level-2 administrative boundaries, keep South Sulawesi only,
# and attach the Rt estimates by regency name
gdf = gpd.read_file("data/gadm36_IDN_shp/gadm36_IDN_2.shp")
gdf = gdf.query("NAME_1 == 'Sulawesi Selatan'")
gdf = gdf.merge(estimates, left_on="NAME_2", right_on="regency")

choro = plt.choropleth(gdf, mappable=plt.get_cmap(0.4, 1.4, "viridis"))

# clamp the longitude window on every map panel (the last axis is the colorbar)
for axis in choro.figure.axes[:-1]:
    plt.sca(axis)
    plt.xlim(left=119, right=122)
示例#5
0
    subdistricts = dkij.subdistrict.unique()
    migration = np.zeros((len(subdistricts), len(subdistricts)))
    estimates = []
    # widest subdistrict name + 1; used to pad the progress-bar label
    max_len = 1 + max(map(len, subdistricts))
    with tqdm(subdistricts) as progress:
        for subdistrict in subdistricts:
            # fixed: ":<13,760" was an invalid format specifier and raised
            # ValueError at runtime; pad to max_len so labels stay aligned
            progress.set_description(f"{subdistrict:<{max_len}}")
            try:
                (dates, RR_pred, RR_CI_upper, RR_CI_lower,
                 *_) = analytical_MPVS(subdistrict_cases.loc[subdistrict],
                                       CI=CI,
                                       smoothing=smoothing,
                                       totals=False)
                estimates.append((subdistrict, RR_pred[-1], RR_CI_lower[-1],
                                  RR_CI_upper[-1],
                                  linear_projection(dates, RR_pred, window)))
            except Exception:
                # deliberate best-effort: subdistricts whose series cannot be
                # fit get NaN placeholders rather than aborting the loop
                estimates.append((subdistrict, np.nan, np.nan, np.nan, np.nan))
    estimates = pd.DataFrame(estimates)
    estimates.columns = [
        "subdistrict", "Rt", "Rt_CI_lower", "Rt_CI_upper", "Rt_proj"
    ]
    estimates.set_index("subdistrict", inplace=True)
    estimates.to_csv(data / "Jakarta_Rt_projections.csv")
    print(estimates)

    logger.info("generating choropleths")

    # fixed color scale (0.9-1.4) shared by the Rt choropleth panels
    color_norm = mpl.colors.Normalize(vmin=0.9, vmax=1.4)
    sm = mpl.cm.ScalarMappable(norm=color_norm, cmap="viridis")