Example #1
# Get the policy shifts from the CART tree to compute different values of gamma(t)
# depending on the policy that will be in place in the future, which affects the predictions
dict_normalized_policy_gamma_countries, dict_current_policy_countries = (
    get_normalized_policy_shifts_and_current_policy_all_countries(
        policy_data_countries=policy_data_countries[policy_data_countries.date <= testing_date],
        pastparameters=pastparameters,
    )
)
# Setting the same value for these 2 policies because of the inherent structure of the tree
dict_normalized_policy_gamma_countries[future_policies[3]] = dict_normalized_policy_gamma_countries[future_policies[5]]

## US Only Policies
dict_normalized_policy_gamma_us_only, dict_current_policy_us_only = (
    get_normalized_policy_shifts_and_current_policy_us_only(
        policy_data_us_only=policy_data_us_only[policy_data_us_only.date <= testing_date],
        pastparameters=pastparameters,
    )
)
dict_current_policy_international = dict_current_policy_countries.copy()
dict_current_policy_international.update(dict_current_policy_us_only)
df_current_policies = pd.DataFrame({
    "country": [x[0] for x in dict_current_policy_international.keys()],
    "province": [x[1] for x in dict_current_policy_international.keys()],
    "date": [testing_date for _ in range(len(dict_current_policy_international))],
    f"current_policy_{testing_date_file[4:]}": [
        dict_current_policy_international[x]
        for x in dict_current_policy_international.keys()
    ]
})
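
# Minimal self-contained sketch (toy data, hypothetical names, not from the source) of the
# pattern above: merge a country-level and a US-only policy dict, both keyed by
# (country, province) tuples, then flatten the keys into DataFrame columns.
import pandas as pd

toy_policy_countries = {("France", "None"): "Restrict_Mass_Gatherings"}
toy_policy_us_only = {("US", "Alabama"): "Lockdown"}
toy_testing_date_file = "20200510"  # assumed YYYYMMDD format, as suggested by the [4:] slice

toy_policy_international = toy_policy_countries.copy()
toy_policy_international.update(toy_policy_us_only)  # US entries take precedence on key clashes
toy_df = pd.DataFrame({
    "country": [k[0] for k in toy_policy_international],
    "province": [k[1] for k in toy_policy_international],
    "date": ["2020-05-10"] * len(toy_policy_international),
    f"current_policy_{toy_testing_date_file[4:]}": list(toy_policy_international.values()),
})
# toy_df: one row per (country, province) pair with the policy currently in place.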

########################################################################
Example #2
             datetime.strptime(yesterday, '%Y%m%d')).days
std_date = 30
mean_magnitude = 1
std_magnitude = 0.75
truncate_magnitude = [0.25, 1.75]
#n_magnitudes = 3
n_dates = 20
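
# Hedged sketch (assumption; the source's generator is not shown here): these parameters
# read like inputs to a random policy-scenario sampler, e.g. magnitudes drawn from a
# normal(mean_magnitude, std_magnitude) truncated to truncate_magnitude, and enaction-date
# offsets spread around `yesterday` with std_date days of standard deviation. Hypothetical helper:
import numpy as np
from datetime import datetime, timedelta
from scipy.stats import truncnorm


def sample_policy_scenarios(yesterday_str, n_dates=20, std_date=30, mean_magnitude=1.0,
                            std_magnitude=0.75, truncate_magnitude=(0.25, 1.75), seed=0):
    """Hypothetical: sample (date, magnitude) pairs for policy-change scenarios."""
    np.random.seed(seed)
    # Standardized truncation bounds expected by scipy's truncnorm
    a = (truncate_magnitude[0] - mean_magnitude) / std_magnitude
    b = (truncate_magnitude[1] - mean_magnitude) / std_magnitude
    magnitudes = truncnorm(a, b, loc=mean_magnitude, scale=std_magnitude).rvs(size=n_dates)
    base_date = datetime.strptime(yesterday_str, "%Y%m%d")
    day_offsets = np.random.normal(loc=0, scale=std_date, size=n_dates).round().astype(int)
    dates = [base_date + timedelta(days=int(d)) for d in day_offsets]
    return list(zip(dates, magnitudes))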

# Setting the same value for these 2 policies because of the inherent structure of the tree
dict_normalized_policy_gamma_countries[future_policies[3]] = dict_normalized_policy_gamma_countries[future_policies[5]]

## US Only Policies
dict_normalized_policy_gamma_us_only, dict_current_policy_us_only = (
    get_normalized_policy_shifts_and_current_policy_us_only(
        policy_data_us_only=policy_data_us_only,
        pastparameters=pastparameters,
    )
)
dict_current_policy_international = dict_current_policy_countries.copy()
dict_current_policy_international.update(dict_current_policy_us_only)

# Initializing lists of the different dataframes that will be concatenated at the end
list_df_global_predictions_since_today_scenarios = []
list_df_global_predictions_since_100_cases_scenarios = []
obj_value = 0

for sw_intervention_days in [7, 14, 21, 28]:

    for continent, country, province in zip(
            popcountries.Continent.tolist(),
            popcountries.Country.tolist(),
            popcountries.Province.tolist(),
########################################################################
Example #3
pastparameters = pd.read_csv(
    PATH_TO_FOLDER_DANGER_MAP + f"predicted/parameters_global_CR_all/Parameters_Global_CR_{yesterday}.csv"
)
param_MATHEMATICA = False
dict_normalized_policy_gamma_countries, dict_current_policy_countries = (
    get_normalized_policy_shifts_and_current_policy_all_countries(
        policy_data_countries=policy_data_countries[policy_data_countries.date <= yesterday],
        pastparameters=pastparameters,
    )
)
# Setting the same value for these 2 policies because of the inherent structure of the tree
dict_normalized_policy_gamma_countries[future_policies[3]] = dict_normalized_policy_gamma_countries[future_policies[5]]

dict_normalized_policy_gamma_us_only, dict_current_policy_us_only = (
    get_normalized_policy_shifts_and_current_policy_us_only(
        policy_data_us_only=policy_data_us_only[policy_data_us_only.date <= yesterday],
        pastparameters=pastparameters,
    )
)
dict_current_policy_international = dict_current_policy_countries.copy()
dict_current_policy_international.update(dict_current_policy_us_only)

#%% Mobility
mobility = pd.read_csv('C:/Users/omars/Downloads/Global_Mobility_Report.csv')
# Keep US state-level rows: sub_region_1 is set (state) and sub_region_2 is empty (no county)
mobility_us = mobility.query('country_region_code == "US"')
mobility = mobility_us[mobility_us.sub_region_1.notnull() & mobility_us.sub_region_2.isnull()]

mobility['date'] = mobility['date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
mob_columns = list(mobility.columns[7:])  # the mobility percent-change columns (after the metadata and date columns)

# Average each state's mobility metrics over the window [prediction_date, testing_date]
in_window = (mobility['date'] >= prediction_date) & (mobility['date'] <= testing_date)
mobility_agg = (
    mobility[in_window]
    .groupby('sub_region_1')[mob_columns]
    .mean()
    .dropna(axis=1)  # drop metrics that end up missing for some states
    .reset_index()
)
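
# Sketch (toy data, hypothetical names, not from the source): mobility_agg now holds one
# row per US state (sub_region_1) with the mean percent change of each retained mobility
# metric over the window above. A state-level join would look roughly like this:
import pandas as pd

toy_mobility_agg = pd.DataFrame({
    "sub_region_1": ["Alabama", "Alaska"],
    "retail_and_recreation_percent_change_from_baseline": [-20.5, -12.3],
})
toy_state_predictions = pd.DataFrame({  # hypothetical predictions frame keyed by state
    "province": ["Alabama", "Alaska"],
    "total_detected": [100, 50],
})
toy_joined = toy_state_predictions.merge(
    toy_mobility_agg, left_on="province", right_on="sub_region_1", how="left"
)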
#%% Construct Agg Dset for US