Code example #1
from abcpy.backends import BackendMPI  # required by the backend construction below


def setUpModule():
    '''
    If an exception is raised in setUpModule, none of the tests in the
    module will be run.

    This is useful because the teams run in a while loop on initialization,
    only responding to the scheduler's commands, and never execute anything
    else.

    On termination of the scheduler, the teams call quit(), which raises
    SystemExit. Because of this behaviour of setUpModule, no unit tests are
    run for the teams, and we only need to write unit tests from the
    scheduler's point of view.
    '''
    global backend_mpi
    backend_mpi = BackendMPI()
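
For context, a minimal sketch of a test module built around this pattern, assuming abcpy's standard parallelize/collect backend API (the test itself is an illustrative assumption, not part of the original):

import unittest

from abcpy.backends import BackendMPI


def setUpModule():
    # On MPI worker ranks, BackendMPI() enters the worker command loop and
    # never returns, so the test below runs only on the scheduler rank.
    global backend_mpi
    backend_mpi = BackendMPI()


class BackendMPITest(unittest.TestCase):
    def test_parallelize_collect_roundtrip(self):
        data = list(range(4))
        pds = backend_mpi.parallelize(data)
        # sorted(): collection order across ranks is not guaranteed here
        self.assertEqual(sorted(backend_mpi.collect(pds)), data)


if __name__ == '__main__':
    unittest.main()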
コード例 #2
0
    # Assumes module-level imports: os, pickle, numpy as np, and abcpy's
    # BackendMPI, BackendDummy and PMCABC, plus the user-defined Simulator.
    def run(self,
            jobID,
            n_sample,
            steps,
            epsilon_init,
            epsilon_percentile,
            save_output=True,
            parallelize=False):

        assert self._prior_set, 'the prior must be set before calling run()'

        if parallelize:
            backend = BackendMPI()
        else:
            backend = BackendDummy()

        epsilon_init = [epsilon_init] + [None] * (steps - 1)

        sim = Simulator(self, self.to_sample_list, self.priors_over_hood)
        sampler = PMCABC([sim], [self._distance_calc], backend, seed=1)

        journal_filename = self.output_folder + 'journal_' + jobID

        if os.path.exists(journal_filename):
            print('loading from journal file...')
            with open(journal_filename, 'rb') as f:
                journal_init = pickle.load(f)
            stat = journal_init.get_distances()
            print(str(epsilon_percentile) + 'th percentile of initial distances: ',
                  np.percentile(stat, epsilon_percentile))
        else:
            print('first iteration...')
            journal_init = None

        journal = sampler.sample([self._obs],
                                 steps,
                                 epsilon_init,
                                 n_sample,
                                 1,
                                 epsilon_percentile,
                                 journal_class=journal_init)

        stat = journal.get_distances()
        print(str(epsilon_percentile) + 'th percentile of new distances: ',
              np.percentile(stat, epsilon_percentile))
        print('obtained ' + str(n_sample) + ' samples from ' +
              str(journal.number_of_simulations[0]) + ' realizations')

        if save_output:
            with open(journal_filename, 'wb') as f:
                pickle.dump(journal, f)

        self._prior_set = False

        return journal
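
The epsilon schedule passed to sampler.sample above starts from epsilon_init and leaves the later thresholds as None; in this code PMCABC then derives them adaptively from the epsilon_percentile of the previous step's distances. A minimal, runnable illustration of the list being built:

steps = 4
epsilon_init = 10.0
schedule = [epsilon_init] + [None] * (steps - 1)
print(schedule)  # [10.0, None, None, None]; the None entries are filled adaptively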
Code example #3
                                              namefile_postfix_no_index +
                                              ".npy")
        kernel_sr_values_timestep = np.load(inference_folder +
                                            "kernel_sr_values_timestep" +
                                            namefile_postfix_no_index + ".npy")
        kernel_sr_values_cumulative = np.load(inference_folder +
                                              "kernel_sr_values_cumulative" +
                                              namefile_postfix_no_index +
                                              ".npy")
        print("Loaded previously computed scoring rule values.")
        compute_srs = False
    except FileNotFoundError:
        pass

if compute_srs:
    backend = BackendMPI() if use_MPI else BackendDummy()
    if gamma_kernel_score is None:
        print("Set gamma from simulations from the model")
        gamma_kernel_score = estimate_bandwidth_timeseries(
            ABC_model,
            backend=backend,
            n_theta=1000,
            seed=seed + 1,
            num_vars=num_vars_in_Lorenz)
        print("Estimated gamma ", gamma_kernel_score)

    print("Computing scoring rule values by generating the predictive distribution.")

    draw_from_params = DrawFromParamValues([ABC_model],
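
The try/except FileNotFoundError block above is a compute-or-load cache for expensive scoring-rule values. A self-contained sketch of the same idiom (the file name and the stand-in computation are illustrative assumptions):

import numpy as np

cache_file = "expensive_result.npy"  # illustrative name
compute = True
try:
    result = np.load(cache_file)
    print("Loaded previously computed result.")
    compute = False
except FileNotFoundError:
    pass

if compute:
    result = np.arange(10) ** 2  # stand-in for the expensive computation
    np.save(cache_file, result)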
Code example #4
def setup_backend(parallel=False):
    """Return BackendMPI when running in parallel, otherwise the serial BackendDummy."""
    # import lazily so mpi4py is only required when parallel=True
    if parallel:
        from abcpy.backends import BackendMPI
        backend = BackendMPI()
    else:
        from abcpy.backends import BackendDummy
        backend = BackendDummy()
    return backend
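
A quick usage sketch of the returned backend, assuming abcpy's standard parallelize/map/collect API (run under mpirun with parallel=True for the MPI case):

backend = setup_backend(parallel=False)
pds = backend.parallelize(list(range(8)))
doubled = backend.map(lambda x: 2 * x, pds)
print(backend.collect(doubled))  # [0, 2, 4, 6, 8, 10, 12, 14]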
Code example #5
File: MLE_variance.py  Project: eth-cscs/abcpy-models
#from posterior_mode import compute_posterior_mode
#import pylab as plt
###############################################
# ########### Read Observed data ################
from numpy import genfromtxt
my_data = genfromtxt('AllResults.csv', delimiter=',')

sample = False
compute_MLE = False
analysis = False
plot = False
pathology = True

if sample:
    from abcpy.backends import BackendMPI
    backend = BackendMPI()
    for ind in range(1, 10):
        print(ind)

        # True parameter 89.0, 76.0, 2.49, 7e-3, 7.7, 6e-3, 8e-4
        noAP, noNAP, SR_x = int(my_data[0, 16]), int(my_data[0, 11]), float(my_data[0, 21])
        # The parameters considered random and we want to infer
        pAd = Uniform([[5], [150]], name='pAD')
        pAg = Uniform([[5], [150]], name='pAg')
        pT = Uniform([[0.1], [10.0]], name='pT')
        pF = Uniform([[0.1e-3], [9.0e-3]], name='pF')
        aT = Uniform([[0], [10]], name='aT')
        v_z_AP = Uniform([[1.0e-3], [9.0e-3]], name='v_z_AP')
        v_z_NAP = Uniform([[1.0e-4], [9.0e-4]], name='v_z_NAP')
        PD = PlateletDeposition([noAP, noNAP, SR_x, pAd, pAg, pT, pF, aT, v_z_AP, v_z_NAP], name='PD')
        # XObserved = np.array([0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.11600000e+05, 5.96585366e+03,
def main(epsilon,
         sigma,
         filename_prefix,
         perform_standard_optimal_control=False,
         perform_iterative_strategy=True,
         use_sample_with_higher_weight=False,
         use_posterior_median=False,
         n_post_samples=None,
         shift_each_iteration=1,
         n_shifts=10,
         window_size=30,
         only_plot=False,
         plot_file=None,
         plot_days=None,
         loss="deaths_Isc",
         results_folder=None,
         journal_file_name=None,
         training_window_length=None,
         use_mpi=False,
         restart_at_index=None):
    """epsilon is an array with size 3, with order school, work, other
    If use_sample_with_higher_weight is True: we do the procedure with that only, no posterior expectation
    use_posterior_median: do the optimal control with the marginal posterior median.
    n_post_samples: for the posterior expectation. Ignored if use_sample_with_higher_weight or use_posterior_median is True,
    shift_each_iteration and n_shifts are for the iterative strategy.
    """
    if use_mpi:
        print("Using MPI")
        backend = BackendMPI()
    else:
        backend = BackendDummy()

    print("Epsilon: ", epsilon)

    logging.basicConfig(level=logging.INFO)
    ############################ Load relevant data #################################################
    if results_folder is None:
        results_folder = "results/SEI4RD_france_infer_1Mar_31Aug/"
    data_folder = "data/france_inference_data_1Mar_to_31Aug/"

    alpha_home = 1  # set this to 1
    mobility_work = np.load(data_folder + "mobility_work.npy")
    mobility_other = np.load(data_folder + "mobility_other.npy")
    mobility_school = np.load(data_folder + "mobility_school.npy")

    france_pop = np.load(data_folder + "france_pop.npy", allow_pickle=True)

    contact_matrix_home = np.load(data_folder + "contact_matrix_home.npy")
    contact_matrix_work = np.load(data_folder + "contact_matrix_work.npy")
    contact_matrix_school = np.load(data_folder + "contact_matrix_school.npy")
    contact_matrix_other = np.load(data_folder + "contact_matrix_other.npy")

    if journal_file_name is None:
        jrnl = Journal.fromFile(results_folder + "PMCABC_inf3.jrl")
    else:
        jrnl = Journal.fromFile(results_folder + journal_file_name)
    #################################### Define Model #################################################
    # parameters
    n = 5  # number of age groups
    dt = 0.1  # integration timestep
    if training_window_length is not None:
        T = training_window_length
    else:
        T = mobility_school.shape[0] - 1  # horizon time in days
    total_population = france_pop  # population for each age group
    # 16th March: Boris Johnson asked old people to isolate; we then learn a new alpha from the 18th March:
    lockdown_day = 17

    # alpha_home = np.repeat(alpha_home, np.int(1 / dt), axis=0)
    mobility_work = np.repeat(mobility_work[0:T + 1], int(1 / dt), axis=0)
    mobility_other = np.repeat(mobility_other[0:T + 1], int(1 / dt), axis=0)
    mobility_school = np.repeat(mobility_school[0:T + 1], int(1 / dt), axis=0)
    # daily_tests = np.repeat(daily_tests, np.int(1 / dt), axis=0)

    # ABC model (priors need to be fixed better):
    beta = Uniform([[0], [0.5]], name='beta')  # controls how fast the epidemic grows; related to R_0
    d_L = Uniform([[1], [16]], name='d_L')  # average duration of incubation
    d_C = Uniform([[1], [16]], name='d_C')  # average time before going to clinical
    d_R = Uniform([[1], [16]], name='d_R')  # average recovery time
    d_RC = Uniform([[1], [16]], name='d_RC')  # average recovery time
    d_D = Uniform([[1], [16]], name='d_D')  # average duration of infected clinical state (resulting in death)
    p01 = Uniform([[0], [1]], name="p01")
    p02 = Uniform([[0], [1]], name="p02")
    p03 = Uniform([[0], [1]], name="p03")
    p04 = Uniform([[0], [1]], name="p04")
    p05 = Uniform([[0], [1]], name="p05")
    p11 = Uniform([[0], [1]], name="p11")
    p12 = Uniform([[0], [1]], name="p12")
    p13 = Uniform([[0], [1]], name="p13")
    p14 = Uniform([[0], [1]], name="p14")
    p15 = Uniform([[0], [1]], name="p15")
    initial_exposed = Uniform([[0], [500]], name="initial_exposed")
    alpha_123 = Uniform([[0.3], [1]], name="alpha_123")
    alpha_4 = Uniform([[0], [1]], name="alpha_4")
    alpha_5 = Uniform([[0], [1]], name="alpha_5")

    model = SEI4RD([
        beta, d_L, d_C, d_R, d_RC, d_D, p01, p02, p03, p04, p05, p11, p12, p13,
        p14, p15, initial_exposed, alpha_123, alpha_4, alpha_5
    ],
                   tot_population=total_population,
                   T=T,
                   contact_matrix_school=contact_matrix_school,
                   contact_matrix_work=contact_matrix_work,
                   contact_matrix_home=contact_matrix_home,
                   contact_matrix_other=contact_matrix_other,
                   alpha_school=mobility_school,
                   alpha_work=mobility_work,
                   alpha_home=alpha_home,
                   alpha_other=mobility_other,
                   modify_alpha_home=False,
                   dt=dt,
                   return_once_a_day=True,
                   learn_alphas_old=True,
                   lockdown_day=lockdown_day)

    # guess for a phi function
    NHS_max = 10000

    def phi_func_sc(x):  # a hard-max function
        return np.maximum(0, x - NHS_max)

    def phi_func_death(x):  # a hard-max function
        return np.maximum(0, x)

    # def phi_func(x):
    #     return np.pow(np.maximum(0, x - NHS_max), 2)

    # def phi_func(x, beta=.1):  # this is the softplus, a smooth version of hard max
    #    threshold = 30
    #    shape = x.shape
    #    x = x.reshape(-1)
    #    new_x = x - NHS_max
    #    indices = new_x * beta < threshold
    #    phi_x = copy.deepcopy(new_x)  # is deepcopy actually needed?
    #    phi_x[indices] = np.log(
    #        1 + np.exp(new_x[indices] * beta)) / beta  # approximate for numerical stability in other places
    #    return phi_x.reshape(shape)

    # extract posterior sample points and bootstrap them:
    seed = 1
    np.random.seed(seed)
    iteration = -1
    weights = jrnl.get_weights(iteration) / np.sum(jrnl.get_weights(iteration))
    params = jrnl.get_parameters(iteration)
    if not use_posterior_median:
        if use_sample_with_higher_weight:
            post_samples = np.where(weights == weights.max())[0]
        else:
            # bootstrap
            if n_post_samples is None:
                n_post_samples = len(weights)
            post_samples = np.random.choice(range(len(weights)),
                                            p=weights.reshape(-1),
                                            size=n_post_samples)
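            # this draws indices with probability proportional to the ABC
            # importance weights, turning the weighted posterior sample into
            # an equally weighted bootstrap sample of size n_post_samples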

        beta_values = np.array([params['beta'][i][0] for i in post_samples])
        kappa_values = np.array(
            [1 / params['d_L'][i][0] for i in post_samples])
        gamma_c_values = np.array(
            [1 / params['d_C'][i][0] for i in post_samples])
        gamma_r_values = np.array(
            [1 / params['d_R'][i][0] for i in post_samples])
        gamma_rc_values = np.array(
            [1 / params['d_RC'][i][0] for i in post_samples])
        nu_values = np.array([1 / params['d_D'][i][0] for i in post_samples])
        rho_values = np.array([
            np.array([
                params[key][i][0]
                for key in ['p01', 'p02', 'p03', 'p04', 'p05']
            ]).reshape(-1) for i in post_samples
        ])
        rho_prime_values = np.array([
            np.array([
                params[key][i][0]
                for key in ['p11', 'p12', 'p13', 'p14', 'p15']
            ]).reshape(-1) for i in post_samples
        ])
        alpha_123_values = np.array(
            [params["alpha_123"][i][0] for i in post_samples])
        alpha_4_values = np.array(
            [params["alpha_4"][i][0] for i in post_samples])
        alpha_5_values = np.array(
            [params["alpha_5"][i][0] for i in post_samples])
        initial_exposed_values = np.array(
            [params["initial_exposed"][i][0] for i in post_samples])
    else:
        # weighted marginal median of each parameter
        marginal_medians = {
            key: weighted_quantile(
                np.array(params[key]).reshape(-1), [0.5], weights.squeeze())
            for key in params.keys()
        }

        beta_values = np.array([marginal_medians['beta'][0]])
        kappa_values = np.array([1 / marginal_medians['d_L'][0]])
        gamma_c_values = np.array([1 / marginal_medians['d_C'][0]])
        gamma_r_values = np.array([1 / marginal_medians['d_R'][0]])
        gamma_rc_values = np.array([1 / marginal_medians['d_RC'][0]])
        nu_values = np.array([1 / marginal_medians['d_D'][0]])
        rho_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p01', 'p02', 'p03', 'p04', 'p05']
            ]).reshape(-1)
        ])
        rho_prime_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p11', 'p12', 'p13', 'p14', 'p15']
            ]).reshape(-1)
        ])
        alpha_123_values = np.array([marginal_medians["alpha_123"][0]])
        alpha_4_values = np.array([marginal_medians["alpha_4"][0]])
        alpha_5_values = np.array([marginal_medians["alpha_5"][0]])
        initial_exposed_values = np.array(
            [marginal_medians["initial_exposed"][0]])

    # instantiate the posterior cost class:
    posterior_cost = PosteriorCost(model,
                                   phi_func_sc=phi_func_sc,
                                   phi_func_death=phi_func_death,
                                   beta_vals=beta_values,
                                   kappa_vals=kappa_values,
                                   gamma_c_vals=gamma_c_values,
                                   gamma_r_vals=gamma_r_values,
                                   gamma_rc_vals=gamma_rc_values,
                                   nu_vals=nu_values,
                                   rho_vals=rho_values,
                                   rho_prime_vals=rho_prime_values,
                                   alpha_123_vals=alpha_123_values,
                                   alpha_4_vals=alpha_4_values,
                                   alpha_5_vals=alpha_5_values,
                                   initial_exposed_vals=initial_exposed_values,
                                   loss=loss)

    if plot_days is None:
        n_days = 120
    else:
        n_days = plot_days
    end_training_mobility_values = [
        mobility_school[-1], mobility_work[-1], mobility_other[-1]
    ]
    # alpha initial is taken assuming values will be kept constant as it was on the last day observed
    mobility_initial = copy.deepcopy(
        np.stack((mobility_school[-1] * np.ones(shape=(n_days, )),
                  mobility_work[-1] * np.ones(shape=(n_days, )),
                  mobility_other[-1] * np.ones(shape=(n_days, ))))).flatten()

    # Only plot using a mobility file
    if only_plot:
        mobility = np.load(results_folder + plot_file)[:, 0:n_days]
        fig, ax = posterior_cost.produce_plot(mobility, n_days)
        plt.savefig(results_folder + filename_prefix + ".pdf")
        plt.close(fig)
        return

    # try cost computation:
    t = time.time()
    cost_initial = posterior_cost.compute_cost(mobility_initial, n_days, sigma,
                                               epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(mobility_initial, n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_final_training_lockdown_conditions.pdf")
    # plt.close(fig)
    cost_no_lockdown = posterior_cost.compute_cost(
        np.ones_like(mobility_initial), n_days, sigma, epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(np.ones_like(mobility_initial), n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_no_lockdown.pdf")
    # plt.close(fig)
    print("Initial cost: {:.2f}, no-lockdown cost: {:.2f}".format(
        cost_initial, cost_no_lockdown))
    print(time.time() - t)

    # OPTIMAL CONTROL WITH NO MOVING WINDOW APPROACH
    if perform_standard_optimal_control:
        # bounds = different_bounds('startconstrained')
        bounds = different_bounds('realistic', n_days, mobility_initial,
                                  end_training_mobility_values)

        results_da = optimize.dual_annealing(posterior_cost.compute_cost,
                                             bounds=bounds,
                                             args=(n_days, sigma, epsilon,
                                                   backend),
                                             maxiter=10,
                                             maxfun=1e3,
                                             x0=mobility_initial)
        # Plotting the figures
        mobility_initial = mobility_initial.reshape(
            3, n_days)  # 3 instead of 4 as we are not using alpha_home
        mobility_final = results_da.x.reshape(3, n_days)
        cost_final = posterior_cost.compute_cost(mobility_final, n_days, sigma,
                                                 epsilon, backend)
        np.save(results_folder + filename_prefix + "mobility_standard",
                mobility_final)

    # MOVING WINDOW APPROACH
    if perform_iterative_strategy:
        print("Iterative strategy")
        # window_size = 30  # in days
        mobility_initial = copy.deepcopy(
            np.stack((mobility_school[-1] * np.ones(shape=(window_size, )),
                      mobility_work[-1] * np.ones(shape=(window_size, )),
                      mobility_other[-1] *
                      np.ones(shape=(window_size, ))))).flatten()

        # shift_each_iteration = 10  # number of days by which to shift the sliding window at each iteration.
        # n_shifts = 10
        total_days = n_shifts * shift_each_iteration
        print('total days:', total_days)

        total_mobility = np.zeros((3, total_days))

        if restart_at_index is not None:
            total_mobility = np.load(results_folder + filename_prefix +
                                     "mobility_iterative_" +
                                     str(restart_at_index) + ".npy")

        bounds = different_bounds(
            'realistic',
            n_days=window_size,
            alpha_initial=mobility_initial,
            end_training_alpha_values=end_training_mobility_values)

        for shift_idx in range(n_shifts):
            print('Running shift: ' + str(shift_idx))
            if restart_at_index is not None and shift_idx <= restart_at_index:
                # we exploit the same loop in order to restart, so that the evolution of the model will be the same.
                mobility_final = np.zeros((3, window_size))
                mobility_final[:, 0:shift_each_iteration] = \
                    total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration]
                # keep that constant for the future; this is only used to initialize the next optimal control iteration:
                mobility_final[:, shift_each_iteration:] = \
                    mobility_final[:, shift_each_iteration - 1].reshape(3, 1)
            else:
                # do the optimal control stuff
                results_da = optimize.dual_annealing(
                    posterior_cost.compute_cost,
                    bounds=bounds,
                    args=(window_size, sigma, epsilon, backend),
                    maxiter=10,
                    maxfun=1e3,
                    x0=mobility_initial)

                # get the result of the optimization in that time window
                mobility_final = results_da.x.reshape(3, window_size)
                # save it to the total_mobility array:
                total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration] = \
                    mobility_final[:, 0:shift_each_iteration]
                # Save in between mobility steps
                np.save(
                    results_folder + filename_prefix + "mobility_iterative_" +
                    str(shift_idx), total_mobility)

            # update now the state of the model:
            posterior_cost.update_states(
                shift_each_iteration, mobility_final[:, :shift_each_iteration])

            # update mobility_initial as well with the shifted values of mobility_final; this may speed up convergence.
            mobility_initial_tmp = np.zeros_like(mobility_final)
            mobility_initial_tmp[:, 0:window_size - shift_each_iteration] = \
                mobility_final[:, shift_each_iteration:window_size]
            mobility_initial_tmp[:, window_size - shift_each_iteration:] = \
                np.stack([mobility_final[:, window_size - shift_each_iteration - 1]] * shift_each_iteration, axis=1)
            mobility_initial = mobility_initial_tmp.flatten()

        np.save(results_folder + filename_prefix + "mobility_iterative",
                total_mobility)
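
To make the moving-window logic above easier to follow, here is a self-contained sketch of the same receding-horizon pattern with a toy cost function (all names, bounds and the quadratic cost are illustrative assumptions, not the script's actual model):

import numpy as np
from scipy import optimize

window_size, shift_each_iteration, n_shifts = 6, 2, 3
total = np.zeros(n_shifts * shift_each_iteration)
x0 = np.full(window_size, 0.8)


def toy_cost(x):
    # stand-in for posterior_cost.compute_cost
    return np.sum((x - 0.5) ** 2)


for shift_idx in range(n_shifts):
    res = optimize.dual_annealing(toy_cost, bounds=[(0, 1)] * window_size,
                                  maxiter=10, x0=x0)
    window = res.x
    # commit only the first shift_each_iteration entries of this window
    total[shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration] = \
        window[:shift_each_iteration]
    # slide the window: shift the optimum left and pad with its last value
    x0 = np.concatenate([window[shift_each_iteration:],
                         np.repeat(window[-1], shift_each_iteration)])

print(total)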