Example #1
    def setUp(self):
        # setup backend
        dummy = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define a second, trivial uniform model
        self.model2 = Uniform([[0], [10]])

        self.sampler = DrawFromPrior([self.model], dummy, seed=1)
        self.original_journal = self.sampler.sample(100)

        self.generate_from_journal = GenerateFromJournal([self.model],
                                                         dummy,
                                                         seed=2)
        self.generate_from_journal_2 = GenerateFromJournal([self.model2],
                                                           dummy,
                                                           seed=2)

        # expected mean values from bootstrapped samples:
        self.mu_mean = -0.2050921750330999
        self.sigma_mean = 5.178647189918053
        # expected mean values from subsampled samples:
        self.mu_mean_2 = -0.021275259024241676
        self.sigma_mean_2 = 5.672004487129107
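A test method built on this fixture can then regenerate data for the accepted parameters. A minimal sketch, assuming ABCpy's GenerateFromJournal.generate(journal) returns parameters, simulations and normalized weights (check the exact return signature of your ABCpy version):

        parameters, simulations, normalized_weights = \
            self.generate_from_journal.generate(journal=self.original_journal)
        self.assertEqual(len(parameters), 100)  # one entry per accepted sample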
Example #2
    def setUp(self):
        if has_torch:
            self.net = createDefaultNN(2, 3)()
            self.net_with_scaler = ScalerAndNet(self.net, None)
            self.net_with_discard_wrapper = DiscardLastOutputNet(self.net)
            self.stat_calc = NeuralEmbedding(self.net)
            self.stat_calc_with_scaler = NeuralEmbedding(self.net_with_scaler)
            self.stat_calc_with_discard_wrapper = NeuralEmbedding(
                self.net_with_discard_wrapper)
            # reference input and output
            torch.random.manual_seed(1)
            self.tensor = torch.randn(1, 2)
            self.out = self.net(self.tensor)
            self.out_discard = self.net_with_discard_wrapper(self.tensor)

            # try now the statistics rescaling option:
            mu = Uniform([[-5.0], [5.0]], name='mu')
            sigma = Uniform([[0.0], [10.0]], name='sigma')
            # define a Gaussian model
            self.model = Normal([mu, sigma])

            sampler = DrawFromPrior([self.model], BackendDummy(), seed=1)
            reference_parameters, reference_simulations = sampler.sample_par_sim_pairs(
                30, 1)
            reference_simulations = reference_simulations.reshape(
                reference_simulations.shape[0], reference_simulations.shape[2])

            self.stat_calc_rescaling = NeuralEmbedding(
                self.net,
                reference_simulations=reference_simulations,
                previous_statistics=Identity(degree=2))

        else:
            self.assertRaises(ImportError, NeuralEmbedding, None)
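A test using this fixture typically pushes data through the embedding; a minimal sketch, assuming ABCpy's Statistics.statistics(list_of_arrays) interface (the expected output shape follows from the 2-input/3-output network above, but may differ across versions):

        if has_torch:
            out = self.stat_calc.statistics([np.array([1.0, 2.0])])
            # the embedding maps each 2-dimensional sample to 3 learned summaries
            self.assertEqual(out.shape, (1, 3))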
Example #3
    def test_sample(self):
        # setup backend
        dummy = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # define a distance function
        dist_calc = Euclidean(stat_calc)

        # create fake observed data
        y_obs = [np.array(9.8)]

        # use the rejection sampling scheme
        sampler = RejectionABC([self.model], [dist_calc], dummy, seed=1)
        journal = sampler.sample([y_obs], 10, 1, 10)
        mu_sample = np.array(journal.get_parameters()['mu'])
        sigma_sample = np.array(journal.get_parameters()['sigma'])

        # test shape of samples
        self.assertEqual(np.shape(mu_sample), (10, 1))
        self.assertEqual(np.shape(sigma_sample), (10, 1))

        # Compute posterior means and check they are close to the expected values
        #self.assertAlmostEqual(np.average(np.asarray(samples[:,0])),1.22301,10e-2)
        self.assertLess(abs(np.average(mu_sample) - 1.22301), 1e-2)
        self.assertLess(abs(np.average(sigma_sample) - 6.992218), 10e-2)

        self.assertFalse(journal.number_of_simulations == 0)
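If the journal needs inspecting later it can be persisted and reloaded; a short sketch using ABCpy's Journal helpers (Journal.fromFile also appears in a later example; the filename here is hypothetical):

        journal.save('rejection_abc.jrl')
        reloaded = Journal.fromFile('rejection_abc.jrl')
        print(reloaded.posterior_mean())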
Example #4
    def test_sample(self):
        # setup backend
        dummy = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # define a distance function
        dist_calc = Euclidean(stat_calc)

        # create fake observed data
        y_obs = model.simulate(1)

        # use the rejection sampling scheme
        sampler = RejectionABC(model, dist_calc, dummy, seed=1)
        journal = sampler.sample(y_obs, 10, 1, 0.1)
        samples = journal.get_parameters()

        # test shape of samples
        samples_shape = np.shape(samples)
        self.assertEqual(samples_shape, (10, 2))

        # Compute posterior mean (exact float comparison, as in the original test)
        mu_post_mean = np.average(np.asarray(samples[:, 0]))
        sigma_post_mean = np.average(np.asarray(samples[:, 1]))
        self.assertEqual((mu_post_mean, sigma_post_mean),
                         (1.6818856447333246, 8.4384177826766518))
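Exact equality on floats is brittle across platforms and NumPy versions; a tolerance-based variant of the same check (a suggested alternative, not the original assertion):

        self.assertAlmostEqual(mu_post_mean, 1.6818856447333246, places=6)
        self.assertAlmostEqual(sigma_post_mean, 8.4384177826766518, places=6)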
Example #5
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        #y_obs = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
        y_obs = [np.array(9.8)]

        # Define the likelihood function
        likfun = SynLiklihood(stat_calc)

        # use the PMC scheme for T = 1
        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                                 covFactors=np.array([.1, .1]), iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior means
        mu_post_mean = np.average(mu_post_sample, weights=post_weights, axis=0)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights, axis=0)

        # test shape of samples
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-3.402868)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.212), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs], T, n_sample, n_samples_per_param,
                                 covFactors=np.array([.1, .1]), iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior means
        mu_post_mean = np.average(mu_post_sample, weights=post_weights, axis=0)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights, axis=0)

        # test shape of samples
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-3.03325763)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.92124735), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)
Example #6
    def setUp(self):
        # setup backend
        self.backend = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define a distance function
        stat_calc = Identity(degree=2, cross=0)
        self.dist_calc = Euclidean(stat_calc)

        # create fake observed data
        #self.observation = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
        self.observation = [np.array(9.8)]
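A test on this fixture can then measure the distance between the observation and synthetic data; a minimal sketch mirroring the commented-out forward_simulate call above (signatures vary between ABCpy versions, so treat this as an assumption):

        y_sim = self.model.forward_simulate(1, np.random.RandomState(1))
        print(self.dist_calc.distance(self.observation, y_sim))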
Example #7
    def setUp(self):
        self.coeff = np.array([[3, 4], [5, 6]])
        self.stat_calc = LinearTransformation(self.coeff,
                                              degree=1,
                                              cross=False)

        # try now the statistics rescaling option:
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        sampler = DrawFromPrior([self.model], BackendDummy(), seed=1)
        reference_parameters, reference_simulations = sampler.sample_par_sim_pairs(
            30, 1)
        reference_simulations = reference_simulations.reshape(
            reference_simulations.shape[0], reference_simulations.shape[2])
        reference_simulations_double = np.concatenate(
            [reference_simulations, reference_simulations], axis=1)

        self.stat_calc_rescaling = LinearTransformation(
            self.coeff, reference_simulations=reference_simulations_double)
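LinearTransformation left-multiplies each data row by coeff; a quick sketch of what the fixture computes, assuming the usual Statistics.statistics(list_of_arrays) interface:

        out = self.stat_calc.statistics([np.array([2.0, 2.0])])
        # [2, 2] @ [[3, 4], [5, 6]] = [16, 20]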
Example #8
    def setUp(self):
        # setup backend
        self.backend = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        self.model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define a distance function
        stat_calc = Identity(degree=2, cross=0)
        self.dist_calc = Euclidean(stat_calc)

        # create fake observed data
        self.observation = self.model.simulate(1)

        # define kernel
        mean = np.array([-13.0, .0, 7.0])
        cov = np.eye(3)
        self.kernel = MultiNormal(mean, cov, seed=1)
Example #9
    def setUp(self):
        self.stat_calc = Identity(degree=1, cross=False)
        self.stat_calc_pipeline = Identity(degree=2,
                                           cross=False,
                                           previous_statistics=self.stat_calc)

        # try now the statistics rescaling option:
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        sampler = DrawFromPrior([self.model], BackendDummy(), seed=1)
        reference_parameters, reference_simulations = sampler.sample_par_sim_pairs(
            30, 1)
        reference_simulations = reference_simulations.reshape(
            reference_simulations.shape[0], reference_simulations.shape[2])
        reference_simulations_double = np.concatenate(
            [reference_simulations, reference_simulations], axis=1)

        self.stat_calc_rescaling = Identity(
            reference_simulations=reference_simulations_double)
        self.stat_calc_rescaling_2 = Identity(
            reference_simulations=reference_simulations)
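Identity with degree=2 returns the data followed by its element-wise squares; a small sketch of the pipeline (assumed interface as above; the exact column layout may differ by version):

        out = self.stat_calc_pipeline.statistics([np.array([3.0, 2.0])])
        # expected summaries: [3., 2., 9., 4.]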
Example #10
    def run(self,
            jobID,
            n_sample,
            steps,
            epsilon_init,
            epsilon_percentile,
            save_output=True,
            parallelize=False):

        assert self._prior_set is True

        if parallelize:
            backend = BackendMPI()
        else:
            backend = BackendDummy()

        steps_minus_1 = steps - 1
        epsilon_init = [epsilon_init] + [None] * steps_minus_1

        sim = Simulator(self, self.to_sample_list, self.priors_over_hood)
        sampler = PMCABC([sim], [self._distance_calc], backend, seed=1)

        journal_filename = self.output_folder + 'journal_' + jobID

        if os.path.exists(journal_filename):
            with open(journal_filename, 'rb') as f:
                journal_init = pickle.load(f)
            print('loading from journal file...')
            stat = journal_init.get_distances()
            print(
                str(epsilon_percentile) +
                'th percentile of initial distances: ',
                np.percentile(stat, epsilon_percentile))
        else:
            print('first_iteration...')
            journal_init = None

        journal = sampler.sample([self._obs],
                                 steps,
                                 epsilon_init,
                                 n_sample,
                                 1,
                                 epsilon_percentile,
                                 journal_class=journal_init)

        stat = journal.get_distances()
        print(
            str(epsilon_percentile) + 'th percentile of new distances: ',
            np.percentile(stat, epsilon_percentile))
        print('obtained ' + str(n_sample) + ' samples from ' +
              str(journal.number_of_simulations[0]) + ' realizations')

        if save_output:
            with open(journal_filename, 'wb') as f:
                pickle.dump(journal, f)

        self._prior_set = False

        return journal
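Reloading a journal saved by this run uses the same pickle protocol; a hypothetical standalone snippet (the path below is illustrative, built as output_folder + 'journal_' + jobID):

import pickle

with open('output/journal_job0', 'rb') as f:
    journal = pickle.load(f)
print(journal.posterior_mean())  # standard ABCpy Journal method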
Example #11
                                              namefile_postfix_no_index +
                                              ".npy")
        kernel_sr_values_timestep = np.load(inference_folder +
                                            "kernel_sr_values_timestep" +
                                            namefile_postfix_no_index + ".npy")
        kernel_sr_values_cumulative = np.load(inference_folder +
                                              "kernel_sr_values_cumulative" +
                                              namefile_postfix_no_index +
                                              ".npy")
        print("Loaded previously computed scoring rule values.")
        compute_srs = False
    except FileNotFoundError:
        pass

if compute_srs:
    backend = BackendMPI() if use_MPI else BackendDummy()
    if gamma_kernel_score is None:
        print("Set gamma from simulations from the model")
        gamma_kernel_score = estimate_bandwidth_timeseries(
            ABC_model,
            backend=backend,
            n_theta=1000,
            seed=seed + 1,
            num_vars=num_vars_in_Lorenz)
        print("Estimated gamma ", gamma_kernel_score)

    print("Computing scoring rule values by generating the predictive distribution.")

    draw_from_params = DrawFromParamValues([ABC_model],
Example #12
def setup_backend(parallel=False):
    if parallel:
        backend = BackendMPI()
    else:
        backend = BackendDummy()
    return backend
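The returned backend exposes ABCpy's parallelization primitives; a minimal usage sketch of the parallelize/map/collect API:

backend = setup_backend(parallel=False)
pds = backend.parallelize(list(range(10)))        # distribute a python list
pds_squared = backend.map(lambda x: x ** 2, pds)  # apply a function element-wise
print(backend.collect(pds_squared))               # gather the results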
Example #13
n_samples_per_param = args.n_samples_per_param
full_output = args.full_output
load_trace_if_available = args.load_trace_if_available
subsample_size = args.subsample_size
plot_marginal_densities = args.plot_marginal
plot_bivariate_densities = args.plot_bivariate
perform_postprocessing = args.postprocessing

if model not in ("gaussian", "beta", "gamma", "MA2",
                 "AR2") or technique not in ("SL", "RE"):
    raise NotImplementedError

true_posterior_available = model in ("gaussian", "beta", "gamma", "MA2", "AR2")
theta_dim = 2

backend = BackendMPI() if use_MPI else BackendDummy()  # be careful, these need to be instantiated
print("{} model with {} approach.".format(model, technique))

args_dict = args.__dict__

if sleep_time > 0:
    print("Wait for {} minutes...".format(sleep_time))
    sleep(60 * sleep_time)
    print("Done waiting!")

if model == "AR2":
    arma_size = 100
    ar1_bounds = [-1, 1]
    ar2_bounds = [-1, 0]
    args_dict['arma_size'] = arma_size
    args_dict['ar1_bounds'] = ar1_bounds
Example #14
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        #y_obs = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
        y_obs = [np.array(9.8)]

        # Define the likelihood function
        likfun = SynLiklihood(stat_calc)

        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs],
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactors=np.array([.1, .1]),
                                 iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior mean
        mu_post_mean = journal.posterior_mean()['mu']
        sigma_post_mean = journal.posterior_mean()['sigma']

        # test shape of sample
        mu_sample_shape = (len(mu_post_sample), mu_post_sample[0].shape[1])
        sigma_sample_shape = (len(sigma_post_sample), sigma_post_sample[0].shape[1])
        weights_sample_shape = post_weights.shape
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-3.3711206204663764)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.518520667688998), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs],
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactors=np.array([.1, .1]),
                                 iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior mean
        mu_post_mean = journal.posterior_mean()['mu']
        sigma_post_mean = journal.posterior_mean()['sigma']

        # test shape of sample
        mu_sample_shape = (len(mu_post_sample), mu_post_sample[0].shape[1])
        sigma_sample_shape = (len(sigma_post_sample), sigma_post_sample[0].shape[1])
        weights_sample_shape = post_weights.shape
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-2.970827684425406)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.82165619013458), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)
Example #15
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        y_obs = model.simulate(1)

        # Define the likelihood function
        likfun = SynLiklihood(stat_calc)

        # use the PMC scheme for T = 1
        mean = np.array([-13.0, .0, 7.0])
        cov = np.eye(3)
        kernel = MultiNormal(mean, cov, seed=1)

        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute posterior mean
        mu_post_sample = np.array(samples[0][:, 0])
        sigma_post_sample = np.array(samples[0][:, 1])
        post_weights = np.array(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)

        # test shape of sample
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, ))
        self.assertEqual(sigma_sample_shape, (10, ))
        self.assertEqual(weights_sample_shape, (10, ))
        self.assertLess(abs(mu_post_mean - (-1.48953333102)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 6.50695612708), 1e-10)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute posterior mean
        mu_post_sample = np.asarray(samples[0][:, 0])
        sigma_post_sample = np.asarray(samples[0][:, 1])
        post_weights = np.asarray(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)

        # test shape of sample
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, ))
        self.assertEqual(sigma_sample_shape, (10, ))
        self.assertEqual(weights_sample_shape, (10, ))
        self.assertLess(abs(mu_post_mean - (-1.4033145848)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 7.05175546876), 1e-10)
Example #16
def main(epsilon,
         sigma,
         filename_prefix,
         perform_standard_optimal_control=False,
         perform_iterative_strategy=True,
         use_sample_with_higher_weight=False,
         use_posterior_median=False,
         n_post_samples=None,
         shift_each_iteration=1,
         n_shifts=10,
         window_size=30,
         only_plot=False,
         plot_file=None,
         plot_days=None,
         loss="deaths_Isc",
         results_folder=None,
         journal_file_name=None,
         training_window_length=None,
         use_mpi=False,
         restart_at_index=None):
    """epsilon is an array with size 3, with order school, work, other
    If use_sample_with_higher_weight is True: we do the procedure with that only, no posterior expectation
    use_posterior_median: do the optimal control with the marginal posterior median.
    n_post_samples: for the posterior expectation. Ignored if use_sample_with_higher_weight or use_posterior_median is True,
    shift_each_iteration and n_shifts are for the iterative strategy.
    """
    if use_mpi:
        print("Using MPI")
        backend = BackendMPI()
    else:
        backend = BackendDummy()

    print("Epsilon: ", epsilon)

    logging.basicConfig(level=logging.INFO)
    ############################ Load relevant data #################################################
    if results_folder is None:
        results_folder = "results/SEI4RD_france_infer_1Mar_31Aug/"
    data_folder = "data/france_inference_data_1Mar_to_31Aug/"

    alpha_home = 1  # set this to 1
    mobility_work = np.load(data_folder + "mobility_work.npy")
    mobility_other = np.load(data_folder + "mobility_other.npy")
    mobility_school = np.load(data_folder + "mobility_school.npy")

    france_pop = np.load(data_folder + "france_pop.npy", allow_pickle=True)

    contact_matrix_home = np.load(data_folder + "contact_matrix_home.npy")
    contact_matrix_work = np.load(data_folder + "contact_matrix_work.npy")
    contact_matrix_school = np.load(data_folder + "contact_matrix_school.npy")
    contact_matrix_other = np.load(data_folder + "contact_matrix_other.npy")

    if journal_file_name is None:
        jrnl = Journal.fromFile(results_folder + "PMCABC_inf3.jrl")
    else:
        jrnl = Journal.fromFile(results_folder + journal_file_name)
    #################################### Define Model #################################################
    # parameters
    n = 5  # number of age groups
    dt = 0.1  # integration timestep
    if training_window_length is not None:
        T = training_window_length
    else:
        T = mobility_school.shape[0] - 1  # horizon time in days
    total_population = france_pop  # population for each age group
    # 16th March: Boris Johnson asked old people to isolate; we then learn a new alpha from the 18th March:
    lockdown_day = 17

    # alpha_home = np.repeat(alpha_home, int(1 / dt), axis=0)
    mobility_work = np.repeat(mobility_work[0:T + 1], int(1 / dt), axis=0)
    mobility_other = np.repeat(mobility_other[0:T + 1], int(1 / dt), axis=0)
    mobility_school = np.repeat(mobility_school[0:T + 1], int(1 / dt), axis=0)
    # daily_tests = np.repeat(daily_tests, int(1 / dt), axis=0)

    # ABC model (priors need to be fixed better):
    beta = Uniform([[0], [0.5]],
                   name='beta')  # controls how fast the epidemic grows; related to R_0
    d_L = Uniform([[1], [16]], name='d_L')  # average duration of incubation
    d_C = Uniform([[1], [16]],
                  name='d_C')  # average time before going to clinical
    d_R = Uniform([[1], [16]], name='d_R')  # average recovery time
    d_RC = Uniform([[1], [16]], name='d_RC')  # average recovery time
    d_D = Uniform(
        [[1], [16]], name='d_D'
    )  # average duration of infected clinical state (resulting in death)
    p01 = Uniform([[0], [1]], name="p01")
    p02 = Uniform([[0], [1]], name="p02")
    p03 = Uniform([[0], [1]], name="p03")
    p04 = Uniform([[0], [1]], name="p04")
    p05 = Uniform([[0], [1]], name="p05")
    p11 = Uniform([[0], [1]], name="p11")
    p12 = Uniform([[0], [1]], name="p12")
    p13 = Uniform([[0], [1]], name="p13")
    p14 = Uniform([[0], [1]], name="p14")
    p15 = Uniform([[0], [1]], name="p15")
    initial_exposed = Uniform([[0], [500]], name="initial_exposed")
    alpha_123 = Uniform([[0.3], [1]], name="alpha_123")
    alpha_4 = Uniform([[0], [1]], name="alpha_4")
    alpha_5 = Uniform([[0], [1]], name="alpha_5")

    model = SEI4RD([
        beta, d_L, d_C, d_R, d_RC, d_D, p01, p02, p03, p04, p05, p11, p12, p13,
        p14, p15, initial_exposed, alpha_123, alpha_4, alpha_5
    ],
                   tot_population=total_population,
                   T=T,
                   contact_matrix_school=contact_matrix_school,
                   contact_matrix_work=contact_matrix_work,
                   contact_matrix_home=contact_matrix_home,
                   contact_matrix_other=contact_matrix_other,
                   alpha_school=mobility_school,
                   alpha_work=mobility_work,
                   alpha_home=alpha_home,
                   alpha_other=mobility_other,
                   modify_alpha_home=False,
                   dt=dt,
                   return_once_a_day=True,
                   learn_alphas_old=True,
                   lockdown_day=lockdown_day)

    # guess for a phi function
    NHS_max = 10000

    def phi_func_sc(x):  # this is a hard max function.
        return np.maximum(0, x - NHS_max)

    def phi_func_death(x):  # this is a hard max function.
        return np.maximum(0, x)

    # def phi_func(x):
    #     return np.pow(np.maximum(0, x - NHS_max), 2)

    # def phi_func(x, beta=.1):  # this is the softplus, a smooth version of hard max
    #    threshold = 30
    #    shape = x.shape
    #    x = x.reshape(-1)
    #    new_x = x - NHS_max
    #    indices = new_x * beta < threshold
    #    phi_x = copy.deepcopy(new_x)  # is deepcopy actually needed?
    #    phi_x[indices] = np.log(
    #        1 + np.exp(new_x[indices] * beta)) / beta  # approximate for numerical stability in other places
    #    return phi_x.reshape(shape)

    # extract posterior sample points and bootstrap them:
    seed = 1
    np.random.seed(seed)
    iteration = -1
    weights = jrnl.get_weights(iteration) / np.sum(jrnl.get_weights(iteration))
    params = jrnl.get_parameters(iteration)
    if not use_posterior_median:
        if use_sample_with_higher_weight:
            post_samples = np.where(weights == weights.max())[0]
        else:
            # bootstrap
            if n_post_samples is None:
                n_post_samples = len(weights)
            post_samples = np.random.choice(range(len(weights)),
                                            p=weights.reshape(-1),
                                            size=n_post_samples)

        beta_values = np.array([params['beta'][i][0] for i in post_samples])
        kappa_values = np.array(
            [1 / params['d_L'][i][0] for i in post_samples])
        gamma_c_values = np.array(
            [1 / params['d_C'][i][0] for i in post_samples])
        gamma_r_values = np.array(
            [1 / params['d_R'][i][0] for i in post_samples])
        gamma_rc_values = np.array(
            [1 / params['d_RC'][i][0] for i in post_samples])
        nu_values = np.array([1 / params['d_D'][i][0] for i in post_samples])
        rho_values = np.array([
            np.array([
                params[key][i][0]
                for key in ['p01', 'p02', 'p03', 'p04', 'p05']
            ]).reshape(-1) for i in post_samples
        ])
        rho_prime_values = np.array([
            np.array([
                params[key][i][0]
                for key in ['p11', 'p12', 'p13', 'p14', 'p15']
            ]).reshape(-1) for i in post_samples
        ])
        alpha_123_values = np.array(
            [params["alpha_123"][i][0] for i in post_samples])
        alpha_4_values = np.array(
            [params["alpha_4"][i][0] for i in post_samples])
        alpha_5_values = np.array(
            [params["alpha_5"][i][0] for i in post_samples])
        initial_exposed_values = np.array(
            [params["initial_exposed"][i][0] for i in post_samples])
    else:
        # marginal posterior medians via weighted quantiles (one value per parameter)
        marginal_medians = {
            key: weighted_quantile(
                np.array(params[key]).reshape(-1), [0.5], weights.squeeze())
            for key in params.keys()
        }

        beta_values = np.array([marginal_medians['beta'][0]])
        kappa_values = np.array([1 / marginal_medians['d_L'][0]])
        gamma_c_values = np.array([1 / marginal_medians['d_C'][0]])
        gamma_r_values = np.array([1 / marginal_medians['d_R'][0]])
        gamma_rc_values = np.array([1 / marginal_medians['d_RC'][0]])
        nu_values = np.array([1 / marginal_medians['d_D'][0]])
        rho_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p01', 'p02', 'p03', 'p04', 'p05']
            ]).reshape(-1)
        ])
        rho_prime_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p11', 'p12', 'p13', 'p14', 'p15']
            ]).reshape(-1)
        ])
        alpha_123_values = np.array([marginal_medians["alpha_123"][0]])
        alpha_4_values = np.array([marginal_medians["alpha_4"][0]])
        alpha_5_values = np.array([marginal_medians["alpha_5"][0]])
        initial_exposed_values = np.array(
            [marginal_medians["initial_exposed"][0]])

    # instantiate the posterior cost class:
    posterior_cost = PosteriorCost(model,
                                   phi_func_sc=phi_func_sc,
                                   phi_func_death=phi_func_death,
                                   beta_vals=beta_values,
                                   kappa_vals=kappa_values,
                                   gamma_c_vals=gamma_c_values,
                                   gamma_r_vals=gamma_r_values,
                                   gamma_rc_vals=gamma_rc_values,
                                   nu_vals=nu_values,
                                   rho_vals=rho_values,
                                   rho_prime_vals=rho_prime_values,
                                   alpha_123_vals=alpha_123_values,
                                   alpha_4_vals=alpha_4_values,
                                   alpha_5_vals=alpha_5_values,
                                   initial_exposed_vals=initial_exposed_values,
                                   loss=loss)

    if plot_days is None:
        n_days = 120
    else:
        n_days = plot_days
    end_training_mobility_values = [
        mobility_school[-1], mobility_work[-1], mobility_other[-1]
    ]
    # initial mobility assumes the last observed day's values are kept constant
    mobility_initial = copy.deepcopy(
        np.stack((mobility_school[-1] * np.ones(shape=(n_days, )),
                  mobility_work[-1] * np.ones(shape=(n_days, )),
                  mobility_other[-1] * np.ones(shape=(n_days, ))))).flatten()

    # Only plot using a mobility file
    if only_plot:
        mobility = np.load(results_folder + plot_file)[:, 0:n_days]
        fig, ax = posterior_cost.produce_plot(mobility, n_days)
        plt.savefig(results_folder + filename_prefix + ".pdf")
        plt.close(fig)
        return

    # try cost computation:
    t = time.time()
    cost_initial = posterior_cost.compute_cost(mobility_initial, n_days, sigma,
                                               epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(mobility_initial, n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_final_training_lockdown_conditions.pdf")
    # plt.close(fig)
    cost_no_lockdown = posterior_cost.compute_cost(
        np.ones_like(mobility_initial), n_days, sigma, epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(np.ones_like(mobility_initial), n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_no_lockdown.pdf")
    # plt.close(fig)
    print("Initial cost: {:.2f}, no-lockdown cost: {:.2f}".format(
        cost_initial, cost_no_lockdown))
    print(time.time() - t)

    # OPTIMAL CONTROL WITH NO MOVING WINDOW APPROACH
    if perform_standard_optimal_control:
        # bounds = different_bounds('startconstrained')
        bounds = different_bounds('realistic', n_days, mobility_initial,
                                  end_training_mobility_values)

        results_da = optimize.dual_annealing(posterior_cost.compute_cost,
                                             bounds=bounds,
                                             args=(n_days, sigma, epsilon,
                                                   backend),
                                             maxiter=10,
                                             maxfun=1e3,
                                             x0=mobility_initial)
        # Plotting the figures
        mobility_initial = mobility_initial.reshape(
            3, n_days)  # 3 instead of 4 as we are not using alpha_home
        mobility_final = results_da.x.reshape(3, n_days)
        cost_final = posterior_cost.compute_cost(mobility_final, n_days, sigma,
                                                 epsilon, backend)
        np.save(results_folder + filename_prefix + "mobility_standard",
                mobility_final)

    # MOVING WINDOW APPROACH
    if perform_iterative_strategy:
        print("Iterative strategy")
        # window_size = 30  # in days
        mobility_initial = copy.deepcopy(
            np.stack((mobility_school[-1] * np.ones(shape=(window_size, )),
                      mobility_work[-1] * np.ones(shape=(window_size, )),
                      mobility_other[-1] *
                      np.ones(shape=(window_size, ))))).flatten()

        # shift_each_iteration = 10  # number of days by which to shift the sliding window at each iteration.
        # n_shifts = 10
        total_days = n_shifts * shift_each_iteration
        print(total_days)

        total_mobility = np.zeros((3, total_days))

        if restart_at_index is not None:
            total_mobility = np.load(results_folder + filename_prefix +
                                     "mobility_iterative_" +
                                     str(restart_at_index) + ".npy")

        bounds = different_bounds(
            'realistic',
            n_days=window_size,
            alpha_initial=mobility_initial,
            end_training_alpha_values=end_training_mobility_values)

        for shift_idx in range(n_shifts):
            print('Running shift: ' + str(shift_idx))
            if restart_at_index is not None and shift_idx <= restart_at_index:
                # we exploit the same loop in order to restart, so that the evolution of the model will be the same.
                mobility_final = np.zeros((3, window_size))
                mobility_final[:, 0:shift_each_iteration] = \
                    total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration]
                # keep that constant for the future; this is only used to initialize the next optimal control iteration:
                mobility_final[:, shift_each_iteration:] = \
                    mobility_final[:, shift_each_iteration - 1].reshape(3, 1)
            else:
                # do the optimal control stuff
                results_da = optimize.dual_annealing(
                    posterior_cost.compute_cost,
                    bounds=bounds,
                    args=(window_size, sigma, epsilon, backend),
                    maxiter=10,
                    maxfun=1e3,
                    x0=mobility_initial)

                # get the result of the optimization in that time window
                mobility_final = results_da.x.reshape(3, window_size)
                # save it to the total_mobility array:
                total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration] = \
                    mobility_final[:, 0:shift_each_iteration]
                # Save in between mobility steps
                np.save(
                    results_folder + filename_prefix + "mobility_iterative_" +
                    str(shift_idx), total_mobility)

            # update now the state of the model:
            posterior_cost.update_states(
                shift_each_iteration, mobility_final[:, :shift_each_iteration])

            # update mobility_initial as well, with the translated values of mobility_final, it may speed up convergence.
            mobility_initial_tmp = np.zeros_like(mobility_final)
            mobility_initial_tmp[:, 0:window_size - shift_each_iteration] = \
                mobility_final[:, shift_each_iteration:window_size]
            mobility_initial_tmp[:, window_size - shift_each_iteration:] = np.stack(
                [mobility_final[:, window_size - shift_each_iteration - 1]] * shift_each_iteration,
                axis=1)
            mobility_initial = mobility_initial_tmp.flatten()

        np.save(results_folder + filename_prefix + "mobility_iterative",
                total_mobility)
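A hypothetical invocation of the driver above; every argument value here is illustrative only:

main(epsilon=np.array([1.0, 1.0, 1.0]),  # school, work, other
     sigma=0.1,
     filename_prefix='test_run_',
     n_post_samples=50,
     use_mpi=False)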
Example #17
    def test_resample(self):
        # -- setup --
        # setup backend
        dummy = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        model = Normal([mu, sigma])

        sampler = DrawFromPrior([model], dummy, seed=1)
        original_journal = sampler.sample(100)

        # expected mean values from bootstrapped samples:
        mu_mean = -0.5631214403709973
        sigma_mean = 5.2341427118053705
        # expected mean values from subsampled samples:
        mu_mean_2 = -0.6414897172489
        sigma_mean_2 = 6.217381777130734

        # -- bootstrap --
        new_j = original_journal.resample(path_to_save_journal="tmp.jnl",
                                          seed=42)
        mu_sample = np.array(new_j.get_parameters()['mu'])
        sigma_sample = np.array(new_j.get_parameters()['sigma'])

        accepted_parameters = new_j.get_accepted_parameters()
        self.assertEqual(len(accepted_parameters), 100)
        self.assertEqual(len(accepted_parameters[0]), 2)

        # test shape of samples
        mu_shape, sigma_shape = (len(mu_sample), mu_sample[0].shape[1]), \
                                (len(sigma_sample), sigma_sample[0].shape[1])
        self.assertEqual(mu_shape, (100, 1))
        self.assertEqual(sigma_shape, (100, 1))

        # Compute posterior mean
        self.assertAlmostEqual(np.average(mu_sample), mu_mean)
        self.assertAlmostEqual(np.average(sigma_sample), sigma_mean)

        self.assertTrue(new_j.number_of_simulations[0] == 0)

        # check whether the dictionary and the parameter list contain the same data:
        self.assertEqual(new_j.get_parameters()["mu"][9],
                         new_j.get_accepted_parameters()[9][0])
        self.assertEqual(new_j.get_parameters()["sigma"][7],
                         new_j.get_accepted_parameters()[7][1])

        # -- subsample (replace=False, smaller number than the full sample) --
        new_j_2 = original_journal.resample(replace=False,
                                            n_samples=10,
                                            seed=42)
        mu_sample = np.array(new_j_2.get_parameters()['mu'])
        sigma_sample = np.array(new_j_2.get_parameters()['sigma'])

        accepted_parameters = new_j_2.get_accepted_parameters()
        self.assertEqual(len(accepted_parameters), 10)
        self.assertEqual(len(accepted_parameters[0]), 2)

        # test shape of samples
        mu_shape, sigma_shape = (len(mu_sample), mu_sample[0].shape[1]), \
                                (len(sigma_sample), sigma_sample[0].shape[1])
        self.assertEqual(mu_shape, (10, 1))
        self.assertEqual(sigma_shape, (10, 1))

        # Compute posterior mean
        self.assertAlmostEqual(np.average(mu_sample), mu_mean_2)
        self.assertAlmostEqual(np.average(sigma_sample), sigma_mean_2)

        self.assertTrue(new_j_2.number_of_simulations[0] == 0)

        # check whether the dictionary and the parameter list contain the same data:
        self.assertEqual(new_j_2.get_parameters()["mu"][9],
                         new_j_2.get_accepted_parameters()[9][0])
        self.assertEqual(new_j_2.get_parameters()["sigma"][7],
                         new_j_2.get_accepted_parameters()[7][1])

        # -- check that resampling the full samples with replace=False gives the exact same posterior mean and std --
        new_j_3 = original_journal.resample(replace=False, n_samples=100)
        mu_sample = np.array(new_j_3.get_parameters()['mu'])
        sigma_sample = np.array(new_j_3.get_parameters()['sigma'])

        # original journal
        mu_sample_original = np.array(original_journal.get_parameters()['mu'])
        sigma_sample_original = np.array(
            original_journal.get_parameters()['sigma'])

        # Compute posterior mean and std
        self.assertAlmostEqual(np.average(mu_sample),
                               np.average(mu_sample_original))
        self.assertAlmostEqual(np.average(sigma_sample),
                               np.average(sigma_sample_original))
        self.assertAlmostEqual(np.std(mu_sample), np.std(mu_sample_original))
        self.assertAlmostEqual(np.std(sigma_sample),
                               np.std(sigma_sample_original))

        # check whether the dictionary and the parameter list contain the same data:
        self.assertEqual(new_j_3.get_parameters()["mu"][9],
                         new_j_3.get_accepted_parameters()[9][0])
        self.assertEqual(new_j_3.get_parameters()["sigma"][7],
                         new_j_3.get_accepted_parameters()[7][1])

        # -- test the error --
        with self.assertRaises(RuntimeError):
            original_journal.resample(replace=False, n_samples=200)
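The bootstrap call above also wrote the resampled journal to "tmp.jnl"; reloading it should be a one-liner, assuming the saved file is a standard ABCpy journal:

        reloaded = Journal.fromFile('tmp.jnl')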
Example #18
early_stopping = not args.no_early_stop
update_batchnorm_running_means_before_eval = args.update_batchnorm_running_means_before_eval
momentum = args.bn_momentum
epochs_before_early_stopping = args.epochs_before_early_stopping
epochs_test_interval = args.epochs_test_interval
use_MPI = args.use_MPI
generate_data_only = args.generate_data_only
save_net_at_each_epoch = args.save_net_at_each_epoch

# checks
if model not in ("gaussian", "beta", "gamma", "MA2", "AR2", "fullLorenz95",
                 "fullLorenz95smaller") or technique not in ("SM", "SSM",
                                                             "FP"):
    raise NotImplementedError

backend = BackendMPI() if use_MPI else BackendDummy()

if generate_data_only:
    print("Generate data only, no train.")
else:
    print("{} model with {}.".format(model, technique))
# set up the default root folder and other values
default_root_folder = {
    "gaussian": "results/gaussian/",
    "gamma": "results/gamma/",
    "beta": "results/beta/",
    "AR2": "results/AR2/",
    "MA2": "results/MA2/",
    "fullLorenz95": "results/fullLorenz95/",
    "fullLorenz95smaller": "results/fullLorenz95smaller/"
}
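The mapping above is presumably indexed by the model name parsed from the arguments; a hypothetical continuation:

results_folder = default_root_folder[model]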
Example #19
import numpy as np
import networkx as nx

from ComplexContagion.Model import ComplexContagion
from ComplexContagion.Statistics import DiffusionIdentityStatistics
from ComplexContagion.Distance import SubsetDistance
from ComplexContagion.Kernel import DiffusionKernel
from ComplexContagion.Prior import DiffusionPrior
from ComplexContagion.Inference import SABCDiffusion

#==============================================================================
# Choose the appropriate Backend for Parallelization
from abcpy.backends import BackendDummy
backend = BackendDummy()
#from abcpy.backends import BackendMPI as Backend
#backend = Backend()
#==============================================================================
# Different types of network (BA: Barabasi-Albert, ER: Erdos-Renyi, FB: Facebook Social Network,
# INRV: Indian Village contact network), each with node_no nodes. The infection_node is the
# true seed node. (Choose one of the options below.)
#==============================================================================
case, node_no, infection_node = 'ba', 100, 4
#case, node_no, infection_node = 'er', 100, 10
#case, node_no, infection_node = 'inrv', 354, 70
#case, node_no, infection_node = 'fb', 4039, 2000
#==============================================================================
# Time observed
time_observed = np.arange(20, 120 + 1)
#==============================================================================
# Load network
#==============================================================================