Example #1
import numpy as np
import networkx as nx

from ComplexContagion.Model import ComplexContagion
from ComplexContagion.Statistics import DiffusionIdentityStatistics
from ComplexContagion.Distance import SubsetDistance
from ComplexContagion.Kernel import DiffusionKernel
from ComplexContagion.Prior import DiffusionPrior
from ComplexContagion.Inference import SABCDiffusion

#==============================================================================
# Choose the appropriate Backend for Parallelization
from abcpy.backends import BackendDummy
backend = BackendDummy()
#from abcpy.backends import BackendMPI as Backend
#backend = Backend()
#==============================================================================
# Different types of networks (BA: Barabasi-Albert, ER: Erdos-Renyi, FB: Facebook Social Network,
# INRV: Indian Village Contact Network), each with node_no nodes. infection_node is the
# true seed node. (Choose one of the options below.)
#==============================================================================
case, node_no, infection_node = 'ba', 100, 4
#case, node_no, infection_node = 'er', 100, 10
#case, node_no, infection_node = 'inrv', 354, 70
#case, node_no, infection_node = 'fb', 4039, 2000
#==============================================================================
# Time observed
time_observed = np.arange(20, 120 + 1)
#==============================================================================
# Load network
#==============================================================================
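# The excerpt ends before the network is actually loaded. A minimal sketch of
# that step, assuming networkx generators for the synthetic cases (parameter
# values are illustrative) and a hypothetical edge-list file for the real ones:
if case == 'ba':
    network = nx.barabasi_albert_graph(node_no, 10, seed=1)
elif case == 'er':
    network = nx.erdos_renyi_graph(node_no, 0.1, seed=1)
else:
    # 'fb' and 'inrv' would be read from data files shipped with the repo, e.g.:
    network = nx.read_edgelist("Networks/" + case + ".txt", nodetype=int)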
Example #2
early_stopping = not args.no_early_stop
update_batchnorm_running_means_before_eval = args.update_batchnorm_running_means_before_eval
momentum = args.bn_momentum
epochs_before_early_stopping = args.epochs_before_early_stopping
epochs_test_interval = args.epochs_test_interval
use_MPI = args.use_MPI
generate_data_only = args.generate_data_only
save_net_at_each_epoch = args.save_net_at_each_epoch

# checks
if model not in ("gaussian", "beta", "gamma", "MA2", "AR2", "fullLorenz95",
                 "fullLorenz95smaller") or technique not in ("SM", "SSM",
                                                             "FP"):
    raise NotImplementedError

# BackendMPI / BackendDummy are presumably imported from abcpy.backends, as in
# Example #1; the import lines fall outside this excerpt.
backend = BackendMPI() if use_MPI else BackendDummy()

if generate_data_only:
    print("Generate data only, no train.")
else:
    print("{} model with {}.".format(model, technique))
# set up the default root folder and other values
default_root_folder = {
    "gaussian": "results/gaussian/",
    "gamma": "results/gamma/",
    "beta": "results/beta/",
    "AR2": "results/AR2/",
    "MA2": "results/MA2/",
    "fullLorenz95": "results/fullLorenz95/",
    "fullLorenz95smaller": "results/fullLorenz95smaller/"
}
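
# A hedged sketch of how the lookup above might be used; "args.root_folder" is
# an assumed CLI argument that is not shown in this excerpt:
results_folder = args.root_folder if args.root_folder is not None \
    else default_root_folder[model]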
Example #3
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        lb = np.array([-5, 0])
        ub = np.array([5, 10])
        prior = Uniform(lb, ub, seed=1)

        # define a Gaussian model
        model = Gaussian(prior, mu=2.1, sigma=5.0, seed=1)

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        y_obs = model.simulate(1)

        # Define the likelihood function
        likfun = SynLiklihood(stat_calc)

        # use the PMC scheme for T = 1
        mean = np.array([-13.0, .0, 7.0])
        cov = np.eye(3)
        kernel = MultiNormal(mean, cov, seed=1)

        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute posterior mean
        mu_post_sample = np.array(samples[0][:, 0])
        sigma_post_sample = np.array(samples[0][:, 1])
        post_weights = np.array(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)
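        # np.average with importance weights computes the weighted posterior mean, e.g.
        # np.average(np.array([1.0, 2.0, 3.0]), weights=np.array([0.2, 0.3, 0.5]))
        # == 0.2 * 1.0 + 0.3 * 2.0 + 0.5 * 3.0 == 2.3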

        # test shape of sample
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, ))
        self.assertEqual(sigma_sample_shape, (10, ))
        self.assertEqual(weights_sample_shape, (10, ))
        self.assertLess(abs(mu_post_mean - (-1.48953333102)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 6.50695612708), 1e-10)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC(model, likfun, kernel, backend, seed=1)
        journal = sampler.sample(y_obs,
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactor=np.array([.1, .1]),
                                 iniPoints=None)
        samples = (journal.get_parameters(), journal.get_weights())

        # Compute posterior mean
        mu_post_sample = np.asarray(samples[0][:, 0])
        sigma_post_sample = np.asarray(samples[0][:, 1])
        post_weights = np.asarray(samples[1][:, 0])
        mu_post_mean = np.average(mu_post_sample, weights=post_weights)
        sigma_post_mean = np.average(sigma_post_sample, weights=post_weights)

        # test shape of sample
        mu_sample_shape = np.shape(mu_post_sample)
        sigma_sample_shape = np.shape(sigma_post_sample)
        weights_sample_shape = np.shape(post_weights)
        self.assertEqual(mu_sample_shape, (10, ))
        self.assertEqual(sigma_sample_shape, (10, ))
        self.assertEqual(weights_sample_shape, (10, ))
        self.assertLess(abs(mu_post_mean - (-1.4033145848)), 1e-10)
        self.assertLess(abs(sigma_post_mean - 7.05175546876), 1e-10)
Example #4
    def test_sample(self):
        # setup backend
        backend = BackendDummy()

        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        self.model = Normal([mu, sigma])

        # define sufficient statistics for the model
        stat_calc = Identity(degree=2, cross=0)

        # create fake observed data
        #y_obs = self.model.forward_simulate(1, np.random.RandomState(1))[0].tolist()
        y_obs = [np.array(9.8)]

        # Define the likelihood function
        likfun = SynLiklihood(stat_calc)

        T, n_sample, n_samples_per_param = 1, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs],
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactors=np.array([.1, .1]),
                                 iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior mean
        mu_post_mean = journal.posterior_mean()['mu']
        sigma_post_mean = journal.posterior_mean()['sigma']

        # test shape of sample
        mu_sample_shape = (len(mu_post_sample), mu_post_sample[0].shape[1])
        sigma_sample_shape = (len(sigma_post_sample), sigma_post_sample[0].shape[1])
        weights_sample_shape = post_weights.shape
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-3.3711206204663764)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.518520667688998), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)

        # use the PMC scheme for T = 2
        T, n_sample, n_samples_per_param = 2, 10, 100
        sampler = PMC([self.model], [likfun], backend, seed=1)
        journal = sampler.sample([y_obs],
                                 T,
                                 n_sample,
                                 n_samples_per_param,
                                 covFactors=np.array([.1, .1]),
                                 iniPoints=None)
        mu_post_sample = np.array(journal.get_parameters()['mu'])
        sigma_post_sample = np.array(journal.get_parameters()['sigma'])
        post_weights = np.array(journal.get_weights())

        # Compute posterior mean
        mu_post_mean = journal.posterior_mean()['mu']
        sigma_post_mean = journal.posterior_mean()['sigma']

        # test shape of sample
        mu_sample_shape = (len(mu_post_sample), mu_post_sample[0].shape[1])
        sigma_sample_shape = (len(sigma_post_sample), sigma_post_sample[0].shape[1])
        weights_sample_shape = post_weights.shape
        self.assertEqual(mu_sample_shape, (10, 1))
        self.assertEqual(sigma_sample_shape, (10, 1))
        self.assertEqual(weights_sample_shape, (10, 1))
        self.assertLess(abs(mu_post_mean - (-2.970827684425406)), 1e-3)
        self.assertLess(abs(sigma_post_mean - 6.82165619013458), 1e-3)

        self.assertFalse(journal.number_of_simulations == 0)
Example #5

def main(epsilon,
         sigma,
         filename_prefix,
         perform_standard_optimal_control=False,
         perform_iterative_strategy=True,
         use_sample_with_higher_weight=False,
         use_posterior_median=False,
         n_post_samples=None,
         shift_each_iteration=1,
         n_shifts=10,
         window_size=30,
         only_plot=False,
         plot_file=None,
         plot_days=None,
         loss="deaths_Isc",
         results_folder=None,
         journal_file_name=None,
         training_window_length=None,
         use_mpi=False,
         restart_at_index=None):
    """epsilon is an array with size 3, with order school, work, other
    If use_sample_with_higher_weight is True: we do the procedure with that only, no posterior expectation
    use_posterior_median: do the optimal control with the marginal posterior median.
    n_post_samples: for the posterior expectation. Ignored if use_sample_with_higher_weight or use_posterior_median is True,
    shift_each_iteration and n_shifts are for the iterative strategy.
    """
    if use_mpi:
        print("Using MPI")
        backend = BackendMPI()
    else:
        backend = BackendDummy()

    print("Epsilon: ", epsilon)

    logging.basicConfig(level=logging.INFO)
    ############################ Load relevant data #################################################
    if results_folder is None:
        results_folder = "results/SEI4RD_france_infer_1Mar_31Aug/"
    data_folder = "data/france_inference_data_1Mar_to_31Aug/"

    alpha_home = 1  # set this to 1
    mobility_work = np.load(data_folder + "mobility_work.npy")
    mobility_other = np.load(data_folder + "mobility_other.npy")
    mobility_school = np.load(data_folder + "mobility_school.npy")

    france_pop = np.load(data_folder + "france_pop.npy", allow_pickle=True)

    contact_matrix_home = np.load(data_folder + "contact_matrix_home.npy")
    contact_matrix_work = np.load(data_folder + "contact_matrix_work.npy")
    contact_matrix_school = np.load(data_folder + "contact_matrix_school.npy")
    contact_matrix_other = np.load(data_folder + "contact_matrix_other.npy")

    if journal_file_name is None:
        jrnl = Journal.fromFile(results_folder + "PMCABC_inf3.jrl")
    else:
        jrnl = Journal.fromFile(results_folder + journal_file_name)
    #################################### Define Model #################################################
    # parameters
    n = 5  # number of age groups
    dt = 0.1  # integration timestep
    if training_window_length is not None:
        T = training_window_length
    else:
        T = mobility_school.shape[0] - 1  # horizon time in days
    total_population = france_pop  # population for each age group
    # 16th March: Boris Johnson asked old people to isolate; we then learn a new alpha from the 18th March:
    lockdown_day = 17

    # alpha_home = np.repeat(alpha_home, int(1 / dt), axis=0)
    # np.int was removed from NumPy; the builtin int is equivalent here
    mobility_work = np.repeat(mobility_work[0:T + 1], int(1 / dt), axis=0)
    mobility_other = np.repeat(mobility_other[0:T + 1], int(1 / dt), axis=0)
    mobility_school = np.repeat(mobility_school[0:T + 1], int(1 / dt), axis=0)
    # daily_tests = np.repeat(daily_tests, int(1 / dt), axis=0)

    # ABC model (priors need to be fixed better):
    beta = Uniform([[0], [0.5]], name='beta')  # controls how fast the epidemic grows; related to R_0
    d_L = Uniform([[1], [16]], name='d_L')  # average duration of incubation
    d_C = Uniform([[1], [16]], name='d_C')  # average time before going to clinical
    d_R = Uniform([[1], [16]], name='d_R')  # average recovery time
    d_RC = Uniform([[1], [16]], name='d_RC')  # average recovery time
    d_D = Uniform([[1], [16]], name='d_D')  # average duration of infected clinical state (resulting in death)
    p01 = Uniform([[0], [1]], name="p01")
    p02 = Uniform([[0], [1]], name="p02")
    p03 = Uniform([[0], [1]], name="p03")
    p04 = Uniform([[0], [1]], name="p04")
    p05 = Uniform([[0], [1]], name="p05")
    p11 = Uniform([[0], [1]], name="p11")
    p12 = Uniform([[0], [1]], name="p12")
    p13 = Uniform([[0], [1]], name="p13")
    p14 = Uniform([[0], [1]], name="p14")
    p15 = Uniform([[0], [1]], name="p15")
    initial_exposed = Uniform([[0], [500]], name="initial_exposed")
    alpha_123 = Uniform([[0.3], [1]], name="alpha_123")
    alpha_4 = Uniform([[0], [1]], name="alpha_4")
    alpha_5 = Uniform([[0], [1]], name="alpha_5")

    model = SEI4RD([
        beta, d_L, d_C, d_R, d_RC, d_D, p01, p02, p03, p04, p05, p11, p12, p13,
        p14, p15, initial_exposed, alpha_123, alpha_4, alpha_5
    ],
                   tot_population=total_population,
                   T=T,
                   contact_matrix_school=contact_matrix_school,
                   contact_matrix_work=contact_matrix_work,
                   contact_matrix_home=contact_matrix_home,
                   contact_matrix_other=contact_matrix_other,
                   alpha_school=mobility_school,
                   alpha_work=mobility_work,
                   alpha_home=alpha_home,
                   alpha_other=mobility_other,
                   modify_alpha_home=False,
                   dt=dt,
                   return_once_a_day=True,
                   learn_alphas_old=True,
                   lockdown_day=lockdown_day)

    # guess for a phi function
    NHS_max = 10000

    def phi_func_sc(x):  # a hard-max function
        return np.maximum(0, x - NHS_max)

    def phi_func_death(x):  # a hard-max function
        return np.maximum(0, x)
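
    # For example, with NHS_max = 10000, phi_func_sc(np.array([9000., 12000.]))
    # returns array([0., 2000.]): values below the threshold incur no penalty,
    # and the excess above NHS_max is penalized linearly.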

    # def phi_func(x):
    #     return np.pow(np.maximum(0, x - NHS_max), 2)

    # def phi_func(x, beta=.1):  # this is the softplus, a smooth version of hard max
    #    threshold = 30
    #    shape = x.shape
    #    x = x.reshape(-1)
    #    new_x = x - NHS_max
    #    indices = new_x * beta < threshold
    #    phi_x = copy.deepcopy(new_x)  # is deepcopy actually needed?
    #    phi_x[indices] = np.log(
    #        1 + np.exp(new_x[indices] * beta)) / beta  # approximate for numerical stability in other places
    #    return phi_x.reshape(shape)

    # extract posterior sample points and bootstrap them:
    seed = 1
    np.random.seed(seed)
    iteration = -1
    weights = jrnl.get_weights(iteration) / np.sum(jrnl.get_weights(iteration))
    params = jrnl.get_parameters(iteration)
    if not use_posterior_median:
        if use_sample_with_higher_weight:
            post_samples = np.where(weights == weights.max())[0]
        else:
            # bootstrap
            if n_post_samples is None:
                n_post_samples = len(weights)
            post_samples = np.random.choice(range(len(weights)),
                                            p=weights.reshape(-1),
                                            size=n_post_samples)
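            # e.g. np.random.choice(range(3), p=[0.5, 0.25, 0.25], size=4) draws
            # indices in proportion to the posterior weights, returning something
            # like array([0, 2, 0, 1])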

        beta_values = np.array([params['beta'][i][0] for i in post_samples])
        kappa_values = np.array([1 / params['d_L'][i][0] for i in post_samples])
        gamma_c_values = np.array([1 / params['d_C'][i][0] for i in post_samples])
        gamma_r_values = np.array([1 / params['d_R'][i][0] for i in post_samples])
        gamma_rc_values = np.array([1 / params['d_RC'][i][0] for i in post_samples])
        nu_values = np.array([1 / params['d_D'][i][0] for i in post_samples])
        rho_values = np.array([
            np.array([params[key][i][0] for key in ['p01', 'p02', 'p03', 'p04', 'p05']]).reshape(-1)
            for i in post_samples
        ])
        rho_prime_values = np.array([
            np.array([params[key][i][0] for key in ['p11', 'p12', 'p13', 'p14', 'p15']]).reshape(-1)
            for i in post_samples
        ])
        alpha_123_values = np.array([params["alpha_123"][i][0] for i in post_samples])
        alpha_4_values = np.array([params["alpha_4"][i][0] for i in post_samples])
        alpha_5_values = np.array([params["alpha_5"][i][0] for i in post_samples])
        initial_exposed_values = np.array([params["initial_exposed"][i][0] for i in post_samples])
    else:
        params_array = np.array(
            [[params[key][i] for i in range(len(params[key]))] for key in params.keys()]).squeeze()
        marginal_medians = {
            key: weighted_quantile(np.array(params[key]).reshape(-1), [0.5], weights.squeeze())
            for key in params.keys()
        }

        beta_values = np.array([marginal_medians['beta'][0]])
        kappa_values = np.array([1 / marginal_medians['d_L'][0]])
        gamma_c_values = np.array([1 / marginal_medians['d_C'][0]])
        gamma_r_values = np.array([1 / marginal_medians['d_R'][0]])
        gamma_rc_values = np.array([1 / marginal_medians['d_RC'][0]])
        nu_values = np.array([1 / marginal_medians['d_D'][0]])
        rho_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p01', 'p02', 'p03', 'p04', 'p05']
            ]).reshape(-1)
        ])
        rho_prime_values = np.array([
            np.array([
                marginal_medians[key][0]
                for key in ['p11', 'p12', 'p13', 'p14', 'p15']
            ]).reshape(-1)
        ])
        alpha_123_values = np.array([marginal_medians["alpha_123"][0]])
        alpha_4_values = np.array([marginal_medians["alpha_4"][0]])
        alpha_5_values = np.array([marginal_medians["alpha_5"][0]])
        initial_exposed_values = np.array(
            [marginal_medians["initial_exposed"][0]])

    # instantiate the posterior cost class:
    posterior_cost = PosteriorCost(model,
                                   phi_func_sc=phi_func_sc,
                                   phi_func_death=phi_func_death,
                                   beta_vals=beta_values,
                                   kappa_vals=kappa_values,
                                   gamma_c_vals=gamma_c_values,
                                   gamma_r_vals=gamma_r_values,
                                   gamma_rc_vals=gamma_rc_values,
                                   nu_vals=nu_values,
                                   rho_vals=rho_values,
                                   rho_prime_vals=rho_prime_values,
                                   alpha_123_vals=alpha_123_values,
                                   alpha_4_vals=alpha_4_values,
                                   alpha_5_vals=alpha_5_values,
                                   initial_exposed_vals=initial_exposed_values,
                                   loss=loss)

    if plot_days is None:
        n_days = 120
    else:
        n_days = plot_days
    end_training_mobility_values = [
        mobility_school[-1], mobility_work[-1], mobility_other[-1]
    ]
    # the initial mobility assumes values stay constant at their last observed day
    mobility_initial = copy.deepcopy(
        np.stack((mobility_school[-1] * np.ones(shape=(n_days, )),
                  mobility_work[-1] * np.ones(shape=(n_days, )),
                  mobility_other[-1] * np.ones(shape=(n_days, ))))).flatten()

    # Only plot using a mobility file
    if only_plot:
        mobility = np.load(results_folder + plot_file)[:, 0:n_days]
        fig, ax = posterior_cost.produce_plot(mobility, n_days)
        plt.savefig(results_folder + filename_prefix + ".pdf")
        plt.close(fig)
        return

    # try cost computation:
    t = time.time()
    cost_initial = posterior_cost.compute_cost(mobility_initial, n_days, sigma,
                                               epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(mobility_initial, n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_final_training_lockdown_conditions.pdf")
    # plt.close(fig)
    cost_no_lockdown = posterior_cost.compute_cost(
        np.ones_like(mobility_initial), n_days, sigma, epsilon, backend)
    # fig, ax = posterior_cost.produce_plot(np.ones_like(mobility_initial), n_days)
    # plt.savefig(results_folder + filename_prefix + "evolution_under_no_lockdown.pdf")
    # plt.close(fig)
    print("Initial cost: {:.2f}, no-lockdown cost: {:.2f}".format(
        cost_initial, cost_no_lockdown))
    print(time.time() - t)

    # OPTIMAL CONTROL WITH NO MOVING WINDOW APPROACH
    if perform_standard_optimal_control:
        # bounds = different_bounds('startconstrained')
        bounds = different_bounds('realistic', n_days, mobility_initial,
                                  end_training_mobility_values)

        results_da = optimize.dual_annealing(posterior_cost.compute_cost,
                                             bounds=bounds,
                                             args=(n_days, sigma, epsilon,
                                                   backend),
                                             maxiter=10,
                                             maxfun=1e3,
                                             x0=mobility_initial)
        # Plotting the figures
        mobility_initial = mobility_initial.reshape(3, n_days)  # 3 instead of 4, as we are not using alpha_home
        mobility_final = results_da.x.reshape(3, n_days)
        cost_final = posterior_cost.compute_cost(mobility_final, n_days, sigma,
                                                 epsilon, backend)
        np.save(results_folder + filename_prefix + "mobility_standard",
                mobility_final)

    # MOVING WINDOW APPROACH
    if perform_iterative_strategy:
        print("Iterative strategy")
        # window_size = 30  # in days
        mobility_initial = copy.deepcopy(
            np.stack((mobility_school[-1] * np.ones(shape=(window_size, )),
                      mobility_work[-1] * np.ones(shape=(window_size, )),
                      mobility_other[-1] *
                      np.ones(shape=(window_size, ))))).flatten()

        # shift_each_iteration = 10  # number of days by which to shift the sliding window at each iteration.
        # n_shifts = 10
        total_days = n_shifts * shift_each_iteration
        print(total_days)

        total_mobility = np.zeros((3, total_days))

        if restart_at_index is not None:
            total_mobility = np.load(results_folder + filename_prefix +
                                     "mobility_iterative_" +
                                     str(restart_at_index) + ".npy")

        bounds = different_bounds(
            'realistic',
            n_days=window_size,
            alpha_initial=mobility_initial,
            end_training_alpha_values=end_training_mobility_values)

        for shift_idx in range(n_shifts):
            print('Running shift: ' + str(shift_idx))
            if restart_at_index is not None and shift_idx <= restart_at_index:
                # when restarting, replay the same loop so that the evolution of the model is the same
                mobility_final = np.zeros((3, window_size))
                mobility_final[:, 0:shift_each_iteration] = \
                    total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration]
                # keep the mobility constant for the future; this only initializes the next optimal control iteration
                mobility_final[:, shift_each_iteration:] = \
                    mobility_final[:, shift_each_iteration - 1].reshape(3, 1)
            else:
                # do the optimal control stuff
                results_da = optimize.dual_annealing(
                    posterior_cost.compute_cost,
                    bounds=bounds,
                    args=(window_size, sigma, epsilon, backend),
                    maxiter=10,
                    maxfun=1e3,
                    x0=mobility_initial)

                # get the result of the optimization in that time window
                mobility_final = results_da.x.reshape(3, window_size)
                # save it to the total_mobility array:
                total_mobility[:, shift_idx * shift_each_iteration:(shift_idx + 1) * shift_each_iteration] = \
                    mobility_final[:, 0:shift_each_iteration]
                # Save in between mobility steps
                np.save(
                    results_folder + filename_prefix + "mobility_iterative_" +
                    str(shift_idx), total_mobility)

            # update now the state of the model:
            posterior_cost.update_states(
                shift_each_iteration, mobility_final[:, :shift_each_iteration])

            # update mobility_initial as well, with the shifted values of mobility_final; this may speed up convergence
            mobility_initial_tmp = np.zeros_like(mobility_final)
            mobility_initial_tmp[:, 0:window_size - shift_each_iteration] = \
                mobility_final[:, shift_each_iteration:window_size]
            mobility_initial_tmp[:, window_size - shift_each_iteration:] = \
                np.stack([mobility_final[:, window_size - shift_each_iteration - 1]] * shift_each_iteration, axis=1)
            mobility_initial = mobility_initial_tmp.flatten()

        np.save(results_folder + filename_prefix + "mobility_iterative",
                total_mobility)
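
# A usage sketch with hypothetical argument values; epsilon is the size-3 array
# described in the docstring (school, work, other) and sigma is forwarded to
# PosteriorCost.compute_cost:
if __name__ == '__main__':
    main(epsilon=np.array([1.0, 1.0, 1.0]),
         sigma=0.1,
         filename_prefix="test_run_")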