Example #1
import os
import pickle

import numpy as np

# `utility` and the model/state classes are provided by the surrounding
# project; only the standard-library and NumPy imports are shown here.

def get_model_info(timespan,
                   ensemble_size=25,
                   model=None,
                   load_ensemble=False,
                   ignore_existing_repo=False,
                   repo_file=None):
    """
    """
    timespan = np.array(timespan).flatten()
    # print("Requesting timespan: ", timespan)
    # Check the experiment repository
    if repo_file is None:
        file_name = 'Coupled_Lorenz_Repository.pickle'
        file_name = os.path.abspath(
            os.path.join(os.path.dirname(__file__), file_name))
    else:
        file_name = os.path.abspath(repo_file)

    # Check repo if needed, and found:
    if ignore_existing_repo:
        ref_IC_np = ref_trajectory_np = init_ensemble_np = observations_np = None
        #
    elif os.path.isfile(file_name):
        print("Loading model repo from : %s" % file_name)
        cont = pickle.load(open(file_name, 'rb'))
        tspan = cont['tspan']

        # Get the initial condition anyways
        ref_IC_np = cont['ref_trajectory'][:, 0].copy()

        # Verify timespan, and get reference trajectory
        if tspan.size < timespan.size:
            # Repo doesn't cover the requested timespan; regenerate below
            ref_trajectory_np = None
            t_indexes = None
            #
        elif tspan.size > timespan.size:
            # Look into the reference trajectory to find matches, if existing
            t_indexes = []
            for i, t in enumerate(timespan):
                loc = np.where(np.isclose(tspan, t, rtol=1e-12))[0]
                if loc.size > 0:
                    loc = loc[0]
                    t_indexes.append(loc)
                else:
                    t_indexes = None
                    break
            #
            if t_indexes is not None:
                t_indexes.sort()
                t_indexes = np.asarray(t_indexes)
                ref_trajectory_np = cont['ref_trajectory'][:, t_indexes]
            else:
                ref_trajectory_np = None
                #
        else:
            if np.isclose(tspan, timespan).all():
                ref_trajectory_np = cont['ref_trajectory']
                t_indexes = np.arange(timespan.size)
            else:
                # mismatching time grid; regenerate below
                ref_trajectory_np = None
                t_indexes = None

        # Observations:
        if t_indexes is None or ref_trajectory_np is None:  # TODO: this is overkill!
            observations_np = None
        else:
            observations_np = cont['observations'][:, t_indexes]

        # Read the ensemble
        if load_ensemble:
            init_ensemble_np = cont['init_ensemble']
            if np.size(init_ensemble_np, 1) >= ensemble_size:
                # Enough members stored; take the first ensemble_size of them
                init_ensemble_np = init_ensemble_np[:, :ensemble_size]
            else:
                init_ensemble_np = None
        else:
            init_ensemble_np = None

    else:
        print(
            "The coupled-Lorenz model repo was not found; this is likely the first time this function/script is run."
        )
        ref_IC_np = ref_trajectory_np = init_ensemble_np = observations_np = None

    # Check the integrity of the loaded data:
    if (ref_trajectory_np is None or observations_np is None
            or (load_ensemble and init_ensemble_np is None)):
        #
        # Generate the missing pieces, and save them
        if model is None:
            raise ValueError(
                "A model object MUST be passed because the repo is not found "
                "in %s, or some information is missing!" % file_name)
        else:
            print("Got the model >> Creating experiment info...")

        # Reference IC
        if ref_IC_np is None:
            ref_IC = model._reference_initial_condition.copy()
            ref_IC_np = ref_IC.get_numpy_array()

        # Reference Trajectory (Truth)
        ref_trajectory = None
        if ref_trajectory_np is None:
            if ref_IC_np is not None:
                ref_IC = model.state_vector(ref_IC_np.copy())
            else:
                ref_IC = model._reference_initial_condition.copy()
            # generate the reference trajectory:
            ref_trajectory = model.integrate_state(ref_IC, timespan)
            ref_trajectory_np = utility.ensemble_to_np_array(ref_trajectory,
                                                             state_as_col=True)

        # Initial (forecast) Ensemble:
        if load_ensemble and init_ensemble_np is None:
            prior_noise_model = model.background_error_model
            prior_noise_sample = [
                prior_noise_model.generate_noise_vec()
                for i in xrange(ensemble_size)
            ]
            noise_mean = utility.ensemble_mean(prior_noise_sample)
            ic = prior_noise_model.generate_noise_vec().add(ref_IC)
            for i in xrange(ensemble_size):
                prior_noise_sample[i].axpy(-1.0, noise_mean)
                prior_noise_sample[i].add(ic, in_place=True)
            init_ensemble = prior_noise_sample
            init_ensemble_np = utility.ensemble_to_np_array(init_ensemble,
                                                            state_as_col=True)

        if observations_np is None:
            # Create observations at the assimilation checkpoints:
            if ref_trajectory is None:
                if ref_trajectory_np is None:
                    ref_trajectory = model.integrate_state(ref_IC, timespan)
                else:
                    # states are stored as columns (state_as_col=True)
                    ref_trajectory = [
                        model.state_vector(ref_trajectory_np[:, i].copy())
                        for i in xrange(np.size(ref_trajectory_np, 1))
                    ]
            observations = [
                model.evaluate_theoretical_observation(x)
                for x in ref_trajectory
            ]
            # Perturb observations:
            obs_noise_model = model.observation_error_model
            for obs_ind in xrange(len(timespan)):
                observations[obs_ind].add(obs_noise_model.generate_noise_vec(),
                                          in_place=True)
            observations_np = utility.ensemble_to_np_array(observations,
                                                           state_as_col=True)

        print("Experiment repo created; saving for later use...")
        # save results for later use
        out_dict = dict(tspan=timespan,
                        ref_trajectory=ref_trajectory_np,
                        init_ensemble=init_ensemble_np,
                        observations=observations_np)
        pickle.dump(out_dict, open(file_name, 'wb'))
        print("...done...")
        #
    else:
        print("All information properly loaded from file")

    return timespan, ref_IC_np, ref_trajectory_np, init_ensemble_np, observations_np
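
Note: `utility.ensemble_to_np_array` is used throughout these examples to convert a list of model state vectors into a plain NumPy array. A minimal sketch of the assumed semantics (states stored as columns when `state_as_col=True`, with the `get_numpy_array()` accessor seen above):

import numpy as np

def ensemble_to_np_array(ensemble, state_as_col=True):
    # Stack an ensemble of state vectors into a 2D array with one column
    # per member (state_size x ensemble_size), matching the repository
    # layout above, e.g. cont['ref_trajectory'][:, t_indexes].
    columns = [member.get_numpy_array().flatten() for member in ensemble]
    stacked = np.column_stack(columns)
    return stacked if state_as_col else stacked.T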
Example #2
                ref_initial_time=experiment_tspan[0],  # should be obtained from the model along with the ref_IC
                random_seed=2345)
            assim_output_configs = dict(scr_output=True,
                                        scr_output_iter=1,
                                        file_output=True,
                                        file_output_dir=file_output_dir,
                                        file_output_iter=1)
            experiment = FilteringProcess(assimilation_configs,
                                          output_configs=assim_output_configs)

            # Save reference Trajectory:
            trgt_dir = file_output_dir
            np.save(
                os.path.join(trgt_dir, 'Reference_Trajectory.npy'),
                utility.ensemble_to_np_array(ref_trajectory,
                                             state_as_col=True))
            np.save(
                os.path.join(trgt_dir, 'Initial_Ensemble.npy'),
                utility.ensemble_to_np_array(init_ensemble, state_as_col=True))
            np.save(
                os.path.join(trgt_dir, 'Observations.npy'),
                utility.ensemble_to_np_array(observations, state_as_col=True))

            # run the sequential filtering over the timespan created by da_checkpoints
            experiment.recursive_assimilation_process(observations,
                                                      obs_checkpoints,
                                                      da_checkpoints)

        if individual_plots:
            cmd = "python filtering_results_reader_coupledLorenz.py -f %s" % os.path.join(
                file_output_dir, 'output_dir_structure.txt')
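
The arrays saved by this snippet can be reloaded later for offline inspection; a small sketch (the directory path is a placeholder, and the column-per-state layout follows `state_as_col=True` above):

import os
import numpy as np

trgt_dir = "path/to/file_output_dir"  # placeholder for the real output dir
ref_traj = np.load(os.path.join(trgt_dir, 'Reference_Trajectory.npy'))
ens0 = np.load(os.path.join(trgt_dir, 'Initial_Ensemble.npy'))
obs = np.load(os.path.join(trgt_dir, 'Observations.npy'))
print(ref_traj.shape)  # (state_size, num_checkpoints)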
Example #3
    def EnKF_update(self, forecast_ensemble, observation, inflation_factor=1.0, in_place=False):
        """
        Calculate the kalman updated matrix, i.e. the update matrix of ensemble anomalies
            and calculate the (moving) analysis ensemble
        Args:
            state:

        Returns:
            analysis_ensemble: a list of model.state_vector objects
            X5: the state anomalies update matrix

        """
        model = self.smoother_configs['model']
        ensemble_size = len(forecast_ensemble)
        forecast_mean = utility.ensemble_mean(forecast_ensemble)
        forecast_anomalies = []

        if inflation_factor is None:
            inflation_factor = 1.0
        #
        for state in forecast_ensemble:
            if inflation_factor != 1:
                forecast_anomalies.append(state.axpy(-1, forecast_mean, in_place=False).scale(inflation_factor))
            else:
                forecast_anomalies.append(state.axpy(-1, forecast_mean, in_place=False))
        A_prime = utility.ensemble_to_np_array(forecast_anomalies)

        observation_perturbations = [model.evaluate_theoretical_observation(state) for state in forecast_anomalies]
        S = utility.ensemble_to_np_array(observation_perturbations)
        C = S.dot(S.T)

        try:
            C += (ensemble_size - 1) * model.observation_error_model.R[...]
        except Exception:
            # R may not support slicing; fall back to an explicit conversion
            C += (ensemble_size - 1) * model.observation_error_model.R.get_numpy_array()

        C_eigs, Z = linalg.eigh(C)
        C_eigs = 1.0 / C_eigs

        obs_innov = observation.axpy(-1, model.evaluate_theoretical_observation(forecast_mean))
        y = Z.T.dot(obs_innov.get_numpy_array())
        y *= C_eigs
        y = Z.dot(y)
        y = S.T.dot(y)

        # Update ensemble mean
        analysis_mean = model.state_vector(A_prime.dot(y))
        analysis_mean = analysis_mean.add(forecast_mean)
        xa = analysis_mean.get_numpy_array()

        inv_sqrt_Ceig = sparse.spdiags(np.sqrt(C_eigs), 0, C_eigs.size, C_eigs.size)
        X2 = inv_sqrt_Ceig.dot(Z.T.dot(S))
        _, Sig2, V2 = linalg.svd(X2, full_matrices=False)

        # Calculate the square root of the matrix I - Sig2' * Sig2
        Sqrtmat = sparse.spdiags(np.sqrt(1.0 - np.power(Sig2, 2)), 0, Sig2.size, Sig2.size)
        Aa_prime = A_prime.dot(V2.T.dot(Sqrtmat.dot(V2)))
        
        if self._verbose:
            # For debugging
            print(A_prime, type(A_prime))
        
        # Update analysis ensemble from the updated anomalies:
        analysis_ensemble = [model.state_vector(xa + Aa_prime[:, j]) for j in xrange(np.size(Aa_prime, 1))]
        
        # Build the update kernel X5
        xx = V2.T.dot(Sqrtmat.dot(V2))

        for j in xrange(np.size(xx, 1)):
            xx[:, j] += y[:]
        IN = np.ones((ensemble_size, ensemble_size), dtype=float) / ensemble_size
        X5 = IN + inflation_factor * np.dot(
            sparse.spdiags(np.ones(ensemble_size), 0, ensemble_size, ensemble_size) - IN, xx)

        return analysis_ensemble, X5
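
For reference, the update above follows the X5 (ensemble-transform) formulation described by Evensen (2003): every analysis member is a linear combination of forecast members, so Xa = Xf · X5. A self-contained NumPy sketch of the same construction (not the library's API), using a dense linear H and an SPD R for illustration:

import numpy as np

def enkf_sqrt_update(Xf, H, R, obs, inflation=1.0):
    # Deterministic square-root EnKF step; returns (Xa, X5) with Xa == Xf @ X5.
    n, N = Xf.shape
    xf_mean = Xf.mean(axis=1)
    A = inflation * (Xf - xf_mean[:, None])      # (inflated) forecast anomalies
    S = H.dot(A)                                 # observed anomalies
    C = S.dot(S.T) + (N - 1) * R
    lam, Z = np.linalg.eigh(C)                   # C = Z diag(lam) Z^T
    d = obs - H.dot(xf_mean)                     # innovation
    y = S.T.dot(Z.dot(Z.T.dot(d) / lam))         # S^T C^{-1} d
    X2 = Z.T.dot(S) / np.sqrt(lam)[:, None]      # diag(lam)^{-1/2} Z^T S
    _, sig2, V2h = np.linalg.svd(X2, full_matrices=False)
    T = V2h.T.dot(np.diag(np.sqrt(np.clip(1.0 - sig2**2, 0.0, None))).dot(V2h))
    xa_mean = xf_mean + A.dot(y)
    Xa = xa_mean[:, None] + A.dot(T)             # analysis mean + anomalies
    IN = np.ones((N, N)) / N
    X5 = IN + inflation * (np.eye(N) - IN).dot(T + np.outer(y, np.ones(N)))
    return Xa, X5

# Quick self-check on random data: the kernel reproduces the analysis.
rng = np.random.default_rng(0)
Xf = rng.standard_normal((8, 5))
Xa, X5 = enkf_sqrt_update(Xf, np.eye(3, 8), 0.1 * np.eye(3), rng.standard_normal(3))
assert np.allclose(Xf.dot(X5), Xa)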
Example #4
    def analysis(self):
        """
        Analysis step of the vanilla Ensemble Kalman Smoother (EnKS).
        In this case, the given forecast ensemble is propagated to the observation time instances to create model
        observations H(xk) that will be used in the assimilation process...

        """
        model = self.model

        # Timespan info:
        forecast_time = self.smoother_configs['forecast_time']
        analysis_time = self.smoother_configs['analysis_time']
        if self.smoother_configs['obs_checkpoints'] is None:
            print("Couldn't find observation checkpoints in self.smoother_configs['obs_checkpoints']; None found!")
            raise ValueError
        else:
            obs_checkpoints = np.asarray(self.smoother_configs['obs_checkpoints'])

        if (analysis_time - obs_checkpoints[0]) >= self._model_step_size:
            print("Observations MUST follow the assimilation times in this implementation!")
            raise ValueError

        # Get observations list:
        observations_list = self.smoother_configs['observations_list']
        #
        assim_flags = [True] * len(obs_checkpoints)
        if (forecast_time - obs_checkpoints[0]) >= self._model_step_size:
            print("forecast time can't be after the first observation time instance!")
            print("forecast_time", forecast_time)
            print("obs_checkpoints[0]", obs_checkpoints[0])
            raise AssertionError
            #
        elif abs(forecast_time - obs_checkpoints[0]) <= self._time_eps:
            # an observation exists at the forecast (initial) time
            pass
            #
        else:
            obs_checkpoints = np.insert(obs_checkpoints, 0, forecast_time)
            assim_flags.insert(0, False)

        # state and observation vector dimensions
        forecast_ensemble = self.smoother_configs['forecast_ensemble']
        if forecast_ensemble is None:
            print("Can't carry out the analysis step without a forecast ensemble; Found None!")
            raise ValueError

        if abs(forecast_time-analysis_time) > self._time_eps:
            print("At this point, the analysis and forecast times have to be the SAME!")
            print("Forecast Time=%f, Analysis time=%f" % (forecast_time, analysis_time))
            raise ValueError
        #
        elif (analysis_time-obs_checkpoints[0]) > self._time_eps:
            print("At this point, the analysis time should be less than or equal to the time of the first observation")
            print("Analysis time=%f Time of the first observation=%f" % (analysis_time, obs_checkpoints[0]))
            raise ValueError
        else:
            if len(observations_list) == 0:
                print("An empty list of observations is detected! No observations to assimilate;")
                print("Nothing to assimilate...")
                print("Setting the analysis state to the forecast state...")

                self.smoother_configs['analysis_state'] = self.smoother_configs['forecast_state'].copy()
                return

                #
            elif len(observations_list) == 1:
                if abs(obs_checkpoints[0]-forecast_time)<=self._time_eps and \
                    abs(analysis_time-forecast_time)<=self._time_eps:
                    print("A single observation is detected, and the assimilation time coincides with observation time;")
                    print("You are advised to use EnKF!")  # Todo, redirect automatically to 3D-Var
                    raise ValueError
                elif (obs_checkpoints[0]-forecast_time)>self._time_eps or (obs_checkpoints[0]-analysis_time)>self._time_eps:
                    pass
                else:
                    print("These time settings are not acceptable:")
                    print("Observation time instance: %f" % obs_checkpoints[0])
                    print("Forecast time: %f" % forecast_time)
                    print("Analysis time: %f" % analysis_time)
                    print("Terminating...")
                    raise ValueError
            else:
                # Good to go!
                pass

        #
        # > --------------------------------------------||
        # START the Smoothing  process:
        # > --------------------------------------------||
        # Forward propagation:
        # 1- Apply EnKF at each observation timepoint,
        # 2- Save Kalman gain matrix
        #
        cwd = os.getcwd()
        saved_kernels = []
        
        # Copy the initial forecast ensemble,
        moving_ensemble = [state.copy() for state in forecast_ensemble]
        ensemble_size = len(moving_ensemble)
        num_obs_points = len(observations_list)
        if self._verbose:
            print("Started the Analysis step of EnKS; with [num_obs_points] observation/assimilation time points;")
        inflation_factor = self.smoother_configs['inflation_factor']
        #
        # Forward to observation time instances and update observation term
        obs_ind = 0
        for iter_ind, t0, t1 in zip(xrange(num_obs_points), obs_checkpoints[:-1], obs_checkpoints[1:]):
            local_checkpoints = np.array([t0, t1])

            # For the initial subwindow only, check the assimilation flag at the window's initial time
            if iter_ind == 0:
                assim_flag = assim_flags[iter_ind]
                if assim_flag:
                    observation = observations_list[obs_ind]
                    obs_ind += 1
                    print("TODO: Applying EnKF step at the initial time of the whole assimilation window")
                    moving_ensemble, X5 = self.EnKF_update(moving_ensemble, observation, inflation_factor, in_place=True)
                    saved_kernels = self._save_EnKF_update_kernel(X5, saved_kernels)
                # Copy initial ensemble; could be saved to file instead
                analysis_ensemble_np = utility.ensemble_to_np_array(moving_ensemble)

            # Now, let's do forecast/analysis step for each subwindow:
            for ens_ind in xrange(ensemble_size):
                state = moving_ensemble[ens_ind]
                tmp_trajectory = model.integrate_state(initial_state=state, checkpoints=local_checkpoints)
                if isinstance(tmp_trajectory, list):
                    moving_ensemble[ens_ind] = tmp_trajectory[-1].copy()
                else:
                    moving_ensemble[ens_ind] = tmp_trajectory.copy()
            #
            assim_flag = assim_flags[iter_ind+1]
            if assim_flag:
                observation = observations_list[obs_ind]
                obs_ind += 1
                moving_ensemble, X5 = self.EnKF_update(moving_ensemble, observation, inflation_factor, in_place=True)
            # Save a kernel every iteration (re-saving the previous one when no
            # assimilation occurred) so saved_kernels stays aligned with iter_ind
            saved_kernels = self._save_EnKF_update_kernel(X5, saved_kernels)

        #
        # Backward propagation:
        for iter_ind in xrange(num_obs_points-2, -1, -1):
            assim_flag = assim_flags[iter_ind]
            try:
                next_assim_ind = iter_ind + 1 + assim_flags[iter_ind+1: ].index(True)
            except ValueError:
                # This is the latest assimilation cycle; proceed
                continue

            if assim_flag:
                kernel_file = saved_kernels[iter_ind]
                X5 = np.load(kernel_file)

                next_kernel = saved_kernels[next_assim_ind]
                X6 = np.load(next_kernel)

                X5 = X5.dot(X6)
                del X6

                # save/overwrite the updated kernel; no need to keep both versions!
                np.save(kernel_file, X5)

        # Update analysis ensemble at the initial time of the window:
        X5 = np.load(saved_kernels[0])
        analysis_ensemble_np = analysis_ensemble_np.dot(X5)
        del X5

        analysis_ensemble = moving_ensemble
        for ens_ind in xrange(ensemble_size):
            analysis_ensemble[ens_ind][:] = analysis_ensemble_np[:, ens_ind].copy()  # Need to copy?!

        self.smoother_configs.update({'analysis_ensemble': analysis_ensemble})
        self.smoother_configs.update({'analysis_state': utility.ensemble_mean(analysis_ensemble)})
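
The backward pass above relies on the update kernels being composable: multiplying saved kernels and applying the product to the stored initial ensemble is the same as applying the updates one at a time. A toy check with random stand-in kernels (real X5 matrices would come from EnKF_update):

import numpy as np

rng = np.random.default_rng(1)
E0 = rng.standard_normal((10, 4))    # initial ensemble, states as columns
X5_1 = rng.standard_normal((4, 4))   # stand-in update kernel at time t1
X5_2 = rng.standard_normal((4, 4))   # stand-in update kernel at time t2
# Sequential application vs. the composed kernel used in the loop above:
assert np.allclose(E0.dot(X5_1).dot(X5_2), E0.dot(X5_1.dot(X5_2)))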
def start_filtering(results_dir=None,
                    overwrite=True,
                    create_plots=True,
                    background_noise_level=None,
                    observation_noise_level=None):
    """
    """

    if background_noise_level is None:
        background_noise_level = 0.08
    if observation_noise_level is None:
        observation_noise_level = 0.05

    # Experiment Settings:
    # ============================================================
    # Time setup
    experiment_tspan = np.arange(0, 100.001, 0.1)

    # Model settings:
    num_Xs = 40
    num_Ys = 32
    F = 8.0

    observation_size = num_Xs

    # Filter settings:
    ensemble_size = 25
    #
    # ============================================================
    # Create model instance:
    model, checkpoints, cpld_model_info, model_info = \
            create_model_info(num_Xs=num_Xs,
                              num_Ys=num_Ys,
                              F=F,
                              observation_size=observation_size,
                              ensemble_size=ensemble_size,
                              observation_opertor_type='linear-coarse',
                              observation_noise_level=observation_noise_level,
                              background_noise_level=background_noise_level,
                              experiment_tspan=experiment_tspan
                             )
    cpld_ref_trajectory, cpld_init_ensemble, cpld_observations = cpld_model_info
    ref_trajectory, init_ensemble, observations = model_info
    cpld_ref_IC = cpld_ref_trajectory[0].copy()
    ref_IC = ref_trajectory[0].copy()

    #
    model_name = model._model_name
    state_size = model.state_size()
    obs_size = model.observation_size()
    print(
        "Lorenz model and corresponding observations created. Starting the Assimilation section"
    )

    #
    # ======================================================================================== #
    #                               Initialize the filter object                               #
    # ======================================================================================== #
    # Filter Configs
    # read settings from input file
    settings_filename = __FILTER_CONFIGS_FILENAME

    if os.path.isfile(settings_filename):
        _, parser = utility.read_configs(settings_filename)
        section_name = 'filter settings'
        adaptive_inflation = parser.getboolean(section_name,
                                               'adaptive_inflation')
        inflation_bounds = eval(parser.get(section_name, 'inflation_bounds'))
        inflation_design_penalty = parser.getfloat(section_name,
                                                   'inflation_design_penalty')
        inflation_factor = parser.getfloat(section_name, 'inflation_factor')
        forecast_inflation_factor = parser.getfloat(
            section_name, 'forecast_inflation_factor')
        #
        adaptive_localization = parser.getboolean(section_name,
                                                  'adaptive_localization')
        localization_function = parser.get(section_name,
                                           'localization_function')
        localization_radius = parser.getfloat(section_name,
                                              'localization_radius')
        localization_design_penalty = parser.getfloat(
            section_name, 'localization_design_penalty')
        localization_bounds = eval(
            parser.get(section_name, 'localization_bounds'))
        loc_direct_approach = parser.getint(section_name,
                                            'loc_direct_approach')
        localization_space = parser.get(section_name,
                                        'localization_space').upper().strip()
        #
        regularization_norm = parser.get(
            section_name, 'regularization_norm').lower().strip()
        moving_average_radius = parser.getint(section_name,
                                              'moving_average_radius')
        ensemble_size = parser.getint(section_name, 'ensemble_size')
        #
    else:
        print("Couldn't find configs file: %s" % settings_filename)
        raise IOError

    #
    # Both are now implemented in the adaptive OED-EnKF; we will test both
    if adaptive_inflation and adaptive_localization:
        forecast_inflation_factor = inflation_factor = 1.0
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_INFL_LOC'
            results_dir = os.path.join(
                results_dir, 'InflPenalty_%f' % (inflation_design_penalty))
            #

    elif adaptive_inflation:
        forecast_inflation_factor = inflation_factor = 1.0
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_INFL'
            results_dir = os.path.join(
                results_dir, 'LocRad_%f_InflPenalty_%f' %
                (localization_radius, inflation_design_penalty))
        #
    elif adaptive_localization:
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_LOC'
            results_dir = os.path.join(
                results_dir, 'InflFac_%f_LocPenalty_%f' %
                (forecast_inflation_factor, localization_design_penalty))
    else:
        results_dir = __BASE_RESULTS_DIR + '_NonAdaptive'
        inflation_factor = forecast_inflation_factor
        results_dir = os.path.join(
            results_dir,
            'InflFac_%f_LocRad_%f' % (inflation_factor, localization_radius))

    #
    if os.path.isdir(results_dir) and not overwrite:
        return None

    #
    enkf_filter_configs = dict(
        model=model,
        analysis_ensemble=init_ensemble,
        forecast_ensemble=None,
        ensemble_size=ensemble_size,
        #
        adaptive_inflation=adaptive_inflation,
        forecast_inflation_factor=forecast_inflation_factor,
        inflation_design_penalty=inflation_design_penalty,  # penalty of the regularization parameter
        localization_design_penalty=localization_design_penalty,  # penalty of the regularization parameter
        inflation_factor=inflation_factor,
        inflation_factor_bounds=inflation_bounds,
        adaptive_localization=adaptive_localization,
        localize_covariances=True,
        localization_radii_bounds=localization_bounds,
        localization_method='covariance_filtering',
        localization_radius=localization_radius,
        localization_function=localization_function,
        loc_direct_approach=loc_direct_approach,
        localization_space=localization_space,
        regularization_norm=regularization_norm,
        moving_average_radius=moving_average_radius,
    )
    #
    if adaptive_inflation and adaptive_localization:
        from adaptive_EnKF import EnKF_OED_Adaptive
    elif adaptive_inflation:
        from adaptive_EnKF import EnKF_OED_Inflation as EnKF_OED_Adaptive
    elif adaptive_localization:
        from adaptive_EnKF import EnKF_OED_Localization as EnKF_OED_Adaptive
    else:
        from EnKF import DEnKF as EnKF_OED_Adaptive
        print(
            "Neither adaptive inflation nor adaptive localization is invoked; falling back to DEnKF."
        )
        # raise ValueError
    #
    filter_obj = EnKF_OED_Adaptive(filter_configs=enkf_filter_configs,
                                   output_configs=dict(
                                       file_output_moment_only=False,
                                       verbose=False))
    #
    # ======================================================================================== #
    #

    #
    # ======================================================================================== #
    #                        Initialize the sequential DA process                              #
    # ======================================================================================== #
    # Create the processing object:
    # -------------------------------------
    #
    # create observations' and assimilation checkpoints:
    obs_checkpoints = experiment_tspan
    da_checkpoints = obs_checkpoints
    #

    # Here this is a filtering_process object;
    from filtering_process import FilteringProcess
    assimilation_configs = dict(
        filter=filter_obj,
        obs_checkpoints=obs_checkpoints,
        # da_checkpoints=da_checkpoints,
        forecast_first=True,
        ref_initial_condition=ref_IC,
        ref_initial_time=experiment_tspan[0],
        random_seed=2345)
    assim_output_configs = dict(scr_output=True,
                                scr_output_iter=1,
                                file_output_dir=results_dir,
                                file_output=True,
                                file_output_iter=1)
    experiment = FilteringProcess(assimilation_configs=assimilation_configs,
                                  output_configs=assim_output_configs)

    # Save reference Trajectory:
    results_dir = os.path.abspath(results_dir)
    np.save(os.path.join(results_dir, 'Reference_Trajectory.npy'),
            utility.ensemble_to_np_array(ref_trajectory, state_as_col=True))
    np.save(os.path.join(results_dir, 'Initial_Ensemble.npy'),
            utility.ensemble_to_np_array(init_ensemble, state_as_col=True))
    np.save(os.path.join(results_dir, 'Observations.npy'),
            utility.ensemble_to_np_array(observations, state_as_col=True))
    # Run the sequential filtering process:
    # -------------------------------------
    experiment.recursive_assimilation_process(observations, obs_checkpoints,
                                              da_checkpoints)
    #
    # ======================================================================================== #
    #
    if create_plots:
        # Try plotting the results
        try:
            osf = os.path.join(results_dir, 'output_dir_structure.txt')
            cmd = "python filtering_results_reader_coupledlorenz.py -f %s -o True -r True" % osf
            print("Plotting Results:\n%s" % cmd)
            os.system(cmd)
        except Exception:
            print("Failed to generate plots!")
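
Both drivers read their settings from a `[filter settings]` section via ConfigParser. A sketch that writes such a file with illustrative values (the key names are taken from the parser calls above; the values and bounds here are assumptions, not the project's defaults):

try:
    import configparser                    # Python 3
except ImportError:
    import ConfigParser as configparser    # Python 2

parser = configparser.RawConfigParser()
parser.add_section('filter settings')
for key, val in [('adaptive_inflation', True),
                 ('inflation_bounds', (1.0, 1.5)),       # illustrative
                 ('inflation_design_penalty', 0.05),     # illustrative
                 ('inflation_factor', 1.0),
                 ('forecast_inflation_factor', 1.0),
                 ('adaptive_localization', False),
                 ('localization_function', 'gauss'),     # illustrative
                 ('localization_radius', 4.0),           # illustrative
                 ('localization_design_penalty', 0.05),  # illustrative
                 ('localization_bounds', (0.5, 8.0)),    # illustrative
                 ('loc_direct_approach', 1),             # illustrative
                 ('localization_space', 'B'),            # illustrative
                 ('regularization_norm', 'l1'),          # illustrative
                 ('moving_average_radius', 2),           # illustrative
                 ('ensemble_size', 25)]:
    parser.set('filter settings', key, str(val))
with open('filter_settings.ini', 'w') as f:
    parser.write(f)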
def start_filtering(results_dir=None, overwrite=True, create_plots=True):
    """
    """

    # Experiment Settings:
    # ============================================================
    # Time setup
    experiment_tspan = np.arange(0, 100.001, 0.1)

    # Model settings:
    num_Xs = 40
    num_Ys = 32
    F = 8.0

    observation_size = num_Xs

    # Filter settings:
    ensemble_size = 25
    #
    # ============================================================

    # Create a model object for the truth
    try:
        _, cpld_ref_IC, cpld_ref_trajectory, cpld_init_ensemble, cpld_observations = get_model_info(
            experiment_tspan, ensemble_size)
    except ValueError:
        coupled_model_configs = dict(
            num_prognostic_variables=[
                num_Xs, num_Ys
            ],  # [X, Ys per X]: each X variable is coupled to num_Ys Y variables
            force=F,  # forcing term: F
            subgrid_varibles_parameters=[1.0, 10.0, 10.0],  # (h, c, b)
            # create_background_errors_correlations=True,
            observation_opertor_type='linear-coarse',
            observation_noise_level=0.05,
            background_noise_level=0.08)
        coupled_model = Coupled_Lorenz(coupled_model_configs)
        # Get Repo Info:
        _, cpld_ref_IC, cpld_ref_trajectory, cpld_init_ensemble, cpld_observations = get_model_info(
            experiment_tspan, ensemble_size, coupled_model)
        del coupled_model, coupled_model_configs

    #
    # Create a forecast model: i.e., use the reduced Lorenz-96 version with 40 variables
    model_configs = {
        'create_background_errors_correlations': True,
        'num_prognostic_variables': num_Xs,
        'observation_error_variances': 0.05,
        # 'observation_noise_level':0.05,
        'observation_vector_size': observation_size,  # observe everything first
        'background_noise_level': 0.08
    }
    model = Lorenz96(model_configs)
    model_name = model._model_name

    # return is in NumPy format
    # convert entities to model-based formats
    state_size = model.state_size()
    obs_size = model.observation_size()
    ref_IC = model.state_vector(cpld_ref_IC[:state_size].copy())
    ref_trajectory = []
    observations = []
    for i in xrange(len(experiment_tspan)):
        state = model.state_vector(cpld_ref_trajectory[:state_size, i])
        ref_trajectory.append(state)
        obs = model.state_vector(cpld_observations[:state_size, i])
        observations.append(model.evaluate_theoretical_observation(obs))

    # Create initial ensemble...
    init_ensemble = model.create_initial_ensemble(ensemble_size=ensemble_size)
    # init_ensemble = utility.inflate_ensemble(init_ensemble, 4, in_place=True)
    print(
        "Lorenz model and corresponding observations created. Starting the Assimilation section"
    )

    #
    # ======================================================================================== #
    #                               Initialize the filter object                               #
    # ======================================================================================== #
    # Filter Configs
    # read settings from input file
    settings_filename = __FILTER_CONFIGS_FILENAME
    default_configs = __DEF_FILTER_CONFIGS

    if os.path.isfile(settings_filename):
        _, parser = utility.read_configs(settings_filename)
        section_name = 'filter settings'
        if not parser.has_section(section_name):
            # No configurations found: set defaults
            print(
                "Configurations file found, but with no 'filter settings' section! Writing defaults"
            )
            this_dir = os.path.abspath(os.path.dirname(__file__))
            utility.write_dicts_to_config_file(settings_filename, this_dir,
                                               default_configs,
                                               'filter settings')
            fetched = False
        else:
            adaptive_inflation = parser.getboolean(section_name,
                                                   'adaptive_inflation')
            inflation_bounds = eval(
                parser.get(section_name, 'inflation_bounds'))
            inflation_design_penalty = parser.getfloat(
                section_name, 'inflation_design_penalty')
            inflation_factor = parser.getfloat(section_name,
                                               'inflation_factor')
            forecast_inflation_factor = parser.getfloat(
                section_name, 'forecast_inflation_factor')
            #
            adaptive_localization = parser.getboolean(section_name,
                                                      'adaptive_localization')
            localization_function = parser.get(section_name,
                                               'localization_function')
            localization_radius = parser.getfloat(section_name,
                                                  'localization_radius')
            localization_design_penalty = parser.getfloat(
                section_name, 'localization_design_penalty')
            localization_bounds = eval(
                parser.get(section_name, 'localization_bounds'))
            loc_direct_approach = parser.getint(section_name,
                                                'loc_direct_approach')
            localization_space = parser.get(
                section_name, 'localization_space').upper().strip()
            #
            regularization_norm = parser.get(
                section_name, 'regularization_norm').lower().strip()
            moving_average_radius = parser.getint(section_name,
                                                  'moving_average_radius')
            ensemble_size = parser.getint(section_name, 'ensemble_size')
            #
            fetched = True
    else:
        print("Couldn't find configs file: %s" % settings_filename)
        print("Added the default values to this config file for later use...")
        this_dir = os.path.abspath(os.path.dirname(__file__))
        utility.write_dicts_to_config_file(settings_filename, this_dir,
                                           default_configs, 'filter settings')
        fetched = False

    if not fetched:
        print("Getting settings from the default configurations dict")
        for k in default_configs:
            # NOTE: relies on Python 2 exec semantics to create function
            # locals; on Python 3 these would need explicit assignments.
            exec("%s = default_configs['%s']" % (k, k))
        #
    #
    # Both are now implemented in the adaptive OED-EnKF; we will test both
    if adaptive_inflation and adaptive_localization:
        forecast_inflation_factor = inflation_factor = 1.0
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_INFL_LOC'
            results_dir = os.path.join(
                results_dir, 'InflPenalty_%f' % (inflation_design_penalty))
            #
    elif adaptive_inflation:
        forecast_inflation_factor = inflation_factor = 1.0
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_INFL'
            results_dir = os.path.join(
                results_dir, 'LocRad_%f_InflPenalty_%f' %
                (localization_radius, inflation_design_penalty))
        #
    elif adaptive_localization:
        if results_dir is None:
            results_dir = __BASE_RESULTS_DIR + '_ADAPTIVE_LOC'
            results_dir = os.path.join(
                results_dir, 'InflFac_%f_LocPenalty_%f' %
                (forecast_inflation_factor, localization_design_penalty))
    else:
        results_dir = __BASE_RESULTS_DIR + '_NonAdaptive'
        inflation_factor = forecast_inflation_factor
        results_dir = os.path.join(
            results_dir,
            'InflFac_%f_LocRad_%f' % (inflation_factor, localization_radius))

    #
    if os.path.isdir(results_dir) and not overwrite:
        return None

    #
    enkf_filter_configs = dict(
        model=model,
        analysis_ensemble=init_ensemble,
        forecast_ensemble=None,
        ensemble_size=ensemble_size,
        #
        adaptive_inflation=adaptive_inflation,
        forecast_inflation_factor=forecast_inflation_factor,
        inflation_design_penalty=inflation_design_penalty,  # penalty of the regularization parameter
        localization_design_penalty=localization_design_penalty,  # penalty of the regularization parameter
        inflation_factor=inflation_factor,
        inflation_factor_bounds=inflation_bounds,
        adaptive_localization=adaptive_localization,
        localize_covariances=True,
        localization_radii_bounds=localization_bounds,
        localization_method='covariance_filtering',
        localization_radius=localization_radius,
        localization_function=localization_function,
        loc_direct_approach=loc_direct_approach,
        localization_space=localization_space,
        regularization_norm=regularization_norm,
        moving_average_radius=moving_average_radius,
    )
    #
    if adaptive_inflation and adaptive_localization:
        from adaptive_EnKF import EnKF_OED_Adaptive
    elif adaptive_inflation:
        from adaptive_EnKF import EnKF_OED_Inflation as EnKF_OED_Adaptive
    elif adaptive_localization:
        from adaptive_EnKF import EnKF_OED_Localization as EnKF_OED_Adaptive
    else:
        from EnKF import DEnKF as EnKF_OED_Adaptive
        print(
            "Neither adaptive inflation nor adaptive localization is invoked; falling back to DEnKF."
        )
        # raise ValueError
    #
    filter_obj = EnKF_OED_Adaptive(filter_configs=enkf_filter_configs,
                                   output_configs=dict(
                                       file_output_moment_only=False,
                                       verbose=False))
    #
    # ======================================================================================== #
    #

    #
    # ======================================================================================== #
    #                        Initialize the sequential DA process                              #
    # ======================================================================================== #
    # Create the processing object:
    # -------------------------------------
    #
    # create observations' and assimilation checkpoints:
    obs_checkpoints = experiment_tspan
    da_checkpoints = obs_checkpoints
    #

    # Here this is a filtering_process object;
    from filtering_process import FilteringProcess
    assimilation_configs = dict(
        filter=filter_obj,
        obs_checkpoints=obs_checkpoints,
        # da_checkpoints=da_checkpoints,
        forecast_first=True,
        ref_initial_condition=ref_IC,
        ref_initial_time=experiment_tspan[0],
        random_seed=2345)
    assim_output_configs = dict(scr_output=True,
                                scr_output_iter=1,
                                file_output_dir=results_dir,
                                file_output=True,
                                file_output_iter=1)
    experiment = FilteringProcess(assimilation_configs=assimilation_configs,
                                  output_configs=assim_output_configs)

    # Save reference Trajectory:
    results_dir = os.path.abspath(results_dir)
    np.save(os.path.join(results_dir, 'Reference_Trajectory.npy'),
            utility.ensemble_to_np_array(ref_trajectory, state_as_col=True))
    np.save(os.path.join(results_dir, 'Initial_Ensemble.npy'),
            utility.ensemble_to_np_array(init_ensemble, state_as_col=True))
    np.save(os.path.join(results_dir, 'Observations.npy'),
            utility.ensemble_to_np_array(observations, state_as_col=True))
    # Run the sequential filtering process:
    # -------------------------------------
    experiment.recursive_assimilation_process(observations, obs_checkpoints,
                                              da_checkpoints)
    #
    # ======================================================================================== #
    #
    if create_plots:
        print("Creating Plots")
        cmd = "python filtering_results_reader_coupledLorenz.py -f %s -r True -o True" % os.path.join(
            results_dir, 'output_dir_structure.txt')
        os.system(cmd)
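
A hypothetical driver for the variant above (the argument values are illustrative only):

if __name__ == '__main__':
    start_filtering(results_dir=None,  # let the function choose a directory name
                    overwrite=True,
                    create_plots=True)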