Example #1
    def __init__(
        self,
        pst,
        parcov=None,
        obscov=None,
        num_workers=0,
        use_approx_prior=True,
        submit_file=None,
        verbose=False,
        port=4004,
        worker_dir="template",
    ):
        self.logger = Logger(verbose)
        if verbose is not False:
            self.logger.echo = True
        self.num_workers = int(num_workers)
        if submit_file is not None:
            if not os.path.exists(submit_file):
                self.logger.lraise(
                    "submit_file {0} not found".format(submit_file))
        elif num_workers > 0:
            if not os.path.exists(worker_dir):
                self.logger.lraise(
                    "template dir {0} not found".format(worker_dir))

        self.worker_dir = worker_dir
        self.submit_file = submit_file
        self.port = int(port)
        self.paren_prefix = ".parensemble.{0:04d}.csv"
        self.obsen_prefix = ".obsensemble.{0:04d}.csv"

        if isinstance(pst, str):
            pst = Pst(pst)
        assert isinstance(pst, Pst)
        self.pst = pst
        self.sweep_in_csv = pst.pestpp_options.get("sweep_parameter_csv_file",
                                                   "sweep_in.csv")
        self.sweep_out_csv = pst.pestpp_options.get("sweep_output_csv_file",
                                                    "sweep_out.csv")
        if parcov is not None:
            assert isinstance(parcov, Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov, Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov

        self._initialized = False
        self.iter_num = 0
        self.total_runs = 0
        self.raw_sweep_out = None
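
A minimal usage sketch for context. It assumes this __init__ belongs to pyemu's EnsembleSmoother class (as the surrounding smoother.py examples suggest) and that "pest.pst" is an existing control file; both are placeholders:

import pyemu

# spawn 10 local workers from the "template" dir, listening on port 4004
es = pyemu.EnsembleSmoother("pest.pst",
                            num_workers=10,
                            worker_dir="template",
                            port=4004)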
Example #2
File: smoother.py  Project: zandy19/pyemu
    def __init__(self,
                 pst,
                 parcov=None,
                 obscov=None,
                 num_slaves=0,
                 use_approx=True,
                 restart_iter=0,
                 submit_file=None):
        self.num_slaves = int(num_slaves)
        self.submit_file = submit_file
        self.use_approx = bool(use_approx)
        self.paren_prefix = ".parensemble.{0:04d}.csv"
        self.obsen_prefix = ".obsensemble.{0:04d}.csv"

        if isinstance(pst, str):
            pst = Pst(pst)
        assert isinstance(pst, Pst)
        self.pst = pst
        self.sweep_in_csv = pst.pestpp_options.get("sweep_parameter_csv_file",
                                                   "sweep_in.csv")
        self.sweep_out_csv = pst.pestpp_options.get("sweep_output_csv_file",
                                                    "sweep_out.csv")
        if parcov is not None:
            assert isinstance(parcov, Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov, Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov
        self.restart = False

        if restart_iter > 0:
            self.restart_iter = restart_iter
            paren = self.pst.filename + self.paren_prefix.format(restart_iter)
            assert os.path.exists(paren),\
                "could not find restart par ensemble {0}".format(paren)
            obsen0 = self.pst.filename + self.obsen_prefix.format(0)
            assert os.path.exists(obsen0),\
                "could not find restart obs ensemble 0 {0}".format(obsen0)
            obsen = self.pst.filename + self.obsen_prefix.format(restart_iter)
            assert os.path.exists(obsen),\
                "could not find restart obs ensemble {0}".format(obsen)
            self.restart = True

        self.__initialized = False
        self.num_reals = 0
        self.half_parcov_diag = None
        self.half_obscov_diag = None
        self.delta_par_prior = None
        self.iter_num = 0
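
A hedged restart sketch based on the signature above; the class path, "pest.pst", and the iteration number are assumptions. Per the asserts in the constructor, restart_iter=3 requires pest.pst.parensemble.0003.csv, pest.pst.obsensemble.0000.csv, and pest.pst.obsensemble.0003.csv to exist next to the control file:

import pyemu

sm = pyemu.EnsembleSmoother("pest.pst",
                            num_slaves=10,
                            restart_iter=3)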
Example #3
    def update(self):
        if not self.__initialized:
            raise Exception("must call initialize() before update()")
        self._calc_obs()
        delta_obs = self._calc_delta_obs()
        u,s,v = delta_obs.pseudo_inv_components()
        #print(u)
        #print(s)
        #print(v)
        diff = self.obsensemble.as_pyemu_matrix() - self.obsensemble_0.as_pyemu_matrix()
        #print(diff)
        x1 = u.T * self.obscov.inv.sqrt * diff.T
        x1.autoalign = False
        #print(x1)
        x2 = (Cov.identity_like(s) + s**2).inv * x1
        #print(x2)
        x3 = v * s * x2
        #print(x3)
        upgrade_1 = (self.half_parcov_diag * self._calc_delta_par() * x3).to_dataframe()
        upgrade_1.index.name = "parnme"
        print(upgrade_1)
        self.parensemble += upgrade_1.T
        print(self.parensemble)
        if self.iter_num > 0:
            raise NotImplementedError()

        print(upgrade_1.shape)
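
Written out, the x1/x2/x3 chain above appears to implement the standard ensemble-smoother upgrade. With U S V^T the pseudo-inverse components of the scaled observation deviations, D and D_0 the current and initial observation ensembles, ΔP the scaled parameter deviations, and C_p^{-1/2}, C_o^{-1/2} the inverse square roots of the parameter and observation covariances:

$$x_1 = U^T C_o^{-1/2} (D - D_0)^T, \quad x_2 = (I + S^2)^{-1} x_1, \quad x_3 = V S x_2$$

$$\Delta P_{upgrade} = C_p^{-1/2}\, \Delta P\, x_3$$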
Example #4
    @property
    def posterior_parameter(self):
        """get the posterior parameter covariance matrix
        """
        if self.__posterior_parameter is not None:
            return self.__posterior_parameter
        else:
            self.clean()
            self.log("Schur's complement")
            try:
                pinv = self.parcov.inv
                r = self.xtqx + pinv
                r = r.inv
            except Exception as e:
                self.xtqx.to_binary("xtqx.err.jcb")
                pinv.to_ascii("parcov_inv.err.cov")
                self.logger.warn("error forming schur's complement: {0}".
                                 format(str(e)))
                self.logger.warn("problematic xtqx saved to xtqx.err.jcb")
                self.logger.warn("problematic inverse parcov saved to parcov_inv.err.cov")
                raise Exception("error forming schur's complement: {0}".
                                format(str(e)))
            assert r.row_names == r.col_names
            self.__posterior_parameter = Cov(r.x, row_names=r.row_names,
                                             col_names=r.col_names)
            self.log("Schur's complement")
            return self.__posterior_parameter
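
In matrix terms, the try block forms the posterior parameter covariance via the Schur complement of the prior:

$$\bar{C}_p = \left(X^T Q X + C_p^{-1}\right)^{-1}$$

where X^T Q X is the weighted normal-equations matrix (self.xtqx) and C_p is the prior parameter covariance (self.parcov).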
Example #5
    def initialize(self, num_reals):
        '''
        (re)initialize the process
        '''
        self.num_reals = int(num_reals)
        self.parensemble = ParameterEnsemble(self.pst)
        self.parensemble.draw(cov=self.parcov, num_reals=num_reals)

        self.obsensemble_0 = ObservationEnsemble(self.pst)
        self.obsensemble_0.draw(cov=self.obscov, num_reals=num_reals)
        self.obsensemble = self.obsensemble_0.copy()

        if self.parcov.isdiagonal:
            self.half_parcov_diag = self.parcov.inv.sqrt
        else:
            self.half_parcov_diag = Cov(x=np.diag(self.parcov.x),
                                        names=self.parcov.col_names,
                                        isdiagonal=True).inv.sqrt
        #if self.obscov.isdiagonal:
        #    self.half_obscov_inv = self.obscov.inv.sqrt
        #else:
        #    self.half_obscov_diag = Cov(x=np.diag(self.obscov.x),
        #                                names=self.obscov.col_names,
        #                                isdiagonal=True)

        self.delta_par_prior = self._calc_delta_par()

        self.__initialized = True
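
A hypothetical driver sketch connecting initialize() to the update() methods in the neighboring examples; es here is the placeholder smoother instance from the Example #1 sketch:

# draw 100 realizations from the prior, then take one smoother step
es.initialize(num_reals=100)
es.update()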
Example #6
    def update(self):
        if not self.__initialized:
            raise Exception("must call initialize() before update()")
        self._calc_obs()
        delta_obs = self._calc_delta_obs()
        u, s, v = delta_obs.pseudo_inv_components()
        #print(u)
        #print(s)
        #print(v)
        diff = self.obsensemble.as_pyemu_matrix() - \
               self.obsensemble_0.as_pyemu_matrix()
        #print(diff)
        x1 = u.T * self.obscov.inv.sqrt * diff.T
        x1.autoalign = False
        #print(x1)
        x2 = (Cov.identity_like(s) + s**2).inv * x1
        #print(x2)
        x3 = v * s * x2
        #print(x3)
        upgrade_1 = (self.half_parcov_diag * self._calc_delta_par() *
                     x3).to_dataframe()
        upgrade_1.index.name = "parnme"
        print(upgrade_1)
        self.parensemble += upgrade_1.T
        print(self.parensemble)
        if self.iter_num > 0:
            raise NotImplementedError()

        print(upgrade_1.shape)
Example #7
    def __init__(self,pst,parcov=None,obscov=None):
        assert isinstance(pst,Pst)
        self.pst = pst
        if parcov is not None:
            assert isinstance(parcov,Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov,Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov

        self.__initialized = False
        self.num_reals = 0
        self.half_parcov_diag = None
        self.half_obscov_diag = None
        self.delta_par_prior = None
        self.iter_num = 0
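
When parcov and obscov are omitted, the priors default to diagonal covariances derived from the control file itself. A sketch of the equivalent explicit construction ("pest.pst" is a hypothetical control file):

from pyemu import Pst, Cov

pst = Pst("pest.pst")
parcov = Cov.from_parameter_data(pst)    # diagonal, variances from parameter bounds
obscov = Cov.from_observation_data(pst)  # diagonal, variances from observation weights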
Example #8
    def __init__(self, pst, parcov=None, obscov=None):
        assert isinstance(pst, Pst)
        self.pst = pst
        if parcov is not None:
            assert isinstance(parcov, Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov, Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov

        self.__initialized = False
        self.num_reals = 0
        self.half_parcov_diag = None
        self.half_obscov_diag = None
        self.delta_par_prior = None
        self.iter_num = 0
Example #9
    @property
    def posterior_parameter(self):
        """get the posterior parameter covariance matrix.  If Schur.__posterior_parameter
        is None, the posterior parameter covariance matrix is calculated via
        the Schur complement before returning

        Returns
        -------
        posterior_parameter : pyemu.Cov
            the posterior parameter covariance matrix

        Note
        ----
        returns a reference

        Example
        -------
        ``>>>import pyemu``

        ``>>>sc = pyemu.Schur(jco="pest.jcb")``

        ``>>>post_cov = sc.posterior_parameter``

        ``>>>post_cov.to_ascii("post.cov")``


        """
        if self.__posterior_parameter is not None:
            return self.__posterior_parameter
        else:
            self.clean()
            self.log("Schur's complement")
            try:
                pinv = self.parcov.inv
                r = self.xtqx + pinv
                r = r.inv
            except Exception as e:
                self.xtqx.to_binary("xtqx.err.jcb")
                pinv.to_ascii("parcov_inv.err.cov")
                self.logger.warn(
                    "error forming schur's complement: {0}".format(str(e)))
                self.logger.warn("problemtic xtqx saved to xtqx.err.jcb")
                self.logger.warn(
                    "problematic inverse parcov saved to parcov_inv.err.cov")
                raise Exception("error forming schur's complement: {0}".format(
                    str(e)))
            assert r.row_names == r.col_names
            self.__posterior_parameter = Cov(r.x,
                                             row_names=r.row_names,
                                             col_names=r.col_names)
            self.log("Schur's complement")
            return self.__posterior_parameter
Example #10
    @property
    def posterior_parameter(self):
        """get the posterior parameter covariance matrix
        """
        if self.__posterior_parameter is not None:
            return self.__posterior_parameter
        else:
            self.clean()
            self.log("Schur's complement")
            r = (self.xtqx + self.parcov.inv).inv
            assert r.row_names == r.col_names
            self.__posterior_parameter = Cov(r.x,
                                             row_names=r.row_names,
                                             col_names=r.col_names)
            self.log("Schur's complement")
            return self.__posterior_parameter
Example #11
File: smoother.py  Project: wk1984/pyemu
    def update(self):
        if not self.__initialized:
            raise Exception("must call initialize() before update()")

        self._calc_obs()
        self.iter_num += 1
        self.obsensemble.to_csv("obsensemble.{0}.csv".format(self.iter_num))
        delta_obs = self._calc_delta_obs()

        u,s,v = delta_obs.pseudo_inv_components()
        scaled_par_diff = self._calc_delta_par()
        scaled_obs_diff = self.obsensemble.as_pyemu_matrix() -\
               self.obsensemble_0.as_pyemu_matrix()
        scaled_ident = (self.current_lambda*Cov.identity_like(s) + s**2).inv

        x1 = u.T * self.obscov.inv.sqrt * scaled_obs_diff.T
        x1.autoalign = False
        x2 = scaled_ident * x1
        x3 = v * s * x2
        upgrade_1 = -1.0 *  (self.half_parcov_diag * scaled_par_diff *\
                             x3).to_dataframe()
        upgrade_1.index.name = "parnme"
        upgrade_1.T.to_csv("upgrade_1.{0}.csv".format(self.iter_num))
        self.parensemble += upgrade_1.T
        if self.iter_num > 1:
            par_diff = (self.parensemble - self.parensemble_0).\
                as_pyemu_matrix().T
            x4 = self.Am.T * self.half_parcov_diag * par_diff
            x5 = self.Am * x4
            x6 = scaled_par_diff.T * x5
            x7 = v * scaled_ident * v.T * x6
            upgrade_2 = -1.0 * (self.half_parcov_diag *
                                scaled_par_diff * x7).to_dataframe()
            upgrade_2.index.name = "parnme"
            upgrade_2.T.to_csv("upgrade_2.{0}.csv".format(self.iter_num))
            self.parensemble += upgrade_2.T
        self.parensemble.to_csv("parensemble.{0}.csv".format(self.iter_num))
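
With λ the current damping factor (self.current_lambda), A_m the matrix held in self.Am, and P, P_0 the current and initial parameter ensembles, the two upgrade terms above appear to correspond to the Levenberg-Marquardt form of the iterative ensemble smoother (cf. Chen and Oliver, 2013), in the notation of the Example #3 sketch:

$$\Delta P_1 = -\,C_p^{-1/2}\, \Delta P\; V S \left(\lambda I + S^2\right)^{-1} U^T C_o^{-1/2} (D - D_0)^T$$

$$\Delta P_2 = -\,C_p^{-1/2}\, \Delta P\; V \left(\lambda I + S^2\right)^{-1} V^T \Delta P^T A_m A_m^T C_p^{-1/2} (P - P_0)^T$$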
Example #12
File: mat_tests.py  Project: aleaf/pyemu
def mat_test():
    import os
    import numpy as np
    from pyemu.mat import Jco,Cov,concat
    test_dir = os.path.join("mat")
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)
    arr = np.arange(0,12)
    arr.resize(4,3)
    first = Jco(x=arr,col_names=["p1","p2","p3"],row_names=["o1","o2","o3","o4"])
    first.to_binary(os.path.join(test_dir,"test.bin"))
    first.from_binary(os.path.join(test_dir,"test.bin"))

    first = Jco(x=np.ones((4,3)),col_names=["p1","p2","p3"],row_names=["o1","o2","o3","o4"])
    second = Cov(x=np.ones((3,3))+1.0,names=["p1","p2","p3"],isdiagonal=False)
    third = Cov(x=np.ones((4,1))+2.0,names=["o1","o2","o3","o4"],isdiagonal=True)
    third.to_uncfile(os.path.join(test_dir,"test.unc"),
                     covmat_file=os.path.join(test_dir,"cov.mat"))
    third.from_uncfile(os.path.join(test_dir,"test.unc"))

    si = second.identity
    result = second - second.identity
    # add and sub
    newfirst = first.get(row_names=["o1"],col_names="p1")
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst
    newfirst = first.get(row_names=["o1","o2"],col_names="p1")
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst
    newfirst = first.get(row_names=["o1","o2"],col_names=["p1","p3"])
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst

    # mul test
    result = first.T * third * first
    result = first * second

    result = first.T.x * third
    result = 2.0 * third

    newfirst = first.get(col_names="p1")
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(col_names=["p1","p2"])
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(row_names=["o1"])
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(row_names=["o1","o2"])
    result = newfirst * second
    result = second * newfirst.T
    result = newfirst.T * third * newfirst

    newthird = third.get(row_names=["o1"])
    result = first.T * newthird * first

    result.to_sparse()

    # drop testing
    second.drop("p2",axis=0)
    assert second.shape == (2, 2)

    third.drop("o1",axis=1)
    assert third.shape == (3, 3)

    first.drop("p1",axis=1)
    assert first.shape == (4,2)

    first.drop("o4",axis=0)
    assert first.shape == (3,2)

    try:
        concat([first,third])
    except Exception:
        pass
    else:
        raise Exception("concat of mismatched matrices should have failed")
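
A small sketch of the name-based auto-alignment these operations appear to rely on, using only constructors and operators exercised in the test above:

from pyemu.mat import Jco, Cov
import numpy as np

j = Jco(x=np.ones((2, 2)), row_names=["o1", "o2"], col_names=["p1", "p2"])
c = Cov(x=np.eye(2), names=["p1", "p2"])
# operands are aligned on shared row/col names before multiplying
print((j * c).shape)  # (2, 2)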
Example #13
    def update(self,
               lambda_mults=[1.0],
               localizer=None,
               run_subset=None,
               use_approx=True,
               calc_only=False):
        """update the iES one GLM cycle

        Parameters
        ----------
            lambda_mults : list
                a list of lambda multipliers to test.  Each lambda mult value will require
                evaluating (a subset of) the parameter ensemble.
            localizer : pyemu.Matrix
                a jacobian localizing matrix
            run_subset : int
                the number of realizations to test for each lambda_mult value.  For example,
                if run_subset = 30 and num_reals=100, the first 30 realizations will be run (in
                parallel) for each lambda_mult value.  Then the best lambda_mult is selected and the
                remaining 70 realizations for that lambda_mult value are run (in parallel).
            use_approx : bool
                 a flag to choose between the MLE and MAP upgrade solutions.  True
                 indicates the MLE solution
            calc_only : bool
                a flag to calculate the upgrade matrix only (not run the ensemble). This is mostly for
                debugging and testing on travis. Default is False

        Example
        -------

        ``>>>import pyemu``

        ``>>>es = pyemu.EnsembleSmoother(pst="pest.pst")``

        ``>>>es.initialize(num_reals=100)``

        ``>>>es.update(lambda_mults=[0.1,1.0,10.0],run_subset=30)``

         """

        if run_subset is not None:
            if run_subset >= self.obsensemble.shape[0]:
                self.logger.warn("run_subset ({0}) >= num of active reals ({1})...ignoring ".\
                                 format(run_subset,self.obsensemble.shape[0]))
                run_subset = None

        self.iter_num += 1
        self.logger.log("iteration {0}".format(self.iter_num))
        self.logger.statement("{0} active realizations".format(
            self.obsensemble.shape[0]))
        if self.obsensemble.shape[0] < 2:
            self.logger.lraise(
                "at least active 2 realizations (really like 300) are needed to update"
            )
        if not self.__initialized:
            #raise Exception("must call initialize() before update()")
            self.logger.lraise("must call initialize() before update()")

        self.logger.log("calculate scaled delta obs")
        scaled_delta_obs = self._calc_delta_obs(self.obsensemble)
        self.logger.log("calculate scaled delta obs")
        self.logger.log("calculate scaled delta par")
        scaled_delta_par = self._calc_delta_par(self.parensemble)
        self.logger.log("calculate scaled delta par")

        self.logger.log("calculate pseudo inv comps")
        u, s, v = scaled_delta_obs.pseudo_inv_components()
        self.logger.log("calculate pseudo inv comps")

        self.logger.log("calculate obs diff matrix")
        obs_diff = self.obscov_inv_sqrt * self._get_residual_matrix(
            self.obsensemble).T
        self.logger.log("calculate obs diff matrix")

        # here is the math part...calculate upgrade matrices
        mean_lam, std_lam, paren_lam, obsen_lam = [], [], [], []
        lam_vals = []
        for ilam, cur_lam_mult in enumerate(lambda_mults):

            parensemble_cur_lam = self.parensemble.copy()
            #print(parensemble_cur_lam.isnull().values.any())

            cur_lam = self.current_lambda * cur_lam_mult
            lam_vals.append(cur_lam)
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))
            scaled_ident = Cov.identity_like(s) * (cur_lam + 1.0)
            scaled_ident += s**2
            scaled_ident = scaled_ident.inv

            # build up this matrix as a single element so we can apply
            # localization
            self.logger.log("building upgrade_1 matrix")
            upgrade_1 = -1.0 * (self.half_parcov_diag * scaled_delta_par) *\
                        v * s * scaled_ident * u.T
            self.logger.log("building upgrade_1 matrix")

            # apply localization
            if localizer is not None:
                self.logger.log("applying localization")
                upgrade_1.hadamard_product(localizer)
                self.logger.log("applying localization")

            # apply residual information
            self.logger.log("applying residuals")
            upgrade_1 *= obs_diff
            self.logger.log("applying residuals")

            self.logger.log("processing upgrade_1")
            upgrade_1 = upgrade_1.to_dataframe()
            upgrade_1.index.name = "parnme"
            upgrade_1 = upgrade_1.T
            upgrade_1.index = [int(i) for i in upgrade_1.index]
            upgrade_1.to_csv(self.pst.filename+".upgrade_1.{0:04d}.csv".\
                               format(self.iter_num))
            if upgrade_1.isnull().values.any():
                self.logger.lraise("NaNs in upgrade_1")
            self.logger.log("processing upgrade_1")

            #print(upgrade_1.isnull().values.any())
            #print(parensemble_cur_lam.index)
            #print(upgrade_1.index)
            parensemble_cur_lam += upgrade_1

            # parameter-based upgrade portion
            if not use_approx and self.iter_num > 1:
                self.logger.log("building upgrade_2 matrix")
                par_diff = (self.parensemble - self.parensemble_0.loc[self.parensemble.index,:]).\
                    as_pyemu_matrix().T
                x4 = self.Am.T * self.half_parcov_diag * par_diff
                x5 = self.Am * x4
                x6 = scaled_delta_par.T * x5
                x7 = v * scaled_ident * v.T * x6
                upgrade_2 = -1.0 * (self.half_parcov_diag * scaled_delta_par *
                                    x7).to_dataframe()
                upgrade_2.index.name = "parnme"
                upgrade_2 = upgrade_2.T
                upgrade_2.to_csv(self.pst.filename+".upgrade_2.{0:04d}.csv".\
                                   format(self.iter_num))
                upgrade_2.index = [int(i) for i in upgrade_2.index]

                if upgrade_2.isnull().values.any():
                    self.logger.lraise("NaNs in upgrade_2")

                parensemble_cur_lam += upgrade_2
                self.logger.log("building upgrade_2 matrix")
            parensemble_cur_lam.enforce(self.enforce_bounds)

            # this is for testing failed runs on upgrade testing
            # works with the 10par_xsec smoother test
            #parensemble_cur_lam.iloc[:,:] = -1000000.0

            paren_lam.append(pd.DataFrame(parensemble_cur_lam.loc[:, :]))
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))

        if calc_only:
            return

        # subset if needed
        # and combine lambda par ensembles into one par ensemble for evaluation
        if run_subset is not None and run_subset < self.parensemble.shape[0]:
            #subset_idx = ["{0:d}".format(i) for i in np.random.randint(0,self.parensemble.shape[0]-1,run_subset)]
            subset_idx = self.parensemble.iloc[:run_subset, :].index.values
            self.logger.statement("subset idxs: " +
                                  ','.join([str(s) for s in subset_idx]))
            paren_lam_subset = [pe.loc[subset_idx, :] for pe in paren_lam]
            paren_combine = pd.concat(paren_lam_subset, ignore_index=True)
            paren_lam_subset = None
        else:
            subset_idx = self.parensemble.index.values
            paren_combine = pd.concat(paren_lam, ignore_index=True)


        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        failed_runs, obsen_combine = self._calc_obs(paren_combine)
        #if failed_runs is not None:
        #    obsen_combine.loc[failed_runs,:] = np.NaN
        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        paren_combine = None

        if failed_runs is not None and len(
                failed_runs) == obsen_combine.shape[0]:
            self.logger.lraise("all runs failed - cannot continue")

        # unpack lambda obs ensembles from combined obs ensemble
        nrun_per_lam = self.obsensemble.shape[0]
        if run_subset is not None:
            nrun_per_lam = run_subset
        obsen_lam = []
        for i in range(len(lam_vals)):
            sidx = i * nrun_per_lam
            eidx = sidx + nrun_per_lam
            oe = ObservationEnsemble.from_dataframe(
                df=obsen_combine.iloc[sidx:eidx, :].copy(), pst=self.pst)
            oe.index = subset_idx
            # check for failed runs in this set - drop failed runs from obs ensembles
            if failed_runs is not None:
                failed_runs_this = np.array(
                    [f for f in failed_runs if f >= sidx and f < eidx]) - sidx
                if len(failed_runs_this) > 0:
                    if len(failed_runs_this) == oe.shape[0]:
                        self.logger.warn(
                            "all runs failed for lambda {0}".format(
                                lam_vals[i]))
                    else:
                        self.logger.warn("{0} run failed for lambda {1}".\
                                         format(len(failed_runs_this),lam_vals[i]))
                    oe.iloc[failed_runs_this, :] = np.NaN
                    oe = oe.dropna()

            # don't drop bad reals here, instead, mask bad reals in the lambda
            # selection and drop later
            # if self.drop_bad_reals is not None:
            #     assert isinstance(drop_bad_reals, float)
            #     drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
            #     run_ids = self.obsensemble.index.values
            #     drop_idx = run_ids[drop_idx]
            #     if len(drop_idx) == self.obsensemble.shape[0]:
            #         raise Exception("dropped all realizations as 'bad'")
            #     if len(drop_idx) > 0:
            #         self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
            #                          format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
            #         self.parensemble.loc[drop_idx, :] = np.NaN
            #         self.parensemble = self.parensemble.dropna()
            #         self.obsensemble.loc[drop_idx, :] = np.NaN
            #         self.obsensemble = self.obsensemble.dropna()
            #
            #         self.current_phi_vec = self._calc_phi_vec(self.obsensemble)

            obsen_lam.append(oe)
        obsen_combine = None

        # here is where we need to select out the "best" lambda par and obs
        # ensembles
        self.logger.statement("\n**************************")
        self.logger.statement(str(datetime.now()))
        self.logger.statement("total runs:{0}".format(self.total_runs))
        self.logger.statement("iteration: {0}".format(self.iter_num))
        self.logger.statement("current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                              format(self.current_lambda,
                         self.last_best_mean,self.last_best_std))
        phi_vecs = [self._calc_phi_vec(obsen) for obsen in obsen_lam]
        if self.drop_bad_reals is not None:
            for i, pv in enumerate(phi_vecs):
                #for testing the drop_bad_reals functionality
                #pv[[0,3,7]] = self.drop_bad_reals + 1.0
                pv[pv > self.drop_bad_reals] = np.NaN
                pv = pv[~np.isnan(pv)]
                if len(pv) == 0:
                    raise Exception("all realization for lambda {0} dropped as 'bad'".\
                                    format(lam_vals[i]))
                phi_vecs[i] = pv
        mean_std = [(pv.mean(), pv.std()) for pv in phi_vecs]
        update_pars = False
        update_lambda = False
        # accept a new best if it's within 10%
        best_mean = self.last_best_mean * 1.1
        best_std = self.last_best_std * 1.1
        best_i = 0
        for i, (m, s) in enumerate(mean_std):
            self.logger.statement(" tested lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                                 format(self.current_lambda * lambda_mults[i],m,s))
            if m < best_mean:
                update_pars = True
                best_mean = m
                best_i = i
                if s < best_std:
                    update_lambda = True
                    best_std = s
        if np.isnan(best_mean):
            self.logger.lraise("best mean = NaN")
        if np.isnan(best_std):
            self.logger.lraise("best std = NaN")

        if not update_pars:
            self.current_lambda *= max(lambda_mults) * 10.0
            self.current_lambda = min(self.current_lambda, 100000)
            self.logger.statement("not accepting iteration, increased lambda:{0}".\
                  format(self.current_lambda))
        else:
            self.parensemble = ParameterEnsemble.from_dataframe(
                df=paren_lam[best_i], pst=self.pst)
            if run_subset is not None:
                failed_runs, self.obsensemble = self._calc_obs(
                    self.parensemble)
                if failed_runs is not None:
                    self.logger.warn("dropping failed realizations")
                    self.parensemble.loc[failed_runs, :] = np.NaN
                    self.parensemble = self.parensemble.dropna()
                    self.obsensemble.loc[failed_runs, :] = np.NaN
                    self.obsensemble = self.obsensemble.dropna()

                self.current_phi_vec = self._calc_phi_vec(self.obsensemble)

                #self._phi_report(self.current_phi_vec,self.current_lambda * lambda_mults[best_i])
                best_mean = self.current_phi_vec.mean()
                best_std = self.current_phi_vec.std()
            else:
                self.obsensemble = obsen_lam[best_i]
                # reindex parensemble in case failed runs
                self.parensemble = ParameterEnsemble.from_dataframe(
                    df=self.parensemble.loc[self.obsensemble.index],
                    pst=self.pst)
                self.current_phi_vec = phi_vecs[best_i]

            if self.drop_bad_reals is not None:
                # for testing drop_bad_reals functionality
                # self.current_phi_vec[::2] = self.drop_bad_reals + 1.0
                drop_idx = np.argwhere(
                    self.current_phi_vec > self.drop_bad_reals).flatten()
                run_ids = self.obsensemble.index.values
                drop_idx = run_ids[drop_idx]
                if len(drop_idx) > self.obsensemble.shape[0] - 3:
                    raise Exception("dropped too many realizations as 'bad'")
                if len(drop_idx) > 0:
                    self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
                                     format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
                    self.parensemble.loc[drop_idx, :] = np.NaN
                    self.parensemble = self.parensemble.dropna()
                    self.obsensemble.loc[drop_idx, :] = np.NaN
                    self.obsensemble = self.obsensemble.dropna()

                    self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
                    best_mean = self.current_phi_vec.mean()
                    best_std = self.current_phi_vec.std()

            self._phi_report(self.phi_csv, self.current_phi_vec,
                             self.current_lambda * lambda_mults[best_i])
            self._phi_report(self.phi_act_csv,
                             self.obsensemble.phi_vector.values,
                             self.current_lambda * lambda_mults[best_i])


            self.logger.statement("   best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda*lambda_mults[best_i],
                         best_mean,best_std))
            self.logger.statement("   actual mean phi: {0:15.6G}".format(
                float(self.current_actual_phi.mean())))
            self.last_best_mean = best_mean
            self.last_best_std = best_std

        if update_lambda:
            # be aggressive
            self.current_lambda *= (lambda_mults[best_i] * 0.75)
            # but don't let lambda get too small
            self.current_lambda = max(self.current_lambda, 0.00001)
            self.logger.statement("updating lambda: {0:15.6G}".\
                  format(self.current_lambda ))

        self.logger.statement("**************************\n")
        self.parensemble.to_csv(self.pst.filename+self.paren_prefix.\
                                    format(self.iter_num))
        self.obsensemble.to_csv(self.pst.filename+self.obsen_prefix.\
                                    format(self.iter_num))
        if self.raw_sweep_out is not None:
            self.raw_sweep_out.to_csv(self.pst.filename+"_raw{0}".\
                                        format(self.iter_num))
        self.logger.log("iteration {0}".format(self.iter_num))
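
A worked example of the lambda bookkeeping above, with hypothetical values: given self.current_lambda = 10.0 and lambda_mults = [0.1, 1.0, 10.0], a rejected iteration inflates lambda to 10.0 * 10.0 * 10.0 = 1000.0 (capped at 100000), while an accepted iteration whose best multiplier of 0.1 also lowered the phi standard deviation shrinks it to 10.0 * 0.1 * 0.75 = 0.75 (floored at 0.00001).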
Example #14
File: smoother.py  Project: xuexianwu/pyemu
    def update(self,
               lambda_mults=[1.0],
               localizer=None,
               run_subset=None,
               use_approx=True):

        if run_subset is not None:
            if run_subset >= self.obsensemble.shape[0]:
                self.logger.warn("run_subset ({0}) >= num of active reals ({1})...ignoring ".\
                                 format(run_subset,self.obsensemble.shape[0]))
                run_subset = None

        self.iter_num += 1
        self.logger.log("iteration {0}".format(self.iter_num))
        self.logger.statement("{0} active realizations".format(
            self.obsensemble.shape[0]))
        if self.obsensemble.shape[0] < 2:
            self.logger.lraise(
                "at least active 2 realizations (really like 300) are needed to update"
            )
        if not self.__initialized:
            #raise Exception("must call initialize() before update()")
            self.logger.lraise("must call initialize() before update()")

        self.logger.log("calculate scaled delta obs")
        scaled_delta_obs = self._calc_delta_obs(self.obsensemble)
        self.logger.log("calculate scaled delta obs")
        self.logger.log("calculate scaled delta par")
        scaled_delta_par = self._calc_delta_par(self.parensemble)
        self.logger.log("calculate scaled delta par")

        self.logger.log("calculate pseudo inv comps")
        u, s, v = scaled_delta_obs.pseudo_inv_components()
        self.logger.log("calculate pseudo inv comps")

        self.logger.log("calculate obs diff matrix")
        obs_diff = self.obscov_inv_sqrt * self._get_residual_matrix(
            self.obsensemble).T
        self.logger.log("calculate obs diff matrix")

        # here is the math part...calculate upgrade matrices
        mean_lam, std_lam, paren_lam, obsen_lam = [], [], [], []
        lam_vals = []
        for ilam, cur_lam_mult in enumerate(lambda_mults):

            parensemble_cur_lam = self.parensemble.copy()
            #print(parensemble_cur_lam.isnull().values.any())

            cur_lam = self.current_lambda * cur_lam_mult
            lam_vals.append(cur_lam)
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))
            scaled_ident = Cov.identity_like(s) * (cur_lam + 1.0)
            scaled_ident += s**2
            scaled_ident = scaled_ident.inv

            # build up this matrix as a single element so we can apply
            # localization
            self.logger.log("building upgrade_1 matrix")
            upgrade_1 = -1.0 * (self.half_parcov_diag * scaled_delta_par) *\
                        v * s * scaled_ident * u.T
            self.logger.log("building upgrade_1 matrix")

            # apply localization
            if localizer is not None:
                self.logger.log("applying localization")
                upgrade_1.hadamard_product(localizer)
                self.logger.log("applying localization")

            # apply residual information
            self.logger.log("applying residuals")
            upgrade_1 *= obs_diff
            self.logger.log("applying residuals")

            self.logger.log("processing upgrade_1")
            upgrade_1 = upgrade_1.to_dataframe()
            upgrade_1.index.name = "parnme"
            upgrade_1 = upgrade_1.T
            upgrade_1.index = [int(i) for i in upgrade_1.index]
            upgrade_1.to_csv(self.pst.filename+".upgrade_1.{0:04d}.csv".\
                               format(self.iter_num))
            if upgrade_1.isnull().values.any():
                self.logger.lraise("NaNs in upgrade_1")
            self.logger.log("processing upgrade_1")

            #print(upgrade_1.isnull().values.any())
            #print(parensemble_cur_lam.index)
            #print(upgrade_1.index)
            parensemble_cur_lam += upgrade_1

            # parameter-based upgrade portion
            if not use_approx and self.iter_num > 1:
                self.logger.log("building upgrade_2 matrix")
                par_diff = (self.parensemble - self.parensemble_0.loc[self.parensemble.index,:]).\
                    as_pyemu_matrix().T
                x4 = self.Am.T * self.half_parcov_diag * par_diff
                x5 = self.Am * x4
                x6 = scaled_delta_par.T * x5
                x7 = v * scaled_ident * v.T * x6
                upgrade_2 = -1.0 * (self.half_parcov_diag * scaled_delta_par *
                                    x7).to_dataframe()
                upgrade_2.index.name = "parnme"
                upgrade_2 = upgrade_2.T
                upgrade_2.to_csv(self.pst.filename+".upgrade_2.{0:04d}.csv".\
                                   format(self.iter_num))
                upgrade_2.index = [int(i) for i in upgrade_2.index]

                if upgrade_2.isnull().values.any():
                    self.logger.lraise("NaNs in upgrade_2")

                parensemble_cur_lam += upgrade_2
                self.logger.log("building upgrade_2 matrix")
            parensemble_cur_lam.enforce(self.enforce_bounds)

            # this is for testing failed runs on upgrade testing
            # works with the 10par_xsec smoother test
            #parensemble_cur_lam.iloc[:,:] = -1000000.0

            paren_lam.append(pd.DataFrame(parensemble_cur_lam.loc[:, :]))
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))

        # subset if needed
        # and combine lambda par ensembles into one par ensemble for evaluation
        if run_subset is not None and run_subset < self.parensemble.shape[0]:
            #subset_idx = ["{0:d}".format(i) for i in np.random.randint(0,self.parensemble.shape[0]-1,run_subset)]
            subset_idx = self.parensemble.iloc[:run_subset, :].index.values
            self.logger.statement("subset idxs: " +
                                  ','.join([str(s) for s in subset_idx]))
            paren_lam_subset = [pe.loc[subset_idx, :] for pe in paren_lam]
            paren_combine = pd.concat(paren_lam_subset, ignore_index=True)
            paren_lam_subset = None
        else:
            subset_idx = self.parensemble.index.values
            paren_combine = pd.concat(paren_lam, ignore_index=True)


        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        failed_runs, obsen_combine = self._calc_obs(paren_combine)
        #if failed_runs is not None:
        #    obsen_combine.loc[failed_runs,:] = np.NaN
        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        paren_combine = None

        if failed_runs is not None and len(
                failed_runs) == obsen_combine.shape[0]:
            self.logger.lraise("all runs failed - cannot continue")

        # unpack lambda obs ensembles from combined obs ensemble
        nrun_per_lam = self.obsensemble.shape[0]
        if run_subset is not None:
            nrun_per_lam = run_subset
        obsen_lam = []
        for i in range(len(lam_vals)):
            sidx = i * nrun_per_lam
            eidx = sidx + nrun_per_lam
            oe = ObservationEnsemble.from_dataframe(
                df=obsen_combine.iloc[sidx:eidx, :].copy(), pst=self.pst)
            oe.index = subset_idx
            # check for failed runs in this set - drop failed runs from obs ensembles
            if failed_runs is not None:
                failed_runs_this = np.array(
                    [f for f in failed_runs if f >= sidx and f < eidx]) - sidx
                if len(failed_runs_this) > 0:
                    if len(failed_runs_this) == oe.shape[0]:
                        self.logger.warn(
                            "all runs failed for lambda {0}".format(
                                lam_vals[i]))
                    else:
                        self.logger.warn("{0} run failed for lambda {1}".\
                                         format(len(failed_runs_this),lam_vals[i]))
                    oe.iloc[failed_runs_this, :] = np.NaN
                    oe = oe.dropna()
            obsen_lam.append(oe)
        obsen_combine = None

        # here is where we need to select out the "best" lambda par and obs
        # ensembles
        self.logger.statement("\n**************************")
        self.logger.statement(str(datetime.now()))
        self.logger.statement("total runs:{0}".format(self.total_runs))
        self.logger.statement("iteration: {0}".format(self.iter_num))
        self.logger.statement("current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                              format(self.current_lambda,
                         self.last_best_mean,self.last_best_std))
        phi_vecs = [self._calc_phi_vec(obsen) for obsen in obsen_lam]
        mean_std = [(pv.mean(), pv.std()) for pv in phi_vecs]
        update_pars = False
        update_lambda = False
        # accept a new best if it's within 10%
        best_mean = self.last_best_mean * 1.1
        best_std = self.last_best_std * 1.1
        best_i = 0
        for i, (m, s) in enumerate(mean_std):
            self.logger.statement(" tested lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                                 format(self.current_lambda * lambda_mults[i],m,s))
            if m < best_mean:
                update_pars = True
                best_mean = m
                best_i = i
                if s < best_std:
                    update_lambda = True
                    best_std = s
        if np.isnan(best_mean):
            self.logger.lraise("best mean = NaN")
        if np.isnan(best_std):
            self.logger.lraise("best std = NaN")

        if not update_pars:
            self.current_lambda *= max(lambda_mults) * 10.0
            self.current_lambda = min(self.current_lambda, 100000)
            self.logger.statement("not accepting iteration, increased lambda:{0}".\
                  format(self.current_lambda))
        else:
            self.parensemble = ParameterEnsemble.from_dataframe(
                df=paren_lam[best_i], pst=self.pst)
            if run_subset is not None:
                failed_runs, self.obsensemble = self._calc_obs(
                    self.parensemble)
                if failed_runs is not None:
                    self.logger.warn("dropping failed realizations")
                    self.parensemble = self.parensemble.drop(failed_runs)
                    self.obsensemble = self.obsensemble.drop(failed_runs)
                self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
                self._phi_report(self.current_phi_vec,
                                 self.current_lambda * lambda_mults[best_i])
                best_mean = self.current_phi_vec.mean()
                best_std = self.current_phi_vec.std()
            else:
                self.obsensemble = obsen_lam[best_i]
                # reindex parensemble in case failed runs
                self.parensemble = ParameterEnsemble.from_dataframe(
                    df=self.parensemble.loc[self.obsensemble.index],
                    pst=self.pst)
                self._phi_report(phi_vecs[best_i],
                                 self.current_lambda * lambda_mults[best_i])
                self.current_phi_vec = phi_vecs[best_i]

            self.logger.statement("   best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda*lambda_mults[best_i],
                         best_mean,best_std))
            self.last_best_mean = best_mean
            self.last_best_std = best_std

        if update_lambda:
            # be aggressive
            self.current_lambda *= (lambda_mults[best_i] * 0.75)
            # but don't let lambda get too small
            self.current_lambda = max(self.current_lambda, 0.001)
            self.logger.statement("updating lambda: {0:15.6G}".\
                  format(self.current_lambda ))

        self.logger.statement("**************************\n")
        self.parensemble.to_csv(self.pst.filename+self.paren_prefix.\
                                    format(self.iter_num))
        self.obsensemble.to_csv(self.pst.filename+self.obsen_prefix.\
                                    format(self.iter_num))
        if self.raw_sweep_out is not None:
            self.raw_sweep_out.to_csv(self.pst.filename+"_raw{0}".\
                                        format(self.iter_num))
        self.logger.log("iteration {0}".format(self.iter_num))
Example #15
    def update(self,
               lambda_mults=[1.0],
               localizer=None,
               run_subset=None,
               use_approx=True,
               calc_only=False):
        """update the iES one GLM cycle

        Parameters
        ----------
            lambda_mults : list
                a list of lambda multipliers to test.  Each lambda mult value will require
                evaluating (a subset of) the parameter ensemble.
            localizer : pyemu.Matrix
                a jacobian localizing matrix
            run_subset : int
                the number of realizations to test for each lambda_mult value.  For example,
                if run_subset = 30 and num_reals=100, the first 30 realizations will be run (in
                parallel) for each lambda_mult value.  Then the best lambda_mult is selected and the
                remaining 70 realizations for that lambda_mult value are run (in parallel).
            use_approx : bool
                 a flag to choose between the MLE and MAP upgrade solutions.  True
                 indicates the MLE solution
            calc_only : bool
                a flag to calculate the upgrade matrix only (not run the ensemble). This is mostly for
                debugging and testing on travis. Default is False

        Example
        -------

        ``>>>import pyemu``

        ``>>>es = pyemu.EnsembleSmoother(pst="pest.pst")``

        ``>>>es.initialize(num_reals=100)``

        ``>>>es.update(lambda_mults=[0.1,1.0,10.0],run_subset=30)``

         """

        #if not self.parensemble.istransformed:
        #    self.parensemble._transform(inplace=False)

        if run_subset is not None:
            if run_subset >= self.obsensemble.shape[0]:
                self.logger.warn("run_subset ({0}) >= num of active reals ({1})...ignoring ".\
                                 format(run_subset,self.obsensemble.shape[0]))
                run_subset = None

        self.iter_num += 1
        mat_prefix = self.pst.filename.replace('.pst', '') + ".{0}".format(
            self.iter_num)
        self.logger.log("iteration {0}".format(self.iter_num))
        self.logger.statement("{0} active realizations".format(
            self.obsensemble.shape[0]))
        if self.obsensemble.shape[0] < 2:
            self.logger.lraise(
                "at least active 2 realizations (really like 300) are needed to update"
            )
        if not self._initialized:
            #raise Exception("must call initialize() before update()")
            self.logger.lraise("must call initialize() before update()")

        self.logger.log("calculate scaled delta obs")
        scaled_delta_obs = self._calc_delta_obs(self.obsensemble)
        self.logger.log("calculate scaled delta obs")
        self.logger.log("calculate scaled delta par")
        scaled_delta_par = self._calc_delta_par(self.parensemble)
        self.logger.log("calculate scaled delta par")

        self.logger.log("calculate pseudo inv comps")
        u, s, v = scaled_delta_obs.pseudo_inv_components(
            eigthresh=self.pst.svd_data.eigthresh)
        s.col_names = s.row_names
        self.logger.log("calculate pseudo inv comps")

        self.logger.log("calculate obs diff matrix")
        #obs_diff = self.obscov_inv_sqrt * self._get_residual_obs_matrix(self.obsensemble).T
        obs_diff = self.obscov_inv_sqrt * self.phi.get_residual_obs_matrix(
            self.obsensemble).T
        self.logger.log("calculate obs diff matrix")

        if self.save_mats:
            np.savetxt(mat_prefix + ".obs_diff.dat",
                       scaled_delta_obs.x,
                       fmt="%15.6e")
            np.savetxt(mat_prefix + ".par_diff.dat",
                       scaled_delta_par.x,
                       fmt="%15.6e")
            np.savetxt(mat_prefix + ".u.dat", u.x, fmt="%15.6e")
            np.savetxt(mat_prefix + ".s.dat", s.x, fmt="%15.6e")
            np.savetxt(mat_prefix + ".v.dat", v.x, fmt="%15.6e")
        # here is the math part...calculate upgrade matrices
        mean_lam, std_lam, paren_lam, obsen_lam = [], [], [], []
        lam_vals = []
        for ilam, cur_lam_mult in enumerate(lambda_mults):

            parensemble_cur_lam = self.parensemble.copy()
            #print(parensemble_cur_lam.isnull().values.any())

            cur_lam = self.current_lambda * cur_lam_mult
            lam_vals.append(cur_lam)
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))
            scaled_ident = Cov.identity_like(s) * (cur_lam + 1.0)
            scaled_ident += s**2
            scaled_ident = scaled_ident.inv

            # build up this matrix as a single element so we can apply
            # localization
            self.logger.log("building upgrade_1 matrix")
            upgrade_1 = -1.0 * (self.parcov_inv_sqrt * scaled_delta_par) *\
                        v * s * scaled_ident * u.T
            if self.save_mats:
                np.savetxt(mat_prefix + ".ivec.dat".format(self.iter_num),
                           scaled_ident.x,
                           fmt="%15.6e")
            self.logger.log("building upgrade_1 matrix")

            # apply localization
            if localizer is not None:
                self.logger.log("applying localization")
                upgrade_1.hadamard_product(localizer)
                self.logger.log("applying localization")

            # apply residual information
            self.logger.log("applying residuals")
            upgrade_1 *= obs_diff
            self.logger.log("applying residuals")

            self.logger.log("processing upgrade_1")
            if self.save_mats:
                np.savetxt(mat_prefix + ".upgrade_1.dat",
                           upgrade_1.T.x,
                           fmt="%15.6e")
            upgrade_1 = upgrade_1.to_dataframe()
            upgrade_1.index.name = "parnme"
            upgrade_1 = upgrade_1.T
            upgrade_1.index = [int(i) for i in upgrade_1.index]
            upgrade_1.to_csv(self.pst.filename+".upgrade_1.{0:04d}.csv".\
                               format(self.iter_num))
            if upgrade_1.isnull().values.any():
                self.logger.lraise("NaNs in upgrade_1")
            self.logger.log("processing upgrade_1")

            #print(upgrade_1.isnull().values.any())
            #print(parensemble_cur_lam.index)
            #print(upgrade_1.index)
            parensemble_cur_lam += upgrade_1

            # parameter-based upgrade portion
            if not use_approx and self.iter_num > 1:
                #if True:
                self.logger.log("building upgrade_2 matrix")
                par_diff = (self.parensemble - self.parensemble_0.loc[self.parensemble.index,:]).\
                    as_pyemu_matrix().T
                x4 = self.Am.T * self.parcov_inv_sqrt * par_diff
                x5 = self.Am * x4
                x6 = scaled_delta_par.T * x5
                x7 = v * scaled_ident * v.T * x6
                ug2_mat = -1.0 * (self.parcov_inv_sqrt * scaled_delta_par * x7)
                upgrade_2 = ug2_mat.to_dataframe()
                upgrade_2.index.name = "parnme"
                upgrade_2 = upgrade_2.T
                upgrade_2.to_csv(self.pst.filename+".upgrade_2.{0:04d}.csv".\
                                   format(self.iter_num))
                upgrade_2.index = [int(i) for i in upgrade_2.index]

                if self.save_mats:
                    np.savetxt(mat_prefix + ".scaled_par_resid.dat",
                               par_diff.x,
                               fmt="%15.6e")
                    np.savetxt(mat_prefix + ".x4.dat", x4.x, fmt="%15.6e")
                    np.savetxt(mat_prefix + ".x5.dat", x5.x, fmt="%15.6e")
                    np.savetxt(mat_prefix + ".x6.dat", x6.x, fmt="%15.6e")
                    np.savetxt(mat_prefix + ".x7.dat", x7.x, fmt="%15.6e")
                    np.savetxt(mat_prefix + ".upgrade_2.dat",
                               ug2_mat.T.x,
                               fmt="%15.6e")

                if upgrade_2.isnull().values.any():
                    self.logger.lraise("NaNs in upgrade_2")

                parensemble_cur_lam += upgrade_2
                self.logger.log("building upgrade_2 matrix")
            self.logger.log("enforcing bounds")
            parensemble_cur_lam.enforce(self.enforce_bounds)
            self.logger.log("enforcing bounds")

            self.logger.log("filling fixed parameters")
            #fill in fixed pars with initial values
            fi = parensemble_cur_lam.fixed_indexer
            li = parensemble_cur_lam.log_indexer
            log_values = self.pst.parameter_data.loc[:, "parval1"].copy()
            log_values.loc[li] = log_values.loc[li].apply(np.log10)
            fixed_vals = log_values.loc[fi]

            for fname, fval in zip(fixed_vals.index, fixed_vals.values):
                # if fname not in df.columns:
                #    continue
                # print(fname)
                parensemble_cur_lam.loc[:, fname] = fval
            self.logger.log("filling fixed parameters")
            # this is for testing failed runs on upgrade testing
            # works with the 10par_xsec smoother test
            #parensemble_cur_lam.iloc[:,:] = -1000000.0

            # some hackery - we lose track of the transform flag here, but just
            # know it is transformed.  Need to create dataframe here because
            # pd.concat doesn't like par ensembles later
            paren_lam.append(pd.DataFrame(parensemble_cur_lam.loc[:, :]))
            self.logger.log("calcs for  lambda {0}".format(cur_lam_mult))

        if calc_only:
            return

        # subset if needed
        # and combine lambda par ensembles into one par ensemble for evaluation
        if run_subset is not None and run_subset < self.parensemble.shape[0]:
            #subset_idx = ["{0:d}".format(i) for i in np.random.randint(0,self.parensemble.shape[0]-1,run_subset)]
            subset_idx = self.parensemble.iloc[:run_subset, :].index.values
            self.logger.statement("subset idxs: " +
                                  ','.join([str(s) for s in subset_idx]))

            # more tracking of transformed - just know it! Creating dataframes...
            paren_lam_subset = [pe.loc[subset_idx, :] for pe in paren_lam]
            paren_combine = pd.concat(paren_lam_subset, ignore_index=True)
            paren_lam_subset = None
        else:
            subset_idx = self.parensemble.index.values
            paren_combine = pd.concat(paren_lam, ignore_index=True)


        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        # back to par ensemble and know it is transformed
        paren_combine = ParameterEnsemble.from_dataframe(df=paren_combine,
                                                         pst=self.pst,
                                                         istransformed=True)
        failed_runs, obsen_combine = self._calc_obs(paren_combine)
        self.logger.log("evaluating ensembles for lambdas : {0}".\
                        format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
        paren_combine = None

        if failed_runs is not None and len(
                failed_runs) == obsen_combine.shape[0]:
            self.logger.lraise("all runs failed - cannot continue")

        # unpack lambda obs ensembles from combined obs ensemble
        nrun_per_lam = self.obsensemble.shape[0]
        if run_subset is not None:
            nrun_per_lam = run_subset
        obsen_lam = []

        for i in range(len(lam_vals)):
            sidx = i * nrun_per_lam
            eidx = sidx + nrun_per_lam
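            # lambda i's results occupy rows [i*nrun_per_lam, (i+1)*nrun_per_lam)
            # of the combined obs ensemble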
            oe = ObservationEnsemble.from_dataframe(
                df=obsen_combine.iloc[sidx:eidx, :].copy(), pst=self.pst)
            oe.index = subset_idx
            # check for failed runs in this set - drop failed runs from obs ensembles
            if failed_runs is not None:
                failed_runs_this = np.array(
                    [f for f in failed_runs if f >= sidx and f < eidx]) - sidx
                if len(failed_runs_this) > 0:
                    if len(failed_runs_this) == oe.shape[0]:
                        self.logger.warn(
                            "all runs failed for lambda {0}".format(
                                lam_vals[i]))
                    else:
                        self.logger.warn("{0} run failed for lambda {1}".\
                                         format(len(failed_runs_this),lam_vals[i]))
                    oe.iloc[failed_runs_this, :] = np.NaN
                    oe = oe.dropna()
                    paren_lam[i].iloc[failed_runs_this, :] = np.NaN
                    paren_lam[i] = ParameterEnsemble.from_dataframe(
                        df=paren_lam[i].dropna(), pst=self.pst,
                        istransformed=True)

            # don't drop bad reals here, instead, mask bad reals in the lambda
            # selection and drop later
            # if self.drop_bad_reals is not None:
            #     assert isinstance(drop_bad_reals, float)
            #     drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
            #     run_ids = self.obsensemble.index.values
            #     drop_idx = run_ids[drop_idx]
            #     if len(drop_idx) == self.obsensemble.shape[0]:
            #         raise Exception("dropped all realizations as 'bad'")
            #     if len(drop_idx) > 0:
            #         self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
            #                          format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
            #         self.parensemble.loc[drop_idx, :] = np.NaN
            #         self.parensemble = self.parensemble.dropna()
            #         self.obsensemble.loc[drop_idx, :] = np.NaN
            #         self.obsensemble = self.obsensemble.dropna()
            #
            #         self.current_phi_vec = self._calc_phi_vec(self.obsensemble)

            obsen_lam.append(oe)
        obsen_combine = None

        # here is where we need to select out the "best" lambda par and obs
        # ensembles

        #phi_vecs = [self._calc_phi_vec(obsen) for obsen in obsen_lam]
        #phi_vecs_reg = [self._calc_regul_phi_vec(paren) for paren in paren_lam]
        #if self.regul_factor > 0.0:
        #    for i,(pv,prv) in enumerate(zip(phi_vecs,phi_vecs_reg)):
        #        phi_vecs[i] = pv + (prv * self.regul_factor)
        self.logger.log("calc lambda phi vectors")
        phi_vecs = [
            self.phi.get_meas_and_regul_phi(oe, pe.loc[oe.index, :])
            for oe, pe in zip(obsen_lam, paren_lam)
        ]
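        # each entry of phi_vecs is a (measurement_phi, regularization_phi)
        # pair of per-realization vectors; pe is sliced to oe.index so both
        # are aligned on the realizations that survived their runs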
        self.logger.log("calc lambda phi vectors")
        if self.drop_bad_reals is not None:
            for i, (meas_pv, regul_pv) in enumerate(phi_vecs):
                #for testing the drop_bad_reals functionality
                #pv[[0,3,7]] = self.drop_bad_reals + 1.0
                regul_pv = regul_pv.copy()
                regul_pv[meas_pv > self.drop_bad_reals] = np.NaN
                regul_pv = regul_pv[~np.isnan(regul_pv)]
                meas_pv[meas_pv > self.drop_bad_reals] = np.NaN
                meas_pv = meas_pv[~np.isnan(meas_pv)]
                if len(meas_pv) == 0:
                    #raise Exception("all realization for lambda {0} dropped as 'bad'".\
                    #                format(lam_vals[i]))
                    self.logger.warn(
                        "all realizations for lambda {0} marked as 'bad'".
                        format(lam_vals[i]))
                    meas_pv = np.zeros(obsen_lam[0].shape[0]) + 1.0e+30
                    regul_pv = np.zeros(obsen_lam[0].shape[0]) + 1.0e+30
                phi_vecs[i] = (meas_pv, regul_pv)
        mean_std_meas = [(pv[0].mean(), pv[0].std()) for pv in phi_vecs]
        mean_std_regul = [(pv[1].mean(), pv[1].std()) for pv in phi_vecs]
        update_pars = False
        update_lambda = False
        self.logger.statement("**************************")
        # self.logger.statement(str(datetime.now()))
        self.logger.statement("lambda testing summary")
        self.logger.statement("total runs:{0}".format(self.total_runs))
        self.logger.statement("iteration: {0}".format(self.iter_num))
        self.logger.statement("current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}". \
                              format(self.current_lambda,
                                     self.last_best_mean, self.last_best_std))

        # accept a new best if it's within 10% of the last best
        best_mean = self.last_best_mean * 1.1
        best_std = self.last_best_std * 1.1
        best_i = 0
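        # acceptance rule: a candidate is accepted if its combined
        # (meas + regul_factor * regul) mean phi beats 110% of the last
        # best; lambda itself is only reduced if the combined std improves too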
        for i, ((mm, ms),
                (rm, rs)) in enumerate(zip(mean_std_meas, mean_std_regul)):
            self.logger.statement(
                " tested lambda:{0:15.6G}, meas mean:{1:15.6G}, meas std:{2:15.6G}"
                .format(self.current_lambda * lambda_mults[i], mm, ms))
            self.logger.statement("{0:30s}regul mean:{1:15.6G}, regul std:{2:15.6G}".\
                                  format(' ',rm,rs))
            m = mm + (self.regul_factor * rm)
            s = ms + (self.regul_factor * rs)
            if m < best_mean:
                update_pars = True
                best_mean = m
                best_i = i
                if s < best_std:
                    update_lambda = True
                    best_std = s
        if np.isnan(best_mean):
            self.logger.lraise("best mean = NaN")
        if np.isnan(best_std):
            self.logger.lraise("best std = NaN")

        if not update_pars:
            self.current_lambda *= max(lambda_mults) * 10.0
            self.current_lambda = min(self.current_lambda, 100000)
            self.logger.statement("not accepting iteration, increased lambda:{0}".\
                  format(self.current_lambda))
        else:
            #more transformation status hard coding - ugly
            self.parensemble = ParameterEnsemble.from_dataframe(
                df=paren_lam[best_i], pst=self.pst, istransformed=True)
            if run_subset is not None:
                failed_runs, self.obsensemble = self._calc_obs(
                    self.parensemble)
                if failed_runs is not None:
                    self.logger.warn("dropping failed realizations")
                    self.parensemble.loc[failed_runs, :] = np.NaN
                    self.parensemble = self.parensemble.dropna()
                    self.obsensemble.loc[failed_runs, :] = np.NaN
                    self.obsensemble = self.obsensemble.dropna()

                self.phi.update()
                best_mean = self.phi.comp_phi.mean()
                best_std = self.phi.comp_phi.std()
            else:
                self.obsensemble = obsen_lam[best_i]
                # reindex parensemble in case any runs failed
                self.parensemble = ParameterEnsemble.from_dataframe(
                    df=self.parensemble.loc[self.obsensemble.index],
                    pst=self.pst,
                    istransformed=self.parensemble.istransformed)
                self.phi.update()
            if self.drop_bad_reals is not None:
                # for testing drop_bad_reals functionality
                # self.current_phi_vec[::2] = self.drop_bad_reals + 1.0
                #drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
                drop_idx = np.argwhere(
                    self.phi.comp_phi > self.drop_bad_reals).flatten()
                run_ids = self.obsensemble.index.values
                drop_idx = run_ids[drop_idx]
                if len(drop_idx) > self.obsensemble.shape[0] - 3:
                    raise Exception("dropped too many realizations as 'bad'")
                if len(drop_idx) > 0:
                    self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
                                     format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
                    self.parensemble.loc[drop_idx, :] = np.NaN
                    self.parensemble = self.parensemble.dropna()
                    self.obsensemble.loc[drop_idx, :] = np.NaN
                    self.obsensemble = self.obsensemble.dropna()

                    self.phi.update()
                    best_mean = self.phi.comp_phi.mean()
                    best_std = self.phi.comp_phi.std()

            self.phi.report(cur_lam=self.current_lambda * lambda_mults[best_i])

            self.logger.statement("   best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda*lambda_mults[best_i],
                         best_mean,best_std))
            #self.logger.statement("   actual mean phi: {0:15.6G}".format(float(self.current_actual_phi.mean())))
            self.last_best_mean = best_mean
            self.last_best_std = best_std

        if update_lambda:
            # be aggressive
            self.current_lambda *= (lambda_mults[best_i] * 0.75)
            # but don't let lambda get too small
            self.current_lambda = max(self.current_lambda, 0.00001)
            self.logger.statement("updating lambda: {0:15.6G}".\
                  format(self.current_lambda ))

        self.logger.statement("**************************\n")
        self.parensemble.to_csv(self.pst.filename+self.paren_prefix.\
                                    format(self.iter_num))
        self.obsensemble.to_csv(self.pst.filename+self.obsen_prefix.\
                                    format(self.iter_num))
        if self.raw_sweep_out is not None:
            self.raw_sweep_out.to_csv(self.pst.filename+"_sweepraw{0}.csv".\
                                        format(self.iter_num))
        self.logger.log("iteration {0}".format(self.iter_num))
Example #16
    def update(self,lambda_mults=[1.0],localizer=None,run_subset=None):

        self.iter_num += 1
        if not self.__initialized:
            raise Exception("must call initialize() before update()")

        scaled_delta_obs = self._calc_delta_obs(self.obsensemble)
        scaled_delta_par = self._calc_delta_par(self.parensemble)

        u,s,v = scaled_delta_obs.pseudo_inv_components()

        obs_diff = self._get_residual_matrix(self.obsensemble)

        if run_subset is not None:
            subset_idx = ["{0:d}".format(i) for i in np.random.randint(0,self.num_reals-1,run_subset)]
            print("subset idxs: " + ','.join(subset_idx))

        mean_lam,std_lam,paren_lam,obsen_lam = [],[],[],[]
        for ilam,cur_lam_mult in enumerate(lambda_mults):

            parensemble_cur_lam = self.parensemble.copy()

            cur_lam = self.current_lambda * cur_lam_mult

            scaled_ident = Cov.identity_like(s) * (cur_lam+1.0)
            scaled_ident += s**2
            scaled_ident = scaled_ident.inv

            # build up this matrix as a single element so we can apply
            # localization
            upgrade_1 = -1.0 * (self.half_parcov_diag * scaled_delta_par) *\
                        v * s * scaled_ident * u.T
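            # algebraically: upgrade_1 = -C_p^(1/2) dP V S (S^2 + (1+lam) I)^(-1) U^T,
            # applied to the scaled residuals below; the (1+lam) term damps
            # the step, Levenberg-Marquardt style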

            # apply localization
            #print(cur_lam,upgrade_1)
            if localizer is not None:
                upgrade_1.hadamard_product(localizer)

            # apply residual information
            upgrade_1 *= (self.obscov_inv_sqrt * obs_diff.T)

            upgrade_1 = upgrade_1.to_dataframe()
            upgrade_1.index.name = "parnme"
            upgrade_1 = upgrade_1.T
            upgrade_1.to_csv(self.pst.filename+".upgrade_1.{0:04d}.csv".\
                               format(self.iter_num))
            parensemble_cur_lam += upgrade_1

            # parameter-based upgrade portion
            if not self.use_approx and self.iter_num > 1:
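                # this second term anchors the update to the prior ensemble
                # through the restart matrix Am; it is skipped in the first
                # iteration and whenever the approximate form is used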
                par_diff = (self.parensemble - self.parensemble_0).\
                    as_pyemu_matrix().T
                x4 = self.Am.T * self.half_parcov_diag * par_diff
                x5 = self.Am * x4
                x6 = scaled_delta_par.T * x5
                x7 = v * scaled_ident * v.T * x6
                upgrade_2 = -1.0 * (self.half_parcov_diag *
                                   scaled_delta_par * x7).to_dataframe()

                upgrade_2.index.name = "parnme"
                upgrade_2.T.to_csv(self.pst.filename+".upgrade_2.{0:04d}.csv".\
                                   format(self.iter_num))
                parensemble_cur_lam += upgrade_2.T
            parensemble_cur_lam.enforce()
            paren_lam.append(parensemble_cur_lam)
            if run_subset is not None:
                #phi_series = pd.Series(data=self.current_phi_vec)
                #phi_series.sort_values(inplace=True,ascending=False)
                #subset_idx = ["{0:d}".format(i) for i in phi_series.index.values[:run_subset]]

                parensemble_subset = parensemble_cur_lam.loc[subset_idx,:]
                obsensemble_cur_lam = self._calc_obs(parensemble_subset)
            else:
                obsensemble_cur_lam = self._calc_obs(parensemble_cur_lam)
            #print(obsensemble_cur_lam.head())
            obsen_lam.append(obsensemble_cur_lam)


        # here is where we need to select out the "best" lambda par and obs
        # ensembles
        print("\n**************************")
        print(str(datetime.now()))
        print("total runs:{0}".format(self.total_runs))
        print("iteration: {0}".format(self.iter_num))
        print("current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda,
                         self.last_best_mean,self.last_best_std))
        phi_vecs = [self._calc_phi_vec(obsen) for obsen in obsen_lam]
        mean_std = [(pv.mean(),pv.std()) for pv in phi_vecs]
        update_pars = False
        update_lambda = False
        # accept a new best if it's within 10% of the last best
        best_mean = self.last_best_mean * 1.1
        best_std = self.last_best_std * 1.1
        best_i = 0
        for i,(m,s) in enumerate(mean_std):
            print(" tested lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda * lambda_mults[i],m,s))
            if m < best_mean:
                update_pars = True
                best_mean = m
                best_i = i
                if s < best_std:
                    update_lambda = True
                    best_std = s

        if not update_pars:
            self.current_lambda *= max(lambda_mults) * 3.0
            self.current_lambda = min(self.current_lambda,100000)
            print("not accepting iteration, increased lambda:{0}".\
                  format(self.current_lambda))

        else:

            self.parensemble = paren_lam[best_i]
            if run_subset is not None:
                self.obsensemble = self._calc_obs(self.parensemble)
                self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
                self._phi_report(self.current_phi_vec,self.current_lambda * lambda_mults[best_i])
                best_mean = self.current_phi_vec.mean()
                best_std = self.current_phi_vec.std()
            else:
                self.obsensemble = obsen_lam[best_i]
                self._phi_report(phi_vecs[best_i],self.current_lambda * lambda_mults[best_i])
                self.current_phi_vec = phi_vecs[best_i]

            print("\n" + "   best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
                  format(self.current_lambda*lambda_mults[best_i],
                         best_mean,best_std))
            self.last_best_mean = best_mean
            self.last_best_std = best_std

        if update_lambda:
            # be aggressive - reduce the best lambda by 25%
            self.current_lambda *= (lambda_mults[best_i] * 0.75)
            # but don't let lambda get too small
            self.current_lambda = max(self.current_lambda,0.001)
            print("updating lambda: {0:15.6G}".\
                  format(self.current_lambda ))


        print("**************************\n")

        self.parensemble.to_csv(self.pst.filename+self.paren_prefix.\
                                    format(self.iter_num))

        self.obsensemble.to_csv(self.pst.filename+self.obsen_prefix.\
                                    format(self.iter_num))
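
One economy shared by both versions of update() deserves a callout: when run_subset is given, each trial lambda is scored on only a subset of realizations, and only the winning lambda's ensemble is rerun at full size. A minimal sketch of that control flow, with the upgrade, model-run, and phi callables supplied by the caller (they are assumptions for illustration, not pyemu API):

import numpy as np

def test_lambdas_with_subset(paren, lambda_mults, run_subset,
                             apply_upgrade, run_model, calc_phi):
    # score each trial lambda on the first run_subset realizations only
    subset_idx = paren.index[:run_subset]
    trial_parens = [apply_upgrade(paren, m) for m in lambda_mults]
    mean_phis = [calc_phi(run_model(tp.loc[subset_idx, :])).mean()
                 for tp in trial_parens]
    best_i = int(np.argmin(mean_phis))
    # one full-size model run, for the winner only
    full_obsen = run_model(trial_parens[best_i])
    return best_i, trial_parens[best_i], full_obsen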
Example #17
def mat_test():
    import os
    import numpy as np
    from pyemu.mat import Jco, Cov
    test_dir = os.path.join("mat")
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)
    arr = np.arange(0, 12)
    arr.resize(4, 3)
    first = Jco(x=arr,
                col_names=["p1", "p2", "p3"],
                row_names=["o1", "o2", "o3", "o4"])
    first.to_binary(os.path.join(test_dir, "test.bin"))
    first.from_binary(os.path.join(test_dir, "test.bin"))

    first = Jco(x=np.ones((4, 3)),
                col_names=["p1", "p2", "p3"],
                row_names=["o1", "o2", "o3", "o4"])
    second = Cov(x=np.ones((3, 3)) + 1.0,
                 names=["p1", "p2", "p3"],
                 isdiagonal=False)
    third = Cov(x=np.ones((4, 1)) + 2.0,
                names=["o1", "o2", "o3", "o4"],
                isdiagonal=True)
    third.to_uncfile(os.path.join(test_dir, "test.unc"),
                     covmat_file=os.path.join(test_dir, "cov.mat"))
    third.from_uncfile(os.path.join(test_dir, "test.unc"))

    si = second.identity
    result = second - second.identity
    # add and sub
    newfirst = first.get(row_names=["o1"], col_names="p1")
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst
    newfirst = first.get(row_names=["o1", "o2"], col_names="p1")
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst
    newfirst = first.get(row_names=["o1", "o2"], col_names=["p1", "p3"])
    result = newfirst - first
    result = first - newfirst
    result = newfirst + first
    result = first + newfirst

    # mul test
    result = first.T * third * first
    result = first * second

    newfirst = first.get(col_names="p1")
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(col_names=["p1", "p2"])
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(row_names=["o1"])
    result = newfirst * second
    result = second * newfirst.T

    newfirst = first.get(row_names=["o1", "o2"])
    result = newfirst * second
    result = second * newfirst.T
    result = newfirst.T * third * newfirst

    newthird = third.get(row_names=["o1"])
    result = first.T * newthird * first

    # drop testing
    second.drop("p2", axis=0)
    assert second.shape == (2, 2)

    third.drop("o1", axis=1)
    assert third.shape == (3, 3)

    first.drop("p1", axis=1)
    assert first.shape == (4, 2)

    first.drop("o4", axis=0)
    assert first.shape == (3, 2)
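
What mat_test() is really exercising is that pyemu matrix objects carry row and column names and align on them, so the get/add/multiply combinations above resolve by name rather than by raw position. A small sketch of that behavior (the values are illustrative):

import numpy as np
from pyemu.mat import Jco, Cov

jco = Jco(x=np.ones((2, 2)),
          row_names=["o1", "o2"], col_names=["p1", "p2"])
parcov = Cov(x=np.eye(2) * 2.0, names=["p1", "p2"], isdiagonal=False)
# multiplication aligns on the shared parameter names
jtj = jco.T * jco                  # 2x2 result, indexed by p1,p2
prop = jco * parcov * jco.T        # propagated to observation space: o1,o2
print(prop.to_dataframe())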
Example #18
    def __init__(self,
                 pst,
                 parcov=None,
                 obscov=None,
                 num_slaves=0,
                 use_approx_prior=True,
                 submit_file=None,
                 verbose=False,
                 port=4004,
                 slave_dir="template"):
        self.logger = Logger(verbose)
        if verbose is not False:
            self.logger.echo = True
        self.num_slaves = int(num_slaves)
        if submit_file is not None:
            if not os.path.exists(submit_file):
                self.logger.lraise(
                    "submit_file {0} not found".format(submit_file))
        elif num_slaves > 0:
            if not os.path.exists(slave_dir):
                self.logger.lraise(
                    "template dir {0} not found".format(slave_dir))

        self.slave_dir = slave_dir
        self.submit_file = submit_file
        self.port = int(port)
        self.paren_prefix = ".parensemble.{0:04d}.csv"
        self.obsen_prefix = ".obsensemble.{0:04d}.csv"

        if isinstance(pst, str):
            pst = Pst(pst)
        assert isinstance(pst, Pst)
        self.pst = pst
        self.sweep_in_csv = pst.pestpp_options.get("sweep_parameter_csv_file",
                                                   "sweep_in.csv")
        self.sweep_out_csv = pst.pestpp_options.get("sweep_output_csv_file",
                                                    "sweep_out.csv")
        if parcov is not None:
            assert isinstance(parcov, Cov)
        else:
            parcov = Cov.from_parameter_data(self.pst)
        if obscov is not None:
            assert isinstance(obscov, Cov)
        else:
            obscov = Cov.from_observation_data(pst)

        self.parcov = parcov
        self.obscov = obscov

        # if restart_iter > 0:
        #     self.restart_iter = restart_iter
        #     paren = self.pst.filename+self.paren_prefix.format(restart_iter)
        #     assert os.path.exists(paren),\
        #         "could not find restart par ensemble {0}".format(paren)
        #     obsen0 = self.pst.filename+self.obsen_prefix.format(0)
        #     assert os.path.exists(obsen0),\
        #         "could not find restart obs ensemble 0 {0}".format(obsen0)
        #     obsen = self.pst.filename+self.obsen_prefix.format(restart_iter)
        #     assert os.path.exists(obsen),\
        #         "could not find restart obs ensemble {0}".format(obsen)
        #     self.restart = True

        self.__initialized = False
        self.iter_num = 0
        self.raw_sweep_out = None
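
For orientation, a minimal driver for the class this constructor belongs to might look like the sketch below. The EnsembleSmoother name, the import path, and the file names are assumptions for illustration - check the pyemu version in use for the actual entry points and the initialize()/update() signatures:

from pyemu.prototypes.smoother import EnsembleSmoother  # assumed import path

es = EnsembleSmoother("pest.pst",        # assumed control file name
                      num_slaves=10,     # run 10 local workers
                      slave_dir="template",
                      port=4004,
                      verbose=True)
es.initialize(num_reals=100)             # draw the prior par/obs ensembles
for _ in range(3):                       # a few smoother iterations
    es.update(lambda_mults=[0.1, 1.0, 10.0], run_subset=20)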