Esempio n. 1
0
    def PrepareCalibration(self, ignore_obs=False):
        """Set up the run directory and inputs for the calibration run.

        Gathers forcings for the calibration window, builds the run
        directory, namelist, submit script, and analysis script, and
        (unless ``ignore_obs``) downloads the USGS gauge observations
        and records them in the database.
        """
        logger.info('~~~~ Prepare Calibration directory ~~~~')

        # Forcing files covering the calibration window
        forcing_links = self.GatherForcings(self.calib_start_date,
                                            self.calib_end_date)

        # Run directory, namelist, and submission script
        self.CreateRunDir(self.clbdirc, forcing_links)
        self.CreateNamelist(self.clbdirc, self.calib_start_date,
                            self.calib_end_date)
        self.CreateSubmitScript(self.clbdirc)

        if ignore_obs:
            logger.info("ignoring observations (maybe they don't exist)")
        else:
            # Fetch the USGS gauge observations for the same window...
            logger.info("download observations from internet...")
            self.GatherObs(self.clbdirc, self.calib_start_date,
                           self.calib_end_date)

            # ...and record them in the calibration database
            logger.info("log the observation files to the database")
            obs_q, lat, lon = dbl.readObsFiles(self.clbdirc)
            dbl.logDataframe(obs_q, 'qObserved', self.database)

        self.CreateAnalScript(self.clbdirc, 'Calibration.db', self.iteration)
        logger.info(self.clbdirc)
Esempio n. 2
0
    def logAquifer(self, niter):
        """Log the change in aquifer water storage to the database.

        Takes the difference between the last and first model states of
        WT (aquifer) and WA (aquifer + saturated soil), converts
        kg m^-2 per 1 km^2 grid cell --> kg --> m^3 --> acre-feet, and
        appends one row per variable to the WATER_BALANCE table.

        Parameters
        ----------
        niter : iteration number recorded with each row.
        """
        import pandas as pd  # concat replaces the removed DataFrame.append

        df = self.df.copy()

        grid_cell_area_m2 = 1E6  # each grid cell is 1 km^2 == 1E6 m^2
        h2o_density = 1E3        # kg/m^3

        # kg m^-2 * m^2 --> kg of water per cell
        WT = (self.last['WT'][0, :, :] -
              self.first['WT'][0, :, :]) * grid_cell_area_m2
        WA = (self.last['WA'][0, :, :] -
              self.first['WA'][0, :, :]) * grid_cell_area_m2

        # kg --> m^3 --> acre-feet (m3_to_acrefeet is a module-level constant)
        WT_vol = WT.sum() / h2o_density * m3_to_acrefeet
        WA_vol = WA.sum() / h2o_density * m3_to_acrefeet

        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        WT_row = [
            niter, 'WT',
            float(WT_vol.values), 'Water in aquifer', 'acre_feet',
            'area_total', self.drange
        ]
        WA_row = [
            niter, 'WA',
            float(WA_vol.values), 'Water in aquifer and sat soil',
            'acre_feet', 'area_total', self.drange
        ]

        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement. Assumes dfCreator returns a single-row
        # DataFrame -- TODO confirm against dfCreator's definition.
        df = pd.concat([df, dfCreator(WT_row)], ignore_index=True)
        df = pd.concat([df, dfCreator(WA_row)], ignore_index=True)
        df.set_index('Iteration', inplace=True)
        dbl.logDataframe(df, 'WATER_BALANCE', str(self.dsPath))
Esempio n. 3
0
    def LogData(self):
        """Write the validation metrics for this iteration to the database."""
        # Assemble one record: iteration id, objective, improvement,
        # plus whatever metrics live in self.performance
        record = {
            'Iteration': [str(self.iteration)],
            'Objective': [self.obj],
            'Improvement': [self.improvement]
        }
        record.update(self.performance)
        print(record)

        frame = pd.DataFrame(record)
        frame.set_index('Iteration', inplace=True)
        dbl.logDataframe(frame, 'Validation', self.valdirc)
Esempio n. 4
0
 def logLSM(self, niter):
     """Log accumulated land-surface-model water terms to WATER_BALANCE.

     For each variable in self.landDic, accumulate it via accVar and
     append one (acre_feet, area_total) row tagged with the iteration.

     Parameters
     ----------
     niter : iteration number recorded with each row.
     """
     import pandas as pd  # concat replaces the removed DataFrame.append

     df = self.df.copy()
     # iterate key/description pairs directly instead of .keys() + lookup
     for key, description in self.landDic.items():
         output = self.accVar(key)
         row = [
             niter, key, output, description, 'acre_feet',
             'area_total', self.drange
         ]
         # DataFrame.append was removed in pandas 2.0; assumes dfCreator
         # returns a single-row DataFrame -- TODO confirm
         df = pd.concat([df, dfCreator(row)], ignore_index=True)
     df.set_index('Iteration', inplace=True)
     dbl.logDataframe(df, 'WATER_BALANCE', str(self.dsPath))
Esempio n. 5
0
 def logRouting(self, iters):
     """Log channel-routing water volumes (per point) to WATER_BALANCE.

     Integrates each rate variable in self.Qdic over the evaluation
     window and appends one (acre_feet, point) row per variable.

     Parameters
     ----------
     iters : iteration number recorded with each row.
     """
     import pandas as pd  # concat replaces the removed DataFrame.append

     drange = '{} : {}'.format(str(self.eval_start_date),
                               str(self.eval_end_date))
     df = self.df.copy()
     # open the chan routing files; iterate key/description pairs directly
     for key, description in self.Qdic.items():
         output = self.integrateRate(key)
         row = [
             iters, key, output, description, 'acre_feet', 'point',
             drange
         ]
         # DataFrame.append was removed in pandas 2.0; assumes dfCreator
         # returns a single-row DataFrame -- TODO confirm
         df = pd.concat([df, dfCreator(row)], ignore_index=True)
     df.set_index('Iteration', inplace=True)
     dbl.logDataframe(df, 'WATER_BALANCE', str(self.dsPath))
Esempio n. 6
0
    def PrepareValidation(self):
        """Build the validation run tree: a baseline and a calibrated run.

        /Validation
            |__ baseline/
            |__ calibrated/
        """
        logger.info('~~~~ Prepare Validation/baseline directory ~~~~')
        self.valdirc.mkdir(exist_ok=True)

        # Forcing files spanning the validation window (shared by both runs)
        forcing_links = self.GatherForcings(self.val_start_date,
                                            self.val_end_date)

        # --- the 'Baseline' run directory ---
        self.CreateRunDir(self.baseline, forcing_links)
        self.CreateNamelist(self.baseline, self.val_start_date,
                            self.val_end_date)
        self.CreateSubmitScript(self.baseline)
        self.GatherObs(self.baseline, self.val_start_date, self.val_end_date)
        self.CreateAnalScript(self.baseline, self.database, 0)

        # Record the gauge observations once -- both runs share the database,
        # so there is no need to download them twice
        obsQ, lat, lon = dbl.readObsFiles(self.baseline)
        dbl.logDataframe(obsQ, 'qObserved', self.database)

        # --- the 'Calibrated' run directory ---
        logger.info('~~~~ Prepare Validation/calibrated directory ~~~~')
        self.CreateRunDir(self.calibrated, forcing_links)
        self.CreateNamelist(self.calibrated, self.val_start_date,
                            self.val_end_date)
        self.CreateSubmitScript(self.calibrated)
        self.GatherObs(self.calibrated, self.val_start_date, self.val_end_date)
        self.CreateAnalScript(self.calibrated, self.database, 1)

        # Attach the best parameter set found during calibration to the
        # domain files in the 'calibrated' directory
        logger.info('calling get_best_parameters')
        self.get_best_parameters()
Esempio n. 7
0
    def LogParameters(self):
        """Record the parameter values for this iteration.

        Copies the parameter dataframe, tags it with the iteration
        number, strips bookkeeping columns, and writes it to the
        'parameters' table so each iteration's calibrated values are
        preserved.
        """
        frame = self.df.copy()
        frame['iteration'] = str(self.iteration)

        # these columns are internal bookkeeping, not calibration results
        frame = frame.drop(columns=['file', 'dims', 'nextValue'])

        # Log the data frame to the Calib. sql database
        dbl.logDataframe(frame, 'parameters', self.database)
Esempio n. 8
0
    def LogPerformance(self):
        """Append this iteration's objective/improvement/performance
        metrics to the 'Calibration' table."""
        # one-row record: bookkeeping fields first, then the metrics
        record = {
            'iteration': [str(self.iteration)],
            'objective': [self.objective],
            'improvement': [self.improvement]
        }
        record.update(self.performance)

        frame = pd.DataFrame(record).set_index('iteration')

        # Log the dataframe to the Calib.. sql database
        dbl.logDataframe(frame, 'Calibration', self.database)