Example #1
    def run(
        self,
        model,
        targets=['reach_outvolume'],
        verbose=False,
    ):
        """
        Creates a copy of the base model, adjusts a parameter value, runs
        the simulation, calculates and returns the perturbation.
        """

        # build the input files and run

        model.build_wdminfile()

        if self.submodel is None:

            model.build_uci(targets,
                            self.start,
                            self.end,
                            atemp=self.atemp,
                            snow=self.snow,
                            hydrology=self.hydrology)

        else:

            model.build_uci(self.comid,
                            self.start,
                            self.end,
                            atemp=self.atemp,
                            snow=self.snow,
                            hydrology=self.hydrology)

        model.run(verbose=verbose)

        # get the regression information using the postprocessor

        dates = self.start + datetime.timedelta(days=self.warmup), self.end

        # use WDMUtil to get the simulated values

        wdm = WDMUtil()

        f = '{}_out.wdm'.format(model.filename)

        wdm.open(f, 'r')
        dsns = wdm.get_datasets(f)
        staids = [wdm.get_attribute(f, n, 'STAID') for n in dsns]

        data = wdm.get_data(f,
                            dsns[staids.index(self.comid)],
                            start=dates[0],
                            end=dates[1])

        wdm.close(f)

        if model.units == 'Metric': conv = 10**6
        else: conv = 43560

        # the submodel is daily, full model is hourly

        if self.submodel is None:

            sflows = [
                sum(data[i:i + 24]) * conv / 86400
                for i in range(0,
                               len(data) - 23, 24)
            ]

        else:

            sflows = [d * conv / 86400 for d in data]

        stimes = [
            self.start + i * datetime.timedelta(days=1)
            for i in range(self.warmup, (self.end - self.start).days)
        ]

        otimes = self.otimes
        oflows = self.oflows

        # remove points with missing data from both simulated and observed flows

        sflows = [
            sflows[stimes.index(t)] for t, f in zip(otimes, oflows)
            if t in stimes and f is not None
        ]
        oflows = [
            oflows[otimes.index(t)] for t, f in zip(otimes, oflows)
            if f is not None
        ]

        # return the appropriate performance metric

        if self.optimization == 'Nash-Sutcliffe Product':

            # daily log flows

            log_o = [numpy.log(f) for f in oflows]
            log_s = [numpy.log(f) for f in sflows]

            logdNS = (1 - sum(
                (numpy.array(log_s) - numpy.array(log_o))**2) / sum(
                    (numpy.array(log_o) - numpy.mean(log_o))**2))

            # daily NS

            dNS = (1 - sum(
                (numpy.array(sflows) - numpy.array(oflows))**2) / sum(
                    (numpy.array(oflows) - numpy.mean(oflows))**2))

            return dNS * logdNS

        if self.optimization == 'Nash-Sutcliffe Efficiency':

            # daily NS

            dNS = (1 - sum(
                (numpy.array(sflows) - numpy.array(oflows))**2) / sum(
                    (numpy.array(oflows) - numpy.mean(oflows))**2))

            return dNS
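
The two branches above are variants of the Nash-Sutcliffe efficiency, NSE = 1 - sum((s - o)**2) / sum((o - mean(o))**2), computed on daily flows and on log flows. Below is a minimal standalone sketch of the same arithmetic; the nash_sutcliffe helper and the flow values are illustrative, not part of PyHSPF:

import numpy

def nash_sutcliffe(observed, simulated):

    # NSE = 1 - sum((s - o)**2) / sum((o - mean(o))**2); 1 is a perfect fit

    o, s = numpy.array(observed), numpy.array(simulated)

    return 1 - ((s - o)**2).sum() / ((o - o.mean())**2).sum()

oflows = [1.0, 2.0, 4.0, 3.0]    # hypothetical daily observed flows
sflows = [1.1, 1.8, 3.9, 3.2]    # hypothetical daily simulated flows

dNS    = nash_sutcliffe(oflows, sflows)
logdNS = nash_sutcliffe(numpy.log(oflows), numpy.log(sflows))

print(dNS * logdNS)    # the 'Nash-Sutcliffe Product' objective above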
Example #2
File: hunting.py  Project: MachineAi/PyHSPF
def main():

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # tell the extractor to use the metadata file above to find gage data

    nwisextractor.set_metadata(gagefile)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(drainid, VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile,
                                   gagefile = gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output = gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot = masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf): os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + '/data'

    # the data from the export file (*.exp) provided with hspexp need to be 
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = '{}/hunthour/huntobs.exp'.format(directory)

    f = 'temp.wdm'

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, 'r')

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap   = wdm.get_data(f, 111)
    flow   = wdm.get_data(f, 281)

    s, e = wdm.get_dates(f, 106)

    # adjust the time series for HSPF's backward-looking time stepping:
    # prepend a zero to the precipitation, convert from inches to mm, and
    # disaggregate the daily evaporation to hourly

    precip = [0] + [p * 25.4 for p in precip]
    evap   = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, 'rb') as f: w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction = 1., verbose = True)

    # turn on the hydrology modules for the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries('precipitation', 'BWI', s, precip)

    # add evap timeseries with label Beltsville and provided start date 

    hunting.add_timeseries('evaporation', 'Beltsville', s, evap)

    # add flow timeseries with label Hunting, start date, tstep (minutes)

    hunting.add_timeseries('flowgage', 'Hunting', s, flow, tstep = 60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries('precipitation', 'BWI')
    hunting.assign_watershed_timeseries('evaporation', 'Beltsville')

    # find the subbasin identifier for the watershed outlet

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries('flowgage', subbasin, 'Hunting')

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    calibrator = AutoCalibrator(hunting, start, end, hspf)

    calibrator.autocalibrate(calibrated,
                             variables = variables, 
                             optimization = optimization,
                             perturbations = perturbations,
                             parallel = parallel
                             )

    for variable, value in zip(calibrator.variables, calibrator.values):

        print('{:6s} {:5.3f}'.format(variable, value))

    print('\nsaving the calibration results\n')
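
The outlet search in main relies on the watershed's updown dictionary, which maps each subbasin identifier to its downstream neighbor, with 0 marking the outlet. A small illustration with made-up identifiers:

# hypothetical network: 101 flows to 102, 102 to 103, 103 to the outlet

updown = {'101': '102', '102': '103', '103': 0}

# the outlet subbasin is the one whose downstream neighbor is 0

subbasin = [up for up, down in updown.items() if down == 0][0]

print(subbasin)    # '103'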
Example #3
    def ext_targets_block(self, 
                          comid, 
                          year,
                          tcode = 4,
                          tsstep = 1,
                          verbose = False,
                          ):
        """
        Adds the EXT TARGETS block to a UCI file and creates the output WDM 
        file.
        
        tcode is the time code: 2 = minutes, 3 = hours, 4 = days
        tsstep is the time step in tcode units
        
        e.g., tcode = 3, tsstep = 4 is a 4-hour time step

        this method enables a single external target with aggregation that
        isn't possible using the HSPFModel in the core.
        """
        
        lines = ['EXT TARGETS',
                 '<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Volume->' +
                 ' <Member> Tsys Aggr Amd ***',
                 '<Name>   x        <Name> x x<-factor->strg <Name>' +
                 '   x <Name>qf  tem strg strg***']
    
        wdm = WDMUtil(verbose = verbose, messagepath = self.messagepath)
        wdm.open(self.wdmoutfile, 'w')

        # dataset numbers are assigned by reach in order (subject to revision);
        # track the next dsn with a counter

        n = 1

        # since this class is just for calibration of a single gage, only need
        # to keep up with reach outflow volume

        otype  = 'RCHRES'
        group  = 'HYDR'
        var    = 'ROVOL'
        tstype = 'VOL'
        tsform = 1
        idcons = 'ROVOL'
        func   = 'SUM '

        # restrict the rchreses to the single reach matching the gage comid

        reaches = [r for r in self.rchreses if r.subbasin == comid]

        new = self.add_ext_targets(reaches, wdm, year, n, otype, 
                                   group, var, tsform, tstype, idcons, func,
                                   tcode, tsstep)
        lines = lines + new
        n    += len(new)        

        # close the wdmeditor

        wdm.close(self.wdmoutfile)
        wdm.close_message()
    
        # finish up 
    
        lines = lines + ['END EXT TARGETS', '']
    
        return lines
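
Following the docstring, a 4-hour output time step corresponds to tcode = 3 (hours) and tsstep = 4. A hypothetical call, assuming an instance named postprocessor of the class that owns this method and placeholder comid and year values:

# build the EXT TARGETS block with 4-hour aggregation (tcode 3 = hours)

lines = postprocessor.ext_targets_block('5280966', 1988, tcode=3, tsstep=4)

# the returned lines are ready to splice into the UCI file

for line in lines: print(line)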
Example #4
    def run(self, model, targets=["reach_outvolume"], verbose=False):
        """
        Creates a copy of the base model, adjusts a parameter value, runs
        the simulation, calculates and returns the perturbation.
        """

        # build the input files and run

        model.build_wdminfile()

        if self.submodel is None:

            model.build_uci(targets, self.start, self.end, atemp=self.atemp, snow=self.snow, hydrology=self.hydrology)

        else:

            model.build_uci(
                self.comid, self.start, self.end, atemp=self.atemp, snow=self.snow, hydrology=self.hydrology
            )

        model.run(verbose=verbose)

        # get the regression information using the postprocessor

        dates = self.start + datetime.timedelta(days=self.warmup), self.end

        # use WDMUtil to get the simulated values

        wdm = WDMUtil()

        f = "{}_out.wdm".format(model.filename)

        wdm.open(f, "r")
        dsns = wdm.get_datasets(f)
        staids = [wdm.get_attribute(f, n, "STAID") for n in dsns]

        data = wdm.get_data(f, dsns[staids.index(self.comid)], start=dates[0], end=dates[1])

        wdm.close(f)

        if model.units == "Metric":
            conv = 10 ** 6
        else:
            conv = 43560

        # the submodel is daily, full model is hourly

        if self.submodel is None:

            sflows = [sum(data[i : i + 24]) * conv / 86400 for i in range(0, len(data) - 23, 24)]

        else:

            sflows = [d * conv / 86400 for d in data]

        stimes = [self.start + i * datetime.timedelta(days=1) for i in range(self.warmup, (self.end - self.start).days)]

        otimes = self.otimes
        oflows = self.oflows

        # remove points with missing data from both simulated and observed flows

        sflows = [sflows[stimes.index(t)] for t, f in zip(otimes, oflows) if t in stimes and f is not None]
        oflows = [oflows[otimes.index(t)] for t, f in zip(otimes, oflows) if f is not None]

        # return the appropriate performance metric

        if self.optimization == "Nash-Sutcliffe Product":

            # daily log flows

            log_o = [numpy.log(f) for f in oflows]
            log_s = [numpy.log(f) for f in sflows]

            logdNS = 1 - sum((numpy.array(log_s) - numpy.array(log_o)) ** 2) / sum(
                (numpy.array(log_o) - numpy.mean(log_o)) ** 2
            )

            # daily NS

            dNS = 1 - sum((numpy.array(sflows) - numpy.array(oflows)) ** 2) / sum(
                (numpy.array(oflows) - numpy.mean(oflows)) ** 2
            )

            return dNS * logdNS

        if self.optimization == "Nash-Sutcliffe Efficiency":

            # daily NS

            dNS = 1 - sum((numpy.array(sflows) - numpy.array(oflows)) ** 2) / sum(
                (numpy.array(oflows) - numpy.mean(oflows)) ** 2
            )

            return dNS
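
The conv factor turns the simulated reach outflow volumes into average flow rates: in English units each acre-ft is 43560 ft3, so dividing a daily volume by 86400 seconds gives cfs (10**6 appears to play the same role for metric volumes). A sketch of the hourly-to-daily aggregation under that reading, with made-up data:

# hypothetical hourly outflow volumes in acre-ft covering two days

data = [0.5] * 24 + [1.0] * 24

conv = 43560    # ft3 per acre-ft (English units)

# sum each block of 24 hourly volumes, then convert to mean daily cfs

sflows = [sum(data[i:i + 24]) * conv / 86400
          for i in range(0, len(data) - 23, 24)]

print(sflows)    # [6.05, 12.1]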
Example #5
    def ext_targets_block(
        self,
        comid,
        year,
        tcode=4,
        tsstep=1,
        verbose=False,
    ):
        """
        Adds the EXT TARGETS block to a UCI file and creates the output WDM 
        file.
        
        tcode is the time code: 2 = minutes, 3 = hours, 4 = days
        tsstep is the time step in tcode units
        
        e.g., tcode = 3, tsstep = 4 is a 4-hour time step

        this method enables a single external target with aggregation that
        isn't possible using the HSPFModel in the core.
        """

        lines = [
            'EXT TARGETS',
            '<-Volume-> <-Grp> <-Member-><--Mult-->Tran <-Volume->' +
            ' <Member> Tsys Aggr Amd ***',
            '<Name>   x        <Name> x x<-factor->strg <Name>' +
            '   x <Name>qf  tem strg strg***'
        ]

        wdm = WDMUtil(verbose=verbose, messagepath=self.messagepath)
        wdm.open(self.wdmoutfile, 'w')

        # dataset numbers are assigned by reach in order (subject to revision);
        # track the next dsn with a counter

        n = 1

        # since this class is just for calibration of a single gage, only need
        # to keep up with reach outflow volume

        otype = 'RCHRES'
        group = 'HYDR'
        var = 'ROVOL'
        tstype = 'VOL'
        tsform = 1
        idcons = 'ROVOL'
        func = 'SUM '

        # restrict the rchreses to the single reach matching the gage comid

        reaches = [r for r in self.rchreses if r.subbasin == comid]

        new = self.add_ext_targets(reaches, wdm, year, n, otype, group, var,
                                   tsform, tstype, idcons, func, tcode, tsstep)
        lines = lines + new
        n += len(new)

        # close the wdmeditor

        wdm.close(self.wdmoutfile)
        wdm.close_message()

        # finish up

        lines = lines + ['END EXT TARGETS', '']

        return lines
Example #6
File: hunting.py  Project: djibi2/PyHSPF
def preprocess():

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile, gagefile=gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output=gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot=masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf):
        os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + "/data"

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = "calibrated/huntobs.exp".format(directory)

    f = "temp.wdm"

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, "r")

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap = wdm.get_data(f, 111)
    flow = wdm.get_data(f, 281)

    s, e = wdm.get_dates(f, 106)

    # adjust the time series for HSPF's backward-looking time stepping:
    # prepend a zero to the precipitation, convert from inches to mm, and
    # disaggregate the daily evaporation to hourly

    precip = [0] + [p * 25.4 for p in precip]
    evap = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, "rb") as f:
        w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction=1.0, verbose=True)

    # turn on the hydrology modules for the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries("precipitation", "BWI", s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries("evaporation", "Beltsville", s, evap)

    # add flow timeseries with label Hunting, start date, tstep (minutes)

    hunting.add_timeseries("flowgage", "Hunting", s, flow, tstep=60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries("precipitation", "BWI")
    hunting.assign_watershed_timeseries("evaporation", "Beltsville")

    # find the subbasin identifier for the watershed outlet

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries("flowgage", subbasin, "Hunting")

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    with open(calibrated, "wb") as f:
        pickle.dump(hunting, f)
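
preprocess ends by pickling the configured HSPFModel to the calibrated path; a later step can restore it with the matching load. A minimal sketch, assuming the same module-level calibrated variable:

import pickle

# reload the model saved by preprocess() above

with open(calibrated, 'rb') as f: hunting = pickle.load(f)

# the restored instance carries the timeseries and hydrology settings

print(type(hunting))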
Example #7
    def run(
        self,
        model,
        targets=['reach_outvolume'],
        verbose=False,
    ):
        """
        Creates a copy of the base model, adjusts a parameter value, runs
        the simulation, calculates and returns the perturbation.
        """

        # build the input files and run

        model.build_wdminfile()

        if self.submodel is None:

            model.build_uci(targets,
                            self.start,
                            self.end,
                            atemp=self.atemp,
                            snow=self.snow,
                            hydrology=self.hydrology)

        else:

            model.build_uci(self.comid,
                            self.start,
                            self.end,
                            atemp=self.atemp,
                            snow=self.snow,
                            hydrology=self.hydrology)

        model.run(verbose=verbose)

        # regression period

        dates = self.start + datetime.timedelta(days=self.warmup), self.end

        # use WDMUtil to get the simulated values
        # Mengfei 20180501: the WDMUtil interface changed and now requires an
        # explicit message file path
        messagepath = 'C:/BASINS41/bin/Plugins/BASINS/hspfmsg.wdm'
        print('messagepath is changed to:', messagepath)
        wdm = WDMUtil(verbose=True, messagepath=messagepath)
        f = '{}_out.wdm'.format(model.filename)

        wdm.open(f, 'r')
        dsns = wdm.get_datasets(f)
        staids = [wdm.get_attribute(f, n, 'STAID') for n in dsns]
        # Mengfei 20180501: diagnostic output for the dataset lookup
        print('self.comid:', self.comid,
              'staids.index(self.comid):', staids.index(self.comid),
              'dsn:', dsns[staids.index(self.comid)])

        data = wdm.get_data(f,
                            dsns[staids.index(self.comid)],
                            start=dates[0],
                            end=dates[1])
        wdm.close(f)

        if model.units == 'Metric': conv = 10**6
        else: conv = 43560

        # the submodel is daily, full model is hourly

        if self.submodel is None:

            sflows = [
                sum(data[i:i + 24]) * conv / 86400
                for i in range(0,
                               len(data) - 23, 24)
            ]

        else:

            sflows = [d * conv / 86400 for d in data]

        stimes = [
            self.start + i * datetime.timedelta(days=1)
            for i in range(self.warmup, (self.end - self.start).days)
        ]

        otimes = self.otimes
        oflows = self.oflows

        # remove points with missing data from both simulated and observed flows

        sflows = [
            sflows[stimes.index(t)] for t, f in zip(otimes, oflows)
            if t in stimes and f is not None
        ]
        oflows = [
            oflows[otimes.index(t)] for t, f in zip(otimes, oflows)
            if f is not None
        ]

        # return the appropriate performance metric

        if self.optimization == 'Nash-Sutcliffe Product':

            # daily log flows

            log_o = [numpy.log(f) for f in oflows]
            log_s = [numpy.log(f) for f in sflows]

            logdNS = (1 - sum(
                (numpy.array(log_s) - numpy.array(log_o))**2) / sum(
                    (numpy.array(log_o) - numpy.mean(log_o))**2))

            # daily NS

            dNS = (1 - sum(
                (numpy.array(sflows) - numpy.array(oflows))**2) / sum(
                    (numpy.array(oflows) - numpy.mean(oflows))**2))

            return dNS * logdNS

        elif self.optimization == 'Nash-Sutcliffe Efficiency':

            # daily NS

            dNS = (1 - sum(
                (numpy.array(sflows) - numpy.array(oflows))**2) / sum(
                    (numpy.array(oflows) - numpy.mean(oflows))**2))

            return dNS

        # Mengfei Mu 20180529
        elif self.optimization == 'Log Nash-Sutcliffe Efficiency':
            # daily Log NS
            # patch zero flows using neighboring values so the log transform
            # is defined (log10 of zero is -inf)
            for n in range(len(oflows)):
                if oflows[n] == 0 or sflows[n] == 0:
                    try:
                        oflows[n] = (oflows[n - 1] + oflows[n + 1]) / 2.0
                        sflows[n] = (sflows[n - 1] + sflows[n + 1]) / 2.0
                    except IndexError:
                        oflows[n] = oflows[n - 1]
                        sflows[n] = sflows[n - 1]
            log_sflows = [numpy.log10(f) for f in sflows]
            log_oflows = [numpy.log10(f) for f in oflows]
            dLogNS = (1 - sum(
                (numpy.array(log_sflows) - numpy.array(log_oflows))**2) / sum(
                    (numpy.array(log_oflows) - numpy.mean(log_oflows))**2))
            return dLogNS
        else:

            raise ValueError('optimization parameter ' +
                             '{} not recognized'.format(self.optimization))
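
The zero-patching loop in the Log Nash-Sutcliffe branch exists because log10 is undefined at zero; numpy returns -inf there, which poisons the sums in the metric. A short demonstration with made-up flows:

import numpy

flows = numpy.array([0.0, 2.0, 4.0])

print(numpy.log10(flows))          # [-inf  0.301...  0.602...]
print(numpy.log10(flows).sum())    # -inf, which would break dLogNS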
Example #8
def calibrate():
    """Builds and calibrates the model."""

    # make the working directory for HSPF

    if not os.path.isdir(hspf): os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + '/data'

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = '{}/hunthour/huntobs.exp'.format(directory)

    f = 'temp.wdm'

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, 'r')

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap = wdm.get_data(f, 111)
    flow = wdm.get_data(f, 281)

    s, e = wdm.get_dates(f, 106)

    # adjust the time series for HSPF's backward-looking time stepping:
    # prepend a zero to the precipitation, convert from inches to mm, and
    # disaggregate the daily evaporation to hourly

    precip = [0] + [p * 25.4 for p in precip]
    evap = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, 'rb') as f:
        w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, verbose=True)

    # turn on the hydrology modules for the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries('precipitation', 'BWI', s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries('evaporation', 'Beltsville', s, evap)

    # add flow timeseries with label Hunting, start date, tstep (minutes)

    #hunting.add_timeseries('flowgage', 'Hunting', start, flow, tstep = 1440)
    hunting.add_timeseries('flowgage', 'Hunting', s, flow, tstep=60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries('precipitation', 'BWI')
    hunting.assign_watershed_timeseries('evaporation', 'Beltsville')

    # find the subbasin identifier for the watershed outlet

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries('flowgage', subbasin, 'Hunting')

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    calibrator = AutoCalibrator(hunting, start, end, hspf)

    calibrator.autocalibrate(calibrated,
                             variables=variables,
                             optimization=optimization,
                             perturbations=perturbations,
                             parallel=parallel)

    for variable, value in zip(calibrator.variables, calibrator.values):

        print('{:6s} {:5.3f}'.format(variable, value))

    print('')
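
These hunting.py scripts appear to rely on module-level configuration (start, end, variables, optimization, perturbations, parallel, and the file paths); a hypothetical entry point chaining the preprocessing in Example #6 with this calibration step might read:

if __name__ == '__main__':

    preprocess()    # build the watershed and the base HSPFModel (Example #6)
    calibrate()     # autocalibrate and report the adjusted parameter values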