Example #1
def read_tss_header(tssfilename):
    """ Read header of a tss file (used in inflow)
        :param tssfilename  path and name of the tss
        :returns outlets_id  list of column names in tss file 
    """
    with open(tssfilename) as fp:
        rec = fp.readline()
        if rec.split()[0] == 'timeseries':
            # LISFLOOD tss file with header
            # get total number of outlets
            outlets_tot_number = int(fp.readline())
            fp.readline()
            outlets_id = []
            for i in range(0, outlets_tot_number - 1):
                rec = fp.readline()
                rec = int(rec.strip())
                outlets_id.append(rec)  #Lisflood ID code for output points
            # read tss data
            # tssdata = pd.read_table(tssfilename, delim_whitespace=True, header=None, names=outlets_id, index_col=0,
            #                        skiprows=outlets_tot_number + 2)

        else:
            # LISFLOOD tss file without header (table)
            numserie = len(rec.split())
            outlets_id = []
            for i in range(1, numserie):
                outlets_id.append(
                    i)  #Lisflood progressive ID code for output points
            # read tss data
            # tssdata = pd.read_table(tssfilename, delim_whitespace=True, header=None, names=outlets_id, index_col=0)
    return outlets_id
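A minimal usage sketch of read_tss_header, assuming a tiny LISFLOOD-style tss header is written to a temporary file (description line, total column count including the timestep column, a 'timestep' line, then one outlet ID per line); the expected result follows from the header branch above.

import os
import tempfile

# Hypothetical two-outlet tss header (column count 3 = timestep + 2 outlets)
sample = "timeseries scalar settingsfile: demo.xml\n3\ntimestep\n101\n102\n"
with tempfile.NamedTemporaryFile("w", suffix=".tss", delete=False) as tmp:
    tmp.write(sample)
    path = tmp.name

print(read_tss_header(path))  # expected: [101, 102]
os.remove(path)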
Example #2
    def initial(self):
        """ initial part of the evapo water module
        """

        # ************************************************************
        # ***** EVAPORATION
        # ************************************************************
        self.var.EvaCumM3 = MaskInfo.instance().in_zero()
        # water use cumulated amount
        # water use substep amount
        settings = LisSettings.instance()
        option = settings.options
        binding = settings.binding
        maskinfo = MaskInfo.instance()
        if option['openwaterevapo']:
            LakeMask = loadmap('LakeMask', pcr=True)
            lmask = ifthenelse(LakeMask != 0, self.var.LddStructuresKinematic,
                               5)
            LddEva = lddrepair(lmask)
            lddC = compressArray(LddEva)
            inAr = decompress(np.arange(maskinfo.info.mapC[0], dtype="int32"))
            self.var.downEva = (compressArray(downstream(
                LddEva, inAr))).astype("int32")
            # each upstream pixel gets the id of the downstream pixel
            self.var.downEva[lddC == 5] = maskinfo.info.mapC[0]
            self.var.maxNoEva = int(loadmap('maxNoEva'))
            # all pits gets a high number
            # still to test if this works

            # ldd only inside lakes for calculating evaporation

            if option['varfractionwater']:

                self.var.diffmaxwater = loadmap(
                    'FracMaxWater') - self.var.WaterFraction

                # Fraction of maximum extent of water - fraction of water in lakes and rivers

                varWNo = [
                    1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 370
                ]
                self.var.varW = []  # variable fraction of water
                self.var.varW1 = []

                self.var.varW1.append(12)
                j = 0
                for i in range(1, 367):
                    if i >= varWNo[j + 1]:
                        j += 1
                    self.var.varW1.append(j)

                for i in range(12):
                    varWName = generateName(binding['WFractionMaps'],
                                            varWNo[i])
                    self.var.varW.append(
                        loadLAI(binding['WFractionMaps'], varWName, i))
Example #3
    def _writeTssFile(self):
        """
        writing timeseries to disk
        """
        #
        option = LisSettings.instance().options
        outputFilename = self._configureOutputFilename(self._outputFilename)
        if option['EnKF']:
            if not os.path.exists(outputFilename):
                if self._writeHeader:
                    self._writeFileHeader(outputFilename)
                    outputFile = open(outputFilename, "a")
                else:
                    outputFile = open(outputFilename, "w")
            else:
                outputFile = open(outputFilename, "a")
        else:
            if self._writeHeader:
                self._writeFileHeader(outputFilename)
                outputFile = open(outputFilename, "a")
            else:
                outputFile = open(outputFilename, "w")

        assert outputFile

        start = self._userModel.firstTimeStep()
        end = self._userModel.nrTimeSteps() + 1

        for timestep in range(start, end):
            row = ""
            row += " %8g" % timestep
            if self._spatialIdGiven:
                for cellId in range(0, self._ncodesId):
                    value = self._sampleValues[timestep - start][cellId]
                    if isinstance(value, Decimal):
                        row += "           1e31"
                    else:
                        row += " %14g" % (value)
                row += "\n"
            else:
                value = self._sampleValues[timestep - start]
                if isinstance(value, Decimal):
                    row += "           1e31"
                else:
                    row += " %14g" % (value)
                row += "\n"

            outputFile.write(row)

        outputFile.close()
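The per-row formatting used by _writeTssFile can be reproduced on its own; the helper below is a sketch (format_tss_row is not part of the source) showing the '%8g' timestep field, the '%14g' value fields, and the 1e31 marker written for Decimal("NaN") placeholders.

from decimal import Decimal

def format_tss_row(timestep, values):
    # mirrors _writeTssFile: timestep as %8g, each value as %14g,
    # Decimal placeholders written as the conventional 1e31 missing value
    row = " %8g" % timestep
    for value in values:
        if isinstance(value, Decimal):
            row += "           1e31"
        else:
            row += " %14g" % value
    return row + "\n"

print(format_tss_row(1, [12.5, Decimal("NaN"), 3.75]))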
Example #4
def test_percentage():
    from time import sleep
    total = 100
    d = PercentageDone(total)
    for i in range(total):
        d(i)
        sleep(.5)
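PercentageDone itself is not shown in this collection; a minimal sketch of a class with the same interface (constructed with a total, called with the current index) could look like the following. The implementation is an assumption for illustration only.

import sys

class PercentageDone:
    """Hypothetical progress printer with the interface used in the test above."""
    def __init__(self, total):
        self.total = total

    def __call__(self, i):
        # overwrite a single line with the percentage completed so far
        pct = 100.0 * (i + 1) / self.total
        sys.stdout.write("\r%5.1f%% done" % pct)
        sys.stdout.flush()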
Example #6
def readmapsparse(name, time, oldmap):
    """
    load stack of maps 1 at each timestamp in Pcraster format
    """
    filename = generateName(name, time)
    flags = LisSettings.instance().flags
    try:
        map = iterReadPCRasterMap(filename)
        find = 1
    except Exception:
        find = 2
        if oldmap is None:
            for i in range(time - 1, 0, -1):
                altfilename = generateName(name, i)
                if os.path.exists(altfilename):
                    map = iterReadPCRasterMap(altfilename)
                    find = 1
                    # break
            if find == 2:
                msg = "no map in stack has a smaller time stamp than: " + filename
                raise LisfloodError(msg)
        else:
            map = oldmap
            if flags['loud']:
                s = " last_%s" % (os.path.basename(name))
                print(s)
    if flags['checkfiles']:
        checkmap(os.path.basename(name), filename, map, True, find)
    if flags['nancheck']:
        nanCheckMap(map, filename, name)
    mapC = compressArray(map, name=filename)
    return mapC
Example #7
def readnetcdfsparse(name, time, oldmap):
    """
    NO LONGER USED
    load stack of maps 1 at each timestamp in Netcdf format
    """
    try:
        mapC = readnetcdf(name, time)
        find = 1
        # print name+str(time)+"   "+str(find)
    except Exception:
        find = 2
        if oldmap is None:
            for i in range(time - 1, 0, -1):
                try:
                    mapC = readnetcdf(name, i)
                    find = 1
                    break
                except Exception:
                    pass
                # print name+"   "+str(time)+"   "+str(find)+"   "+str(i)
            if find == 2:
                msg = "no map in stack has a smaller time stamp than: " + str(
                    time)
                raise LisfloodError(msg)
        else:
            settings = LisSettings.instance()
            flags = settings.flags
            mapC = oldmap
            if flags['loud']:
                s = " last_" + (os.path.basename(name)) + str(time)
                # print s,
    return mapC
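Both readmapsparse and readnetcdfsparse fall back to earlier timestamps when the requested one cannot be read. The self-contained sketch below (read_sparse and the dictionary stand-in are not part of the source) shows the same fallback order: reuse the previous map if one exists, otherwise walk back to the most recent earlier timestamp.

def read_sparse(stack, time, oldmap=None):
    # stack: dict mapping timestep -> value, standing in for a sparse map stack on disk
    if time in stack:
        return stack[time]
    if oldmap is not None:
        return oldmap                     # reuse the last map that was read
    for t in range(time - 1, 0, -1):      # most recent earlier timestep wins
        if t in stack:
            return stack[t]
    raise ValueError("no map in stack has a smaller time stamp than: %s" % time)

print(read_sparse({1: "a", 5: "b"}, 7))   # -> 'b'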
Example #8
def valuecell(mask, coordx, coordstr):
    """
    to put a value into a pcraster map -> invert of cellvalue
    pcraster map is converted into a numpy array first
    """
    coord = []
    for xy in coordx:
        try:
            coord.append(float(xy))
        except ValueError:
            msg = "Gauges: " + xy + " in " + coordstr + " is not a coordinate"
            raise LisfloodError(msg)

    null = np.zeros((pcraster.clone().nrRows(), pcraster.clone().nrCols()))
    null[null == 0] = -9999

    for i in range(int(len(coord) / 2)):
        col = int((coord[i * 2] - pcraster.clone().west()) /
                  pcraster.clone().cellSize())
        row = int((pcraster.clone().north() - coord[i * 2 + 1]) /
                  pcraster.clone().cellSize())
        if 0 <= col < pcraster.clone().nrCols() and 0 <= row < pcraster.clone().nrRows():
            null[row, col] = i + 1
        else:
            msg = "Coordinates: " + str(coord[i * 2]) + ',' + str(coord[
                i * 2 +
                1]) + " to put value in is outside mask map - col,row: " + str(
                    col) + ',' + str(row)
            raise LisfloodError(msg)

    map = numpy2pcr(Nominal, null, -9999)
    return map
Example #9
def valuecell(coordx, coordstr):
    """
    to put a value into a pcraster map -> invert of cellvalue
    pcraster map is converted into a numpy array first
    """
    coord = []
    for xy in coordx:
        try:
            coord.append(float(xy))
        except ValueError:
            msg = 'Gauges: {} in {} is not a coordinate'.format(xy, coordstr)
            raise LisfloodError(msg)

    null = np.zeros((pcraster.clone().nrRows(), pcraster.clone().nrCols()))
    null[null == 0] = -9999

    for i in range(int(len(coord) / 2)):
        col = int((coord[i * 2] - pcraster.clone().west()) / pcraster.clone().cellSize())
        row = int((pcraster.clone().north() - coord[i * 2 + 1]) / pcraster.clone().cellSize())
        if 0 <= col < pcraster.clone().nrCols() and 0 <= row < pcraster.clone().nrRows():
            null[row, col] = i + 1
        else:
            msg = 'Coordinates: {}, {} to put value in is outside mask map - col,row: {}, {}'.format(coord[i * 2], coord[i * 2 + 1], col, row)
            raise LisfloodError(msg)

    return numpy_operations.numpy2pcr(Nominal, null, -9999)
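The column/row arithmetic shared by both valuecell variants (column measured from the west edge, row from the north edge, both divided by the cell size) can be checked without pcraster. The grid parameters below are assumptions for illustration.

import numpy as np

west, north, cell, nrows, ncols = 4.0, 52.0, 0.5, 10, 10   # assumed grid geometry

def coord_to_cell(x, y):
    col = int((x - west) / cell)
    row = int((north - y) / cell)
    if 0 <= col < ncols and 0 <= row < nrows:
        return row, col
    raise ValueError("coordinate outside mask map - col,row: %d,%d" % (col, row))

grid = np.full((nrows, ncols), -9999)
row, col = coord_to_cell(5.3, 51.2)
grid[row, col] = 1                        # first gauge gets code 1, as in valuecell
print(row, col)                           # -> 1 2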
Example #10
def perturbState(var,
                 method="normal",
                 minVal=0,
                 maxVal=1,
                 mu=0,
                 sigma=1,
                 spatial=True,
                 single=True):
    try:
        numVals = len(var)
    except TypeError:
        numVals = 1
    if method == "normal":
        if spatial:
            domain = len(var[0])
            out = var
            for i in range(numVals):
                out[i] = np.minimum(
                    np.maximum(np.random.normal(mu, sigma, domain), minVal),
                    maxVal)
        else:
            if single:
                out = np.minimum(
                    np.maximum(np.random.normal(mu, sigma, numVals), minVal),
                    maxVal)
            else:
                out = list(
                    np.minimum(
                        np.maximum(np.random.normal(mu, sigma, numVals),
                                   minVal), maxVal))
    if method == "uniform":
        if spatial:
            domain = len(var[0])
            out = var
            for i in range(numVals):
                out[i] = np.random.uniform(minVal, maxVal, domain)
        else:
            if single:
                out = np.random.uniform(minVal, maxVal, numVals)
            else:
                out = list(np.random.uniform(minVal, maxVal, numVals))
    return out
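perturbState only depends on numpy, so it can be exercised directly; a brief non-spatial usage sketch (values are clipped to [minVal, maxVal]) follows.

import numpy as np

np.random.seed(0)                         # only to make the illustration repeatable
state = [0.2, 0.4, 0.6]
perturbed = perturbState(state, method="normal", minVal=0, maxVal=1,
                         mu=0.5, sigma=0.1, spatial=False, single=True)
print(perturbed)                          # three values drawn from N(0.5, 0.1), clipped to [0, 1]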
Example #11
    def listest(cls, variable):
        settings = LisSettings.instance()
        output_path = settings.binding['PathOut']
        output_nc = os.path.join(output_path, variable)
        print('\n\n>>> Reference: {} - Current Output: {}'.format(
            cls.reference_files[cls.domain][variable], output_nc))
        results = []
        numsteps = cls.netcdf_steps(cls.reference_files[cls.domain][variable])
        for step in range(0, numsteps):
            results.append(cls.check_var_step(variable, step))
        assert all(results)
Example #12
    def initial(self):
        """ initial part of the leaf area index module
        """

        self.var.kgb = 0.75 * loadmap('kdf')
        # extinction coefficient for global solar radiation [-]
        # kdf= extinction coefficient for diffuse visible light [-], varies between
        # 0.4 and 1.1

        # LAINr=[1,32,60,91,121,152,182,213,244,274,305,335,370]
        LAINr = [
            1, 11, 21, 32, 42, 52, 60, 70, 80, 91, 101, 111, 121, 131, 141,
            152, 162, 172, 182, 192, 202, 213, 223, 233, 244, 254, 264, 274,
            284, 294, 305, 315, 325, 335, 345, 355, 370
        ]

        self.var.LAIX = [[0 for x in range(36)] for x in range(3)]
        self.var.LAI = [0, 0]
        self.var.L1 = []

        # self.var.L1.append(36)
        j = 0
        for i in range(367):
            if i >= LAINr[j + 1]:
                j += 1
            self.var.L1.append(j)
            # print i,self.L1[i],LAINr1[self.L1[i]]
        settings = LisSettings.instance()
        binding = settings.binding
        for i in range(36):
            LAIName = generateName(binding['LAIOtherMaps'], LAINr[i])
            self.var.LAIX[0][i] = loadLAI(binding['LAIOtherMaps'], LAIName, i)

            LAIName = generateName(binding['LAIForestMaps'], LAINr[i])
            self.var.LAIX[1][i] = loadLAI(binding['LAIForestMaps'], LAIName, i)

            LAIName = generateName(binding['LAIIrrigationMaps'], LAINr[i])
            self.var.LAIX[2][i] = loadLAI(binding['LAIIrrigationMaps'],
                                          LAIName, i)
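The L1 list built above maps a day of year to the index of the corresponding roughly 10-day LAI map. The standalone sketch below rebuilds the same lookup and prints a few sample days.

LAINr = [1, 11, 21, 32, 42, 52, 60, 70, 80, 91, 101, 111, 121, 131, 141,
         152, 162, 172, 182, 192, 202, 213, 223, 233, 244, 254, 264, 274,
         284, 294, 305, 315, 325, 335, 345, 355, 370]

L1 = []
j = 0
for i in range(367):
    if i >= LAINr[j + 1]:
        j += 1
    L1.append(j)

# day 1 falls in the first window, day 15 in the second, day 365 in the last
print(L1[1], L1[15], L1[365])             # -> 0 1 35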
Example #13
    def listest(cls, variable):
        settings = LisSettings.instance()
        binding = settings.binding
        model_steps = settings.model_steps
        reference_path = cls.reference_files[variable]['outpath']
        output_path = os.path.normpath(binding[cls.reference_files[variable]['report_map']])
        print('>>> Reference: {} - Current Output: {}'.format(reference_path, output_path))

        results = []
        start_step, end_step = model_steps[0], model_steps[1]
        for step in range(start_step, end_step + 1):
            results.append(cls.check_var_step(variable, step))
        assert all(results)
Example #14
    def _report_steps(user_settings, bindings):

        res = {}
        repsteps = user_settings['ReportSteps'].split(',')
        if repsteps[-1] == 'endtime':
            repsteps[-1] = bindings['StepEnd']
        jjj = []
        for i in repsteps:
            if '..' in i:
                j = list(map(int, i.split('..')))
                jjj = list(range(j[0], j[1] + 1))
            else:
                jjj.append(i)
        res['rep'] = list(map(int, jjj))
        return res
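_report_steps expands the comma-separated ReportSteps entries, resolving 'endtime' and '..' ranges into a flat list of integer steps. The standalone sketch below (parse_report_steps is not part of the source) mirrors that parsing, including the detail that a '..' range replaces, rather than extends, the list built so far.

def parse_report_steps(repsteps_str, step_end):
    repsteps = repsteps_str.split(',')
    if repsteps[-1] == 'endtime':
        repsteps[-1] = step_end
    steps = []
    for item in repsteps:
        if '..' in str(item):
            lo, hi = map(int, str(item).split('..'))
            steps = list(range(lo, hi + 1))   # a range replaces the list, as in the original
        else:
            steps.append(item)
    return list(map(int, steps))

print(parse_report_steps('1,5,10..12', 30))   # -> [10, 11, 12]
print(parse_report_steps('1,5,endtime', 30))  # -> [1, 5, 30]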
Example #15
    def _writeFileHeader(self, outputFilename):
        """
        writes header part of tss file
        """
        outputFile = open(outputFilename, "w")
        # header
        from .settings import LisSettings
        settings = LisSettings.instance()
        outputFile.write("timeseries {} settingsfile: {} date: {}\n".format(
            self._spatialDatatype.lower(), settings.settings_path,
            xtime.ctime(xtime.time())))
        # write number of outlets points +1
        outputFile.write(str(self._ncodesId + 1) + "\n")
        outputFile.write("timestep\n")

        for colId in range(0, self._ncodesId):
            outputFile.write(str(self._codesId[colId]) + "\n")
        outputFile.close()
Example #16
    def _writeFileHeader(self, outputFilename):
        """
        writes header part of tss file
        """
        outputFile = open(outputFilename, "w")
        # header
        #outputFile.write("timeseries " + self._spatialDatatype.lower() + "\n")
        outputFile.write("timeseries " + self._spatialDatatype.lower() +
                         " settingsfile: " + os.path.realpath(sys.argv[1]) +
                         " date: " + xtime.ctime(xtime.time()) + "\n")
        # write number of outlets points +1
        outputFile.write(str(self._ncodesId + 1) + "\n")
        outputFile.write("timestep\n")

        for colId in range(0, self._ncodesId):
            outputFile.write(str(self._codesId[colId]) + "\n")
        outputFile.close()
Example #17
    def _report_steps(user_settings, bindings):

        res = {}
        repsteps = user_settings['ReportSteps'].split(',')
        if repsteps[0] == 'starttime':
            repsteps[0] = bindings['StepStartInt']
        if repsteps[-1] == 'endtime':
            repsteps[-1] = bindings['StepEndInt']
        jjj = []
        for i in repsteps:
            if '..' in i:
                j = list(map(int, i.split('..')))
                jjj = list(range(j[0], j[1] + 1))
            else:
                jjj.append(i)
        res['rep'] = list(map(int, jjj))
        if res['rep'][0] > bindings['StepEndInt'] or res['rep'][-1] < bindings['StepStartInt']:
            warnings.warn(LisfloodWarning(
                'No maps are reported as report steps configuration is outside simulation time interval'))
        return res
Example #18
    def dynamic(self):
        """ dynamic part of the evaporation from open water
        """
        settings = LisSettings.instance()
        option = settings.options
        if option['openwaterevapo']:
            # ***********************************************
            # *********  EVAPORATION FROM OPEN WATER  *******
            # ***********************************************
            UpstreamEva = self.var.EWRef * self.var.MMtoM3 * self.var.WaterFraction
            # evaporation for loop is amount of water per timestep [cu m]
            # Volume of potential evaporation from water surface  per time step (conversion to [m3])
            ChanMIter = self.var.ChanM3Kin.copy()
            # for Iteration loop: First value is amount of water in the channel
            # amount of water in bankful (first line of routing)
            ChanLeft = ChanMIter * 0.1
            # 10% of the discharge must stay in the river
            self.var.EvaAddM3 = MaskInfo.instance().in_zero()
            #   real water consumption is set to 0

            for NoEvaExe in range(self.var.maxNoEva):
                ChanHelp = np.maximum(ChanMIter - UpstreamEva, ChanLeft)
                EvaIter = np.maximum(UpstreamEva - (ChanMIter - ChanHelp), 0)
                # new amount is amount - evaporation use, up to a limit
                # new evaporation is evaporation - water is used from channel network
                ChanMIter = ChanHelp.copy()
                self.var.EvaAddM3 += UpstreamEva - EvaIter
                # evaporation is added up; the sum is the same as sum of original water use
                # UpstreamEva = upstream(self.var.LddEva,EvaIter)
                UpstreamEva = np.bincount(self.var.downEva,
                                          weights=EvaIter)[:-1]
                # remaining water use is moved down the river system

            self.var.EvaAddM3Dt = self.var.EvaAddM3 * self.var.InvNoRoutSteps
            # splitting water use per timestep into water use per sub time step
            self.var.EvaCumM3 += self.var.EvaAddM3
Example #19
    def __init__(self, tssFilename, model, idMap=None, noHeader=False):
        """

        """

        if not isinstance(tssFilename, str):
            raise ValueError(
                "timeseries output filename must be of type string. Found {} of type {}"
                .format(tssFilename, type(tssFilename)))
        settings = LisSettings.instance()
        binding = settings.binding
        self._outputFilename = tssFilename
        self._maxId = 1
        self._ncodesId = 1
        self._spatialId = None
        self._spatialDatatype = None
        self._spatialIdGiven = False
        self._userModel = model
        self._writeHeader = not noHeader
        # array to store the timestep values
        self._sampleValues = None

        _idMap = False
        if isinstance(idMap, str) or isinstance(idMap,
                                                pcraster._pcraster.Field):
            _idMap = True

        # if header reserve rows from 1 to endstep
        # if noheader only from startstep - endstep

        if noHeader:
            nrRows = datetoint(binding['StepEnd'])[0] - datetoint(
                binding['StepStart'])[0] - self._userModel.firstTimeStep() + 2
        else:
            nrRows = datetoint(binding['StepEnd'])[0] - datetoint(
                binding['StepStart'])[0] - self._userModel.firstTimeStep() + 2

        if _idMap:
            self._spatialId = idMap
            if isinstance(idMap, str):
                self._spatialId = iterReadPCRasterMap(idMap)

            _allowedDataTypes = [
                pcraster.Nominal, pcraster.Ordinal, pcraster.Boolean
            ]
            if self._spatialId.dataType() not in _allowedDataTypes:
                #raise Exception(
                #    "idMap must be of type Nominal, Ordinal or Boolean")
                # changed into creating a nominal map instead of bailing out
                self._spatialId = pcraster.nominal(self._spatialId)

            if self._spatialId.isSpatial():
                self._maxId, valid = pcraster.cellvalue(
                    pcraster.mapmaximum(pcraster.ordinal(self._spatialId)), 1)
                # convert to numpy array
                outletsmapnp = pcr2numpy(self._spatialId, np.nan)
                # get outlets codes from outlets map
                codesId = np.unique(outletsmapnp)
                # drop negative values (= missing data in pcraster map)
                codesId = codesId[codesId > 0]
                # get number of outlets points
                self._ncodesId = len(codesId)
                # prepare array to store outlets codes
                self._codesId = [-9999 for i in range(self._ncodesId)]

            else:
                self._maxId = 1
                self._codesId = [1]
                self._ncodesId = len(self._codesId)

            # cell indices of the sample locations

            # #self._sampleAddresses = []
            # for cellId in range(1, self._maxId + 1):
            # self._sampleAddresses.append(self._getIndex(cellId))
            # prepare array to store outlets points raster numbers
            self._sampleAddresses = [-9999 for i in range(self._ncodesId)]
            # init with the left/top cell - could also be 0, but then you have to catch it in
            # the sample routine and put an exception in
            # number of cells in map
            nrCells = pcraster.clone().nrRows() * pcraster.clone().nrCols()
            for cell in range(1, nrCells + 1):
                if (pcraster.cellvalue(self._spatialId, cell)[1]):
                    # get point code from outlets map for pixel cell
                    outlet_code = pcraster.cellvalue(self._spatialId, cell)[0]
                    # get index of the point code in the sorted list of outlets codes
                    outlet_idx = np.where(codesId == outlet_code)[0][0]
                    # store point code
                    self._codesId[outlet_idx] = outlet_code
                    # store outlets location (cell)
                    self._sampleAddresses[outlet_idx] = cell

            self._spatialIdGiven = True

            nrCols = self._ncodesId
            self._sampleValues = [[Decimal("NaN")] * nrCols
                                  for _ in [0] * nrRows]
        else:
            self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows]
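The outlet bookkeeping in this constructor reduces to: take the unique positive codes from the outlets map and reserve one tss column per code. A numpy array standing in for the pcraster map (the values below are made up) illustrates that step.

import numpy as np
from decimal import Decimal

outlets = np.array([[-9999, 0, 7],
                    [3, -9999, 7],
                    [0, 12, -9999]])      # assumed outlets map; negatives mark missing data

codes = np.unique(outlets)
codes = codes[codes > 0]                  # drop missing data and non-outlet cells
ncodes = len(codes)                       # one tss column per outlet code
nrows = 5                                 # assumed number of reported timesteps

sample_values = [[Decimal("NaN")] * ncodes for _ in range(nrows)]
print(codes, ncodes, len(sample_values))  # -> [ 3  7 12] 3 5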
Example #20
def random_string(length, chars='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                                'abcdefghijklmnopqrstuvwxyz'
                                '0123456789'):
    """Return a random string of some `length`."""
    return ''.join((random.choice(chars) for i in range(length)))
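A short usage example for random_string (the module-level random import is assumed to be present in the source file):

import random

random.seed(42)                           # only to make the illustration repeatable
token = random_string(12)
print(token, len(token))                  # 12 characters drawn from A-Z, a-z, 0-9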
Example #21
    def __init__(self, tssFilename, model, idMap=None, noHeader=False):
        """

        """

        if not isinstance(tssFilename, str):
            raise Exception(
                "timeseries output filename must be of type string")

        self._outputFilename = tssFilename
        self._maxId = 1
        self._spatialId = None
        self._spatialDatatype = None
        self._spatialIdGiven = False
        self._userModel = model
        self._writeHeader = not noHeader
        # array to store the timestep values
        self._sampleValues = None

        _idMap = False
        if isinstance(idMap, (str, pcraster.pcraster.Field)):
            _idMap = True

        nrRows = self._userModel.nrTimeSteps() - self._userModel.firstTimeStep() + 1

        if _idMap:
            self._spatialId = idMap
            if isinstance(idMap, str):
                self._spatialId = pcraster.readmap(idMap)

            _allowedDataTypes = [pcraster.Nominal, pcraster.pcraster.Ordinal, pcraster.Boolean]
            if self._spatialId.dataType() not in _allowedDataTypes:
                # raise Exception(
                #    "idMap must be of type Nominal, Ordinal or Boolean")
                # changed into creating a nominal map instead of bailing out
                self._spatialId = nominal(self._spatialId)

            if self._spatialId.isSpatial():
                self._maxId, valid = pcraster.pcraster.cellvalue(pcraster.operations.mapmaximum(pcraster.operations.ordinal(self._spatialId)), 1)
            else:
                self._maxId = 1

            # cell indices of the sample locations

            # #self._sampleAddresses = []
            # for cellId in range(1, self._maxId + 1):
            # self._sampleAddresses.append(self._getIndex(cellId))

            self._sampleAddresses = [1 for _ in range(self._maxId)]
            # init with the left/top cell - could also be 0, but then you have to catch it in
            # the sample routine and put an exception in
            nrCells = pcraster.pcraster.clone().nrRows() * pcraster.pcraster.clone().nrCols()
            for cell in range(1, nrCells + 1):
                if pcraster.pcraster.cellvalue(self._spatialId, cell)[1]:
                    self._sampleAddresses[pcraster.pcraster.cellvalue(self._spatialId, cell)[0] - 1] = cell

            self._spatialIdGiven = True

            nrCols = self._maxId
            self._sampleValues = [[Decimal("NaN")] * nrCols for _ in [0] * nrRows]
        else:
            self._sampleValues = [[Decimal("NaN")] * 1 for _ in [0] * nrRows]
Example #22
    def dynamic(self, sLoop):
        """ dynamic part of the soil loop module
        """

        # to make things faster: global functions to local functions

        # ************************************************************
        # ***** INTERCEPTION *****************************************
        # ************************************************************
        # Domain: whole pixel (permeable + direct runoff areas)
        #
        maskinfo = MaskInfo.instance()
        np.seterr(invalid='ignore', divide='ignore')

        SMax = np.where(
            self.var.LAI[sLoop] > 0.1, 0.935 + 0.498 * self.var.LAI[sLoop] -
            0.00575 * np.square(self.var.LAI[sLoop]), maskinfo.in_zero())
        SMax = np.where(self.var.LAI[sLoop] > 43.3, 11.718, SMax)
        # maximum interception [mm]
        # Van Hoyningen-Huene (1981), p.46
        # small LAI: no interception
        # Note that for LAI = 43.3, SMax is at its maximum value of 11.718, dropping
        # after that (but LAI should never be that high)

        self.var.Interception[sLoop] = np.where(
            SMax > 0.0,
            np.minimum(
                SMax - self.var.CumInterception[sLoop],
                SMax *
                (1 -
                 np.exp(-0.046 * self.var.LAI[sLoop] * self.var.Rain / SMax))),
            maskinfo.in_zero())
        self.var.Interception[sLoop] = np.minimum(self.var.Interception[sLoop],
                                                  self.var.Rain)
        # Interception (in [mm] per timestep)
        # Smax is calculated from LAI as a constant map (above)
        # according to Aston (1970), based on Merriam (1960/1970)
        # 0.046*LAI = k = (1-p) p = 1-0.046*LAI  Aston (1970)
        # LAI must be a pixel average, not the average LAI for PER only!

        self.var.CumInterception[sLoop] += self.var.Interception[sLoop]
        # total interception in [mm] per timestep

        # ************************************************************
        # ***** EVAPORATION OF INTERCEPTED WATER *********************
        # ************************************************************
        # Domain: whole pixel (permeable + direct runoff areas)

        TaInterceptionMax = self.var.EWRef * (1 - self.var.LAITerm[sLoop])
        # Maximum evaporation rate of intercepted water, [mm] per timestep
        # TaInterception is at the open water surface rate, but only for the
        # intercepted water in the vegetated fraction, hence the multiplication
        # by (1 - LAITerm)

        self.var.TaInterception[sLoop] = np.maximum(
            np.minimum(self.var.CumInterception[sLoop], TaInterceptionMax),
            maskinfo.in_zero())
        # amount of interception water [mm] that can be evaporated
        # assumption: at first all interception water is evaporated at a rate equal to TaInterceptionMax

        self.var.CumInterception[sLoop] = np.maximum(
            self.var.CumInterception[sLoop] - self.var.TaInterception[sLoop],
            maskinfo.in_zero())
        # evaporated water is subtracted from Cumulative Interception
        self.var.LeafDrainage[
            sLoop] = self.var.LeafDrainageK * self.var.CumInterception[sLoop]
        # leaf drainage in [mm] per timestep, assuming linear reservoir
        # assumption: after 1 day all intercepted water is evaporated or has fallen
        # on the soil surface
        self.var.CumInterception[sLoop] = np.maximum(
            self.var.CumInterception[sLoop] - self.var.LeafDrainage[sLoop],
            maskinfo.in_zero())

        # ************************************************************
        # ***** AVAILABLE WATER FOR INFILTRATION ****************************
        # ************************************************************
        # Domain: AvailableWaterForInfiltration only used for permeable fraction
        # DirectRunoff is total for whole pixel (permeable + direct runoff areas)

        self.var.AvailableWaterForInfiltration[sLoop] =\
            np.maximum(self.var.Rain + self.var.SnowMelt + self.var.LeafDrainage[sLoop] - self.var.Interception[sLoop], maskinfo.in_zero())
        # Water available for infiltration during this timestep [mm]

        # ************************************************************
        # ***** SOIL WATER STRESS ************************************
        # ************************************************************
        # Domain: permeable fraction of pixel only

        p = 1 / (0.76 + 1.5 *
                 np.minimum(0.1 * self.var.ETRef * self.var.InvDtDay, 1.0)
                 ) - 0.10 * (5 - self.var.CropGroupNumber[sLoop])
        # soil water depletion fraction (easily available soil water)
        # Van Diepen et al., 1988: WOFOST 6.0, p.87
        # to avoid strange behaviour of the p-formula, ETRef is set to a maximum of
        # 10 mm/day. Thus, p will range from 0.15 to 0.45 at ETRef eq 10 and
        # CropGroupNumber 1-5
        p = np.where(
            self.var.CropGroupNumber[sLoop] <= 2.5, p +
            (np.minimum(0.1 * self.var.ETRef * self.var.InvDtDay, 1.0) - 0.6) /
            (self.var.CropGroupNumber[sLoop] *
             (self.var.CropGroupNumber[sLoop] + 3)), p)
        # correction for crop groups 1 and 2 (Van Diepen et al, 1988)
        p = np.maximum(np.minimum(p, 1.0), maskinfo.in_zero())
        # p is between 0 and 1
        WCrit1 = ((1 - p) * (self.var.WFC1[sLoop] - self.var.WWP1[sLoop])
                  ) + self.var.WWP1[sLoop]
        WCrit1a = ((1 - p) * (self.var.WFC1a[sLoop] - self.var.WWP1a[sLoop])
                   ) + self.var.WWP1a[sLoop]
        WCrit1b = ((1 - p) * (self.var.WFC1b[sLoop] - self.var.WWP1b[sLoop])
                   ) + self.var.WWP1b[sLoop]
        # critical moisture amount ([mm] water slice) for all layers
        settings = LisSettings.instance()
        option = settings.options
        if option['wateruse'] and sLoop == 2:
            self.var.WFilla = np.minimum(WCrit1a, self.var.WPF3a[2])
            self.var.WFillb = np.minimum(WCrit1b, self.var.WPF3b[2])
            # if water use is calculated, get the filling of the soil layer for either pF3 or WCrit1
            # that is the amount of water the soil gets filled by water from irrigation

        #  with np.errstate(invalid='ignore',divide='ignore'):
        #  bc the divisor can have 0 -> this calculation is done first and raise a warning - zero encountered -
        #  even if it is catched afterwards
        self.var.RWS[sLoop] = np.where(
            (WCrit1 - self.var.WWP1[sLoop]) > 0,
            (self.var.W1[sLoop] - self.var.WWP1[sLoop]) /
            (WCrit1 - self.var.WWP1[sLoop]), 1.0)

        # Transpiration reduction factor (in case of water stress)
        # if WCrit1 = WWP1, RWS is zero there is no water stress in that case
        self.var.RWS[sLoop] = np.maximum(np.minimum(self.var.RWS[sLoop], 1.0),
                                         maskinfo.in_zero())
        # Transpiration reduction factor (in case of water stress)

        if option['repStressDays']:
            self.var.SoilMoistureStressDays[sLoop] = np.where(
                self.var.RWS[sLoop] < 1, self.var.DtDay, maskinfo.in_zero())
            # Count number of days with soil water stress, RWS is between 0 and 1
            # no reduction of Transpiration at RWS=1, at RWS=0 there is no Transpiration at all

        # ************************************************************
        # ***** MAXIMUM TRANSPIRATION RATE ***************************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        TranspirMax = self.var.CropCoef[sLoop] * self.var.ETRef * (
            1 - self.var.LAITerm[sLoop])
        # maximum transpiration rate ([mm] per timestep)
        # crop coefficient is mostly 1, except for excessively transpiring crops,
        # such as sugarcane and some forests (coniferous forests)
        self.var.TranspirMaxCorrected = np.maximum(
            TranspirMax - self.var.TaInterception[sLoop], maskinfo.in_zero())
        # subtract TaInterception from TranspirMax to ensure energy balance is respected
        # (maximize statement because TranspirMax and TaInterception are calculated from
        # reference surfaces with slightly different properties)
        # ************************************************************
        # ***** ACTUAL TRANSPIRATION RATE ****************************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        self.var.Ta[sLoop] = np.maximum(
            np.minimum(self.var.RWS[sLoop] * self.var.TranspirMaxCorrected,
                       self.var.W1[sLoop] - self.var.WWP1[sLoop]), 0.0)
        # actual transpiration based on both layers 1a and 1b
        self.var.Ta[sLoop] = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), self.var.Ta[sLoop])
        # transpiration is 0 when soil is frozen
        # calculate distribution where to take Ta from:
        # 1st: above WCrit from layer 1a
        # 2nd: above WCrit from layer 1b
        # 3rd: distribute the take-off according to soil moisture availability below WCrit
        wc1a = np.maximum(
            self.var.W1a[sLoop] - WCrit1a, 0
        )  # unstressed water availability from layer 1a without stress (above critical soil moisture)
        wc1b = np.maximum(self.var.W1b[sLoop] - WCrit1b,
                          0)  # (same as above but for layer 1b)
        Ta1a = np.minimum(
            self.var.Ta[sLoop], wc1a
        )  # temporary transpiration from layer 1a (<= unstressed layer 1a availability)
        restTa = np.maximum(
            self.var.Ta[sLoop] - Ta1a, 0
        )  # transpiration left after layer 1a unstressed water has been abstracted
        Ta1b = np.minimum(
            restTa, wc1b
        )  # temporary transpiration from layer 1b (<= unstressed layer 1b availability)
        restTa = np.maximum(
            restTa - Ta1b, 0
        )  # transpiration left after layers 1a and 1b unstressed water have been abstracted
        stressed_availability_1a = np.maximum(self.var.W1a[sLoop] - Ta1a -
                                              self.var.WWP1a[sLoop], 0)  #|
        stressed_availability_1b = np.maximum(self.var.W1b[sLoop] - Ta1b -
                                              self.var.WWP1b[sLoop], 0)  #|
        stressed_availability_tot = stressed_availability_1a + stressed_availability_1b  #|> distribution of abstractions of
        available = stressed_availability_tot > 0  #|> soil moisture below the critical value
        fraction_rest_1a = np.where(
            available, stressed_availability_1a / stressed_availability_tot,
            0)  #|> proportionally to each root-zone layer (1a and 1b)
        fraction_rest_1b = np.where(available, stressed_availability_1b /
                                    stressed_availability_tot,
                                    0)  #|> "stressed" availability
        Ta1a += fraction_rest_1a * restTa  #|
        Ta1b += fraction_rest_1b * restTa  #|
        self.var.W1a[sLoop] -= Ta1a
        self.var.W1b[sLoop] -= Ta1b
        self.var.W1[sLoop] = np.add(self.var.W1a[sLoop], self.var.W1b[sLoop])

        # ************************************************************
        # ***** ACTUAL BARE SOIL EVAPORATION *************************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # ESActPixel valid for whole pixel

        self.var.DSLR[sLoop] = np.where(
            self.var.AvailableWaterForInfiltration[sLoop] >
            self.var.AvWaterThreshold, 1.0,
            self.var.DSLR[sLoop] + self.var.DtDay)
        # Days since last rain (minimum value=1)
        # AvWaterThreshold in mm (Stroosnijder, 1987 in Supit, p. 92)
        # Note that this equation was originally designed for DAILY time steps
        # to make it work with ANY time step AvWaterThreshold has to be provided
        # as an INTENSITY in the binding (AvWaterRateThreshold), which isn't quite
        # right (possible solution: keep track of total AvailableWaterForInfiltration
        # during the last 24 hrs; look at this later)

        ESMax = self.var.ESRef * self.var.LAITerm[sLoop]
        # Maximum evaporation from a shaded soil surface in [mm] per time step
        self.var.ESAct[sLoop] = ESMax * (np.sqrt(self.var.DSLR[sLoop]) -
                                         np.sqrt(self.var.DSLR[sLoop] - 1))
        # Reduction of actual soil evaporation is assumed to be proportional to the
        # square root of time
        # ESAct in [mm] per timestep

        self.var.ESAct[sLoop] = np.minimum(
            self.var.ESAct[sLoop], self.var.W1[sLoop] - self.var.WRes1[sLoop])
        # either ESAct or available water from layers 1a and 1b
        self.var.ESAct[sLoop] = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), self.var.ESAct[sLoop])
        # soil evaporation is 0 when soil is frozen
        self.var.ESAct[sLoop] = np.maximum(self.var.ESAct[sLoop],
                                           maskinfo.in_zero())

        # distributing ESAct over layer 1a and 1b, take the water from 1a first
        testSupply1a = self.var.W1a[sLoop] - self.var.WRes1a[sLoop]
        EsAct1a = np.where(self.var.ESAct[sLoop] > testSupply1a, testSupply1a,
                           self.var.ESAct[sLoop])
        EsAct1b = np.maximum(self.var.ESAct[sLoop] - testSupply1a,
                             maskinfo.in_zero())

        self.var.W1a[sLoop] = self.var.W1a[sLoop] - EsAct1a
        self.var.W1b[sLoop] = self.var.W1b[sLoop] - EsAct1b
        self.var.W1[sLoop] = np.add(self.var.W1a[sLoop], self.var.W1b[sLoop])
        # evaporation is subtracted from W1a (top layer) and W1b

        # ************************************************************
        # ***** INFILTRATION CAPACITY ********************************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        #print np.max(self.var.W1a)
        RelSat1 = np.where(
            self.var.PoreSpaceNotZero1a[sLoop],
            np.minimum(self.var.W1[sLoop] / self.var.WS1[sLoop], 1.0),
            maskinfo.in_zero())
        # Relative saturation term of the first two layers. This will allow to have more infiltration
        # than the storage capacity of layer 1
        # Setting this to  a maximum of 1
        # will prevent MV creation due to small rounding errors
        # 'if' statement prevents division by zero for zero-depth soils
        SatFraction = 1 - (1 - RelSat1)**self.var.b_Xinanjiang
        # Fraction of pixel that is at saturation as a function of
        # the ratio Theta1/ThetaS1. Distribution function taken from
        # Zhao,1977, as cited in Todini, 1996 (JoH 175, 339-382)
        InfiltrationPot = self.var.StoreMaxPervious[sLoop] * (
            1 - SatFraction)**self.var.PowerInfPot * self.var.DtDay
        # Potential infiltration per time step [mm], which is the available pore space in the
        # pervious fraction of each pixel (1-SatFraction) times the depth of the upper soil layer.
        # For derivation see Appendix A in Todini, 1996

        InfiltrationPot = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), InfiltrationPot)
        # When the soil is frozen (frostindex larger than threshold), potential
        # infiltration is zero

        # ************************************************************
        # ***** PREFERENTIAL FLOW (Rapid bypass soil matrix) *********
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # PrefFlowPixel valid for whole pixel

        self.var.PrefFlow[sLoop] = (
            RelSat1**self.var.PowerPrefFlow
        ) * self.var.AvailableWaterForInfiltration[sLoop]
        # Assumption: fraction of available water that bypasses the soil matrix
        # (added directly to Upper Zone) is power function of the
        # relative saturation of the topsoil
        self.var.AvailableWaterForInfiltration[sLoop] -= self.var.PrefFlow[
            sLoop]
        # Update water available for infiltration

        # ************************************************************
        # ***** ACTUAL INFILTRATION AND SURFACE RUNOFF ***************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # SurfaceRunoff, InfiltrationPixel are valid for whole pixel

        self.var.Infiltration[sLoop] = np.maximum(
            np.minimum(self.var.AvailableWaterForInfiltration[sLoop],
                       InfiltrationPot), maskinfo.in_zero())
        # infiltration in [mm] per timestep
        # Maximum infiltration is equal to Rainfall-Interception-Snow+Snowmelt

        # if W1a + Infiltration is more than the maximum storage capacity of layer 1a, then the rest goes to 1b
        # could happen because InfiltrationPot is calculated based on layer 1a + 1b
        testW1a = self.var.W1a[sLoop] + self.var.Infiltration[sLoop]
        # sum up W1a and infiltration to test if it is > saturated WS1a

        #self.var.Infiltration[sLoop] = np.where(testW1a > self.var.WS1a[sLoop], self.var.WS1a[sLoop] - self.var.W1a[sLoop] ,self.var.Infiltration[sLoop])
        # in case we want to put it to runoff
        self.var.W1a[sLoop] = np.minimum(self.var.WS1a[sLoop], testW1a)
        self.var.W1b[sLoop] = self.var.W1b[sLoop] + np.where(
            testW1a > self.var.WS1a[sLoop], testW1a - self.var.WS1a[sLoop],
            maskinfo.in_zero())

        # soil moisture amount is adjusted

        # ************************************************************
        # ***** SOIL MOISTURE: FLUXES BETWEEN SOIL LAYERS   **********
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # SeepTopToSubPixel,SeepSubToGWPixel valid for whole pixel
        # Flow between layer 1 and 2 and seepage out of layer 2: based on Darcy's
        # equation, assuming seepage is entirely gravity-driven,
        # so seepage rate equals unsaturated conductivity
        # The following calculations are performed to determine how many
        # sub-steps are needed to achieve sufficient numerical stability
        KUnSat1a, KUnSat1b, KUnSat2 = self.unsaturatedConductivity(
            sLoop
        )  # Unsaturated conductivity at the beginning of this time step [mm/day]

        AvailableWater1a = self.var.W1a[sLoop] - self.var.WRes1a[sLoop]
        AvailableWater1b = self.var.W1b[sLoop] - self.var.WRes1b[sLoop]
        AvailableWater2 = self.var.W2[sLoop] - self.var.WRes2[sLoop]
        # Available water in both soil layers [mm]

        CapacityLayer1 = self.var.WS1b[sLoop] - self.var.W1b[sLoop]
        CapacityLayer2 = self.var.WS2[sLoop] - self.var.W2[sLoop]
        # Available storage capacity in subsoil

        CourantTopToSubA = np.where(
            AvailableWater1a == 0, maskinfo.in_zero(),
            KUnSat1a * self.var.DtDay / AvailableWater1a)
        CourantTopToSubB = np.where(
            AvailableWater1b == 0, maskinfo.in_zero(),
            KUnSat1b * self.var.DtDay / AvailableWater1b)
        CourantSubToGW = np.where(AvailableWater2 == 0, maskinfo.in_zero(),
                                  KUnSat2 * self.var.DtDay / AvailableWater2)
        # Courant condition for computed soil moisture fluxes:
        # if Courant gt CourantCrit: sub-steps needed for required numerical accuracy
        # 'If'-statement prevents division by zero when available water equals zero:
        # in that case the unsaturated conductivity is zero as well, so
        # solution will be stable.
        CourantSoil = np.maximum(np.maximum(CourantTopToSubA, CourantTopToSubB),
                                 CourantSubToGW)
        # Both flow between soil layers and flow out of layer two
        # need to be numerically stable, so number of sub-steps is
        # based on process with largest Courant number
        NoSubS = np.maximum(1, np.ceil(CourantSoil / self.var.CourantCrit))
        self.var.NoSubSteps = int(np.nanmax(NoSubS))
        # Number of sub-steps needed for required numerical
        # accuracy. Always greater than or equal to 1
        # (otherwise division by zero!)

        # DtSub=self.var.DtDay/NoSubSteps

        # HUh whats this:
        #DtSub = scalar(self.var.DtDay / mapmaximum(self.var.NoSubSteps))
        DtSub = self.var.DtDay / self.var.NoSubSteps

        # DtSub=spatial(DtDay)/mapmaximum(NoSubSteps)
        # Corresponding sub-timestep [days]
        # Soil loop is looping for the maximum of NoSubsteps
        # therefore DtSub is calculated as part of the timestep according to
        # the maximum of NoSubsteps

        WTemp1a = self.var.W1a[sLoop]
        WTemp1b = self.var.W1b[sLoop]
        WTemp2 = self.var.W2[sLoop]
        # Copy current value of W1 and W2 to temporary variables,
        # because computed fluxes may need correction for storage
        # capacity of subsoil and in case soil is frozen (after loop)
        self.var.SeepTopToSubA[sLoop] = 0
        self.var.SeepTopToSubB[sLoop] = 0
        # Initialize top- to subsoil flux (accumulated value for all sub-steps)
        self.var.SeepSubToGW[sLoop] = 0
        # Initialize fluxes out of subsoil (accumulated value for all sub-steps)
        # Start iterating

        #NoSubS = int(mapmaximum(self.var.NoSubSteps))
        #NoSubS = self.var.NoSubSteps

        for i in range(self.var.NoSubSteps):
            if i > 0:
                KUnSat1a, KUnSat1b, KUnSat2 = self.unsaturatedConductivity(
                    sLoop, (WTemp1a, WTemp1b,
                            WTemp2))  # Unsaturated conductivity [mm/day]
            SeepTopToSubSubStepA = np.minimum(KUnSat1a * DtSub, CapacityLayer1)
            SeepTopToSubSubStepB = np.minimum(KUnSat1b * DtSub, CapacityLayer2)
            # Flux from top- to subsoil (cannot exceed storage capacity
            # of layer 2)
            SeepSubToGWSubStep = np.minimum(KUnSat2 * DtSub, AvailableWater2)
            # Flux out of soil [mm]
            # Minimise statement needed for exceptional cases
            # when Theta2 becomes lt 0 (possible due to small precision errors)
            AvailableWater1a = AvailableWater1a - SeepTopToSubSubStepA
            AvailableWater1b = AvailableWater1b + SeepTopToSubSubStepA - SeepTopToSubSubStepB
            AvailableWater2 = AvailableWater2 + SeepTopToSubSubStepB - SeepSubToGWSubStep
            # Update water balance for layers 1 and 2
            WTemp1a = AvailableWater1a + self.var.WRes1a[sLoop]
            WTemp1b = AvailableWater1b + self.var.WRes1b[sLoop]
            WTemp2 = AvailableWater2 + self.var.WRes2[sLoop]
            # Update WTemp1 and WTemp2
            CapacityLayer1 = self.var.WS1b[sLoop] - WTemp1b
            CapacityLayer2 = self.var.WS2[sLoop] - WTemp2
            # Update available storage capacity in layer 2
            self.var.SeepTopToSubA[sLoop] += SeepTopToSubSubStepA
            self.var.SeepTopToSubB[sLoop] += SeepTopToSubSubStepB
            # Update total top- to subsoil flux for this step
            self.var.SeepSubToGW[sLoop] += SeepSubToGWSubStep
            # Update total flux out of subsoil for this step

        self.var.SeepTopToSubA[sLoop] = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), self.var.SeepTopToSubA[sLoop])
        self.var.SeepTopToSubB[sLoop] = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), self.var.SeepTopToSubB[sLoop])
        # When the soil is frozen (frostindex larger than threshold), seepage
        # is zero
        self.var.SeepSubToGW[sLoop] = np.where(
            self.var.FrostIndex > self.var.FrostIndexThreshold,
            maskinfo.in_zero(), self.var.SeepSubToGW[sLoop])
        # When the soil is frozen (frostindex larger than threshold), seepage
        # is zero
        self.var.W1a[
            sLoop] = self.var.W1a[sLoop] - self.var.SeepTopToSubA[sLoop]
        self.var.W1b[sLoop] = self.var.W1b[sLoop] + self.var.SeepTopToSubA[
            sLoop] - self.var.SeepTopToSubB[sLoop]
        self.var.W2[sLoop] = self.var.W2[sLoop] + self.var.SeepTopToSubB[
            sLoop] - self.var.SeepSubToGW[sLoop]
        self.var.W1[sLoop] = np.add(self.var.W1a[sLoop], self.var.W1b[sLoop])
        # Update soil moisture amounts in top- and sub soil

        self.var.Infiltration[
            sLoop] = self.var.Infiltration[sLoop] - np.maximum(
                self.var.W1a[sLoop] - self.var.WS1a[sLoop], 0.0)
        self.var.W1a[sLoop] = np.minimum(self.var.W1a[sLoop],
                                         self.var.WS1a[sLoop])
        # Compute the amount of water that could not infiltrate and add this water to the surface runoff
        # Remove the excess of water in the top layer

        self.var.Theta1a[sLoop] = np.where(
            self.var.PoreSpaceNotZero1a[sLoop],
            self.var.W1a[sLoop] / self.var.SoilDepth1a[sLoop],
            maskinfo.in_zero())
        self.var.Theta1b[sLoop] = np.where(
            self.var.PoreSpaceNotZero1b[sLoop],
            self.var.W1b[sLoop] / self.var.SoilDepth1b[sLoop],
            maskinfo.in_zero())
        self.var.Theta2[sLoop] = np.where(
            self.var.PoreSpaceNotZero2[sLoop],
            self.var.W2[sLoop] / self.var.SoilDepth2[sLoop],
            maskinfo.in_zero())
        # Calculate volumetric soil moisture contents of top- and sub soil
        # [V/V]

        self.var.Sat1a[sLoop] = (self.var.W1a[sLoop] - self.var.WWP1a[sLoop]
                                 ) / (self.var.WFC1a[sLoop] -
                                      self.var.WWP1a[sLoop])
        self.var.Sat1b[sLoop] = (self.var.W1b[sLoop] - self.var.WWP1b[sLoop]
                                 ) / (self.var.WFC1b[sLoop] -
                                      self.var.WWP1b[sLoop])
        self.var.Sat1[sLoop] = (self.var.W1[sLoop] - self.var.WWP1[sLoop]) / (
            self.var.WFC1[sLoop] - self.var.WWP1[sLoop])
        self.var.Sat2[sLoop] = (self.var.W2[sLoop] - self.var.WWP2[sLoop]) / (
            self.var.WFC2[sLoop] - self.var.WWP2[sLoop])
        ## Calculate the saturation term with respect to the WP and FC values. This will indicate potential stress

        # ************************************************************
        # ***** CALCULATION OF PF VALUES FROM SOIL MOISTURE (OPTIONAL)
        # ************************************************************

        if option['simulatePF']:
            SatTerm1a = np.where(
                self.var.PoreSpaceNotZero1a[sLoop],
                (self.var.W1a[sLoop] - self.var.WRes1[sLoop]) /
                (self.var.WS1[sLoop] - self.var.WRes1[sLoop]),
                maskinfo.in_zero())
            SatTerm1b = np.where(
                self.var.PoreSpaceNotZero1b[sLoop],
                (self.var.W1b[sLoop] - self.var.WRes1[sLoop]) /
                (self.var.WS1[sLoop] - self.var.WRes1[sLoop]),
                maskinfo.in_zero())
            SatTerm2 = np.where(self.var.PoreSpaceNotZero2[sLoop],
                                (self.var.W2[sLoop] - self.var.WRes2[sLoop]) /
                                (self.var.WS2[sLoop] - self.var.WRes2[sLoop]),
                                maskinfo.in_zero())
            SatTerm1a = np.maximum(np.minimum(SatTerm1a, 1),
                                   maskinfo.in_zero())
            SatTerm1b = np.maximum(np.minimum(SatTerm1b, 1),
                                   maskinfo.in_zero())
            SatTerm2 = np.maximum(np.minimum(SatTerm2, 1), maskinfo.in_zero())
            # Saturation term in Van Genuchten equation

            Head1a = np.where(
                SatTerm1a == 0, self.var.HeadMax,
                np.minimum(
                    self.var.HeadMax, self.var.GenuInvAlpha1a[sLoop] *
                    ((1 / SatTerm1a)**self.var.GenuInvM1a[sLoop] - 1)**
                    self.var.GenuInvN1a[sLoop]))
            Head1b = np.where(
                SatTerm1b == 0, self.var.HeadMax,
                np.minimum(
                    self.var.HeadMax, self.var.GenuInvAlpha1b[sLoop] *
                    ((1 / SatTerm1b)**self.var.GenuInvM1b[sLoop] - 1)**
                    self.var.GenuInvN1b[sLoop]))
            Head2 = np.where(
                SatTerm2 == 0, self.var.HeadMax,
                np.minimum(
                    self.var.HeadMax, self.var.GenuInvAlpha2[sLoop] *
                    ((1 / SatTerm2)**self.var.GenuInvM2[sLoop] - 1)**
                    self.var.GenuInvN2[sLoop]))
            # Compute capillary heads for both soil layers [cm]

            self.var.pF0[sLoop] = np.where(Head1a > 0, np.log10(Head1a), -1)
            self.var.pF1[sLoop] = np.where(Head1b > 0, np.log10(Head1b), -1)
            self.var.pF2[sLoop] = np.where(Head2 > 0, np.log10(Head2), -1)
            # Compute pF. Set to -1 should heads become equal to or less than 0. No idea
            # if this can even actually happen (copied this from old LISFLOOD version) but it
            # shouldn't do any harm.

        # ************************************************************
        # ***** GROUNDWATER TRANSFER TO CHANNEL NETWORK ***
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # UZOutflowPixel, LZOutflowToChannelPixel, GwLossPixel valid for whole
        # pixel

        self.var.UZOutflow[sLoop] = np.minimum(
            self.var.UpperZoneK * self.var.UZ[sLoop], self.var.UZ[sLoop])
        # Outflow out of upper zone [mm]

        self.var.UZ[sLoop] = np.maximum(
            self.var.UZ[sLoop] - self.var.UZOutflow[sLoop], maskinfo.in_zero())

        # Update upper-, lower zone storage

        # ************************************************************
        # ***** TOTAL RUNOFF *****************************************
        # ************************************************************
        # Domain: whole pixel

        # TotalRunoff=SurfaceRunoff+UZOutflowPixel+LZOutflowToChannelPixel
        # Total runoff for this time step [mm]
        # Only calculated for reporting purposes!

        # ************************************************************
        # ***** UPPER- AND LOWER ZONE STORAGE ************************
        # ************************************************************
        # Domain: permeable fraction of pixel only
        # GwPercUZLZPixel valid for whole pixel

        if option['drainedIrrigation'] and sLoop == 2:
            self.var.UZOutflow[
                sLoop] += self.var.DrainedFraction * self.var.SeepSubToGW[sLoop]
            self.var.UZ[sLoop] += (
                1 - self.var.DrainedFraction
            ) * self.var.SeepSubToGW[sLoop] + self.var.PrefFlow[sLoop]
            # use map of drainage systems, to determine return flow (if drained, all percolation to channel within day;
            # if not, all normal soil processes)
        else:
            self.var.UZ[sLoop] += self.var.SeepSubToGW[
                sLoop] + self.var.PrefFlow[sLoop]
            # water in upper response box [mm]

        self.var.GwPercUZLZ[sLoop] = np.minimum(self.var.GwPercStep,
                                                self.var.UZ[sLoop])
        # percolation from upper to lower response box in [mm] per timestep
        # maximum value is controlled by GwPercStep (which is
        # GwPercValue*DtDay)
        self.var.UZ[sLoop] = np.maximum(
            self.var.UZ[sLoop] - self.var.GwPercUZLZ[sLoop],
            maskinfo.in_zero())
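# A minimal, self-contained sketch of the inverse Van Genuchten pF computation in the
# soil code above. The parameter values are made-up illustration numbers; this only
# mirrors the clip/np.where pattern and is not the LISFLOOD implementation.
import numpy as np

def pf_from_moisture(w, w_res, w_sat, inv_alpha, inv_m, inv_n, head_max=1.e4):
    """Return pF (log10 of capillary head [cm]) from soil water storage [mm]."""
    # relative saturation, clipped to [0, 1]
    sat = np.clip((w - w_res) / (w_sat - w_res), 0.0, 1.0)
    # inverse retention curve: head is capped at head_max and set to head_max
    # where the soil is completely dry (sat == 0)
    safe_sat = np.where(sat == 0, 1.0, sat)
    head = np.where(sat == 0, head_max,
                    np.minimum(head_max,
                               inv_alpha * ((1.0 / safe_sat) ** inv_m - 1.0) ** inv_n))
    # pF = log10(head); -1 flags non-positive heads (e.g. fully saturated soil)
    return np.where(head > 0, np.log10(head), -1.0)

# hypothetical soil layer with three pixels
print(pf_from_moisture(w=np.array([5.0, 60.0, 120.0]), w_res=10.0, w_sat=130.0,
                       inv_alpha=20.0, inv_m=0.5, inv_n=2.0))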
Example #23
def test_progress():
    from time import sleep
    for index, item in ShowingProgress(range(100), seconds=4):
        sleep(.237)
Example #24
def writenet(flag,
             inputmap,
             netfile,
             DtDay,
             value_standard_name,
             value_long_name,
             value_unit,
             data_format,
             startdate,
             repstepstart,
             repstepend,
             frequency=None):
    """ Write a netcdf stack

    :param flag: 0 creates a new netCDF file, any other value appends to the existing one; when writing a time stack, flag is also the time index of the record being written
    :param inputmap: values to be written to the netCDF file
    :param netfile: name of the output file in netCDF format (".nc" is appended)
    :param DtDay: model timestep in days (self.var.DtDay)
    :param value_standard_name: variable standard name to be put into the netCDF file
    :param value_long_name: variable long name to be put into the netCDF file
    :param value_unit: variable unit to be put into the netCDF file
    :param data_format: netCDF data type of the output variable (e.g. 'f4')
    :param startdate: reference date used to derive the start and end dates of the netCDF file from the first and last reporting steps
    :param repstepstart: first reporting step
    :param repstepend: final reporting step
    :param frequency: one of None, 'all', 'monthly', 'annual'; None writes a single 2D map, the other values write a time stack at the given frequency
    :return: None (data are written to disk)
    """
    # prefix = netfile.split('/')[-1].split('\\')[-1].split('.')[0]
    settings = LisSettings.instance()
    binding = settings.binding
    flags = settings.flags
    prefix = os.path.basename(netfile)
    netfile += ".nc"
    cutmap = CutMap.instance()
    row = np.abs(cutmap.cuts[3] - cutmap.cuts[2])
    col = np.abs(cutmap.cuts[1] - cutmap.cuts[0])
    if flag == 0:
        nf1 = iterOpenNetcdf(netfile, "", 'w', format='NETCDF4')
        # general Attributes
        nf1.settingsfile = os.path.realpath(sys.argv[1])
        nf1.date_created = xtime.ctime(xtime.time())
        nf1.Source_Software = 'Lisflood Python'
        nf1.institution = "European Commission DG Joint Research Centre (JRC) - E1, D2 Units"
        nf1.creator_name = "Peter Burek, A de Roo, Johan van der Knijff"
        nf1.source = 'Lisflood output maps'
        nf1.keywords = "Lisflood, EFAS, GLOFAS"
        nf1.Conventions = 'CF-1.6'
        # Dimension
        not_valid_attrs = ('_FillValue', )
        meta_netcdf = NetCDFMetadata.instance()
        if 'x' in meta_netcdf.data:
            lon = nf1.createDimension('x', col)  # x 1000
            longitude = nf1.createVariable('x', 'f8', ('x', ))
            valid_attrs = [
                i for i in meta_netcdf.data['x'] if i not in not_valid_attrs
            ]
            for i in valid_attrs:
                setattr(longitude, i, meta_netcdf.data['x'][i])

        if 'lon' in meta_netcdf.data:
            lon = nf1.createDimension('lon', col)
            longitude = nf1.createVariable('lon', 'f8', ('lon', ))
            valid_attrs = [
                i for i in meta_netcdf.data['lon'] if i not in not_valid_attrs
            ]
            for i in valid_attrs:
                setattr(longitude, i, meta_netcdf.data['lon'][i])

        if 'y' in meta_netcdf.data:
            lat = nf1.createDimension('y', row)  # x 950
            latitude = nf1.createVariable('y', 'f8', ('y', ))
            valid_attrs = [
                i for i in meta_netcdf.data['y'] if i not in not_valid_attrs
            ]
            for i in valid_attrs:
                setattr(latitude, i, meta_netcdf.data['y'][i])

        if 'lat' in meta_netcdf.data:
            lat = nf1.createDimension('lat', row)  # x 950
            latitude = nf1.createVariable('lat', 'f8', ('lat', ))
            valid_attrs = [
                i for i in meta_netcdf.data['lat'] if i not in not_valid_attrs
            ]
            for i in valid_attrs:
                setattr(latitude, i, meta_netcdf.data['lat'][i])

        # projection
        if 'laea' in meta_netcdf.data:
            proj = nf1.createVariable('laea', 'i4')
            for i in meta_netcdf.data['laea']:
                setattr(proj, i, meta_netcdf.data['laea'][i])

        if 'lambert_azimuthal_equal_area' in meta_netcdf.data:
            proj = nf1.createVariable('lambert_azimuthal_equal_area', 'i4')
            for i in meta_netcdf.data['lambert_azimuthal_equal_area']:
                setattr(proj, i,
                        meta_netcdf.data['lambert_azimuthal_equal_area'][i])
        """
        EUROPE
        proj.grid_mapping_name='lambert_azimuthal_equal_area'
        proj.false_easting=4321000.0
        proj.false_northing=3210000.0
        proj.longitude_of_projection_origin = 10.0
        proj.latitude_of_projection_origin = 52.0
        proj.semi_major_axis = 6378137.0
        proj.inverse_flattening = 298.257223563
        proj.proj4_params = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
        proj.EPSG_code = "EPSG:3035"
        """
        # Fill variables
        cell = round(pcraster.clone().cellSize(), 5)
        xl = round((pcraster.clone().west() + cell / 2), 5)
        xr = round((xl + col * cell), 5)
        yu = round((pcraster.clone().north() - cell / 2), 5)
        yd = round((yu - row * cell), 5)
        #lats = np.arange(yu, yd, -cell)
        #lons = np.arange(xl, xr, cell)
        lats = np.linspace(yu, yd, row, endpoint=False)
        lons = np.linspace(xl, xr, col, endpoint=False)
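        # np.linspace with endpoint=False yields exactly `row`/`col` coordinates,
        # whereas the commented-out np.arange could give one value too many or too
        # few because of floating-point accumulation of the cell size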
        latitude[:] = lats
        longitude[:] = lons
        if frequency is not None:  # output file with "time" dimension
            # Get initial and final dates for the data to be stored in the netCDF file
            first_date, last_date = [
                startdate + datetime.timedelta(days=(int(k) - 1) * DtDay)
                for k in (repstepstart, repstepend)
            ]
            # CM: Create time stamps for each step stored in netCDF file
            time_stamps = [
                first_date + datetime.timedelta(days=d * DtDay)
                for d in range(repstepend - repstepstart + 1)
            ]

            units_time = 'days since %s' % startdate.strftime(
                "%Y-%m-%d %H:%M:%S.0")
            steps = (int(binding["DtSec"]) / 86400.) * np.arange(
                binding["StepStartInt"] - 1, binding["StepEndInt"])
            if frequency != "all":
                dates = num2date(steps, units_time, binding["calendar_type"])
                next_date_times = np.array([
                    j + datetime.timedelta(seconds=int(binding["DtSec"]))
                    for j in dates
                ])
                if frequency == "monthly":
                    months_end = np.array([
                        dates[j].month != next_date_times[j].month
                        for j in range(steps.size)
                    ])
                    steps = steps[months_end]
                elif frequency == "annual":
                    years_end = np.array([
                        dates[j].year != next_date_times[j].year
                        for j in range(steps.size)
                    ])
                    steps = steps[years_end]
            nf1.createDimension('time', steps.size)
            time = nf1.createVariable('time', float, ('time',))
            time.standard_name = 'time'
            # time.units ='days since 1990-01-01 00:00:00.0'
            # time.units = 'hours since %s' % startdate.strftime("%Y-%m-%d %H:%M:%S.0")
            # CM: select the time unit according to model time step
            DtDay_in_sec = DtDay * 86400
            if DtDay_in_sec >= 86400:
                # Daily model time steps or larger
                time.units = 'days since %s' % startdate.strftime(
                    "%Y-%m-%d %H:%M:%S.0")
            elif DtDay_in_sec >= 3600 and DtDay_in_sec < 86400:
                # CM: hours to days model time steps
                time.units = 'hours since %s' % startdate.strftime(
                    "%Y-%m-%d %H:%M:%S.0")
            elif DtDay_in_sec >= 60 and DtDay_in_sec < 3600:
                # CM: minutes to hours model time step
                time.units = 'minutes since %s' % startdate.strftime(
                    "%Y-%m-%d %H:%M:%S.0")

            time.calendar = binding["calendar_type"]
            nf1.variables["time"][:] = date2num(time_stamps, time.units,
                                                time.calendar)
            # for i in metadataNCDF['time']: exec('%s="%s"') % ("time."+i, metadataNCDF['time'][i])
            if 'x' in meta_netcdf.data:
                value = nf1.createVariable(prefix,
                                           data_format, ('time', 'y', 'x'),
                                           zlib=True,
                                           fill_value=-9999,
                                           chunksizes=(1, row, col))
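                # zlib=True enables compression; chunksizes=(1, row, col) stores each
                # time step as a single chunk, and fill_value sets the _FillValue attribute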
            if 'lon' in meta_netcdf.data:
                value = nf1.createVariable(prefix,
                                           data_format, ('time', 'lat', 'lon'),
                                           zlib=True,
                                           fill_value=-9999,
                                           chunksizes=(1, row, col))
        else:
            if 'x' in meta_netcdf.data:
                value = nf1.createVariable(prefix,
                                           data_format, ('y', 'x'),
                                           zlib=True,
                                           fill_value=-9999)
            if 'lon' in meta_netcdf.data:
                # for world lat/lon coordinates
                value = nf1.createVariable(prefix,
                                           data_format, ('lat', 'lon'),
                                           zlib=True,
                                           fill_value=-9999)

        value.standard_name = value_standard_name
        value.long_name = value_long_name
        value.units = value_unit
        for var in meta_netcdf.data:
            if "esri_pe_string" in meta_netcdf.data[var]:
                value.esri_pe_string = meta_netcdf.data[var]['esri_pe_string']
    else:
        nf1 = iterOpenNetcdf(netfile, "", 'a', format='NETCDF4')
    if flags['nancheck']:
        nanCheckMap(inputmap, netfile, value_standard_name)
    maskinfo = MaskInfo.instance()
    mapnp = maskinfo.info.maskall.copy()
    mapnp[~maskinfo.info.maskflat] = inputmap[:]
    #mapnp = mapnp.reshape(maskinfo['shape']).data
    mapnp = mapnp.reshape(maskinfo.info.shape)
    if frequency is not None:
        nf1.variables[prefix][flag, :, :] = mapnp
        #value[flag,:,:]= mapnp
    else:
        # without timeflag
        nf1.variables[prefix][:, :] = mapnp
    nf1.close()
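# A small, self-contained sketch of the step-selection logic used for the 'monthly'
# frequency in writenet above: a step is kept when the next step falls in a different
# month. The start date and timestep below are made-up values, not LISFLOOD settings.
import datetime
import numpy as np

start = datetime.datetime(2020, 1, 1)
dt_sec = 86400                                  # hypothetical daily timestep [s]
steps = (dt_sec / 86400.) * np.arange(0, 90)    # 90 daily steps expressed in days
dates = np.array([start + datetime.timedelta(days=float(s)) for s in steps])
next_dates = np.array([d + datetime.timedelta(seconds=dt_sec) for d in dates])
months_end = np.array([d.month != n.month for d, n in zip(dates, next_dates)])
print(dates[months_end])                        # last step of each completed month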
Example #25
def test_percentage():
    from time import sleep
    for index, item in ShowingPercentage(range(100), max=100, granularity=4):
        sleep(.5)
Example #27
import random


def random_string(length, chars='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                                'abcdefghijklmnopqrstuvwxyz'
                                '0123456789'):
    '''Returns a random string of some `length`.'''
    return ''.join(random.choice(chars) for i in range(length))
Example #28
    def dynamic_inloop(self, NoRoutingExecuted):
        """ dynamic part of the lake routine
           inside the sub time step routing routine
        """

        # ************************************************************
        # ***** RESERVOIR
        # ************************************************************
        settings = LisSettings.instance()
        option = settings.options
        maskinfo = MaskInfo.instance()
        if option['simulateReservoirs']:
            InvDtSecDay = 1 / 86400.
            # InvDtSecDay=self.var.InvDtSec
            # ReservoirInflow = cover(ifthen(defined(self.var.ReservoirSites), upstream(
            # self.var.LddStructuresKinematic, self.var.ChanQ)), scalar(0.0))

            ReservoirInflowCC = np.bincount(self.var.downstruct, weights=self.var.ChanQ)[self.var.ReservoirIndex]
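            # np.bincount sums ChanQ over all cells that share the same downstream cell
            # index in downstruct, i.e. the total upstream inflow into each cell;
            # indexing with ReservoirIndex then picks the inflow at the reservoir cells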
            # ReservoirInflow=cover(ifpcr(defined(self.var.ReservoirSites),upstream(self.var.LddStructuresKinematic,self.var.ChanQ)),null)
            # Reservoir inflow in [m3/s]
            # 20-2-2006: Replaced ChanQKin by ChanQ (if this results in problems change back to ChanQKin!)
            # 21-2-2006: Inflow now taken from 1st upstream cell(s), using LddStructuresKinematic
            # (LddStructuresKinematic equals LddKinematic, but without the pits/sinks upstream of the structure
            # locations; note that using Ldd here instead would introduce MV!)

            QResInM3Dt = ReservoirInflowCC * self.var.DtRouting
            # Reservoir inflow in [m3] per timestep (routing step)

            self.var.ReservoirStorageM3CC += QResInM3Dt
            # New reservoir storage [m3] = plus inflow for this sub step
            self.var.ReservoirFillCC = self.var.ReservoirStorageM3CC / self.var.TotalReservoirStorageM3CC
            # New reservoir fill (fraction)

            ReservoirOutflow1 = np.minimum(self.var.MinReservoirOutflowCC, self.var.ReservoirStorageM3CC * InvDtSecDay)
            # Reservoir outflow [m3/s] if ReservoirFill le
            # 2*ConservativeStorageLimit

            ReservoirOutflow2 = self.var.MinReservoirOutflowCC + self.var.DeltaO * (self.var.ReservoirFillCC - 2 * self.var.ConservativeStorageLimitCC) / self.var.DeltaLN
            # Reservoir outflow [m3/s] if NormalStorageLimit le ReservoirFill
            # gt 2*ConservativeStorageLimit

            ReservoirOutflow3a = self.var.NormalReservoirOutflowCC
            ReservoirOutflow3b = self.var.NormalReservoirOutflowCC + ((self.var.ReservoirFillCC - self.var.Normal_FloodStorageLimitCC) / self.var.DeltaNFL) * (self.var.NonDamagingReservoirOutflowCC - self.var.NormalReservoirOutflowCC)
            # Reservoir outflow [m3/s] if FloodStorageLimit le ReservoirFill gt NormalStorageLimit
            # NEW 24-9-2004: linear transition between normal and non-damaging
            # outflow.
            #ReservoirOutflow4 = np.maximum((self.var.ReservoirFillCC - self.var.FloodStorageLimitCC) *
            #    self.var.TotalReservoirStorageM3CC * self.var.InvDtSec, self.var.NonDamagingReservoirOutflowCC)
            temp = np.minimum(self.var.NonDamagingReservoirOutflowCC, np.maximum(ReservoirInflowCC * 1.2, self.var.NormalReservoirOutflowCC))
            ReservoirOutflow4 = np.maximum((self.var.ReservoirFillCC - self.var.FloodStorageLimitCC-0.01) *
                                           self.var.TotalReservoirStorageM3CC * InvDtSecDay, temp)

            # Reservoir outflow [m3/s] if ReservoirFill gt FloodStorageLimit
            # Depending on ReservoirFill the reservoir outflow equals ReservoirOutflow1, ReservoirOutflow2,
            # ReservoirOutflow3 or ReservoirOutflow4

            ReservoirOutflow = ReservoirOutflow1.copy()
            ReservoirOutflow = np.where(self.var.ReservoirFillCC > 2 * self.var.ConservativeStorageLimitCC, ReservoirOutflow2, ReservoirOutflow)
            # ReservoirOutflow = np.where(self.var.ReservoirFillCC > self.var.NormalStorageLimitCC,
            #                     ReservoirOutflow3, ReservoirOutflow)

            ReservoirOutflow = np.where(self.var.ReservoirFillCC > self.var.NormalStorageLimitCC,
                                ReservoirOutflow3a, ReservoirOutflow)
            ReservoirOutflow = np.where(self.var.ReservoirFillCC > self.var.Normal_FloodStorageLimitCC,
                                ReservoirOutflow3b, ReservoirOutflow)

            ReservoirOutflow = np.where(self.var.ReservoirFillCC > self.var.FloodStorageLimitCC, ReservoirOutflow4, ReservoirOutflow)

            temp = np.minimum(ReservoirOutflow,np.maximum(ReservoirInflowCC, self.var.NormalReservoirOutflowCC))

            ReservoirOutflow = np.where((ReservoirOutflow > 1.2 * ReservoirInflowCC) &
                                        (ReservoirOutflow > self.var.NormalReservoirOutflowCC) &
                                        (self.var.ReservoirFillCC < self.var.FloodStorageLimitCC), temp, ReservoirOutflow)

            QResOutM3DtCC = ReservoirOutflow * self.var.DtRouting
            # Reservoir outflow in [m3] per sub step
            QResOutM3DtCC = np.minimum(QResOutM3DtCC, self.var.ReservoirStorageM3CC)
            # Check to prevent outflow from becoming larger than storage +
            # inflow
            QResOutM3DtCC = np.maximum(QResOutM3DtCC, self.var.ReservoirStorageM3CC - self.var.TotalReservoirStorageM3CC)

            # NEW 24-9-2004: Check to prevent reservoir storage from exceeding total capacity
            # expression to the right of comma always negative unless capacity is exceeded

            #self.var.ReservoirStorageM3CC += QResInM3Dt - QResOutM3DtCC
            self.var.ReservoirStorageM3CC -= QResOutM3DtCC
            # New reservoir storage [m3]
            self.var.ReservoirFillCC = self.var.ReservoirStorageM3CC / self.var.TotalReservoirStorageM3CC
            # New reservoir fill

            # CM: Check ReservoirFillCC for negative or NaN values and set them to zero
            bad_fill = np.isnan(self.var.ReservoirFillCC) | (self.var.ReservoirFillCC < 0)
            if bad_fill.any():
                msg = "Negative or NaN volume for reservoir fill set to 0. Increase computation time step for routing (DtSecChannel) \n"
                print(LisfloodWarning(msg))
                self.var.ReservoirFillCC[bad_fill] = 0

            # expanding the size as input for routing routine
            self.var.QResOutM3Dt = maskinfo.in_zero()
            np.put(self.var.QResOutM3Dt,self.var.ReservoirIndex,QResOutM3DtCC)
            # this is put to the channel again at each sub timestep

            if option['repsimulateReservoirs']:
                if NoRoutingExecuted == 0:
                    self.var.ReservoirInflowM3S = maskinfo.in_zero()
                    self.var.ReservoirOutflowM3S = maskinfo.in_zero()
                    self.var.sumResInCC = QResInM3Dt
                    self.var.sumResOutCC = QResOutM3DtCC
                    # for timeseries output - in- and outflow to the reservoir are summed up over the sub timesteps and stored in m3/s
                    # set to zero at first timestep
                else:
                    self.var.sumResInCC += QResInM3Dt
                    self.var.sumResOutCC += QResOutM3DtCC
                    # summing up over all sub timesteps

            if NoRoutingExecuted == (self.var.NoRoutSteps-1):

                # expanding the size after last sub timestep
                self.var.ReservoirStorageM3 = maskinfo.in_zero()
                self.var.ReservoirFill = maskinfo.in_zero()
                np.put(self.var.ReservoirStorageM3, self.var.ReservoirIndex, self.var.ReservoirStorageM3CC)
                np.put(self.var.ReservoirFill, self.var.ReservoirIndex, self.var.ReservoirFillCC)

                if option['repsimulateReservoirs']:
                    np.put(self.var.ReservoirInflowM3S, self.var.ReservoirIndex, self.var.sumResInCC / self.var.DtSec)
                    np.put(self.var.ReservoirOutflowM3S, self.var.ReservoirIndex, self.var.sumResOutCC / self.var.DtSec)
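# A compact, self-contained sketch of the outflow-regime selection used in Example #28:
# a cascade of np.where calls picks the outflow rule depending on how full the reservoir
# is. The regime formulas are simplified and all limits and rates are made-up
# illustration values, not calibrated LISFLOOD parameters.
import numpy as np

def reservoir_outflow(fill, min_out, normal_out, nondamaging_out,
                      cons_limit, normal_limit, flood_limit, storage_m3, dt_sec):
    """Return an outflow [m3/s] per reservoir given its fill fraction."""
    out1 = np.minimum(min_out, fill * storage_m3 / dt_sec)            # nearly empty
    out2 = min_out + (normal_out - min_out) * \
        (fill - 2 * cons_limit) / (normal_limit - 2 * cons_limit)     # filling up
    out3 = np.full_like(fill, normal_out)                             # normal operation
    out4 = np.maximum((fill - flood_limit) * storage_m3 / dt_sec,
                      nondamaging_out)                                # flood release
    out = out1.copy()
    out = np.where(fill > 2 * cons_limit, out2, out)
    out = np.where(fill > normal_limit, out3, out)
    out = np.where(fill > flood_limit, out4, out)
    return out

print(reservoir_outflow(fill=np.array([0.05, 0.4, 0.7, 0.98]),
                        min_out=5., normal_out=50., nondamaging_out=200.,
                        cons_limit=0.1, normal_limit=0.65, flood_limit=0.95,
                        storage_m3=1e8, dt_sec=86400.))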
Example #30
    def dynamic(self):
        """ dynamic part of the snow module
        """
        maskinfo = MaskInfo.instance()
        self.var.Snow = maskinfo.in_zero()
        self.var.Rain = maskinfo.in_zero()
        self.var.SnowMelt = maskinfo.in_zero()
        self.var.SnowCover = maskinfo.in_zero()

        # Snowmelt
        hemisphere_N = self.var.lat_rad > 0
        snowmelt_coeff = np.sin(
            np.radians((self.var.CalendarDay - 81) * self.var.SnowDayDegrees))

        SeasSnowMeltCoef = self.var.SnowSeason * np.where(
            hemisphere_N, snowmelt_coeff, -snowmelt_coeff
        ) + self.var.SnowMeltCoef  # N and S hemispheres have opposite-sign cycles

        # Icemelt
        #####################################################
        # Check if the current day is in the "summer icemelt season" for the Northern (N) and Southern (S) hemispheres
        is_summer_icemelt_N = (self.var.CalendarDay > self.icemelt_start_N) & (
            self.var.CalendarDay < self.icemelt_end_N)
        is_summer_icemelt_S = (self.var.CalendarDay > self.icemelt_start_S) | (
            self.var.CalendarDay < self.icemelt_end_S)
        # Icemelt coefficient: the sine function is the same for both hemispheres due to the imposed 1/2 periodicity; the mask is shifted 6 months
        _ice_melt_coeff = np.sin(
            np.radians((self.var.CalendarDay - self.icemelt_start_N) *
                       self.var.IceDayDegrees))
        ice_melt_coeff_N = _ice_melt_coeff if is_summer_icemelt_N else 0
        ice_melt_coeff_S = _ice_melt_coeff if is_summer_icemelt_S else 0
        SummerSeason = np.where(hemisphere_N, ice_melt_coeff_N,
                                ice_melt_coeff_S)

        #####################################################
        # Bugfix for global simulations - LA 17/7/2018 - uncomment to make it work
        # This bugfix is on hold waiting for approval from main developer
        # Constants
        #SVE= 127.5     # Shifted Vernal Equinox (in days) = Vernal Equinox (20 March) + day shift to have the peak of Summer season on a desired day (9th August instead of 21 June)
        #Sc = 1361      # Solar constant (not really important in this case, as long as it is divided by a meaningful scale parameter)
        #SCp= 400       # Scale parameter of the SummerSeason coefficient
        #SHp= 0.6       # Shape parameter of the SummerSeason coefficient

        #LatRad = 2*np.pi*self.var.lat_rad/360  #Latitude in radians
        #Dec = np.arcsin(0.3987*np.sin(2*np.pi*(self.var.CalendarDay-SVE)/365.25)) #Declination (i.e., CalendarDay in radians)

        #H1 = np.arccos(np.sign(-1*np.tan(Dec)*np.tan(LatRad)))
        #H0 = np.arccos(-1*np.tan(Dec)*np.tan(LatRad))

        #SSc1=np.maximum(0,Sc/np.pi*(H1*np.sin(LatRad)*np.sin(Dec)+np.sin(H1)*np.cos(LatRad)*np.cos(Dec))/SCp-SHp)
        #SSc0=np.maximum(0,Sc/np.pi*(H0*np.sin(LatRad)*np.sin(Dec)+np.sin(H0)*np.cos(LatRad)*np.cos(Dec))/SCp-SHp)

        #mask = np.absolute(-1*np.tan(Dec)*np.tan(LatRad))>1
        # SScoef=np.where(mask,SSc1,SSc0) # org
        #SummerSeason=np.where(mask,SSc1,SSc0) # changed name back to SummerSeason

        ########################################################
        #Previous version with hardcoded dates:
        #if (self.var.CalendarDay > 165) and (self.var.CalendarDay < 260):
        #    SummerSeason = np.sin(math.radians((self.var.CalendarDay - 165) * self.var.IceDayDegrees))
        #else:
        #    SummerSeason = 0.0
        ########################################################

        for i in range(3):
            TavgS = self.var.Tavg + self.var.DeltaTSnow * (i - 1)
            # Temperature at the center of each elevation zone (temperature of zone B equals Tavg)
            # i=0 -> highest zone
            # i=2 -> lowest zone
            SnowS = np.where(TavgS < self.var.TempSnow,
                             self.var.SnowFactor * self.var.Precipitation,
                             maskinfo.in_zero())
            # Precipitation is assumed to be snow if daily average temperature is below TempSnow
            # Snow is multiplied by correction factor to account for undercatch of
            # snow precipitation (which is common)
            RainS = np.where(TavgS >= self.var.TempSnow,
                             self.var.Precipitation, maskinfo.in_zero())
            # if it's snowing then no rain
            SnowMeltS = (TavgS - self.var.TempMelt) * SeasSnowMeltCoef * (
                1 + 0.01 * RainS) * self.var.DtDay

            if i < 2:
                IceMeltS = self.var.Tavg * 7.0 * self.var.DtDay * SummerSeason
                # i = 0 or 1 -> upper and middle zones
            else:
                IceMeltS = TavgS * 7.0 * self.var.DtDay * SummerSeason

            SnowMeltS = np.maximum(
                np.minimum(SnowMeltS + IceMeltS, self.var.SnowCoverS[i]),
                maskinfo.in_zero())
            self.var.SnowCoverS[i] = self.var.SnowCoverS[i] + SnowS - SnowMeltS

            self.var.Snow += SnowS
            self.var.Rain += RainS
            self.var.SnowMelt += SnowMeltS
            self.var.SnowCover += self.var.SnowCoverS[i]

        self.var.Snow /= 3
        self.var.Rain /= 3
        self.var.SnowMelt /= 3
        self.var.SnowCover /= 3

        self.var.TotalPrecipitation += self.var.Snow + self.var.Rain
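# A minimal, self-contained sketch of the degree-day snowmelt relation used in the
# elevation-zone loop of Example #30: melt is proportional to the temperature excess
# over the melting threshold, slightly enhanced by rainfall, and limited by the
# available snow cover. All numbers below are made-up illustration values.
import numpy as np

def degree_day_snowmelt(tavg, rain, snow_cover, melt_coef, temp_melt=0.0, dt_day=1.0):
    """Potential melt [mm] limited by the available snow cover [mm]."""
    melt = (tavg - temp_melt) * melt_coef * (1 + 0.01 * rain) * dt_day
    return np.clip(melt, 0.0, snow_cover)

print(degree_day_snowmelt(tavg=np.array([-2.0, 1.5, 6.0]),
                          rain=np.array([0.0, 3.0, 10.0]),
                          snow_cover=np.array([40.0, 40.0, 2.0]),
                          melt_coef=3.0))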
Example #31
    def dynamic(self):
        """ Dynamic part of LISFLOOD
            calls the dynamic part of the hydrological modules
        """
        settings = LisSettings.instance()
        option = settings.options
        flags = settings.flags
        # date corresponding to the model time step (yyyy-mm-dd hh:mm:ss)
        self.CalendarDate = self.CalendarDayStart + datetime.timedelta(days=(self.currentTimeStep()-1) * self.DtDay)
        # day of the year corresponding to the model time step
        self.CalendarDay = int(self.CalendarDate.strftime("%j"))
        # strftime("%j") gives the day of the year and handles leap years correctly

        # model time step
        i = self.currentTimeStep()
        if i == 1:
            # flag for netcdf output for all, steps and end
            _ = CDFFlags(uuid.uuid4())  # init CDF flags

        self.TimeSinceStart = self.currentTimeStep() - self.firstTimeStep() + 1
        if flags['loud']:
            print("%-6i %10s" % (self.currentTimeStep(), self.CalendarDate.strftime("%d/%m/%Y %H:%M")))
        else:
            if not flags['checkfiles']:
                if flags['quiet'] and not flags['veryquiet']:
                    sys.stdout.write(".")
                if not flags['quiet'] and not flags['veryquiet']:
                    # Print step number and date to console
                    sys.stdout.write("\r%d" % i), sys.stdout.write("%s" % " - "+self.CalendarDate.strftime("%d/%m/%Y %H:%M"))
                    sys.stdout.flush()
        if i == self.nrTimeSteps():
            # last timestep: write a final newline so the console output ends cleanly
            sys.stdout.write('\n')
            sys.stdout.flush()

        # ************************************************************
        """ up to here it was fun, now the real stuff starts
        """
        # readmeteo.py
        self.readmeteo_module.dynamic()     
        # timemeasure("Read meteo") # 1. timing after read input maps

        if flags['checkfiles']:
            return  # if only checking input files, finish here

        """ Here it starts with hydrological modules:
        """

        # ***** READ land use fraction maps***************************
        self.landusechange_module.dynamic()

        # ***** READ LEAF AREA INDEX DATA ****************************
        self.leafarea_module.dynamic()

        # ***** READ variable water fraction ****************************
        self.evapowater_module.dynamic_init()

        # ***** READ INFLOW HYDROGRAPHS (OPTIONAL)****************
        self.inflow_module.dynamic()
        # timemeasure("Read LAI") # 2. timing after LAI and inflow

        # ***** RAIN AND SNOW *****************************************
        self.snow_module.dynamic()
        # timemeasure("Snow")  # 3. timing after LAI and inflow

        # ***** FROST INDEX IN SOIL **********************************
        self.frost_module.dynamic()
        # timemeasure("Frost")  # 4. timing after frost index

        # ************************************************************
        # ***** Looping soil 3 times - once per modelled soil fraction
        # ************************************************************

        for soilLoop in range(3):
            self.soilloop_module.dynamic(soilLoop)
            # the soil module is repeated for each soil fraction (soilLoop = 0, 1, 2),
            # starting with the remaining areas (no forest, no impervious, no water)
            # and then the forested areas
            # timemeasure("Soil",loops = soilLoop + 1) # 5/6 timing after soil

        # -------------------------------------------------------------------
        # -------------------------------------------------------------------

        # ***** ACTUAL EVAPORATION FROM OPEN WATER AND SEALED SOIL ***
        self.opensealed_module.dynamic()

        # *********  WATER USE   *************************
        self.riceirrigation_module.dynamic()
        self.waterabstraction_module.dynamic()
        # timemeasure("Water abstraction")

        # ***** Calculation per Pixel ********************************
        self.soil_module.dynamic_perpixel()
        # timemeasure("Soil done")

        self.groundwater_module.dynamic()
        # timemeasure("Groundwater")

        # ************************************************************
        # ***** STOP if no routing is required    ********************
        # ************************************************************
        if option['InitLisfloodwithoutSplit']:
            # InitLisfloodwithoutSplit
            # Very fast InitLisflood
            # it is only to compute Lzavin.map and skip completely the routing component
            self.output_module.dynamic()  # only lzavin
            return

        # *********  EVAPORATION FROM OPEN WATER *************
        self.evapowater_module.dynamic()
        # timemeasure("open water eva.")

        # ***** ROUTING SURFACE RUNOFF TO CHANNEL ********************
        self.surface_routing_module.dynamic()
        # timemeasure("Surface routing")  # 7 timing after surface routing

        # ***** POLDER INIT **********************************
        self.polder_module.dynamic_init()

        # ***** INLETS INIT **********************************
        self.inflow_module.dynamic_init()
        # timemeasure("Before routing")  # 8 timing before channel routing

        # ************************************************************
        # ***** LOOP ROUTING SUB TIME STEP   *************************
        # ************************************************************
        maskinfo = MaskInfo.instance()
        self.sumDisDay = maskinfo.in_zero()
        # sums up discharge of the sub steps
        for NoRoutingExecuted in range(self.NoRoutSteps):
            self.routing_module.dynamic(NoRoutingExecuted)
            #   routing sub steps
        # timemeasure("Routing", loops=NoRoutingExecuted + 1)  # 9 timing after routing

        # ----------------------------------------------------------------------

        if option['inflow']:
            self.QInM3Old = self.QInM3
            # to calculate the parts of inflow for every routing timestep
            # for the next timestep the old inflow is preserved
            self.sumIn += self.QInDt*self.NoRoutSteps

        # if option['simulatePolders']:
        # ChannelToPolderM3=ChannelToPolderM3Old;

        if option['InitLisflood'] or (not(option['SplitRouting'])):
            # kinematic routing
            self.ChanM3 = self.ChanM3Kin.copy()
            # Total channel storage [cu m], equal to ChanM3Kin
        else:
            # split routing
            self.ChanM3 = self.ChanM3Kin + self.Chan2M3Kin - self.Chan2M3Start

        # Avoid negative values in ChanM3 and TotalCrossSectionArea
        # self.ChanM3 = np.where(self.ChanM3 > 0, self.ChanM3, 0)

        # Total channel storage [cu m], equal to ChanM3Kin
        # sum of both lines
        # CrossSection2Area = pcraster.max(scalar(0.0), (self.Chan2M3Kin - self.Chan2M3Start) / self.ChanLength)

        self.sumDis += self.sumDisDay
        self.ChanQAvg = self.sumDisDay/self.NoRoutSteps
        self.TotalCrossSectionArea = self.ChanM3 * self.InvChanLength
        # Total volume of water in channel per inv channel length
        # New cross section area (kinematic wave)
        # This is the value after the kinematic wave, so we use ChanM3Kin here
        # (NOT ChanQKin, which is average discharge over whole step, we need state at the end of all iterations!)

        # timemeasure("After routing")  # 10 timing after channel routing

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        if not(option['dynamicWave']):
            # Dummy code if dynamic wave is not used, in which case the total cross-section
            # area equals TotalCrossSectionAreaKin, ChanM3 equals ChanM3Kin and
            # ChanQ equals ChanQKin
            WaterLevelDyn = -9999
            # Set the dynamic-wave water level to a dummy value (needed so the
            # variable is defined even though the dynamic wave is not simulated)

        if option['InitLisflood'] or option['repAverageDis']:
            self.CumQ += self.ChanQ
            self.avgdis = self.CumQ/self.TimeSinceStart
            # to calculate average discharge

        self.DischargeM3Out += np.where(self.AtLastPointC, self.ChanQ * self.DtSec, 0)
        # Cumulative outflow out of the map

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Calculate water level
        self.waterlevel_module.dynamic()

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

        # ************************************************************
        # *******  Calculate CUMULATIVE MASS BALANCE ERROR  **********
        # ************************************************************
        self.waterbalance_module.dynamic()
        self.indicatorcalc_module.dynamic()

        # ************************************************************
        # ***** WRITING RESULTS: TIME SERIES AND MAPS ****************
        # ************************************************************
        self.output_module.dynamic()
        # timemeasure("Water balance")

        # debug 
        # Print value of variables after computation (from state files)
        if flags['debug']:
            nomefile = 'Debug_out_'+str(self.currentStep)+'.txt'
            ftemp1 = open(nomefile, 'w+')
            nelements = len(self.ChanM3)
            for i in range(nelements):
                if hasattr(self, 'CrossSection2Area'):
                    print(i, self.TotalCrossSectionArea[i], self.CrossSection2Area[i], self.ChanM3[i],
                          self.Chan2M3Kin[i], file=ftemp1)
                else:
                    print(i, self.TotalCrossSectionArea[i], self.ChanM3[i], file=ftemp1)
            ftemp1.close()

        ### Report states if EnKF is used and this is a filter moment
        self.stateVar_module.dynamic()
        self.indicatorcalc_module.dynamic_setzero()
        # setting monthly and yearly indicators to zero at the end of the month (year)

        # garbage collector added to free memory at the end of computation step
        gc.collect()
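# A tiny, self-contained sketch of the time-step bookkeeping at the top of dynamic()
# in Example #31: the calendar date is obtained by stepping from the start date by
# (step - 1) * DtDay days, and the day of the year via strftime("%j"). The start date
# and timestep below are made-up values.
import datetime

calendar_day_start = datetime.datetime(2019, 12, 30)   # hypothetical start date
dt_day = 1.0                                           # hypothetical timestep [days]
for step in range(1, 5):
    calendar_date = calendar_day_start + datetime.timedelta(days=(step - 1) * dt_day)
    calendar_day = int(calendar_date.strftime("%j"))   # day of the year (1..366)
    print(step, calendar_date.strftime("%d/%m/%Y %H:%M"), calendar_day)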
Example #32
def get_range():
    return range(3)