def loadmap(name):
    """
    :param name: Variable name as defined in XML settings or a filename of a netCDF or PCRaster map
    load a static map either value or pcraster map or netcdf
    """
    settings = LisSettings.instance()
    value = settings.binding[name]
    filename = value

    res = None
    flagmap = False

    # Try first to load the value from settings
    try:
        res = float(value)
        flagmap = False
        load = True
    except ValueError:
        try:
            # try to read a pcraster map
            res = pcraster.readmap(value)
            flagmap = True
            load = True
        except Exception:
            load = False

    if not load:
        # read a netcdf (single one not a stack)
        filename = '{}.{}'.format(os.path.splitext(value)[0], 'nc')

        # get the map extent of the netCDF file
        # and calculate the cutting window
        cut0, cut1, cut2, cut3 = CutMap.get_cuts(filename)

        # load netcdf map but only the rectangle needed
        nf1 = Dataset(filename, 'r')
        value = listitems(nf1.variables)[-1][0]  # get the last variable name
        mapnp = nf1.variables[value][cut2:cut3, cut0:cut1]
        nf1.close()

        # check if integer map (like outlets, lakes etc)
        checkint = str(mapnp.dtype)
        if checkint == "int16" or checkint == "int32":
            mapnp[mapnp.mask] = -9999
            res = numpy_operations.numpy2pcr(Nominal, mapnp, -9999)
        elif checkint == "int8":
            res = numpy_operations.numpy2pcr(Nominal, mapnp, 0)
        else:
            mapnp[np.isnan(mapnp)] = -9999
            res = numpy_operations.numpy2pcr(Scalar, mapnp, -9999)

        # if the map is a ldd
        if value.split('.')[0][-3:] == 'ldd':
            # FIXME weak...filename must contain 'ldd' string
            res = operations.ldd(operations.nominal(res))
        flagmap = True

    if settings.flags['checkfiles']:
        checkmap(name, filename, res, flagmap, 0)
    return res
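
# A minimal, self-contained sketch (not part of the LISFLOOD API; the helper
# name _parse_binding is hypothetical) of the "scalar first, then file"
# dispatch that loadmap uses, without the PCRaster/netCDF dependencies:
def _parse_binding(value):
    """Return (float, False) if value is numeric, else (value, True) so the
    caller knows it has to load a map file."""
    try:
        return float(value), False
    except ValueError:
        return value, True

assert _parse_binding('0.5') == (0.5, False)
assert _parse_binding('area.map') == ('area.map', True)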
def readnetcdf(name, timestep, timestampflag='closest', averageyearflag=False, variable_name=None):
    """ Read maps from netCDF stacks (forcings, fractions, water demand)

    Maps are read by date, so stacks can start at any date, including dates different from CalendarDayStart.
    The time resolution of a stack can differ from the model timestep, and sub-daily steps are supported.
    timestampflag indicates whether to load data with the exact time stamp ('exact'), or the data with the closest time
    stamp when the exact one is not available ('closest').
    averageyearflag indicates whether to load data from a netCDF file containing a single "average" year (used for
    water demand and land-use changes over time).

    :param name: string containing path and name of netCDF file to be read
    :param timestep: current simulation timestep of the model as an integer (relative to CalendarDayStart)
    :param timestampflag: look for exact time stamp in netcdf file ('exact') or for the closest (left) time stamp available ('closest')
    :param averageyearflag: if True, use "average year" netcdf file over the entire model simulation period
    :param variable_name: if given, will select the variable from netcdf instead of guessing
    :returns: content of the netCDF map for the current timestep, as a PCRaster field
    :except: if timestampflag='exact' and the current simulation timestep is not stored in the stack, execution stops with an error message
    """

    filename = '{}.nc'.format(name) if not name.endswith('.nc') else name
    nf1 = iter_open_netcdf(filename, 'r')
    # read information from the netCDF file and check the input is in the expected format
    if not variable_name:
        # get the (single) variable with 3 dimensions (variable order is not relevant)
        targets = [k for k in nf1.variables if len(nf1.variables[k].dimensions) == 3]
        if len(targets) > 1:
            warnings.warn('More than one 3-dimensional variable found in netCDF file {}'.format(filename))
        elif not targets:
            raise LisfloodError('No 3-dimensional variable was found in mapstack {}'.format(filename))
        variable_name = targets[0]
    
    current_ncdf_index = netcdf_step(averageyearflag, nf1, timestampflag, timestep)

    cutmaps = CutMap.instance().slices
    mapnp = nf1.variables[variable_name][current_ncdf_index, cutmaps[0], cutmaps[1]]
    nf1.close()
    # 'rn' stacks use a different missing-value sentinel than the default -9999
    fill = -9999999 if variable_name == 'rn' else -9999
    mapnp[np.isnan(mapnp)] = fill
    mapnp = numpy_operations.numpy2pcr(Scalar, mapnp, fill)
    timename = os.path.basename(name) + str(timestep)
    settings = LisSettings.instance()
    if settings.flags['checkfiles']:
        checkmap(timename, filename, mapnp, True, 1)
    return mapnp
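
# A self-contained sketch (not the LISFLOOD implementation; the helper name
# closest_left_index is hypothetical) of the 'exact' vs 'closest' lookup that
# netcdf_step performs on a stack's time axis; times/target are illustrative
# numeric values of a netCDF 'time' variable.
import numpy as np

def closest_left_index(times, target, timestampflag='closest'):
    """Index of target in times; 'closest' falls back to the nearest
    earlier (left) step, 'exact' raises if the step is missing."""
    times = np.asarray(times)
    exact = np.where(times == target)[0]
    if exact.size:
        return int(exact[0])
    if timestampflag == 'exact':
        raise ValueError('timestep {} not found in stack'.format(target))
    return max(int(np.searchsorted(times, target, side='right')) - 1, 0)

assert closest_left_index([0, 6, 12, 18], 12) == 2
assert closest_left_index([0, 6, 12, 18], 13) == 2   # closest (left) step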
Example #3
def valuecell(coordx, coordstr):
    """
    to put a value into a pcraster map -> invert of cellvalue
    pcraster map is converted into a numpy array first
    """
    coord = []
    for xy in coordx:
        try:
            coord.append(float(xy))
        except ValueError:
            msg = 'Gauges: {} in {} is not a coordinate'.format(xy, coordstr)
            raise LisfloodError(msg)

    clone = pcraster.clone()
    null = np.full((clone.nrRows(), clone.nrCols()), -9999.0)

    for i in range(int(len(coord) / 2)):
        col = int((coord[i * 2] - clone.west()) / clone.cellSize())
        row = int((clone.north() - coord[i * 2 + 1]) / clone.cellSize())
        if 0 <= col < clone.nrCols() and 0 <= row < clone.nrRows():
            null[row, col] = i + 1
        else:
            msg = 'Coordinates ({}, {}) are outside the mask map (col, row: {}, {})'.format(coord[i * 2], coord[i * 2 + 1], col, row)
            raise LisfloodError(msg)

    return numpy_operations.numpy2pcr(Nominal, null, -9999)
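
# Worked example of the coordinate-to-cell arithmetic used above, with an
# illustrative clone map (west edge 0.0, north edge 10.0, cell size 1.0):
# the point x=2.5, y=7.5 falls in column 2, row 2.
west, north, cellsize = 0.0, 10.0, 1.0
x, y = 2.5, 7.5
col = int((x - west) / cellsize)
row = int((north - y) / cellsize)
assert (col, row) == (2, 2)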
Example #4
def loadsetclone(name):
    """ Load 'MaskMap' and set as clone

    :param name: name of the key in Settings.xml containing path and name of mask map as string
    :return: mask map (False=include in modelling; True=exclude from modelling) as a PCRaster boolean map
    """
    settings = LisSettings.instance()
    filename = settings.binding[name]
    coord = filename.split()  # returns the list of whitespace-separated tokens in the string
    if len(coord) == 5:
        # read the clone map definition from Settings.xml
        # note the argument order: the settings line gives col row cellsize xupleft yupleft,
        # while pcraster.setclone expects row col cellsize xupleft yupleft
        try:
            pcraster.setclone(int(coord[1]), int(coord[0]),
                              Decimal(__DECIMAL_FORMAT.format(coord[2])),
                              Decimal(__DECIMAL_FORMAT.format(coord[3])),
                              Decimal(__DECIMAL_FORMAT.format(coord[4])))
        except Exception:
            msg = 'Maskmap: [{} {} {} {} {}] are not valid coordinates (col row cellsize xupleft yupleft)'.format(*coord)
            raise LisfloodError(msg)
        mapnp = np.ones((int(coord[1]), int(coord[0])))
        res = numpy_operations.numpy2pcr(Boolean, mapnp, -9999)
    elif len(coord) == 1:
        # CM: read information on clone map from map (pcraster or netcdf)
        try:
            # try to read a pcraster map
            iter_setclone_pcraster(filename)
            res = operations.boolean(iter_read_pcraster(filename))
            flagmap = True
        except Exception:
            # fall back to reading a netCDF file
            filename = '{}.{}'.format(os.path.splitext(settings.binding[name])[0], 'nc')
            nf1 = iter_open_netcdf(filename, 'r')
            value = listitems(nf1.variables)[-1][0]  # get the last variable name

            x_var = 'x'
            y_var = 'y'
            if 'lon' in nf1.variables.keys():
                x_var = 'lon'
                y_var = 'lat'
            x1 = Decimal(__DECIMAL_FORMAT.format(nf1.variables[x_var][0]))
            x2 = Decimal(__DECIMAL_FORMAT.format(nf1.variables[x_var][1]))
            y1 = Decimal(__DECIMAL_FORMAT.format(nf1.variables[y_var][0]))
            y2 = Decimal(__DECIMAL_FORMAT.format(nf1.variables[y_var][1]))
            xlast = Decimal(__DECIMAL_FORMAT.format(nf1.variables[x_var][-1]))
            ylast = Decimal(__DECIMAL_FORMAT.format(nf1.variables[y_var][-1]))

            cellSizeX = abs(x2 - x1)
            cellSizeY = abs(y2 - y1)

            settings.binding['internal.lons'] = nf1.variables[x_var][:]
            settings.binding['internal.lats'] = nf1.variables[y_var][:]

            nrRows = int(Decimal(0.5) + abs(ylast - y1) / cellSizeY + Decimal(1.0))
            nrCols = int(Decimal(0.5) + abs(xlast - x1) / cellSizeX + Decimal(1.0))
            x = x1 - cellSizeX * Decimal(0.5)  # coordinate of the west edge of the raster
            y = y1 + cellSizeY * Decimal(0.5)  # coordinate of the north edge of the raster
            mapnp = np.array(nf1.variables[value][0:nrRows, 0:nrCols])
            nf1.close()
            # setclone  row col cellsize xupleft yupleft
            pcraster.setclone(nrRows, nrCols,
                              float(__DECIMAL_FORMAT.format(cellSizeX)),
                              float(__DECIMAL_FORMAT.format(x)),
                              float(__DECIMAL_FORMAT.format(y)))
            res = numpy_operations.numpy2pcr(Boolean, mapnp, 0)
            flagmap = True
        if settings.flags['checkfiles']:
            checkmap(name, filename, res, flagmap, 0)
    else:
        msg = 'Maskmap: {} is not a valid mask map nor valid coordinates'.format(name)
        raise LisfloodError(msg)

    # Definition of cellsize, coordinates of the meteo maps and mask map
    # Get the current PCRaster clone map and save its metadata
    MaskMapMetadata.register(filename)
    return res
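
# Worked example of the grid geometry derived above from the netCDF axes
# (illustrative 0.1-degree coordinate centres): the cell size comes from the
# first two centres, row/column counts from the axis extent, and the map
# edges sit half a cell outside the first centres.
from decimal import Decimal

xs = [Decimal('5.05'), Decimal('5.15'), Decimal('5.25')]     # cell centres, west to east
ys = [Decimal('50.25'), Decimal('50.15'), Decimal('50.05')]  # cell centres, north to south
cell = abs(xs[1] - xs[0])                                     # 0.10
ncols = int(Decimal('0.5') + abs(xs[-1] - xs[0]) / cell + 1)  # 3
nrows = int(Decimal('0.5') + abs(ys[-1] - ys[0]) / cell + 1)  # 3
west = xs[0] - cell / 2                                       # 5.00, west edge
north = ys[0] + cell / 2                                      # 50.30, north edge
assert (ncols, nrows) == (3, 3)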
Example #5
def data_assimilation(begin_date, n_days, perform_DA=False, ConvLSTM_arch=None):
    '''
    Run the wflow_sbm model day by day, optionally assimilating ConvLSTM-predicted states.

    Keyword arguments:
    begin_date: date on which DA starts, as a 'YYYY-MM-DD' string
    n_days: number of days DA is performed, counted from begin_date
    perform_DA: if True, perform data assimilation; if False, run in open loop (no DA)
    ConvLSTM_arch: architecture to use: 'stacked_sep_1' (parallel model) or 'stacked_2' (stacked model)
    '''
    
    begin_date = begin_date + ' 00:00:00'
    
    gauge_ids = ['6335115', '6335117', '9316159', '9316160', '9316161',
                 '9316163', '9316166', '9316168', '9316170']
    q_val_modeled = {gauge: [] for gauge in gauge_ids}
    date_list = []
    
    states = np.zeros((n_days, 91, 134))
    
    for i in tqdm(np.arange(n_days)):
        if i == 0:
            begin_date = datetime.strptime(begin_date, '%Y-%m-%d %H:%M:%S')
            begin_date = begin_date - timedelta(days=1)
            start_time = str(begin_date)
        else:
            start_time = end_time
        
        # write the current model states as PCRaster maps for the model warm start
        for name in state_keys:
            state = state_file[name][test_split[0] - 1 + i]
            state = np.ma.getdata(state)
            state = numpy_operations.numpy2pcr(Scalar, state, -9999)
            report(state, 'wflow_sbm/Nahe/instate/' + state_dict[name] + '.map')

        end_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
        end_time = end_time + timedelta(days = 1)
        print_time = end_time + timedelta(days = 1)
        print_time = str(print_time)
        end_time = str(end_time)

        # update the run period in the wflow_sbm ini file (optionxform=str preserves key case)
        config = configparser.ConfigParser()
        config.optionxform = str
        config.read('wflow_sbm/Nahe/wflow_sbm.ini')
        config.set('run', 'starttime', start_time)
        config.set('run', 'endtime', end_time)

        with open('wflow_sbm/Nahe/wflow_sbm.ini', 'w') as configfile:
            config.write(configfile)

        if perform_DA:
            if ConvLSTM_arch == 'stacked_sep_1':
                model_stacked_sep_1 = keras.models.load_model('saved_models/model_stacked_sep_1.h5', compile=False)
                prediction = model_stacked_sep_1.predict(x=[features_convlstm_test[0+i:1+i, :, :, :, 0:1],
                                                            features_convlstm_test[0+i:1+i, :, :, :, 1:2],
                                                            features_convlstm_test[0+i:1+i, :, :, :, 2:3],
                                                            features_convlstm_test[0+i:1+i, :, :, :, 3:4]])
            elif ConvLSTM_arch == 'stacked_2':
                model_stacked_2 = keras.models.load_model('saved_models/model_stacked_2.h5', compile=False)
                prediction = model_stacked_2.predict(x=features_convlstm_test[0+i:1+i])
                
            prediction = prediction[0,:,:,0]
            prediction[mask] = -9999
            state_ust_0 = numpy_operations.numpy2pcr(Scalar, prediction, -9999)
            report(state_ust_0, ('wflow_sbm/Nahe/instate/UStoreLayerDepth_0.map'))


        subprocess.run(['.../wflow_sbm.py', '-C',
                        'wflow_sbm/Nahe', '-R', 'da_run', '-f'])

        # read back the simulated state from the model run output
        states_act = '.../outmaps.nc'
        states_act = nc.Dataset(states_act)
        states_act = states_act['ust_0_'][:]
        states_act = np.ma.getdata(states_act)
        states[i] = states_act
        
        q_modeled = pd.read_csv('.../run.csv')

        for gauge in gauge_ids:
            q_val_modeled[gauge].append(q_modeled.loc[0][gauge])
        date_list.append(print_time)
    
    q_val_modeled_df = pd.DataFrame(q_val_modeled, columns=gauge_ids)
    q_val_modeled_df['date'] = date_list

    np.savetxt('.../run_all.csv', q_val_modeled_df, delimiter=',',
               header=','.join(gauge_ids + ['date']), comments='', fmt='%s')
    
    np.save('.../statefile.npy', states)
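
# Hypothetical invocation of the loop above (paths such as wflow_sbm/Nahe and
# saved_models/*.h5 must exist, and state_file/features_convlstm_test must be
# loaded beforehand):
# data_assimilation('2016-01-01', 30)                                  # open-loop run
# data_assimilation('2016-01-01', 30, perform_DA=True, ConvLSTM_arch='stacked_2')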