예제 #1
0
def Calc_surface_runoff(Dir_Basin, nc_outname, Startdate, Enddate,
                        Example_dataset, ETref_Product, P_Product):
    """
    Calculate monthly surface runoff as precipitation minus green
    evapotranspiration, clipped at zero (no negative runoff).

    Parameters
    ----------
    Dir_Basin : str
        Basin working directory, forwarded to the ET blue/green splitter.
    nc_outname : str
        Path to the NetCDF file containing the model variables.
    Startdate, Enddate : str
        Period of interest, 'yyyy-mm-dd'.
    Example_dataset : str
        Unused here; kept for interface compatibility with sibling functions.
    ETref_Product, P_Product : str
        Product names forwarded to the ET blue/green splitter.

    Returns
    -------
    numpy.ndarray
        Surface runoff [time, lat, lon], never negative.
    """
    from netCDF4 import Dataset

    import watools.Functions.Four as Four
    import watools.General.raster_conversions as RC

    # List the variables already present in the NetCDF file
    fh = Dataset(nc_outname)
    Variables_NC = [var for var in fh.variables]
    fh.close()

    # Open or calculate Green Evapotranspiration
    if "Green_Evapotranspiration" not in Variables_NC:
        # Split ET into blue and green; only the green part is needed here
        _, DataCube_ETgreen = Four.SplitET.Blue_Green(
            Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate,
            Enddate)
    else:
        DataCube_ETgreen = RC.Open_nc_array(nc_outname,
                                            "Green_Evapotranspiration",
                                            Startdate, Enddate)

    # Open rainfall data
    DataCube_P = RC.Open_nc_array(nc_outname, "Precipitation", Startdate,
                                  Enddate)

    # Runoff = P - ETgreen; negative values are physically meaningless
    DataCube_surface_runoff = DataCube_P - DataCube_ETgreen
    DataCube_surface_runoff[DataCube_surface_runoff < 0] = 0

    return DataCube_surface_runoff
예제 #2
0
def Fraction_Based(nc_outname, Startdate, Enddate):
    """
    Split the total supply into surface water and groundwater components,
    using the per-landuse-class surface-water supply fractions from the
    Get_Dictionaries script.

    Parameters
    ----------
    nc_outname : str
        Path to the NetCDF containing the "Landuse" and "Total_Supply" data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'

    Returns
    -------
    DataCube_SW_Parameter : Array
        Array containing the surface-water part of the supply [time,lat,lon]
    DataCube_GW_Parameter : Array
        Array containing the groundwater part of the supply [time,lat,lon]
    """

    # import water accounting plus modules
    import watools.General.raster_conversions as RC
    import watools.Functions.Start as Start

    # import general modules
    import numpy as np

    # Open Arrays
    DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse")
    DataCube_Parameter = RC.Open_nc_array(nc_outname, "Total_Supply",
                                          Startdate, Enddate)

    # Get the landuse values grouped per sheet-5 category
    LU_Classes = Start.Get_Dictionaries.get_sheet5_classes()

    # Get the surface-water supply fraction per category
    sw_supply_dict = Start.Get_Dictionaries.sw_supply_fractions()

    # Map every landuse value to its surface-water fraction
    # (NaN where the landuse value belongs to no known category)
    DataCube_Parameter_Fractions = np.ones(DataCube_LU.shape) * np.nan
    for Class_Name, Values_LULC in LU_Classes.items():
        for Value_LULC in Values_LULC:
            DataCube_Parameter_Fractions[
                DataCube_LU == Value_LULC] = sw_supply_dict[Class_Name]

    # Split the supply: SW = fraction * total, GW = remainder
    DataCube_SW_Parameter = DataCube_Parameter[:, :, :] * \
        DataCube_Parameter_Fractions[None, :, :]
    DataCube_GW_Parameter = DataCube_Parameter - DataCube_SW_Parameter

    return (DataCube_SW_Parameter, DataCube_GW_Parameter)
예제 #3
0
def Fraction_Based(nc_outname, Startdate, Enddate):
    """
    Derive the monthly total supply from blue evapotranspiration using the
    consumed fractions defined in the Get_Dictionaries script.

    Parameters
    ----------
    nc_outname : str
        Path to the NetCDF containing the data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'

    Returns
    -------
    DataCube_Tot_Sup : Array
        Array containing the total supply [time,lat,lon]
    DataCube_Non_Consumed : Array
        Array containing the amount of non consumed water [time,lat,lon]
    """
    # water accounting plus modules
    import watools.General.raster_conversions as RC
    import watools.Functions.Start as Start

    # general modules
    import numpy as np

    # Load the landuse map and the blue-ET stack for the requested period
    lu_map = RC.Open_nc_array(nc_outname, "Landuse")
    et_blue = RC.Open_nc_array(nc_outname, "Blue_Evapotranspiration",
                               Startdate, Enddate)

    # Sheet-5 landuse grouping and the consumed fraction per group
    sheet5_classes = Start.Get_Dictionaries.get_sheet5_classes()
    consumed_fractions_dict = Start.Get_Dictionaries.consumed_fractions()

    # Build a raster of consumed fractions (NaN where the class is unknown)
    consumed_frac_map = np.ones(lu_map.shape) * np.nan
    for class_name in sheet5_classes.keys():
        for lu_value in sheet5_classes[class_name]:
            consumed_frac_map[lu_map == lu_value] = \
                consumed_fractions_dict[class_name]

    # Total supply is the blue ET scaled up by the consumed fraction
    total_supply = et_blue[:, :, :] / consumed_frac_map[None, :, :]

    # Whatever was supplied but not consumed
    non_consumed = total_supply - et_blue

    return (total_supply, non_consumed)
예제 #4
0
def Run(input_nc, output_nc, input_JRC):
    """
    Estimate the monthly storage change of every reservoir region and
    incorporate the reservoirs into the routed discharge, river, DEM and
    distance dictionaries.

    Returns
    -------
    tuple
        (Discharge_dict_CR2, River_dict_CR2, DEM_dict_CR2,
        Distance_dict_CR2) with the reservoirs added.
    """
    import watools.General.raster_conversions as RC
    import numpy as np
    from datetime import date

    # Dynamic discharge dictionary produced by the routing step
    Discharge_dict_CR2 = RC.Open_nc_dict(output_nc, "dischargedict_dynamic")

    DEM_dataset = RC.Open_nc_array(input_nc, "dem")
    time = RC.Open_nc_array(output_nc, "time")

    # First and last timestep (time is stored as ordinal day numbers)
    Startdate = date.fromordinal(time[0])
    Enddate = date.fromordinal(time[-1])

    # Geographic extent of the JRC water-occurrence raster
    geo_out, proj, size_X, size_Y = RC.Open_array_info(input_JRC)
    Boundaries = {
        'Lonmin': geo_out[0],
        'Lonmax': geo_out[0] + size_X * geo_out[1],
        'Latmin': geo_out[3] + size_Y * geo_out[5],
        'Latmax': geo_out[3],
    }

    # Detect the reservoir regions inside the basin
    Regions = Calc_Regions(input_nc, output_nc, input_JRC, Boundaries)

    Amount_months = len(Discharge_dict_CR2[0])
    Diff_Water_Volume = np.zeros([len(Regions), Amount_months, 3])

    # Per reservoir: fit an area-volume relation, derive the monthly water
    # area, and convert that area to a monthly storage change
    for reservoir, region in enumerate(Regions):
        popt = Find_Area_Volume_Relation(region, input_JRC, input_nc)
        Area_Reservoir_Values = GEE_calc_reservoir_area(
            region, Startdate, Enddate)
        Diff_Water_Volume[reservoir, :, :] = Calc_Diff_Storage(
            Area_Reservoir_Values, popt)

    ################# 7.3 Add storage reservoirs and change outflows ##################
    Discharge_dict_CR2, River_dict_CR2, DEM_dict_CR2, Distance_dict_CR2 = Add_Reservoirs(
        output_nc, Diff_Water_Volume, Regions)
    return (Discharge_dict_CR2, River_dict_CR2, DEM_dict_CR2,
            Distance_dict_CR2)
def GWF_Based(nc_outname, Startdate, Enddate):
    """
    Split the non-consumed flow into a non-recovable and a recovable part
    using the grey water footprint fractions on manmade landuse pixels.

    Parameters
    ----------
    nc_outname : str
        Path to the NetCDF containing the data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'

    Returns
    -------
    DataCube_NonRecovableFlow : Array
        Array containing the non recovable flow [time,lat,lon]
    DataCube_RecovableFlow : Array
        Array containing the recovable flow [time,lat,lon]
    """
    # water accounting plus modules
    import watools.General.raster_conversions as RC
    import watools.Functions.Start as Start

    # general python modules
    import numpy as np

    # Load grey water footprint, landuse and the non-consumed water stack
    gwf_map = RC.Open_nc_array(nc_outname, "Grey_Water_Footprint")
    lu_map = RC.Open_nc_array(nc_outname, "Landuse")
    non_consumed = RC.Open_nc_array(nc_outname, "Non_Consumed_Water",
                                    Startdate, Enddate)

    # LULC categories treated as manmade
    manmade_classes = ['Irrigated crops', 'Managed water bodies',
                       'Aquaculture', 'Residential', 'Greenhouses', 'Other']

    # Sheet-5 grouping of landuse values per category
    sheet5_classes = Start.Get_Dictionaries.get_sheet5_classes()

    # GWF mask: the GWF value on manmade pixels, zero everywhere else
    gwf_mask = np.zeros(lu_map.shape)
    for manmade_class in manmade_classes:
        for lu_value in sheet5_classes[manmade_class]:
            pixels = lu_map == lu_value
            gwf_mask[pixels] = gwf_map[pixels]

    # Non-recovable flow is the GWF share of the non-consumed water
    non_recovable = non_consumed[:, :, :] * gwf_mask[None, :, :]
    recovable = non_consumed - non_recovable

    return (non_recovable, recovable)
예제 #6
0
def Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, nc_outname, output_dir):
    """
    Fill the irrigated and rainfed WP/Y dictionaries with seasonal results
    for every crop calendar whose landuse class occurs in the basin.

    Returns
    -------
    tuple
        The updated (wp_y_irrigated_dictionary, wp_y_rainfed_dictionary).
    """
    import os
    import watools.General.raster_conversions as RC
    import watools.Functions.Three as Three

    # Landuse map of the basin
    lulc = RC.Open_nc_array(nc_outname, "Landuse")

    # Directory holding the yearly NetCDF files
    nc_dir = os.path.dirname(nc_outname)

    # Output folder for the yearly WP/Y csv files
    csv_dir = os.path.join(output_dir, 'WP_Y_Yearly_csvs')

    # Loop over the crop calendars
    for crop in dict_crops['crops']:

        # Skip crops whose landuse class is absent from the LULC map
        if crop[4] not in lulc:
            print("skipping crop with lu-class {0}, not on LU-map".format(crop[4]))
            continue

        # Growing-season start/end dates from the cropping calendar file
        start_dates, end_dates = import_growing_seasons(crop[0])
        result_seasonly = Three.Calc_Y_WP.Seasons(
            start_dates, end_dates, nc_dir, crop[4], crop[1],
            csv_dir, ab=(1.0, 0.9))

        result = Three.Calc_Y_WP.Create_WP_Y_CSV(result_seasonly, csv_dir, crop[1])

        # Landuse classes above 50 go to the irrigated dictionary,
        # the rest to the rainfed dictionary
        if crop[4] > 50:
            wp_y_irrigated_dictionary[crop[2]][crop[3]] = result
        else:
            wp_y_rainfed_dictionary[crop[2]][crop[3]] = result

    return (wp_y_irrigated_dictionary, wp_y_rainfed_dictionary)
예제 #7
0
def run(input_nc, Inflow_Text_Files):
    '''
    Add external inflows to the runoff dataset before the channel routing.

    Each inflow is a text file whose first line holds the coordinates of the
    inlet and whose remaining lines hold the time (ordinal day number) and
    the inflow (m3/month), separated by one space. Example:

    lat lon
    733042 156225.12
    733073 32511321.2
    733102 212315.25
    733133 2313266.554

    Parameters
    ----------
    input_nc : str
        Path to the NetCDF file with the 'Runoff_M' variable.
    Inflow_Text_Files : list of str
        Paths of the inflow text files described above.

    Returns
    -------
    numpy.ndarray
        The runoff stack [time, lat, lon] with the inflows added.
    '''
    # General modules
    import numpy as np

    # Water Accounting modules
    import watools.General.raster_conversions as RC
    import watools.Functions.Start.Area_converter as Area

    # Runoff stack [time, lat, lon]; the mm conversion below implies it is
    # stored in mm/month — confirm against the writer of 'Runoff_M'
    Runoff = RC.Open_nc_array(input_nc, Var='Runoff_M')

    # Open geo information and the time axis of the NetCDF file
    geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(input_nc)

    # Calculate the surface area of every pixel in m2
    dlat, dlon = Area.Calc_dlat_dlon(geo_out, size_X, size_Y)
    area_in_m2 = dlat * dlon

    for Inflow_Text_File in Inflow_Text_Files:

        # Open the inlet text data
        Inlet = np.genfromtxt(Inflow_Text_File, dtype=None, delimiter=" ")

        # Read out the coordinates of the inlet (first row of the file)
        # NOTE(review): the documented header order is "lat lon", yet
        # column 0 is used as longitude below — confirm the expected
        # column order of the inflow files.
        Coord = Inlet[0, :]
        Lon_coord = Coord[0]
        Lat_coord = Coord[1]

        # Search for the pixel containing the inlet coordinate
        lon_pix = int(np.ceil((float(Lon_coord) - geo_out[0]) / geo_out[1]))
        lat_pix = int(np.ceil((float(Lat_coord) - geo_out[3]) / geo_out[5]))

        # Add the value on top of the Runoff array
        for i in range(1, len(Inlet)):
            time = float(Inlet[i, 0])
            # Time >= t and Time <= t together select the exact timestep
            time_step = np.argwhere(np.logical_and(Time >= time, Time <= time))
            if len(time_step) > 0:
                time_step_array = int(time_step[0][0])
                value_m3_month = float(Inlet[i, 1])
                area_in_m2_pixel = area_in_m2[lat_pix, lon_pix]
                # Convert m3/month to mm over the pixel area
                value_mm = (value_m3_month / area_in_m2_pixel) * 1000
                Runoff[time_step_array, lat_pix,
                       lon_pix] = Runoff[time_step_array, lat_pix,
                                         lon_pix] + value_mm
    return (Runoff)
예제 #8
0
def Calc_surface_withdrawal(Dir_Basin, nc_outname, Startdate, Enddate,
                            Example_dataset, ETref_Product, P_Product):
    """
    Calculate the monthly surface-water withdrawal as the blue
    evapotranspiration scaled by the fraction of the supply that
    originates from surface water.

    Parameters
    ----------
    Dir_Basin : str
        Basin working directory, forwarded to the ET blue/green splitter.
    nc_outname : str
        Path to the NetCDF file containing the model variables.
    Startdate, Enddate : str
        Period of interest, 'yyyy-mm-dd'.
    Example_dataset : str
        Path of an example raster; opened only to validate it is readable.
    ETref_Product, P_Product : str
        Product names forwarded to the ET blue/green splitter.

    Returns
    -------
    numpy.ndarray
        Surface withdrawal [time, lat, lon].
    """
    from netCDF4 import Dataset

    import watools.Functions.Four as Four
    import watools.General.raster_conversions as RC

    # List the variables already present in the NetCDF file
    fh = Dataset(nc_outname)
    Variables_NC = [var for var in fh.variables]
    fh.close()

    # Open or calculate Blue Evapotranspiration
    if "Blue_Evapotranspiration" not in Variables_NC:
        # Split ET into blue and green; only the blue part is needed here
        DataCube_ETblue, _ = Four.SplitET.Blue_Green(
            Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate,
            Enddate)
    else:
        DataCube_ETblue = RC.Open_nc_array(nc_outname,
                                           "Blue_Evapotranspiration",
                                           Startdate, Enddate)

    # Read the example raster's metadata; the returned values were unused,
    # so only the call is kept — an invalid Example_dataset still fails here
    RC.Open_array_info(Example_dataset)

    # Fraction of the supply taken from surface water (static 2D map)
    DataCube_frac_sw = RC.Open_nc_array(nc_outname,
                                        "Fraction_Surface_Water_Supply")

    # Blue ET attributed to surface-water abstraction
    DataCube_surface_withdrawal = DataCube_ETblue * DataCube_frac_sw[
        None, :, :]

    return DataCube_surface_withdrawal
예제 #9
0
def main(files_DEM_dir, files_DEM, files_Basin, files_Runoff, files_Extraction, startdate, enddate, input_nc, resolution, Format_DEM_dir, Format_DEM, Format_Basin, Format_Runoff, Format_Extraction):
    """
    Build the routing input NetCDF file by collecting the drainage
    direction, DEM, basin mask, pixel area, surface runoff and surface
    withdrawal datasets, reprojecting everything onto the grid of the
    drainage-direction example dataset, and writing the result to one file.

    Parameters
    ----------
    files_DEM_dir, files_DEM, files_Basin, files_Runoff, files_Extraction : str
        Directory (or file path, for the TIFF basin case) per input dataset.
    startdate, enddate : str
        Period of interest, 'yyyy-mm-dd'.
    input_nc : str
        Path of the NetCDF file that will be created.
    resolution : str
        Resolution tag used in the HydroSHED filenames.
    Format_DEM_dir, Format_DEM, Format_Basin, Format_Runoff, Format_Extraction : str
        Either "NetCDF" or "TIFF" per input dataset.

    Returns
    -------
    tuple
        Empty tuple; the result is the NetCDF file written to input_nc.

    NOTE(review): this function relies on module-level imports that are not
    visible in this view (os, pd, np, RC, DC, netCDF4) — confirm they exist
    at the top of the file. If a Format_* argument matches neither "NetCDF"
    nor "TIFF", later references (e.g. file_name_DEM_dir, DataCube_LU_CR)
    raise NameError.
    """

    # Define a year to get the epsg and geo
    Startdate_timestamp = pd.Timestamp(startdate)
    year = Startdate_timestamp.year

    ############################## Drainage Direction #####################################

    # Open Array DEM dir as netCDF
    if Format_DEM_dir == "NetCDF":
        file_DEM_dir = os.path.join(files_DEM_dir, "%d.nc" %year)
        DataCube_DEM_dir = RC.Open_nc_array(file_DEM_dir, "Drainage_Direction")
        geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(files_DEM_dir)

        # Create memory file for reprojection
        gland = DC.Save_as_MEM(DataCube_DEM_dir, geo_out_example, epsg_example)
        # Chained assignment: the in-memory dataset serves both as the
        # reprojection example and as the source for the area calculation
        dataset_example = file_name_DEM_dir = gland

    # Open Array DEM dir as TIFF
    if Format_DEM_dir == "TIFF":
        file_name_DEM_dir = os.path.join(files_DEM_dir,"DIR_HydroShed_-_%s.tif" %resolution)
        DataCube_DEM_dir = RC.Open_tiff_array(file_name_DEM_dir)
        geo_out_example, epsg_example, size_X_example, size_Y_example = RC.Open_array_info(file_name_DEM_dir)
        dataset_example = file_name_DEM_dir

    # Calculate Area per pixel in m2
    import watools.Functions.Start.Area_converter as AC
    DataCube_Area = AC.Degrees_to_m2(file_name_DEM_dir)

    ################################## DEM ##########################################

    # Open Array DEM as netCDF
    if Format_DEM == "NetCDF":
        file_DEM = os.path.join(files_DEM, "%d.nc" %year)
        DataCube_DEM = RC.Open_nc_array(file_DEM, "Elevation")

    # Open Array DEM as TIFF
    if Format_DEM == "TIFF":
        file_name_DEM = os.path.join(files_DEM,"DEM_HydroShed_m_%s.tif" %resolution)
        destDEM = RC.reproject_dataset_example(file_name_DEM, dataset_example, method=1)
        DataCube_DEM = destDEM.GetRasterBand(1).ReadAsArray()

    ################################ Landuse ##########################################

    # Open Array Basin as netCDF
    if Format_Basin == "NetCDF":
        file_Basin = os.path.join(files_Basin, "%d.nc" %year)
        DataCube_Basin = RC.Open_nc_array(file_Basin, "Landuse")
        geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(file_Basin, "Landuse")
        dest_basin = DC.Save_as_MEM(DataCube_Basin, geo_out, str(epsg))
        destLU = RC.reproject_dataset_example(dest_basin, dataset_example, method=1)
        DataCube_LU_CR = destLU.GetRasterBand(1).ReadAsArray()
        # Binary basin mask: 1 inside the basin (landuse > 0), 0 outside
        DataCube_Basin = np.zeros([size_Y_example, size_X_example])
        DataCube_Basin[DataCube_LU_CR > 0] = 1

    # Open Array Basin as TIFF
    if Format_Basin == "TIFF":
        file_name_Basin = files_Basin
        destLU = RC.reproject_dataset_example(file_name_Basin, dataset_example, method=1)
        DataCube_LU_CR = destLU.GetRasterBand(1).ReadAsArray()
        DataCube_Basin = np.zeros([size_Y_example, size_X_example])
        DataCube_Basin[DataCube_LU_CR > 0] = 1

    ################################ Surface Runoff ##########################################

    # Open Array runoff as netCDF
    if Format_Runoff == "NetCDF":
        DataCube_Runoff = RC.Open_ncs_array(files_Runoff, "Surface_Runoff", startdate, enddate)
        size_Z_example = DataCube_Runoff.shape[0]
        file_Runoff = os.path.join(files_Runoff, "%d.nc" %year)
        geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(file_Runoff, "Surface_Runoff")
        # Reproject every timestep onto the example grid
        DataCube_Runoff_CR = np.ones([size_Z_example, size_Y_example, size_X_example]) * np.nan
        for i in range(0, size_Z):
            DataCube_Runoff_one = DataCube_Runoff[i,:,:]
            dest_Runoff_one = DC.Save_as_MEM(DataCube_Runoff_one, geo_out, str(epsg))
            dest_Runoff = RC.reproject_dataset_example(dest_Runoff_one, dataset_example, method=4)
            DataCube_Runoff_CR[i,:,:] = dest_Runoff.GetRasterBand(1).ReadAsArray()

        # Mask pixels outside the basin and negative values with the fill value
        DataCube_Runoff_CR[:, DataCube_LU_CR == 0] = -9999
        DataCube_Runoff_CR[DataCube_Runoff_CR < 0] = -9999

    # Open Array runoff as TIFF
    if Format_Runoff == "TIFF":
        DataCube_Runoff_CR = RC.Get3Darray_time_series_monthly(files_Runoff, startdate, enddate, Example_data = dataset_example)

    ################################ Surface Withdrawal ##########################################

    # Open Array Extraction as netCDF
    if Format_Extraction == "NetCDF":
        DataCube_Extraction = RC.Open_ncs_array(files_Extraction, "Surface_Withdrawal", startdate, enddate)
        size_Z_example = DataCube_Extraction.shape[0]
        file_Extraction = os.path.join(files_Extraction, "%d.nc" %year)
        geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(file_Extraction, "Surface_Withdrawal")
        # Reproject every timestep onto the example grid
        DataCube_Extraction_CR = np.ones([size_Z_example, size_Y_example, size_X_example]) * np.nan
        for i in range(0, size_Z):
            DataCube_Extraction_one = DataCube_Extraction[i,:,:]
            dest_Extraction_one = DC.Save_as_MEM(DataCube_Extraction_one, geo_out, str(epsg))
            dest_Extraction = RC.reproject_dataset_example(dest_Extraction_one, dataset_example, method=4)
            DataCube_Extraction_CR[i,:,:] = dest_Extraction.GetRasterBand(1).ReadAsArray()

        # Mask pixels outside the basin and negative values with the fill value
        DataCube_Extraction_CR[:, DataCube_LU_CR == 0] = -9999
        DataCube_Extraction_CR[DataCube_Extraction_CR < 0] = -9999

    # Open Array Extraction as TIFF
    if Format_Extraction == "TIFF":
        DataCube_Extraction_CR = RC.Get3Darray_time_series_monthly(files_Extraction, startdate, enddate, Example_data = dataset_example)

    ################################ Create input netcdf ##########################################
    # Save data in one NetCDF file
    geo_out_example = np.array(geo_out_example)

    # Latitude and longitude (pixel centers, hence the half-pixel offset)
    lon_ls = np.arange(size_X_example)*geo_out_example[1]+geo_out_example[0] + 0.5 * geo_out_example[1]
    lat_ls = np.arange(size_Y_example)*geo_out_example[5]+geo_out_example[3] - 0.5 * geo_out_example[5]

    lat_n = len(lat_ls)
    lon_n = len(lon_ls)

    # Create NetCDF file
    nc_file = netCDF4.Dataset(input_nc, 'w')
    nc_file.set_fill_on()

    # Create dimensions
    lat_dim = nc_file.createDimension('latitude', lat_n)
    lon_dim = nc_file.createDimension('longitude', lon_n)

    # Create NetCDF variables
    crso = nc_file.createVariable('crs', 'i4')
    crso.long_name = 'Lon/Lat Coords in WGS84'
    crso.standard_name = 'crs'
    crso.grid_mapping_name = 'latitude_longitude'
    crso.projection = epsg_example
    crso.longitude_of_prime_meridian = 0.0
    crso.semi_major_axis = 6378137.0
    crso.inverse_flattening = 298.257223563
    crso.geo_reference = geo_out_example

    lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
    lat_var.units = 'degrees_north'
    lat_var.standard_name = 'latitude'
    lat_var.pixel_size = geo_out_example[5]

    lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
    lon_var.units = 'degrees_east'
    lon_var.standard_name = 'longitude'
    lon_var.pixel_size = geo_out_example[1]

    # Time axis: one entry per month start, stored as ordinal day numbers
    Dates = pd.date_range(startdate,enddate,freq = 'MS')
    time_or=np.zeros(len(Dates))
    i = 0
    for Date in Dates:
        time_or[i] = Date.toordinal()
        i += 1
    nc_file.createDimension('time', None)
    timeo = nc_file.createVariable('time', 'f4', ('time',))
    # NOTE(review): 'Monthly' is not a CF-compliant time unit string —
    # confirm downstream readers accept it
    timeo.units = 'Monthly'
    timeo.standard_name = 'time'

    # Variables
    demdir_var = nc_file.createVariable('demdir', 'i',
                                           ('latitude', 'longitude'),
                                           fill_value=-9999)
    demdir_var.long_name = 'Flow Direction Map'
    demdir_var.grid_mapping = 'crs'

    dem_var = nc_file.createVariable('dem', 'f8',
                                           ('latitude', 'longitude'),
                                           fill_value=-9999)
    dem_var.long_name = 'Altitude'
    dem_var.units = 'meters'
    dem_var.grid_mapping = 'crs'

    basin_var = nc_file.createVariable('basin', 'i',
                                           ('latitude', 'longitude'),
                                           fill_value=-9999)
    # NOTE(review): long_name/units look copy-pasted from the DEM variable;
    # 'basin' holds a 0/1 mask, not altitude — confirm and correct upstream
    basin_var.long_name = 'Altitude'
    basin_var.units = 'meters'
    basin_var.grid_mapping = 'crs'

    area_var = nc_file.createVariable('area', 'f8',
                                           ('latitude', 'longitude'),
                                           fill_value=-9999)
    area_var.long_name = 'area in squared meters'
    area_var.units = 'squared_meters'
    area_var.grid_mapping = 'crs'

    runoff_var = nc_file.createVariable('Runoff_M', 'f8',
                                   ('time', 'latitude', 'longitude'),
                                   fill_value=-9999)
    runoff_var.long_name = 'Runoff'
    runoff_var.units = 'm3/month'
    runoff_var.grid_mapping = 'crs'

    extraction_var = nc_file.createVariable('Extraction_M', 'f8',
                                    ('time', 'latitude', 'longitude'),
                                    fill_value=-9999)
    extraction_var.long_name = 'Surface water Extraction'
    extraction_var.units = 'm3/month'
    extraction_var.grid_mapping = 'crs'


    # Load data
    lat_var[:] = lat_ls
    lon_var[:] = lon_ls
    timeo[:] = time_or

    # Static variables
    demdir_var[:, :] = DataCube_DEM_dir[:, :]
    dem_var[:, :] = DataCube_DEM[:, :]
    basin_var[:, :] = DataCube_Basin[:, :]
    area_var[:, :] = DataCube_Area[:, :]
    # Time-dependent variables, written one timestep at a time
    for i in range(len(Dates)):
        runoff_var[i,:,:] = DataCube_Runoff_CR[i,:,:]
    for i in range(len(Dates)):
        extraction_var[i,:,:] = DataCube_Extraction_CR[i,:,:]

    # Close file
    nc_file.close()
    return()
예제 #10
0
파일: Calc_Y_WP.py 프로젝트: CMicha/watools
def Season(startdate,
           enddate,
           dir_nc_outname,
           lu_class,
           croptype,
           ab=(1.0, 0.9)):
    """
    Calculate Yields and WPs for one season.

    Parameters
    ----------
    startdate : object
        datetime.date object specifying the startdate of the growing season.
    enddate : object
        datetime.date object specifying the enddate of the growing season.
    dir_nc_outname : str
        Directory containing the yearly NetCDF files with all the data.
    lu_class : int
        Landuseclass for which to calculate Y and WP.
    croptype : str
        Name of croptype, should be present in HIWC_dict.keys(); the HIWC
        dictionary (Harvest indices and Water Contents) is loaded internally
        via get_dictionaries.get_hi_and_ec().
    ab : tuple, optional
        Two parameters used to split Yield into irrigation and precipitation yield, see split_Yield.

    Returns
    -------
    Yield_Ave_Value : float
        The yield for the croptype.
    Yield_pr_Ave_Value : float
        The yield_precip for the croptype.
    Yield_irr_Ave_Value : float
        The yield_irri for the croptype.
    WP_Ave_Value : float
        The waterproductivity for the croptype.
    WPblue_Ave_Value : float
        The blue waterproductivity for the croptype.
    WPgreen_Ave_Value : float
        The green waterproductivity for the croptype.
    WC_Ave_Value : float
        The water consumption for the croptype.
    WCblue_Ave_Value : float
        The blue water consumption for the croptype.
    WCgreen_Ave_Value : float
        The green water consumption for the croptype.
    """

    import watools.Functions.Three as Three
    import watools.Functions.Start.Get_Dictionaries as GD
    import watools.General.raster_conversions as RC

    # NOTE(review): relies on module-level imports not visible in this view
    # (datetime, relativedelta, os, np, calendar, watools) — confirm they
    # exist at the top of the file.

    # Open the HIWC dict
    HIWC_dict = GD.get_hi_and_ec()

    # Get Harvest Index and Moisture content for a specific crop
    harvest_index = HIWC_dict[croptype][0]
    moisture_content = HIWC_dict[croptype][1]

    # Get the start and enddate current season
    current = datetime.date(startdate.year, startdate.month, 1)
    end_month = datetime.date(enddate.year, enddate.month, 1)

    # Build the array of month-start dates covered by the season
    req_dates = np.array([current])
    while current < end_month:
        current = current + relativedelta(months=1)
        req_dates = np.append(req_dates, current)

    # Define input one nc file
    nc_outname_start = os.path.join(dir_nc_outname,
                                    "%d.nc" % (int(startdate.year)))
    nc_outname_end = os.path.join(dir_nc_outname,
                                  "%d.nc" % (int(enddate.year)))

    # Skip the season when the yearly input files are missing
    # NOTE(review): with `or`, the season is skipped only when BOTH yearly
    # files are absent; one missing year is tolerated here — confirm `and`
    # was not intended.
    if not (os.path.exists(nc_outname_start)
            or os.path.exists(nc_outname_end)):
        date = req_dates[0]
        print("{0} missing in input data, skipping this season".format(date))
        Yield_Ave_Value = Yield_pr_Ave_Value = Yield_irr_Ave_Value = WP_Ave_Value = WPblue_Ave_Value = WPgreen_Ave_Value = WC_Ave_Value = WCblue_Ave_Value = WCgreen_Ave_Value = np.nan

    else:

        # Calculate the monthly fraction (if season is not whithin the whole month)
        fractions = np.ones(np.shape(req_dates))

        # The get the start month and end month fraction and report those to fraction
        start_month_length = float(
            calendar.monthrange(startdate.year, startdate.month)[1])
        end_month_length = float(
            calendar.monthrange(enddate.year, enddate.month)[1])

        fractions[0] = (start_month_length - startdate.day +
                        1) / start_month_length
        # NOTE(review): evaluates to 0 for the last month when
        # enddate.day == 1 — confirm this is the intended convention
        fractions[-1] = (enddate.day - 1) / end_month_length

        # Get total sum NDM over the growing season
        NDM_array = RC.Open_ncs_array(dir_nc_outname, "Normalized_Dry_Matter",
                                      startdate.replace(day=1), enddate)
        NDM = np.nansum(NDM_array * fractions[:, None, None], axis=0)
        del NDM_array

        # Get total sum ET green over the growing season
        ETgreen_array = RC.Open_ncs_array(dir_nc_outname,
                                          "Green_Evapotranspiration",
                                          startdate.replace(day=1), enddate)
        ETgreen = np.nansum(ETgreen_array * fractions[:, None, None], axis=0)
        del ETgreen_array

        # Get total sum ET blue over the growing season
        ETblue_array = RC.Open_ncs_array(dir_nc_outname,
                                         "Blue_Evapotranspiration",
                                         startdate.replace(day=1), enddate)
        ETblue = np.nansum(ETblue_array * fractions[:, None, None], axis=0)
        del ETblue_array

        # Get total sum Precipitation over the growing season
        P_array = RC.Open_ncs_array(dir_nc_outname, "Precipitation",
                                    startdate.replace(day=1), enddate)
        P = np.nansum(P_array * fractions[:, None, None], axis=0)
        del P_array

        # Open Landuse map
        LULC = RC.Open_nc_array(nc_outname_start, "Landuse")

        # only select the pixels for this Landuse class
        NDM[NDM == 0] = np.nan
        # Chained assignment: NaN-out NDM, ETblue and ETgreen outside lu_class
        NDM[LULC != lu_class] = ETblue[LULC != lu_class] = ETgreen[
            LULC != lu_class] = np.nan

        # Calculate Yield
        Y_Array = (harvest_index * NDM) / (1 - moisture_content)

        # Calculate fractions of ETblue and green and blue Yield
        # (yields NaN/inf where ETblue + ETgreen == 0)
        ETblue_fraction = ETblue / (ETblue + ETgreen)
        p_fraction = P / np.nanmax(P)
        fraction = Three.SplitYield.P_ET_based(p_fraction, ETblue_fraction,
                                               ab[0], ab[1])

        # Calculate yield from irrigation and precipitation
        Yirr_Array = Y_Array * fraction
        Ypr_Array = Y_Array - Yirr_Array
        '''
        if output_dir:
            x = y = np.arange(0.0, 1.1, 0.1)
            XX, YY = np.meshgrid(x, y)
            Z = split_Yield(XX,YY, ab[0], ab[1])
            plt.figure(1, figsize = (12,10))
            plt.clf()
            cmap = LinearSegmentedColormap.from_list('mycmap', ['#6bb8cc','#a3db76','#d98d8e'])
            plt.contourf(XX,YY,Z,np.arange(0.0,1.1,0.1), cmap = cmap)
            plt.colorbar(ticks = np.arange(0.0,1.1,0.1), label= 'Yirr as fraction of total Y [-]', boundaries = [0,1])
            plt.xlabel('Normalized Precipitation [-]')
            plt.ylabel('ETblue/ET [-]')
            plt.title('Split Yield into Yirr and Ypr')
            plt.suptitle('Z(X,Y) = -(((Y-1) * a)^2 - ((X-1) * b)^2) + 0.5 with a = {0:.2f} and b = {1:.2f}'.format(ab[0],ab[1]))
            plt.scatter(pfraction, etbfraction, color = 'w', label = croptype, edgecolors = 'k')
            plt.legend()
            plt.xlim((0,1))
            plt.ylim((0,1))
            plt.savefig(os.path.join(output_dir, '{0}_{1}_{2}_cloud.png'.format(croptype, req_dates[0], req_dates[-1])))
        '''

        # calculate average Yields
        Yield_Ave_Value = np.nanmean(Y_Array)
        Yield_pr_Ave_Value = np.nanmean(Ypr_Array)
        Yield_irr_Ave_Value = np.nanmean(Yirr_Array)

        # calculate average blue and green ET
        ETblue_Ave_Value = np.nanmean(ETblue)
        ETgreen_Ave_Value = np.nanmean(ETgreen)

        # Calculate Areas for one pixel
        areas_m2 = watools.Functions.Start.Area_converter.Degrees_to_m2(
            nc_outname_start)

        # Calculate the total area in km2
        areas_m2[LULC != lu_class] = np.nan
        areas_km2 = areas_m2 / 1000**2
        print('{0}: {1} km2'.format(croptype, np.nansum(areas_km2)))

        # Calculate the Water consumpution in km3
        WCblue_Ave_Value = np.nansum(ETblue_Ave_Value / (1000**2) * areas_km2)
        WCgreen_Ave_Value = np.nansum(ETgreen_Ave_Value / (1000**2) *
                                      areas_km2)
        WC_Ave_Value = WCblue_Ave_Value + WCgreen_Ave_Value

        # Calculate water productivity
        WP_Ave_Value = Yield_Ave_Value / (
            (ETblue_Ave_Value + ETgreen_Ave_Value) * 10)
        # Guard against division by zero: WP is NaN where the average ET is 0
        WPblue_Ave_Value = np.where(
            ETblue_Ave_Value == 0, [np.nan],
            [Yield_irr_Ave_Value / (ETblue_Ave_Value * 10)])[0]
        WPgreen_Ave_Value = np.where(
            ETgreen_Ave_Value == 0, [np.nan],
            [Yield_pr_Ave_Value / (ETgreen_Ave_Value * 10)])[0]

    return Yield_Ave_Value, Yield_pr_Ave_Value, Yield_irr_Ave_Value, WP_Ave_Value, WPblue_Ave_Value, WPgreen_Ave_Value, WC_Ave_Value, WCblue_Ave_Value, WCgreen_Ave_Value
def Run(input_nc, output_nc):
    """
    Trace the river network and build per-branch routing dictionaries.

    Builds from/to pixel-ID relations out of the HydroSHED flow directions,
    finds basin outlet points, walks every river branch upstream, and
    collects per branch: the pixel IDs, cumulative flow distances,
    monotonised DEM values and the routed discharge time series.

    Parameters
    ----------
    input_nc : str
        Path to the input NetCDF containing 'demdir', 'basin', 'dem' and
        'time' variables.
    output_nc : str
        Path to the NetCDF containing 'rivers', 'accpix' and
        'discharge_natural' variables.

    Returns
    -------
    tuple of dict
        (DEM_dict, River_dict, Distance_dict, Discharge_dict), each keyed
        by river-branch number (int).
    """

    # Extract flow direction data from NetCDF file
    flow_directions = RC.Open_nc_array(input_nc, Var = 'demdir')

    # Open River Array
    Rivers = RC.Open_nc_array(output_nc, Var = 'rivers')

    # Open Accumulated Pixel Array
    Accumulated_Pixels = RC.Open_nc_array(output_nc, Var = 'accpix')

    # Open Routed discharge Array
    Routed_Array = RC.Open_nc_array(output_nc, Var = 'discharge_natural')

    # Get the raster shape
    geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(input_nc)
    geo_out_example = np.array(geo_out_example)

    # Create a river array with a boundary of 1 pixel
    # (padding avoids index-out-of-range when shifting in the 8 directions)
    Rivers_bounds = np.zeros([size_Y_example+2, size_X_example+2])
    Rivers_bounds[1:-1,1:-1] = Rivers

    # Create a flow direction array with a boundary of 1 pixel
    # -32768 is the no-data sentinel used throughout this routine
    flow_directions[flow_directions==0]=-32768
    flow_directions_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    flow_directions_bound[1:-1,1:-1] = flow_directions

    # Create ID Matrix (1-based pixel IDs inside the padded frame;
    # 0 would collide with the "no flow" value in the masks below)
    y,x = np.indices((size_Y_example, size_X_example))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y_example,size_X_example),mode='clip').reshape(x.shape))
    ID_Matrix_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    ID_Matrix_bound[1:-1,1:-1] = ID_Matrix + 1
    ID_Matrix_bound[flow_directions_bound==-32768]=-32768
    del  x, y

    # Empty total from and to arrays
    ID_to_total=np.array([])
    ID_from_total=np.array([])

    # The flow directions parameters of HydroSHED
    # (1=E, 2=SE, 4=S, 8=SW, 16=W, 32=NW, 64=N, 128=NE)
    Directions = [1, 2, 4, 8, 16, 32, 64, 128]

    # Loop over the directions
    for Direction in Directions:

        # empty from and to arrays for 1 direction
        data_flow_to = np.zeros([size_Y_example + 2, size_X_example + 2])
        data_flow_from = np.zeros([size_Y_example + 2, size_X_example + 2])

        # Get the ID of only the rivers
        data_flow_to_ID = np.zeros([size_Y_example + 2, size_X_example + 2])
        data_flow_in = np.ones([size_Y_example + 2, size_X_example + 2])	* Rivers_bounds

        # Mask only one direction
        data_flow_from[flow_directions_bound == Direction] = data_flow_in[flow_directions_bound == Direction] * ID_Matrix_bound[flow_directions_bound == Direction]

        # Add the data flow to ID
        # Each shift moves the "from" IDs one pixel along this direction,
        # so the destination cell ends up holding its upstream neighbour's ID
        if Direction == 4:
            data_flow_to[1:,:] = data_flow_from[:-1,:]
        if Direction == 2:
            data_flow_to[1:,1:] = data_flow_from[:-1,:-1]
        if Direction == 1:
            data_flow_to[:,1:] = data_flow_from[:,:-1]
        if Direction == 128:
            data_flow_to[:-1,1:] = data_flow_from[1:,:-1]
        if Direction == 64:
            data_flow_to[:-1,:] = data_flow_from[1:,:]
        if Direction == 32:
            data_flow_to[:-1,:-1] = data_flow_from[1:,1:]
        if Direction == 16:
            data_flow_to[:,:-1] = data_flow_from[:,1:]
        if Direction == 8:
            data_flow_to[1:,:-1] = data_flow_from[:-1,1:]

        # mask out the no river pixels
        data_flow_to_ID[data_flow_to>0] = ID_Matrix_bound[data_flow_to>0]

        # Collect to and from arrays
        ID_from_total = np.append(ID_from_total,data_flow_from[data_flow_from!=0].ravel())
        ID_to_total = np.append(ID_to_total,data_flow_to_ID[data_flow_to_ID!=0].ravel())


    ######################## Define the starting point ############################

    # Open Basin area
    # Basin is inverted (1 outside, 0 inside) so the buffer marks the rim
    Basin = RC.Open_nc_array(input_nc, Var = 'basin')
    Basin = -1 * (Basin - 1)
    Basin_Buffer = RC.Create_Buffer(Basin, 8)
    Possible_End_Points = np.zeros(Basin.shape)
    # End point candidates: river pixels that touch the basin-rim buffer
    Possible_End_Points[(Basin_Buffer + Rivers) == 2] = 1
    End_Points = [[0,0]]

    rows_col_possible_end_pixels = np.argwhere(Possible_End_Points == 1)
    #  Accumulated_Pixels_possible = ID_Matrix * Possible_End_Points

    for PosPix in rows_col_possible_end_pixels:
        # Keep only candidates that are a local maximum of flow accumulation
        # in their 3x3 neighbourhood (i.e. true downstream outlets)
        Accumulated_Pixels_possible_Area = Accumulated_Pixels[PosPix[0]-1:PosPix[0]+2, PosPix[1]-1:PosPix[1]+2]
        Max_acc_possible_area = np.max(Accumulated_Pixels_possible_Area)
        middle_pixel = Accumulated_Pixels_possible_Area[1,1]
        if Max_acc_possible_area == middle_pixel:
            if flow_directions[PosPix[0],PosPix[1]] == -32768:
                # No-data flow direction at the outlet: shift to the
                # neighbour with the highest accumulation instead
                acc_aux = np.copy(Accumulated_Pixels_possible_Area)
                acc_aux[1,1] = 0
                off_y = np.where(acc_aux == np.max(acc_aux))[1][0] - 1
                off_x = np.where(acc_aux == np.max(acc_aux))[0][0] - 1
                PosPix[0] = PosPix[0] + off_x
                PosPix[1] = PosPix[1] + off_y
            # NOTE(review): End_Points is initialised as [[0,0]], so this
            # equality test can never be True — the vstack branch always
            # runs and the placeholder [0,0] row is skipped via [1:] below.
            if End_Points == []:
                End_Points = PosPix
            else:
                End_Points = np.vstack([End_Points, PosPix])

    # Create an empty dictionary for the rivers
    River_dict = dict()

    # Create empty array for the loop
    ID_starts_next = []
    i = 0

    # Walk every outlet upstream (placeholder row [0,0] is skipped)
    for End_Point in End_Points[1:]:

    # Define starting point
    # Max_Acc_Pix = np.nanmax(Accumulated_Pixels[ID_Matrix_bound[1:-1,1:-1]>0])
    # ncol, nrow = np.argwhere(Accumulated_Pixels==Max_Acc_Pix)[0]

    # Add Bounds
    # col = ncol + 1
    # row = nrow + 1

        col = End_Point[0] + 1
        row = End_Point[1] + 1

        ############################ Route the river ##################################

        # Get the ID of the starting point
        ID_starts = [ID_Matrix_bound[col,row]]


        # Keep going on till all the branches are looped
        while len(ID_starts) > 0:
            for ID_start in ID_starts:
                ID_start = int(ID_start)

                # Empty parameters for new starting point
                new = 0
                IDs = []

                # Add starting point
                # NOTE(review): assumes at least one 'from' entry exists for
                # ID_start; an outlet with no upstream match would raise
                # IndexError here — confirm against the network data.
                Arrays_from = np.argwhere(ID_from_total[:] == ID_start)
                ID_from = ID_to_total[int(Arrays_from[0])]
                IDs = np.array([ID_from, ID_start])
                ID_start_now = ID_start

                # Keep going till the branch ends
                while new == 0:

                    Arrays_to = np.argwhere(ID_to_total[:] == ID_start)

                    # Add IDs to the river dictionary
                    # A junction (>1 upstream) or a source (0 upstream)
                    # terminates the current branch
                    if len(Arrays_to)>1 or len(Arrays_to) == 0:
                        River_dict[i] = IDs
                        i += 1
                        new = 1

                        # Define the next loop for the new branches
                        for j in range(0, len(Arrays_to)):
                            ID_starts_next = np.append(ID_starts_next,ID_from_total[int(Arrays_to[j])])

                        # If it was the last one then empty ID_start_next
                        if ID_start_now == ID_starts[-1]:
                            ID_starts = ID_starts_next
                            ID_starts_next = []

                    # Add pixel to tree for river dictionary
                    else:
                        ID_start = ID_from_total[Arrays_to[0]]
                        IDs = np.append(IDs, ID_start)

    ######################## Create dict distance and dict dem ####################

    # Extract DEM data from NetCDF file
    DEM = RC.Open_nc_array(input_nc, Var = 'dem')

    # Get the distance of a horizontal and vertical flow pixel (assuming it flows in a straight line)
    import watools.Functions.Start.Area_converter as AC
    vertical, horizontal = AC.Calc_dlat_dlon(geo_out_example,size_X_example, size_Y_example)

    # Calculate a diagonal flowing pixel (assuming it flos in a straight line)
    diagonal = np.power((np.square(vertical) + np.square(horizontal)),0.5)

    # Create empty distance array
    Distance = np.zeros([size_Y_example, size_X_example])

    # Fill in the distance array
    # 1/16 flow E/W (horizontal), 64/4 flow N/S (vertical), rest diagonal
    Distance[np.logical_or(flow_directions == 1,flow_directions == 16)] = horizontal[np.logical_or(flow_directions == 1,flow_directions == 16)]
    Distance[np.logical_or(flow_directions == 64,flow_directions == 4)] = vertical[np.logical_or(flow_directions == 64,flow_directions == 4)]
    Distance[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)] = diagonal[np.logical_or(np.logical_or(np.logical_or(flow_directions == 32,flow_directions == 8),flow_directions == 128),flow_directions == 2)]

    # Create empty dicionaries for discharge, distance, and DEM
    Discharge_dict = dict()
    Distance_dict = dict()
    DEM_dict = dict()

    # Create empty arrays needed for the loop
    # River_ends rows: [last pixel ID, cumulative distance, max DEM]
    River_end = []
    River_ends = np.zeros([2,3])


    # Loop over the branches
    for River_number in range(0,len(River_dict)):

        # Get the pixels associated with the river section
        River = River_dict[River_number]
        i=1

        # Create empty arrays
        Distances_river = np.zeros([len(River)])
        DEM_river = np.zeros([len(River)])
        Discharge_river = np.zeros([len(River)])

        # for the first pixel get the previous pixel value from another branche
        row_start = np.argwhere(River_ends[:,0] == River[0])
        if len(row_start) < 1:
            Distances_river[0] = 0
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            DEM_river[0] = DEM[row - 1, col - 1]
            Discharge_river[0] = -9999
        else:
            # Continue distance/DEM from the branch this one connects to
            Distances_river[0] = River_ends[row_start, 1]
            DEM_river[0] = River_ends[row_start, 2]
            row, col = np.argwhere(ID_Matrix_bound == River[0])[0][:]
            #Discharge_river[0] = Routed_Discharge[timestep, row - 1, col - 1]

        # For the other pixels get the value of the River ID pixel
        for River_part in River[1:]:
            row, col = np.argwhere(ID_Matrix_bound == River_part)[0][:]
            Distances_river[i] = Distance[row - 1, col - 1]
            # Make the DEM profile monotonically non-decreasing upstream
            DEM_river[i] = np.max([DEM_river[i-1],DEM[row - 1, col - 1]])
            #Discharge_river[i] = Routed_Discharge[timestep, row - 1, col - 1]

            if River_part == River[1] and Discharge_river[i-1] == -9999:
                Discharge_river[i - 1] = Discharge_river[i]

            i += 1

        # Write array in dictionary
        DEM_dict[River_number] = DEM_river
        Discharge_dict[River_number] = Discharge_river
        Distance_dict[River_number] = np.cumsum(Distances_river)

        # Save the last pixel value
        River_end[:] = [River_part , np.cumsum(Distances_river)[-1], DEM_river[-1]]
        River_ends = np.vstack((River_ends, River_end))


    ########################## Discharge Dictionary ###############################

    # Create ID Matrix
    # (rebuilt without the flow-direction mask applied earlier)
    y,x = np.indices((size_Y_example, size_X_example))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y_example,size_X_example),mode='clip').reshape(x.shape))
    ID_Matrix_bound = np.ones([size_Y_example+2, size_X_example+2]) * -32768
    ID_Matrix_bound[1:-1,1:-1] = ID_Matrix + 1
    del  x, y

    # Create empty dicionaries for discharge, distance, and DEM
    # (this replaces the per-branch placeholder Discharge_dict built above)
    Discharge_dict = dict()

    Amount_months = len(RC.Open_nc_array(input_nc, Var = 'time'))

    # Loop over the branches
    for River_number in range(0,len(River_dict)):

        # Get the pixels associated with the river section
        River = River_dict[River_number]
        i=0

        # Create empty arrays
        Discharge_river = np.zeros([Amount_months, len(River)])

        # For the other pixels get the value of the River ID pixel
        for River_part in River[:]:
            row, col = np.argwhere(ID_Matrix_bound == River_part)[0][:]
            Discharge_river[:,i] = Routed_Array[:, row - 1, col - 1]
            i += 1

        # Write array in dictionary
        Discharge_dict[River_number] = Discharge_river
        print(River_number)

    return(DEM_dict, River_dict, Distance_dict, Discharge_dict)
예제 #12
0
def Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate,
               Enddate):
    """
    Split actual evapotranspiration into green and blue evapotranspiration.

    Green ET is the precipitation-sustained part, estimated with a scaled
    Budyko curve applied to moving-window averages of P and ETref (window
    length depends on land-use class); blue ET is the remainder.

    Parameters
    ----------
    Dir_Basin : str
        Basin directory used to locate the default ETref/P rasters.
    nc_outname : str
        Path to the .nc file containing the data.
    ETref_Product : str
        'WA_ETref' for the default product, otherwise a path to the data.
    P_Product : str
        'CHIRPS' or 'RFE' for the default products, otherwise a path.
    Startdate : str
        Start of the period 'yyyy-mm-dd'.
    Enddate : str
        End of the period 'yyyy-mm-dd'.

    Returns
    -------
    ET_Blue : numpy.ndarray
        Array [time, lat, lon] with blue evapotranspiration.
    ET_Green : numpy.ndarray
        Array [time, lat, lon] with green evapotranspiration.
    """
    import watools.General.raster_conversions as RC
    import watools.Functions.Start.Get_Dictionaries as GD

    # Scale factor applied to the Budyko curve
    scale = 1.1

    # Open LU map for example
    LU = RC.Open_nc_array(nc_outname, "Landuse")

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq='MS')

    # Get dictionaries and keys for the moving average
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(
        version='1.0')
    Classes = list(ET_Blue_Green_Classes_dict.keys())
    Moving_Averages_Values_Array = np.ones(LU.shape) * np.nan

    # Create array based on the dictionary that gives the moving average
    # tail length (in months) for every pixel
    for Class in Classes:
        Values_Moving_Window_Class = Moving_Window_Per_Class_dict[Class]
        for Values_Class in ET_Blue_Green_Classes_dict[Class]:
            Moving_Averages_Values_Array[
                LU == Values_Class] = Values_Moving_Window_Class

    # Extra months required in front of the period for the longest tail
    Additional_Months_front = int(np.nanmax(Moving_Averages_Values_Array))
    Additional_Months_tail = 0
    Start_period = Additional_Months_front
    End_period = Additional_Months_tail * -1

    ########################### Extract ETref data #################################

    # BUGFIX: string comparison must use '==', not identity ('is'), which is
    # only accidentally True for interned literals and CPython-specific.
    if ETref_Product == 'WA_ETref':
        # Define data path
        Data_Path_ETref = os.path.join(Dir_Basin, 'ETref', 'Monthly')
    else:
        Data_Path_ETref = ETref_Product

    ETref = Complete_3D_Array(nc_outname, 'Reference_Evapotranspiration',
                              Startdate, Enddate, Additional_Months_front,
                              Additional_Months_tail, Data_Path_ETref)

    ######################## Extract Precipitation data ########################

    if P_Product in ("CHIRPS", "RFE"):
        # Define data path
        Data_Path_P = os.path.join(Dir_Basin, 'Precipitation', P_Product,
                                   'Monthly')
    else:
        Data_Path_P = P_Product

    P = Complete_3D_Array(nc_outname, 'Precipitation', Startdate, Enddate,
                          Additional_Months_front, Additional_Months_tail,
                          Data_Path_P)

    ########################## Extract actET data ##############################
    ET = RC.Open_nc_array(nc_outname, "Actual_Evapotranspiration", Startdate,
                          Enddate)

    ############ Create average ETref and P using moving window ################
    ETref_Ave = np.ones([len(Dates),
                         int(LU.shape[0]),
                         int(LU.shape[1])]) * np.nan
    P_Ave = np.ones([len(Dates), int(LU.shape[0]), int(LU.shape[1])]) * np.nan
    if End_period == 0:
        P_period = P[Start_period:, :, :]
        ETref_period = ETref[Start_period:, :, :]
    else:
        P_period = P[Start_period:End_period, :, :]
        ETref_period = ETref[Start_period:End_period, :, :]

    # Loop over the different moving average tails
    for One_Value in np.unique(list(Moving_Window_Per_Class_dict.values())):

        # If the moving average is 1 then use the value of the original ETref or P
        if One_Value == 1:
            Values_Ave_ETref = ETref[int(ETref.shape[0]) - len(Dates):, :, :]
            Values_Ave_P = P[int(ETref.shape[0]) - len(Dates):, :, :]

        # If there is a tail, apply moving average over the whole datacube
        else:
            Values_Ave_ETref_tot = RC.Moving_average(ETref, One_Value - 1, 0)
            Values_Ave_P_tot = RC.Moving_average(P, One_Value - 1, 0)
            Values_Ave_ETref = Values_Ave_ETref_tot[
                int(Values_Ave_ETref_tot.shape[0]) - len(Dates):, :, :]
            Values_Ave_P = Values_Ave_P_tot[int(Values_Ave_P_tot.shape[0]) -
                                            len(Dates):, :, :]

        # Only add the data where the corresponding tail corresponds with the one_value
        ETref_Ave[:, Moving_Averages_Values_Array ==
                  One_Value] = Values_Ave_ETref[:,
                                                Moving_Averages_Values_Array ==
                                                One_Value]
        P_Ave[:, Moving_Averages_Values_Array ==
              One_Value] = Values_Ave_P[:, Moving_Averages_Values_Array ==
                                        One_Value]

    ##################### Calculate ET blue and green ###########################

    # Mask out the nan values(if one of the parameters is nan, then they are all nan)
    mask = np.any([
        np.isnan(LU) *
        np.ones([len(Dates), int(LU.shape[0]),
                 int(LU.shape[1])]) == 1,
        np.isnan(ET),
        np.isnan(ETref[int(ETref.shape[0]) - len(Dates):, :, :]),
        np.isnan(P[int(ETref.shape[0]) - len(Dates):, :, :]),
        np.isnan(P_Ave),
        np.isnan(ETref_Ave)
    ],
                  axis=0)
    ETref_period[mask] = ETref_Ave[mask] = ET[mask] = P_period[mask] = P_Ave[
        mask] = np.nan

    # Aridity index (dryness): ETref over P
    phi = ETref_Ave / P_Ave

    # Calculate Budyko-index
    Budyko = scale * np.sqrt(phi * np.tanh(1 / phi) * (1 - np.exp(-phi)))

    # Calculate ET green (capped at actual ET)
    ETgreen_DataCube = np.minimum(
        Budyko * P[int(ETref.shape[0]) - len(Dates):, :, :], ET)

    # Calculate ET blue
    ETblue_DataCube = ET - ETgreen_DataCube

    return (np.array(ETblue_DataCube), np.array(ETgreen_DataCube))
예제 #13
0
def Run(input_nc, output_nc):
    """
    Subtract surface-water withdrawals from the routed river discharge.

    For every river pixel with a positive withdrawal, the withdrawal volume
    is removed from the discharge of that river reach and of all reaches
    downstream of it; discharge is clipped at zero and the volume that
    could not be supplied is accumulated in a water error counter.

    Parameters
    ----------
    input_nc : str
        Path to the input NetCDF containing 'Extraction_M' (mm), 'area'
        (m2) and 'basin' variables.
    output_nc : str
        Path to the NetCDF containing 'dischargedict_dynamic',
        'riverdict_static' and 'rivers'.

    Returns
    -------
    dict
        Copy of the discharge dictionary with withdrawals subtracted,
        keyed by river-branch number.
    """
    # Open discharge dict
    Discharge_dict = RC.Open_nc_dict(output_nc, 'dischargedict_dynamic')

    # Open River dict
    River_dict = RC.Open_nc_dict(output_nc, 'riverdict_static')

    # Open River Array
    Rivers = RC.Open_nc_array(output_nc, Var='rivers')

    # Open Supply Array and convert withdrawal from mm to m3 per pixel
    DataCube_surface_withdrawal_mm = RC.Open_nc_array(input_nc,
                                                      Var='Extraction_M')
    Areas_in_m2 = RC.Open_nc_array(input_nc, Var='area')
    DataCube_surface_withdrawal_m3 = ((DataCube_surface_withdrawal_mm / 1000) *
                                      Areas_in_m2)

    # Open Basin Array
    Basin = RC.Open_nc_array(input_nc, Var='basin')

    # Copy dicts as starting point for subtracting the withdrawals
    Discharge_dict_new = copy.deepcopy(Discharge_dict)

    # Open data array info based on example data
    geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(
        input_nc)

    # Create ID Matrix (1-based pixel IDs)
    y, x = np.indices((size_Y_example, size_X_example))
    ID_Matrix = np.int32(
        np.ravel_multi_index(np.vstack((y.ravel(), x.ravel())),
                             (size_Y_example, size_X_example),
                             mode='clip').reshape(x.shape)) + 1
    del x, y

    # Find IDs
    ID_Rivers = Rivers * ID_Matrix

    # find IDs drainage for only the basin
    ID_Rivers_flow = RC.gap_filling(ID_Rivers, NoDataValue=0.) * Basin
    Water_Error = 0
    Count = 0

    for i in np.unique(ID_Rivers_flow)[1:]:
        Count += 1
        if np.nansum(DataCube_surface_withdrawal_m3[:,
                                                    ID_Rivers_flow == i]) > 0:
            # BUGFIX: np.int/np.float were removed in NumPy >= 1.24;
            # use the Python builtins instead.
            sys.stdout.write(
                "\r%s Procent of adding irrigation completed with %.2f x 10^9 m3 Water Error       "
                % (int(
                    np.ceil((float(Count) / len(np.unique(ID_Rivers_flow)) *
                             100))), Water_Error / 1e9))
            sys.stdout.flush()
            # Total withdrawal (per timestep) drained by this river pixel
            total_surface_withdrawal = np.nansum(
                DataCube_surface_withdrawal_m3[:, ID_Rivers_flow == i], 1)

            # Find exact area in river directory
            for River_part in iter(River_dict.items()):
                if len(np.argwhere(River_part[1] == i)) > 0:

                    # Find the river part in the dictionery
                    row_discharge = np.argwhere(River_part[1] == i)[0][0]

                    # Subtract the withdrawal from that specific riverpart,
                    # limited to the discharge actually available there
                    Real_Surface_Withdrawal = np.minimum(
                        Discharge_dict_new[
                            River_part[0]][:, row_discharge].flatten(),
                        total_surface_withdrawal[:, None].flatten())

                    # Anything that could not be supplied is a water error
                    Water_Error += np.maximum(
                        np.nansum(total_surface_withdrawal[:, None].flatten() -
                                  Real_Surface_Withdrawal), 0)
                    Discharge_dict_new[River_part[
                        0]][:, 0:row_discharge] = Discharge_dict_new[River_part[
                            0]][:, 0:
                                row_discharge] - Real_Surface_Withdrawal[:,
                                                                         None]

                    # Clip to zero where the subtraction drove a previously
                    # non-negative discharge below zero
                    Discharge_dict_new[River_part[0]][np.logical_and(
                        Discharge_dict_new[River_part[0]] <= 0,
                        Discharge_dict[River_part[0]] >= 0)] = 0
                    End_river = River_dict[River_part[0]][0]
                    times = 0

                    # Subtract the withdrawal from all the other downstream dictionaries
                    while len(River_dict) > times:
                        for River_part_downstream in iter(River_dict.items()):
                            if River_part_downstream[1][-1] == End_river:

                                Discharge_dict_new[River_part_downstream[
                                    0]][:, :] = Discharge_dict_new[
                                        River_part_downstream[
                                            0]][:, :] - Real_Surface_Withdrawal[:,
                                                                                None]

                                Discharge_dict_new[River_part_downstream[0]][
                                    np.logical_and(
                                        Discharge_dict_new[
                                            River_part_downstream[0]] <= 0,
                                        Discharge_dict[
                                            River_part_downstream[0]] >=
                                        0)] = 0
                                End_river = River_dict[
                                    River_part_downstream[0]][0]
                                times = 0
                            times += 1

    return (Discharge_dict_new)
예제 #14
0
def Calc_Regions(input_nc, output_nc, input_JRC, Boundaries):
    """
    Detect lake/reservoir regions inside the basin from JRC water occurrence.

    Aggregates the JRC water-occurrence raster into 30x30-pixel blocks,
    flags blocks whose summed occurrence exceeds a sensitivity threshold,
    grows and merges overlapping flagged areas, and converts the merged
    bounding boxes to geographic coordinates.

    Parameters
    ----------
    input_nc : str
        Path to the NetCDF containing the 'basin' variable.
    output_nc : str
        Path to the output NetCDF (unused here; kept for interface
        compatibility with the caller).
    input_JRC : str
        Path to the JRC water-occurrence GeoTIFF.
    Boundaries : object
        Basin boundaries (unused here; kept for interface compatibility).

    Returns
    -------
    numpy.ndarray
        Regions array [n, 4] with [lon_min, lon_max, lat_min, lat_max]
        per detected region.
    """
    import numpy as np
    import watools.General.raster_conversions as RC

    sensitivity = 700  # 900 is less sensitive 1 is very sensitive

    # Get JRC array and information
    Array_JRC_occ = RC.Open_tiff_array(input_JRC)
    Geo_out, proj, size_X, size_Y = RC.Open_array_info(input_JRC)

    # Get Basin boundary based on LU
    Array_LU = RC.Open_nc_array(input_nc, "basin")
    LU_array = RC.resize_array_example(Array_LU, Array_JRC_occ)
    basin_array = np.zeros(np.shape(LU_array))
    basin_array[LU_array > 0] = 1
    del LU_array

    # find all pixels with water occurence (>= 30 % of the time water)
    Array_JRC_occ[basin_array < 1] = 0
    Array_JRC_occ[Array_JRC_occ < 30] = 0
    Array_JRC_occ[Array_JRC_occ >= 30] = 1
    del basin_array

    # sum larger areas to find lakes
    # BUGFIX: block counts must be integers — np.round returns a float,
    # which np.zeros rejects as an array shape on modern NumPy.
    x_size = int(np.round(np.shape(Array_JRC_occ)[0] / 30))
    y_size = int(np.round(np.shape(Array_JRC_occ)[1] / 30))
    sum_array = np.zeros([x_size, y_size])

    for i in range(0, len(sum_array)):
        for j in range(0, len(sum_array[1])):
            sum_array[i, j] = np.sum(Array_JRC_occ[i * 30:(i + 1) * 30,
                                                   j * 30:(j + 1) * 30])

    del Array_JRC_occ

    lakes = np.argwhere(sum_array >= sensitivity)
    lake_info = np.zeros([1, 4])

    i = 0
    k = 1

    # find all neighboring pixels: grow an 8-block halo around each flagged
    # block and merge it into an existing bounding box when they overlap
    for lake in lakes:
        added = 0
        for j in range(0, k):
            if (lake[0] >= lake_info[j, 0] and lake[0] <= lake_info[j, 1]
                    and lake[1] >= lake_info[j, 2]
                    and lake[1] <= lake_info[j, 3]):
                lake_info[j, 0] = np.maximum(
                    np.minimum(lake_info[j, 0], lake[0] - 8), 0)
                lake_info[j, 1] = np.minimum(
                    np.maximum(lake_info[j, 1], lake[0] + 8), x_size)
                lake_info[j, 2] = np.maximum(
                    np.minimum(lake_info[j, 2], lake[1] - 8), 0)
                lake_info[j, 3] = np.minimum(
                    np.maximum(lake_info[j, 3], lake[1] + 8), y_size)
                added = 1

        if added == 0:
            # start a new bounding box for this lake block
            lake_info_one = np.zeros([4])
            lake_info_one[0] = np.maximum(0, lake[0] - 8)
            lake_info_one[1] = np.minimum(x_size, lake[0] + 8)
            lake_info_one[2] = np.maximum(0, lake[1] - 8)
            lake_info_one[3] = np.minimum(y_size, lake[1] + 8)
            lake_info = np.append(lake_info, lake_info_one)
            lake_info = np.resize(lake_info, (k + 1, 4))
            k += 1

    # merge all overlaping regions
    p = 0
    lake_info_end = np.zeros([1, 4])

    for i in range(1, k):
        added = 0
        lake_info_one = lake_info[i, :]
        lake_y_region = list(
            range(int(lake_info_one[0]), int(lake_info_one[1] + 1)))
        lake_x_region = list(
            range(int(lake_info_one[2]), int(lake_info_one[3] + 1)))

        for j in range(0, p + 1):
            # BUGFIX: overlap test compared lengths with 'is not' (object
            # identity), which is only accidentally correct for small
            # interned ints — use '!=' for value comparison.
            # Overlap exists when concatenating the ranges produces
            # duplicates (unique length < summed length).
            if len(lake_y_region) + len(
                    list(
                        range(int(lake_info_end[j, 0]),
                              int(lake_info_end[j, 1] + 1)))
            ) != len(
                    np.unique(
                        np.append(
                            lake_y_region,
                            list(
                                range(int(lake_info_end[j, 0]),
                                      int(lake_info_end[j, 1] + 1)))))
            ) and len(lake_x_region) + len(
                    list(
                        range(int(lake_info_end[j, 2]),
                              int(lake_info_end[j, 3] + 1)))) != len(
                                  np.unique(
                                      np.append(
                                          lake_x_region,
                                          list(
                                              range(
                                                  int(lake_info_end[j, 2]),
                                                  int(lake_info_end[j, 3] +
                                                      1)))))):
                lake_info_end[j, 0] = np.min(
                    np.unique(
                        np.append(
                            lake_y_region,
                            list(
                                range(int(lake_info_end[j, 0]),
                                      int(lake_info_end[j, 1] + 1))))))
                lake_info_end[j, 1] = np.max(
                    np.unique(
                        np.append(
                            lake_y_region,
                            list(
                                range(int(lake_info_end[j, 0]),
                                      int(lake_info_end[j, 1] + 1))))))
                lake_info_end[j, 2] = np.min(
                    np.unique(
                        np.append(
                            lake_x_region,
                            list(
                                range(int(lake_info_end[j, 2]),
                                      int(lake_info_end[j, 3] + 1))))))
                lake_info_end[j, 3] = np.max(
                    np.unique(
                        np.append(
                            lake_x_region,
                            list(
                                range(int(lake_info_end[j, 2]),
                                      int(lake_info_end[j, 3] + 1))))))
                added = 1

        if added == 0:
            lake_info_one = lake_info[i, :]
            lake_info_end = np.append(lake_info_end, lake_info_one)
            lake_info_end = np.resize(lake_info_end, (p + 2, 4))

            p += 1

    # calculate the area (convert block indices back to coordinates;
    # pixel size scaled by the 30-pixel aggregation factor)
    Regions = np.zeros([p, 4])
    pixel_x_size = Geo_out[1] * 30
    pixel_y_size = Geo_out[5] * 30
    for region in range(1, p + 1):
        Regions[region - 1,
                0] = Geo_out[0] + pixel_x_size * lake_info_end[region, 2]
        Regions[region - 1,
                1] = Geo_out[0] + pixel_x_size * (lake_info_end[region, 3] + 1)
        Regions[region - 1,
                2] = Geo_out[3] + pixel_y_size * (lake_info_end[region, 1] + 1)
        Regions[region - 1,
                3] = Geo_out[3] + pixel_y_size * lake_info_end[region, 0]

    return (Regions)
예제 #15
0
def Add_Reservoirs(output_nc, Diff_Water_Volume, Regions):
    """
    Include reservoirs in the routed river network and correct discharge.

    For every reservoir bounding box the routed river reach crossing it is
    located, the reach (and the matching DEM/distance/discharge entries) is
    split at the reservoir, and the monthly storage change of the reservoir
    is subtracted from the discharge of the connected reaches.

    Parameters
    ----------
    output_nc : str
        Path to the SurfWAT NetCDF file holding the routing results
        ("dischargedict_dynamic", "riverdict_static", "demdict_static",
        "distancedict_static", "rivers", "accpix").
    Diff_Water_Volume : numpy.ndarray
        Reservoir storage change; indexed as [reservoir, time, 2] for the
        volume difference in m3 (only index 2 of the last axis is read).
    Regions : numpy.ndarray
        One row per reservoir: [lon_min, lon_max, lat_min, lat_max].

    Returns
    -------
    tuple of dict
        (Discharge_dict, River_dict, DEM_dict, Distance_dict) with the
        reservoir reaches added and the discharge corrected.
    """
    import numpy as np

    import watools.General.raster_conversions as RC
    import watools.General.data_conversions as DC

    # Extract data from NetCDF file
    Discharge_dict = RC.Open_nc_dict(output_nc, "dischargedict_dynamic")
    River_dict = RC.Open_nc_dict(output_nc, "riverdict_static")
    DEM_dict = RC.Open_nc_dict(output_nc, "demdict_static")
    Distance_dict = RC.Open_nc_dict(output_nc, "distancedict_static")
    Rivers = RC.Open_nc_array(output_nc, "rivers")
    acc_pixels = RC.Open_nc_array(output_nc, "accpix")

    # Open data array info based on example data
    geo_out, epsg, size_X, size_Y, size_Z, time = RC.Open_nc_info(output_nc)

    # Create ID Matrix: a unique, 1-based ID for every grid cell
    y, x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(
        np.ravel_multi_index(np.vstack((y.ravel(), x.ravel())),
                             (size_Y, size_X),
                             mode='clip').reshape(x.shape)) + 1
    del x, y

    # Maps restricted to river cells (Rivers is a 0/1 mask):
    # accumulated-pixel counts and cell IDs on the river network only
    Acc_Pixels_Rivers = Rivers * acc_pixels
    ID_Rivers = Rivers * ID_Matrix

    Amount_of_Reservoirs = len(Regions)

    # Per reservoir: [index within reach, reach key, found-flag];
    # -9999 marks "no river reach found for this reservoir"
    Reservoir_is_in_River = np.ones([len(Regions), 3]) * -9999

    for reservoir in range(0, Amount_of_Reservoirs):

        region = Regions[reservoir, :]

        # Clip the accumulated-pixels map to the reservoir bounding box
        dest = DC.Save_as_MEM(Acc_Pixels_Rivers, geo_out, projection='WGS84')
        Rivers_Acc_Pixels_reservoir, Geo_out = RC.clip_data(
            dest, latlim=[region[2], region[3]], lonlim=[region[0], region[1]])

        # Clip the river cell-ID map to the same box
        dest = DC.Save_as_MEM(ID_Rivers, geo_out, projection='WGS84')
        Rivers_ID_reservoir, Geo_out = RC.clip_data(
            dest, latlim=[region[2], region[3]], lonlim=[region[0], region[1]])

        # Collect the accumulated-pixel values on the clip's four outer
        # edges; the largest edge value marks where the main river crosses
        # the box boundary
        size_Y_reservoir, size_X_reservoir = np.shape(
            Rivers_Acc_Pixels_reservoir)
        IDs_Edges = []
        IDs_Edges = np.append(IDs_Edges, Rivers_Acc_Pixels_reservoir[0, :])
        IDs_Edges = np.append(IDs_Edges, Rivers_Acc_Pixels_reservoir[:, 0])
        IDs_Edges = np.append(
            IDs_Edges,
            Rivers_Acc_Pixels_reservoir[int(size_Y_reservoir) - 1, :])
        IDs_Edges = np.append(
            IDs_Edges, Rivers_Acc_Pixels_reservoir[:,
                                                   int(size_X_reservoir) - 1])
        Value_Reservoir = np.max(np.unique(IDs_Edges))

        # Cell ID of that boundary-crossing river cell
        y_pix_res, x_pix_res = np.argwhere(
            Rivers_Acc_Pixels_reservoir == Value_Reservoir)[0]
        ID_reservoir = Rivers_ID_reservoir[y_pix_res, x_pix_res]

        # Find exact reservoir area in river directory
        # (River_dict items are (reach key, array of cell IDs))
        for River_part in River_dict.items():
            if len(np.argwhere(River_part[1] == ID_reservoir)) > 0:
                # NOTE(review): np.argwhere returns a 2-D array; assigning
                # it to a scalar cell only works when the ID occurs exactly
                # once in the reach -- TODO confirm
                Reservoir_is_in_River[reservoir, 0] = np.argwhere(
                    River_part[1] == ID_reservoir)  #River_part_good
                Reservoir_is_in_River[reservoir,
                                      1] = River_part[0]  #River_Add_Reservoir
                Reservoir_is_in_River[reservoir, 2] = 1  #Reservoir_is_in_River

    # Rank the reservoirs in descending order of their reach key, so they
    # are processed from the highest reach key down
    numbers = abs(Reservoir_is_in_River[:, 1].argsort() -
                  len(Reservoir_is_in_River) + 1)

    for number in range(0, len(Reservoir_is_in_River)):

        row_reservoir = np.argwhere(numbers == number)[0][0]

        # Skip reservoirs for which no river reach was found (-9999 flag)
        if not Reservoir_is_in_River[row_reservoir, 2] == -9999:

            # Get discharge into the reservoir:
            Flow_in_res_m3 = Discharge_dict[int(Reservoir_is_in_River[
                row_reservoir, 1])][:,
                                    int(Reservoir_is_in_River[row_reservoir,
                                                              0])]

            # Get difference reservoir
            Change_Reservoir_m3 = Diff_Water_Volume[row_reservoir, :, 2]

            # Total Change outflow: the reservoir cannot retain more water
            # than flows into it in a given month
            Change_outflow_m3 = np.minimum(Flow_in_res_m3, Change_Reservoir_m3)

            # If a significant volume (> 10000 m3) could not be retained,
            # redistribute it proportionally over months with positive change
            Difference = Change_outflow_m3 - Change_Reservoir_m3
            if abs(np.sum(Difference)) > 10000 and np.sum(
                    Change_Reservoir_m3[Change_outflow_m3 > 0]) > 0:
                Change_outflow_m3[Change_outflow_m3 < 0] = Change_outflow_m3[
                    Change_outflow_m3 < 0] * np.sum(
                        Change_outflow_m3[Change_outflow_m3 > 0]) / np.sum(
                            Change_Reservoir_m3[Change_outflow_m3 > 0])

            # Find key name (which is also the length of the river dictionary)
            i = len(River_dict)

            # Split the reach at the reservoir: the part from the reservoir
            # onwards becomes a new reach with key i, the original reach is
            # truncated at the reservoir cell (inclusive).
            #River_with_reservoirs_dict[i]=list((River_dict[River_Add_Reservoir][River_part_good[0][0]:]).flat) < MAAK DIRECTORIES ARRAYS OP DEZE MANIER DAN IS DE ARRAY 1D
            River_dict[i] = River_dict[int(Reservoir_is_in_River[
                row_reservoir, 1])][int(Reservoir_is_in_River[row_reservoir,
                                                              0]):]
            River_dict[int(
                Reservoir_is_in_River[row_reservoir, 1])] = River_dict[int(
                    Reservoir_is_in_River[
                        row_reservoir,
                        1])][:int(Reservoir_is_in_River[row_reservoir, 0]) + 1]

            # Apply the same split to the DEM values of the reach
            DEM_dict[i] = DEM_dict[int(Reservoir_is_in_River[
                row_reservoir, 1])][int(Reservoir_is_in_River[row_reservoir,
                                                              0]):]
            DEM_dict[int(
                Reservoir_is_in_River[row_reservoir, 1])] = DEM_dict[int(
                    Reservoir_is_in_River[
                        row_reservoir,
                        1])][:int(Reservoir_is_in_River[row_reservoir, 0]) + 1]

            # ... and to the distance values of the reach
            Distance_dict[i] = Distance_dict[int(Reservoir_is_in_River[
                row_reservoir, 1])][int(Reservoir_is_in_River[row_reservoir,
                                                              0]):]
            Distance_dict[int(
                Reservoir_is_in_River[row_reservoir, 1])] = Distance_dict[int(
                    Reservoir_is_in_River[
                        row_reservoir,
                        1])][:int(Reservoir_is_in_River[row_reservoir, 0]) + 1]

            # ... and to the discharge time series (axis 0 is time, axis 1
            # follows the reach cells); then subtract the retained volume
            # from the truncated upstream part (cells 1..split)
            Discharge_dict[i] = Discharge_dict[int(Reservoir_is_in_River[
                row_reservoir, 1])][:,
                                    int(Reservoir_is_in_River[row_reservoir,
                                                              0]):]
            Discharge_dict[int(
                Reservoir_is_in_River[row_reservoir, 1])] = Discharge_dict[int(
                    Reservoir_is_in_River[
                        row_reservoir,
                        1])][:, :int(Reservoir_is_in_River[row_reservoir, 0]) +
                             1]
            Discharge_dict[int(Reservoir_is_in_River[
                row_reservoir,
                1])][:, 1:int(Reservoir_is_in_River[row_reservoir, 0]) +
                     1] = Discharge_dict[int(
                         Reservoir_is_in_River[row_reservoir, 1]
                     )][:, 1:int(Reservoir_is_in_River[row_reservoir, 0]) +
                        1] - Change_outflow_m3[:, None]
            # First cell ID of the truncated reach; used to chain through
            # connected reaches below
            Next_ID = River_dict[int(Reservoir_is_in_River[row_reservoir,
                                                           1])][0]

            # Walk through all reaches whose last cell matches Next_ID and
            # subtract the retained volume from their discharge as well.
            # `times` is reset to 0 on every hit, so scanning continues
            # until one full pass over River_dict finds no further match.
            times = 0
            while len(River_dict) > times:
                for River_part in River_dict.items():
                    # River_part[-1] is the reach's cell-ID array
                    if River_part[-1][-1] == Next_ID:
                        Next_ID = River_part[-1][0]
                        item = River_part[0]
                        #Always 10 procent of the incoming discharge will pass the dam
                        Change_outflow_m3[:, None] = np.minimum(
                            0.9 * Discharge_dict[item][:, -1:],
                            Change_outflow_m3[:, None])

                        Discharge_dict[item][:, 1:] = Discharge_dict[
                            item][:, 1:] - Change_outflow_m3[:, None]
                        print(item)  # NOTE(review): leftover debug output?
                        times = 0
                    times += 1

    return (Discharge_dict, River_dict, DEM_dict, Distance_dict)
예제 #16
0
def Find_Area_Volume_Relation(region, input_JRC, input_nc):
    """
    Derive an area-volume relation for a reservoir from a JRC water-extent
    raster and the DEM.

    The water mask is clipped to the reservoir bounding box and dilated,
    the DEM is reprojected onto it, and per 1 m water-level slice the
    flooded area and cumulative volume are tabulated. A power-law curve
    is then fitted through the (area, volume) pairs.

    Parameters
    ----------
    region : array-like
        Reservoir bounding box [lon_min, lon_max, lat_min, lat_max].
    input_JRC : str
        Path to the JRC surface-water raster.
    input_nc : str
        Path to the NetCDF file containing the "dem" variable.

    Returns
    -------
    popt : numpy.ndarray
        Four fit parameters [a, b, c, d] so that, approximately,
        Volume = a * (Area - c)**b + d (c and d are 0 or the starting
        point (A0, V0), depending on which fit was used).
    """
    # Find relation between V and A

    import numpy as np
    import watools.General.raster_conversions as RC
    import watools.General.data_conversions as DC
    from scipy.optimize import curve_fit
    import matplotlib.pyplot as plt

    def func(x, a, b):
        """
        This function is used for finding relation area and volume

        Power law through the origin: V = a * A**b.
        """
        return (a * x**b)

    def func3(x, a, b, c, d):
        """
        This function is used for finding relation area and volume

        Shifted power law: V = a * (A - c)**b + d.
        """
        return (a * (x - c)**b + d)

    #Array, Geo_out = RC.clip_data(input_JRC,latlim=[14.528,14.985],lonlim =[35.810,36.005])
    Array, Geo_out = RC.clip_data(
        input_JRC,
        latlim=[region[2], region[3]],
        lonlim=[region[0], region[1]
                ])  # This reservoir was not filled when SRTM was taken
    # np.shape([Array]) wraps Array in a list, so the last two axes are
    # the row/column counts of the 2-D raster
    size_Y = int(np.shape([Array])[-2])
    size_X = int(np.shape([Array])[-1])

    # Dilate the water mask by `buffer_zone` pixels: a pixel counts as
    # water if any pixel within the surrounding window is water
    Water_array = np.zeros(np.shape(Array))
    buffer_zone = 4
    Array[Array > 0] = 1
    for i in range(0, size_Y):
        for j in range(0, size_X):
            Water_array[i, j] = np.max(Array[
                np.maximum(0, i -
                           buffer_zone):np.minimum(size_Y, i + buffer_zone +
                                                   1),
                np.maximum(0, j -
                           buffer_zone):np.minimum(size_X, j + buffer_zone +
                                                   1)])
    del Array

    # Open DEM and reproject
    DEM_Array = RC.Open_nc_array(input_nc, "dem")
    Geo_out_dem, proj_dem, size_X_dem, size_Y_dem, size_Z_dem, time = RC.Open_nc_info(
        input_nc)

    # Save Example as memory file
    dest_example = DC.Save_as_MEM(Water_array, Geo_out, projection='WGS84')
    dest_dem = DC.Save_as_MEM(DEM_Array, Geo_out_dem, projection='WGS84')

    # reproject DEM by using example
    dest_out = RC.reproject_dataset_example(dest_dem, dest_example, method=2)
    DEM = dest_out.GetRasterBand(1).ReadAsArray()

    # find DEM water heights: DEM elevation on water pixels, NaN elsewhere
    DEM_water = np.zeros(np.shape(Water_array))
    DEM_water[Water_array != 1] = np.nan
    DEM_water[Water_array == 1.] = DEM[Water_array == 1.]

    # Get array with areas (per-pixel size in m2)
    import watools.Functions.Start.Area_converter as Area
    dlat, dlon = Area.Calc_dlat_dlon(Geo_out, size_X, size_Y)
    area_in_m2 = dlat * dlon

    # find volume and Area
    min_DEM_water = int(np.round(np.nanmin(DEM_water)))
    max_DEM_water = int(np.round(np.nanmax(DEM_water)))

    # Table columns: [height, pixels, area, volume, histogram]
    Reservoir_characteristics = np.zeros([1, 5])
    i = 0

    for height in range(min_DEM_water + 1, max_DEM_water):
        DEM_water_below_height = np.zeros(np.shape(DEM_water))
        # Replace NaNs by a large elevation so they never count as flooded
        # (mutates DEM_water; only the first iteration has any NaNs left)
        DEM_water[np.isnan(DEM_water)] = 1000000
        DEM_water_below_height[DEM_water < height] = 1
        pixels = np.sum(DEM_water_below_height)

        area = np.sum(DEM_water_below_height * area_in_m2)
        if height == min_DEM_water + 1:
            volume = 0.5 * area
            histogram = pixels
            Reservoir_characteristics[:] = [
                height, pixels, area, volume, histogram
            ]
        else:
            # Trapezoid rule per 1 m slice: previous area carried in full,
            # newly flooded ring at half depth
            area_previous = Reservoir_characteristics[i, 2]
            volume_previous = Reservoir_characteristics[i, 3]
            volume = volume_previous + 0.5 * (
                area - area_previous) + 1 * area_previous
            histogram_previous = Reservoir_characteristics[i, 1]
            histogram = pixels - histogram_previous
            Reservoir_characteristics_one = [
                height, pixels, area, volume, histogram
            ]
            Reservoir_characteristics = np.append(
                Reservoir_characteristics, Reservoir_characteristics_one)
            i += 1
            Reservoir_characteristics = np.resize(Reservoir_characteristics,
                                                  (i + 1, 5))

    maxi = int(len(Reservoir_characteristics[:, 3]))

    # find minimum value for reservoirs height (DEM is same value if reservoir was already filled whe SRTM was created)
    Historgram = Reservoir_characteristics[:, 4]
    hist_mean = np.mean(Historgram)
    hist_std = np.std(Historgram)

    # Outlier threshold: a slice adding far more pixels than typical marks
    # the flat, already-filled water surface in the DEM
    mini_tresh = hist_std * 5 + hist_mean

    Check_hist = np.zeros([len(Historgram)])
    Check_hist[Historgram > mini_tresh] = Historgram[Historgram > mini_tresh]
    if np.max(Check_hist) != 0.0:
        col = np.argwhere(Historgram == np.max(Check_hist))[0][0]
        mini = col + 1
    else:
        mini = 0

    fitted = 0

    # find starting point reservoirs
    V0 = Reservoir_characteristics[mini, 3]
    A0 = Reservoir_characteristics[mini, 2]

    # Calculate the best maxi reservoir characteristics, based on the normal V = a*x**b relation
    while fitted == 0:
        try:
            if mini == 0:
                popt1, pcov1 = curve_fit(
                    func, Reservoir_characteristics[mini:maxi, 2],
                    Reservoir_characteristics[mini:maxi, 3])
            else:
                popt1, pcov1 = curve_fit(
                    func, Reservoir_characteristics[mini:maxi, 2] - A0,
                    Reservoir_characteristics[mini:maxi, 3] - V0)
            fitted = 1
        # NOTE(review): bare except swallows every error (incl.
        # KeyboardInterrupt); curve_fit failures raise RuntimeError --
        # consider narrowing
        except:
            maxi -= 1

        if maxi < mini:
            print('ERROR: was not able to find optimal fit')
            fitted = 1

    # Remove last couple of pixels of maxi
    maxi_end = int(np.round(maxi - 0.2 * (maxi - mini)))

    done = 0
    times = 0

    # NOTE(review): `times` starts at 0 and `maxi_end` normally exceeds
    # `mini`, so this condition is False on entry and the refinement loop
    # below never runs; the intent was presumably
    # `times < 20 and maxi_end > mini` -- TODO confirm. As written the
    # function always falls through to the popt1 fallback after the loop.
    # (The loop body also calls plt.show(), which blocks with interactive
    # matplotlib backends.)
    while done == 0 and times > 20 and maxi_end < mini:
        try:
            if mini == 0:
                popt, pcov = curve_fit(
                    func, Reservoir_characteristics[mini:maxi_end, 2],
                    Reservoir_characteristics[mini:maxi_end, 3])
            else:
                popt, pcov = curve_fit(
                    func3, Reservoir_characteristics[mini:maxi_end, 2],
                    Reservoir_characteristics[mini:maxi_end, 3])

        except:
            maxi_end = int(maxi)
            if mini == 0:
                popt, pcov = curve_fit(
                    func, Reservoir_characteristics[mini:maxi_end, 2],
                    Reservoir_characteristics[mini:maxi_end, 3])
            else:
                popt, pcov = curve_fit(
                    func3, Reservoir_characteristics[mini:maxi_end, 2],
                    Reservoir_characteristics[mini:maxi_end, 3])

        if mini == 0:
            plt.plot(Reservoir_characteristics[mini:maxi_end, 2],
                     Reservoir_characteristics[mini:maxi_end, 3], 'ro')
            t = np.arange(0., np.max(Reservoir_characteristics[:, 2]), 1000)
            plt.plot(t, popt[0] * (t)**popt[1], 'g--')
            plt.axis([
                0,
                np.max(Reservoir_characteristics[mini:maxi_end, 2]), 0,
                np.max(Reservoir_characteristics[mini:maxi_end, 3])
            ])
            plt.show()
            done = 1

        else:
            plt.plot(Reservoir_characteristics[mini:maxi_end, 2],
                     Reservoir_characteristics[mini:maxi_end, 3], 'ro')
            t = np.arange(0., np.max(Reservoir_characteristics[:, 2]), 1000)
            plt.plot(t, popt[0] * (t - popt[2])**popt[1] + popt[3], 'g--')
            plt.axis([
                0,
                np.max(Reservoir_characteristics[mini:maxi_end, 2]), 0,
                np.max(Reservoir_characteristics[mini:maxi_end, 3])
            ])
            plt.show()
            Volume_error = popt[3] / V0 * 100 - 100
            print('error Volume = %s percent' % Volume_error)
            print('error Area = %s percent' % (A0 / popt[2] * 100 - 100))

            if Volume_error < 30 and Volume_error > -30:
                done = 1
            else:
                times += 1
                maxi_end -= 1
                print('Another run is done in order to improve the result')

    # Fallback: no refined fit was accepted, so use the simple power-law
    # fit shifted by the starting point (A0, V0)
    if done == 0:
        popt = np.append(popt1, [A0, V0])

    # Pad a two-parameter fit with zeros so callers always get [a, b, c, d]
    if len(popt) == 2:
        popt = np.append(popt, [0, 0])

    return (popt)
예제 #17
0
def Complete_3D_Array(nc_outname, Var, Startdate, Enddate,
                      Additional_Months_front, Additional_Months_tail,
                      Data_Path):
    """
    Open a 3D array [time, lat, lon] of *Var* covering the requested period
    extended by extra months on both sides (used for moving averages).

    The core period is read from ``nc_outname``. The leading and trailing
    months are read from the previous/next yearly NetCDF file when that
    file exists and contains *Var*; otherwise they are rebuilt from the
    monthly raster time series in ``Data_Path``.

    Parameters
    ----------
    nc_outname : str
        Path to the yearly NetCDF file holding *Var* for the core period.
    Var : str
        Name of the variable to extract.
    Startdate : str
        Start of the core period, 'yyyy-mm-dd'.
    Enddate : str
        End of the core period, 'yyyy-mm-dd'.
    Additional_Months_front : int
        Number of months to prepend before Startdate (0 disables).
    Additional_Months_tail : int
        Number of months to append after Enddate (0 disables).
    Data_Path : str
        Folder with the monthly rasters used as fallback data source.

    Returns
    -------
    numpy.ndarray
        Array [time, lat, lon] spanning the extended period.
    """
    # Local imports keep the function self-contained, matching the
    # per-function import style used elsewhere in this module (os, numpy
    # and pandas were previously assumed to exist at module level).
    import os

    import numpy as np
    import pandas as pd
    from netCDF4 import Dataset

    import watools.General.raster_conversions as RC

    # Define the extended period boundaries for the moving average
    Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(
        months=Additional_Months_front)
    Enddate_Moving_Average = pd.Timestamp(Enddate) + pd.DateOffset(
        months=Additional_Months_tail)
    Startdate_Moving_Average_String = '%d-%02d-%02d' % (
        Startdate_Moving_Average.year, Startdate_Moving_Average.month,
        Startdate_Moving_Average.day)
    Enddate_Moving_Average_String = '%d-%02d-%02d' % (
        Enddate_Moving_Average.year, Enddate_Moving_Average.month,
        Enddate_Moving_Average.day)

    # Extract the core period first
    Array_main = RC.Open_nc_array(nc_outname, Var, Startdate, Enddate)

    if Additional_Months_front > 0:

        # Yearly NetCDF file in which the extended period starts, and the
        # last day of the leading (front) period
        Year_front = int(Startdate_Moving_Average.year)
        filename_front = os.path.join(os.path.dirname(nc_outname),
                                      "%d.nc" % Year_front)
        Enddate_Front = pd.Timestamp(Startdate) - pd.DateOffset(days=1)

        # Extract moving average period before the core period
        if os.path.exists(filename_front):

            # Check whether the variable is present in that NetCDF file
            fh = Dataset(filename_front)
            Variables_NC = [var for var in fh.variables]
            fh.close()

            if Var in Variables_NC:
                Array_front = RC.Open_nc_array(
                    filename_front, Var, Startdate_Moving_Average_String,
                    Enddate_Front)
            else:
                # Fall back to the monthly raster time series
                Array_front = RC.Get3Darray_time_series_monthly(
                    Data_Path, Startdate_Moving_Average_String, Enddate_Front,
                    nc_outname)

        else:
            # Fall back to the monthly raster time series
            Array_front = RC.Get3Darray_time_series_monthly(
                Data_Path, Startdate_Moving_Average_String, Enddate_Front,
                nc_outname)

        # Prepend the leading months along the time axis
        Array_main = np.vstack([Array_front, Array_main])

    if Additional_Months_tail > 0:

        # Yearly NetCDF file in which the extended period ends, and the
        # first day of the trailing (tail) period
        Year_tail = int(Enddate_Moving_Average.year)
        filename_tail = os.path.join(os.path.dirname(nc_outname),
                                     "%d.nc" % Year_tail)
        Startdate_tail = pd.Timestamp(Enddate) + pd.DateOffset(days=1)

        # Extract moving average period after the core period
        if os.path.exists(filename_tail):

            # Check whether the variable is present in that NetCDF file
            fh = Dataset(filename_tail)
            Variables_NC = [var for var in fh.variables]
            fh.close()

            if Var in Variables_NC:
                Array_tail = RC.Open_nc_array(filename_tail, Var,
                                              Startdate_tail,
                                              Enddate_Moving_Average_String)
            else:
                # Fall back to the monthly raster time series
                Array_tail = RC.Get3Darray_time_series_monthly(
                    Data_Path, Startdate_tail, Enddate_Moving_Average_String,
                    nc_outname)

        else:
            # Fall back to the monthly raster time series
            Array_tail = RC.Get3Darray_time_series_monthly(
                Data_Path, Startdate_tail, Enddate_Moving_Average_String,
                nc_outname)

        # Append the trailing months along the time axis
        Array_main = np.vstack([Array_main, Array_tail])

    return (Array_main)
예제 #18
0
def Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, nc_outname):
    """
    This functions create the CSV files for the sheets

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    Simulation : int
        Defines the simulation
    Basin : str
        Name of the basin
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    nc_outname : str
        Path to the .nc file containing the data

    Returns
    -------
    Data_Path_CSV : str
        Data path pointing to the CSV output files
    Unit_front : str
        Unit prefix applied to every value written in the CSVs
        (e.g. '1x10^-2 '); empty string when no scaling was needed

    """
    # Local imports keep the function self-contained and consistent with
    # the per-function import style used elsewhere in this module
    import csv
    import os

    import numpy as np
    import pandas as pd

    # import WA modules
    import watools.General.raster_conversions as RC
    import watools.Functions.Start as Start

    # Create output folder for CSV files
    Data_Path_CSV = os.path.join(Dir_Basin, "Simulations",
                                 "Simulation_%d" % Simulation, "CSV")
    if not os.path.exists(Data_Path_CSV):
        os.mkdir(Data_Path_CSV)

    # Open LULC map
    DataCube_LU = RC.Open_nc_array(nc_outname, 'Landuse')

    # Open all needed layers
    DataCube_Total_Supply_GW = RC.Open_nc_array(nc_outname,
                                                "Total_Supply_Ground_Water",
                                                Startdate, Enddate)
    DataCube_Total_Supply_SW = RC.Open_nc_array(nc_outname,
                                                "Total_Supply_Surface_Water",
                                                Startdate, Enddate)
    # NOTE(review): "consumed ET" is read from the "Total_Supply" variable
    # -- verify that this is the intended variable name
    DataCube_Consumed_ET = RC.Open_nc_array(nc_outname, "Total_Supply",
                                            Startdate, Enddate)
    DataCube_Non_Consumed = RC.Open_nc_array(nc_outname, "Non_Consumed_Water",
                                             Startdate, Enddate)
    DataCube_RecovableFlow_Return_GW = RC.Open_nc_array(
        nc_outname, "Recovable_Flow_Ground_Water", Startdate, Enddate)
    DataCube_RecovableFlow_Return_SW = RC.Open_nc_array(
        nc_outname, "Recovable_Flow_Surface_Water", Startdate, Enddate)
    DataCube_NonRecovableFlow_Return_GW = RC.Open_nc_array(
        nc_outname, "Non_Recovable_Flow_Ground_Water", Startdate, Enddate)
    DataCube_NonRecovableFlow_Return_SW = RC.Open_nc_array(
        nc_outname, "Non_Recovable_Flow_Surface_Water", Startdate, Enddate)

    # Set the months
    Dates = pd.date_range(Startdate, Enddate, freq="MS")

    # Define whole years (year-start and year-end stamps inside the period)
    YearsStart = pd.date_range(Startdate, Enddate, freq="AS")
    YearsEnd = pd.date_range(Startdate, Enddate, freq="A")
    if len(YearsStart) > 0 and len(YearsEnd) > 0:
        Years = range(int(YearsStart[0].year), int(YearsEnd[-1].year + 1))
        # Index in Dates where the first whole year starts
        Start_Year = np.argwhere(str(YearsStart[0])[0:10] == Dates)[0][0]
    else:
        Years = []

    # Calculate the area for each pixel in square meters
    area_in_m2 = Start.Area_converter.Degrees_to_m2(nc_outname)

    # Get all the LULC types that are defined for sheet 4
    LU_Classes = Start.Get_Dictionaries.get_sheet5_classes()
    LU_Classes_Keys = list(LU_Classes.keys())
    Required_LU_Classes = np.append(LU_Classes_Keys,
                                    ['Industry', 'Power and Energy'])

    # Convert data from mm/month to km3/month (per-pixel area weighting)
    Total_Supply_GW_km3 = np.einsum('ij,kij->kij', area_in_m2,
                                    DataCube_Total_Supply_GW) / 1e12
    Total_Supply_SW_km3 = np.einsum('ij,kij->kij', area_in_m2,
                                    DataCube_Total_Supply_SW) / 1e12
    Non_Consumed_km3 = np.einsum('ij,kij->kij', area_in_m2,
                                 DataCube_Non_Consumed) / 1e12
    Consumed_km3 = np.einsum('ij,kij->kij', area_in_m2,
                             DataCube_Consumed_ET) / 1e12
    RecovableFlow_Return_GW_km3 = np.einsum(
        'ij,kij->kij', area_in_m2, DataCube_RecovableFlow_Return_GW) / 1e12
    RecovableFlow_Return_SW_km3 = np.einsum(
        'ij,kij->kij', area_in_m2, DataCube_RecovableFlow_Return_SW) / 1e12
    NonRecovableFlow_Return_GW_km3 = np.einsum(
        'ij,kij->kij', area_in_m2, DataCube_NonRecovableFlow_Return_GW) / 1e12
    NonRecovableFlow_Return_SW_km3 = np.einsum(
        'ij,kij->kij', area_in_m2, DataCube_NonRecovableFlow_Return_SW) / 1e12

    # Create mask for all LU classes
    All_mask = dict()

    # Create masks (1 where the pixel belongs to the class, 0 elsewhere)
    for Required_LU_Class in Required_LU_Classes[:-2]:
        Mask_Class = np.zeros(DataCube_LU.shape)
        Values_LULC = LU_Classes[Required_LU_Class]
        for Value_LULC in Values_LULC:
            Mask_Class[DataCube_LU == Value_LULC] = 1

        All_mask[Required_LU_Class] = Mask_Class

    # Enter additional (empty) masks for industry and power and energy
    All_mask['Industry'] = np.zeros(DataCube_LU.shape)
    All_mask['Power and Energy'] = np.zeros(DataCube_LU.shape)

    # Create empty arrays for the values [class, month]
    Values_Total_Supply_GW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])
    Values_Total_Supply_SW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])
    Values_Non_Consumed_km3 = np.zeros([len(Required_LU_Classes), len(Dates)])
    Values_Consumed_km3 = np.zeros([len(Required_LU_Classes), len(Dates)])
    Values_RecovableFlow_Return_GW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])
    Values_RecovableFlow_Return_SW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])
    Values_NonRecovableFlow_Return_GW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])
    Values_NonRecovableFlow_Return_SW_km3 = np.zeros(
        [len(Required_LU_Classes), len(Dates)])

    # zero values for now
    Values_Consumed_Others = np.zeros([len(Required_LU_Classes), len(Dates)])
    Values_Demand = np.zeros([len(Required_LU_Classes), len(Dates)])

    i = 0
    Max_value = 0

    # Calculate monthly sums per class by applying the mask over the data
    for Required_LU_Class in Required_LU_Classes:
        Mask_one_class = All_mask[Required_LU_Class]
        Values_Total_Supply_GW_km3[i, :] = np.nansum(
            np.nansum(Total_Supply_GW_km3 * Mask_one_class, 2), 1)
        Values_Total_Supply_SW_km3[i, :] = np.nansum(
            np.nansum(Total_Supply_SW_km3 * Mask_one_class, 2), 1)
        Values_Non_Consumed_km3[i, :] = np.nansum(
            np.nansum(Non_Consumed_km3 * Mask_one_class, 2), 1)
        Values_Consumed_km3[i, :] = np.nansum(
            np.nansum(Consumed_km3 * Mask_one_class, 2), 1)
        Values_RecovableFlow_Return_GW_km3[i, :] = np.nansum(
            np.nansum(RecovableFlow_Return_GW_km3 * Mask_one_class, 2), 1)
        Values_RecovableFlow_Return_SW_km3[i, :] = np.nansum(
            np.nansum(RecovableFlow_Return_SW_km3 * Mask_one_class, 2), 1)
        Values_NonRecovableFlow_Return_GW_km3[i, :] = np.nansum(
            np.nansum(NonRecovableFlow_Return_GW_km3 * Mask_one_class, 2), 1)
        Values_NonRecovableFlow_Return_SW_km3[i, :] = np.nansum(
            np.nansum(NonRecovableFlow_Return_SW_km3 * Mask_one_class, 2), 1)
        i += 1

        # Track the largest total supply seen so far (for unit scaling)
        Max_value_one_LU = np.nanmax(
            np.nanmax(Values_Total_Supply_GW_km3 + Values_Total_Supply_SW_km3))

        if Max_value_one_LU > Max_value:
            Max_value = Max_value_one_LU

    # Check if scaling is needed so the printed values stay readable
    scaling = 1
    exponent = 0
    Max_value_str = str(Max_value)
    Values = Max_value_str.split('.')
    if int(Values[0]) > 10:
        # Large values: scale down by a power of ten
        exponent = len(Values[0]) - 1
        scaling = float(np.power(10., -1. * exponent))
    if int(Values[0]) == 0 and Max_value > 0:
        # Small values: count the leading zero decimals and scale up.
        # Guarded against Max_value == 0, which would otherwise run the
        # while-loop past the end of Values_all (IndexError).
        Values_all = Values[1].split('0')
        exponent = 1
        Values_now = Values_all[0]
        while len(Values_now) == 0:
            exponent += 1
            Values_now = Values_all[exponent - 1]

        scaling = np.power(10, exponent)

    # Use value equality, not identity: `scaling is not 1` compared object
    # identity with an int literal (SyntaxWarning since Python 3.8 and
    # implementation-dependent behaviour)
    if scaling != 1:
        Unit_front = '1x10^%d ' % (-1 * exponent)
    else:
        Unit_front = ''

    # Create CSV

    # First row of the CSV file
    first_row = [
        'LANDUSE_TYPE', 'SUPPLY_GROUNDWATER', 'NON_RECOVERABLE_GROUNDWATER',
        'SUPPLY_SURFACEWATER', 'NON_CONVENTIONAL_ET',
        'RECOVERABLE_GROUNDWATER', 'CONSUMED_OTHER', 'CONSUMED_ET', 'DEMAND',
        'RECOVERABLE_SURFACEWATER', 'NON_RECOVERABLE_SURFACEWATER'
    ]

    # Counter for dates
    i = 0

    # Create monthly CSV
    for Date in Dates:

        # Create csv-file (context manager guarantees the file is closed;
        # newline='' is required by the csv module to avoid blank rows on
        # Windows).
        csv_filename = os.path.join(
            Data_Path_CSV, 'Sheet4_Sim%d_%s_%d_%02d.csv' %
            (Simulation, Basin, Date.year, Date.month))
        with open(csv_filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            writer.writerow(first_row)

            # Counter for landuse types
            j = 0

            # Loop over landuse and class
            for LAND_USE in Required_LU_Classes:

                # Get the value of the current class and landuse
                Value_Total_Supply_GW_km3 = (
                    Values_Total_Supply_GW_km3[j, i] * scaling)
                Value_Total_Supply_SW_km3 = (
                    Values_Total_Supply_SW_km3[j, i] * scaling)
                Value_Non_Consumed_km3 = Values_Non_Consumed_km3[j, i] * scaling
                Value_Consumed_km3 = Values_Consumed_km3[j, i] * scaling
                Value_RecovableFlow_Return_GW_km3 = (
                    Values_RecovableFlow_Return_GW_km3[j, i] * scaling)
                Value_RecovableFlow_Return_SW_km3 = (
                    Values_RecovableFlow_Return_SW_km3[j, i] * scaling)
                Value_NonRecovableFlow_Return_GW_km3 = (
                    Values_NonRecovableFlow_Return_GW_km3[j, i] * scaling)
                Value_NonRecovableFlow_Return_SW_km3 = (
                    Values_NonRecovableFlow_Return_SW_km3[j, i] * scaling)
                Value_Consumed_Others = Values_Consumed_Others[j, i] * scaling
                Value_Demand = Values_Demand[j, i] * scaling

                # Set special cases.
                # not defined yet

                # Create the row to be written (np.nansum([0, x]) maps NaN
                # to 0 before formatting)
                row = [
                    LAND_USE,
                    "{0:.2f}".format(np.nansum([0, Value_Total_Supply_GW_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_NonRecovableFlow_Return_GW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Total_Supply_SW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Non_Consumed_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_RecovableFlow_Return_GW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Consumed_Others])),
                    "{0:.2f}".format(np.nansum([0, Value_Consumed_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Demand])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_RecovableFlow_Return_SW_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_NonRecovableFlow_Return_SW_km3]))
                ]

                # Write the row.
                writer.writerow(row)

                # Add one LU counter
                j += 1

        # Add one date counter
        i += 1

    # Create yearly CSV (only runs when whole years exist, in which case
    # Start_Year was defined above)
    i = 0
    for Year in Years:

        # Create csv-file.
        csv_filename = os.path.join(
            Data_Path_CSV,
            'Sheet4_Sim%d_%s_%d.csv' % (Simulation, Basin, Year))
        with open(csv_filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            writer.writerow(first_row)

            j = 0

            # Loop over landuse and class
            for LAND_USE in Required_LU_Classes:

                # Sum the 12 months of this year for the current class
                Value_Total_Supply_GW_km3 = np.sum(
                    Values_Total_Supply_GW_km3[j, Start_Year:Start_Year +
                                               12]) * scaling
                Value_Total_Supply_SW_km3 = np.sum(
                    Values_Total_Supply_SW_km3[j, Start_Year:Start_Year +
                                               12]) * scaling
                Value_Non_Consumed_km3 = np.sum(
                    Values_Non_Consumed_km3[j, Start_Year:Start_Year +
                                            12]) * scaling
                Value_Consumed_km3 = np.sum(
                    Values_Consumed_km3[j, Start_Year:Start_Year +
                                        12]) * scaling
                Value_RecovableFlow_Return_GW_km3 = np.sum(
                    Values_RecovableFlow_Return_GW_km3[j,
                                                       Start_Year:Start_Year +
                                                       12]) * scaling
                Value_RecovableFlow_Return_SW_km3 = np.sum(
                    Values_RecovableFlow_Return_SW_km3[j,
                                                       Start_Year:Start_Year +
                                                       12]) * scaling
                Value_NonRecovableFlow_Return_GW_km3 = np.sum(
                    Values_NonRecovableFlow_Return_GW_km3[
                        j, Start_Year:Start_Year + 12]) * scaling
                Value_NonRecovableFlow_Return_SW_km3 = np.sum(
                    Values_NonRecovableFlow_Return_SW_km3[
                        j, Start_Year:Start_Year + 12]) * scaling
                Value_Consumed_Others = np.sum(
                    Values_Consumed_Others[j, Start_Year:Start_Year +
                                           12]) * scaling
                Value_Demand = np.sum(
                    Values_Demand[j, Start_Year:Start_Year + 12]) * scaling

                # Set special cases.
                # not defined yet

                # Create the row to be written
                row = [
                    LAND_USE,
                    "{0:.2f}".format(np.nansum([0, Value_Total_Supply_GW_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_NonRecovableFlow_Return_GW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Total_Supply_SW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Non_Consumed_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_RecovableFlow_Return_GW_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Consumed_Others])),
                    "{0:.2f}".format(np.nansum([0, Value_Consumed_km3])),
                    "{0:.2f}".format(np.nansum([0, Value_Demand])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_RecovableFlow_Return_SW_km3])),
                    "{0:.2f}".format(
                        np.nansum([0, Value_NonRecovableFlow_Return_SW_km3]))
                ]

                # Write the row.
                writer.writerow(row)

                # Add one LU counter
                j += 1

        # Advance to the next year's block of 12 months
        i += 1
        Start_Year += 12

    return (Data_Path_CSV, Unit_front)
예제 #19
0
def main(input_nc,
         output_nc,
         input_JRC,
         Inflow_Text_Files,
         include_reservoirs=1):
    """
    Run the complete SurfWAT routing chain (Parts 0-5) and store every
    intermediate and final result in one output NetCDF file.

    Parameters
    ----------
    input_nc : str
        Path to the input NetCDF. Must contain the variables
        'Runoff_M' (mm/month), 'demdir', 'basin', 'area' (m2) and 'time'.
    output_nc : str
        Path of the NetCDF file that will be created/extended with the
        routing results (rivers, accumulated pixels, discharges, error map).
    input_JRC : str or None
        Path to the JRC water-occurrence raster, used by Part 3 when
        reservoirs are included.
    Inflow_Text_Files : list
        Text files describing inlet inflows. When non-empty, the runoff is
        first adjusted by Part 0 to include these inlets.
    include_reservoirs : int, optional
        1 (default) runs Part 3 (reservoirs); any other value copies the
        Part 2 dictionaries unchanged.

    Returns
    -------
    None
        All results are written to ``output_nc``.
    """
    import time
    import watools.General.raster_conversions as RC
    import watools.General.data_conversions as DC
    import numpy as np
    import netCDF4

    ####################### add inflow text files #################################

    if len(Inflow_Text_Files) > 0:

        import watools.Models.SurfWAT.Part0_Add_Inlets as Part0_Add_Inlets

        # Calculate the runoff that will be routed by including the inlets.
        # BUG FIX: the module itself was being called (TypeError: 'module'
        # object is not callable). Call its entry point instead — assumed to
        # be Run(), matching Part1-Part4. NOTE(review): confirm the exact
        # function name in Part0_Add_Inlets.
        Runoff = Part0_Add_Inlets.Run(input_nc, Inflow_Text_Files)

    else:
        # Extract runoff data from NetCDF file
        Runoff = RC.Open_nc_array(input_nc, Var='Runoff_M')

    ###############################################################################

    # Extract flow direction data from NetCDF file
    flow_directions = RC.Open_nc_array(input_nc, Var='demdir')

    # Extract basin data from NetCDF file
    Basin = RC.Open_nc_array(input_nc, Var='basin')
    Areas_in_m2 = RC.Open_nc_array(input_nc, Var='area')
    # mm/month * m2 / 1000 -> m3/month per pixel
    Runoff_in_m3_month = ((Runoff / 1000) * Areas_in_m2)

    ###############################################################################
    ############################### Run Part 1 ####################################
    ###############################################################################

    import watools.Models.SurfWAT.Part1_Channel_Routing as Part1_Channel_Routing
    Routed_Array, Accumulated_Pixels, Rivers = Part1_Channel_Routing.Run(
        Runoff_in_m3_month, flow_directions, Basin)

    ###############################################################################
    ################## Create NetCDF Part 1 results ###############################
    ###############################################################################

    ################### Get Example parameters for NetCDF #########################

    # Grid/projection info of the input serves as template for the output
    geo_out_example, epsg_example, size_X_example, size_Y_example, size_Z_example, Time_example = RC.Open_nc_info(
        input_nc)
    geo_out_example = np.array(geo_out_example)

    time_or = RC.Open_nc_array(input_nc, Var='time')

    # Latitude and longitude of the pixel centres (geotransform gives the
    # pixel edge, hence the half-pixel shift)
    lon_ls = np.arange(size_X_example) * geo_out_example[1] + geo_out_example[
        0] + 0.5 * geo_out_example[1]
    lat_ls = np.arange(size_Y_example) * geo_out_example[5] + geo_out_example[
        3] - 0.5 * geo_out_example[5]

    lat_n = len(lat_ls)
    lon_n = len(lon_ls)

    ################################ Save NetCDF ##################################

    # Create NetCDF file
    nc_file = netCDF4.Dataset(output_nc, 'w')
    nc_file.set_fill_on()

    # Create dimensions
    nc_file.createDimension('latitude', lat_n)
    nc_file.createDimension('longitude', lon_n)

    # Create the CRS (grid-mapping) variable carrying the WGS84 metadata
    crso = nc_file.createVariable('crs', 'i4')
    crso.long_name = 'Lon/Lat Coords in WGS84'
    crso.standard_name = 'crs'
    crso.grid_mapping_name = 'latitude_longitude'
    crso.projection = epsg_example
    crso.longitude_of_prime_meridian = 0.0
    crso.semi_major_axis = 6378137.0
    crso.inverse_flattening = 298.257223563
    crso.geo_reference = geo_out_example

    ######################### Save Rasters in NetCDF ##############################

    lat_var = nc_file.createVariable('latitude', 'f8', ('latitude', ))
    lat_var.units = 'degrees_north'
    lat_var.standard_name = 'latitude'
    lat_var.pixel_size = geo_out_example[5]

    lon_var = nc_file.createVariable('longitude', 'f8', ('longitude', ))
    lon_var.units = 'degrees_east'
    lon_var.standard_name = 'longitude'
    lon_var.pixel_size = geo_out_example[1]

    # Unlimited time dimension so later parts can append along it
    nc_file.createDimension('time', None)
    timeo = nc_file.createVariable('time', 'f4', ('time', ))
    timeo.units = 'Monthly'
    timeo.standard_name = 'time'

    # Variables
    rivers_var = nc_file.createVariable('rivers',
                                        'i', ('latitude', 'longitude'),
                                        fill_value=-9999)
    rivers_var.long_name = 'Rivers'
    rivers_var.grid_mapping = 'crs'

    accpix_var = nc_file.createVariable('accpix',
                                        'f8', ('latitude', 'longitude'),
                                        fill_value=-9999)
    accpix_var.long_name = 'Accumulated Pixels'
    accpix_var.units = 'AmountPixels'
    accpix_var.grid_mapping = 'crs'

    discharge_nat_var = nc_file.createVariable(
        'discharge_natural',
        'f8', ('time', 'latitude', 'longitude'),
        fill_value=-9999)
    discharge_nat_var.long_name = 'Natural Discharge'
    discharge_nat_var.units = 'm3/month'
    discharge_nat_var.grid_mapping = 'crs'

    # Load data
    lat_var[:] = lat_ls
    lon_var[:] = lon_ls
    timeo[:] = time_or

    # Static variables
    rivers_var[:, :] = Rivers[:, :]
    accpix_var[:, :] = Accumulated_Pixels[:, :]
    # Write the routed discharge one timestep at a time
    for i in range(len(time_or)):
        discharge_nat_var[i, :, :] = Routed_Array[i, :, :]

    # NOTE(review): sleep(1) before close looks like a workaround for slow
    # file-system flushes — confirm it is still needed.
    time.sleep(1)
    nc_file.close()
    del Routed_Array, Accumulated_Pixels

    ###############################################################################
    ############################### Run Part 2 ####################################
    ###############################################################################

    import watools.Models.SurfWAT.Part2_Create_Dictionaries as Part2_Create_Dictionaries
    DEM_dict, River_dict, Distance_dict, Discharge_dict = Part2_Create_Dictionaries.Run(
        input_nc, output_nc)

    ###############################################################################
    ################## Create NetCDF Part 2 results ###############################
    ###############################################################################

    # Reopen the NetCDF file in append mode
    nc_file = netCDF4.Dataset(output_nc, 'r+')
    nc_file.set_fill_on()

    ###################### Save Dictionaries in NetCDF ############################

    # Dictionaries are stored as group attributes: one attribute per key,
    # with the array value serialised via str(v.tolist())
    parmsdem = nc_file.createGroup('demdict_static')
    for k, v in list(DEM_dict.items()):
        setattr(parmsdem, str(k), str(v.tolist()))

    parmsriver = nc_file.createGroup('riverdict_static')
    for k, v in list(River_dict.items()):
        setattr(parmsriver, str(k), str(v.tolist()))

    parmsdist = nc_file.createGroup('distancedict_static')
    for k, v in list(Distance_dict.items()):
        setattr(parmsdist, str(k), str(v.tolist()))

    parmsdis = nc_file.createGroup('dischargedict_dynamic')
    for k, v in list(Discharge_dict.items()):
        setattr(parmsdis, str(k), str(v.tolist()))

    # Close file
    time.sleep(1)
    nc_file.close()

    ###############################################################################
    ############################### Run Part 3 ####################################
    ###############################################################################

    if include_reservoirs == 1:
        import watools.Models.SurfWAT.Part3_Reservoirs as Part3_Reservoirs
        Discharge_dict_2, River_dict_2, DEM_dict_2, Distance_dict_2 = Part3_Reservoirs.Run(
            input_nc, output_nc, input_JRC)

    else:
        # Without reservoirs the Part 2 dictionaries pass through unchanged
        import copy
        Discharge_dict_2 = copy.deepcopy(Discharge_dict)
        River_dict_2 = copy.deepcopy(River_dict)
        DEM_dict_2 = copy.deepcopy(DEM_dict)
        Distance_dict_2 = copy.deepcopy(Distance_dict)

    ###############################################################################
    ################## Create NetCDF Part 3 results ###############################
    ###############################################################################

    # Reopen the NetCDF file in append mode
    nc_file = netCDF4.Dataset(output_nc, 'r+')
    nc_file.set_fill_on()

    ###################### Save Dictionaries in NetCDF ############################

    parmsdisres = nc_file.createGroup('dischargedictreservoirs_dynamic')
    for k, v in list(Discharge_dict_2.items()):
        setattr(parmsdisres, str(k), str(v.tolist()))

    parmsrivresend = nc_file.createGroup('riverdictres_static')
    for k, v in list(River_dict_2.items()):
        setattr(parmsrivresend, str(k), str(v.tolist()))

    parmsdemres = nc_file.createGroup('demdictres_static')
    for k, v in list(DEM_dict_2.items()):
        setattr(parmsdemres, str(k), str(v.tolist()))

    parmsdistres = nc_file.createGroup('distancedictres_static')
    for k, v in list(Distance_dict_2.items()):
        setattr(parmsdistres, str(k), str(v.tolist()))

    # Close file
    time.sleep(1)
    nc_file.close()

    del DEM_dict, River_dict, Distance_dict, Discharge_dict
    ###############################################################################
    ############################### Run Part 4 ####################################
    ###############################################################################

    import watools.Models.SurfWAT.Part4_Withdrawals as Part4_Withdrawals
    Discharge_dict_end, Error_map = Part4_Withdrawals.Run(input_nc, output_nc)

    ###############################################################################
    ################## Create NetCDF Part 4 results ###############################
    ###############################################################################

    # Reopen the NetCDF file in append mode
    nc_file = netCDF4.Dataset(output_nc, 'r+')
    nc_file.set_fill_on()

    ######################### Save Rasters in NetCDF ##############################

    error_map_var = nc_file.createVariable('error_map_mm',
                                           'f8',
                                           ('time', 'latitude', 'longitude'),
                                           fill_value=-9999)
    error_map_var.long_name = 'Error Map'
    error_map_var.units = 'mm/month'
    error_map_var.grid_mapping = 'crs'

    for i in range(len(time_or)):
        error_map_var[i, :, :] = Error_map[i, :, :]

    ###################### Save Dictionaries in NetCDF ############################

    parmsdisend = nc_file.createGroup('dischargedictend_dynamic')
    for k, v in list(Discharge_dict_end.items()):
        setattr(parmsdisend, str(k), str(v.tolist()))

    # Close file
    time.sleep(1)
    nc_file.close()
    del Discharge_dict_end

    ###############################################################################
    ############### Part 5 Convert dictionaries to rasters ########################
    ###############################################################################

    # Read back the stored dictionaries and rasterise the final discharge
    River_dict = RC.Open_nc_dict(output_nc, 'riverdict_static')

    # End discharge dictionary to raster
    Discharge_dict_end = RC.Open_nc_dict(output_nc, 'dischargedictend_dynamic')
    DataCube_Discharge_end = DC.Convert_dict_to_array(River_dict,
                                                      Discharge_dict_end,
                                                      input_nc)

    ###################### Save Dictionaries in NetCDF ############################

    # Reopen the NetCDF file in append mode
    nc_file = netCDF4.Dataset(output_nc, 'r+')
    nc_file.set_fill_on()

    discharge_end_var = nc_file.createVariable(
        'discharge_end',
        'f8', ('time', 'latitude', 'longitude'),
        fill_value=-9999)
    discharge_end_var.long_name = 'End Discharge'
    discharge_end_var.units = 'm3/month'
    discharge_end_var.grid_mapping = 'crs'

    for i in range(len(time_or)):
        discharge_end_var[i, :, :] = DataCube_Discharge_end[i, :, :]

    # Close file
    nc_file.close()
    del DataCube_Discharge_end
Example #20
0
def Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, nc_outname, Example_dataset):
    """
    Create the monthly (and, for whole years, yearly) Sheet 2 CSV files.

    For every (land-use category, class) pair of the Sheet 2 dictionary the
    spatial sums of transpiration, soil/water evaporation and interception
    (total and beneficial, km3/month) plus the service contributions
    (agriculture, environment, economy, energy, leisure) are written to one
    semicolon-delimited CSV per month and one per complete year.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    Simulation : int
        Defines the simulation
    Basin : str
        Name of the basin
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    nc_outname : str
        Path to the .nc file containing the data (needs 'Landuse',
        'Interception', 'Transpiration' and 'Evaporation' variables)
    Example_dataset : str
         Data path to the example tiff file containing the right amount of pixels and projection

    Returns
    -------
    Data_Path_CSV : str
        Data path pointing to the CSV output files
    """
    # import WA modules
    import watools.Functions.Start.Get_Dictionaries as GD
    import watools.General.raster_conversions as RC
    from watools.Functions import Start

    # Create output folder for CSV files
    Data_Path_CSV = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation, "CSV")
    if not os.path.exists(Data_Path_CSV):
        os.mkdir(Data_Path_CSV)

    # Open LULC map
    LULC = RC.Open_nc_array(nc_outname, 'Landuse')

    # Open I, T, E time series (mm/month, shape [time, lat, lon])
    DataCube_I = RC.Open_nc_array(nc_outname, 'Interception', Startdate, Enddate)
    DataCube_T = RC.Open_nc_array(nc_outname, 'Transpiration', Startdate, Enddate)
    DataCube_E = RC.Open_nc_array(nc_outname, 'Evaporation', Startdate, Enddate)

    # Set the months (one entry per month-start in the period)
    Dates = pd.date_range(Startdate, Enddate, freq = "MS")

    # Define whole years: only year-start ("AS") and year-end ("A") pairs
    # fully inside the period yield yearly CSVs
    YearsStart = pd.date_range(Startdate, Enddate, freq = "AS")
    YearsEnd = pd.date_range(Startdate, Enddate, freq = "A")
    if len(YearsStart) > 0 and len(YearsEnd) > 0:
        Years = list(range(int(YearsStart[0].year), int(YearsEnd[-1].year + 1)))
        # Index in Dates of the first full year's first month.
        # NOTE(review): compares a 'yyyy-mm-dd' string element-wise against a
        # DatetimeIndex — relies on pandas coercion; confirm on the pandas
        # version in use. Start_Year stays undefined when no whole year fits,
        # which is harmless only because the yearly loop is then empty.
        Start_Year = np.argwhere(str(YearsStart[0])[0:10]==Dates)[0][0]
    else:
        Years = []

    # Calculate the area for each pixel in square meters
    area_in_m2 = Start.Area_converter.Degrees_to_m2(Example_dataset)

    # Create Beneficial Maps
    lulc_dict = GD.get_lulcs()

    # Get all the LULC values
    Values_LULC = np.unique(LULC)

    # Create new Benefial arrays (per-pixel fractions, same grid as LULC)
    T_ben_array = np.zeros(np.shape(LULC))
    E_ben_array = np.zeros(np.shape(LULC))
    I_ben_array = np.zeros(np.shape(LULC))
    agriculture_array = np.zeros(np.shape(LULC))
    environment_array= np.zeros(np.shape(LULC))
    economic_array = np.zeros(np.shape(LULC))
    energy_array = np.zeros(np.shape(LULC))
    leisure_array = np.zeros(np.shape(LULC))

    # Loop over LULC values and set benefial fractions
    # (percentages in lulc_dict columns 3-10, converted to fractions)
    for Value_LULC in Values_LULC:
        if Value_LULC in list(lulc_dict.keys()):
            T_ben = lulc_dict[Value_LULC][3]
            E_ben = lulc_dict[Value_LULC][4]
            I_ben = lulc_dict[Value_LULC][5]
            agriculture = lulc_dict[Value_LULC][6]
            environment = lulc_dict[Value_LULC][7]
            economic = lulc_dict[Value_LULC][8]
            energy = lulc_dict[Value_LULC][9]
            leisure = lulc_dict[Value_LULC][10]

            T_ben_array[LULC == Value_LULC] = T_ben/100.
            E_ben_array[LULC == Value_LULC] = E_ben/100.
            I_ben_array[LULC == Value_LULC] = I_ben/100.
            agriculture_array[LULC == Value_LULC] = agriculture/100.
            environment_array[LULC == Value_LULC] = environment/100.
            economic_array[LULC == Value_LULC] = economic /100.
            energy_array[LULC == Value_LULC] = energy/100.
            leisure_array[LULC == Value_LULC] = leisure /100.

    # Open sheet 2 dict
    sheet2_classes_dict = GD.get_sheet2_classes()

    # Convert data from mm/month to km3/month:
    # mm * m2 = 1e-3 m3 = 1e-12 km3, hence the division by 1e12
    I_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_I)/ 1e12
    E_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_E)/ 1e12
    T_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_T)/ 1e12

    # Calculate beneficial I, E, and T (per-pixel fraction times volume)
    Iben_km3 = np.einsum('ij,kij->kij', I_ben_array, I_km3)
    Eben_km3 = np.einsum('ij,kij->kij', E_ben_array, E_km3)
    Tben_km3 = np.einsum('ij,kij->kij', T_ben_array, T_km3)
    ETben_tot_km3 = Iben_km3 + Eben_km3 + Tben_km3

    # Determine service contribution (split of total beneficial ET)
    agriculture_km3 = np.einsum('ij,kij->kij', agriculture_array, ETben_tot_km3)
    environment_km3 = np.einsum('ij,kij->kij', environment_array, ETben_tot_km3)
    economic_km3 = np.einsum('ij,kij->kij', economic_array, ETben_tot_km3)
    energy_km3 = np.einsum('ij,kij->kij', energy_array, ETben_tot_km3)
    leisure_km3 = np.einsum('ij,kij->kij', leisure_array, ETben_tot_km3)

    # Create empty arrays: one row per (LAND_USE, CLASS) pair, one column
    # per month. NOTE(review): 29 is hard-coded — presumably the number of
    # pairs in get_sheet2_classes(); confirm they stay in sync.
    DataT = np.zeros([29,len(Dates)])
    DataI = np.zeros([29,len(Dates)])
    DataE = np.zeros([29,len(Dates)])
    DataBT = np.zeros([29,len(Dates)])
    DataBI = np.zeros([29,len(Dates)])
    DataBE = np.zeros([29,len(Dates)])
    DataAgriculture = np.zeros([29,len(Dates)])
    DataEnvironment = np.zeros([29,len(Dates)])
    DataEconomic = np.zeros([29,len(Dates)])
    DataEnergy = np.zeros([29,len(Dates)])
    DataLeisure = np.zeros([29,len(Dates)])
    i = 0

    # Loop over the LULC by using the Sheet 2 dictionary
    for LAND_USE in list(sheet2_classes_dict.keys()):
        for CLASS in list(sheet2_classes_dict[LAND_USE].keys()):
            lulcs = sheet2_classes_dict[LAND_USE][CLASS]

            # Create a mask to ignore non relevant pixels.
            mask=np.logical_or.reduce([LULC == value for value in lulcs])
            # Broadcast the 2-D mask along the time axis
            mask3d = mask * np.ones(len(Dates))[:,None,None]

            # Calculate the spatial sum of the different parameters.
            T_LU_tot = np.nansum(np.nansum((T_km3 * mask3d),1),1)
            I_LU_tot = np.nansum(np.nansum((I_km3 * mask3d),1),1)
            E_LU_tot = np.nansum(np.nansum((E_km3 * mask3d),1),1)
            BT_LU_tot = np.nansum(np.nansum((Tben_km3 * mask3d),1),1)
            BI_LU_tot = np.nansum(np.nansum((Iben_km3 * mask3d),1),1)
            BE_LU_tot = np.nansum(np.nansum((Eben_km3 * mask3d),1),1)
            Agriculture_LU_tot = np.nansum(np.nansum((agriculture_km3 * mask3d),1),1)
            Environment_LU_tot = np.nansum(np.nansum((environment_km3 * mask3d),1),1)
            Economic_LU_tot = np.nansum(np.nansum((economic_km3 * mask3d),1),1)
            Energy_LU_tot = np.nansum(np.nansum((energy_km3 * mask3d),1),1)
            Leisure_LU_tot = np.nansum(np.nansum((leisure_km3 * mask3d),1),1)

            # Store the monthly series in row i of each table
            DataT[i,:] = T_LU_tot
            DataBT[i,:] = BT_LU_tot
            DataI[i,:] = I_LU_tot
            DataBI[i,:] = BI_LU_tot
            DataE[i,:] = E_LU_tot
            DataBE[i,:] = BE_LU_tot
            DataAgriculture[i,:] = Agriculture_LU_tot
            DataEnvironment[i,:] = Environment_LU_tot
            DataEconomic[i,:] = Economic_LU_tot
            DataEnergy[i,:] = Energy_LU_tot
            DataLeisure[i,:] = Leisure_LU_tot
            i += 1

    # Calculate non benefial components (total minus beneficial)
    DataNBT = DataT - DataBT
    DataNBI = DataI - DataBI
    DataNBE = DataE - DataBE
    DataNB_tot = DataNBT + DataNBI + DataNBE

    # Create CSV header
    first_row = ['LAND_USE', 'CLASS', 'TRANSPIRATION', 'WATER', 'SOIL', 'INTERCEPTION', 'AGRICULTURE', 'ENVIRONMENT', 'ECONOMY', 'ENERGY', 'LEISURE', 'NON_BENEFICIAL']
    i = 0

    # Create monthly CSV (i indexes the month column, j the class row)
    for Date in Dates:

        # Create csv-file.
        csv_filename = os.path.join(Data_Path_CSV, 'Sheet2_Sim%d_%s_%d_%02d.csv' %(Simulation, Basin, Date.year, Date.month))
        csv_file = open(csv_filename, 'w')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in list(sheet2_classes_dict.keys()):
             for CLASS in list(sheet2_classes_dict[LAND_USE].keys()):

                # Get the value of the current class and landuse
                Transpiration = DataT[j,i]
                Evaporation = DataE[j,i]
                Interception = DataI[j,i]
                Agriculture = DataAgriculture[j,i]
                Environment = DataEnvironment[j,i]
                Economic = DataEconomic[j,i]
                Energy = DataEnergy[j,i]
                Leisure = DataLeisure[j,i]
                Non_beneficial = DataNB_tot[j,i]

                # Set special cases: water bodies report evaporation in the
                # WATER column, everything else in the SOIL column
                if np.any([CLASS == 'Natural water bodies', CLASS == 'Managed water bodies']):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written (np.nansum([0, x]) maps NaN to 0)
                row = [LAND_USE, CLASS, "{0:.2f}".format(np.nansum([0, Transpiration])), "{0:.2f}".format(np.nansum([0, Water_evaporation])), "{0:.2f}".format(np.nansum([0, Soil_evaporation])), "{0:.2f}".format(np.nansum([0, Interception])), "{0:.2f}".format(np.nansum([0, Agriculture])), "{0:.2f}".format(np.nansum([0, Environment])), "{0:.2f}".format(np.nansum([0, Economic])), "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(np.nansum([0, Leisure])), "{0:.2f}".format(np.nansum([0, Non_beneficial]))]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1

    # Create yearly CSV: sum 12 consecutive monthly columns per whole year,
    # advancing Start_Year by 12 each iteration
    i = 0
    for Year in Years:

        # Create csv-file.
        csv_filename = os.path.join(Data_Path_CSV, 'Sheet2_Sim%d_%s_%d.csv' %(Simulation, Basin, Year))
        csv_file = open(csv_filename, 'w')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in list(sheet2_classes_dict.keys()):
             for CLASS in list(sheet2_classes_dict[LAND_USE].keys()):

                # Get the yearly value of the current class and landuse
                Transpiration = np.sum(DataT[j,Start_Year:Start_Year+12])
                Evaporation = np.sum(DataE[j,Start_Year:Start_Year+12])
                Interception = np.sum(DataI[j,Start_Year:Start_Year+12])
                Agriculture = np.sum(DataAgriculture[j,Start_Year:Start_Year+12])
                Environment = np.sum(DataEnvironment[j,Start_Year:Start_Year+12])
                Economic = np.sum(DataEconomic[j,Start_Year:Start_Year+12])
                Energy = np.sum(DataEnergy[j,Start_Year:Start_Year+12])
                Leisure = np.sum(DataLeisure[j,Start_Year:Start_Year+12])
                Non_beneficial = np.sum(DataNB_tot[j,Start_Year:Start_Year+12])

                # Set special cases: same water/soil split as the monthly CSVs
                if np.any([CLASS == 'Natural water bodies', CLASS == 'Managed water bodies']):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written
                row = [LAND_USE, CLASS, "{0:.2f}".format(np.nansum([0, Transpiration])), "{0:.2f}".format(np.nansum([0, Water_evaporation])), "{0:.2f}".format(np.nansum([0, Soil_evaporation])), "{0:.2f}".format(np.nansum([0, Interception])), "{0:.2f}".format(np.nansum([0, Agriculture])), "{0:.2f}".format(np.nansum([0, Environment])), "{0:.2f}".format(np.nansum([0, Economic])), "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(np.nansum([0, Leisure])), "{0:.2f}".format(np.nansum([0, Non_beneficial]))]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1
        Start_Year += 12

    return(Data_Path_CSV)
Example #21
0
File: main.py  Project: CMicha/watools
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, ETref_Product,
              DEM_Product, Water_Occurence_Product, Inflow_Text_Files,
              WaterPIX_filename, Reservoirs_GEE_on_off, Supply_method,
              Startdate, Enddate, Simulation):
    '''
    This functions consists of the following sections:
    1. Set General Parameters
    2. Download Data
    3. Convert the RAW data to NETCDF files
    4. Run SurfWAT

    '''
    # import General modules
    import os
    import gdal
    import numpy as np
    import pandas as pd
    from netCDF4 import Dataset

    # import WA plus modules
    from watools.General import raster_conversions as RC
    from watools.General import data_conversions as DC
    import watools.Functions.Five as Five
    import watools.Functions.Start as Start
    import watools.Functions.Start.Get_Dictionaries as GD

    ######################### 1. Set General Parameters ##############################

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations",
                              "Simulation_%d" % Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(
        Basin, Dir_Home)
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Example_dataset)

    # Define resolution of SRTM
    Resolution = '15s'

    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(
        version='1.0')
    Additional_Months_tail = np.max(list(
        Moving_Window_Per_Class_dict.values()))

    ############## Cut dates into pieces if it is needed ######################

    # Check the years that needs to be calculated
    years = list(
        range(int(Startdate.split('-')[0]),
              int(Enddate.split('-')[0]) + 1))

    for year in years:

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Open variables in netcdf
        fh = Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' % int(year)
        Enddate_part = '%s-12-31' % int(year)

        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(
                months=Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime(
                '%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        ############################# 2. Download Data ###################################

        # Download data
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, P_Product)

        if not "Actual_Evapotranspiration" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, ET_Product)

        if (WaterPIX_filename == "" or Supply_method == "Fraction") \
                and not ("Reference_Evapotranspiration" in Variables_NC):

            Data_Path_ETref = Start.Download_Data.ETreference(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']],
                Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if Reservoirs_GEE_on_off == 1 and not ("Water_Occurrence"
                                               in Variables_NC):
            Data_Path_JRC_occurrence = Start.Download_Data.JRC_occurrence(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']],
                Water_Occurence_Product)

            input_JRC = os.path.join(Data_Path_JRC_occurrence,
                                     "JRC_Occurrence_percent.tif")

        else:
            input_JRC = None

        # WaterPIX input
        Data_Path_DEM_Dir = Start.Download_Data.DEM_Dir(
            Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
            [Boundaries['Lonmin'], Boundaries['Lonmax']], Resolution,
            DEM_Product)

        Data_Path_DEM = Start.Download_Data.DEM(
            Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
            [Boundaries['Lonmin'], Boundaries['Lonmax']], Resolution,
            DEM_Product)

        ###################### 3. Convert the RAW data to NETCDF files ##############################
        # The sequence of converting the data into netcdf is:
        # Precipitation
        # Evapotranspiration
        # Reference Evapotranspiration
        # DEM flow directions

        #______________________________Precipitation_______________________________

        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(
                Data_Path_P_Monthly,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)

            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec,
                                     "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________________Evaporation________________________________

        # 2.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(
                Data_Path_ET,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET,
                                     "Actual_Evapotranspiration", "mm/month",
                                     0.01)
            del DataCube_ET

        #_______________________Reference Evaporation______________________________

        # 3.) Reference Evapotranspiration data
        if (WaterPIX_filename == "" or Supply_method == "Fraction") and not \
                ("Reference_Evapotranspiration" in Variables_NC):
            # Get the data of Precipitation and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(
                Data_Path_ETref,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref,
                                     "Reference_Evapotranspiration",
                                     "mm/month", 0.01)
            del DataCube_ETref

        #____________________________fraction surface water _______________________

        if not "Fraction_Surface_Water_Supply" in Variables_NC:
            DataCube_frac_sw = np.ones([size_Y, size_X]) * np.nan

            import watools.Functions.Start.Get_Dictionaries as GD

            # Open LU dataset
            DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse")

            # Get dictionaries and keys
            lulc = GD.get_sheet5_classes()
            lulc_dict = list(GD.get_sheet5_classes().keys())
            consumed_frac_dict = GD.sw_supply_fractions()

            for key in lulc_dict:
                Numbers = lulc[key]
                for LU_nmbr in Numbers:
                    DataCube_frac_sw[DataCube_LU ==
                                     LU_nmbr] = consumed_frac_dict[key]

            DC.Add_NC_Array_Static(nc_outname, DataCube_frac_sw,
                                   "Fraction_Surface_Water_Supply", "fraction",
                                   0.01)
            del DataCube_frac_sw, DataCube_LU

        ################### 4. Calculate Runoff (2 methods: a = Budyko and b = WaterPIX) #####################

        ################ 4a. Calculate Runoff based on Precipitation and Evapotranspiration ##################

        if (Supply_method == "Fraction"
                and not "Surface_Runoff" in Variables_NC):

            # Calculate runoff based on Budyko
            DataCube_Runoff = Five.Fraction_Based.Calc_surface_runoff(
                Dir_Basin, nc_outname, Startdate_part, Enddate_part,
                Example_dataset, ETref_Product, P_Product)

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Runoff,
                                     "Surface_Runoff", "mm/month", 0.01)
            del DataCube_Runoff

        ###################### 4b. Get Runoff from WaterPIX ###########################
        if (Supply_method == "WaterPIX"
                and not "Surface_Runoff" in Variables_NC):

            # Get WaterPIX data
            WaterPIX_Var = 'TotalRunoff_M'
            DataCube_Runoff = Five.Read_WaterPIX.Get_Array(
                WaterPIX_filename, WaterPIX_Var, Example_dataset,
                Startdate_part, Enddate_part)

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Runoff,
                                     "Surface_Runoff", "mm/month", 0.01)
            del DataCube_Runoff

        ####################### 5. Calculate Extraction (2 methods: a = Fraction, b = WaterPIX) ##################

        ###################### 5a. Get extraction from fraction method by using budyko ###########################
        if (Supply_method == "Fraction"
                and not "Surface_Withdrawal" in Variables_NC):
            DataCube_surface_withdrawal = Five.Fraction_Based.Calc_surface_withdrawal(
                Dir_Basin, nc_outname, Startdate_part, Enddate_part,
                Example_dataset, ETref_Product, P_Product)

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_surface_withdrawal,
                                     "Surface_Withdrawal", "mm/month", 0.01)
            del DataCube_surface_withdrawal

        #################################### 5b. Get extraction from WaterPIX ####################################
        if (Supply_method == "WaterPIX"
                and not "Surface_Withdrawal" in Variables_NC):
            WaterPIX_Var = 'Supply_M'
            DataCube_Supply = Five.Read_WaterPIX.Get_Array(
                WaterPIX_filename, WaterPIX_Var, Example_dataset, Startdate,
                Enddate)

            # Open array with surface water fractions
            DataCube_frac_sw = RC.Open_nc_array(
                nc_outname, "Fraction_Surface_Water_Supply")

            # Total amount of ETblue taken out of rivers
            DataCube_surface_withdrawal = DataCube_Supply * DataCube_frac_sw[
                None, :, :]

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_surface_withdrawal,
                                     "Surface_Withdrawal", "mm/month", 0.01)
            del DataCube_surface_withdrawal

        ################################## 5. Run SurfWAT #####################################

        import watools.Models.SurfWAT as SurfWAT

        # Define formats of input data
        Format_DEM = "TIFF"  # or "TIFF"
        Format_Runoff = "NetCDF"  # or "TIFF"
        Format_Extraction = "NetCDF"  # or "TIFF"
        Format_DEM_dir = "TIFF"  # or "TIFF"
        Format_Basin = "NetCDF"  # or "TIFF"

        # Give path (for tiff) or file (netcdf)
        input_nc = os.path.join(Dir_Basin, "Simulations",
                                "Simulation_%s" % Simulation,
                                "SurfWAT_in_%d.nc" % year)
        output_nc = os.path.join(Dir_Basin, "Simulations",
                                 "Simulation_%s" % Simulation,
                                 "SurfWAT_out_%d.nc" % year)

        # Create Input File for SurfWAT
        SurfWAT.Create_input_nc.main(Data_Path_DEM_Dir, Data_Path_DEM,
                                     os.path.dirname(nc_outname),
                                     os.path.dirname(nc_outname),
                                     os.path.dirname(nc_outname), Startdate,
                                     Enddate, input_nc, Resolution,
                                     Format_DEM_dir, Format_DEM, Format_Basin,
                                     Format_Runoff, Format_Extraction)

        # Run SurfWAT
        SurfWAT.Run_SurfWAT.main(input_nc, output_nc, input_JRC,
                                 Inflow_Text_Files, Reservoirs_GEE_on_off)
    '''
    ################################# Plot graph ##################################

    # Draw graph
    Five.Channel_Routing.Graph_DEM_Distance_Discharge(Discharge_dict_CR3, Distance_dict_CR2, DEM_dict_CR2, River_dict_CR2, Startdate, Enddate, Example_dataset)

    ######################## Change data to fit the LU data #######################

    # Discharge
    # Define info for the nc files
    info = ['monthly','m3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Discharge = DC.Create_NC_name('DischargeEnd', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Discharge):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_Discharge_CR = DC.Convert_dict_to_array(River_dict_CR2, Discharge_dict_CR3, Example_dataset)
        DC.Save_as_NC(Name_NC_Discharge, DataCube_Discharge_CR, 'Discharge_End_CR', Example_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Discharge_CR


    '''
    '''

    # DEM
    Name_NC_DEM = DC.Create_NC_name('DEM', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_DEM_CR = RC.Open_nc_array(Name_NC_DEM_CR)
        DataCube_DEM = RC.resize_array_example(DataCube_DEM_CR, LU_data, method=1)
        DC.Save_as_NC(Name_NC_DEM, DataCube_DEM, 'DEM', LU_dataset)
        del DataCube_DEM

    # flow direction
    Name_NC_DEM_Dir = DC.Create_NC_name('DEM_Dir', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM_Dir):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_DEM_Dir_CR = RC.Open_nc_array(Name_NC_DEM_Dir_CR)
        DataCube_DEM_Dir = RC.resize_array_example(DataCube_DEM_Dir_CR, LU_data, method=1)
        DC.Save_as_NC(Name_NC_DEM_Dir, DataCube_DEM_Dir, 'DEM_Dir', LU_dataset)
        del DataCube_DEM_Dir

    # Precipitation
    # Define info for the nc files
    info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Prec = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_Prec):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_Prec, DataCube_Prec, 'Prec', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_Prec

    # Evapotranspiration
    Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_ET):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_ET

    # Reference Evapotranspiration data
    Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_ETref):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_ETref

    # Rivers
    Name_NC_Rivers = DC.Create_NC_name('Rivers', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Rivers):

        # Get the data of Reference Evapotranspiration and save as nc
        Rivers_CR = RC.Open_nc_array(Name_NC_Rivers_CR)
        DataCube_Rivers = RC.resize_array_example(Rivers_CR, LU_data)
        DC.Save_as_NC(Name_NC_Rivers, DataCube_Rivers, 'Rivers', LU_dataset)
        del DataCube_Rivers, Rivers_CR

    # Discharge
    # Define info for the nc files
    info = ['monthly','m3', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Routed_Discharge = DC.Create_NC_name('Routed_Discharge', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Routed_Discharge):

        # Get the data of Reference Evapotranspiration and save as nc
        Routed_Discharge_CR = RC.Open_nc_array(Name_NC_Discharge)
        DataCube_Routed_Discharge = RC.resize_array_example(Routed_Discharge_CR, LU_data)
        DC.Save_as_NC(Name_NC_Routed_Discharge, DataCube_Routed_Discharge, 'Routed_Discharge', LU_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Routed_Discharge, Routed_Discharge_CR





    # Get raster information
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Example_dataset)

    Rivers = RC.Open_nc_array(Name_NC_Rivers_CR)

    # Create ID Matrix
    y,x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape)) + 1

    # Get tiff array time dimension:
    time_dimension = int(np.shape(Discharge_dict_CR3[0])[0])

    # create an empty array
    Result = np.zeros([time_dimension, size_Y, size_X])

    for river_part in range(0,len(River_dict_CR2)):
        for river_pixel in range(1,len(River_dict_CR2[river_part])):
            river_pixel_ID = River_dict_CR2[river_part][river_pixel]
            if len(np.argwhere(ID_Matrix == river_pixel_ID))>0:
                row, col = np.argwhere(ID_Matrix == river_pixel_ID)[0][:]
                Result[:,row,col] = Discharge_dict_CR3[river_part][:,river_pixel]
        print(river_part)


    Outflow = Discharge_dict_CR3[0][:,1]

    for i in range(0,time_dimension):
        output_name = r'C:/testmap/rtest_%s.tif' %i
        Result_one = Result[i, :, :]
        DC.Save_as_tiff(output_name, Result_one, geo_out, "WGS84")

    import os

    # Get environmental variable for the Home folder
    WA_env_paths = os.environ["WA_HOME"].split(';')
    Dir_Home = WA_env_paths[0]

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    info = ['monthly','m3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]
    Name_Result = DC.Create_NC_name('DischargeEnd', Simulation, Dir_Basin, 5, info)
    Result[np.logical_and(Result == 0.0, Rivers == 0.0)] = np.nan

    DC.Save_as_NC(Name_Result, Result, 'DischargeEnd', Example_dataset, Startdate, Enddate, 'monthly')



    '''

    return ()
예제 #22
0
def ITE(Dir_Basin, nc_outname, Startdate, Enddate, Simulation):
    """
    Split actual evapotranspiration into interception, transpiration, and
    evaporation, and save a graph of the monthly basin-average fractions.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    nc_outname : str
        Path to the .nc file containing all data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    Returns
    -------
    I : array
        Array[time, lat, lon] contains the interception data [mm/month]
    T : array
        Array[time, lat, lon] contains the transpiration data [mm/month]
    E : array
        Array[time, lat, lon] contains the evaporation data [mm/month]

    """

    # import WA modules (project-local)
    import watools.General.raster_conversions as RC
    import watools.Functions.Start.Get_Dictionaries as GD

    # Define monthly dates covering the simulation period
    Dates = pd.date_range(Startdate, Enddate, freq="MS")

    # Extract LU data from NetCDF file
    LU = RC.Open_nc_array(nc_outname, Var='Landuse')

    # Create a mask to ignore non relevant pixels.
    # NOTE(review): the Py2 branch excludes the LAST dictionary key while the
    # Py3 branch excludes the FIRST, and dict ordering differs between
    # interpreters — these branches are not guaranteed equivalent; confirm
    # which land-use classes are meant to be excluded.
    if sys.version_info[0] == 2:
        lulc_dict = GD.get_lulcs().keys()
        mask = np.logical_or.reduce([LU == value for value in lulc_dict[:-1]])
    if sys.version_info[0] == 3:
        lulc_dict = list(GD.get_lulcs().keys())
        mask = np.logical_or.reduce([LU == value for value in lulc_dict[1:]])

    # Broadcast the 2-D mask over the time dimension; mask3d_neg is strongly
    # negative (-9999) outside the mask and 0 inside, used below to NaN-out
    # non-relevant pixels.
    mask3d = mask * np.ones(len(Dates))[:, None, None]
    mask3d_neg = (mask3d - 1) * 9999

    # Extract Evapotranspiration data from NetCDF file
    ET = RC.Open_nc_array(nc_outname, 'Actual_Evapotranspiration', Startdate,
                          Enddate)
    # Extract Leaf Area Index data from NetCDF file
    LAI = RC.Open_nc_array(nc_outname, 'LAI', Startdate, Enddate)
    # Extract Precipitation data from NetCDF file
    P = RC.Open_nc_array(nc_outname, 'Precipitation', Startdate, Enddate)
    # Extract Rainy Days data from NetCDF file
    RD = RC.Open_nc_array(nc_outname, 'Rainy_Days', Startdate, Enddate)
    # Extract Normalized Dry Matter data and time from NetCDF file
    NDM = RC.Open_nc_array(nc_outname, 'Normalized_Dry_Matter', Startdate,
                           Enddate)
    timeNDM = RC.Open_nc_array(nc_outname, 'time')

    # Create dictionaries mapping each timestep index to its month and year
    datesNDMmonth = dict()
    datesNDMyear = dict()
    # Loop over all timesteps (time values are proleptic-Gregorian ordinals)
    for i in range(0, len(timeNDM)):
        # change toordinal to month and year
        datesNDMmonth[i] = datetime.date.fromordinal(timeNDM[i]).month
        datesNDMyear[i] = datetime.date.fromordinal(timeNDM[i]).year

    # Calculate, per calendar month, the per-pixel maximum NDM over the
    # whole period
    NDMmax = dict()

    # loop over the months
    for month in range(1, 13):
        dimensions = []
        # Collect the time indices that fall in this calendar month
        for dimension, monthdict in datesNDMmonth.items():
            if monthdict == month:
                dimensions = np.append(dimensions, dimension)
        # Stack the NDM slices of those time indices
        NDMmonth = np.zeros([
            np.size(dimensions),
            int(np.shape(NDM)[1]),
            int(np.shape(NDM)[2])
        ])
        dimensions = np.int_(dimensions)
        NDMmonth[:, :, :] = NDM[dimensions, :, :]
        # Per-pixel maximum over the years for this calendar month (2-D map)
        NDMmax[month] = np.nanmax(NDMmonth, 0)

    NDMmax_months = np.zeros(
        [len(timeNDM),
         int(np.shape(NDM)[1]),
         int(np.shape(NDM)[2])])

    # Create 3D array with NDMmax.
    # BUGFIX: the original wrapped NDMmax[...] in np.nanmax(), which reduced
    # the per-pixel monthly maximum map to a single scalar broadcast over the
    # whole slice, discarding the spatial field computed above. Assign the
    # 2-D map directly so T is normalized per pixel.
    for i in range(0, len(timeNDM)):
        NDMmax_months[i, :, :] = NDMmax[datesNDMmonth[i]]

    # Change zero values in RD and LAI so we do not get division errors below
    RD[RD == 0] = 0.001
    LAI[LAI == 0] = 0.001
    LAI[np.isnan(LAI)] = 0.1

    # Calculate interception I (von Hoyningen-Huene style canopy storage term)
    I = LAI * (1 -
               np.power(1 + (P / RD) * (1 - np.exp(-0.5 * LAI)) *
                        (1 / LAI), -1)) * RD

    # Set boundary: no interception where LAI is undefined
    I[np.isnan(LAI)] = np.nan

    # Calculate transpiration T: fraction of (ET - I) scaled by relative dry
    # matter production, capped at 1 and limited to 95% of the residual.
    # NOTE(review): pixels where NDMmax_months is 0 yield inf/NaN here; they
    # are expected to be removed by the mask below — confirm.
    T = np.minimum(
        (NDM / NDMmax_months), np.ones(np.shape(NDM))) * 0.95 * (ET - I)

    # Mask Data: NaN-out every pixel outside the relevant land-use classes
    ET = ET * mask3d
    T = T * mask3d
    I = I * mask3d
    ET[mask3d_neg < -1] = np.nan
    T[mask3d_neg < -1] = np.nan
    I[mask3d_neg < -1] = np.nan

    # Calculate evaporation E as the residual of the water balance split
    E = ET - T - I

    # Calculate basin-average monthly time series for the graph
    et = np.nanmean(ET.reshape(ET.shape[0], -1), 1)
    i = np.nanmean(I.reshape(I.shape[0], -1), 1)
    t = np.nanmean(T.reshape(T.shape[0], -1), 1)

    # Plot graph of ET and E, T and I fractions.
    fig = plt.figure(figsize=(10, 10))
    plt.grid(b=True, which='Major', color='0.65', linestyle='--', zorder=0)
    ax = fig.add_subplot(111)
    ax.plot(Dates, et, color='k')
    ax.patch.set_visible(False)
    ax.set_title('Average ET and E, T and I fractions')
    ax.set_ylabel('ET [mm/month]')
    ax.patch.set_visible(True)
    ax.fill_between(Dates, et, color='#a3db76', label='Evapotranspiration')
    ax.fill_between(Dates, i + t, color='#6bb8cc', label='Transpiration')
    ax.fill_between(Dates, i, color='#497e7c', label='Interception')
    ax.scatter(Dates, et, color='k')
    ax.legend(loc='upper left', fancybox=True, shadow=True)
    fig.autofmt_xdate()
    ax.set_xlim([Dates[0], Dates[-1]])
    ax.set_ylim([0, max(et) * 1.2])
    ax.set_xlabel('Time')
    [r.set_zorder(10) for r in iter(ax.spines.values())]

    # Define output folder and name for image
    NamePic = "Sim%s_Mean_ET_E_T_I.jpg" % Simulation
    Dir_Basin_Image = os.path.join(Dir_Basin, "Simulations",
                                   "Simulation_%d" % Simulation, "Images")
    if not os.path.exists(Dir_Basin_Image):
        # makedirs (not mkdir) so missing intermediate folders are created too
        os.makedirs(Dir_Basin_Image)

    # Save Images
    plt.savefig(os.path.join(Dir_Basin_Image, NamePic))

    return (I, T, E)