Code example #1
File: main.py Project: jupaladin/wa
def Calculate(Basin, P_Product, ET_Product, Inflow_Text_Files,
              Reservoirs_Lakes_Calculations, Startdate, Enddate, Simulation):
    '''
    This function consists of the following sections:
    1. Set General Parameters
    2. Download Data
    3. Convert the RAW data to NETCDF files
    4. Create Mask based on LU map
    5. Calculate Runoff based on Budyko
    6. Add inflow in Runoff
    7. Calculate River flow
       7.1  Route Runoff
       7.2  Add Reservoirs
       7.3  Add surface water withdrawals
    '''
    # import General modules
    import os
    import gdal
    import numpy as np
    import pandas as pd
    import copy

    # import WA plus modules
    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Five as Five
    import wa.Functions.Start as Start

    ######################### 1. Set General Parameters ##############################

    # Get environmental variable for the Home folder
    WA_env_paths = os.environ["WA_HOME"].split(';')
    Dir_Home = WA_env_paths[0]

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    if not os.path.exists(Dir_Basin):
        os.makedirs(Dir_Basin)

    # Get the boundaries of the basin based on the LU map (the shapefile-based method is kept commented out)
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, LU_dataset = Start.Boundaries.Determine_LU_Based(Basin)
    LU_data = RC.Open_tiff_array(LU_dataset)
    geo_out_LU, proj_LU, size_X_LU, size_Y_LU = RC.Open_array_info(LU_dataset)

    # Define resolution of SRTM
    Resolution = '15s'

    # Get the number of months
    Amount_months = len(pd.date_range(Startdate, Enddate, freq='MS'))
    Amount_months_reservoirs = Amount_months + 1

    # Startdate for moving window Budyko
    Startdate_2months_Timestamp = pd.Timestamp(Startdate) - pd.DateOffset(
        months=2)
    Startdate_2months = Startdate_2months_Timestamp.strftime('%Y-%m-%d')

    ############################# 2. Download Data ###################################

    # Download data
    Data_Path_P = Start.Download_Data.Precipitation(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_2months,
        Enddate, P_Product)
    Data_Path_ET = Start.Download_Data.Evapotranspiration(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_2months,
        Enddate, ET_Product)
    Data_Path_DEM = Start.Download_Data.DEM(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']], Resolution)
    if Resolution != '3s':
        Data_Path_DEM = Start.Download_Data.DEM(
            Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
            [Boundaries['Lonmin'], Boundaries['Lonmax']], Resolution)
    Data_Path_DEM_Dir = Start.Download_Data.DEM_Dir(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']], Resolution)
    Data_Path_ETref = Start.Download_Data.ETreference(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_2months,
        Enddate)
    Data_Path_JRC_occurrence = Start.Download_Data.JRC_occurrence(
        Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
        [Boundaries['Lonmin'], Boundaries['Lonmax']])
    Data_Path_P_Monthly = os.path.join(Data_Path_P, 'Monthly')

    ###################### 3. Convert the RAW data to NETCDF files ##############################
    # The sequence of converting the data is:
    # DEM
    # DEM flow directions
    # Precipitation
    # Evapotranspiration
    # Reference Evapotranspiration

    #_____________________________________DEM__________________________________
    # Get the DEM data and save as nc; this dataset is also used as the reference for the other datasets
    Example_dataset = os.path.join(Dir_Basin, Data_Path_DEM,
                                   'DEM_HydroShed_m_%s.tif' % Resolution)
    DEMdest = gdal.Open(Example_dataset)
    Xsize_CR = int(DEMdest.RasterXSize)
    Ysize_CR = int(DEMdest.RasterYSize)
    DataCube_DEM_CR = DEMdest.GetRasterBand(1).ReadAsArray()

    Name_NC_DEM_CR = DC.Create_NC_name('DEM_CR', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM_CR):
        DC.Save_as_NC(Name_NC_DEM_CR, DataCube_DEM_CR, 'DEM_CR',
                      Example_dataset)
    DEMdest = None

    #___________________________________DEM Dir________________________________
    # Get the data of flow direction and save as nc.
    Dir_dataset = os.path.join(Dir_Basin, Data_Path_DEM_Dir,
                               'DIR_HydroShed_-_%s.tif' % Resolution)
    DEMDirdest = gdal.Open(Dir_dataset)
    DataCube_DEM_Dir_CR = DEMDirdest.GetRasterBand(1).ReadAsArray()

    Name_NC_DEM_Dir_CR = DC.Create_NC_name('DEM_Dir_CR', Simulation, Dir_Basin,
                                           5)
    if not os.path.exists(Name_NC_DEM_Dir_CR):
        DC.Save_as_NC(Name_NC_DEM_Dir_CR, DataCube_DEM_Dir_CR, 'DEM_Dir_CR',
                      Example_dataset)
    DEMDirdest = None
    del DataCube_DEM_Dir_CR

    #______________________________ Precipitation______________________________
    # Define info for the nc files
    info = [
        'monthly', 'mm',
        ''.join([Startdate_2months[5:7], Startdate_2months[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]

    # Precipitation data
    Name_NC_Prec_CR = DC.Create_NC_name('Prec_CR', Simulation, Dir_Basin, 5,
                                        info)
    if not os.path.exists(Name_NC_Prec_CR):

        # Get the data of Precipitation and save as nc
        DataCube_Prec_CR = RC.Get3Darray_time_series_monthly(
            Dir_Basin,
            Data_Path_P_Monthly,
            Startdate_2months,
            Enddate,
            Example_data=Example_dataset)
        DC.Save_as_NC(Name_NC_Prec_CR, DataCube_Prec_CR, 'Prec_CR',
                      Example_dataset, Startdate_2months, Enddate, 'monthly',
                      0.01)
        del DataCube_Prec_CR

    #____________________________ Evapotranspiration___________________________
    # Evapotranspiration data
    info = [
        'monthly', 'mm',
        ''.join([Startdate_2months[5:7], Startdate_2months[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]
    Name_NC_ET_CR = DC.Create_NC_name('ET_CR', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_ET_CR):

        # Get the Evapotranspiration data and save as nc
        DataCube_ET_CR = RC.Get3Darray_time_series_monthly(
            Dir_Basin,
            Data_Path_ET,
            Startdate_2months,
            Enddate,
            Example_data=Example_dataset)
        DC.Save_as_NC(Name_NC_ET_CR, DataCube_ET_CR, 'ET_CR', Example_dataset,
                      Startdate_2months, Enddate, 'monthly', 0.01)
        del DataCube_ET_CR

    #_______________________Reference Evapotranspiration_______________________
    # Reference Evapotranspiration data
    Name_NC_ETref_CR = DC.Create_NC_name('ETref_CR', Simulation, Dir_Basin, 5,
                                         info)
    if not os.path.exists(Name_NC_ETref_CR):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_ETref_CR = RC.Get3Darray_time_series_monthly(
            Dir_Basin,
            Data_Path_ETref,
            Startdate_2months,
            Enddate,
            Example_data=Example_dataset)
        DC.Save_as_NC(Name_NC_ETref_CR, DataCube_ETref_CR, 'ETref_CR',
                      Example_dataset, Startdate_2months, Enddate, 'monthly',
                      0.01)
        del DataCube_ETref_CR

    #_______________________fraction surface water _______________________

    Name_NC_frac_sw_CR = DC.Create_NC_name('Fraction_SW_CR', Simulation,
                                           Dir_Basin, 5)
    if not os.path.exists(Name_NC_frac_sw_CR):
        DataCube_frac_sw = np.ones_like(LU_data) * np.nan

        import wa.Functions.Start.Get_Dictionaries as GD

        # Get dictionaries and keys
        lulc = GD.get_sheet5_classes()
        lulc_dict = GD.get_sheet5_classes().keys()
        consumed_frac_dict = GD.sw_supply_fractions_sheet5()

        for key in lulc_dict:
            Numbers = lulc[key]
            for LU_nmbr in Numbers:
                Mask = np.zeros_like(LU_data)
                Mask[LU_data == LU_nmbr] = 1
                DataCube_frac_sw[Mask == 1] = consumed_frac_dict[key]

        dest_frac_sw = DC.Save_as_MEM(DataCube_frac_sw, geo_out_LU, proj_LU)
        dest_frac_sw_CR = RC.reproject_dataset_example(dest_frac_sw,
                                                       Example_dataset)
        DataCube_frac_sw_CR = dest_frac_sw_CR.ReadAsArray()
        DataCube_frac_sw_CR[DataCube_frac_sw_CR == 0] = np.nan

        DC.Save_as_NC(Name_NC_frac_sw_CR,
                      DataCube_frac_sw_CR,
                      'Fraction_SW_CR',
                      Example_dataset,
                      Scaling_factor=0.01)
        del DataCube_frac_sw_CR

    del DataCube_DEM_CR
    ##################### 4. Create Mask based on LU map ###########################

    # A mask is now created to define the area of interest (pixels where a land use is defined)

    #_____________________________________LU___________________________________
    destLU = RC.reproject_dataset_example(LU_dataset,
                                          Example_dataset,
                                          method=1)
    DataCube_LU_CR = destLU.GetRasterBand(1).ReadAsArray()

    Raster_Basin_CR = np.zeros([Ysize_CR, Xsize_CR])
    Raster_Basin_CR[DataCube_LU_CR > 0] = 1
    Name_NC_Basin_CR = DC.Create_NC_name('Basin_CR', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_Basin_CR):
        DC.Save_as_NC(Name_NC_Basin_CR, Raster_Basin_CR, 'Basin_CR',
                      Example_dataset)
        #del Raster_Basin
    '''
    Name_NC_Basin = DC.Create_NC_name('Basin_CR', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_Basin):

        Raster_Basin = RC.Vector_to_Raster(Dir_Basin, Shape_file_name_shp, Example_dataset)
        Raster_Basin = np.clip(Raster_Basin, 0, 1)
        DC.Save_as_NC(Name_NC_Basin, Raster_Basin, 'Basin_CR', Example_dataset)
        #del Raster_Basin
    '''
    ###################### 5. Calculate Runoff based on Budyko ###########################

    # Define info for the nc files
    info = [
        'monthly', 'mm', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]

    # Define the output names of section 5 and 6
    Name_NC_Runoff_CR = DC.Create_NC_name('Runoff_CR', Simulation, Dir_Basin,
                                          5, info)
    Name_NC_Runoff_for_Routing_CR = Name_NC_Runoff_CR

    if not os.path.exists(Name_NC_Runoff_CR):

        # Calculate runoff based on Budyko
        DataCube_Runoff_CR = Five.Budyko.Calc_runoff(Name_NC_ETref_CR,
                                                     Name_NC_Prec_CR)

        # Save the runoff as netcdf
        DC.Save_as_NC(Name_NC_Runoff_CR, DataCube_Runoff_CR, 'Runoff_CR',
                      Example_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_Runoff_CR
    '''  
    ###################### Calculate Runoff with P min ET ###########################
  
    Name_NC_Runoff_CR = DC.Create_NC_name('Runoff_CR', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Runoff_CR):

        ET = RC.Open_nc_array(Name_NC_ET_CR)
        P = RC.Open_nc_array(Name_NC_Prec_CR) 
        DataCube_Runoff_CR = P - ET
        DataCube_Runoff_CR[:,:,:][DataCube_Runoff_CR<=0.1] = 0
        DataCube_Runoff_CR[:,:,:][np.isnan(DataCube_Runoff_CR)] = 0                          
        DC.Save_as_NC(Name_NC_Runoff_CR, DataCube_Runoff_CR, 'Runoff_CR', Example_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Runoff_CR

     '''
    ############### 6. Add inflow in basin by using textfile #########################

    # add inlets if there are textfiles defined
    if len(Inflow_Text_Files) > 0:

        # Create name of the Runoff with inlets
        Name_NC_Runoff_with_Inlets_CR = DC.Create_NC_name(
            'Runoff_with_Inlets_CR', Simulation, Dir_Basin, 5, info)

        # Use this runoff name for the routing (it will overwrite the runoff without inlets)
        Name_NC_Runoff_for_Routing_CR = Name_NC_Runoff_with_Inlets_CR

        # Create the file if it does not exist
        if not os.path.exists(Name_NC_Runoff_with_Inlets_CR):

            # Calculate the runoff that will be routed by including the inlets
            DataCube_Runoff_with_Inlets_CR = Five.Inlets.Add_Inlets(
                Name_NC_Runoff_CR, Inflow_Text_Files)

            # Save this runoff as netcdf
            DC.Save_as_NC(Name_NC_Runoff_with_Inlets_CR,
                          DataCube_Runoff_with_Inlets_CR,
                          'Runoff_with_Inlets_CR', Example_dataset, Startdate,
                          Enddate, 'monthly', 0.01)
            del DataCube_Runoff_with_Inlets_CR

    ######################### 7. Now the surface water is calculated #################

    # Names for dictionaries and nc files
    # CR1 = Natural_flow with only green water
    # CR2 = Natural_flow with only green water and reservoirs
    # CR3 = Flow with green, blue and reservoirs

    ######################### 7.1 Apply Channel Routing ###############################

    # Create the name for the netcdf outputs for section 7.1
    info = [
        'monthly', 'pixels', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]
    Name_NC_Acc_Pixels_CR = DC.Create_NC_name('Acc_Pixels_CR', Simulation,
                                              Dir_Basin, 5)
    info = [
        'monthly', 'm3', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]
    Name_NC_Discharge_CR1 = DC.Create_NC_name('Discharge_CR1', Simulation,
                                              Dir_Basin, 5, info)

    # If one of the outputs does not exist, run this part
    if not (os.path.exists(Name_NC_Acc_Pixels_CR)
            and os.path.exists(Name_NC_Discharge_CR1)):

        Accumulated_Pixels_CR, Discharge_CR1 = Five.Channel_Routing.Channel_Routing(
            Name_NC_DEM_Dir_CR,
            Name_NC_Runoff_for_Routing_CR,
            Name_NC_Basin_CR,
            Example_dataset,
            Degrees=1)

        # Save Results
        DC.Save_as_NC(Name_NC_Acc_Pixels_CR, Accumulated_Pixels_CR,
                      'Acc_Pixels_CR', Example_dataset)
        DC.Save_as_NC(Name_NC_Discharge_CR1, Discharge_CR1, 'Discharge_CR1',
                      Example_dataset, Startdate, Enddate, 'monthly')

    ################# Calculate the natural river and river zones #################

    Name_NC_Rivers_CR = DC.Create_NC_name('Rivers_CR', Simulation, Dir_Basin,
                                          5, info)
    if not os.path.exists(Name_NC_Rivers_CR):

        # Open routed discharge array
        Discharge_CR1 = RC.Open_nc_array(Name_NC_Discharge_CR1)
        Raster_Basin = RC.Open_nc_array(Name_NC_Basin_CR)

        # Calculate mean average over the period
        if len(np.shape(Discharge_CR1)) > 2:
            Routed_Discharge_Ave = np.nanmean(Discharge_CR1, axis=0)
        else:
            Routed_Discharge_Ave = Discharge_CR1

        # Define the 2% highest pixels as rivers
        Rivers = np.zeros([
            np.size(Routed_Discharge_Ave, 0),
            np.size(Routed_Discharge_Ave, 1)
        ])
        Routed_Discharge_Ave[Raster_Basin != 1] = np.nan
        Routed_Discharge_Ave_number = np.nanpercentile(Routed_Discharge_Ave,
                                                       98)
        Rivers[
            Routed_Discharge_Ave >
            Routed_Discharge_Ave_number] = 1  # pixels above the 98th percentile of the period-average discharge are classified as rivers

        # Save the river file as netcdf file
        DC.Save_as_NC(Name_NC_Rivers_CR, Rivers, 'Rivers_CR', Example_dataset)

    ########################## Create river directories ###########################

    Name_py_River_dict_CR1 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'River_dict_CR1_simulation%d.npy' % (Simulation))
    Name_py_DEM_dict_CR1 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'DEM_dict_CR1_simulation%d.npy' % (Simulation))
    Name_py_Distance_dict_CR1 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Distance_dict_CR1_simulation%d.npy' % (Simulation))

    if not (os.path.exists(Name_py_River_dict_CR1)
            and os.path.exists(Name_py_DEM_dict_CR1)
            and os.path.exists(Name_py_Distance_dict_CR1)):

        # Get river and DEM dict
        River_dict_CR1, DEM_dict_CR1, Distance_dict_CR1 = Five.Create_Dict.Rivers_General(
            Name_NC_DEM_CR, Name_NC_DEM_Dir_CR, Name_NC_Acc_Pixels_CR,
            Name_NC_Rivers_CR, Example_dataset)
        np.save(Name_py_River_dict_CR1, River_dict_CR1)
        np.save(Name_py_DEM_dict_CR1, DEM_dict_CR1)
        np.save(Name_py_Distance_dict_CR1, Distance_dict_CR1)
    else:
        # Load
        River_dict_CR1 = np.load(Name_py_River_dict_CR1).item()
        DEM_dict_CR1 = np.load(Name_py_DEM_dict_CR1).item()
        Distance_dict_CR1 = np.load(Name_py_Distance_dict_CR1).item()

    Name_py_Discharge_dict_CR1 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Discharge_dict_CR1_simulation%d.npy' % (Simulation))

    if not os.path.exists(Name_py_Discharge_dict_CR1):
        # Get discharge dict
        Discharge_dict_CR1 = Five.Create_Dict.Discharge(
            Name_NC_Discharge_CR1, River_dict_CR1, Amount_months,
            Example_dataset)
        np.save(Name_py_Discharge_dict_CR1, Discharge_dict_CR1)
    else:
        # Load
        Discharge_dict_CR1 = np.load(Name_py_Discharge_dict_CR1).item()

    ###################### 7.2 Calculate surface water storage characteristics ######################

    Name_py_Discharge_dict_CR2 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Discharge_dict_CR2_simulation%d.npy' % (Simulation))
    Name_py_River_dict_CR2 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'River_dict_CR2_simulation%d.npy' % (Simulation))
    Name_py_DEM_dict_CR2 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'DEM_dict_CR2_simulation%d.npy' % (Simulation))
    Name_py_Distance_dict_CR2 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Distance_dict_CR2_simulation%d.npy' % (Simulation))
    Name_py_Diff_Water_Volume = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Diff_Water_Volume_CR2_simulation%d.npy' % (Simulation))
    Name_py_Regions = os.path.join(Dir_Basin, 'Simulations',
                                   'Simulation_%d' % Simulation, 'Sheet_5',
                                   'Regions_simulation%d.npy' % (Simulation))

    if not (os.path.exists(Name_py_Discharge_dict_CR2)
            and os.path.exists(Name_py_River_dict_CR2)
            and os.path.exists(Name_py_DEM_dict_CR2)
            and os.path.exists(Name_py_Distance_dict_CR2)):

        # Copy the dicts as a starting point before adding reservoirs
        Discharge_dict_CR2 = copy.deepcopy(Discharge_dict_CR1)
        River_dict_CR2 = copy.deepcopy(River_dict_CR1)
        DEM_dict_CR2 = copy.deepcopy(DEM_dict_CR1)
        Distance_dict_CR2 = copy.deepcopy(Distance_dict_CR1)

        if Reservoirs_Lakes_Calculations == 1:

            # define input tiffs for surface water calculations
            input_JRC = os.path.join(Dir_Basin, Data_Path_JRC_occurrence,
                                     'JRC_Occurrence_percent.tif')
            DEM_dataset = os.path.join(Dir_Basin, Data_Path_DEM,
                                       'DEM_HydroShed_m_3s.tif')

            sensitivity = 700  # 900 = less sensitive, 1 = very sensitive
            Regions = Five.Reservoirs.Calc_Regions(Name_NC_Basin_CR, input_JRC,
                                                   sensitivity, Boundaries)

            Diff_Water_Volume = np.zeros(
                [len(Regions), Amount_months_reservoirs - 1, 3])
            reservoir = 0

            for region in Regions:

                popt = Five.Reservoirs.Find_Area_Volume_Relation(
                    region, input_JRC, DEM_dataset)

                Area_Reservoir_Values = Five.Reservoirs.GEE_calc_reservoir_area(
                    region, Startdate, Enddate)

                Diff_Water_Volume[
                    reservoir, :, :] = Five.Reservoirs.Calc_Diff_Storage(
                        Area_Reservoir_Values, popt)
                reservoir += 1

            ################# 7.2 Add storage reservoirs and change outflows ##################
            Discharge_dict_CR2, River_dict_CR2, DEM_dict_CR2, Distance_dict_CR2 = Five.Reservoirs.Add_Reservoirs(
                Name_NC_Rivers_CR, Name_NC_Acc_Pixels_CR, Diff_Water_Volume,
                River_dict_CR2, Discharge_dict_CR2, DEM_dict_CR2,
                Distance_dict_CR2, Regions, Example_dataset)

            np.save(Name_py_Regions, Regions)
            np.save(Name_py_Diff_Water_Volume, Diff_Water_Volume)

        np.save(Name_py_Discharge_dict_CR2, Discharge_dict_CR2)
        np.save(Name_py_River_dict_CR2, River_dict_CR2)
        np.save(Name_py_DEM_dict_CR2, DEM_dict_CR2)
        np.save(Name_py_Distance_dict_CR2, Distance_dict_CR2)

    else:
        # Load
        Discharge_dict_CR2 = np.load(Name_py_Discharge_dict_CR2).item()
        River_dict_CR2 = np.load(Name_py_River_dict_CR2).item()
        DEM_dict_CR2 = np.load(Name_py_DEM_dict_CR2).item()
        Distance_dict_CR2 = np.load(Name_py_Distance_dict_CR2).item()

    ####################### 7.3 Add surface water withdrawals #############################

    Name_py_Discharge_dict_CR3 = os.path.join(
        Dir_Basin, 'Simulations', 'Simulation_%d' % Simulation, 'Sheet_5',
        'Discharge_dict_CR3_simulation%d.npy' % (Simulation))

    if not os.path.exists(Name_py_Discharge_dict_CR3):

        Discharge_dict_CR3, DataCube_ETblue_m3 = Five.Irrigation.Add_irrigation(
            Discharge_dict_CR2, River_dict_CR2, Name_NC_Rivers_CR,
            Name_NC_ET_CR, Name_NC_ETref_CR, Name_NC_Prec_CR, Name_NC_Basin_CR,
            Name_NC_frac_sw_CR, Startdate, Enddate, Example_dataset)
        np.save(Name_py_Discharge_dict_CR3, Discharge_dict_CR3)

        # save ETblue as nc
        info = [
            'monthly', 'm3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]),
            ''.join([Enddate[5:7], Enddate[0:4]])
        ]
        Name_NC_ETblue = DC.Create_NC_name('ETblue', Simulation, Dir_Basin, 5,
                                           info)
        DC.Save_as_NC(Name_NC_ETblue, DataCube_ETblue_m3, 'ETblue',
                      Example_dataset, Startdate, Enddate, 'monthly')

    else:
        Discharge_dict_CR3 = np.load(Name_py_Discharge_dict_CR3).item()

    ################################# Plot graph ##################################

    # Draw graph
    Five.Channel_Routing.Graph_DEM_Distance_Discharge(
        Discharge_dict_CR3, Distance_dict_CR2, DEM_dict_CR2, River_dict_CR2,
        Startdate, Enddate, Example_dataset)

    ######################## Change data to fit the LU data #######################

    # Discharge
    # Define info for the nc files
    info = [
        'monthly', 'm3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]

    Name_NC_Discharge = DC.Create_NC_name('Discharge', Simulation, Dir_Basin,
                                          5, info)
    if not os.path.exists(Name_NC_Discharge):

        # Convert the discharge dictionary to an array and save as nc
        DataCube_Discharge_CR = DC.Convert_dict_to_array(
            River_dict_CR2, Discharge_dict_CR3, Example_dataset)
        DC.Save_as_NC(Name_NC_Discharge, DataCube_Discharge_CR, 'Discharge',
                      Example_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Discharge_CR

    # DEM
    Name_NC_DEM = DC.Create_NC_name('DEM', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM):

        # Resize the DEM data to the LU grid and save as nc
        DataCube_DEM_CR = RC.Open_nc_array(Name_NC_DEM_CR)
        DataCube_DEM = RC.resize_array_example(DataCube_DEM_CR,
                                               LU_data,
                                               method=1)
        DC.Save_as_NC(Name_NC_DEM, DataCube_DEM, 'DEM', LU_dataset)
        del DataCube_DEM

    # flow direction
    Name_NC_DEM_Dir = DC.Create_NC_name('DEM_Dir', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM_Dir):

        # Resize the flow direction data to the LU grid and save as nc
        DataCube_DEM_Dir_CR = RC.Open_nc_array(Name_NC_DEM_Dir_CR)
        DataCube_DEM_Dir = RC.resize_array_example(DataCube_DEM_Dir_CR,
                                                   LU_data,
                                                   method=1)
        DC.Save_as_NC(Name_NC_DEM_Dir, DataCube_DEM_Dir, 'DEM_Dir', LU_dataset)
        del DataCube_DEM_Dir

    # Precipitation
    # Define info for the nc files
    info = [
        'monthly', 'mm', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]

    Name_NC_Prec = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_Prec):

        # Get the Precipitation data and save as nc
        DataCube_Prec = RC.Get3Darray_time_series_monthly(
            Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_Prec, DataCube_Prec, 'Prec', LU_dataset,
                      Startdate, Enddate, 'monthly', 0.01)
        del DataCube_Prec

    # Evapotranspiration
    Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_ET):

        # Get the Evapotranspiration data and save as nc
        DataCube_ET = RC.Get3Darray_time_series_monthly(
            Dir_Basin, Data_Path_ET, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', LU_dataset, Startdate,
                      Enddate, 'monthly', 0.01)
        del DataCube_ET

    # Reference Evapotranspiration data
    Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_ETref):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_ETref = RC.Get3Darray_time_series_monthly(
            Dir_Basin, Data_Path_ETref, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', LU_dataset,
                      Startdate, Enddate, 'monthly', 0.01)
        del DataCube_ETref

    # Rivers
    Name_NC_Rivers = DC.Create_NC_name('Rivers', Simulation, Dir_Basin, 5,
                                       info)
    if not os.path.exists(Name_NC_Rivers):

        # Resize the rivers data to the LU grid and save as nc
        Rivers_CR = RC.Open_nc_array(Name_NC_Rivers_CR)
        DataCube_Rivers = RC.resize_array_example(Rivers_CR, LU_data)
        DC.Save_as_NC(Name_NC_Rivers, DataCube_Rivers, 'Rivers', LU_dataset)
        del DataCube_Rivers, Rivers_CR

    # Discharge
    # Define info for the nc files
    info = [
        'monthly', 'm3', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]

    Name_NC_Routed_Discharge = DC.Create_NC_name('Routed_Discharge',
                                                 Simulation, Dir_Basin, 5,
                                                 info)
    if not os.path.exists(Name_NC_Routed_Discharge):

        # Resize the routed discharge data to the LU grid and save as nc
        Routed_Discharge_CR = RC.Open_nc_array(Name_NC_Discharge)
        DataCube_Routed_Discharge = RC.resize_array_example(
            Routed_Discharge_CR, LU_data)
        DC.Save_as_NC(Name_NC_Routed_Discharge, DataCube_Routed_Discharge,
                      'Routed_Discharge', LU_dataset, Startdate, Enddate,
                      'monthly')
        del DataCube_Routed_Discharge, Routed_Discharge_CR

    # Get raster information
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Example_dataset)

    Rivers = RC.Open_nc_array(Name_NC_Rivers_CR)

    # Create ID Matrix
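    # np.ravel_multi_index below maps every (row, col) pair to its position in
    # the flattened (size_Y, size_X) grid; adding 1 gives each pixel a unique
    # 1-based ID that matches the pixel IDs stored in the river dictionaries,
    # so a river pixel ID can be translated back to a (row, col) location.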
    y, x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(
        np.ravel_multi_index(np.vstack((y.ravel(), x.ravel())),
                             (size_Y, size_X),
                             mode='clip').reshape(x.shape)) + 1

    # Get the time dimension from the discharge dictionary
    time_dimension = int(np.shape(Discharge_dict_CR3[0])[0])

    # create an empty array
    Result = np.zeros([time_dimension, size_Y, size_X])

    for river_part in range(0, len(River_dict_CR2)):
        for river_pixel in range(1, len(River_dict_CR2[river_part])):
            river_pixel_ID = River_dict_CR2[river_part][river_pixel]
            if len(np.argwhere(ID_Matrix == river_pixel_ID)) > 0:
                row, col = np.argwhere(ID_Matrix == river_pixel_ID)[0][:]
                Result[:, row,
                       col] = Discharge_dict_CR3[river_part][:, river_pixel]
        print(river_part)

    Outflow = Discharge_dict_CR3[0][:, 1]

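    # Write every monthly discharge grid to a separate GeoTIFF; note that the
    # output folder below is hard-coded.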
    for i in range(0, time_dimension):
        output_name = r'C:/testmap/rtest_%s.tif' % i
        Result_one = Result[i, :, :]
        DC.Save_as_tiff(output_name, Result_one, geo_out, "WGS84")

    import os

    # Get environmental variable for the Home folder
    WA_env_paths = os.environ["WA_HOME"].split(';')
    Dir_Home = WA_env_paths[0]

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    info = [
        'monthly', 'm3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]),
        ''.join([Enddate[5:7], Enddate[0:4]])
    ]
    Name_Result = DC.Create_NC_name('DischargeEnd', Simulation, Dir_Basin, 5,
                                    info)
    Result[np.logical_and(Result == 0.0, Rivers == 0.0)] = np.nan

    DC.Save_as_NC(Name_Result, Result, 'DischargeEnd', Example_dataset,
                  Startdate, Enddate, 'monthly')

    return ()
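
A minimal usage sketch for the Calculate() function above, assuming the wa package is installed and the WA_HOME environment variable points to a working directory that contains the land-use map of the basin. The basin name, product identifiers, dates and simulation number are hypothetical placeholders, not values prescribed by the package.

# Hypothetical call of Calculate(); all argument values are placeholders.
Calculate(Basin='Example_Basin',
          P_Product='CHIRPS',               # assumed precipitation product name
          ET_Product='ETensV1_0',           # assumed evapotranspiration product name
          Inflow_Text_Files=[],             # no inlet discharge text files
          Reservoirs_Lakes_Calculations=1,  # 1 = run the reservoir/lake routine
          Startdate='2010-01-01',
          Enddate='2012-12-31',
          Simulation=1)
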
Code example #2
File: SplitET.py Project: jupaladin/wa
def ITE(Dir_Basin, Name_NC_ET, Name_NC_LAI, Name_NC_P, Name_NC_RD, Name_NC_NDM,
        Name_NC_LU, Startdate, Enddate, Simulation):
    """
    This function splits evapotranspiration into interception, transpiration, and evaporation.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    Name_NC_ET : str
        Path to the .nc file containing ET data
    Name_NC_LAI : str
        Path to the .nc file containing LAI data
    Name_NC_P : str
        Path to the .nc file containing P data
    Name_NC_RD : str
        Path to the .nc file containing Rainy Days data
    Name_NC_NDM : str
        Path to the .nc file containing Normalized Dry Matter data
    Name_NC_LU : str
        Path to the .nc file containing Landuse data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'    
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd' 
    Simulation : int
        Defines the simulation    
        
    Returns
    -------
    I : array
        Array[time, lat, lon] contains the interception data
    T : array
        Array[time, lat, lon] contains the transpiration data
    E : array
        Array[time, lat, lon] contains the evaporation data

    """

    # import general modules
    import os
    import datetime
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    # import WA modules
    import wa.General.raster_conversions as RC
    import wa.Functions.Start.Get_Dictionaries as GD

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq="MS")

    # Extract LU data from NetCDF file
    LU = RC.Open_nc_array(Name_NC_LU, Var='LU')

    # Create a mask to ignore non-relevant pixels.
    lulc_dict = GD.get_lulcs().keys()
    mask = np.logical_or.reduce([LU == value for value in lulc_dict[:-1]])
    mask3d = mask * np.ones(len(Dates))[:, None, None]
    mask3d_neg = (mask3d - 1) * 9999

    # Extract Evapotranspiration data from NetCDF file
    ET = RC.Open_nc_array(Name_NC_ET, Var='ET')[-len(Dates):, :, :]
    # Extract Leaf Area Index data from NetCDF file
    LAI = RC.Open_nc_array(Name_NC_LAI, Var='LAI')[-len(Dates):, :, :]
    # Extract Precipitation data from NetCDF file
    P = RC.Open_nc_array(Name_NC_P, Var='Prec')[-len(Dates):, :, :]
    # Extract Rainy Days data from NetCDF file
    RD = RC.Open_nc_array(Name_NC_RD, Var='RD')[-len(Dates):, :, :]
    # Extract Normalized Dry Matter data and time from NetCDF file
    NDM = RC.Open_nc_array(Name_NC_NDM, Var='NDM')[-len(Dates):, :, :]
    timeNDM = RC.Open_nc_array(Name_NC_NDM, Var='time')

    # Create dictionaries to get the month and year for each timestep
    datesNDMmonth = dict()
    datesNDMyear = dict()
    # Loop over all timesteps
    for i in range(0, len(timeNDM)):
        # change toordinal to month and year
        datesNDMmonth[i] = datetime.date.fromordinal(timeNDM[i]).month
        datesNDMyear[i] = datetime.date.fromordinal(timeNDM[i]).year

    # Calculate the max monthly NDM over the whole period
    NDMmax = dict()

    # loop over the months
    for month in range(1, 13):
        dimensions = []
        # Define which dimension must be opened to get the same month
        for dimension, monthdict in datesNDMmonth.items():
            if monthdict == month:
                dimensions = np.append(dimensions, dimension)
        # Open those time dimensions
        NDMmonth = np.zeros([
            np.size(dimensions),
            int(np.shape(NDM)[1]),
            int(np.shape(NDM)[2])
        ])
        dimensions = np.int_(dimensions)
        NDMmonth[:, :, :] = NDM[dimensions, :, :]
        # Calculate the maximum over the month
        NDMmax[month] = np.nanmax(NDMmonth, 0)

    NDMmax_months = np.zeros(
        [len(timeNDM),
         int(np.shape(NDM)[1]),
         int(np.shape(NDM)[2])])

    # Create 3D array with NDMmax
    for i in range(0, len(timeNDM)):
        NDMmax_months[i, :, :] = np.nanmax(NDMmax[datesNDMmonth[i]])

    # Create some variables needed to plot graphs.
    et = np.array([])
    i = np.array([])
    t = np.array([])

    # Change zero values in RD so we do not get errors
    RD[RD == 0] = 0.001
    LAI[LAI == 0] = 0.001
    LAI[np.isnan(LAI)] = 0.1

    # Calculate I
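    # Interception is computed per rainy day: P / RD is the average rainfall on
    # a rain day, and the factor in parentheses approaches 1 as that rainfall
    # grows, so the per-day interception is bounded by LAI (canopy storage in
    # mm) and the monthly total by LAI * RD.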
    I = LAI * (1 -
               np.power(1 + (P / RD) * (1 - np.exp(-0.5 * LAI)) *
                        (1 / LAI), -1)) * RD

    # Set boundary
    I[np.isnan(LAI)] = np.nan

    # Calculate T
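    # Transpiration is taken as the biomass-productive share of the remaining
    # ET: the ratio NDM / NDMmax_months (clipped to 1) scales (ET - I), and the
    # factor 0.95 caps transpiration at 95% of ET minus interception.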
    T = np.minimum(
        (NDM / NDMmax_months), np.ones(np.shape(NDM))) * 0.95 * (ET - I)

    # Mask Data
    ET = ET * mask3d
    T = T * mask3d
    I = I * mask3d
    ET[mask3d_neg < -1] = np.nan
    T[mask3d_neg < -1] = np.nan
    I[mask3d_neg < -1] = np.nan

    # Calculate E
    E = ET - T - I

    # Calculate monthly averages
    et = np.nanmean(ET.reshape(ET.shape[0], -1), 1)
    i = np.nanmean(I.reshape(I.shape[0], -1), 1)
    t = np.nanmean(T.reshape(T.shape[0], -1), 1)

    # Plot graph of ET and E, T and I fractions.
    fig = plt.figure(figsize=(10, 10))
    plt.grid(b=True, which='Major', color='0.65', linestyle='--', zorder=0)
    ax = fig.add_subplot(111)
    ax.plot(Dates, et, color='k')
    ax.patch.set_visible(False)
    ax.set_title('Average ET and E, T and I fractions')
    ax.set_ylabel('ET [mm/month]')
    ax.patch.set_visible(True)
    ax.fill_between(Dates, et, color='#a3db76', label='Evapotranspiration')
    ax.fill_between(Dates, i + t, color='#6bb8cc', label='Transpiration')
    ax.fill_between(Dates, i, color='#497e7c', label='Interception')
    ax.scatter(Dates, et, color='k')
    ax.legend(loc='upper left', fancybox=True, shadow=True)
    fig.autofmt_xdate()
    ax.set_xlim([Dates[0], Dates[-1]])
    ax.set_ylim([0, max(et) * 1.2])
    ax.set_xlabel('Time')
    [r.set_zorder(10) for r in ax.spines.values()]

    # Define output folder and name for image
    NamePic = "Sim%s_Mean_ET_E_T_I.jpg" % Simulation
    Dir_Basin_Image = os.path.join(Dir_Basin, "Simulations", "Images")
    if not os.path.exists(Dir_Basin_Image):
        os.makedirs(Dir_Basin_Image)

    # Save Images
    plt.savefig(os.path.join(Dir_Basin_Image, NamePic))

    return (I, T, E)
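
A minimal sketch of how ITE() might be called, assuming the monthly NetCDF inputs listed in the docstring have already been produced (for example by the pre-processing shown in the other examples). All paths below are hypothetical placeholders.

# Hypothetical call of ITE(); the directory and .nc paths are placeholders.
Dir_Basin = '/data/WA_HOME/Example_Basin'
I, T, E = ITE(Dir_Basin,
              Name_NC_ET=Dir_Basin + '/ET.nc',
              Name_NC_LAI=Dir_Basin + '/LAI.nc',
              Name_NC_P=Dir_Basin + '/Prec.nc',
              Name_NC_RD=Dir_Basin + '/Rainy_Days.nc',
              Name_NC_NDM=Dir_Basin + '/NDM.nc',
              Name_NC_LU=Dir_Basin + '/LU.nc',
              Startdate='2010-01-01',
              Enddate='2010-12-31',
              Simulation=1)
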
Code example #3
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product,
              NDM_Product, NDVI_Product, ETref_Product, dict_crops,
              dict_non_crops, Startdate, Enddate, Simulation):
    """
    This function is the main framework for calculating Sheet 3.

    Parameters
    ----------
    Basin : str
        Name of the basin
    P_Product : str
        Name of the rainfall product that will be used
    ET_Product : str
        Name of the evapotranspiration product that will be used
    LAI_Product : str
        Name of the LAI product that will be used
    NDM_Product : str
        Name of the NDM product that will be used
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    """
    ######################### Import General and WA modules #######################

    import os
    import numpy as np
    import pandas as pd
    import netCDF4

    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Three as Three
    import wa.Functions.Two as Two
    import wa.Functions.Start as Start
    import wa.Functions.Four as Four
    import wa.Generator.Sheet3 as Generate
    import wa.Functions.Start.Get_Dictionaries as GD

    ######################### Set General Parameters ##############################

    # Check if a full year is selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced
    try:
        years_end = pd.date_range(Startdate, Enddate, freq="A").year
        years_start = pd.date_range(Startdate, Enddate, freq="AS").year
        if (len(years_start) == 0 or len(years_end) == 0):
            print("Calculation period is less than a year, which is not possible for sheet 3")
            return
        years = np.unique(np.append(years_end, years_start))
    except:
        print("Calculation period is less than a year, which is not possible for sheet 3")
        return

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations",
                              "Simulation_%d" % Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the LU map (the shapefile-based method is kept commented out)
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(
        Basin, Dir_Home)

    ############################# Download Data ###################################
    # Check the years that need to be calculated
    years = range(int(Startdate.split('-')[0]), int(Enddate.split('-')[0]) + 1)

    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(
        version='1.0')
    Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())

    for year in years:

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' % int(year)
        Enddate_part = '%s-12-31' % year

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Set Startdate for moving average
        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(
                months=Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime(
                '%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        # Open variables in netcdf
        fh = netCDF4.Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Download data
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, P_Product)

        if not "Actual_Evapotransporation" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, ET_Product)

        if not "Reference_Evapotranspiration" in Variables_NC:
            Data_Path_ETref = Start.Download_Data.ETreference(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']],
                Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if not "NDVI" in Variables_NC:
            Data_Path_NDVI = Start.Download_Data.NDVI(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part)

        if not "Normalized_Dry_Matter" in Variables_NC:
            Data_Path_NPP = Start.Download_Data.NPP(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, NDM_Product)
            Data_Path_GPP = Start.Download_Data.GPP(
                Dir_Basin, [Boundaries['Latmin'], Boundaries['Latmax']],
                [Boundaries['Lonmin'], Boundaries['Lonmax']], Startdate_part,
                Enddate_part, NDM_Product)

        ########################### Create input data #################################

        if not "Normalized_Dry_Matter" in Variables_NC:
            # Create NDM based on MOD17
            if NDM_Product == 'MOD17':
                # Create monthly GPP
                Start.Eightdaily_to_monthly_state.Nearest_Interpolate(
                    Data_Path_GPP, Startdate_part, Enddate_part)
                Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(
                    Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate_part,
                    Enddate_part)

        if not "NDVI" in Variables_NC:
            # Create monthly NDVI based on MOD13
            if NDVI_Product == 'MOD13':
                Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(
                    Data_Path_NDVI, Startdate_part, Enddate_part)

        ###################### Save Data as netCDF files ##############################
        #______________________________Precipitation_______________________________

        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(
                Data_Path_P_Monthly,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec,
                                     "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________________Evaporation________________________________

        # 2.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the Evapotranspiration data and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(
                Data_Path_ET,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET,
                                     "Actual_Evapotranspiration", "mm/month",
                                     0.01)
            del DataCube_ET

        #___________________________Normalized Dry Matter__________________________

        # 3.) Normalized Dry Matter
        if not "Normalized_Dry_Matter" in Variables_NC:
            # Get the Normalized Dry Matter data and save as nc
            DataCube_NDM = RC.Get3Darray_time_series_monthly(
                Data_Path_NDM,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDM,
                                     "Normalized_Dry_Matter", "kg_ha", 0.01)
            del DataCube_NDM

        #_______________________Reference Evaporation______________________________

        # 4.) Reference Evapotranspiration data
        if not "Reference_Evapotranspiration" in Variables_NC:
            # Get the Reference Evapotranspiration data and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(
                Data_Path_ETref,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref,
                                     "Reference_Evapotranspiration",
                                     "mm/month", 0.01)
            del DataCube_ETref

        #____________________________________NDVI__________________________________

        # 5.) NDVI data
        if not "NDVI" in Variables_NC:
            # Get the NDVI data and save as nc
            DataCube_NDVI = RC.Get3Darray_time_series_monthly(
                Data_Path_NDVI,
                Startdate_part,
                Enddate_part,
                Example_data=Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDVI, "NDVI",
                                     "Fraction", 0.0001)
            del DataCube_NDVI

        ############################# Calculate Sheet 3 ###########################

        #____________ Evapotranspiration data split in ETblue and ETgreen ____________

        if not ("Blue_Evapotranspiration" in Variables_NC
                or "Green_Evapotranspiration" in Variables_NC):

            # Calculate Blue and Green ET
            DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(
                Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate,
                Enddate)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue,
                                     "Blue_Evapotranspiration", "mm/month",
                                     0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen,
                                     "Green_Evapotranspiration", "mm/month",
                                     0.01)
            del DataCube_ETblue, DataCube_ETgreen

    #____________________________ Create the empty dictionaries ____________________________

    # Create the dictionaries that are required for sheet 3
    wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties(
    )

    #____________________________________ Fill in the dictionaries ________________________

    # Fill in the crops dictionaries
    wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(
        wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops,
        nc_outname, Dir_Basin)

    # Fill in the non crops dictionaries
    wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(
        wp_y_non_crop_dictionary, dict_non_crops)

    ############################ Create CSV 3 #################################

    csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary,
                                             wp_y_rainfed_dictionary,
                                             wp_y_non_crop_dictionary, Basin,
                                             Simulation, year, Dir_Basin)

    ############################ Create Sheet 3 ###############################

    Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)

    return ()
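
A usage sketch for the Sheet 3 framework above. The calculation period must span at least one full calendar year, and the crop/non-crop dictionaries must follow the structure expected by Three.Fill_Dicts, which is not shown here; all values below are hypothetical placeholders.

# Hypothetical call of Calculate(); product names and dictionaries are placeholders.
Calculate(WA_HOME_folder='',        # '' means the WA_HOME environment variable is used
          Basin='Example_Basin',
          P_Product='CHIRPS',
          ET_Product='ETensV1_0',
          LAI_Product='MOD15',
          NDM_Product='MOD17',      # MOD17 triggers the NPP/GPP-based NDM calculation
          NDVI_Product='MOD13',     # MOD13 triggers the 16-daily to monthly NDVI interpolation
          ETref_Product='WA_ETref',
          dict_crops={},            # crop definitions expected by Three.Fill_Dicts.Crop_Dictionaries
          dict_non_crops={},        # non-crop definitions expected by Three.Fill_Dicts.Non_Crop_Dictionaries
          Startdate='2010-01-01',
          Enddate='2010-12-31',
          Simulation=1)
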
Code example #4
def Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, Name_NC_LU,
           DataCube_I, DataCube_T, DataCube_E, Example_dataset):
    """
    This function creates the CSV files for the sheets

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    Simulation : int
        Defines the simulation            
    Basin : str
        Name of the basin
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'    
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd' 
    Name_NC_LU : str
        Path to the .nc file containing the LU data
    DataCube_I : array
         3D array [time, lat, lon] containing the interception data   
    DataCube_T : array
         3D array [time, lat, lon] containing the transpiration data         
    DataCube_E : array
         3D array [time, lat, lon] containing the evaporation data         
    Example_dataset : str
         Data path to the example tiff file containing the right amount of pixels and projection     

    Returns
    -------
    Data_Path_CSV : str
        Data path pointing to the CSV output files

    """
    # import general modules
    import os
    import csv
    import numpy as np
    import pandas as pd

    # import WA modules
    import wa.Functions.Start.Get_Dictionaries as GD
    import wa.General.raster_conversions as RC
    from wa.Functions import Start

    # Create output folder for CSV files
    Data_Path_CSV = os.path.join(Dir_Basin, "Simulations",
                                 "Simulation_%d" % Simulation, "Sheet_2",
                                 "CSV")
    if not os.path.exists(Data_Path_CSV):
        os.mkdir(Data_Path_CSV)

    # Open LULC map
    LULC = RC.Open_nc_array(Name_NC_LU, 'LU')

    # Set the months
    Dates = pd.date_range(Startdate, Enddate, freq="MS")

    # Define whole years
    YearsStart = pd.date_range(Startdate, Enddate, freq="AS")
    YearsEnd = pd.date_range(Startdate, Enddate, freq="A")
    if len(YearsStart) > 0 and len(YearsEnd) > 0:
        Years = range(int(YearsStart[0].year), int(YearsEnd[-1].year + 1))
        Start_Year = np.argwhere(str(YearsStart[0])[0:10] == Dates)[0][0]
    else:
        Years = []

    # Calculate the area for each pixel in square meters
    area_in_m2 = Start.Area_converter.Degrees_to_m2(Example_dataset)

    # Create Beneficial Maps
    lulc_dict = GD.get_lulcs()

    # Get all the LULC values
    Values_LULC = np.unique(LULC)

    # Create new beneficial arrays
    T_ben_array = np.zeros(np.shape(LULC))
    E_ben_array = np.zeros(np.shape(LULC))
    I_ben_array = np.zeros(np.shape(LULC))
    agriculture_array = np.zeros(np.shape(LULC))
    environment_array = np.zeros(np.shape(LULC))
    economic_array = np.zeros(np.shape(LULC))
    energy_array = np.zeros(np.shape(LULC))
    leisure_array = np.zeros(np.shape(LULC))

    # Loop over LULC values and set beneficial fractions
    for Value_LULC in Values_LULC:
        if Value_LULC in lulc_dict.keys():
            T_ben = lulc_dict[Value_LULC][3]
            E_ben = lulc_dict[Value_LULC][4]
            I_ben = lulc_dict[Value_LULC][5]
            agriculture = lulc_dict[Value_LULC][6]
            environment = lulc_dict[Value_LULC][7]
            economic = lulc_dict[Value_LULC][8]
            energy = lulc_dict[Value_LULC][9]
            leisure = lulc_dict[Value_LULC][10]

            T_ben_array[LULC == Value_LULC] = T_ben / 100.
            E_ben_array[LULC == Value_LULC] = E_ben / 100.
            I_ben_array[LULC == Value_LULC] = I_ben / 100.
            agriculture_array[LULC == Value_LULC] = agriculture / 100.
            environment_array[LULC == Value_LULC] = environment / 100.
            economic_array[LULC == Value_LULC] = economic / 100.
            energy_array[LULC == Value_LULC] = energy / 100.
            leisure_array[LULC == Value_LULC] = leisure / 100.

    # Open sheet 2 dict
    sheet2_classes_dict = GD.get_sheet2_classes()

    # Convert data from mm/month to km3/month
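    # Each monthly layer [k, i, j] is multiplied by the pixel-area map [i, j]:
    # depth (mm) * area (m2) gives a volume in 1e-3 m3, and dividing by 1e12
    # converts that to km3 (1 km3 = 1e9 m3).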
    I_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_I) / 1e12
    E_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_E) / 1e12
    T_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_T) / 1e12

    # Calculate beneficial I, E, and T
    Iben_km3 = np.einsum('ij,kij->kij', I_ben_array, I_km3)
    Eben_km3 = np.einsum('ij,kij->kij', E_ben_array, E_km3)
    Tben_km3 = np.einsum('ij,kij->kij', T_ben_array, T_km3)
    ETben_tot_km3 = Iben_km3 + Eben_km3 + Tben_km3

    # Determine service contribution
    agriculture_km3 = np.einsum('ij,kij->kij', agriculture_array,
                                ETben_tot_km3)
    environment_km3 = np.einsum('ij,kij->kij', environment_array,
                                ETben_tot_km3)
    economic_km3 = np.einsum('ij,kij->kij', economic_array, ETben_tot_km3)
    energy_km3 = np.einsum('ij,kij->kij', energy_array, ETben_tot_km3)
    leisure_km3 = np.einsum('ij,kij->kij', leisure_array, ETben_tot_km3)

    # Create empty arrays
    DataT = np.zeros([29, len(Dates)])
    DataI = np.zeros([29, len(Dates)])
    DataE = np.zeros([29, len(Dates)])
    DataBT = np.zeros([29, len(Dates)])
    DataBI = np.zeros([29, len(Dates)])
    DataBE = np.zeros([29, len(Dates)])
    DataAgriculture = np.zeros([29, len(Dates)])
    DataEnvironment = np.zeros([29, len(Dates)])
    DataEconomic = np.zeros([29, len(Dates)])
    DataEnergy = np.zeros([29, len(Dates)])
    DataLeisure = np.zeros([29, len(Dates)])
    i = 0

    # Loop over the LULC by using the Sheet 2 dictionary
    for LAND_USE in sheet2_classes_dict.keys():
        for CLASS in sheet2_classes_dict[LAND_USE].keys():
            lulcs = sheet2_classes_dict[LAND_USE][CLASS]

            # Create a mask to ignore non-relevant pixels.
            mask = np.logical_or.reduce([LULC == value for value in lulcs])
            mask3d = mask * np.ones(len(Dates))[:, None, None]

            # Calculate the spatial sum of the different parameters.
            T_LU_tot = np.nansum(np.nansum((T_km3 * mask3d), 1), 1)
            I_LU_tot = np.nansum(np.nansum((I_km3 * mask3d), 1), 1)
            E_LU_tot = np.nansum(np.nansum((E_km3 * mask3d), 1), 1)
            BT_LU_tot = np.nansum(np.nansum((Tben_km3 * mask3d), 1), 1)
            BI_LU_tot = np.nansum(np.nansum((Iben_km3 * mask3d), 1), 1)
            BE_LU_tot = np.nansum(np.nansum((Eben_km3 * mask3d), 1), 1)
            Agriculture_LU_tot = np.nansum(
                np.nansum((agriculture_km3 * mask3d), 1), 1)
            Environment_LU_tot = np.nansum(
                np.nansum((environment_km3 * mask3d), 1), 1)
            Economic_LU_tot = np.nansum(np.nansum((economic_km3 * mask3d), 1),
                                        1)
            Energy_LU_tot = np.nansum(np.nansum((energy_km3 * mask3d), 1), 1)
            Leisure_LU_tot = np.nansum(np.nansum((leisure_km3 * mask3d), 1), 1)

            DataT[i, :] = T_LU_tot
            DataBT[i, :] = BT_LU_tot
            DataI[i, :] = I_LU_tot
            DataBI[i, :] = BI_LU_tot
            DataE[i, :] = E_LU_tot
            DataBE[i, :] = BE_LU_tot
            DataAgriculture[i, :] = Agriculture_LU_tot
            DataEnvironment[i, :] = Environment_LU_tot
            DataEconomic[i, :] = Economic_LU_tot
            DataEnergy[i, :] = Energy_LU_tot
            DataLeisure[i, :] = Leisure_LU_tot
            i += 1

    # Calculate non-beneficial components
    DataNBT = DataT - DataBT
    DataNBI = DataI - DataBI
    DataNBE = DataE - DataBE
    DataNB_tot = DataNBT + DataNBI + DataNBE

    # Create CSV
    first_row = [
        'LAND_USE', 'CLASS', 'TRANSPIRATION', 'WATER', 'SOIL', 'INTERCEPTION',
        'AGRICULTURE', 'ENVIRONMENT', 'ECONOMY', 'ENERGY', 'LEISURE',
        'NON_BENEFICIAL'
    ]
    i = 0

    # Create monthly CSV
    for Date in Dates:

        # Create csv-file.
        csv_filename = os.path.join(
            Data_Path_CSV, 'Sheet2_Sim%d_%s_%d_%02d.csv' %
            (Simulation, Basin, Date.year, Date.month))
        csv_file = open(csv_filename, 'wb')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in sheet2_classes_dict.keys():
            for CLASS in sheet2_classes_dict[LAND_USE].keys():

                # Get the value of the current class and landuse
                Transpiration = DataT[j, i]
                Evaporation = DataE[j, i]
                Interception = DataI[j, i]
                Agriculture = DataAgriculture[j, i]
                Environment = DataEnvironment[j, i]
                Economic = DataEconomic[j, i]
                Energy = DataEnergy[j, i]
                Leisure = DataLeisure[j, i]
                Non_beneficial = DataNB_tot[j, i]

                # Set special cases.
                if np.any([
                        CLASS == 'Natural water bodies',
                        CLASS == 'Managed water bodies'
                ]):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written
                row = [
                    LAND_USE, CLASS,
                    "{0:.2f}".format(np.nansum([0, Transpiration])),
                    "{0:.2f}".format(np.nansum([0, Water_evaporation])),
                    "{0:.2f}".format(np.nansum([0, Soil_evaporation])),
                    "{0:.2f}".format(np.nansum([0, Interception])),
                    "{0:.2f}".format(np.nansum([0, Agriculture])),
                    "{0:.2f}".format(np.nansum([0, Environment])),
                    "{0:.2f}".format(np.nansum([0, Economic])),
                    "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(
                        np.nansum([0, Leisure])),
                    "{0:.2f}".format(np.nansum([0, Non_beneficial]))
                ]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1

    # Create yearly CSV
    i = 0
    for Year in Years:

        # Create csv-file.
        csv_filename = os.path.join(
            Data_Path_CSV,
            'Sheet2_Sim%d_%s_%d.csv' % (Simulation, Basin, Year))
        csv_file = open(csv_filename, 'wb')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in sheet2_classes_dict.keys():
            for CLASS in sheet2_classes_dict[LAND_USE].keys():

                # Get the yearly value of the current class and landuse
                Transpiration = np.sum(DataT[j, Start_Year:Start_Year + 12])
                Evaporation = np.sum(DataE[j, Start_Year:Start_Year + 12])
                Interception = np.sum(DataI[j, Start_Year:Start_Year + 12])
                Agriculture = np.sum(DataAgriculture[j, Start_Year:Start_Year +
                                                     12])
                Environment = np.sum(DataEnvironment[j, Start_Year:Start_Year +
                                                     12])
                Economic = np.sum(DataEconomic[j, Start_Year:Start_Year + 12])
                Energy = np.sum(DataEnergy[j, Start_Year:Start_Year + 12])
                Leisure = np.sum(DataLeisure[j, Start_Year:Start_Year + 12])
                Non_beneficial = np.sum(DataNB_tot[j,
                                                   Start_Year:Start_Year + 12])

                # Set special cases.
                if np.any([
                        CLASS == 'Natural water bodies',
                        CLASS == 'Managed water bodies'
                ]):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written
                row = [
                    LAND_USE, CLASS,
                    "{0:.2f}".format(np.nansum([0, Transpiration])),
                    "{0:.2f}".format(np.nansum([0, Water_evaporation])),
                    "{0:.2f}".format(np.nansum([0, Soil_evaporation])),
                    "{0:.2f}".format(np.nansum([0, Interception])),
                    "{0:.2f}".format(np.nansum([0, Agriculture])),
                    "{0:.2f}".format(np.nansum([0, Environment])),
                    "{0:.2f}".format(np.nansum([0, Economic])),
                    "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(
                        np.nansum([0, Leisure])),
                    "{0:.2f}".format(np.nansum([0, Non_beneficial]))
                ]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1
        Start_Year += 12

    return (Data_Path_CSV)
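
Note on the pattern above: np.einsum('ij,kij->kij', ...) simply broadcasts a 2-D per-pixel array over every time slice of a 3-D (time, y, x) cube, and the per-class totals come from masking that cube and summing over both spatial axes. The following standalone sketch (hypothetical array sizes and values, not part of the wa package) illustrates the same two steps:

import numpy as np

# Hypothetical dimensions and inputs (for illustration only).
months, ny, nx = 3, 4, 5
ET_km3 = np.random.rand(months, ny, nx)        # monthly ET cube (time, y, x)
benefit_frac = np.random.rand(ny, nx)          # per-pixel beneficial fraction
LULC = np.random.randint(1, 4, size=(ny, nx))  # land-use class map

# 'ij,kij->kij' multiplies every time slice k by the same 2-D array,
# i.e. ET_ben[k] = benefit_frac * ET_km3[k].
ET_ben = np.einsum('ij,kij->kij', benefit_frac, ET_km3)

# Per-class monthly totals: mask the cube and sum over both spatial axes.
for lu_class in np.unique(LULC):
    mask3d = (LULC == lu_class) * np.ones(months)[:, None, None]
    class_totals = np.nansum(np.nansum(ET_ben * mask3d, 1), 1)  # shape: (months,)
    print(lu_class, class_totals)
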
Code example #5
0
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, ETref_Product, DEM_Product, Water_Occurence_Product, Inflow_Text_Files, WaterPIX_filename, Reservoirs_GEE_on_off, Supply_method, Startdate, Enddate, Simulation):
    '''
    This functions consists of the following sections:
    1. Set General Parameters
    2. Download Data
    3. Convert the RAW data to NETCDF files
    4. Run SurfWAT

    '''
    # import General modules
    import os
    import gdal
    import numpy as np
    import pandas as pd
    from netCDF4 import Dataset

    # import WA plus modules
    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Five as Five
    import wa.Functions.Start as Start
    import wa.Functions.Start.Get_Dictionaries as GD

    ######################### 1. Set General Parameters ##############################

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Example_dataset)

    # Define resolution of SRTM
    Resolution = '15s'

    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())

    ############## Cut dates into pieces if it is needed ######################

    # Check the years that need to be calculated
    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)

    for year in years:

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Open variables in netcdf
        fh = Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' %int(year)
        Enddate_part = '%s-12-31' %int(year)

        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        ############################# 2. Download Data ###################################

        # Download data
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)

        if not "Actual_Evapotranspiration" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)

        if (WaterPIX_filename == "" or Supply_method == "Fraction") and not ("Reference_Evapotranspiration" in Variables_NC):
            Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if Reservoirs_GEE_on_off == 1 and not ("Water_Occurrence" in Variables_NC):
            Data_Path_JRC_occurrence = Start.Download_Data.JRC_occurrence(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Water_Occurence_Product)
            input_JRC = os.path.join(Data_Path_JRC_occurrence, "JRC_Occurrence_percent.tif")
        else:
            input_JRC = None

        # WaterPIX input
        Data_Path_DEM_Dir = Start.Download_Data.DEM_Dir(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Resolution, DEM_Product)
        Data_Path_DEM = Start.Download_Data.DEM(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Resolution, DEM_Product)

        ###################### 3. Convert the RAW data to NETCDF files ##############################
        # The sequence of converting the data into netcdf is:
        # Precipitation
        # Evapotranspiration
        # Reference Evapotranspiration
        # DEM flow directions

        #______________________________Precipitation_______________________________

        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________________Evaporation________________________________

        # 2.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ET

        #_______________________Reference Evaporation______________________________

        # 3.) Reference Evapotranspiration data
        if (WaterPIX_filename == "" or Supply_method == "Fraction") and not ("Reference_Evapotranspiration" in Variables_NC):
            # Get the data of Reference Evapotranspiration and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETref

        #____________________________fraction surface water _______________________

        DataCube_frac_sw = np.ones([size_Y, size_X]) * np.nan

        import wa.Functions.Start.Get_Dictionaries as GD

        # Open LU dataset
        DataCube_LU = RC.Open_nc_array(nc_outname, "Landuse")

        # Get dictionaries and keys
        lulc = GD.get_sheet5_classes()
        lulc_dict = GD.get_sheet5_classes().keys()
        consumed_frac_dict = GD.sw_supply_fractions()

        for key in lulc_dict:
            Numbers = lulc[key]
            for LU_nmbr in Numbers:
                DataCube_frac_sw[DataCube_LU==LU_nmbr] = consumed_frac_dict[key]

        DC.Add_NC_Array_Static(nc_outname, DataCube_frac_sw, "Fraction_Surface_Water_Supply", "fraction", 0.01)
        del DataCube_frac_sw, DataCube_LU

        ################### 4. Calculate Runoff (2 methods: a = Budyko and b = WaterPIX) #####################

        ################ 4a. Calculate Runoff based on Precipitation and Evapotranspiration ##################

        if (Supply_method == "Fraction" and not "Surface_Runoff" in Variables_NC):

            # Calculate runoff based on Budyko
            DataCube_Runoff = Five.Fraction_Based.Calc_surface_runoff(Dir_Basin, nc_outname, Startdate_part, Enddate_part, Example_dataset, ETref_Product, P_Product)

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Runoff, "Surface_Runoff", "mm/month", 0.01)
            del DataCube_Runoff

        ###################### 4b. Get Runoff from WaterPIX ###########################
        if (Supply_method == "WaterPIX" and not "Surface_Runoff" in Variables_NC):

            # Get WaterPIX data
            WaterPIX_Var = 'TotalRunoff_M'
            DataCube_Runoff = Five.Read_WaterPIX.Get_Array(WaterPIX_filename, WaterPIX_Var, Example_dataset, Startdate_part, Enddate_part)

            # Save the runoff as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Runoff, "Surface_Runoff", "mm/month", 0.01)
            del DataCube_Runoff

        ####################### 5. Calculate Extraction (2 methods: a = Fraction, b = WaterPIX) #############################

        ###################### 5a. Get extraction from fraction method by using budyko ###########################
        if (Supply_method == "Fraction" and not "Surface_Withdrawal" in Variables_NC):
            DataCube_surface_withdrawal = Five.Fraction_Based.Calc_surface_withdrawal(Dir_Basin, nc_outname, Startdate_part, Enddate_part, Example_dataset, ETref_Product, P_Product)

            # Save the surface withdrawal as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_surface_withdrawal, "Surface_Withdrawal", "mm/month", 0.01)
            del DataCube_surface_withdrawal

        #################################### 5b. Get extraction from WaterPIX ####################################
        if (Supply_method == "WaterPIX" and not "Surface_Withdrawal" in Variables_NC):
            WaterPIX_Var = 'Supply_M'
            DataCube_Supply = Five.Read_WaterPIX.Get_Array(WaterPIX_filename, WaterPIX_Var, Example_dataset, Startdate, Enddate)

            # Open array with surface water fractions
            DataCube_frac_sw = RC.Open_nc_array(nc_outname, "Fraction_Surface_Water_Supply")

            # Total amount of ETblue taken out of rivers
            DataCube_surface_withdrawal = DataCube_Supply * DataCube_frac_sw[None,:,:]

            # Save the surface withdrawal as netcdf
            DC.Add_NC_Array_Variable(nc_outname, DataCube_surface_withdrawal, "Surface_Withdrawal", "mm/month", 0.01)
            del DataCube_surface_withdrawal

        ################################## 6. Run SurfWAT #####################################

        import wa.Models.SurfWAT as SurfWAT

        # Define formats of input data
        Format_DEM = "TIFF"    # or "TIFF"
        Format_Runoff = "NetCDF"    # or "TIFF"
        Format_Extraction = "NetCDF"    # or "TIFF"
        Format_DEM_dir = "TIFF"    # or "TIFF"
        Format_Basin = "NetCDF"    # or "TIFF"

        # Give path (for tiff) or file (netcdf)
        input_nc = os.path.join(Dir_Basin, "Simulations", "Simulation_%s"%Simulation,"SurfWAT_in_%d.nc" %year)
        output_nc = os.path.join(Dir_Basin, "Simulations", "Simulation_%s"%Simulation,"SurfWAT_out_%d.nc" %year)

        # Create Input File for SurfWAT
        SurfWAT.Create_input_nc.main(Data_Path_DEM_Dir,
                                     Data_Path_DEM,
                                     os.path.dirname(nc_outname),
                                     os.path.dirname(nc_outname),
                                     os.path.dirname(nc_outname),
                                     Startdate,
                                     Enddate,
                                     input_nc,
                                     Resolution,
                                     Format_DEM_dir,
                                     Format_DEM,
                                     Format_Basin,
                                     Format_Runoff,
                                     Format_Extraction)

        # Run SurfWAT
        SurfWAT.Run_SurfWAT.main(input_nc, output_nc, input_JRC, Inflow_Text_Files, Reservoirs_GEE_on_off)

    '''
    ################################# Plot graph ##################################

    # Draw graph
    Five.Channel_Routing.Graph_DEM_Distance_Discharge(Discharge_dict_CR3, Distance_dict_CR2, DEM_dict_CR2, River_dict_CR2, Startdate, Enddate, Example_dataset)

    ######################## Change data to fit the LU data #######################

    # Discharge
    # Define info for the nc files
    info = ['monthly','m3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Discharge = DC.Create_NC_name('DischargeEnd', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Discharge):

        # Get the routed discharge data and save as nc
        DataCube_Discharge_CR = DC.Convert_dict_to_array(River_dict_CR2, Discharge_dict_CR3, Example_dataset)
        DC.Save_as_NC(Name_NC_Discharge, DataCube_Discharge_CR, 'Discharge_End_CR', Example_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Discharge_CR


    '''

    '''

    # DEM
    Name_NC_DEM = DC.Create_NC_name('DEM', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM):

        # Get the DEM data and save as nc
        DataCube_DEM_CR = RC.Open_nc_array(Name_NC_DEM_CR)
        DataCube_DEM = RC.resize_array_example(DataCube_DEM_CR, LU_data, method=1)
        DC.Save_as_NC(Name_NC_DEM, DataCube_DEM, 'DEM', LU_dataset)
        del DataCube_DEM

    # flow direction
    Name_NC_DEM_Dir = DC.Create_NC_name('DEM_Dir', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_DEM_Dir):

        # Get the flow direction data and save as nc
        DataCube_DEM_Dir_CR = RC.Open_nc_array(Name_NC_DEM_Dir_CR)
        DataCube_DEM_Dir = RC.resize_array_example(DataCube_DEM_Dir_CR, LU_data, method=1)
        DC.Save_as_NC(Name_NC_DEM_Dir, DataCube_DEM_Dir, 'DEM_Dir', LU_dataset)
        del DataCube_DEM_Dir

    # Precipitation
    # Define info for the nc files
    info = ['monthly','mm', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Prec = DC.Create_NC_name('Prec', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_Prec):

        # Get the data of Precipitation and save as nc
        DataCube_Prec = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_P_Monthly, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_Prec, DataCube_Prec, 'Prec', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_Prec

    # Evapotranspiration
    Name_NC_ET = DC.Create_NC_name('ET', Simulation, Dir_Basin, 5)
    if not os.path.exists(Name_NC_ET):

        # Get the data of Evapotranspiration and save as nc
        DataCube_ET = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ET, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ET, DataCube_ET, 'ET', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_ET

    # Reference Evapotranspiration data
    Name_NC_ETref = DC.Create_NC_name('ETref', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_ETref):

        # Get the data of Reference Evapotranspiration and save as nc
        DataCube_ETref = RC.Get3Darray_time_series_monthly(Dir_Basin, Data_Path_ETref, Startdate, Enddate, LU_dataset)
        DC.Save_as_NC(Name_NC_ETref, DataCube_ETref, 'ETref', LU_dataset, Startdate, Enddate, 'monthly', 0.01)
        del DataCube_ETref

    # Rivers
    Name_NC_Rivers = DC.Create_NC_name('Rivers', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Rivers):

        # Get the rivers data and save as nc
        Rivers_CR = RC.Open_nc_array(Name_NC_Rivers_CR)
        DataCube_Rivers = RC.resize_array_example(Rivers_CR, LU_data)
        DC.Save_as_NC(Name_NC_Rivers, DataCube_Rivers, 'Rivers', LU_dataset)
        del DataCube_Rivers, Rivers_CR

    # Discharge
    # Define info for the nc files
    info = ['monthly','m3', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]

    Name_NC_Routed_Discharge = DC.Create_NC_name('Routed_Discharge', Simulation, Dir_Basin, 5, info)
    if not os.path.exists(Name_NC_Routed_Discharge):

        # Get the routed discharge data and save as nc
        Routed_Discharge_CR = RC.Open_nc_array(Name_NC_Discharge)
        DataCube_Routed_Discharge = RC.resize_array_example(Routed_Discharge_CR, LU_data)
        DC.Save_as_NC(Name_NC_Routed_Discharge, DataCube_Routed_Discharge, 'Routed_Discharge', LU_dataset, Startdate, Enddate, 'monthly')
        del DataCube_Routed_Discharge, Routed_Discharge_CR





    # Get raster information
    geo_out, proj, size_X, size_Y = RC.Open_array_info(Example_dataset)

    Rivers = RC.Open_nc_array(Name_NC_Rivers_CR)

    # Create ID Matrix
    y,x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape)) + 1

    # Get tiff array time dimension:
    time_dimension = int(np.shape(Discharge_dict_CR3[0])[0])

    # create an empty array
    Result = np.zeros([time_dimension, size_Y, size_X])

    for river_part in range(0,len(River_dict_CR2)):
        for river_pixel in range(1,len(River_dict_CR2[river_part])):
            river_pixel_ID = River_dict_CR2[river_part][river_pixel]
            if len(np.argwhere(ID_Matrix == river_pixel_ID))>0:
                row, col = np.argwhere(ID_Matrix == river_pixel_ID)[0][:]
                Result[:,row,col] = Discharge_dict_CR3[river_part][:,river_pixel]
        print(river_part)


    Outflow = Discharge_dict_CR3[0][:,1]

    for i in range(0,time_dimension):
        output_name = r'C:/testmap/rtest_%s.tif' %i
        Result_one = Result[i, :, :]
        DC.Save_as_tiff(output_name, Result_one, geo_out, "WGS84")

    import os

    # Get environmental variable for the Home folder
    WA_env_paths = os.environ["WA_HOME"].split(';')
    Dir_Home = WA_env_paths[0]

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    info = ['monthly','m3-month-1', ''.join([Startdate[5:7], Startdate[0:4]]) , ''.join([Enddate[5:7], Enddate[0:4]])]
    Name_Result = DC.Create_NC_name('DischargeEnd', Simulation, Dir_Basin, 5, info)
    Result[np.logical_and(Result == 0.0, Rivers == 0.0)] = np.nan

    DC.Save_as_NC(Name_Result, Result, 'DischargeEnd', Example_dataset, Startdate, Enddate, 'monthly')



    '''


    return()
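
The commented-out block above maps routed discharge, stored per river branch and per river pixel, back onto the raster grid via a 1-based pixel-ID matrix built with np.ravel_multi_index. A minimal, standalone sketch of that indexing trick (hypothetical sizes and discharge values, not part of the wa package):

import numpy as np

# Hypothetical raster size and routing output (for illustration only).
size_Y, size_X, n_months = 4, 5, 3
y, x = np.indices((size_Y, size_X))
ID_Matrix = np.int32(np.ravel_multi_index(
    np.vstack((y.ravel(), x.ravel())), (size_Y, size_X), mode='clip'
).reshape(x.shape)) + 1                      # pixel IDs 1 .. size_Y*size_X

river_pixel_IDs = [7, 8]                     # one river branch, two pixels
discharge = np.random.rand(n_months, len(river_pixel_IDs))

# Place each pixel's monthly time series back on the grid.
Result = np.zeros([n_months, size_Y, size_X])
for i, pixel_ID in enumerate(river_pixel_IDs):
    row, col = np.argwhere(ID_Matrix == pixel_ID)[0]
    Result[:, row, col] = discharge[:, i]
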
Code example #6
0
File: Calc_Y_WP.py Project: wateraccounting/wa
def Season(startdate, enddate, dir_nc_outname, lu_class, croptype, ab = (1.0,0.9)):

    """
    Calculate Yields and WPs for one season.

    Parameters
    ----------
    startdate : object
        datetime.date object specifying the startdate of the growing season.
    enddate : object
        datetime.date object specifying the enddate of the growing season.
    dir_nc_outname : str
        Path to the directory containing the yearly NetCDF data.
    lu_class : int
        Landuseclass for which to calculate Y and WP.
    croptype : str
        Name of croptype, should be present in HIWC_dict.keys().
    HIWC_dict : dict
        Dictionary with Harvest indices and Water Contents, see get_dictionaries.get_hi_and_ec().
    ab : tuple, optional
        Two parameters used to split Yield into irrigation and precipitation yield, see split_Yield.

    Returns
    -------
    Yield_Ave_Value : float
        The yield for the croptype.
    Yield_pr_Ave_Value : float
        The yield_precip for the croptype.
    Yield_irr_Ave_Value : float
        The yield_irri for the croptype.
    WP_Ave_Value : float
        The waterproductivity for the croptype.
    WPblue_Ave_Value : float
        The blue waterproductivity for the croptype.
    WPgreen_Ave_Value : float
        The green waterproductivity for the croptype.
    WC_Ave_Value : float
        The water consumption for the croptype.
    WCblue_Ave_Value : float
        The blue water consumption for the croptype.
    WCgreen_Ave_Value : float
        The green water consumption for the croptype.
    """

    import wa.Functions.Three as Three
    import wa.Functions.Start.Get_Dictionaries as GD
    import wa.General.raster_conversions as RC

    # Open the HIWC dict
    HIWC_dict = GD.get_hi_and_ec()

    # Get Harvest Index and Moisture content for a specific crop
    harvest_index = HIWC_dict[croptype][0]
    moisture_content = HIWC_dict[croptype][1]

    # Get the start and end date of the current season
    current = datetime.date(startdate.year, startdate.month, 1)
    end_month = datetime.date(enddate.year, enddate.month, 1)

    req_dates = np.array([current])
    while current < end_month:
        current = current + relativedelta(months = 1)
        req_dates = np.append(req_dates, current)

    # Define input one nc file
    nc_outname_start = os.path.join(dir_nc_outname, "%d.nc" %(int(startdate.year)))
    nc_outname_end = os.path.join(dir_nc_outname, "%d.nc" %(int(enddate.year)))

    if not (os.path.exists(nc_outname_start) or os.path.exists(nc_outname_end)):
        date = req_dates[0]
        print("{0} missing in input data, skipping this season".format(date))
        Yield_Ave_Value = Yield_pr_Ave_Value = Yield_irr_Ave_Value = WP_Ave_Value = WPblue_Ave_Value = WPgreen_Ave_Value = WC_Ave_Value = WCblue_Ave_Value = WCgreen_Ave_Value = np.nan

    else:

        # Calculate the monthly fractions (in case the season does not span whole months)
        fractions = np.ones(np.shape(req_dates))

        # Compute the start-month and end-month fractions and store them in fractions
        start_month_length = float(calendar.monthrange(startdate.year, startdate.month)[1])
        end_month_length = float(calendar.monthrange(enddate.year, enddate.month)[1])

        fractions[0] = (start_month_length - startdate.day + 1) / start_month_length
        fractions[-1] = (enddate.day -1) / end_month_length

        # Get total sum NDM over the growing season
        NDM_array = RC.Open_ncs_array(dir_nc_outname, "Normalized_Dry_Matter", startdate.replace(day=1), enddate)
        NDM = np.nansum(NDM_array * fractions[:,None,None], axis=0)
        del NDM_array

        # Get total sum ET green over the growing season
        ETgreen_array = RC.Open_ncs_array(dir_nc_outname, "Green_Evapotranspiration", startdate.replace(day=1), enddate)
        ETgreen = np.nansum(ETgreen_array * fractions[:,None,None], axis=0)
        del ETgreen_array

        # Get total sum ET blue over the growing season
        ETblue_array = RC.Open_ncs_array(dir_nc_outname, "Blue_Evapotranspiration", startdate.replace(day=1), enddate)
        ETblue = np.nansum(ETblue_array * fractions[:,None,None], axis=0)
        del ETblue_array

        # Get total sum Precipitation over the growing season
        P_array = RC.Open_ncs_array(dir_nc_outname, "Precipitation", startdate.replace(day=1), enddate)
        P = np.nansum(P_array * fractions[:,None,None], axis=0)
        del P_array

        # Open Landuse map
        LULC = RC.Open_nc_array(nc_outname_start, "Landuse")

        # only select the pixels for this Landuse class
        NDM[NDM == 0] = np.nan
        NDM[LULC != lu_class] = ETblue[LULC != lu_class] = ETgreen[LULC != lu_class] =  np.nan

        # Calculate Yield
        Y_Array = (harvest_index * NDM) / (1 - moisture_content)

        # Calculate fractions of ETblue and green and blue Yield
        ETblue_fraction = ETblue / (ETblue + ETgreen)
        p_fraction = P / np.nanmax(P)
        fraction = Three.SplitYield.P_ET_based(p_fraction, ETblue_fraction, ab[0], ab[1])

        # Calculate yield from irrigation and precipitation
        Yirr_Array = Y_Array * fraction
        Ypr_Array = Y_Array - Yirr_Array

        '''
        if output_dir:
            x = y = np.arange(0.0, 1.1, 0.1)
            XX, YY = np.meshgrid(x, y)
            Z = split_Yield(XX,YY, ab[0], ab[1])
            plt.figure(1, figsize = (12,10))
            plt.clf()
            cmap = LinearSegmentedColormap.from_list('mycmap', ['#6bb8cc','#a3db76','#d98d8e'])
            plt.contourf(XX,YY,Z,np.arange(0.0,1.1,0.1), cmap = cmap)
            plt.colorbar(ticks = np.arange(0.0,1.1,0.1), label= 'Yirr as fraction of total Y [-]', boundaries = [0,1])
            plt.xlabel('Normalized Precipitation [-]')
            plt.ylabel('ETblue/ET [-]')
            plt.title('Split Yield into Yirr and Ypr')
            plt.suptitle('Z(X,Y) = -(((Y-1) * a)^2 - ((X-1) * b)^2) + 0.5 with a = {0:.2f} and b = {1:.2f}'.format(ab[0],ab[1]))
            plt.scatter(pfraction, etbfraction, color = 'w', label = croptype, edgecolors = 'k')
            plt.legend()
            plt.xlim((0,1))
            plt.ylim((0,1))
            plt.savefig(os.path.join(output_dir, '{0}_{1}_{2}_cloud.png'.format(croptype, req_dates[0], req_dates[-1])))
        '''

        # calculate average Yields
        Yield_Ave_Value = np.nanmean(Y_Array)
        Yield_pr_Ave_Value = np.nanmean(Ypr_Array)
        Yield_irr_Ave_Value = np.nanmean(Yirr_Array)

        # calculate average blue and green ET
        ETblue_Ave_Value = np.nanmean(ETblue)
        ETgreen_Ave_Value = np.nanmean(ETgreen)

        # Calculate Areas for one pixel
        areas_m2 = wa.Functions.Start.Area_converter.Degrees_to_m2(nc_outname_start)

        # Calculate the total area in km2
        areas_m2[LULC != lu_class] = np.nan
        areas_km2 = areas_m2/1000**2
        print('{0}: {1} km2'.format(croptype, np.nansum(areas_km2)))

        # Calculate the Water consumption in km3
        WCblue_Ave_Value = np.nansum(ETblue_Ave_Value /1000**2 * areas_km2)
        WCgreen_Ave_Value = np.nansum(ETgreen_Ave_Value /1000**2 * areas_km2)
        WC_Ave_Value = WCblue_Ave_Value + WCgreen_Ave_Value

        # Calculate water productivity
        WP_Ave_Value = Yield_Ave_Value / ((ETblue_Ave_Value + ETgreen_Ave_Value) * 10)
        WPblue_Ave_Value = np.where(ETblue_Ave_Value == 0, [np.nan], [Yield_irr_Ave_Value / (ETblue_Ave_Value * 10)])[0]
        WPgreen_Ave_Value = np.where(ETgreen_Ave_Value == 0, [np.nan], [Yield_pr_Ave_Value / (ETgreen_Ave_Value * 10)])[0]

    return Yield_Ave_Value, Yield_pr_Ave_Value, Yield_irr_Ave_Value, WP_Ave_Value, WPblue_Ave_Value, WPgreen_Ave_Value, WC_Ave_Value, WCblue_Ave_Value, WCgreen_Ave_Value
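
For reference, the yield and water-productivity formulas used in Season() reduce to Y = harvest_index * NDM / (1 - moisture_content) and WP = Y / (ET * 10), where the factor 10 converts mm of water over one hectare into m3. A small worked example with hypothetical numbers (not taken from the wa package):

# Hypothetical inputs (for illustration only).
harvest_index = 0.4       # harvest index of the crop [-]
moisture_content = 0.15   # moisture content of the harvested product [-]
NDM = 12000.0             # seasonal normalized dry matter [kg/ha]
ET = 450.0                # seasonal evapotranspiration [mm]

Y = (harvest_index * NDM) / (1 - moisture_content)  # fresh yield [kg/ha]
# 1 mm of water over 1 ha equals 10 m3, hence the factor 10:
WP = Y / (ET * 10)                                   # water productivity [kg/m3]
print(Y, WP)  # roughly 5647 kg/ha and 1.25 kg/m3
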
Code example #7
0
File: main.py Project: wateraccounting/wa
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, ETref_Product, Runoff_Product, Startdate, Enddate, Simulation):
    """
    This function is the main framework for calculating sheet 4.

    Parameters
    ----------
    Basin : str
        Name of the basin
    P_Product : str
        Name of the rainfall product that will be used
    ET_Product : str
        Name of the evapotranspiration product that will be used
    LAI_Product : str
        Name of the LAI product that will be used
    Runoff_Product : str
        Name of the Runoff product that will be used
    Moving_Averaging_Length : int
        Defines the length of the moving average
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    """
    ######################### Import WA modules ###################################

    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Four as Four
    import wa.Functions.Start as Start
    import wa.Generator.Sheet4 as Generate
    import wa.Functions.Start.Get_Dictionaries as GD

    ######################### Set General Parameters ##############################

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)

    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())

    ############## Cut dates into pieces if it is needed ######################

    # Check the years that need to be calculated
    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)

    for year in years:

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Open variables in netcdf
        fh = Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' %int(year)
        Enddate_part = '%s-12-31' %int(year)

        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        ############################# Download Data ###################################

        # Download data
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)

        if not "Actual_Evapotranspiration" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)

        if not "Reference_Evapotranspiration" in Variables_NC:
            Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if not "Grey_Water_Footprint" in Variables_NC:
            Data_Path_GWF = Start.Download_Data.GWF(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']])

        if not "Theta_Saturated_Topsoil" in Variables_NC:
            Data_Path_ThetaSat_topsoil = Start.Download_Data.Soil_Properties(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Para = 'ThetaSat_TopSoil')

        ###################### Save Data as netCDF files ##############################

        #______________________________Precipitation_______________________________

        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________Reference Evaporation______________________________

        # 2.) Reference Evapotranspiration data
        if not "Reference_Evapotranspiration" in Variables_NC:
            # Get the data of Reference Evapotranspiration and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETref

        #_______________________________Evaporation________________________________

        # 3.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ET

        #_____________________________________GWF__________________________________

        # 4.) Grey Water Footprint data
        if not "Grey_Water_Footprint" in Variables_NC:
            # Get the data of grey water footprint and save as nc
            GWF_Filepath = os.path.join(Dir_Basin, Data_Path_GWF, "Gray_Water_Footprint_Fraction.tif")
            dest_GWF = RC.reproject_dataset_example(GWF_Filepath, Example_dataset, method=1)
            DataCube_GWF = dest_GWF.GetRasterBand(1).ReadAsArray()
            DC.Add_NC_Array_Static(nc_outname, DataCube_GWF, "Grey_Water_Footprint", "fraction", 0.0001)
            del DataCube_GWF

    ####################### Calculations Sheet 4 ##############################

    ############## Cut dates into pieces if it is needed ######################

    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)

    for year in years:

        if len(years) > 1.0:

            if year == years[0]:
                Startdate_part = Startdate
                Enddate_part = '%s-12-31' %year
            if year == years[-1]:
                Startdate_part = '%s-01-01' %year
                Enddate_part = Enddate

        else:
            Startdate_part = Startdate
            Enddate_part = Enddate

        #____________ Evapotranspiration data split in ETblue and ETgreen ____________

        if not ("Blue_Evapotranspiration" in Variables_NC or "Green_Evapotranspiration" in Variables_NC):

            # Calculate Blue and Green ET
            DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue, "Blue_Evapotranspiration", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen, "Green_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETblue, DataCube_ETgreen

        #____________ Calculate non-consumed and Total supply maps by using fractions and consumed maps (blue ET) ____________

        if not ("Total_Supply" in Variables_NC or "Non_Consumed_Water" in Variables_NC):

            # Do the calculations
            DataCube_Total_Supply, DataCube_Non_Consumed = Four.Total_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)

            # Save the Total Supply and non consumed data as NetCDF files
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply, "Total_Supply", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Non_Consumed, "Non_Consumed_Water", "mm/month", 0.01)
            del DataCube_Total_Supply, DataCube_Non_Consumed

        #____________ Apply fractions over total supply to calculate gw and sw supply ____________

        if not ("Total_Supply_Surface_Water" in Variables_NC or "Total_Supply_Ground_Water" in Variables_NC):

            # Do the calculations
            DataCube_Total_Supply_SW, DataCube_Total_Supply_GW = Four.SplitGW_SW_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)

            # Save the Total Supply surface water and Total Supply ground water data as NetCDF files
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_SW, "Total_Supply_Surface_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_GW, "Total_Supply_Ground_Water", "mm/month", 0.01)
            del DataCube_Total_Supply_SW, DataCube_Total_Supply_GW

        #____________ Apply grey water footprint fractions to calculate the non-recoverable flow based on the non-consumed flow ____________


        if not ("Non_Recovable_Flow" in Variables_NC or "Recovable_Flow" in Variables_NC):

            # Calculate the non recovable flow and recovable flow by using Grey Water Footprint values
            DataCube_NonRecovableFlow, Datacube_RecovableFlow = Four.SplitNonConsumed_NonRecov.GWF_Based(nc_outname, Startdate_part, Enddate_part)

            # Save the non-recoverable and recoverable flows as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow, "Non_Recovable_Flow", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow, "Recovable_Flow", "mm/month", 0.01)
            del DataCube_NonRecovableFlow, Datacube_RecovableFlow

        #____________ Apply fractions to calculate the non-recoverable SW/GW and recoverable SW/GW ____________

        # 1. Non-recoverable flow
        if not ("Non_Recovable_Flow_Ground_Water" in Variables_NC or "Non_Recovable_Flow_Surface_Water" in Variables_NC):

            # Calculate the non-recoverable return flow to ground and surface water
            DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Non_Recovable_Flow", Startdate_part, Enddate_part)

            # Save the non-recoverable return flows to ground and surface water as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow_Return_GW, "Non_Recovable_Flow_Ground_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_NonRecovableFlow_Return_SW, "Non_Recovable_Flow_Surface_Water", "mm/month", 0.01)
            del DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW

        # 2. Recoverable flow
        if not ("Recovable_Flow_Ground_Water" in Variables_NC or "Recovable_Flow_Surface_Water" in Variables_NC):

            # Calculate the recoverable return flow to ground and surface water
            DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Recovable_Flow", Startdate_part, Enddate_part)

            # Save the recoverable return flows to ground and surface water as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_RecovableFlow_Return_GW, "Recovable_Flow_Ground_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow_Return_SW, "Recovable_Flow_Surface_Water", "mm/month", 0.01)
            del DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW

        ############################ Create CSV 4 #################################

        Dir_Basin_CSV, Unit_front = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate_part, Enddate_part, nc_outname)

    ############################ Create Sheet 4 ###############################

    Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV, Unit_front)

    return()
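
The per-year loop above relies on a simple caching pattern: each variable is only downloaded and written if it is not already present in the year's NetCDF file. A minimal sketch of that check (hypothetical file name; the actual download and write steps are left as placeholders, not the wa package implementation):

import os
from netCDF4 import Dataset

nc_outname = "2010.nc"  # hypothetical per-year output file

def nc_variables(path):
    # Return the variable names already stored in the NetCDF file (empty if absent).
    if not os.path.exists(path):
        return []
    with Dataset(path) as fh:
        return list(fh.variables)

Variables_NC = nc_variables(nc_outname)
if "Precipitation" not in Variables_NC:
    # ...download the product, build the monthly cube, and append it to the
    # NetCDF file (e.g. with DC.Add_NC_Array_Variable in the wa package)...
    pass
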
Code example #8
0
def Season(startdate,
           enddate,
           dir_nc_outname,
           lu_class,
           croptype,
           ab=(1.0, 0.9)):
    """
    Calculate Yields and WPs for one season.

    Parameters
    ----------
    startdate : object
        datetime.date object specifying the startdate of the growing season.
    enddate : object
        datetime.date object specifying the enddate of the growing season.
    dir_nc_outname : str
        Path to the directory containing the yearly NetCDF data.
    lu_class : int
        Landuseclass for which to calculate Y and WP.
    croptype : str
        Name of croptype, should be present in HIWC_dict.keys().
    HIWC_dict : dict
        Dictionary with Harvest indices and Water Contents, see get_dictionaries.get_hi_and_ec().
    ab : tuple, optional
        Two parameters used to split Yield into irrigation and precipitation yield, see split_Yield.

    Returns
    -------
    Yield_Ave_Value : float
        The yield for the croptype.
    Yield_pr_Ave_Value : float
        The yield_precip for the croptype.
    Yield_irr_Ave_Value : float
        The yield_irri for the croptype.
    WP_Ave_Value : float
        The waterproductivity for the croptype.
    WPblue_Ave_Value : float
        The blue waterproductivity for the croptype.
    WPgreen_Ave_Value : float
        The green waterproductivity for the croptype.
    WC_Ave_Value : float
        The water consumption for the croptype.
    WCblue_Ave_Value : float
        The blue water consumption for the croptype.
    WCgreen_Ave_Value : float
        The green water consumption for the croptype.
    """

    import wa.Functions.Three as Three
    import wa.Functions.Start.Get_Dictionaries as GD
    import wa.General.raster_conversions as RC

    # Open the HIWC dict
    HIWC_dict = GD.get_hi_and_ec()

    # Get Harvest Index and Moisture content for a specific crop
    harvest_index = HIWC_dict[croptype][0]
    moisture_content = HIWC_dict[croptype][1]

    # Get the start and end date of the current season
    current = datetime.date(startdate.year, startdate.month, 1)
    end_month = datetime.date(enddate.year, enddate.month, 1)

    req_dates = np.array([current])
    while current < end_month:
        current = current + relativedelta(months=1)
        req_dates = np.append(req_dates, current)

    # Define input one nc file
    nc_outname_start = os.path.join(dir_nc_outname,
                                    "%d.nc" % (int(startdate.year)))
    nc_outname_end = os.path.join(dir_nc_outname,
                                  "%d.nc" % (int(enddate.year)))

    if not (os.path.exists(nc_outname_start)
            or os.path.exists(nc_outname_end)):
        date = req_dates[0]
        print("{0} missing in input data, skipping this season".format(date))
        Yield_Ave_Value = Yield_pr_Ave_Value = Yield_irr_Ave_Value = WP_Ave_Value = WPblue_Ave_Value = WPgreen_Ave_Value = WC_Ave_Value = WCblue_Ave_Value = WCgreen_Ave_Value = np.nan

    else:

        # Calculate the monthly fractions (in case the season does not span whole months)
        fractions = np.ones(np.shape(req_dates))

        # Compute the start-month and end-month fractions and store them in fractions
        start_month_length = float(
            calendar.monthrange(startdate.year, startdate.month)[1])
        end_month_length = float(
            calendar.monthrange(enddate.year, enddate.month)[1])

        fractions[0] = (start_month_length - startdate.day +
                        1) / start_month_length
        fractions[-1] = (enddate.day - 1) / end_month_length

        # Get total sum NDM over the growing season
        NDM_array = RC.Open_ncs_array(dir_nc_outname, "Normalized_Dry_Matter",
                                      startdate.replace(day=1), enddate)
        NDM = np.nansum(NDM_array * fractions[:, None, None], axis=0)
        del NDM_array

        # Get total sum ET green over the growing season
        ETgreen_array = RC.Open_ncs_array(dir_nc_outname,
                                          "Green_Evapotranspiration",
                                          startdate.replace(day=1), enddate)
        ETgreen = np.nansum(ETgreen_array * fractions[:, None, None], axis=0)
        del ETgreen_array

        # Get total sum ET blue over the growing season
        ETblue_array = RC.Open_ncs_array(dir_nc_outname,
                                         "Blue_Evapotranspiration",
                                         startdate.replace(day=1), enddate)
        ETblue = np.nansum(ETblue_array * fractions[:, None, None], axis=0)
        del ETblue_array

        # Get total sum Precipitation over the growing season
        P_array = RC.Open_ncs_array(dir_nc_outname, "Precipitation",
                                    startdate.replace(day=1), enddate)
        P = np.nansum(P_array * fractions[:, None, None], axis=0)
        del P_array

        # Open Landuse map
        LULC = RC.Open_nc_array(nc_outname_start, "Landuse")

        # only select the pixels for this Landuse class
        NDM[NDM == 0] = np.nan
        NDM[LULC != lu_class] = ETblue[LULC != lu_class] = ETgreen[
            LULC != lu_class] = np.nan

        # Calculate Yield
        Y_Array = (harvest_index * NDM) / (1 - moisture_content)

        # Calculate fractions of ETblue and green and blue Yield
        ETblue_fraction = ETblue / (ETblue + ETgreen)
        p_fraction = P / np.nanmax(P)
        fraction = Three.SplitYield.P_ET_based(p_fraction, ETblue_fraction,
                                               ab[0], ab[1])

        # Calculate yield from irrigation and precipitation
        Yirr_Array = Y_Array * fraction
        Ypr_Array = Y_Array - Yirr_Array
        '''
        if output_dir:
            x = y = np.arange(0.0, 1.1, 0.1)
            XX, YY = np.meshgrid(x, y)
            Z = split_Yield(XX,YY, ab[0], ab[1])
            plt.figure(1, figsize = (12,10))
            plt.clf()
            cmap = LinearSegmentedColormap.from_list('mycmap', ['#6bb8cc','#a3db76','#d98d8e'])
            plt.contourf(XX,YY,Z,np.arange(0.0,1.1,0.1), cmap = cmap)
            plt.colorbar(ticks = np.arange(0.0,1.1,0.1), label= 'Yirr as fraction of total Y [-]', boundaries = [0,1])
            plt.xlabel('Normalized Precipitation [-]')
            plt.ylabel('ETblue/ET [-]')
            plt.title('Split Yield into Yirr and Ypr')
            plt.suptitle('Z(X,Y) = -(((Y-1) * a)^2 - ((X-1) * b)^2) + 0.5 with a = {0:.2f} and b = {1:.2f}'.format(ab[0],ab[1]))
            plt.scatter(pfraction, etbfraction, color = 'w', label = croptype, edgecolors = 'k')
            plt.legend()
            plt.xlim((0,1))
            plt.ylim((0,1))
            plt.savefig(os.path.join(output_dir, '{0}_{1}_{2}_cloud.png'.format(croptype, req_dates[0], req_dates[-1])))
        '''

        # calculate average Yields
        Yield_Ave_Value = np.nanmean(Y_Array)
        Yield_pr_Ave_Value = np.nanmean(Ypr_Array)
        Yield_irr_Ave_Value = np.nanmean(Yirr_Array)

        # calculate average blue and green ET
        ETblue_Ave_Value = np.nanmean(ETblue)
        ETgreen_Ave_Value = np.nanmean(ETgreen)

        # Calculate Areas for one pixel
        areas_m2 = wa.Functions.Start.Area_converter.Degrees_to_m2(
            nc_outname_start)

        # Calculate the total area in km2
        areas_m2[LULC != lu_class] = np.nan
        areas_km2 = areas_m2 / 1000**2
        print('{0}: {1} km2'.format(croptype, np.nansum(areas_km2)))

        # Calculate the Water consumption in km3
        WCblue_Ave_Value = np.nansum(ETblue_Ave_Value / 1000**2 * areas_km2)
        WCgreen_Ave_Value = np.nansum(ETgreen_Ave_Value / 1000**2 * areas_km2)
        WC_Ave_Value = WCblue_Ave_Value + WCgreen_Ave_Value

        # Calculate water productivity
        WP_Ave_Value = Yield_Ave_Value / (
            (ETblue_Ave_Value + ETgreen_Ave_Value) * 10)
        WPblue_Ave_Value = np.where(
            ETblue_Ave_Value == 0, [np.nan],
            [Yield_irr_Ave_Value / (ETblue_Ave_Value * 10)])[0]
        WPgreen_Ave_Value = np.where(
            ETgreen_Ave_Value == 0, [np.nan],
            [Yield_pr_Ave_Value / (ETgreen_Ave_Value * 10)])[0]

    return Yield_Ave_Value, Yield_pr_Ave_Value, Yield_irr_Ave_Value, WP_Ave_Value, WPblue_Ave_Value, WPgreen_Ave_Value, WC_Ave_Value, WCblue_Ave_Value, WCgreen_Ave_Value
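
The fractions array in Season() weights the first and last month of the growing season by the share of days that fall inside the season, leaving full months at 1. A standalone sketch of that weighting with hypothetical dates (not part of the wa package):

import calendar
import datetime
import numpy as np

# Hypothetical growing season (for illustration only).
startdate = datetime.date(2010, 3, 20)
enddate = datetime.date(2010, 7, 10)
n_months = (enddate.year - startdate.year) * 12 + (enddate.month - startdate.month) + 1

fractions = np.ones(n_months)
start_len = float(calendar.monthrange(startdate.year, startdate.month)[1])
end_len = float(calendar.monthrange(enddate.year, enddate.month)[1])
fractions[0] = (start_len - startdate.day + 1) / start_len  # remaining days in the first month
fractions[-1] = (enddate.day - 1) / end_len                 # elapsed days in the last month
print(fractions)  # e.g. [0.387, 1, 1, 1, 0.290]
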
Code example #9
0
File: main.py Project: wateraccounting/wa
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, NDM_Product, NDVI_Product, ETref_Product, dict_crops, dict_non_crops, Startdate, Enddate, Simulation):
    """
    This function is the main framework for calculating sheet 3.

    Parameters
    ----------
    Basin : str
        Name of the basin
    P_Product : str
        Name of the rainfall product that will be used
    ET_Product : str
        Name of the evapotranspiration product that will be used
    LAI_Product : str
        Name of the LAI product that will be used
    NDM_Product : str
        Name of the NDM product that will be used
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    """
    ######################### Import WA modules ###################################

    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Three as Three
    import wa.Functions.Two as Two
    import wa.Functions.Start as Start
    import wa.Functions.Four as Four
    import wa.Generator.Sheet3 as Generate
    import wa.Functions.Start.Get_Dictionaries as GD

    ######################### Set General Parameters ##############################

    # Check if a full year is selected between Startdate and Enddate, otherwise Sheet 3 cannot be produced
    try:
        years_end = pd.date_range(Startdate,Enddate,freq="A").year
        years_start = pd.date_range(Startdate,Enddate,freq="AS").year
        if (len(years_start) == 0 or len(years_end) == 0):
            print "Calculation period is less than a year, which is not possible for sheet 3"
            return()
        years = np.unique(np.append(years_end,years_start))
    except:
        print "Calculation period is less than a year, which is not possible for sheet 3"
        return()

    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder

    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)

    ############################# Download Data ###################################
    # Check the years that need to be calculated
    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)

    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())

    for year in years:

        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' %int(year)
        Enddate_part = '%s-12-31' %year

        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)

        # Set Startdate for moving average
        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part

        # Open variables in netcdf
        fh = netCDF4.Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()

        # Download data
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)

        if not "Actual_Evapotransporation" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)

        if not "Reference_Evapotranspiration" in Variables_NC:
            Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)

        if not "NDVI" in Variables_NC:
            Data_Path_NDVI = Start.Download_Data.NDVI(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part)

        if not "Normalized_Dry_Matter" in Variables_NC:
            Data_Path_NPP = Start.Download_Data.NPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, NDM_Product)
            Data_Path_GPP = Start.Download_Data.GPP(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, NDM_Product)

        ########################### Create input data #################################

        if not "Normalized_Dry_Matter" in Variables_NC:
            # Create NDM based on MOD17
            if NDM_Product == 'MOD17':
                # Create monthly GPP
                Start.Eightdaily_to_monthly_state.Nearest_Interpolate(Data_Path_GPP, Startdate_part, Enddate_part)
                Data_Path_NDM = Two.Calc_NDM.NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate_part, Enddate_part)

        if not "NDVI" in Variables_NC:
            # Create monthly NDVI based on MOD13
            if NDVI_Product == 'MOD13':
                Start.Sixteendaily_to_monthly_state.Nearest_Interpolate(Data_Path_NDVI, Startdate_part, Enddate_part)

        ###################### Save Data as netCDF files ##############################
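        # Each monthly DataCube below is appended to the yearly NetCDF file as its own
        # variable; the final argument of DC.Add_NC_Array_Variable (0.01 or 0.0001)
        # presumably acts as the scale factor used when the values are packed into the
        # file, but that depends on the helper's implementation, which is not shown here.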
        #______________________________Precipitation_______________________________

        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
            del DataCube_Prec

        #_______________________________Evaporation________________________________

        # 2.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Actual Evapotranspiration and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ET

        #___________________________Normalized Dry Matter__________________________

        # 3.) Normalized Dry Matter
        if not "Normalized_Dry_Matter" in Variables_NC:
            # Get the data of Normalized Dry Matter and save as nc
            DataCube_NDM = RC.Get3Darray_time_series_monthly(Data_Path_NDM, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDM, "Normalized_Dry_Matter", "kg_ha", 0.01)
            del DataCube_NDM

        #_______________________Reference Evaporation______________________________

        # 4.) Reference Evapotranspiration data
        if not "Reference_Evapotranspiration" in Variables_NC:
            # Get the data of Reference Evapotranspiration and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETref

        #____________________________________NDVI__________________________________

        # 5.) NDVI data
        if not "NDVI" in Variables_NC:
            # Get the data of NDVI and save as nc
            DataCube_NDVI = RC.Get3Darray_time_series_monthly(Data_Path_NDVI, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NDVI, "NDVI", "Fraction", 0.0001)
            del DataCube_NDVI

        ############################# Calculate Sheet 3 ###########################

        #____________ Evapotranspiration data split in ETblue and ETgreen ____________

        if not ("Blue_Evapotranspiration" in Variables_NC or "Green_Evapotranspiration" in Variables_NC):

            # Calculate Blue and Green ET
            DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue, "Blue_Evapotranspiration", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen, "Green_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETblue, DataCube_ETgreen

    #____________________________ Create the empty dictionaries ____________________________

    # Create the dictionaries that are required for sheet 3
    wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary = GD.get_sheet3_empties()

    #____________________________________ Fill in the dictionaries ________________________

    # Fill in the crops dictionaries
    wp_y_irrigated_dictionary, wp_y_rainfed_dictionary = Three.Fill_Dicts.Crop_Dictionaries(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, dict_crops, nc_outname, Dir_Basin)

    # Fill in the non crops dictionaries
    wp_y_non_crop_dictionary = Three.Fill_Dicts.Non_Crop_Dictionaries(wp_y_non_crop_dictionary, dict_non_crops)

    ############################ Create CSV 3 #################################

    csv_fh_a, csv_fh_b = Generate.CSV.Create(wp_y_irrigated_dictionary, wp_y_rainfed_dictionary, wp_y_non_crop_dictionary, Basin, Simulation, year, Dir_Basin)


    ############################ Create Sheet 3 ###############################

    Generate.PDF.Create(Dir_Basin, Basin, Simulation, csv_fh_a, csv_fh_b)

    return()
Code example #10
File: SplitET.py  Project: ali1100/wa
def Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate,
               Enddate):
    """
    This function splits the evapotranspiration into green and blue evapotranspiration.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    nc_outname : str
        Path to the .nc file containing data
    ETref_Product : str
        Name of the ETref product that will be used
    P_Product : str
        Name of the P product that will be used
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'

    Returns
    -------
    ET_Blue : array
              Array[time, lat, lon] contains Blue Evapotranspiration
    ET_Green : array
              Array[time, lat, lon] contains Green Evapotranspiration
    """
    import wa.General.raster_conversions as RC
    import wa.Functions.Start.Get_Dictionaries as GD

    # Input Parameters functions
    scale = 1.1

    # Open LU map for example
    LU = RC.Open_nc_array(nc_outname, "Landuse")

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq='MS')

    # Get moving window period

    # Get dictionaries and keys for the moving average
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(
        version='1.0')
    Classes = ET_Blue_Green_Classes_dict.keys()
    Moving_Averages_Values_Array = np.ones(LU.shape) * np.nan

    # Create array based on the dictionary that gives the Moving average tail for every pixel
    for Class in Classes:
        Values_Moving_Window_Class = Moving_Window_Per_Class_dict[Class]
        for Values_Class in ET_Blue_Green_Classes_dict[Class]:
            Moving_Averages_Values_Array[
                LU == Values_Class] = Values_Moving_Window_Class

    Additional_Months_front = int(np.nanmax(Moving_Averages_Values_Array))
    Additional_Months_tail = 0
    Start_period = Additional_Months_front
    End_period = Additional_Months_tail * -1
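    # The ETref and P cubes assembled below include Additional_Months_front extra months
    # before Startdate (and no tail after Enddate here), so slicing from Start_period,
    # and up to End_period when a tail exists, trims them back to the Startdate-Enddate window.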

    ########################### Extract ETref data #################################

    if ETref_Product == 'WA_ETref':
        # Define data path
        Data_Path_ETref = os.path.join(Dir_Basin, 'ETref', 'Monthly')
    else:
        Data_Path_ETref = ETref_Product

    ETref = Complete_3D_Array(nc_outname, 'Reference_Evapotranspiration',
                              Startdate, Enddate, Additional_Months_front,
                              Additional_Months_tail, Data_Path_ETref)

    ######################## Extract Precipitation data ########################

    if (P_Product == "CHIRPS" or P_Product == "RFE" or P_Product == "TRMM"):
        # Define data path
        Data_Path_P = os.path.join(Dir_Basin, 'Precipitation', P_Product,
                                   'Monthly')
    else:
        Data_Path_P = P_Product

    P = Complete_3D_Array(nc_outname, 'Precipitation', Startdate, Enddate,
                          Additional_Months_front, Additional_Months_tail,
                          Data_Path_P)

    ########################## Extract actET data ##############################
    ET = RC.Open_nc_array(nc_outname, "Actual_Evapotranspiration", Startdate,
                          Enddate)

    ############ Create average ETref and P using moving window ################
    ETref_Ave = np.ones([len(Dates),
                         int(LU.shape[0]),
                         int(LU.shape[1])]) * np.nan
    P_Ave = np.ones([len(Dates), int(LU.shape[0]), int(LU.shape[1])]) * np.nan
    if End_period == 0:
        P_period = P[Start_period:, :, :]
        ETref_period = ETref[Start_period:, :, :]
    else:
        P_period = P[Start_period:End_period, :, :]
        ETref_period = ETref[Start_period:End_period, :, :]

    # Loop over the different moving average tails
    for One_Value in np.unique(Moving_Window_Per_Class_dict.values()):

        # If the moving-average window is 1, use the values of the original ETref or P
        if One_Value == 1:
            Values_Ave_ETref = ETref[int(ETref.shape[0]) - len(Dates):, :, :]
            Values_Ave_P = P[int(ETref.shape[0]) - len(Dates):, :, :]

        # If there is a tail, apply the moving average over the whole datacube
        else:
            Values_Ave_ETref_tot = RC.Moving_average(ETref, One_Value - 1, 0)
            Values_Ave_P_tot = RC.Moving_average(P, One_Value - 1, 0)
            Values_Ave_ETref = Values_Ave_ETref_tot[
                int(Values_Ave_ETref_tot.shape[0]) - len(Dates):, :, :]
            Values_Ave_P = Values_Ave_P_tot[int(Values_Ave_P_tot.shape[0]) -
                                            len(Dates):, :, :]

        # Only assign the data where the pixel's moving-average tail equals One_Value
        ETref_Ave[:, Moving_Averages_Values_Array ==
                  One_Value] = Values_Ave_ETref[:,
                                                Moving_Averages_Values_Array ==
                                                One_Value]
        P_Ave[:, Moving_Averages_Values_Array ==
              One_Value] = Values_Ave_P[:, Moving_Averages_Values_Array ==
                                        One_Value]

    ##################### Calculate ET blue and green ###########################

    # Mask out the nan values (if one of the parameters is nan, then they are all nan)
    mask = np.any([
        np.isnan(LU) *
        np.ones([len(Dates), int(LU.shape[0]),
                 int(LU.shape[1])]) == 1,
        np.isnan(ET),
        np.isnan(ETref[int(ETref.shape[0]) - len(Dates):, :, :]),
        np.isnan(P[int(ETref.shape[0]) - len(Dates):, :, :]),
        np.isnan(P_Ave),
        np.isnan(ETref_Ave)
    ],
                  axis=0)
    ETref_period[mask] = ETref_Ave[mask] = ET[mask] = P_period[mask] = P_Ave[
        mask] = np.nan

    phi = ETref_Ave / P_Ave

    # Calculate Budyko-index
    Budyko = scale * np.sqrt(phi * np.tanh(1 / phi) * (1 - np.exp(-phi)))
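    # The Budyko curve estimates the evaporative fraction ET/P from the aridity index
    # phi = ETref_Ave / P_Ave:  ET/P ~ sqrt(phi * tanh(1/phi) * (1 - exp(-phi))).
    # As an illustration, phi = 1 gives sqrt(tanh(1) * (1 - 1/e)) ~ 0.69, i.e. roughly
    # 69% of rainfall evaporating, which the scale factor of 1.1 then inflates slightly
    # before ET green is capped by the actual ET below.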

    # Calculate ET green
    ETgreen_DataCube = np.minimum(
        Budyko * P[int(ETref.shape[0]) - len(Dates):, :, :], ET)

    # Calculate ET blue
    ETblue_DataCube = ET - ETgreen_DataCube

    return (np.array(ETblue_DataCube), np.array(ETgreen_DataCube))
Code example #11
File: SplitET.py  Project: wateraccounting/wa
def ITE(Dir_Basin, nc_outname, Startdate, Enddate, Simulation):
    """
    This function splits the evapotranspiration into interception, transpiration, and evaporation.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    nc_outname : str
        Path to the .nc file containing all data
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    Returns
    -------
    I : array
        Array[time, lat, lon] contains the interception data
    T : array
        Array[time, lat, lon] contains the transpiration data
    E : array
        Array[time, lat, lon] contains the evaporation data

    """

    # import WA modules
    import wa.General.raster_conversions as RC
    import wa.Functions.Start.Get_Dictionaries as GD

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq = "MS")

    # Extract LU data from NetCDF file
    LU = RC.Open_nc_array(nc_outname, Var = 'Landuse')

    # Create a mask to ignore non-relevant pixels.
    lulc_dict = GD.get_lulcs().keys()
    mask = np.logical_or.reduce([LU == value for value in lulc_dict[:-1]])
    mask3d = mask * np.ones(len(Dates))[:,None,None]
    mask3d_neg = (mask3d-1) * 9999
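    # mask3d marks the relevant land-use pixels with 1 and all other pixels with 0;
    # mask3d_neg turns the non-relevant pixels into -9999 (and relevant ones into 0),
    # which is used further down (mask3d_neg < -1) to set those pixels to NaN.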

    # Extract Evapotranspiration data from NetCDF file
    ET = RC.Open_nc_array(nc_outname, 'Actual_Evapotranspiration', Startdate, Enddate)
    # Extract Leaf Area Index data from NetCDF file
    LAI = RC.Open_nc_array(nc_outname, 'LAI', Startdate, Enddate)
    # Extract Precipitation data from NetCDF file
    P = RC.Open_nc_array(nc_outname, 'Precipitation', Startdate, Enddate)
    # Extract Rainy Days data from NetCDF file
    RD = RC.Open_nc_array(nc_outname, 'Rainy_Days', Startdate, Enddate)
    # Extract Normalized Dry Matter data and time from NetCDF file
    NDM = RC.Open_nc_array(nc_outname, 'Normalized_Dry_Matter', Startdate, Enddate)
    timeNDM = RC.Open_nc_array(nc_outname, 'time')

    # Create dictionaries to get the month and year for each timestep
    datesNDMmonth = dict()
    datesNDMyear = dict()
    # Loop over all timesteps
    for i in range(0,len(timeNDM)):
        # change toordinal to month and year
        datesNDMmonth[i] = datetime.date.fromordinal(timeNDM[i]).month
        datesNDMyear[i] = datetime.date.fromordinal(timeNDM[i]).year

    # Calculate the max monthly NDM over the whole period
    NDMmax = dict()

    # loop over the months
    for month in range(1,13):
        dimensions = []
        # Define which dimensions must be opened to get the same month
        for dimension, monthdict in datesNDMmonth.items():
            if monthdict == month:
                dimensions = np.append(dimensions,dimension)
        # Open those time dimensions
        NDMmonth = np.zeros([np.size(dimensions), int(np.shape(NDM)[1]), int(np.shape(NDM)[2])])
        dimensions = np.int_(dimensions)
        NDMmonth[:,:,:] = NDM[dimensions, :,:]
        # Calculate the maximum over the month
        NDMmax[month] = np.nanmax(NDMmonth,0)

    NDMmax_months = np.zeros([len(timeNDM), int(np.shape(NDM)[1]), int(np.shape(NDM)[2])])

    # Create 3D array with NDMmax
    for i in range(0,len(timeNDM)):
        NDMmax_months[i,:,:] = np.nanmax(NDMmax[datesNDMmonth[i]])

    # Create some variables needed to plot graphs.
    et = np.array([])
    i = np.array([])
    t = np.array([])

    # Change zero values in RD and LAI so we do not get division errors
    RD[RD==0] = 0.001
    LAI[LAI==0] = 0.001
    LAI[np.isnan(LAI)] = 0.1

    # Calculate I
    I = LAI * (1 - np.power(1 + (P/RD) * (1 - np.exp(-0.5 * LAI)) * (1/LAI),-1)) * RD
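    # Sketch of the interception term above: P / RD is the average rainfall per rainy
    # day, the bracketed expression limits the intercepted amount on such a day to
    # roughly the canopy storage (which grows with LAI via 1 - exp(-0.5 * LAI)), and
    # multiplying by RD scales that daily interception back to a monthly total [mm/month].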

    # Set boundary
    I[np.isnan(LAI)] = np.nan

    # Calculate T
    T = np.minimum((NDM/NDMmax_months),np.ones(np.shape(NDM))) * 0.95 * (ET - I)
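    # The NDM / NDMmax_months ratio (capped at 1) expresses how productive a pixel is
    # relative to the monthly maximum dry-matter production; at most 95% of the
    # non-intercepted ET (ET - I) is attributed to transpiration, and the remainder
    # becomes evaporation further down (E = ET - T - I).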

    # Mask Data
    ET = ET * mask3d
    T = T * mask3d
    I = I * mask3d
    ET[mask3d_neg<-1] = np.nan
    T[mask3d_neg<-1] = np.nan
    I[mask3d_neg<-1] = np.nan

    # Calculate E
    E = ET - T - I

    # Calculate monthly averages
    et = np.nanmean(ET.reshape(ET.shape[0], -1),1)
    i = np.nanmean(I.reshape(I.shape[0], -1),1)
    t = np.nanmean(T.reshape(T.shape[0], -1),1)

    # Plot graph of ET and E, T and I fractions.
    fig = plt.figure(figsize = (10,10))
    plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
    ax = fig.add_subplot(111)
    ax.plot(Dates, et, color = 'k')
    ax.patch.set_visible(False)
    ax.set_title('Average ET and E, T and I fractions')
    ax.set_ylabel('ET [mm/month]')
    ax.patch.set_visible(True)
    ax.fill_between(Dates, et, color = '#a3db76', label = 'Evapotranspiration')
    ax.fill_between(Dates, i + t , color = '#6bb8cc', label = 'Transpiration')
    ax.fill_between(Dates, i , color = '#497e7c', label = 'Interception')
    ax.scatter(Dates, et, color = 'k')
    ax.legend(loc = 'upper left',fancybox=True, shadow=True)
    fig.autofmt_xdate()
    ax.set_xlim([Dates[0], Dates[-1]])
    ax.set_ylim([0, max(et) * 1.2])
    ax.set_xlabel('Time')
    [r.set_zorder(10) for r in ax.spines.itervalues()]

    # Define output folder and name for image
    NamePic = "Sim%s_Mean_ET_E_T_I.jpg" %Simulation
    Dir_Basin_Image = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" % Simulation, "Images")
    if not os.path.exists(Dir_Basin_Image):
        os.mkdir(Dir_Basin_Image)

    # Save Images
    plt.savefig(os.path.join(Dir_Basin_Image,NamePic))

    return(I, T, E)
Code example #12
File: SplitET.py  Project: wateraccounting/wa
def Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate):
    """
    This function splits the evapotranspiration into green and blue evapotranspiration.

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    nc_outname : str
        Path to the .nc file containing data
    ETref_Product : str
        Name of the ETref product that will be used
    P_Product : str
        Name of the P product that will be used
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'

    Returns
    -------
    ET_Blue : array
              Array[time, lat, lon] contains Blue Evapotranspiration
    ET_Green : array
              Array[time, lat, lon] contains Green Evapotranspiration
    """
    import wa.General.raster_conversions as RC
    import wa.Functions.Start.Get_Dictionaries as GD


    # Input Parameters functions
    scale = 1.1

    # Open LU map for example
    LU = RC.Open_nc_array(nc_outname, "Landuse")

    # Define monthly dates
    Dates = pd.date_range(Startdate, Enddate, freq = 'MS')

    # Get moving window period

    # Get dictionaries and keys for the moving average
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    Classes = ET_Blue_Green_Classes_dict.keys()
    Moving_Averages_Values_Array = np.ones(LU.shape) * np.nan

    # Create array based on the dictionary that gives the Moving average tail for every pixel
    for Class in Classes:
        Values_Moving_Window_Class = Moving_Window_Per_Class_dict[Class]
        for Values_Class in ET_Blue_Green_Classes_dict[Class]:
            Moving_Averages_Values_Array[LU == Values_Class] = Values_Moving_Window_Class

    Additional_Months_front = int(np.nanmax(Moving_Averages_Values_Array))
    Additional_Months_tail = 0
    Start_period = Additional_Months_front
    End_period = Additional_Months_tail * -1

    ########################### Extract ETref data #################################

    if ETref_Product == 'WA_ETref':
        # Define data path
        Data_Path_ETref = os.path.join(Dir_Basin, 'ETref', 'Monthly')
    else:
        Data_Path_ETref = ETref_Product

    ETref = Complete_3D_Array(nc_outname, 'Reference_Evapotranspiration', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_ETref)

    ######################## Extract Precipitation data ########################

    if (P_Product == "CHIRPS" or P_Product == "RFE"):
        # Define data path
        Data_Path_P = os.path.join(Dir_Basin, 'Precipitation', P_Product, 'Monthly')
    else:
        Data_Path_P = P_Product

    P = Complete_3D_Array(nc_outname, 'Precipitation', Startdate, Enddate, Additional_Months_front, Additional_Months_tail, Data_Path_P)

    ########################## Extract actET data ##############################
    ET = RC.Open_nc_array(nc_outname, "Actual_Evapotranspiration", Startdate, Enddate)

    ############ Create average ETref and P using moving window ################
    ETref_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
    P_Ave = np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])]) * np.nan
    if End_period == 0:
        P_period = P[Start_period:,:,:]
        ETref_period = ETref[Start_period:,:,:]
    else:
        P_period = P[Start_period:End_period,:,:]
        ETref_period = ETref[Start_period:End_period,:,:]

    # Loop over the different moving average tails
    for One_Value in np.unique(Moving_Window_Per_Class_dict.values()):

        # If the moving-average window is 1, use the values of the original ETref or P
        if One_Value == 1:
            Values_Ave_ETref = ETref[int(ETref.shape[0])-len(Dates):,:,:]
            Values_Ave_P = P[int(ETref.shape[0])-len(Dates):,:,:]

        # If there is a tail, apply the moving average over the whole datacube
        else:
            Values_Ave_ETref_tot = RC.Moving_average(ETref, One_Value - 1, 0)
            Values_Ave_P_tot = RC.Moving_average(P, One_Value - 1, 0)
            Values_Ave_ETref = Values_Ave_ETref_tot[int(Values_Ave_ETref_tot.shape[0])-len(Dates):,:,:]
            Values_Ave_P = Values_Ave_P_tot[int(Values_Ave_P_tot.shape[0])-len(Dates):,:,:]

        # Only assign the data where the pixel's moving-average tail equals One_Value
        ETref_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_ETref[:,Moving_Averages_Values_Array == One_Value]
        P_Ave[:,Moving_Averages_Values_Array == One_Value] = Values_Ave_P[:,Moving_Averages_Values_Array == One_Value]

    ##################### Calculate ET blue and green ###########################

    # Mask out the nan values (if one of the parameters is nan, then they are all nan)
    mask = np.any([np.isnan(LU)*np.ones([len(Dates),int(LU.shape[0]),int(LU.shape[1])])==1, np.isnan(ET), np.isnan(ETref[int(ETref.shape[0])-len(Dates):,:,:]), np.isnan(P[int(ETref.shape[0])-len(Dates):,:,:]), np.isnan(P_Ave), np.isnan(ETref_Ave)],axis=0)
    ETref_period[mask] = ETref_Ave[mask] = ET[mask] = P_period[mask] = P_Ave[mask] = np.nan

    phi = ETref_Ave / P_Ave

    # Calculate Budyko-index
    Budyko = scale * np.sqrt(phi*np.tanh(1/phi)*(1-np.exp(-phi)))

    # Calculate ET green
    ETgreen_DataCube = np.minimum(Budyko*P[int(ETref.shape[0])-len(Dates):,:,:],ET)

    # Calculate ET blue
    ETblue_DataCube = ET - ETgreen_DataCube

    return(np.array(ETblue_DataCube), np.array(ETgreen_DataCube))
Code example #13
File: CSV.py  Project: wateraccounting/wa
def Create(Dir_Basin, Simulation, Basin, Startdate, Enddate, nc_outname, Example_dataset):
    """
    This function creates the CSV files for the sheets

    Parameters
    ----------
    Dir_Basin : str
        Path to all the output data of the Basin
    Simulation : int
        Defines the simulation
    Basin : str
        Name of the basin
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    nc_outname : str
        Path to the .nc file containing the data
    Example_dataset : str
        Data path to the example tiff file containing the right amount of pixels and projection

    Returns
    -------
    Data_Path_CSV : str
        Data path pointing to the CSV output files

    """
    # import WA modules
    import wa.Functions.Start.Get_Dictionaries as GD
    import wa.General.raster_conversions as RC
    from wa.Functions import Start

    # Create output folder for CSV files
    Data_Path_CSV = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation, "CSV")
    if not os.path.exists(Data_Path_CSV):
        os.mkdir(Data_Path_CSV)

    # Open LULC map
    LULC = RC.Open_nc_array(nc_outname, 'Landuse')

    # Open I, T, E
    DataCube_I = RC.Open_nc_array(nc_outname, 'Interception', Startdate, Enddate)
    DataCube_T = RC.Open_nc_array(nc_outname, 'Transpiration', Startdate, Enddate)
    DataCube_E = RC.Open_nc_array(nc_outname, 'Evaporation', Startdate, Enddate)

    # Set the months
    Dates = pd.date_range(Startdate, Enddate, freq = "MS")

    # Define whole years
    YearsStart = pd.date_range(Startdate, Enddate, freq = "AS")
    YearsEnd = pd.date_range(Startdate, Enddate, freq = "A")
    if len(YearsStart) > 0 and len(YearsEnd) > 0:
        Years = range(int(YearsStart[0].year), int(YearsEnd[-1].year + 1))
        Start_Year = np.argwhere(str(YearsStart[0])[0:10]==Dates)[0][0]
    else:
        Years = []

    # Calculate the area for each pixel in square meters
    area_in_m2 = Start.Area_converter.Degrees_to_m2(Example_dataset)

    # Create Beneficial Maps
    lulc_dict = GD.get_lulcs()

    # Get all the LULC values
    Values_LULC = np.unique(LULC)

    # Create new beneficial arrays
    T_ben_array = np.zeros(np.shape(LULC))
    E_ben_array = np.zeros(np.shape(LULC))
    I_ben_array = np.zeros(np.shape(LULC))
    agriculture_array = np.zeros(np.shape(LULC))
    environment_array = np.zeros(np.shape(LULC))
    economic_array = np.zeros(np.shape(LULC))
    energy_array = np.zeros(np.shape(LULC))
    leisure_array = np.zeros(np.shape(LULC))

    # Loop over LULC values and set beneficial fractions
    for Value_LULC in Values_LULC:
        if Value_LULC in lulc_dict.keys():
            T_ben = lulc_dict[Value_LULC][3]
            E_ben = lulc_dict[Value_LULC][4]
            I_ben = lulc_dict[Value_LULC][5]
            agriculture = lulc_dict[Value_LULC][6]
            environment = lulc_dict[Value_LULC][7]
            economic = lulc_dict[Value_LULC][8]
            energy = lulc_dict[Value_LULC][9]
            leisure = lulc_dict[Value_LULC][10]

            T_ben_array[LULC == Value_LULC] = T_ben/100.
            E_ben_array[LULC == Value_LULC] = E_ben/100.
            I_ben_array[LULC == Value_LULC] = I_ben/100.
            agriculture_array[LULC == Value_LULC] = agriculture/100.
            environment_array[LULC == Value_LULC] = environment/100.
            economic_array[LULC == Value_LULC] = economic /100.
            energy_array[LULC == Value_LULC] = energy/100.
            leisure_array[LULC == Value_LULC] = leisure /100.

    # Open sheet 2 dict
    sheet2_classes_dict = GD.get_sheet2_classes()

    # Convert data from mm/month to km3/month
    I_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_I)/ 1e12
    E_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_E)/ 1e12
    T_km3 = np.einsum('ij,kij->kij', area_in_m2, DataCube_T)/ 1e12
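    # Unit check for the conversion above: mm/month * m2 = 1e-3 m3/month and
    # 1 km3 = 1e9 m3, so dividing the mm * m2 product by 1e12 yields km3/month per pixel.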

    # Calculate beneficial I, E, and T
    Iben_km3 = np.einsum('ij,kij->kij', I_ben_array, I_km3)
    Eben_km3 = np.einsum('ij,kij->kij', E_ben_array, E_km3)
    Tben_km3 = np.einsum('ij,kij->kij', T_ben_array, T_km3)
    ETben_tot_km3 = Iben_km3 + Eben_km3 + Tben_km3
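    # Beneficial ET per pixel is the sum of interception, evaporation and transpiration,
    # each weighted by its land-use specific beneficial fraction (the *_ben_array values
    # derived from lulc_dict above).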

    # Determine service contribution
    agriculture_km3 = np.einsum('ij,kij->kij', agriculture_array, ETben_tot_km3)
    environment_km3 = np.einsum('ij,kij->kij', environment_array, ETben_tot_km3)
    economic_km3 = np.einsum('ij,kij->kij', economic_array, ETben_tot_km3)
    energy_km3 = np.einsum('ij,kij->kij', energy_array, ETben_tot_km3)
    leisure_km3 = np.einsum('ij,kij->kij', leisure_array, ETben_tot_km3)


    # Create empty arrays
    DataT = np.zeros([29,len(Dates)])
    DataI = np.zeros([29,len(Dates)])
    DataE = np.zeros([29,len(Dates)])
    DataBT = np.zeros([29,len(Dates)])
    DataBI = np.zeros([29,len(Dates)])
    DataBE = np.zeros([29,len(Dates)])
    DataAgriculture = np.zeros([29,len(Dates)])
    DataEnvironment = np.zeros([29,len(Dates)])
    DataEconomic = np.zeros([29,len(Dates)])
    DataEnergy = np.zeros([29,len(Dates)])
    DataLeisure = np.zeros([29,len(Dates)])
    i = 0

    # Loop over the LULC by using the Sheet 2 dictionary
    for LAND_USE in sheet2_classes_dict.keys():
        for CLASS in sheet2_classes_dict[LAND_USE].keys():
            lulcs = sheet2_classes_dict[LAND_USE][CLASS]

            # Create a mask to ignore non-relevant pixels.
            mask = np.logical_or.reduce([LULC == value for value in lulcs])
            mask3d = mask * np.ones(len(Dates))[:,None,None]

            # Calculate the spatial sum of the different parameters.
            T_LU_tot = np.nansum(np.nansum((T_km3 * mask3d),1),1)
            I_LU_tot = np.nansum(np.nansum((I_km3 * mask3d),1),1)
            E_LU_tot = np.nansum(np.nansum((E_km3 * mask3d),1),1)
            BT_LU_tot = np.nansum(np.nansum((Tben_km3 * mask3d),1),1)
            BI_LU_tot = np.nansum(np.nansum((Iben_km3 * mask3d),1),1)
            BE_LU_tot = np.nansum(np.nansum((Eben_km3 * mask3d),1),1)
            Agriculture_LU_tot = np.nansum(np.nansum((agriculture_km3 * mask3d),1),1)
            Environment_LU_tot = np.nansum(np.nansum((environment_km3 * mask3d),1),1)
            Economic_LU_tot = np.nansum(np.nansum((economic_km3 * mask3d),1),1)
            Energy_LU_tot = np.nansum(np.nansum((energy_km3 * mask3d),1),1)
            Leisure_LU_tot = np.nansum(np.nansum((leisure_km3 * mask3d),1),1)

            DataT[i,:] = T_LU_tot
            DataBT[i,:] = BT_LU_tot
            DataI[i,:] = I_LU_tot
            DataBI[i,:] = BI_LU_tot
            DataE[i,:] = E_LU_tot
            DataBE[i,:] = BE_LU_tot
            DataAgriculture[i,:] = Agriculture_LU_tot
            DataEnvironment[i,:] = Environment_LU_tot
            DataEconomic[i,:] = Economic_LU_tot
            DataEnergy[i,:] = Energy_LU_tot
            DataLeisure[i,:] = Leisure_LU_tot
            i += 1

    # Calculate non-beneficial components
    DataNBT = DataT - DataBT
    DataNBI = DataI - DataBI
    DataNBE = DataE - DataBE
    DataNB_tot = DataNBT + DataNBI + DataNBE

    # Create CSV
    first_row = ['LAND_USE', 'CLASS', 'TRANSPIRATION', 'WATER', 'SOIL', 'INTERCEPTION', 'AGRICULTURE', 'ENVIRONMENT', 'ECONOMY', 'ENERGY', 'LEISURE', 'NON_BENEFICIAL']
    i = 0

    # Create monthly CSV
    for Date in Dates:

        # Create csv-file.
        csv_filename = os.path.join(Data_Path_CSV, 'Sheet2_Sim%d_%s_%d_%02d.csv' %(Simulation, Basin, Date.year, Date.month))
        csv_file = open(csv_filename, 'wb')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in sheet2_classes_dict.keys():
             for CLASS in sheet2_classes_dict[LAND_USE].keys():

                # Get the value of the current class and landuse
                Transpiration = DataT[j,i]
                Evaporation = DataE[j,i]
                Interception = DataI[j,i]
                Agriculture = DataAgriculture[j,i]
                Environment = DataEnvironment[j,i]
                Economic = DataEconomic[j,i]
                Energy = DataEnergy[j,i]
                Leisure = DataLeisure[j,i]
                Non_beneficial = DataNB_tot[j,i]

                # Set special cases.
                if np.any([CLASS == 'Natural water bodies', CLASS == 'Managed water bodies']):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written
                row = [LAND_USE, CLASS, "{0:.2f}".format(np.nansum([0, Transpiration])), "{0:.2f}".format(np.nansum([0, Water_evaporation])), "{0:.2f}".format(np.nansum([0, Soil_evaporation])), "{0:.2f}".format(np.nansum([0, Interception])), "{0:.2f}".format(np.nansum([0, Agriculture])), "{0:.2f}".format(np.nansum([0, Environment])), "{0:.2f}".format(np.nansum([0, Economic])), "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(np.nansum([0, Leisure])), "{0:.2f}".format(np.nansum([0, Non_beneficial]))]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1

    # Create yearly CSV
    i = 0
    for Year in Years:

        # Create csv-file.
        csv_filename = os.path.join(Data_Path_CSV, 'Sheet2_Sim%d_%s_%d.csv' %(Simulation, Basin, Year))
        csv_file = open(csv_filename, 'wb')
        writer = csv.writer(csv_file, delimiter=';')
        writer.writerow(first_row)

        j = 0

        # Loop over landuse and class
        for LAND_USE in sheet2_classes_dict.keys():
             for CLASS in sheet2_classes_dict[LAND_USE].keys():

                # Get the yearly value of the current class and landuse
                Transpiration = np.sum(DataT[j,Start_Year:Start_Year+12])
                Evaporation = np.sum(DataE[j,Start_Year:Start_Year+12])
                Interception = np.sum(DataI[j,Start_Year:Start_Year+12])
                Agriculture = np.sum(DataAgriculture[j,Start_Year:Start_Year+12])
                Environment = np.sum(DataEnvironment[j,Start_Year:Start_Year+12])
                Economic = np.sum(DataEconomic[j,Start_Year:Start_Year+12])
                Energy = np.sum(DataEnergy[j,Start_Year:Start_Year+12])
                Leisure = np.sum(DataLeisure[j,Start_Year:Start_Year+12])
                Non_beneficial = np.sum(DataNB_tot[j,Start_Year:Start_Year+12])

                # Set special cases.
                if np.any([CLASS == 'Natural water bodies', CLASS == 'Managed water bodies']):
                    Soil_evaporation = 0
                    Water_evaporation = Evaporation
                else:
                    Soil_evaporation = Evaporation
                    Water_evaporation = 0

                # Create the row to be written
                row = [LAND_USE, CLASS, "{0:.2f}".format(np.nansum([0, Transpiration])), "{0:.2f}".format(np.nansum([0, Water_evaporation])), "{0:.2f}".format(np.nansum([0, Soil_evaporation])), "{0:.2f}".format(np.nansum([0, Interception])), "{0:.2f}".format(np.nansum([0, Agriculture])), "{0:.2f}".format(np.nansum([0, Environment])), "{0:.2f}".format(np.nansum([0, Economic])), "{0:.2f}".format(np.nansum([0, Energy])), "{0:.2f}".format(np.nansum([0, Leisure])), "{0:.2f}".format(np.nansum([0, Non_beneficial]))]

                # Write the row.
                writer.writerow(row)

                j += 1

        # Close the csv-file.
        csv_file.close()
        i += 1
        Start_Year += 12

    return(Data_Path_CSV)