Example no. 1
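#These functions are excerpts from a larger analysis module. A minimal sketch of the
#imports they assume is given below; Input and module are project-specific configuration
#and helper modules, and the exact PCRaster import path may differ by version.
import numpy as np
import numpy.ma as ma
from numpy import isnan
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from scipy import stats
import tifffile
from pcraster import readmap, pcr2numpy  #assumed: PCRaster >= 4.x exposes pcr2numpy at package level
import Input    #project-specific configuration: paths, filenames, variable names
import module   #project-specific helpers: make_map, new_map_netcdf, new_stressor_out_netcdf
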
def calculate_net_consumption(abstraction, return_flow_irrigation, return_flow_non_irrigation, idlist):
 
    #the abstraction file is only opened here to read the time axis
    d = Dataset('D:/Fate/data/totalAbstractions_annuaTot_1960to2010.nc')
    times = d.variables['time'][:]
    d.close()
    net_consumption = abstraction - return_flow_irrigation - return_flow_non_irrigation
    net_consumption = ma.masked_where(isnan(net_consumption), net_consumption)

    total_return_flows = return_flow_irrigation + return_flow_non_irrigation
    total_return_flows = ma.masked_where(isnan(total_return_flows), total_return_flows)

    world = np.sum(net_consumption, axis = 0)

    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world/1e9, label='consumption')  # Plot some data on the axes.
    ax.plot(np.sum(total_return_flows, axis =0)/1e9, label='return flows')  # Plot some data on the axes.
    ax.set_xlabel('year')  # Add an x-label to the axes.
    ax.set_ylabel('km3')  # Add a y-label to the axes.
    ax.set_title("World consumption and return flows " + Input.name_timeperiod)  # Add a title to the axes.
    ax.legend()  # Add a legend.


    ID = ma.getdata(idlist)[:-1]
    #ID = ma.getdata(np.unique(spatial_unit)[3:-1])  #for Valerio's catchments
    
    
    module.new_stressor_out_netcdf(Input.outputDir + '/'+'consumption_' + Input.name_timeperiod + '_' + Input.name_scale, net_consumption[:-1,:], ID, times, 'net water consumption', 'year', 'm3') 

    module.new_stressor_out_netcdf(Input.outputDir + '/'+'return_flows_'+ Input.name_timeperiod + '_' + Input.name_scale, total_return_flows[:-1,:], ID, times, 'total return flows, irrigation and non-irrigation', 'year', 'm3') 

    return net_consumption
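
#The helper module.new_stressor_out_netcdf is not shown in these excerpts. The sketch below
#is a hypothetical implementation inferred only from how it is called here (output path,
#data shaped (spatial_unit, time), catchment IDs, time axis, variable name, time-unit label,
#data unit); the real helper may well differ.
from netCDF4 import Dataset

def _sketch_new_stressor_out_netcdf(path, data, ids, times, varname, time_unit, unit):
    with Dataset(path + '.nc', 'w') as out:
        out.createDimension('spatial_unit', data.shape[0])
        out.createDimension('time', data.shape[1])
        idvar = out.createVariable('spatial_unit', 'i4', ('spatial_unit',))
        idvar[:] = ids
        tvar = out.createVariable('time', 'f8', ('time',))
        tvar.units = time_unit
        tvar[:] = times
        v = out.createVariable(varname, 'f8', ('spatial_unit', 'time'), fill_value=1e20)
        v.units = unit
        v[:, :] = data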


# def calculate_variation(aggregated, spatial_unit, pointer_array):
#     '''
    

#     Parameters
#     ----------
#      aggregated_mean : TYPE groundwater aggregated array (spatial_units, time)
#         DESCRIPTION.represents the mean GWH for each catchments defined in spatial_unit
#     basin : TYPE array
#         DESCRIPTION. spatial_unit delineation and clonemap
#     pointer_array : TYPE array
#         DESCRIPTION. filter array that identifies the coordinates of each spatial unit

#     Returns map of the sum of differences between timesteps over the period, corresponding to spatial units, and values for each catchment
#     -------
#     None.

#     '''

#     d = Dataset('D:/Fate/data/totalAbstractions_annuaTot_1960to2010.nc')

#    # time = d.variables["time"][:]
#     lat = d.variables["latitude"][:]
#     lon = d.variables["longitude"][:]
    
# #average FF
#     var = np.sum(np.diff(aggregated, axis = 1), axis = 1)
#     ref = np.mean(aggregated, axis = 1)
    
#     #elementwise guard: a plain `if ref != 0` is ambiguous for arrays, so use np.where
#     change = np.where(ref != 0, var/ref, var)
    
#     map_var = module.make_map(change, pointer_array, spatial_unit)
    
#     plt.matshow(map_var, vmin = stats.scoreatpercentile(var,5), vmax = stats.scoreatpercentile(var,95))#ok
    
#     module.new_map_netcdf(Input.outputDir +'/'+"test_net_consumption_change_normalized_" + Input.name_timeperiod + '_' + Input.name_scale, map_var, "net consumption variation", "m3", lat, lon)

#     return map_var, var


    


# def calculate_grad_avg(aggregated, spatial_unit, pointer_array):
#     """
    

#     Parameters
#     ----------
#     aggregated_mean : TYPE groundwater aggregated array (spatial_units, time)
#         DESCRIPTION.represents the mean GWH for each catchments defined in spatial_unit
#     basin : TYPE array
#         DESCRIPTION. spatial_unit delineation and clonemap
#     pointer_array : TYPE array
#         DESCRIPTION. filter array that identifies the coordinates of each spatial unit

#     Returns map of the annual gradient average over the last 10 years in each spatial unit, and gradient values for each spatial unit
#     -------
#     None.

#     """
#     d = Dataset('D:/Fate/data/totalAbstractions_annuaTot_1960to2010.nc')

#    # time = d.variables["time"][:]
#     lat = d.variables["latitude"][:]
#     lon = d.variables["longitude"][:]
    
      
#     grad = np.mean(np.gradient(aggregated, axis = 1)[:, -10:], axis = 1)
    
#     ref = np.mean(np.gradient(aggregated, axis = 1), axis = 1)
    
#     change = grad/ref
    
#     stats.scoreatpercentile(grad,5)
#     stats.scoreatpercentile(grad,50)
#     stats.scoreatpercentile(grad,95)
    
#     map_grad = module.make_map(change, pointer_array, spatial_unit)
    
#     plt.imshow(map_grad)
#     #plt.imshow(s_map_grad/1e6, vmin = stats.scoreatpercentile(s_grad/1e6,5), vmax = stats.scoreatpercentile(s_grad/1e6,95))
#     #yellow is max, blue is min
    
#     module.new_map_netcdf(Input.outputDir +'/'+ "test_net consumption_gradient_1950_to_2010"+ Input.name_scale,  map_grad, "net consumption gradient", "m3/yr", lat, lon)

#     return map_grad, grad
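
#The commented-out calculate_grad_avg above normalizes the mean gradient of the last 10
#timesteps by the mean gradient over the whole period. A toy illustration of that metric
#(illustrative numbers only):
import numpy as np

series = np.array([[0., 1., 2., 3., 5., 7., 9., 11., 14., 17., 20., 23.]])  #one catchment, 12 timesteps
grad = np.gradient(series, axis=1)       #per-step slope
recent = np.mean(grad[:, -10:], axis=1)  #mean slope over the last 10 steps
overall = np.mean(grad, axis=1)          #mean slope over the whole period
change = recent / overall                #> 1 means the recent trend is steeper than the long-term mean (for a positive trend)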
Example no. 2
def aggregate(idlist, pointer_array):
    '''
    

    Parameters
    ----------
    idlist : TYPE scalar array
        DESCRIPTION. spatial unit IDs
    pointer_array : TYPE array (spatialunit, indexes)
        DESCRIPTION. for each spatial unit (rows) there is an index filter pointing at the coordinates of the spatial unit;
        the correspondence between catchment ID and spatial_unit row is done using the idlist

    Returns timeseries of the discharge difference (human minus natural) at the outlet of each spatial unit for all timesteps
    -------
    None.

    '''

    #the flow accumulation file is used to identify the outlet point
    flowacc = tifffile.imread(Input.inputDir + '/' + 'flowAcc_5min.tif')
    flowacc = ma.masked_equal(flowacc, -2147483647)

    d = Dataset(Input.inputDir + '/' + Input.fn_discharge)
    #area = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_area_map), mv = 1e20)

    print(d)
    discharge = d.variables["discharge"]  #m3/s
    times = d.variables["time"][:]  #month
    #lat = d.variables["latitude"][:]
    #lon = d.variables["longitude"][:]

    d1 = Dataset(Input.inputDir + '/' + Input.fn_discharge_natural)
    discharge_natural = d1.variables['discharge']

    #aggregate
    ntime, nlat, nlon = discharge.shape
    n_spatial_unit = pointer_array.shape[0]
    s_aggregated = np.full((n_spatial_unit, ntime), 1e20)

    for t in range(ntime):
        temp = discharge[t, :, :] - discharge_natural[t, :, :]
        for k in range(n_spatial_unit):
            coord = np.argmax(
                flowacc[pointer_array[k][0], pointer_array[k][1]], axis=None
            )  #returns the index of the max value of the flattened array.
            s_aggregated[k, t] = np.ravel(
                temp[pointer_array[k][0], pointer_array[k][1]])[
                    coord]  #select the value at the coordinate point
            #s_aggregated[k,t] = np.max(temp[pointer_array[k][0],pointer_array[k][1]])

    s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)

    ID = ma.getdata(idlist[:-1])

    module.new_stressor_out_netcdf(
        Input.outputDir + '/' + 'discharge_outlet_human-natural-rev' +
        Input.name_timeperiod + '_' + Input.name_scale, s_aggregated[:-1, :],
        ID, times, 'discharge flow', 'month', 'm3/s')

    world_Q = np.sum(s_aggregated, axis=0)
    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world_Q, label='human - natural')  # Plot some data on the axes.
    ax.set_xlabel('month')  # Add an x-label to the axes.
    ax.set_ylabel('m3/s')  # Add a y-label to the axes.
    ax.set_title(
        "World discharge at outlet 1960/2010")  # Add a title to the axes.
    ax.legend()  # Add a legend.

    return s_aggregated
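
#The idlist/pointer_array pair used by all of these aggregation functions is built elsewhere.
#The sketch below is one hypothetical way to construct it from a 2-D catchment-ID raster,
#consistent with how pointer_array is indexed in the loops above (assuming non-negative IDs).
import numpy as np
import numpy.ma as ma

def _sketch_build_pointer_array(spatial_unit):
    ids_2d = ma.filled(spatial_unit, -9999)            #replace masked cells with a dummy ID
    idlist = np.unique(ids_2d[ids_2d != -9999])        #one entry per catchment ID
    pointer_array = np.empty(len(idlist), dtype=object)
    for k, cid in enumerate(idlist):
        pointer_array[k] = np.where(ids_2d == cid)     #(row_indices, col_indices) of catchment k
    return idlist, pointer_array
#so that, for any 2-D field f, f[pointer_array[k][0], pointer_array[k][1]] selects all cells of catchment k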
Example no. 3
def aggregate(idlist, pointer_array):
    '''
    

    Parameters
    ----------
    idlist : TYPE scalar array
        DESCRIPTION. spatial unit IDs
    pointer_array : TYPE array (spatialunit, indexes)
        DESCRIPTION. for each spatial unit (rows) there is an index filter pointing at the coordinates of the spatial unit

    Returns timeseries of the groundwater storage difference (human minus natural) aggregated over each spatial unit for all timesteps
    -------
    None.

    '''

    #groundwater head-----------------------------------------------------------
    #we have to do it manually because the gwd file is too big to load into memory.
    d = Dataset(Input.inputDir + '/' + Input.fn_ground_water_depth)
    gwd = d.variables[Input.var_groundwater_depth]
    times = d.variables["time"][:]
    # lat = d.variables["lat"][:]
    # lon = d.variables["lon"][:]

    #d1 = Dataset(Input.fn_ground_water_depth_natural)
    d1 = Dataset(
        'C:/Users/easpi/Documents/PhD Water Footprint/Papers/2 FF modelling with GHM/calculations/data/groundwaterDepthLayer1_yearavg_natural_output_19602010.nc'
    )
    gwd1 = d1.variables[Input.var_groundwater_depth_natural]
    #times = d1.variables["time"][:]

    dem = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_dem), mv=1e20)

    sy = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_aquifer_yield),
                   mv=1e20)
    sy = ma.masked_values(sy, 1e20)

    area = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_area_map),
                     mv=1e20)

    #aggregate
    ntime, nlat, nlon = gwd.shape
    n_spatial_unit = pointer_array.shape[0]
    s_aggregated = np.full((n_spatial_unit, ntime), 1e20)

    # for t in range(0,ntime-1):
    #     temp =  (gwd[t+1,:,:] - gwd[t,:,:])*sy*area#here no need to convert yearly to monthly average
    #     for k in range(n_spatial_unit):
    #         s_aggregated[k,t] = np.sum(temp[pointer_array[k][0],pointer_array[k][1]])

    # s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)
    # gws_depletion_human = s_aggregated#monthly
    # gws_depletion_human = ma.masked_equal(gws_depletion_human, 1e20)

    # gws_depletion_human_yr = module.convert_month_to_year_sum(gws_depletion_human)

    # s_aggregated = np.full((n_spatial_unit, gwd1.shape[0]), 1e20)
    # for t in range(0,gwd1.shape[0]-1):
    #     temp =  (gwd1[t,:,:] - gwd1[t-1,:,:])*sy*area#here no need to convert yearly to monthly average
    #     for k in range(n_spatial_unit):
    #         s_aggregated[k,t] = np.sum(temp[pointer_array[k][0],pointer_array[k][1]])

    # s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)
    # gws_depletion_natural_yr = s_aggregated#yearly
    # gws_depletion_natural_yr = ma.masked_equal(gws_depletion_natural_yr, 1e20)

    # total_depletion = gws_depletion_human_yr - gws_depletion_natural_yr

    for t in range(ntime):
        temp = (
            dem - gwd[t, :, :] - (dem - gwd1[t // 12, :, :])
        ) * sy * area  #head difference (human minus natural) times specific yield and cell area, in m3
        #t // 12 pairs each monthly human-run step with the matching year of the annual natural run, so no yearly-to-monthly conversion is needed
        for k in range(n_spatial_unit):
            s_aggregated[k, t] = np.sum(temp[pointer_array[k][0],
                                             pointer_array[k][1]])

    s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)

    ID = ma.getdata(idlist[:-1])

    module.new_stressor_out_netcdf(
        Input.outputDir + '/' + 'groundwater_storage_' +
        Input.name_timeperiod + '_' + Input.name_scale, s_aggregated[:-1], ID,
        times, 'groundwater storage', 'month', 'm3')

    world = np.sum(s_aggregated, axis=0)
    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world / 1e9, label='human-natural')  # Plot some data on the axes.
    #ax.set_xlabel('yr')  # Add an x-label to the axes.
    ax.set_ylabel('km3')  # Add a y-label to the axes.
    ax.set_title("World groundwater storage " +
                 Input.name_timeperiod)  # Add a title to the axes.
    ax.legend()  # Add a legend.

    return s_aggregated
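
#As a quick unit check on the storage term above (head difference x specific yield x cell area),
#round numbers can be worked through by hand (illustrative values, not model output):
head_difference = 2.0     #m, human minus natural groundwater head
specific_yield = 0.15     #dimensionless
cell_area = 1.0e9         #m2, roughly a 1000 km2 unit collapsed into one cell
storage_change = head_difference * specific_yield * cell_area  #2.0 * 0.15 * 1e9 = 3.0e8 m3
print(storage_change / 1e9, 'km3')                             #0.3 km3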
Example no. 4
def aggregate_wavg(idlist, pointer_array):
    '''
    

    Parameters
    ----------
    idlist : TYPE scalar array
        DESCRIPTION. spatial unit IDs
    pointer_array : TYPE array (spatialunit, indexes)
        DESCRIPTION. for each spatial unit (rows) there is an index filter pointing at the coordinates of the spatial unit;
        the correspondence between catchment ID and spatial_unit row is done using the idlist

    Returns timeseries of the area-weighted groundwater head difference (human minus natural) averaged over each spatial unit for all timesteps
    -------
    None.

    '''

    #groundwater head-----------------------------------------------------------
    #we have to do it manually because the gwd file is too big to load into memory.
    d = Dataset(Input.inputDir + '/' + Input.fn_ground_water_depth)
    gwd = d.variables[Input.var_groundwater_depth]
    times = d.variables["time"][:]
    # lat = d.variables["lat"][:]
    # lon = d.variables["lon"][:]

    d1 = Dataset(Input.fn_ground_water_depth_natural)
    gwd1 = d1.variables[Input.var_groundwater_depth_natural]

    dem = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_dem), mv=1e20)

    area = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_area_map),
                     mv=1e20)

    #aggregate
    ntime, nlat, nlon = gwd.shape
    n_spatial_unit = pointer_array.shape[0]
    gwh_aggregated_mean = np.full((n_spatial_unit, ntime), 1e20)

    for t in range(ntime):
        #s =  module.calculate_groundwater_head(gwd, dem, t)
        s = (dem - gwd[t, :, :]) - (dem - gwd1[t // 12, :, :])
        for k in range(n_spatial_unit):
            gwh_aggregated_mean[k, t] = np.average(
                s[pointer_array[k][0], pointer_array[k][1]],
                weights=area[pointer_array[k][0], pointer_array[k][1]])

    gwh_aggregated_mean = ma.masked_where(isnan(gwh_aggregated_mean),
                                          gwh_aggregated_mean)

    ID = ma.getdata(idlist[:-1])

    module.new_stressor_out_netcdf(
        Input.outputDir + '/' + 'groundwater_head_wavg_' +
        Input.name_timeperiod + '_' + Input.name_scale,
        gwh_aggregated_mean[:-1], ID, times, 'groundwater head', 'year', 'm')

    world = np.mean(gwh_aggregated_mean, axis=0)
    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world, label='groundwater head')  # Plot some data on the axes.
    ax.set_xlabel('month')  # Add an x-label to the axes.
    ax.set_ylabel('m')  # Add a y-label to the axes.
    ax.set_title("World groundwater head human - natural " +
                 Input.name_timeperiod)  # Add a title to the axes.
    ax.legend()  # Add a legend.

    return gwh_aggregated_mean
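
#The area weighting above is np.average with the cell areas as weights; a three-cell check of
#what that computes (illustrative numbers):
import numpy as np

heads = np.array([1.0, 2.0, 4.0])        #m, head difference in three cells
areas = np.array([1.0, 1.0, 2.0]) * 1e6  #m2, cell areas
wavg = np.average(heads, weights=areas)  #(1*1 + 2*1 + 4*2) / (1 + 1 + 2) = 2.75 m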
Example no. 5
def aggregate(idlist, pointer_array):
    '''
    

    Parameters
    ----------
    idlist : TYPE scalar array
        DESCRIPTION. spatial unit IDs
    pointer_array : TYPE array (spatialunit, indexes)
        DESCRIPTION. for each spatial unit (rows) there is an index filter pointing at the coordinates of the spatial unit;
        the correspondence between catchment ID and spatial_unit row is done using the idlist

    Returns timeseries of the evapotranspiration difference (human minus natural) aggregated over each spatial unit for all timesteps
    -------
    None.

    '''

#evapotranspiration---------------------------------------------------------
#the monthly natural-run file is read per timestep rather than loaded whole
    
    area = pcr2numpy(readmap(Input.inputDir +'/'+ Input.fn_area_map), mv = 1e20)

    d = Dataset(Input.inputDir +'/'+'totalEvaporation_annuaTot_output_1960to2010_human.nc')
    s = d.variables['total_evaporation'][:]
    #times = d.variables["time"][:]#year
    # lat = d.variables["latitude"][:]
    # lon = d.variables["longitude"][:]
    
    d1 = Dataset(Input.inputDir +'/'+'totalEvaporation_monthTot_output_1960to2004_natural.nc')  #the filename is inaccurate; the timespan is identical to the human run 1960-2012 - 52 years of record
    s1 = d1.variables['total_evaporation']#month
    times1 = d1.variables['time'][:]

    
#aggregate
    ntime, nlat, nlon = s1.shape
    n_spatial_unit = pointer_array.shape[0]
    s_aggregated = np.full((n_spatial_unit, ntime), 1e20)
    
    
    for t in range(ntime):
        temp =  (s[t//12,:,:]/12 - s1[t,:,:])*area  #conversion to m3; the annual human-run ET is converted to monthly ET
        for i in range(n_spatial_unit):  #select catchments in the same order as the pointer array so that the idlist stays valid
            s_aggregated[i,t] = np.sum(temp[pointer_array[i][0], pointer_array[i][1]])

    s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)

    ID = ma.getdata(idlist[:-1])  #exclude where the idlist does not correspond to a catchment
    
    module.new_stressor_out_netcdf(Input.outputDir + '/'+'evapotranspiration_human-natural_1960_2004_1'  + '_' + Input.name_scale, s_aggregated[:-1,:], ID, times1, 'evapotranspiration', 'yr', 'm3') 
    
    
    
    world = np.sum(s_aggregated, axis = 0)
    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world/1e9, label='evapotranspiration')  # Plot some data on the axes.
    ax.set_xlabel('month')  # Add an x-label to the axes.
    ax.set_ylabel('km3')  # Add a y-label to the axes.
    ax.set_title("World evapotranspiration human - natural 1960-2004"+Input.name_timeperiod)  # Add a title to the axes.
    ax.legend()  # Add a legend.
    
    return s_aggregated
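
#The t//12 indexing above pairs each monthly natural-run step with its parent year of the annual
#human run and splits the annual total evenly over 12 months; a quick check that the split
#re-sums to the annual total (illustrative value):
import numpy as np

annual_et = np.array([600.0])                                           #one year of annual human-run ET
monthly_share = np.array([annual_et[t // 12] / 12 for t in range(12)])  #12 equal monthly shares of 50
assert np.isclose(monthly_share.sum(), annual_et[0])                    #12 * 50 == 600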
Example no. 6
def aggregate(idlist, pointer_array):
    '''
    

    Parameters
    ----------
    idlist : TYPE scalar array
        DESCRIPTION. spatial unit IDs
    pointer_array : TYPE array (spatialunit, indexes)
        DESCRIPTION. for each spatial unit (rows) there is an index filter pointing at the coordinates of the spatial unit;
        the correspondence between catchment ID and spatial_unit row is done using the idlist

    Returns timeseries of the soil moisture difference (human minus natural) aggregated over each spatial unit for all timesteps
    -------
    None.

    '''

    #-----------------------------------------------------------

    area = pcr2numpy(readmap(Input.inputDir + '/' + Input.fn_area_map),
                     mv=1e20)

    d = Dataset(Input.inputDir + '/' + Input.fn_soil_low)
    slow = d.variables["lower_soil_storage"][:]

    #lat = d.variables["latitude"][:]
    #lon = d.variables["longitude"][:]
    d.close()

    d = Dataset(Input.inputDir + '/' + Input.fn_soil_upp)
    sup = d.variables["upper_soil_storage"][:]
    d.close()

    soil_moisture = slow + sup

    d1 = Dataset(Input.inputDir + '/' + Input.fn_soil_low_nat)
    times = d1.variables["time"][:]
    slow1 = d1.variables["lower_soil_storage"][:]
    #lat = d.variables["latitude"][:]
    #lon = d.variables["longitude"][:]
    d1.close()

    d1 = Dataset(Input.inputDir + '/' + Input.fn_soil_upp_nat)
    sup1 = d1.variables["upper_soil_storage"][:]
    d1.close()

    soil_moisture1 = slow1 + sup1

    #note: soil_moisture[45] is a single human-run timestep broadcast against every natural-run
    #timestep; an elementwise difference over the full series would be soil_moisture - soil_moisture1
    s = soil_moisture[45, :, :] - soil_moisture1

    #aggregate
    ntime, nlat, nlon = s.shape
    n_spatial_unit = pointer_array.shape[0]
    s_aggregated = np.full((n_spatial_unit, ntime), 1e20)

    for t in range(ntime):
        temp = s[t, :, :] * area  #conversion to m3
        for i in range(n_spatial_unit):  #select catchments in the same order as the pointer array so that the idlist stays valid
            s_aggregated[i, t] = np.sum(temp[pointer_array[i][0],
                                             pointer_array[i][1]])

    s_aggregated = ma.masked_where(isnan(s_aggregated), s_aggregated)

    ID = ma.getdata(idlist[:-1])

    module.new_stressor_out_netcdf(
        Input.outputDir + '/' + 'soil_moisture_human-natural_' +
        Input.name_timeperiod + '_' + Input.name_scale, s_aggregated[:-1], ID,
        times, 'soil moisture', 'year', 'm3')

    world_SM = np.sum(s_aggregated, axis=0)

    fig, ax = plt.subplots()  # Create a figure and an axes.
    ax.plot(world_SM / 1e9,
            label='soil moisture')  # Plot some data on the axes.
    ax.set_xlabel('year')  # Add an x-label to the axes.
    ax.set_ylabel('km3')  # Add a y-label to the axes.
    ax.set_title("World soil moisture human - natural " +
                 Input.name_timeperiod)  # Add a title to the axes.
    ax.legend()  # Add a legend.

    np.sum(world_SM) / 1e9  #total over the whole period in km3 (interactive check; the value is not stored)

    return s_aggregated
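
#The difference at the top of this function broadcasts a single human-run year (index 45) against
#every natural-run year; the sketch below contrasts that with a year-by-year difference
#(illustrative shapes, not model data):
import numpy as np

human = np.arange(24, dtype=float).reshape(4, 2, 3)  #(ntime, nlat, nlon) human run
natural = np.zeros((4, 2, 3))                        #(ntime, nlat, nlon) natural run
fixed_year = human[2, :, :] - natural                #one human year against every natural year -> shape (4, 2, 3)
year_by_year = human - natural                       #elementwise difference over the full series -> shape (4, 2, 3)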