Example #1
    def __init__(
            self,
            name=None,
            fueltype=None,
            eff_by=None,
            eff_ey=None,
            year_eff_ey=None,
            eff_achieved=None,
            diff_method=None,
            market_entry=2015,
            tech_type=None,
            tech_max_share=None,
            description=None
        ):
        self.name = name
        self.fueltype_str = fueltype
        self.fueltype_int = tech_related.get_fueltype_int(fueltype)
        self.eff_by = eff_by
        self.eff_ey = eff_ey
        self.year_eff_ey = year_eff_ey
        self.eff_achieved = eff_achieved
        self.diff_method = diff_method
        self.market_entry = market_entry
        self.tech_type = tech_type
        self.tech_max_share = tech_max_share
        self.description = description
Example #2
def test_get_fueltype_int():
    """Testing function
    """
    fueltypes = {'gas': 1}
    in_value = 'gas'
    expected = 1

    # call function
    out_value = tech_related.get_fueltype_int(fueltypes, in_value)

    assert out_value == expected
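The test above implies that get_fueltype_int is essentially a dictionary lookup from a fueltype name to its integer code. The following minimal sketch is an assumption based on that test, not the package's actual implementation (later examples also call a one-argument variant, which presumably falls back to an internal lookup table):

def get_fueltype_int(fueltypes, fueltype_str):
    """Return the integer code of a fueltype name.

    Sketch only: `fueltypes` is assumed to map names such as
    'gas' or 'electricity' to integer codes, e.g. {'gas': 1}.
    """
    if fueltype_str is None:
        return None
    return fueltypes[fueltype_str]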
Example #3
    def __init__(self,
                 name,
                 tech_type,
                 fueltype_str=None,
                 eff_achieved=None,
                 diff_method=None,
                 eff_by=None,
                 eff_ey=None,
                 year_eff_ey=None,
                 market_entry=None,
                 tech_max_share=None,
                 other_enduse_mode_info=None,
                 base_yr=None,
                 curr_yr=None,
                 fueltypes=None,
                 temp_by=None,
                 temp_cy=None,
                 t_base_heating_by=None,
                 t_base_heating_cy=None,
                 description=''):
        """Contructor
        """
        self.name = name
        self.tech_type = tech_type
        self.description = description
        self.fueltype_str = fueltype_str
        self.fueltype_int = tech_related.get_fueltype_int(
            fueltypes, fueltype_str)
        self.eff_achieved_f = eff_achieved
        self.diff_method = diff_method
        self.market_entry = market_entry
        self.tech_max_share = tech_max_share

        if tech_type == 'placeholder_tech':
            self.eff_by = 1.0
            self.eff_cy = 1.0
        elif tech_type == 'heat_pump':
            self.eff_by = tech_related.calc_hp_eff(temp_by, eff_by,
                                                   t_base_heating_by)

            self.eff_cy = tech_related.calc_hp_eff(
                temp_cy,
                tech_related.calc_eff_cy(base_yr, curr_yr, eff_by, eff_ey,
                                         year_eff_ey, other_enduse_mode_info,
                                         self.eff_achieved_f,
                                         self.diff_method), t_base_heating_cy)
        else:
            self.eff_by = eff_by
            self.eff_cy = tech_related.calc_eff_cy(base_yr, curr_yr, eff_by,
                                                   eff_ey, year_eff_ey,
                                                   other_enduse_mode_info,
                                                   self.eff_achieved_f,
                                                   self.diff_method)
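Example #4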
def test_calc_service_fuel_switched():
    """
    """
    fueltype_lookup = {
        'solid_fuel': 0,
        'gas': 1,
        'electricity': 2,
        'oil': 3,
        'heat_sold': 4,
        'biomass': 5,
        'hydrogen': 6,
        'heat': 7}

    technologies = {
        'boilerA': read_data.TechnologyData(
            fueltype='gas',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990),
        'boilerB': read_data.TechnologyData(
            fueltype='electricity',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990),
        'boilerC': read_data.TechnologyData(
            fueltype='gas',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990)}

    enduse = 'heating'

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=2020,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=1.0
        )]

    service_fueltype_p = {1: 1.0, 2: 0.0}
    s_tech_by_p = {'boilerA': 1.0, 'boilerB': 0.0}
    fuel_tech_p_by = {1: {'boilerA': 1.0}, 2: {'boilerB': 1.0}}

    result = s_generate_sigmoid.calc_service_fuel_switched(
        fuel_switches,
        technologies,
        service_fueltype_p,
        s_tech_by_p,
        fuel_tech_p_by,
        switch_type='actual_switch')

    assert result['boilerB'] == 1.0
    assert result['boilerA'] == 0.0

    # -------

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=3050,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=0.5
        )]

    service_fueltype_p = {1: 1.0, 2: 0.0}
    s_tech_by_p = {'boilerA': 1.0, 'boilerB': 0.0}
    fuel_tech_p_by = {1: {'boilerA': 1.0}, 2: {'boilerB': 1.0}}

    result = s_generate_sigmoid.calc_service_fuel_switched(
        fuel_switches,
        technologies,
        service_fueltype_p,
        s_tech_by_p,
        fuel_tech_p_by,
        switch_type='actual_switch')

    assert result['boilerB'] == 0.5
    assert result['boilerA'] == 0.5

    # -------

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=3050,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=0.5
        )]

    service_fueltype_p = {1: 0.5, 2: 0.5}
    s_tech_by_p = {'boilerA': 0.5, 'boilerB': 0.5}
    fuel_tech_p_by = {1: {'boilerA': 1.0}, 2: {'boilerB': 1.0}}

    result = s_generate_sigmoid.calc_service_fuel_switched(
        fuel_switches,
        technologies,
        service_fueltype_p,
        s_tech_by_p,
        fuel_tech_p_by,
        switch_type='actual_switch')

    assert result['boilerB'] == 0.75
    assert result['boilerA'] == 0.25

    # -------

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=3050,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=0.5
        )]

    service_fueltype_p = {1: 0.5, 2: 0.5}
    s_tech_by_p = {'boilerA': 0.25, 'boilerB': 0.5, 'boilerC': 0.25}
    fuel_tech_p_by = {1: {'boilerA': 0.5, 'boilerC': 0.5}, 2: {'boilerB': 1.0}}

    result = s_generate_sigmoid.calc_service_fuel_switched(
        fuel_switches,
        technologies,
        service_fueltype_p,
        s_tech_by_p,
        fuel_tech_p_by,
        switch_type='actual_switch')

    assert result['boilerC'] == 0.125
    assert result['boilerB'] == 0.75
    assert result['boilerA'] == 0.125
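The shares asserted in the last case above can be reproduced by hand. The following back-of-envelope check illustrates the expected arithmetic when half of the gas service (which holds half of the total service) is switched to boilerB; it reflects the test's expectations, not the actual implementation of calc_service_fuel_switched:

# Illustrative check of the final assertion block above
gas_service_share = 0.5                                # service_fueltype_p[1]
switched_service = 0.5 * gas_service_share             # fuel_share_switched_ey * gas share = 0.25
boiler_b = 0.5 + switched_service                      # base-year boilerB share + switched-in service = 0.75
remaining_gas = gas_service_share - switched_service   # 0.25 stays with the gas technologies
boiler_a = remaining_gas * 0.5                         # split according to fuel_tech_p_by = 0.125
boiler_c = remaining_gas * 0.5                         # 0.125
assert (boiler_a, boiler_b, boiler_c) == (0.125, 0.75, 0.125)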
Example #5
def run(results_every_year, lookups, path_plot_fig):
    """Plots

    Plot peak hour per fueltype over time for

    Arguments
    ---------
    tot_fuel_dh_peak : dict
        year, fueltype, peak_dh

    """
    # Set figure size
    fig = plt.figure(figsize=basic_plot_functions.cm2inch(14, 8))
    ax = fig.add_subplot(1, 1, 1)

    nr_y_to_plot = len(results_every_year)

    legend_entries = []

    # Initialise array (number of fueltypes, number of years to plot)
    y_init = np.zeros((lookups['fueltypes_nr'], nr_y_to_plot))

    for fueltype_str, fueltype in lookups['fueltypes'].items():
        fueltype_int = tech_related.get_fueltype_int(fueltype_str)

        # Legend
        legend_entries.append(fueltype_str)

        # Read out fueltype specific load
        data_over_years = []
        for model_year_object in results_every_year.values():

            # Sum fuel across all regions
            fuel_all_regs = np.sum(model_year_object, axis=1) # (fueltypes, 8760 hours)

            # Get peak day across all enduses for every region
            _, gw_peak_fueltyp_h = enduse_func.get_peak_day_single_fueltype(fuel_all_regs[fueltype_int])

            # Add peak hour
            data_over_years.append(gw_peak_fueltyp_h)

        y_init[fueltype] = data_over_years

    # ----------
    # Plot lines
    # ----------
    #linestyles = plotting_styles.linestyles()

    years = list(results_every_year.keys())
    for fueltype, _ in enumerate(y_init):
        plt.plot(
            years,
            y_init[fueltype],
            linewidth=0.7)

    ax.legend(
        legend_entries,
        prop={
            'family': 'arial',
            'size': 8},
        frameon=False)

    # -
    # Axis
    # -
    base_yr = 2015
    major_interval = 10
    minor_interval = 5

    # Major ticks
    major_ticks = np.arange(base_yr, years[-1] + major_interval, major_interval)
    ax.set_xticks(major_ticks)

    # Minor ticks
    minor_ticks = np.arange(base_yr, years[-1] + minor_interval, minor_interval)
    ax.set_xticks(minor_ticks, minor=True)

    plt.xlim(2015, years[-1])

    # --------
    # Labeling
    # --------
    plt.ylabel("GW")
    plt.xlabel("year")
    plt.title("ED peak hour, y, all enduses and regions")

    # Tight layout
    plt.tight_layout()
    plt.margins(x=0)

    # Save fig
    fig.savefig(path_plot_fig)
    plt.close()
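Example #6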
def run(data_input, simulation_yr_to_plot, period_h, fueltype_str, fig_name):
    """

    https://stackoverflow.com/questions/18313322/plotting-quantiles-median-and-spread-using-scipy-and-matplotlib

    https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.quantile.html

    """
    # Select period and fueltype
    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # -----------------------------------------------------------
    # Iterate over all weather_yrs and store data in dataframe
    # (columns = timestep, rows: value of year)
    # -----------------------------------------------------------
    columns = period_h  # selected hours of the 8760-hour year

    # List of selected data for every weather year (which is then converted to array)
    weather_yrs_data = []

    print("Weather yrs: " + str(list(data_input.keys())), flush=True)

    for weather_yr, data_weather_yr in data_input.items():

        # Weather year specific data
        data_input_fueltype = data_weather_yr[simulation_yr_to_plot][
            fueltype_int]  # Select fueltype
        data_input_reshape = data_input_fueltype.reshape(8760)  # reshape
        data_input_selection_hrs = data_input_reshape[
            period_h]  # select period
        weather_yrs_data.append(data_input_selection_hrs)

    weather_yrs_data = np.array(weather_yrs_data)

    # Create dataframe
    df = pd.DataFrame(weather_yrs_data, columns=columns)

    # Calculate quantiles
    quantile_95 = 0.95
    quantile_05 = 0.05

    df_q_95 = df.quantile(quantile_95)
    df_q_05 = df.quantile(quantile_05)

    #Transpose for plotting purposes
    df = df.T
    df_q_95 = df_q_95.T
    df_q_05 = df_q_05.T

    fig = plt.figure()  #(figsize = cm2inch(10,10))

    ax = fig.add_subplot(111)

    # 2015 weather year
    data_2015 = data_weather_yr[2015][fueltype_int].reshape(8760)[period_h]

    # ---------------
    # Smoothing lines
    # ---------------
    try:
        period_h_smoothed, df_q_95_smoothed = basic_plot_functions.smooth_data(
            period_h, df_q_95, num=40000)
        period_h_smoothed, df_q_05_smoothed = basic_plot_functions.smooth_data(
            period_h, df_q_05, num=40000)
        period_h_smoothed, data_2015_smoothed = basic_plot_functions.smooth_data(
            period_h, data_2015, num=40000)
    except:
        period_h_smoothed = period_h
        df_q_95_smoothed = df_q_95
        df_q_05_smoothed = df_q_05
        data_2015_smoothed = data_2015

    plt.plot(period_h_smoothed,
             data_2015_smoothed,
             color='tomato',
             linestyle='-',
             linewidth=2,
             label="2015 weather_yr")
    plt.plot(period_h_smoothed,
             df_q_05_smoothed,
             color='black',
             linestyle='--',
             linewidth=0.5,
             label="0.05")
    plt.plot(period_h_smoothed,
             df_q_95_smoothed,
             color='black',
             linestyle='--',
             linewidth=0.5,
             label="0.95")

    # -----------------
    # Uncertainty range
    # -----------------
    plt.fill_between(
        period_h_smoothed,  #x
        df_q_95_smoothed,  #y1
        df_q_05_smoothed,  #y2
        alpha=.25,
        facecolor="grey",
        label="uncertainty band")

    plt.legend(prop={
        'family': 'arial',
        'size': 10
    },
               loc='best',
               frameon=False,
               shadow=True)

    plt.show()
Example #7
def read_in_weather_results(
        path_result,
        seasons,
        model_yeardays_daytype,
        pop_data,
        fueltype_str
    ):
    """Read and post calculate results from txt files
    and store into container

    Arguments
    ---------
    path_result : str
        Paths
    seasons : dict
        seasons
    model_yeardays_daytype : dict
        Daytype of modelled yeardays
    """
    logging.info("... Reading in results")

    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    results_container = {}

    # Read in total regional demands per fueltype
    results_container['ed_reg_tot_y'] = read_data.read_results_yh(
        path_result, 'only_total')

    #print(results_container['ed_reg_tot_y'][2015].shape)
    results_container['ed_reg_peakday'] = read_data.read_results_yh(
        os.path.join('simulation_results', path_result), 'only_peak')

    #print(results_container['ed_reg_peakday'][2015].shape)
    results_container['ed_reg_peakday_peak_hour'] = {}
    results_container['ed_reg_peakday_peak_hour_per_pop'] = {}
    
    results_container['national_peak'] = {}
    results_container['regional_share_national_peak'] = {}
    results_container['regional_share_national_peak_pp'] = {}
    results_container['pp_peak_abs'] = {}
    results_container['regional_peak'] = {}
    results_container['national_all_fueltypes'] = {}
    results_container['mean_peak_day_demand'] = {}

    for year in results_container['ed_reg_peakday']:

        reg_pop_yr = pop_data[year]

        # Get peak demand of each region
        results_container['ed_reg_peakday_peak_hour'][year] = results_container['ed_reg_peakday'][year].max(axis=2)

        # Divide peak by number of population
        results_container['ed_reg_peakday_peak_hour_per_pop'][year] = results_container['ed_reg_peakday_peak_hour'][year] / reg_pop_yr

        # Get national peak
        national_demand_per_hour = results_container['ed_reg_peakday'][year].sum(axis=1) #Aggregate hourly across all regions

        # Get maximum hour for electricity demand
        max_hour = national_demand_per_hour[fueltype_int].argmax()

        results_container['national_peak'][year] = national_demand_per_hour[:, max_hour]

        # Calculate regional share of peak hour to national peak
        national_peak = results_container['national_peak'][year][fueltype_int]
        regional_peak = results_container['ed_reg_peakday'][year][fueltype_int][:, max_hour]
        
        results_container['regional_peak'][year] = regional_peak
        results_container['regional_share_national_peak'][year] = (100 / national_peak) * regional_peak #1 = 1 %
        results_container['regional_share_national_peak_pp'][year] = ((100 / national_peak) * regional_peak) /  reg_pop_yr #1 = 1 %

        # National totals per fueltype and mean demand of the peak day
        results_container['national_all_fueltypes'][year] = np.sum(results_container['ed_reg_tot_y'][year], axis=1)
        results_container['mean_peak_day_demand'][year] = np.mean(national_demand_per_hour, axis=1)

        # Calculate contribution per person towards national peak (reg_peak / people) [abs]
        #print(results_container['ed_reg_peakday'][year].shape)
        #print(reg_pop_yr.shape)
        # results_container['pp_peak_abs'][year] = (
        #    results_container['ed_reg_peakday'][year][:,:, max_hour] / reg_pop_yr)
        #(cpp = (regional peak / national peak) / people [%]

    logging.info("... Reading in results finished")
    return results_container
Example #8
def run_fig_p2_temporal_validation(
        data_input,
        weather_yr,
        fueltype_str,
        simulation_yr_to_plot,
        period_h,
        validation_elec_2015,
        non_regional_elec_2015,
        fig_name,
        titel="titel",
        y_lim_val=100,
        plot_validation=False,
        plot_show=False
    ):
    """
    """
    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # -----------------------------------------------------------
    # Iterate over all weather_yrs and store data in dataframe
    # (columns = timestep, rows: value of year)
    # -----------------------------------------------------------
    selection_hours = period_h

    # List of selected data for every weather year (which is then converted to array)
    weather_yrs_data = []
    weather_yrs_full_year = []

    for station_id, data_weather_yr in data_input[weather_yr].items():

        # Weather year specific data
        data_input_fueltype = data_weather_yr[simulation_yr_to_plot][fueltype_int]     # Select fueltype
        data_input_reshape = data_input_fueltype.reshape(8760)  # reshape
        data_input_selection_hrs = data_input_reshape[selection_hours] # select period

        weather_yrs_data.append(data_input_selection_hrs)
        weather_yrs_full_year.append(data_input_reshape)

    weather_yrs_data = np.array(weather_yrs_data)

    # Create dataframe
    df = pd.DataFrame(weather_yrs_data, columns=period_h)

    df_full_year = pd.DataFrame(weather_yrs_full_year, columns=range(8760))

    # Calculate quantiles
    quantile_95 = 0.95
    quantile_05 = 0.05

    df_q_95 = df.quantile(quantile_95)
    df_q_05 = df.quantile(quantile_05)

    #Transpose for plotting purposes
    df = df.T
    df_q_95 = df_q_95.T
    df_q_05 = df_q_05.T

    fig = plt.figure()

    ax = fig.add_subplot(111)

    # ---------------
    # Smoothing lines
    # ---------------
    try:
        period_h_smoothed, df_q_95_smoothed = basic_plot_functions.smooth_data(period_h, df_q_95, num=40000)
        period_h_smoothed, df_q_05_smoothed = basic_plot_functions.smooth_data(period_h, df_q_05, num=40000)
        #period_h_smoothed, data_2015_smoothed = basic_plot_functions.smooth_data(period_h, data_2015, num=40000)
    except:
        period_h_smoothed = period_h
        df_q_95_smoothed = df_q_95
        df_q_05_smoothed = df_q_05

    #plt.plot(period_h_smoothed, data_2015_smoothed, color='tomato', linestyle='-', linewidth=2, label="2015 weather_yr")
    plt.plot(period_h_smoothed, df_q_05_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.05")
    plt.plot(period_h_smoothed, df_q_95_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.95")

    # -----------------
    # Uncertainty range
    # -----------------
    plt.fill_between(
        period_h_smoothed, #x
        df_q_95_smoothed,  #y1
        df_q_05_smoothed,  #y2
        alpha=1,
        facecolor="lightgrey",
        label="uncertainty band")

    # -----------------
    # All weather stations are used for this data
    # -----------------
    all_weather_stations_2015 = non_regional_elec_2015.reshape(8760)[period_h] 

    # -----------------
    # Validation data
    # -----------------
    if plot_validation:
        validation_2015 = validation_elec_2015.reshape(8760)[selection_hours] 

        try:
            period_h_smoothed, validation_2015_smoothed = basic_plot_functions.smooth_data(period_h, validation_2015, num=40000)
        except:
            period_h_smoothed = period_h
            validation_2015_smoothed = validation_2015

        plt.plot(
            period_h_smoothed,
            validation_2015_smoothed,
            color='green',
            linestyle='--',
            linewidth=1.5, label="validation 2015")

        # -----------
        # statistics
        # -----------
        # Calculate mean of all all single station runs
        mean_all_single_runs = df_full_year.mean(axis=0).tolist()
        mean_all_single_runs = np.array(mean_all_single_runs)[period_h]

        slope, intercept, r_value, p_value, std_err = stats.linregress(
            validation_2015,
            all_weather_stations_2015)
        slope, intercept, r_value2, p_value, std_err = stats.linregress(
            validation_2015,
            mean_all_single_runs)

        print("R_Value_all_stations: " + str(r_value))
        print("R_Value_single_stations: " + str(r_value2))
        plt.title(
            "R_value: all stations: {} mean_single_weather_stations: {}".format(
                round(r_value, 2),
                round(r_value2, 2)))
    
    try:
        period_h_smoothed, all_weather_stations_2015_smoothed = basic_plot_functions.smooth_data(
            period_h, all_weather_stations_2015, num=40000)
    except:
        period_h_smoothed = period_h
        all_weather_stations_2015_smoothed = all_weather_stations_2015

    plt.plot(
        period_h_smoothed,
        all_weather_stations_2015_smoothed,
        color='blue',
        linestyle='--',
        linewidth=1.2,
        label="all stations")

    # Ticks for single day
    #major_ticks_days, major_ticks_labels = get_date_strings(
    #    days_to_plot,
    #    daystep=1)
    #plt.xticks(major_ticks_days, major_ticks_labels)
    zero_entry = period_h[0]
    plt.xticks([0 + zero_entry, 5 + zero_entry, 11 + zero_entry, 17 + zero_entry, 23 + zero_entry], ['1', '6', '12', '18', '24'])

    plt.ylim(0, y_lim_val)
    plt.xlim(period_h[0], period_h[-1])
    plt.title("Peak day: " + str(titel))
    plt.legend(
        prop={
            'family':'arial',
            'size': 10},
        loc='best',
        frameon=False,
        shadow=True)

    if plot_show:
        plt.show()
    plt.savefig(fig_name)
    plt.close()
Example #9
def run(data_input, regions, simulation_yr_to_plot, population, fueltype_str,
        path_shapefile, fig_name):
    """
    """
    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # -----------------------------------------------------------
    # Iterate over all weather_yrs and store data in dataframe
    # (columns = timestep, rows: value of year)
    # -----------------------------------------------------------

    # List of selected data for every weather year and region (which is then converted to array)
    weather_yrs_data = defaultdict(dict)

    print("Weather yrs: " + str(list(data_input.keys())), flush=True)

    for weather_yr, data_weather_yr in data_input.items():

        # Weather year specific data for every region
        regions_fuel = data_weather_yr[simulation_yr_to_plot][
            fueltype_int]  # Select fueltype

        for region_nr, region_name in enumerate(regions):
            try:
                weather_yrs_data[region_name].append(regions_fuel[region_nr])
            except (KeyError, AttributeError):
                weather_yrs_data[region_name] = [regions_fuel[region_nr]]

    regional_statistics_columns = [
        'name', 'mean_peak_h', 'diff_av_max', 'mean_peak_h_pp',
        'diff_av_max_pp', 'std_dev_average_every_h', 'std_dev_peak_h_norm_pop'
    ]

    df_stats = pd.DataFrame(columns=regional_statistics_columns)

    for region_name, region_data in weather_yrs_data.items():

        # Convert regional data to dataframe
        region_data_array = np.array(region_data)
        df = pd.DataFrame(region_data_array, columns=range(8760))

        # Calculate regional statistics
        mean = df.mean(axis=0)
        std_dev = df.std(axis=0)  #standard deviation across every hour

        # Get maximum per column
        #max_every_h = df.max()
        #column_max_h = max_every_h.argmax() #get column (respectively hour) of maximum value

        # Average standard deviation across every hour
        std_dev_average_every_h = np.std(list(std_dev))

        max_entry = df.max(axis=0)  #maximum entry for every hour
        min_entry = df.min(axis=0)  #minimum entry for every hour

        # Get hour number with maximum demand
        hour_nr_max = max_entry.argmax()
        hour_nr_min = min_entry.argmin()

        # standard deviation of peak hour
        std_dev_peak_h = std_dev[hour_nr_max]

        # Difference between average and max
        diff_av_max = max_entry[hour_nr_max] - mean[hour_nr_max]
        mean_peak_h = mean[hour_nr_max]

        # Convert GW to KW
        diff_av_max = diff_av_max * 1000000  #GW to KW
        mean_peak_h = mean_peak_h * 1000000  #GW to KW

        # Weight with population
        for region_nr, n in enumerate(regions):
            if region_name == n:
                nr_of_reg = region_nr
                break
        pop = population[nr_of_reg]

        # Divide standard deviation of peak hour by population
        # which gives measure of weather variability in peak hour
        std_dev_peak_h_norm_pop = std_dev_peak_h / pop

        diff_av_max_pp = diff_av_max / pop
        mean_peak_h_pp = mean_peak_h / pop

        line_entry = [[
            str(region_name), mean_peak_h, diff_av_max, mean_peak_h_pp,
            diff_av_max_pp, std_dev_average_every_h, std_dev_peak_h_norm_pop
        ]]

        line_df = pd.DataFrame(line_entry, columns=regional_statistics_columns)

        df_stats = df_stats.append(line_df)

    print(df_stats['diff_av_max'].max())
    print(df_stats['mean_peak_h'].max())
    print(df_stats['std_dev_peak_h_norm_pop'].max())
    print("-")
    print(df_stats['diff_av_max_pp'].max())
    print(df_stats['diff_av_max_pp'].min())
    print("-")
    print(df_stats['mean_peak_h_pp'].max())
    print(df_stats['mean_peak_h_pp'].min())
    # ---------------
    # Create spatial maps
    # http://darribas.org/gds15/content/labs/lab_03.html
    # http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
    # ---------------
    # Load uk shapefile
    uk_shapefile = gpd.read_file(path_shapefile)

    # Merge stats to geopanda
    shp_gdp_merged = uk_shapefile.merge(df_stats, on='name')

    # Assign projection
    crs = {'init': 'epsg:27700'}  #27700: OSGB_1936_British_National_Grid
    uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)

    ax = uk_gdf.plot()

    # Assign bin colors according to defined cmap and whether
    # plot with min_max values or only min/max values
    #bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
    #bin_values = [0, 0.02, 0.04, 0.06, 0.08, 0.1] #list(np.arange(0.0, 1.0, 0.1))

    # Field to plot
    field_to_plot = "diff_av_max_pp"  # Difference between average and peak per person in KWh
    #field_to_plot = "diff_av_max"    # Difference between average and peak
    field_to_plot = 'std_dev_peak_h_norm_pop'

    nr_of_intervals = 6

    bin_values = result_mapping.get_reasonable_bin_values_II(
        data_to_plot=list(uk_gdf[field_to_plot]),
        nr_of_intervals=nr_of_intervals)
    print(uk_gdf[field_to_plot].values)
    print("BINS " + str(bin_values))

    uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = user_defined_bin_classification(
        uk_gdf, field_to_plot, bin_values=bin_values)

    # plot with face color attribute
    uk_gdf.plot(ax=ax,
                facecolor=uk_gdf['bin_color'],
                edgecolor='black',
                linewidth=0.5)

    #shp_gdp_merged.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
    #ax = uk_gdf.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
    #uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
    #uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')

    # Get legend patches (TODO: improve and make correct for new processing)
    legend_handles = result_mapping.get_legend_handles(bin_values[1:-1],
                                                       cmap_rgb_colors,
                                                       color_zero, min_value,
                                                       max_value)

    plt.legend(handles=legend_handles,
               title="tittel_elgend",
               prop={'size': 8},
               loc='upper center',
               bbox_to_anchor=(0.5, -0.05),
               frameon=False)

    # PLot bins on plot
    plt.text(
        0,
        -20,
        bin_values[:-1],  #leave away maximum value
        fontsize=8)

    plt.tight_layout()
    plt.show()
    plt.savefig(fig_name)
    plt.close()
Example #10
        # ----------------------------------
        container_all_initialisations = []
        for initialization in all_realizations:

            path_sim_yr = os.path.join(
                path_results, scenario, initialization, "simulation_results",
                "model_run_results_txt", "only_fueltype_reg_8760",
                "fueltype_reg_8760__{}.npy".format(simulation_yr))
            print("       ... loading scenario: {}".format(initialization),
                  flush=True)

            full_result = np.load(path_sim_yr)
            container_all_initialisations.append(full_result)

            # Check peak
            fueltype_int = tech_related.get_fueltype_int('electricity')
            national_hourly_demand = np.sum(full_result[fueltype_int], axis=0)
            peak_day_electricity, _ = enduse_func.get_peak_day_single_fueltype(
                national_hourly_demand)
            selected_hours = date_prop.convert_yearday_to_8760h_selection(
                peak_day_electricity)
            print("PEAK electricity: " +
                  str(np.max(national_hourly_demand[selected_hours])))

        for fueltype_str in fueltypes:
            print("         ...fueltype {}".format(fueltype_str), flush=True)
            fueltype_int = tech_related.get_fueltype_int(fueltype_str)

            # --------
            # Calculate
            # --------
Example #11
def disaggr_demand(data, crit_temp_min_max, spatial_calibration=False):
    """Disaggregated demand

    Arguments
    ----------
    data : dict
        Data

    Returns
    --------
    disagg : dict
        Disaggregated energy demand
    spatial_calibration : bool
        Criteria wheter base year data should be used to
        calibrate spatial disaggregation (non-residential demand)
    """
    disagg = {}

    # ===========================================
    # I. Disaggregation
    # ===========================================
    base_yr = data['assumptions'].base_yr

    # Load data for disaggregation
    data['scenario_data']['employment_stats'] = data_loader.read_employment_stats(
        data['paths']['path_employment_statistics'])

    # Disaggregate fuel for all regions
    disagg['residential'], disagg['service'], disagg['industry'] = disaggregate_base_demand(
        data['pop_for_disag'],
        data['regions'],
        data['fuels'],
        data['scenario_data'],
        data['assumptions'],
        data['reg_coord'],
        data['weather_stations'], # Base year data used to disaggregate demand
        data['temp_data'][base_yr], # Base year data used to disaggregate demand
        data['sectors'],
        data['enduses'],
        data['service_building_count'],
        crit_temp_min_max)

    if spatial_calibration:
        '''The spatial disaggregation of non-residential demand
        can be calibrated for gas and electricity based on actual
        measured demand data

        Note: All other fueltypes are not scaled
        '''
        calibrate_residential = False # Calibrate residential demands
        calibrate_non_residential = True # Calibrate non residential demands

        # Non-residential regional demands of base year for electricity and gas
        fueltype_elec = tech_related.get_fueltype_int('electricity')
        fueltype_gas = tech_related.get_fueltype_int('gas')

        if calibrate_non_residential:

            valid_non_resid_elec = data_loader.read_lad_demands(
                data['paths']['val_subnational_elec_non_residential'])

            valid_non_resid_gas = data_loader.read_lad_demands(
                data['paths']['val_subnational_gas_non_residential'])

            # Calculate and apply regional calibration factor
            for region in data['regions']:

                # Total modelled non-residential demand
                service_demand_elec = 0
                service_demand_gas = 0
                for enduse_data in disagg['service'][region].values():
                    for sector_data in enduse_data.values():
                        service_demand_elec += np.sum(sector_data[fueltype_elec])
                        service_demand_gas += np.sum(sector_data[fueltype_gas])

                industry_demand_elec = 0
                industry_demand_gas = 0
                for enduse_data in disagg['industry'][region].values():
                    for sector_data in enduse_data.values():
                        industry_demand_elec += np.sum(sector_data[fueltype_elec])
                        industry_demand_gas += np.sum(sector_data[fueltype_gas])

                modelled_elec = service_demand_elec + industry_demand_elec
                modelled_gas = service_demand_gas + industry_demand_gas

                # Calculate calibration factor
                try:
                    f_spatial_calibration_elec = valid_non_resid_elec[region] / modelled_elec
                except KeyError: # No real data available
                    f_spatial_calibration_elec = 1

                try:
                    f_spatial_calibration_gas = valid_non_resid_gas[region] / modelled_gas
                except KeyError: # No real data available
                    f_spatial_calibration_gas = 1

                # Apply calibration factor to spatial disaggregation
                for enduse in disagg['service'][region].keys():
                    for sector in disagg['service'][region][enduse]:
                        disagg['service'][region][enduse][sector][fueltype_elec] *= f_spatial_calibration_elec
                        disagg['service'][region][enduse][sector][fueltype_gas] *= f_spatial_calibration_gas
                for enduse in disagg['industry'][region].keys():
                    for sector in disagg['industry'][region][enduse]:
                        disagg['industry'][region][enduse][sector][fueltype_elec] *= f_spatial_calibration_elec
                        disagg['industry'][region][enduse][sector][fueltype_gas] *= f_spatial_calibration_gas

        if calibrate_residential:

            valid_resid_elec = data_loader.read_lad_demands(
                data['paths']['val_subnational_elec_residential'])
            valid_resid_gas = data_loader.read_lad_demands(
                data['paths']['val_subnational_gas_residential'])

            # Calculate and apply regional calibration factor
            for region in data['regions']:

                # Total modelled residential demand
                modelled_elec = 0
                modelled_gas = 0
                for enduse_data in disagg['residential'][region].values():
                    modelled_elec += np.sum(enduse_data[fueltype_elec])
                    modelled_gas += np.sum(enduse_data[fueltype_gas])

                # Calculate calibration factor
                try:
                    real_elec = valid_resid_elec[region]
                    f_spatial_calibration_elec = real_elec / modelled_elec
                    real_gas = valid_resid_gas[region]
                    f_spatial_calibration_gas = real_gas / modelled_gas
                except KeyError:
                    # No real data available
                    f_spatial_calibration_elec = 1
                    f_spatial_calibration_gas = 1

                # Apply calibration factor to spatial disaggregation
                for enduse in disagg['residential'][region].keys():
                    disagg['residential'][region][enduse][fueltype_elec] *= f_spatial_calibration_elec
                    disagg['residential'][region][enduse][fueltype_gas] *= f_spatial_calibration_gas

    # Sum demand across all sectors for every region
    disagg['ss_fuel_disagg_sum_all_sectors'] = init_scripts.sum_across_sectors_all_regs(
        disagg['service'])

    disagg['is_aggr_fuel_sum_all_sectors'] = init_scripts.sum_across_sectors_all_regs(
        disagg['industry'])

    # Sum demand across all submodels and sectors for every region
    disagg['tot_disaggregated_regs'] = init_scripts.sum_across_all_submodels_regs(
        data['regions'],
        [disagg['residential'], disagg['service'], disagg['industry']])

    disagg['tot_disaggregated_regs_residenital'] = init_scripts.sum_across_all_submodels_regs(
        data['regions'],
        [disagg['residential']])

    disagg['tot_disaggregated_regs_non_residential'] = init_scripts.sum_across_all_submodels_regs(
        data['regions'],
        [disagg['service'], disagg['industry']])

    return disagg
Example #12
def main(
        scenarios_path,
        path_shapefile_input,
        base_yr,
        simulation_yrs_to_plot
    ):
    """Read in all results and plot PDFs

    Arguments
    ----------
    scenarios_path : str
        Path to results
    path_shapefile_input : str
        Path to shapefile
    plot_crit_dict : dict
        Criteria to select plots to plot
    base_yr : int
        Base year
    comparison_year : int
        Year to generate comparison plots
    """
    print("Start creating plots")

    # -------------------
    # Create temperature figure plot
    # -------------------
    plot_weather_chart = False
    if plot_weather_chart:
        from energy_demand.plotting import fig3_weather_at_home_plot
        path_weather_data = "//linux-filestore.ouce.ox.ac.uk/mistral/nismod/data/energy_demand/J-MARIUS_data/_weather_realisation"
        fig3_weather_at_home_plot.plotting_weather_data(path_weather_data)

    # -------------------
    # Create result folder
    # -------------------
    result_path = os.path.join(scenarios_path, '_results_weather_plots')
    basic_functions.del_previous_setup(result_path)
    basic_functions.create_folder(result_path)

    x_chart_yrs_storage = {}

    for simulation_yr_to_plot in simulation_yrs_to_plot:
        print("=================")
        print("...simulation_yr_to_plot: " + str(simulation_yr_to_plot))
        print("=================")
        data = {}
        x_chart_yrs_storage[simulation_yr_to_plot] = {}

        # ---------------------------------------------------------
        # Iterate folders and read out all weather years and stations
        # ---------------------------------------------------------
        to_ignores = [
            'model_run_pop',
            'PDF_validation',
            '_results_weather_plots']

        endings_to_ignore = [
            '.pdf',
            '.txt',
            '.ini']

        all_scenarios_incl_ignored = os.listdir(scenarios_path)
        all_scenarios = []
        for scenario in all_scenarios_incl_ignored:
            if scenario not in to_ignores:
                all_scenarios.append(scenario)

        scenario_result_container = []
        for scenario_nr, scenario_name in enumerate(all_scenarios):
            print(" ")
            print("Scenario: {}".format(scenario_name))
            print(" ")
            scenario_path = os.path.join(scenarios_path, scenario_name)
            all_result_folders = os.listdir(scenario_path)
            paths_folders_result = []

            for result_folder in all_result_folders:
                if result_folder not in to_ignores and result_folder[-4:] not in endings_to_ignore:
                    paths_folders_result.append(
                        os.path.join(scenario_path, result_folder))

            fueltype_str_to_create_maps = ['electricity']

            fueltype_str = 'electricity'
            fueltype_elec_int = tech_related.get_fueltype_int('electricity')

            ####################################################################
            # Collect regional simulation data for every realisation
            ####################################################################
            total_regional_demand_electricity = pd.DataFrame()
            peak_hour_demand = pd.DataFrame()
            peak_hour_demand_per_person = pd.DataFrame()
            national_peak = pd.DataFrame()
            regional_share_national_peak = pd.DataFrame()
            regional_share_national_peak_pp = pd.DataFrame()
            national_electricity = pd.DataFrame()
            national_gas = pd.DataFrame()
            national_hydrogen = pd.DataFrame()
            national_heating_peak = pd.DataFrame()
            daily_mean_peak_day = pd.DataFrame()

            for path_result_folder in paths_folders_result:
                print("... path_result_folder: {}".format(path_result_folder))
                data = {}
                ed_national_heating_peak = {}

                try:
                    # ================================
                    # Loading in only heating peak demands (separate calculations)
                    # ================================
                    
                    # Simulation information is read in from .ini file for results
                    data['enduses'], data['assumptions'], data['regions'] = data_loader.load_ini_param(os.path.join(path_result_folder))
                    pop_data = read_data.read_scenaric_population_data(os.path.join(path_result_folder, 'model_run_pop'))
                    path_result_folder_heating = os.path.join(path_result_folder, 'simulation_results')
                    path_result_folder_model_runs = os.path.join(path_result_folder_heating, 'model_run_results_txt')
                    data['lookups'] = lookup_tables.basic_lookups()

                    # Read in heating demands
                    path_heating_demands = os.path.join(path_result_folder_model_runs, 'enduse_specific_results')
                    all_files = os.listdir(path_heating_demands)
                    for file_name in all_files:
                        
                        ending = file_name[-4:]
                        if ending == ".npy":
                            year = int(file_name.split("__")[2][:-4])
                            file_path = os.path.join(path_heating_demands, file_name)
                            heating_demand = np.load(file_path)
                            maximum_hour_of_peak_day = heating_demand[fueltype_elec_int].argmax() #get maximum hour of peak day
                            ed_national_heating_peak[year] = heating_demand[fueltype_elec_int][maximum_hour_of_peak_day]

                    simulation_yrs_result = [ed_national_heating_peak[year] for year in simulation_yrs_to_plot]
                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    national_heating_peak = national_heating_peak.append(realisation_data)

                except:
                    raise Exception("... no heating peak data available " + str(path_result_folder))

                try:
                    # Simulation information is read in from .ini file for results
                    data['enduses'], data['assumptions'], data['regions'] = data_loader.load_ini_param(os.path.join(path_result_folder))
                    pop_data = read_data.read_scenaric_population_data(os.path.join(path_result_folder, 'model_run_pop'))
                    path_result_folder = os.path.join(path_result_folder, 'simulation_results')
                    path_result_folder_model_runs = os.path.join(path_result_folder, 'model_run_results_txt')
                    data['lookups'] = lookup_tables.basic_lookups()

                    # Other information is read in
                    data['assumptions']['seasons'] = date_prop.get_season(year_to_model=2015)
                    data['assumptions']['model_yeardays_daytype'], data['assumptions']['yeardays_month'], data['assumptions']['yeardays_month_days'] = date_prop.get_yeardays_daytype(year_to_model=2015)

                    # --------------------------------------------
                    # Reading in results from different model runs
                    # --------------------------------------------
                    results_container = read_weather_results.read_in_weather_results(
                        path_result_folder_model_runs,
                        data['assumptions']['seasons'],
                        data['assumptions']['model_yeardays_daytype'],
                        pop_data,
                        fueltype_str='electricity')

                    # --Total demand (dataframe with row: realisation, column=region)
                    realisation_data = pd.DataFrame(
                        [results_container['ed_reg_tot_y'][simulation_yr_to_plot][fueltype_elec_int]],
                        columns=data['regions'])
                    total_regional_demand_electricity = total_regional_demand_electricity.append(realisation_data)

                    # National per fueltype electricity
                    simulation_yrs_result = [results_container['national_all_fueltypes'][year][fueltype_elec_int] for year in simulation_yrs_to_plot]

                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    national_electricity = national_electricity.append(realisation_data)

                    # National per fueltype gas
                    fueltype_gas_int = tech_related.get_fueltype_int('gas')
                    simulation_yrs_result = [results_container['national_all_fueltypes'][year][fueltype_gas_int] for year in simulation_yrs_to_plot]

                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    national_gas = national_gas.append(realisation_data)

                    # National per fueltype hydrogen
                    fueltype_hydrogen_int = tech_related.get_fueltype_int('hydrogen')
                    simulation_yrs_result = [results_container['national_all_fueltypes'][year][fueltype_hydrogen_int] for year in simulation_yrs_to_plot]

                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    national_hydrogen = national_hydrogen.append(realisation_data)

                    # --Peak hour demand per region (dataframe with row: realisation, column=region)
                    realisation_data = pd.DataFrame(
                        [results_container['ed_reg_peakday_peak_hour'][simulation_yr_to_plot][fueltype_elec_int]],
                        columns=data['regions'])

                    peak_hour_demand = peak_hour_demand.append(realisation_data)

                    # --Peak hour demand / pop per region (dataframe with row: realisation, column=region)
                    realisation_data = pd.DataFrame(
                        [results_container['ed_reg_peakday_peak_hour_per_pop'][simulation_yr_to_plot][fueltype_elec_int]],
                        columns=data['regions'])

                    peak_hour_demand_per_person = peak_hour_demand_per_person.append(realisation_data)

                    # --National peak
                    simulation_yrs_result = [results_container['national_peak'][year][fueltype_elec_int] for year in simulation_yrs_to_plot]

                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    national_peak = national_peak.append(realisation_data)

                    # --Regional percentage of national peak demand
                    realisation_data = pd.DataFrame(
                        [results_container['regional_share_national_peak'][simulation_yr_to_plot]],
                        columns=data['regions'])
                    regional_share_national_peak = regional_share_national_peak.append(realisation_data)

                    # --Regional percentage of national peak demand per person
                    realisation_data = pd.DataFrame(
                        [results_container['regional_share_national_peak_pp'][simulation_yr_to_plot]],
                        columns=data['regions'])

                    regional_share_national_peak_pp = regional_share_national_peak_pp.append(realisation_data)

                    # Mean demand of peak day
                    simulation_yrs_result = [results_container['mean_peak_day_demand'][year][fueltype_elec_int] for year in simulation_yrs_to_plot]
                    realisation_data = pd.DataFrame(
                        [simulation_yrs_result],
                        columns=data['assumptions']['sim_yrs'])
                    daily_mean_peak_day = daily_mean_peak_day.append(realisation_data)
                except:
                    raise Exception("The run '{}' is corrupted".format(path_result_folder))

            # Add to scenario container
            result_entry = {
                'national_heating_peak': national_heating_peak,
                'scenario_name': scenario_name,
                'peak_hour_demand': peak_hour_demand,
                'peak_hour_demand_per_person': peak_hour_demand_per_person,
                'national_peak': national_peak,
                'regional_share_national_peak': regional_share_national_peak,
                'regional_share_national_peak_pp': regional_share_national_peak_pp,
                'total_regional_demand_electricity': total_regional_demand_electricity,
                'national_electricity': national_electricity,
                'national_gas': national_gas,
                'national_hydrogen': national_hydrogen,
                'daily_mean_peak_day': daily_mean_peak_day}

            scenario_result_container.append(result_entry)

            # ---------------------------------------------------------------
            # TEST PLOT X-axis: Contribution to peak y-axis: Std: deviation
            # ---------------------------------------------------------------
            x_chart_yrs_storage[simulation_yr_to_plot][scenario_name] = result_entry

        # ------------------------------
        # Plot national sum over time per fueltype and scenario
        # ------------------------------
        crit_smooth_line = False
        seperate_legend = True

        try:
            print("... plotting national sum of fueltype over time ")
            fig_3_plot_over_time.fueltypes_over_time(
                scenario_result_container=scenario_result_container,
                sim_yrs=data['assumptions']['sim_yrs'],
                fig_name="fueltypes_over_time__{}__{}.pdf".format(simulation_yr_to_plot, fueltype_str),
                fueltypes=['electricity', 'gas', 'hydrogen'],
                result_path=result_path,
                unit='TWh',
                plot_points=True,
                crit_smooth_line=crit_smooth_line,
                seperate_legend=seperate_legend)
        except:
            raise Exception("FAILS national sum")

        # ------------------------------
        # Plot national peak change over time for each scenario including weather variability
        # ------------------------------
        try:
            fig_3_plot_over_time.scenario_over_time(
                scenario_result_container=scenario_result_container,
                field_name='national_peak',
                sim_yrs=data['assumptions']['sim_yrs'],
                fig_name="scenarios_peak_over_time__{}__{}.pdf".format(simulation_yr_to_plot, fueltype_str),
                plot_points=True,
                result_path=result_path,
                crit_smooth_line=crit_smooth_line,
                seperate_legend=seperate_legend)
        except:
            raise Exception("FAILED")

        # ------------------------------
        # Plot heating peak change over time for each scenario including weather variability
        # ------------------------------
        try:
            fig_3_plot_over_time.scenario_over_time(
                scenario_result_container=scenario_result_container,
                field_name='national_heating_peak',
                sim_yrs=data['assumptions']['sim_yrs'],
                fig_name="scenarios_heating_peak_over_time__{}__{}.pdf".format(simulation_yr_to_plot, fueltype_str),
                plot_points=True,
                result_path=result_path,
                crit_smooth_line=crit_smooth_line,
                seperate_legend=seperate_legend)
        except:
            raise Exception("FAILED")

        # ------------------------------
        # plot PEAK DAY mean
        # ------------------------------
        try:
            fig_3_plot_over_time.scenario_over_time(
                scenario_result_container=scenario_result_container,
                field_name='daily_mean_peak_day',
                sim_yrs=data['assumptions']['sim_yrs'],
                fig_name="mean_demand_of_peak_day{}__{}.pdf".format(simulation_yr_to_plot, fueltype_str),
                plot_points=True,
                result_path=result_path,
                crit_smooth_line=crit_smooth_line,
                seperate_legend=seperate_legend)
        except:
            raise Exception("FAILED")

    ## ------------------------------
    ## Plotting x-chart
    ## ------------------------------
    fig_3_plot_over_time.plot_std_dev_vs_contribution(
        scenario_result_container=x_chart_yrs_storage,
        sim_yrs=data['assumptions']['sim_yrs'],
        fig_name="_scenarios_4_chart_absolute.pdf",
        fueltypes=['electricity'],
        result_path=result_path,
        path_shapefile_input=path_shapefile_input,
        unit='TWh',
        plot_points=True)

    print("===================================")
    print("... finished reading and plotting results")
    print("===================================")
def plot_spatial_validation(simulation_yr_to_plot,
                            non_regional_modelled,
                            regional_modelled,
                            subnational_real,
                            regions,
                            fueltype_str,
                            fig_path,
                            label_points=False,
                            plotshow=False):
    """Plot spatial validation of modelled vs. real sub-national demand
    """
    result_dict = {}
    result_dict['real_demand'] = {}
    result_dict['modelled_demand'] = {}
    result_dict['modelled_demands_regional'] = {}

    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # -------------------------------------------
    # Match ECUK sub-regional demand with geocode
    # -------------------------------------------
    for region_nr, region in enumerate(regions):
        try:
            # --Sub Regional Electricity demand (as GWh)
            real = subnational_real[region]
            modelled = non_regional_modelled[region]
            result_dict['real_demand'][region] = real
            result_dict['modelled_demand'][region] = modelled

        except KeyError:
            logging.debug(
                "Sub-national spatial validation: No fuel for region %s",
                region)

        # Do this for every weather station data
        for weather_station in regional_modelled:
            try:
                _reg_demand = regional_modelled[weather_station][
                    simulation_yr_to_plot][fueltype_int][region_nr]
                result_dict['modelled_demands_regional'][region].append(
                    _reg_demand)
            except KeyError:
                _reg_demand = regional_modelled[weather_station][
                    simulation_yr_to_plot][fueltype_int][region_nr]
                result_dict['modelled_demands_regional'][region] = [
                    _reg_demand
                ]

    # --------------------
    # Calculate statistics
    # --------------------
    diff_real_modelled_p = []
    diff_real_modelled_abs = []

    for region in regions:
        try:
            real = result_dict['real_demand'][region]
            modelled = result_dict['modelled_demand'][region]
            diff_real_modelled_p.append(abs(100 - ((100 / real) * modelled)))
            diff_real_modelled_abs.append(real - modelled)
        except KeyError:
            pass

    # Calculate the average deviation between real and modelled
    av_deviation_real_modelled = np.average(diff_real_modelled_p)
    median_absolute_deviation = np.median(
        diff_real_modelled_p)  # median deviation

    # Calculate standard deviation
    std_dev_p = np.std(diff_real_modelled_p)  # Given as percent
    std_dev_abs = np.std(diff_real_modelled_abs)  # Given as energy unit
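    # Worked example (hypothetical numbers): real = 100 GWh, modelled = 90 GWh
    #   percentage deviation: abs(100 - (100 / 100) * 90) = 10 %
    #   absolute deviation:   100 - 90 = 10 GWh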

    # -----------------
    # Sort results according to size
    # -----------------
    sorted_dict_real = sorted(result_dict['real_demand'].items(),
                              key=operator.itemgetter(1))

    # -------------------------------------
    # Plot
    # -------------------------------------
    fig = plt.figure(figsize=basic_plot_functions.cm2inch(9, 8))
    ax = fig.add_subplot(1, 1, 1)
    x_values = np.arange(0, len(sorted_dict_real), 1)
    y_real_demand = []
    y_modelled_demand = []
    y_modelled_demands_non_regional = []
    labels = []

    for sorted_region in sorted_dict_real:
        geocode_lad = sorted_region[0]
        y_real_demand.append(result_dict['real_demand'][geocode_lad])
        y_modelled_demand.append(result_dict['modelled_demand'][geocode_lad])
        y_modelled_demands_non_regional.append(
            result_dict['modelled_demands_regional'][geocode_lad])

        print(
            "validation {} LAD {}: real: {} modelled: {}  modelled percentage: {} ({}p diff)".format(
                fueltype_str, geocode_lad,
                round(result_dict['real_demand'][geocode_lad], 4),
                round(result_dict['modelled_demand'][geocode_lad], 4),
                round(
                    100 / result_dict['real_demand'][geocode_lad] *
                    result_dict['modelled_demand'][geocode_lad], 4),
                round(
                    100 - (100 / result_dict['real_demand'][geocode_lad] *
                           result_dict['modelled_demand'][geocode_lad]), 4)))

        labels.append(geocode_lad)

    # Calculate r_squared
    _slope, _intercept, r_value, _p_value, _std_err = stats.linregress(
        y_real_demand, y_modelled_demand)
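    # Note: stats.linregress returns the correlation coefficient r;
    # the coefficient of determination would be r_value ** 2.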

    # --------
    # Axis
    # --------
    plt.tick_params(
        axis='x',           # changes apply to the x-axis
        which='both',       # both major and minor ticks are affected
        bottom=False,       # ticks along the bottom edge are off
        top=False,          # ticks along the top edge are off
        labelbottom=False)  # labels along the bottom edge are off

    # ----------------------------------------------
    # Plot
    # ----------------------------------------------
    markersize = 3
    markeredgewidth = 0
    linewidth = 2
    color_real = 'black'
    color_all_stations = 'green'
    color_single_station = 'red'
    plt.plot(x_values,
             y_real_demand,
             linestyle='-',
             marker='o',
             alpha=0.6,
             markersize=markersize,
             fillstyle='full',
             markerfacecolor='black',
             markeredgewidth=markeredgewidth,
             color=color_real,
             label='actual demand')

    plt.plot(x_values,
             y_modelled_demand,
             marker='o',
             linestyle='-',
             markersize=markersize,
             alpha=0.6,
             markerfacecolor='blue',
             fillstyle='none',
             markeredgewidth=markeredgewidth,
             markeredgecolor='blue',
             color=color_all_stations,
             label='modelled using all stations')

    # Demands calculated only from one weather station
    station_nr = 0
    nr_of_stations = len(y_modelled_demands_non_regional[0])
    station_vals = []
    for region_vals in y_modelled_demands_non_regional:
        station_vals.append(region_vals[station_nr])

    plt.plot(x_values,
             station_vals,
             marker='o',
             linestyle='-',
             linewidth=linewidth,
             markersize=markersize,
             alpha=0.6,
             markerfacecolor='green',
             fillstyle='none',
             markeredgewidth=markeredgewidth,
             markeredgecolor='green',
             color=color_single_station,
             label='modelled using only a single station')
    '''for i in range(nr_of_stations):
        station_data = []
        for reg_nr in range(nr_of_regions):
            station_data.append(y_modelled_demands_non_regional[reg_nr][i])

        plt.plot(
            x_values,
            station_data, #y_modelled_demands_non_regional,
            marker='o',
            linestyle='None',
            markersize=1.6,
            alpha=0.6,
            markerfacecolor='white',
            fillstyle='none',
            markeredgewidth=0.5,
            markeredgecolor='orange',
            color='black',
            label='model')'''
    '''# ------------
    # Collect all values per weather_yr
    list_with_station_vals = []
    for station_i in range(nr_of_stations):
        station_vals = []
        for region_vals in y_modelled_demands_non_regional:
            station_vals.append(region_vals[station_i])
        list_with_station_vals.append(station_vals)

    df = pd.DataFrame(
        list_with_station_vals,
        columns=range(nr_of_regions)) #note not region_rn as ordered

    period_h = range(nr_of_regions)

    quantile_95 = 0.95
    quantile_05 = 0.05

    df_q_95 = df.quantile(quantile_95)
    df_q_05 = df.quantile(quantile_05)

    #Transpose for plotting purposes
    df = df.T
    df_q_95 = df_q_95.T
    df_q_05 = df_q_05.T

    # ---------------
    # Smoothing lines
    # ---------------
    try:
        period_h_smoothed, df_q_95_smoothed = basic_plot_functions.smooth_data(period_h, df_q_95, num=40000)
        period_h_smoothed, df_q_05_smoothed = basic_plot_functions.smooth_data(period_h, df_q_05, num=40000)
    except:
        period_h_smoothed = period_h
        df_q_95_smoothed = df_q_95
        df_q_05_smoothed = df_q_05
    #plt.plot(period_h_smoothed, df_q_05_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.05")
    #plt.plot(period_h_smoothed, df_q_95_smoothed, color='black', linestyle='--', linewidth=0.5, label="0.95")

    # -----------------
    # Uncertainty range
    # -----------------
    plt.fill_between(
        period_h_smoothed, #x
        df_q_95_smoothed,  #y1
        df_q_05_smoothed,  #y2
        alpha=.40,
        facecolor="grey",
        label="uncertainty band")
    # -----------'''

    # Limit
    plt.ylim(ymin=0)

    # -----------
    # Labelling
    # -----------
    if label_points:
        for pos, txt in enumerate(labels):

            ax.text(x_values[pos],
                    y_modelled_demand[pos],
                    txt,
                    horizontalalignment="right",
                    verticalalignment="top",
                    fontsize=1)

    font_additional_info = plotting_styles.font_info(size=4)

    title_info = (
        'R_2: {}, std_%: {} (GWh {}), av_diff_%: {} median_abs_dev: {}'.format(
            round(r_value, 2), round(std_dev_p, 2), round(std_dev_abs, 2),
            round(av_deviation_real_modelled, 2),
            round(median_absolute_deviation, 2)))

    plt.title(title_info, loc='left', fontdict=font_additional_info)

    plt.xlabel("UK regions (excluding northern ireland)")
    plt.ylabel("{} [GWh]".format(fueltype_str))

    # --------
    # Legend
    # --------
    plt.legend(prop={'family': 'arial', 'size': 8}, frameon=False)

    # Tight layout
    plt.margins(x=0)
    plt.tight_layout()
    plt.savefig(fig_path)

    if plotshow:
        plt.show()
    else:
        plt.close()
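
# A minimal sketch (hypothetical toy data, not the project's real results) of the
# nested structures that plot_spatial_validation() above expects:
#   regional_modelled:  weather station -> simulation year -> fueltype index -> per-region values
#   subnational_real / non_regional_modelled: region -> annual demand (GWh)
import numpy as np

toy_regions = ['regionA', 'regionB']
toy_fueltype_int = 2  # assumed index for electricity
toy_regional_modelled = {
    'station_1': {2015: {toy_fueltype_int: np.array([10.0, 20.0])}},
    'station_2': {2015: {toy_fueltype_int: np.array([11.0, 19.0])}},
}
toy_subnational_real = {'regionA': 10.5, 'regionB': 19.5}
toy_non_regional_modelled = {'regionA': 10.2, 'regionB': 19.8}
# These would be passed as regional_modelled, subnational_real and
# non_regional_modelled, together with toy_regions and a fueltype string.
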
def test_tech_l_sigmoid():
    """testing
    """
    fueltype_lookup = {
        'solid_fuel': 0,
        'gas': 1,
        'electricity': 2,
        'oil': 3,
        'heat_sold': 4,
        'biomass': 5,
        'hydrogen': 6,
        'heat': 7}

    technologies = {
        'boilerA': read_data.TechnologyData(
            fueltype='gas',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990,
            tech_max_share=1.0),

        'boilerB': read_data.TechnologyData(
            fueltype='electricity',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990,
            tech_max_share=1.0)
        }

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=2020,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=1.0
        )]

    service_fueltype_p = {1: 1.0, 2: 0.0}
    s_tech_by_p = {
        'boilerA': 1.0,
        'boilerB': 0.0}

    fuel_tech_p_by = {
        1: {'boilerA': 1.0},
        2: {'boilerB': 1.0}}
    installed_tech = ['boilerB']

    s_tech_switched_ey = {
        'boilerA': 0.0,
        'boilerB': 1.0}

    result = s_generate_sigmoid.tech_l_sigmoid(
        s_tech_switched_ey=s_tech_switched_ey,
        enduse_fuel_switches=fuel_switches,
        technologies=technologies,
        installed_tech=installed_tech,
        s_fueltype_by_p=service_fueltype_p,
        s_tech_by_p=s_tech_by_p,
        fuel_tech_p_by=fuel_tech_p_by)

    assert result['boilerB'] == 1.0

    # -----
    technologies = {
        'boilerA': read_data.TechnologyData(
            fueltype='gas',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990,
            tech_max_share=1.0),

        'boilerB': read_data.TechnologyData(
            fueltype='electricity',
            eff_by=0.5,
            eff_ey=0.5,
            year_eff_ey=2015,
            eff_achieved=1.0,
            diff_method='linear',
            market_entry=1990,
            tech_max_share=0.8)
        }

    fuel_switches = [
        read_data.FuelSwitch(
            enduse='heating',
            technology_install='boilerB',
            switch_yr=2020,
            fueltype_replace=tech_related.get_fueltype_int('gas'),
            fuel_share_switched_ey=0.5  # deliberately lower than tech_max_share
        )]

    service_fueltype_p = {1: 1.0, 2: 0.0}
    s_tech_by_p = {'boilerA': 1.0, 'boilerB': 0.0}
    fuel_tech_p_by = {1: {'boilerA': 1.0}, 2: {'boilerB': 1.0}}
    installed_tech = ['boilerB']

    s_tech_switched_ey = {'boilerA': 0.5, 'boilerB': 0.5}

    result = s_generate_sigmoid.tech_l_sigmoid(
        s_tech_switched_ey=s_tech_switched_ey,
        enduse_fuel_switches=fuel_switches,
        technologies=technologies,
        installed_tech=installed_tech,
        s_fueltype_by_p=service_fueltype_p,
        s_tech_by_p=s_tech_by_p,
        fuel_tech_p_by=fuel_tech_p_by)
    assert result['boilerB'] == 0.8
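    # The asserted saturation level for boilerB equals its tech_max_share (0.8),
    # not the 0.5 share of gas fuel actually switched by 2020.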
def run(data_input, fueltype_str, fig_name):
    """Plot peak demand and total demand over time in same plot
    """
    statistics_to_print = []

    # Select period and fueltype
    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    # -----------------------------------------------------------
    # Modelled years
    # -----------------------------------------------------------

    # List of selected data for every weather year (which is then converted to array)
    weather_yrs_total_demand = []
    weather_yrs_peak_demand = []

    nr_weather_yrs = list(data_input.keys())  # weather years for which results are available
    statistics_to_print.append("_____________________________")
    statistics_to_print.append("Weather years")
    statistics_to_print.append(str(data_input.keys()))

    # Iterate weather years
    for weather_yr, data_weather_yr in data_input.items():

        total_demands = []
        peak_demands = []
        sim_yrs = []
        for sim_yr in data_weather_yr.keys():
            sim_yrs.append(sim_yr)
            data_input_fueltype = data_weather_yr[sim_yr][
                fueltype_int]  # Select fueltype

            # Sum total annual demand and convert GWh to TWh
            sum_gwh_y = np.sum(data_input_fueltype)
            sum_twh_y = conversions.gwh_to_twh(sum_gwh_y)

            # Get peak hour demand
            peak_h = np.max(data_input_fueltype.reshape(8760))

            total_demands.append(sum_twh_y)
            peak_demands.append(peak_h)

        weather_yrs_total_demand.append(total_demands)
        weather_yrs_peak_demand.append(peak_demands)

    columns = sim_yrs

    # Convert to array
    weather_yrs_total_demand = np.array(weather_yrs_total_demand)
    weather_yrs_peak_demand = np.array(weather_yrs_peak_demand)

    # Calculate std per simulation year
    std_total_demand = list(np.std(weather_yrs_total_demand,
                                   axis=0))  # std across weather years per simulation year
    std_peak_demand = list(np.std(weather_yrs_peak_demand,
                                  axis=0))  # std across weather years per simulation year
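    # Worked example (hypothetical): two weather years with annual totals
    # [[300, 310], [320, 330]] TWh give np.std(..., axis=0) = [10.0, 10.0],
    # i.e. the spread across weather years for each simulation year.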

    # Create dataframe
    if len(nr_weather_yrs) > 2:

        # Create dataframes
        df_total_demand = pd.DataFrame(weather_yrs_total_demand,
                                       columns=columns)
        df_peak = pd.DataFrame(weather_yrs_peak_demand, columns=columns)

        # Calculate quantiles
        quantile_95 = 0.95
        quantile_05 = 0.05

        # Calculate quantiles
        df_total_demand_q_95 = df_total_demand.quantile(quantile_95)
        df_total_demand_q_05 = df_total_demand.quantile(quantile_05)
        df_peak_q_95 = df_peak.quantile(quantile_95)
        df_peak_q_05 = df_peak.quantile(quantile_05)

        # convert to list
        df_total_demand_q_95 = df_total_demand_q_95.tolist()
        df_total_demand_q_05 = df_total_demand_q_05.tolist()
        df_peak_q_95 = df_peak_q_95.tolist()
        df_peak_q_05 = df_peak_q_05.tolist()
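        # e.g. df_peak_q_05[i] and df_peak_q_95[i] bound the 5th-95th percentile
        # band of peak demand across weather years for simulation year i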
        #df_peak = df_peak.T  # All individual values
    else:
        #df_total_demand = weather_yrs_total_demand
        #df_peak = weather_yrs_peak_demand
        pass

    # -------------------
    # Base year data (2015)
    # -------------------
    # total demand
    tot_demand_twh_2015 = []
    for sim_yr, data_sim_yr in data_input[2015].items():
        gwh_2015_y = np.sum(data_sim_yr[fueltype_int])
        twh_2015_y = conversions.gwh_to_twh(gwh_2015_y)
        tot_demand_twh_2015.append(twh_2015_y)

    # peak
    df_peak_2015 = []
    for sim_yr, data_sim_yr in data_input[2015].items():
        peak_gwh_2015_y = np.max(data_sim_yr[fueltype_int])
        df_peak_2015.append(peak_gwh_2015_y)

    # ---------------
    # Smoothing lines
    # ---------------
    if len(nr_weather_yrs) > 2:
        try:
            period_h_smoothed, tot_demand_twh_2015_smoothed = basic_plot_functions.smooth_data(
                columns, tot_demand_twh_2015, num=40000)
            period_h_smoothed, df_total_demand_q_95_smoothed = basic_plot_functions.smooth_data(
                list(columns), df_total_demand_q_95, num=40000)
            period_h_smoothed, df_total_demand_q_05_smoothed = basic_plot_functions.smooth_data(
                columns, df_total_demand_q_05, num=40000)
            period_h_smoothed, df_peak_q_95_smoothed = basic_plot_functions.smooth_data(
                list(columns), df_peak_q_95, num=40000)
            period_h_smoothed, df_peak_q_05_smoothed = basic_plot_functions.smooth_data(
                columns, df_peak_q_05, num=40000)
            period_h_smoothed, df_peak_2015_smoothed = basic_plot_functions.smooth_data(
                columns, df_peak_2015, num=40000)
        except Exception:
            period_h_smoothed = columns
            df_total_demand_q_95_smoothed = df_total_demand_q_95
            df_total_demand_q_05_smoothed = df_total_demand_q_05
            df_peak_q_95_smoothed = df_peak_q_95
            df_peak_q_05_smoothed = df_peak_q_05
            tot_demand_twh_2015_smoothed = tot_demand_twh_2015
            df_peak_2015_smoothed = df_peak_2015
    else:
        try:
            period_h_smoothed, tot_demand_twh_2015_smoothed = basic_plot_functions.smooth_data(
                columns, tot_demand_twh_2015, num=40000)
            period_h_smoothed, df_peak_2015_smoothed = basic_plot_functions.smooth_data(
                columns, df_peak_2015, num=40000)
        except Exception:
            period_h_smoothed = columns
            tot_demand_twh_2015_smoothed = tot_demand_twh_2015
            df_peak_2015_smoothed = df_peak_2015

    # --------------
    # Two axis figure
    # --------------
    fig, ax1 = plt.subplots(figsize=basic_plot_functions.cm2inch(15, 10))

    ax2 = ax1.twinx()

    # Axis label
    ax1.set_xlabel('Years')
    ax2.set_ylabel('Peak hour {} demand (GW)'.format(fueltype_str),
                   color='black')
    ax1.set_ylabel('Total {} demand (TWh)'.format(fueltype_str), color='black')

    # Make the y-axis label, ticks and tick labels match the line color.
    color_axis1 = 'lightgrey'
    color_axis2 = 'blue'

    ax1.tick_params('y', colors='black')
    ax2.tick_params('y', colors='black')

    if len(nr_weather_yrs) > 2:

        # -----------------
        # Uncertainty range total demand
        # -----------------
        '''ax1.plot(
            period_h_smoothed,
            df_total_demand_q_05_smoothed,
            color='tomato', linestyle='--', linewidth=0.5, label="0.05_total_demand")'''
        '''ax1.plot(
            period_h_smoothed,
            df_total_demand_q_95_smoothed,
            color=color_axis1, linestyle='--', linewidth=0.5, label="0.95_total_demand")

        ax1.fill_between(
            period_h_smoothed, #x
            df_total_demand_q_95_smoothed,  #y1
            df_total_demand_q_05_smoothed,  #y2
            alpha=.25,
            facecolor=color_axis1,
            label="uncertainty band demand")'''

        # -----------------
        # Uncertainty range peaks
        # -----------------
        ##ax2.plot(period_h_smoothed, df_peak_q_05_smoothed, color=color_axis2, linestyle='--', linewidth=0.5, label="0.05_peak")
        ##ax2.plot(period_h_smoothed, df_peak_q_95_smoothed, color=color_axis2, linestyle='--', linewidth=0.5, label="0.95_peak")
        ax2.plot(period_h_smoothed,
                 df_peak_2015_smoothed,
                 color=color_axis2,
                 linestyle="--",
                 linewidth=0.4)

        # Error bar of bar charts
        ax2.errorbar(columns,
                     df_peak_2015,
                     linewidth=0.5,
                     color='black',
                     yerr=std_peak_demand,
                     linestyle="None")

        # Error bar bar plots
        ax1.errorbar(columns,
                     tot_demand_twh_2015,
                     linewidth=0.5,
                     color='black',
                     yerr=std_total_demand,
                     linestyle="None")
        '''ax2.fill_between(
            period_h_smoothed, #x
            df_peak_q_95_smoothed,  #y1
            df_peak_q_05_smoothed,  #y2
            alpha=.25,
            facecolor="blue",
            label="uncertainty band peak")'''

    # Total demand bar plots
    ##ax1.plot(period_h_smoothed, tot_demand_twh_2015_smoothed, color='tomato', linestyle='-', linewidth=2, label="tot_demand_weather_yr_2015")
    ax1.bar(columns,
            tot_demand_twh_2015,
            width=2,
            alpha=1,
            align='center',
            color=color_axis1,
            label="total {} demand".format(fueltype_str))

    statistics_to_print.append("_____________________________")
    statistics_to_print.append("total demand per model year")
    statistics_to_print.append(str(tot_demand_twh_2015))

    # Line of peak demand
    #ax2.plot(columns, df_peak, color=color_axis2, linestyle='--', linewidth=0.5, label="peak_0.95")
    ax2.plot(period_h_smoothed,
             df_peak_2015_smoothed,
             color=color_axis2,
             linestyle='-',
             linewidth=2,
             label="{} peak demand (base weather yr)".format(fueltype_str))

    statistics_to_print.append("_____________________________")
    statistics_to_print.append("peak demand per model year")
    statistics_to_print.append(str(df_peak_2015))

    # Scatter plots of peak demand
    ax2.scatter(columns,
                df_peak_2015,
                marker='o',
                s=20,
                color=color_axis2,
                alpha=1)

    ax1.legend(prop={
        'family': 'arial',
        'size': 10
    },
               loc='upper center',
               bbox_to_anchor=(0.9, -0.1),
               frameon=False,
               shadow=True)

    ax2.legend(prop={
        'family': 'arial',
        'size': 10
    },
               loc='upper center',
               bbox_to_anchor=(0.1, -0.1),
               frameon=False,
               shadow=True)

    # More space at bottom
    #fig.subplots_adjust(bottom=0.4)
    fig.tight_layout()

    plt.savefig(fig_name)
    plt.close()

    # Write info to txt
    write_data.write_list_to_txt(
        fig_name.replace(".pdf", ".txt"), statistics_to_print)
    print("--")
Example no. 16
0
def write_user_defined_results(criterias, result_paths, sim_obj, data, curr_yr,
                               region_selection, pop_array_cy):
    """
    Write annual results to files
    """

    logging.info("... Start writing results to file")
    if criterias['write_txt_additional_results']:

        # Write population data to file
        write_data.write_scenaric_population_data(
            curr_yr, result_paths['data_results_model_run_pop'], pop_array_cy)

        # Write full results (Note: this writes very large data files)
        ##write_data.write_full_results(
        ##    data_handle.current_timestep,
        ##    os.path.join(result_path, 'model_run_results_txt'),
        ##    sim_obj.ed_enduse_fueltype_regs_yh,
        ##    "out_enduse_specific")
        write_data.write_supply_results(
            curr_yr, "ed_fueltype_regs_yh",
            result_paths['data_results_model_run_pop'],
            sim_obj.ed_fueltype_regs_yh, "result_tot_submodels_fueltypes")
        write_data.write_enduse_specific(
            curr_yr, result_paths['data_results_model_run_results_txt'],
            sim_obj.tot_fuel_y_enduse_specific_yh, "out_enduse_specific")
        write_data.write_lf(result_paths['data_results_model_run_results_txt'],
                            "result_reg_load_factor_y", [curr_yr],
                            sim_obj.reg_load_factor_y, 'reg_load_factor_y')
        write_data.write_lf(result_paths['data_results_model_run_results_txt'],
                            "result_reg_load_factor_yd", [curr_yr],
                            sim_obj.reg_load_factor_yd, 'reg_load_factor_yd')

    # ----------------------------------------------------------------------------------------
    # Write out national demand for every fueltype (used for the initial transfer of demand data)
    # ----------------------------------------------------------------------------------------
    if criterias['write_out_national']:

        # Write out gas
        demand_supply_interaction.write_national_results(
            path_folder=result_paths['data_results'],
            results_unconstrained=sim_obj.results_unconstrained,
            enduse_specific_results=sim_obj.tot_fuel_y_enduse_specific_yh,
            fueltype_str='gas',
            fuelype_nr=tech_related.get_fueltype_int('gas'),
            year=curr_yr,
            submodels_names=data['assumptions'].submodels_names)

        # Write out elec
        demand_supply_interaction.write_national_results(
            path_folder=result_paths['data_results'],
            results_unconstrained=sim_obj.results_unconstrained,
            enduse_specific_results=sim_obj.tot_fuel_y_enduse_specific_yh,
            fueltype_str='electricity',
            fuelype_nr=tech_related.get_fueltype_int('electricity'),
            year=curr_yr,
            submodels_names=data['assumptions'].submodels_names)

    # ------------------------------------------------
    # Temporal Validation
    # ------------------------------------------------
    if (criterias['validation_criteria'] == True) and (
            curr_yr == data['assumptions'].base_yr) and (
                criterias['cluster_calc'] != True):
        lad_validation.spatio_temporal_val(
            sim_obj.ed_fueltype_national_yh,
            sim_obj.ed_fueltype_regs_yh,
            result_paths,
            data['paths'],
            region_selection,
            data['assumptions'].seasons,
            data['assumptions'].model_yeardays_daytype,
            plot_crit=False)
def main(scenarios_path, path_shapefile_input, base_yr,
         simulation_yrs_to_plot):
    """Read in all results and plot PDFs

    Arguments
    ----------
    scenarios_path : str
        Path to results
    path_shapefile_input : str
        Path to shapefile
    base_yr : int
        Base year
    simulation_yrs_to_plot : list
        Simulation years for which plots are generated
    """
    print("Start creating plots")

    # -------------------
    # Create result folder
    # -------------------
    result_path = os.path.join(scenarios_path, '_results_weather_plots')
    basic_functions.del_previous_setup(result_path)
    basic_functions.create_folder(result_path)

    for simulation_yr_to_plot in simulation_yrs_to_plot:
        print("-----------")
        print("...simulation_yr_to_plot: " + str(simulation_yr_to_plot))
        print("-----------")
        data = {}

        # ---------------------------------------------------------
        # Iterate folders and read out all weather years and stations
        # ---------------------------------------------------------
        to_ignores = [
            'model_run_pop', 'PDF_validation', '_results_weather_plots'
        ]

        endings_to_ignore = ['.pdf', '.txt', '.ini']

        all_scenarios_incl_ignored = os.listdir(scenarios_path)
        all_scenarios = []
        for scenario in all_scenarios_incl_ignored:
            if scenario not in to_ignores:
                all_scenarios.append(scenario)

        scenario_result_container = []
        for scenario_nr, scenario_name in enumerate(all_scenarios):
            print(" ")
            print("Scenario: {}".format(scenario_name))
            print(" ")
            scenario_path = os.path.join(scenarios_path, scenario_name)
            all_result_folders = os.listdir(scenario_path)

            paths_folders_result = []

            for result_folder in all_result_folders:
                if result_folder not in to_ignores and result_folder[
                        -4:] not in endings_to_ignore:
                    paths_folders_result.append(
                        os.path.join(scenario_path, result_folder))

            fueltype_str_to_create_maps = ['electricity']

            fueltype_str = 'electricity'
            fueltype_int = tech_related.get_fueltype_int(fueltype_str)

            ####################################################################
            # Collect regional simulation data for every realisation
            ####################################################################
            total_regional_demand_electricity = pd.DataFrame()
            peak_hour_demand = pd.DataFrame()
            national_peak = pd.DataFrame()
            regional_share_national_peak = pd.DataFrame()
            national_electricity = pd.DataFrame()
            national_gas = pd.DataFrame()
            national_hydrogen = pd.DataFrame()

            for path_result_folder in paths_folders_result:

                data = {}

                # Simulation information is read in from .ini file for results
                data['enduses'], data['assumptions'], data[
                    'regions'] = data_loader.load_ini_param(
                        os.path.join(path_result_folder))
                pop_data = read_data.read_scenaric_population_data(
                    os.path.join(path_result_folder, 'model_run_pop'))
                path_result_folder = os.path.join(path_result_folder,
                                                  'simulation_results')
                path_result_folder_model_runs = os.path.join(
                    path_result_folder, 'model_run_results_txt')
                data['lookups'] = lookup_tables.basic_lookups()

                # Other information is read in
                data['assumptions']['seasons'] = date_prop.get_season(
                    year_to_model=2015)
                data['assumptions']['model_yeardays_daytype'], data[
                    'assumptions']['yeardays_month'], data['assumptions'][
                        'yeardays_month_days'] = date_prop.get_yeardays_daytype(
                            year_to_model=2015)

                # --------------------------------------------
                # Reading in results from different model runs
                # --------------------------------------------
                results_container = read_weather_results.read_in_weather_results(
                    path_result_folder_model_runs,
                    data['assumptions']['seasons'],
                    data['assumptions']['model_yeardays_daytype'],
                    fueltype_str='electricity')

                # --Total demand (dataframe with row: realisation, column=region)
                realisation_data = pd.DataFrame([
                    results_container['ed_reg_tot_y'][simulation_yr_to_plot]
                    [fueltype_int]
                ],
                                                columns=data['regions'])
                total_regional_demand_electricity = total_regional_demand_electricity.append(
                    realisation_data)

                # National per fueltype electricity
                fueltype_elec_int = tech_related.get_fueltype_int(
                    'electricity')
                simulation_yrs_result = [
                    results_container['national_all_fueltypes'][year]
                    [fueltype_elec_int] for year in
                    results_container['national_all_fueltypes'].keys()
                ]

                realisation_data = pd.DataFrame(
                    [simulation_yrs_result],
                    columns=data['assumptions']['sim_yrs'])
                national_electricity = national_electricity.append(
                    realisation_data)

                # National per fueltype gas
                fueltype_gas_int = tech_related.get_fueltype_int('gas')
                simulation_yrs_result = [
                    results_container['national_all_fueltypes'][year]
                    [fueltype_gas_int] for year in
                    results_container['national_all_fueltypes'].keys()
                ]

                realisation_data = pd.DataFrame(
                    [simulation_yrs_result],
                    columns=data['assumptions']['sim_yrs'])
                national_gas = national_gas.append(realisation_data)

                # National per fueltype hydrogen
                fueltype_hydrogen_int = tech_related.get_fueltype_int('hydrogen')
                simulation_yrs_result = [
                    results_container['national_all_fueltypes'][year]
                    [fueltype_hydrogen_int] for year in
                    results_container['national_all_fueltypes'].keys()
                ]

                realisation_data = pd.DataFrame(
                    [simulation_yrs_result],
                    columns=data['assumptions']['sim_yrs'])
                national_hydrogen = national_hydrogen.append(realisation_data)

                # --Peak day demand (dataframe with row: realisation, column=region)
                realisation_data = pd.DataFrame([
                    results_container['ed_reg_peakday_peak_hour']
                    [simulation_yr_to_plot][fueltype_int]
                ],
                                                columns=data['regions'])

                peak_hour_demand = peak_hour_demand.append(realisation_data)

                # --National peak
                simulation_yrs_result = [
                    results_container['national_peak'][year][fueltype_int]
                    for year in results_container['national_peak'].keys()
                ]

                realisation_data = pd.DataFrame(
                    [simulation_yrs_result],
                    columns=data['assumptions']['sim_yrs'])
                national_peak = national_peak.append(realisation_data)

                # --Regional percentage of national peak demand
                realisation_data = pd.DataFrame([
                    results_container['regional_share_national_peak']
                    [simulation_yr_to_plot]
                ],
                                                columns=data['regions'])

                regional_share_national_peak = regional_share_national_peak.append(
                    realisation_data)

            # Add to scenario container
            scenario_result_container.append({
                'scenario_name': scenario_name,
                'peak_hour_demand': peak_hour_demand,
                'national_peak': national_peak,
                'regional_share_national_peak': regional_share_national_peak,
                'total_regional_demand_electricity': total_regional_demand_electricity,
                'national_electricity': national_electricity,
                'national_gas': national_gas,
                'national_hydrogen': national_hydrogen,
            })

        # ------------------------------
        # Plot national sum over time per fueltype and scenario
        # ------------------------------
        print("... plotting national sum of fueltype over time ")
        fig_3_plot_over_time.fueltypes_over_time(
            scenario_result_container=scenario_result_container,
            sim_yrs=data['assumptions']['sim_yrs'],
            fig_name="fueltypes_over_time__{}__{}.pdf".format(
                simulation_yr_to_plot, fueltype_str),
            fueltypes=['electricity', 'gas', 'hydrogen'],
            result_path=result_path,
            unit='TWh',
            plot_points=True,
            crit_smooth_line=True,
            seperate_legend=False)

        # ------------------------------
        # Plot national peak change over time for each scenario including weather variability
        # ------------------------------
        fig_3_plot_over_time.scenario_over_time(
            scenario_result_container=scenario_result_container,
            sim_yrs=data['assumptions']['sim_yrs'],
            fig_name="scenarios_peak_over_time__{}__{}.pdf".format(
                simulation_yr_to_plot, fueltype_str),
            plot_points=True,
            result_path=result_path,
            crit_smooth_line=True,
            seperate_legend=False)

        # ------------------------------
        # Plotting spatial results for electricity
        # ------------------------------
        for i in scenario_result_container:
            scenario_name = i['scenario_name']
            total_regional_demand_electricity = i[
                'total_regional_demand_electricity']
            peak_hour_demand = i['peak_hour_demand']
            regional_share_national_peak = i['regional_share_national_peak']

            print("... plot spatial map of total annual demand")
            field_to_plot = 'std_dev'
            fig_3_weather_map.total_annual_demand(
                total_regional_demand_electricity,
                path_shapefile_input,
                data['regions'],
                pop_data=pop_data,
                simulation_yr_to_plot=simulation_yr_to_plot,
                result_path=result_path,
                fig_name="{}__tot_demand__{}_{}_{}.pdf".format(
                    scenario_name, field_to_plot, fueltype_str,
                    simulation_yr_to_plot),
                field_to_plot=field_to_plot,
                unit='GW',
                seperate_legend=False)

            print("... plot spatial map of peak hour demand")
            field_to_plot = 'std_dev'
            fig_3_weather_map.total_annual_demand(
                peak_hour_demand,
                path_shapefile_input,
                data['regions'],
                pop_data=pop_data,
                simulation_yr_to_plot=simulation_yr_to_plot,
                result_path=result_path,
                fig_name="{}__peak_h_demand_{}_{}_{}.pdf".format(
                    scenario_name, field_to_plot, fueltype_str,
                    simulation_yr_to_plot),
                field_to_plot=field_to_plot,
                unit='GW',
                seperate_legend=False)

            print(
                "... plot spatial map of percentage of regional peak hour demand"
            )
            field_to_plot = 'mean'
            fig_3_weather_map.total_annual_demand(
                regional_share_national_peak,
                path_shapefile_input,
                data['regions'],
                pop_data=pop_data,
                simulation_yr_to_plot=simulation_yr_to_plot,
                result_path=result_path,
                fig_name="{}__regional_share_national_peak_{}_{}_{}.pdf".
                format(scenario_name, field_to_plot, fueltype_str,
                       simulation_yr_to_plot),
                field_to_plot=field_to_plot,
                unit='percentage',
                seperate_legend=False,
                bins=[0.000001, 0.25, 0.5, 0.75, 1, 1.25, 1.5])

            field_to_plot = 'std_dev'
            fig_3_weather_map.total_annual_demand(
                regional_share_national_peak,
                path_shapefile_input,
                data['regions'],
                pop_data=pop_data,
                simulation_yr_to_plot=simulation_yr_to_plot,
                result_path=result_path,
                fig_name="{}__regional_share_national_peak_{}_{}_{}.pdf".
                format(scenario_name, field_to_plot, fueltype_str,
                       simulation_yr_to_plot),
                field_to_plot=field_to_plot,
                unit='percentage',
                seperate_legend=False)

    print("===================================")
    print("... finished reading and plotting results")
    print("===================================")
Example no. 18
0
def plot_fig_spatio_temporal_validation(path_regional_calculations,
                                        path_rolling_elec_demand,
                                        path_temporal_elec_validation,
                                        path_temporal_gas_validation,
                                        path_non_regional_elec_2015,
                                        path_out_plots,
                                        plot_show=False):
    """
    Create plot with regional and non-regional results for the second paper.
    Compare HDD calculations and disaggregation at regional and local level.
    """
    # ---------------------------------------------------------
    # Iterate folders and read out all weather years and stations
    # ---------------------------------------------------------
    all_result_folders = os.listdir(path_regional_calculations)

    paths_folders_result = []
    data_container = defaultdict(dict)
    ed_fueltype_regs_yh = defaultdict(dict)
    weather_yr_station_tot_fueltype_yh = defaultdict(dict)
    residential_results = defaultdict(dict)
    for scenario_folder in all_result_folders:
        result_folders = os.listdir(
            os.path.join(path_regional_calculations, scenario_folder))
        for result_folder in result_folders:
            try:
                split_path_name = result_folder.split("__")
                weather_yr = int(split_path_name[0])

                try:
                    weather_station = int(split_path_name[1])
                except (IndexError, ValueError):
                    weather_station = "all_stations"

                paths_folders_result.append(
                    os.path.join(path_regional_calculations, result_folder))

                data = {}
                data['lookups'] = lookup_tables.basic_lookups()
                data['enduses'], data['assumptions'], data[
                    'regions'] = data_loader.load_ini_param(
                        os.path.join(
                            path_regional_calculations,
                            all_result_folders[0]))  # first result folder
                data['assumptions']['seasons'] = date_prop.get_season(
                    year_to_model=2015)
                data['assumptions']['model_yeardays_daytype'], data[
                    'assumptions']['yeardays_month'], data['assumptions'][
                        'yeardays_month_days'] = date_prop.get_yeardays_daytype(
                            year_to_model=2015)

                results_container = read_data.read_in_results(
                    os.path.join(path_regional_calculations, scenario_folder,
                                 "{}__{}".format(weather_yr, weather_station),
                                 'model_run_results_txt'),
                    data['assumptions']['seasons'],
                    data['assumptions']['model_yeardays_daytype'])

                weather_yr_station_tot_fueltype_yh[weather_yr][
                    weather_station] = results_container['tot_fueltype_yh']
                ed_fueltype_regs_yh[weather_yr][
                    weather_station] = results_container['ed_fueltype_regs_yh']
                residential_results[weather_yr][
                    weather_station] = results_container['residential_results']
            except ValueError:
                pass

    data_container['ed_fueltype_regs_yh'] = ed_fueltype_regs_yh
    data_container['tot_fueltype_yh'] = weather_yr_station_tot_fueltype_yh
    data_container['residential_results'] = residential_results
    data_container = dict(data_container)

    # -------------------------------------------------
    # Collect non regional 2015 elec data
    # Calculated with all regional weather stations
    # -------------------------------------------------
    year_non_regional = 2015
    path_with_txt = os.path.join(
        path_non_regional_elec_2015, "{}__{}".format(str(year_non_regional),
                                                     "all_stations"),
        'model_run_results_txt')

    demand_year_non_regional = read_data.read_in_results(
        path_with_txt, data['assumptions']['seasons'],
        data['assumptions']['model_yeardays_daytype'])
    tot_fueltype_yh = demand_year_non_regional['tot_fueltype_yh']

    fueltype_int = tech_related.get_fueltype_int('electricity')

    non_regional_elec_2015 = tot_fueltype_yh[year_non_regional][fueltype_int]

    # ---Collect real electricity data of year 2015
    elec_2015_indo, _ = elec_national_data.read_raw_elec_2015(
        path_rolling_elec_demand)

    # Scale measured data because the total sums are not identical
    f_diff_elec = np.sum(non_regional_elec_2015) / np.sum(elec_2015_indo)
    elec_factored_yh = f_diff_elec * elec_2015_indo
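    # Example (hypothetical): if the modelled total is 300 TWh and the measured
    # total is 290 TWh, every measured hourly value is scaled by 300 / 290 ≈ 1.034.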

    # *****************************************************************
    # Temporal validation
    # Compare regional and non regional and actual demand over time
    # *****************************************************************
    simulation_yr_to_plot = 2015

    winter_week, spring_week, summer_week, autumn_week = date_prop.get_seasonal_weeks()

    # Peak day
    peak_day, _ = enduse_func.get_peak_day_single_fueltype(elec_factored_yh)

    # Convert days to hours (only the peak day is plotted)
    period_to_plot = date_prop.get_8760_hrs_from_yeardays([peak_day])
    period_to_plot_winter = date_prop.get_8760_hrs_from_yeardays(winter_week)
    period_to_plot_spring = date_prop.get_8760_hrs_from_yeardays(spring_week)

    fig_p2_temporal_validation.run_fig_p2_temporal_validation(
        data_input=data_container['tot_fueltype_yh'],
        weather_yr=2015,
        fueltype_str='electricity',
        simulation_yr_to_plot=simulation_yr_to_plot,  # Simulation year to plot
        period_h=period_to_plot,
        validation_elec_2015=elec_factored_yh,
        non_regional_elec_2015=non_regional_elec_2015,
        fig_name=os.path.join(path_out_plots, "temporal_validation_elec.pdf"),
        titel="yearday: {}".format(peak_day),
        y_lim_val=55,
        plot_validation=False,
        plot_show=plot_show)

    fueltype_gas = tech_related.get_fueltype_int('gas')
    fig_p2_temporal_validation.run_fig_p2_temporal_validation(
        data_input=data_container['tot_fueltype_yh'],
        weather_yr=2015,
        fueltype_str='gas',
        simulation_yr_to_plot=simulation_yr_to_plot,  # Simulation year to plot
        period_h=period_to_plot,
        validation_elec_2015=None,
        non_regional_elec_2015=tot_fueltype_yh[year_non_regional]
        [fueltype_gas],
        fig_name=os.path.join(path_out_plots, "temporal_validation_gas.pdf"),
        titel="yearday: {}".format(peak_day),
        y_lim_val=250,
        plot_validation=False,
        plot_show=plot_show)

    # -------------------
    # Spatial validation (not with maps)
    # -------------------
    # non_regional: all weather stations, spatially disaggregated (TODO: give better names)
    # regional: only one weather station for the whole country, but still data for every region
    weather_yr = 2015
    fig_p2_spatial_val.run(
        simulation_yr_to_plot=simulation_yr_to_plot,
        demand_year_non_regional=demand_year_non_regional[
            'residential_results'][weather_yr],
        demand_year_regional=data_container['residential_results'][weather_yr],
        fueltypes=data['lookups']['fueltypes'],
        fig_path=path_out_plots,
        path_temporal_elec_validation=path_temporal_elec_validation,
        path_temporal_gas_validation=path_temporal_gas_validation,
        regions=data['regions'],
        plot_crit=plot_show)
def run_fig_spatial_distribution_of_peak(
        scenarios,
        path_to_folder_with_scenarios,
        path_shapefile,
        sim_yrs,
        field_to_plot,
        unit,
        fig_path,
        fueltype_str='electricity'
    ):
    """
    """
    weather_yrs = []
    calculated_yrs_paths = []
    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    for scenario in scenarios:
        path_scenario = os.path.join(path_to_folder_with_scenarios, scenario)
        all_result_folders = os.listdir(path_scenario)

        for result_folder in all_result_folders:
            try:
                split_path_name = result_folder.split("__")
                weather_yr = int(split_path_name[0])
                weather_yrs.append(weather_yr)
                tuple_yr_path = (weather_yr, path_scenario)
                calculated_yrs_paths.append(tuple_yr_path)
            except ValueError:
                pass

    for simulation_yr in sim_yrs:
        container = {}
        container['abs_demand_in_peak_h_pp'] = {}
        container['abs_demand_in_peak_h'] = {}
        container['p_demand_in_peak_h'] = {}

        for weather_yr, path_data_ed in calculated_yrs_paths:
            print("... prepare data {} {}".format(weather_yr, path_data_ed))

            path_to_weather_yr = os.path.join(path_data_ed, "{}__{}".format(weather_yr, 'all_stations'))

            data = {}
            data['lookups'] = lookup_tables.basic_lookups()
            data['enduses'], data['assumptions'], regions = data_loader.load_ini_param(os.path.join(path_data_ed))
            data['assumptions']['seasons'] = date_prop.get_season(year_to_model=2015)
            data['assumptions']['model_yeardays_daytype'], data['assumptions']['yeardays_month'], data['assumptions']['yeardays_month_days'] = date_prop.get_yeardays_daytype(year_to_model=2015)

            # Population 
            population_data = read_data.read_scenaric_population_data(os.path.join(path_data_ed, 'model_run_pop'))

            results_container = read_data.read_in_results(
                os.path.join(path_to_weather_yr, 'model_run_results_txt'),
                data['assumptions']['seasons'],
                data['assumptions']['model_yeardays_daytype'])

            # ---------------------------------------------------
            # Calculate hour with national peak demand
            # This may be different depending on the weather yr
            # ---------------------------------------------------
            ele_regions_8760 = results_container['ed_fueltype_regs_yh'][simulation_yr][fueltype_int]
            sum_all_regs_fueltype_8760 = np.sum(ele_regions_8760, axis=0) # Sum for every hour

            max_day = int(basic_functions.round_down((np.argmax(sum_all_regs_fueltype_8760) / 24), 1))
            max_h = np.argmax(sum_all_regs_fueltype_8760)
            max_demand = np.max(sum_all_regs_fueltype_8760)

            # Calculate the national peak demand in GW
            national_peak_GW = np.max(sum_all_regs_fueltype_8760)
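            # Example (hypothetical): a national profile peaking at hour 421
            # gives max_h = 421, which falls on yearday 17 (421 // 24 = 17)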

            # ------------------------------------------------------
            # Calculate the contribution of the regional peak demand
            # ------------------------------------------------------
            demand_in_peak_h = ele_regions_8760[:, max_h]

            if unit == 'GW':
                container['abs_demand_in_peak_h'][weather_yr] = demand_in_peak_h
            elif unit == 'kW':
                container['abs_demand_in_peak_h'][weather_yr] = demand_in_peak_h * 1000000  # Convert GW to kW
            else:
                # Use GW as default
                container['abs_demand_in_peak_h'][weather_yr] = demand_in_peak_h

            container['abs_demand_in_peak_h_pp'][weather_yr] = demand_in_peak_h / population_data[simulation_yr]
    
            # Relative fraction of regional demand in relation to peak
            container['p_demand_in_peak_h'][weather_yr] = (demand_in_peak_h / national_peak_GW ) * 100 # given as percent
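            # Example (hypothetical): a region drawing 5 GW while the national
            # peak is 50 GW contributes (5 / 50) * 100 = 10 % of the peak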

            print("=================================")
            print("{}  {}  {}  {}".format(
                simulation_yr,
                weather_yr,
                np.sum(ele_regions_8760),
                national_peak_GW))

        # --------------
        # Create dataframe with all weather-year calculations for every region
        #
        #               region1, region2, region3
        # weather yr1
        # weather yr2
        # --------------
        # Convert regional data to dataframe
        abs_demand_in_peak_h_pp = np.array(list(container['abs_demand_in_peak_h_pp'].values()))
        abs_demand_peak_h = np.array(list(container['abs_demand_in_peak_h'].values()))
        p_demand_peak_h = np.array(list(container['p_demand_in_peak_h'].values()))

        # Absolute demand
        df_abs_peak_demand = pd.DataFrame(
            abs_demand_peak_h,
            columns=regions,
            index=list(container['abs_demand_in_peak_h'].keys()))

        # Relative demand
        df_p_peak_demand = pd.DataFrame(
            p_demand_peak_h,
            columns=regions,
            index=list(container['p_demand_in_peak_h'].keys()))
        
        df_abs_demand_in_peak_h_pp = pd.DataFrame(
            abs_demand_in_peak_h_pp,
            columns=regions,
            index=list(container['abs_demand_in_peak_h_pp'].keys()))

        # Absolute peak value - mean
        max_peak_h_across_weather_yrs = df_abs_peak_demand.max()
        average_across_weather_yrs = df_abs_peak_demand.mean()
        diff_peak_h_minus_mean = max_peak_h_across_weather_yrs - average_across_weather_yrs
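        # Example (hypothetical): regional peaks of [4.0, 5.0, 6.0] GW across
        # three weather years give max 6.0, mean 5.0 and a difference of 1.0 GW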

        for index, row in df_p_peak_demand.iterrows():
            print("Weather yr: {} Total p: {}".format(index, np.sum(row)))
            assert round(np.sum(row), 4) == 100.0

        # ----------------------------
        # Calculate standard deviation
        # ----------------------------
        std_deviation_df_abs_demand_in_peak_h_pp = df_abs_demand_in_peak_h_pp.std()
        std_deviation_abs_demand_peak_h = df_abs_peak_demand.std()
        std_deviation_p_demand_peak_h = df_p_peak_demand.std()

        print("=========")
        print("National stats")
        print("=========")
        print("Sum of std of absolut peak demand:  " + str(np.sum(std_deviation_abs_demand_peak_h)))

        # --------------------
        # Create map
        # --------------------
        regional_statistics_columns = [
            'name',
            'std_deviation_p_demand_peak_h',
            'std_deviation_abs_demand_peak_h',
            'std_deviation_df_abs_demand_in_peak_h_pp',
            'diff_peak_h_minus_mean']

        df_stats = pd.DataFrame(columns=regional_statistics_columns)

        for region_name in regions:

            # 'name', 'absolute_GW', 'p_GW_peak'
            line_entry = [[
                region_name,
                std_deviation_p_demand_peak_h[region_name],
                std_deviation_abs_demand_peak_h[region_name],
                std_deviation_df_abs_demand_in_peak_h_pp[region_name],
                diff_peak_h_minus_mean[region_name],
                ]]

            line_df = pd.DataFrame(line_entry, columns=regional_statistics_columns)
            df_stats = df_stats.append(line_df)

        # Load uk shapefile
        uk_shapefile = gpd.read_file(path_shapefile)

        # Merge stats to geopanda
        shp_gdp_merged = uk_shapefile.merge(
            df_stats,
            on='name')

        # Assign projection
        crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
        uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)

        ax = uk_gdf.plot(
            figsize=basic_plot_functions.cm2inch(25, 20))

        nr_of_intervals = 6

        bin_values = result_mapping.get_reasonable_bin_values_II(
            data_to_plot=list(uk_gdf[field_to_plot]),
            nr_of_intervals=nr_of_intervals)
        # Manual bins
        #bin_values = [0, 0.005, 0.01, 0.015, 0.02, 0.025, 0.03]
        print(float(uk_gdf[field_to_plot].max()))
        print("BINS " + str(bin_values))

        uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = fig_p2_weather_val.user_defined_bin_classification(
            uk_gdf,
            field_to_plot,
            bin_values=bin_values)

        # plot with face color attribute
        uk_gdf.plot(
            ax=ax,
            facecolor=uk_gdf['bin_color'],
            edgecolor='black',
            linewidth=0.5)

        legend_handles = result_mapping.add_simple_legend(
            bin_values,
            cmap_rgb_colors,
            color_zero)

        plt.legend(
            handles=legend_handles,
            title="{}  [{}]".format(field_to_plot, unit),
            prop={'size': 8},
            #loc='upper center', bbox_to_anchor=(0.5, -0.05),
            loc='center left', bbox_to_anchor=(1, 0.5),
            frameon=False)

        # PLot bins on plot
        '''plt.text(
            -20,
            -20,
            bin_values[:-1], #leave away maximum value
            fontsize=8)'''

        plt.tight_layout()

        fig_out_path = os.path.join(fig_path, str(field_to_plot) + "__" + str(simulation_yr) + ".pdf")
        plt.savefig(fig_out_path)
Example no. 20
0
def read_fuel_is(path_to_csv, fueltypes_nr):
    """This function reads in base_data_CSV all fuel types

    Arguments
    ----------
    path_to_csv : str
        Path to csv file
    fueltypes_nr : int
        Number of fueltypes

    Returns
    -------
    fuels : dict
        Industry fuels
    sectors : list
        Industrial sectors
    enduses : list
        Industrial enduses

    Info
    ----
    Source: User Guide Energy Consumption in the UK
            https://www.gov.uk/government/uploads/system/uploads/attach
            ment_data/file/573271/ECUK_user_guide_November_2016_final.pdf

            https://unstats.un.org/unsd/cr/registry/regcst.asp?Cl=27

            http://ec.europa.eu/eurostat/ramon/nomenclatures/
            index.cfm?TargetUrl=LST_NOM_DTL&StrNom=NACE_REV2&StrLanguageCode=EN&IntPcKey=&StrLayoutCode=

    High temperature processes
    =============================
    High temperature processing dominates energy consumption in the iron and steel,
    non-ferrous metal, bricks, cement, glass and potteries industries. This includes
        - coke ovens
        - blast furnaces and other furnaces
        - kilns and
        - glass tanks.

    Low temperature processes
    =============================
    Low temperature processes are the largest end use of energy for the food, drink
    and tobacco industry. This includes:
        - process heating and distillation in the chemicals sector;
        - baking and separation processes in food and drink;
        - pressing and drying processes, in paper manufacture;
        - and washing, scouring, dyeing and drying in the textiles industry.

    Drying/separation
    =============================
    Drying and separation is important in paper-making while motor processes are used
    more in the manufacture of chemicals and chemical products than in any other
    individual industry.

    Motors
    =============================
    This includes pumping, fans and machinery drives.

    Compressed air
    =============================
    Compressed air processes are mainly used in the publishing, printing and
    reproduction of recorded media sub-sector.

    Lighting
    =============================
    Lighting (along with space heating) is one of the main end uses in engineering
    (mechanical and electrical engineering and vehicles industries).

    Refrigeration
    =============================
    Refrigeration processes are mainly used in the chemicals and food and drink
    industries.

    Space heating
    =============================
    Space heating (along with lighting) is one of the main end uses in engineering
    (mechanical and electrical engineering and vehicles industries).

    Other
    =============================

    -----------------------
    Industry classes from BEIS
    -----------------------

    SIC 2007    Name
    --------    ------
    08	        Other mining and quarrying
    10	        Manufacture of food products
    11	        Manufacture of beverages
    12	        Manufacture of tobacco products
    13	        Manufacture of textiles
    14	        Manufacture of wearing apparel
    15	        Manufacture of leather and related products
    16	        Manufacture of wood and of products of wood and cork, except furniture; manufacture of articles of straw and plaiting materials
    17	        Manufacture of paper and paper products
    18	        Printing and publishing of recorded media and other publishing activities
    20	        Manufacture of chemicals and chemical products
    21	        Manufacture of basic pharmaceutical products and pharmaceutical preparations
    22	        Manufacture of rubber and plastic products
    23	        Manufacture of other non-metallic mineral products
    24	        Manufacture of basic metals
    25	        Manufacture of fabricated metal products, except machinery and equipment
    26	        Manufacture of computer, electronic and optical products
    27	        Manufacture of electrical equipment
    28	        Manufacture of machinery and equipment n.e.c.
    29	        Manufacture of motor vehicles, trailers and semi-trailers
    30	        Manufacture of other transport equipment
    31	        Manufacture of furniture
    32	        Other manufacturing
    36	        Water collection, treatment and supply
    38	        Waste collection, treatment and disposal activities; materials recovery
    """
    rows_list = []
    fuels = {}

    '''# Read csv
    raw_csv_file = pd.read_csv(path_to_csv)

    # Replace NaN with " " values
    raw_csv_file = raw_csv_file.fillna(0)

    # Enduses
    enduses = list(raw_csv_file.columns.values)'''


    with open(path_to_csv, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        headings = next(rows)
        _secondline = next(rows)

        # All enduses (taken from the column headings)
        enduses = set([])
        for enduse in headings[1:]:
            if enduse != '':
                enduses.add(enduse)

        # All sectors (taken from the first column of every row)
        sectors = set([])
        for row in rows:
            rows_list.append(row)
            sectors.add(row[0])

        # Initialise dict
        for enduse in enduses:
            fuels[enduse] = {}
            for sector in sectors:

                fuels[str(enduse)][str(sector)] = np.zeros(
                    (fueltypes_nr), dtype="float")

        for row in rows_list:
            sector = row[0]
            for position, entry in enumerate(row[1:], 1): # Start with position 1

                if entry != '':
                    enduse = str(headings[position])
                    fueltype = _secondline[position]
                    fueltype_int = tech_related.get_fueltype_int(fueltype)
                    fuels[enduse][sector][fueltype_int] += float(row[position])

    return fuels, list(sectors), list(enduses)
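
The column layout the function expects is easy to miss: the first header row carries the enduse of every column, the second row carries each column's fueltype, and the first cell of every data row is the sector. A hedged illustration of that layout and of a call to the function follows; the file name, sector/enduse names and fueltype count are made up for the example:

# Illustrative CSV layout (not a real input file):
#
#   sectors,          high_temp_process, high_temp_process, low_temp_process
#   ,                 gas,               electricity,       gas
#   basic_metals,     12.0,              3.5,               0.0
#   food_production,  0.0,               1.2,               8.4

fuels, sectors, enduses = read_fuel_is(
    path_to_csv='fuel_industry.csv',   # hypothetical file following the layout above
    fueltypes_nr=8)                    # assumed number of modelled fueltypes

# One array per (enduse, sector) with one entry per fueltype
print(fuels['high_temp_process']['basic_metals'])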
Ejemplo n.º 21
0
def read_in_weather_results(path_result, seasons, model_yeardays_daytype,
                            fueltype_str):
    """Read and post calculate results from txt files
    and store into container

    Arguments
    ---------
    path_result : str
        Paths
    seasons : dict
        seasons
    model_yeardays_daytype : dict
        Daytype of modelled yeardays
    fueltype_str : str
        Fueltype for which peak statistics are calculated

    Returns
    -------
    results_container : dict
        Read-in and post-processed results
    """
    logging.info("... Reading in results")

    fueltype_int = tech_related.get_fueltype_int(fueltype_str)

    results_container = {}

    # -----------------
    # Read in demands
    # -----------------
    # Read in total regional demands per fueltype
    results_container['ed_reg_tot_y'] = read_data.read_results_yh(
        path_result, 'only_total')

    #print(results_container['ed_reg_tot_y'][2015].shape)
    results_container['ed_reg_peakday'] = read_data.read_results_yh(
        os.path.join('simulation_results', path_result), 'only_peak')

    #print(results_container['ed_reg_peakday'][2015].shape)
    results_container['ed_reg_peakday_peak_hour'] = {}
    results_container['national_peak'] = {}
    results_container['regional_share_national_peak'] = {}

    results_container['national_all_fueltypes'] = {}

    for year in results_container['ed_reg_peakday']:
        # Get peak demand of each region
        results_container['ed_reg_peakday_peak_hour'][
            year] = results_container['ed_reg_peakday'][year].max(axis=2)

        # Get national peak
        national_demand_per_hour = results_container['ed_reg_peakday'][
            year].sum(axis=1)  #Aggregate hourly across all regions

        # Get the hour of maximum demand for the selected fueltype
        max_hour = national_demand_per_hour[fueltype_int].argmax()

        results_container['national_peak'][
            year] = national_demand_per_hour[:, max_hour]

        # Calculate regional share of peak hour to national peak
        national_peak = results_container['national_peak'][year][fueltype_int]
        regional_peak = results_container['ed_reg_peakday'][year][
            fueltype_int][:, max_hour]
        results_container['regional_share_national_peak'][year] = (
            100 / national_peak) * regional_peak  #1 = 1 %

        # Sum all regions for each fueltypes
        #print(results_container['ed_reg_tot_y'][year].shape)
        results_container['national_all_fueltypes'][year] = np.sum(
            results_container['ed_reg_tot_y'][year], axis=1)
        #print(results_container['national_all_fueltypes'][year].shape)

    logging.info("... Reading in results finished")
    return results_container
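
A hedged usage sketch of this reader; the results folder, season definition and daytype mapping below are placeholders rather than values taken from the model configuration:

seasons = {
    'spring': list(range(59, 151)),
    'summer': list(range(151, 243)),
    'autumn': list(range(243, 334)),
    'winter': list(range(334, 365)) + list(range(0, 59))}

results = read_in_weather_results(
    path_result='path/to/model_run',   # hypothetical results folder
    seasons=seasons,
    model_yeardays_daytype={},         # would normally map every yearday to 'workday'/'holiday'
    fueltype_str='electricity')

# National demand per fueltype in the peak hour of the first simulated year
first_year = min(results['national_peak'])
print(results['national_peak'][first_year])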