def fueltypes_over_time(
        scenario_result_container,
        sim_yrs,
        fig_name,
        fueltypes,
        result_path,
        plot_points=False,
        unit='TWh',
        crit_smooth_line=True,
        seperate_legend=False
):
    """Plot national fuel use per fueltype over time
    """
    statistics_to_print = []

    fig = plt.figure(figsize=basic_plot_functions.cm2inch(10, 10))  # width, height
    ax = fig.add_subplot(1, 1, 1)

    colors = {
        # Low elec
        'electricity': '#3e3838',
        'gas': '#ae7c7c',
        'hydrogen': '#6cbbb3',
    }

    line_styles_default = plotting_styles.linestyles()

    linestyles = {
        'h_max': line_styles_default[0],
        'h_min': line_styles_default[6],
        'l_min': line_styles_default[8],
        'l_max': line_styles_default[9],
    }

    for cnt_scenario, i in enumerate(scenario_result_container):
        scenario_name = i['scenario_name']

        for cnt_linestyle, fueltype_str in enumerate(fueltypes):
            national_sum = i['national_{}'.format(fueltype_str)]

            if unit == 'TWh':
                unit_factor = conversions.gwh_to_twh(1)
            elif unit == 'GWh':
                unit_factor = 1
            else:
                raise Exception("Wrong unit")

            national_sum = national_sum * unit_factor

            # Calculate quantiles
            quantile_95 = 0.95
            quantile_05 = 0.05

            try:
                color = colors[fueltype_str]
            except KeyError:
                #color = list(colors.values())[cnt_linestyle]
                raise Exception("No color defined for fueltype '{}'".format(fueltype_str))

            try:
                linestyle = linestyles[scenario_name]
            except KeyError:
                linestyle = list(linestyles.values())[cnt_scenario]

            try:
                marker = marker_styles[scenario_name]
            except KeyError:
                marker = list(marker_styles.values())[cnt_scenario]

            # Calculate average across all weather scenarios
            mean_national_sum = national_sum.mean(axis=0)
            mean_national_sum_sim_yrs = copy.copy(mean_national_sum)

            statistics_to_print.append(
                "{} fueltype_str: {} mean_national_sum_sim_yrs: {}".format(
                    scenario_name, fueltype_str, mean_national_sum_sim_yrs))

            # Quantiles over all realisations
            df_q_05 = national_sum.quantile(quantile_05)
            df_q_95 = national_sum.quantile(quantile_95)

            statistics_to_print.append(
                "{} fueltype_str: {} df_q_05: {}".format(
                    scenario_name, fueltype_str, df_q_05))
            statistics_to_print.append(
                "{} fueltype_str: {} df_q_95: {}".format(
                    scenario_name, fueltype_str, df_q_95))

            # --------------------
            # Try to smooth lines
            # --------------------
            sim_yrs_smoothed = sim_yrs
            if crit_smooth_line:
                try:
                    sim_yrs_smoothed, mean_national_sum_smoothed = basic_plot_functions.smooth_data(
                        sim_yrs, mean_national_sum, num=500)
                    _, df_q_05_smoothed = basic_plot_functions.smooth_data(
                        sim_yrs, df_q_05, num=500)
                    _, df_q_95_smoothed = basic_plot_functions.smooth_data(
                        sim_yrs, df_q_95, num=500)

                    mean_national_sum = pd.Series(mean_national_sum_smoothed, sim_yrs_smoothed)
                    df_q_05 = pd.Series(df_q_05_smoothed, sim_yrs_smoothed)
                    df_q_95 = pd.Series(df_q_95_smoothed, sim_yrs_smoothed)
                except Exception:
                    print("Smoothing did not work for {} {}".format(fueltype_str, scenario_name))

            # ------------------------
            # Plot lines
            # ------------------------
            plt.plot(
                mean_national_sum,
                label="{} {}".format(fueltype_str, scenario_name),
                linestyle=linestyle,
                color=color,
                zorder=1,
                clip_on=True)

            # ------------------------
            # Plot markers
            # ------------------------
            if plot_points:
                plt.scatter(
                    sim_yrs,
                    mean_national_sum_sim_yrs,
                    marker=marker,
                    edgecolor='black',
                    linewidth=0.5,
                    c=color,
                    zorder=2,
                    s=15,
                    clip_on=False)  # do not clip points on axis

            # Plot quantiles and average scenario
            df_q_05.plot.line(color=color, linestyle='--', linewidth=0.1, label='_nolegend_')
            df_q_95.plot.line(color=color, linestyle='--', linewidth=0.1, label='_nolegend_')

            plt.fill_between(
                sim_yrs_smoothed,
                list(df_q_95),  # y1
                list(df_q_05),  # y2
                alpha=0.25,
                facecolor=color)

    plt.xlim(2015, 2050)
    plt.ylim(0)

    ax = plt.gca()

    # Major ticks every 200, minor ticks every 100
    major_ticks = [200, 400, 600]  #np.arange(0, 600, 200)
    minor_ticks = [100, 200, 300, 400, 500, 600]  #np.arange(0, 600, 100)
    ax.set_yticks(major_ticks)
    ax.set_yticks(minor_ticks, minor=True)

    # And a corresponding grid
    ax.grid(which='both', color='black', linewidth=0.5, axis='y', linestyle=line_styles_default[3])

    # Or if you want different settings for the grids:
    ax.grid(which='minor', axis='y', alpha=0.4)
    ax.grid(which='major', axis='y', alpha=0.8)

    # Axes
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)

    # Ticks: no tick marks on the y-axis, keep only the left labels
    plt.tick_params(
        axis='y',            # changes apply to the y-axis
        which='both',        # both major and minor ticks are affected
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labeltop=False,
        labelleft=True,
        labelright=False)

    # --------
    # Legend
    # --------
    legend = plt.legend(
        ncol=2,
        prop={'size': 6},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.1),
        frameon=False)
    legend.get_title().set_fontsize(8)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            os.path.join(result_path, "{}__legend.pdf".format(fig_name)))
        legend.remove()

    # --------
    # Labeling
    # --------
    plt.ylabel("national fuel over time [in {}]".format(unit))

    plt.tight_layout()
    plt.savefig(os.path.join(result_path, fig_name))
    plt.close()

    write_data.write_list_to_txt(
        os.path.join(result_path, fig_name).replace(".pdf", ".txt"),
        statistics_to_print)
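
# ----------------------------------------------------------------------------
# Example (sketch): how fueltypes_over_time might be called. The layout of
# `scenario_result_container` (one dict per scenario with a
# 'national_<fueltype>' DataFrame of weather realisations (rows) by simulation
# years (columns), in GWh) is an assumption inferred from the function above,
# not a documented interface. It also assumes the repo helpers
# (basic_plot_functions, conversions, plotting_styles) and the module-level
# `marker_styles` dict are importable/defined.
# ----------------------------------------------------------------------------
def _example_fueltypes_over_time(result_path='.'):
    """Hypothetical demo helper, not called anywhere in the module."""
    sim_yrs = [2015, 2020, 2025, 2030, 2035, 2040, 2045, 2050]
    realisations = ['NF1', 'NF2', 'NF3']

    def _dummy_df(base):
        # Synthetic national demand in GWh per realisation and simulation year
        data = np.random.uniform(base, base * 1.2, size=(len(realisations), len(sim_yrs)))
        return pd.DataFrame(data, index=realisations, columns=sim_yrs)

    scenario_result_container = [{
        'scenario_name': 'h_max',  # must match a key in `linestyles` (or fall back by position)
        'national_electricity': _dummy_df(300000),
        'national_gas': _dummy_df(500000),
        'national_hydrogen': _dummy_df(100000)}]

    fueltypes_over_time(
        scenario_result_container=scenario_result_container,
        sim_yrs=sim_yrs,
        fig_name="fueltypes_over_time.pdf",
        fueltypes=['electricity', 'gas', 'hydrogen'],
        result_path=result_path,
        plot_points=True,
        unit='TWh')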
def scenario_over_time(
        scenario_result_container,
        sim_yrs,
        fig_name,
        result_path,
        plot_points,
        crit_smooth_line=True,
        seperate_legend=False
):
    """Plot national peak demand over time
    """
    statistics_to_print = []

    fig = plt.figure(figsize=basic_plot_functions.cm2inch(10, 10))  # width, height
    ax = fig.add_subplot(1, 1, 1)

    for cnt_scenario, i in enumerate(scenario_result_container):
        scenario_name = i['scenario_name']
        national_peak = i['national_peak']  # DataFrame with national peak (columns: simulation year, rows: realisations)

        # Calculate quantiles
        quantile_95 = 0.95
        quantile_05 = 0.05

        try:
            color = colors[scenario_name]
            marker = marker_styles[scenario_name]
        except KeyError:
            color = list(colors.values())[cnt_scenario]
            try:
                marker = marker_styles[scenario_name]
            except KeyError:
                marker = list(marker_styles.values())[cnt_scenario]

        print("SCENARIO NAME {} {}".format(scenario_name, color))

        # Calculate average across all weather scenarios
        mean_national_peak = national_peak.mean(axis=0)
        mean_national_peak_sim_yrs = copy.copy(mean_national_peak)

        statistics_to_print.append("scenario: {} values over years: {}".format(
            scenario_name, mean_national_peak_sim_yrs))

        # Quantiles over all realisations
        df_q_05 = national_peak.quantile(quantile_05)
        df_q_95 = national_peak.quantile(quantile_95)

        statistics_to_print.append("scenario: {} df_q_05: {}".format(scenario_name, df_q_05))
        statistics_to_print.append("scenario: {} df_q_95: {}".format(scenario_name, df_q_95))

        # --------------------
        # Try to smooth lines
        # --------------------
        sim_yrs_smoothed = sim_yrs
        if crit_smooth_line:
            try:
                sim_yrs_smoothed, mean_national_peak_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, mean_national_peak, num=40000)
                _, df_q_05_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, df_q_05, num=40000)
                _, df_q_95_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, df_q_95, num=40000)

                mean_national_peak = pd.Series(mean_national_peak_smoothed, sim_yrs_smoothed)
                df_q_05 = pd.Series(df_q_05_smoothed, sim_yrs_smoothed)
                df_q_95 = pd.Series(df_q_95_smoothed, sim_yrs_smoothed)
            except Exception:
                sim_yrs_smoothed = sim_yrs

        # ------------------------
        # Plot lines
        # ------------------------
        plt.plot(
            mean_national_peak,
            label="{} (mean)".format(scenario_name),
            color=color)

        # ------------------------
        # Plot markers
        # ------------------------
        if plot_points:
            plt.scatter(
                sim_yrs,
                mean_national_peak_sim_yrs,
                c=color,
                marker=marker,
                edgecolor='black',
                linewidth=0.5,
                s=15,
                clip_on=False)  # do not clip points on axis

        # Plot quantiles and average scenario
        df_q_05.plot.line(color=color, linestyle='--', linewidth=0.1, label='_nolegend_')
        df_q_95.plot.line(color=color, linestyle='--', linewidth=0.1, label='_nolegend_')

        plt.fill_between(
            sim_yrs_smoothed,
            list(df_q_95),  # y1
            list(df_q_05),  # y2
            alpha=0.25,
            facecolor=color)

    plt.xlim(2015, 2050)
    plt.ylim(0)

    # --------
    # Styling
    # --------
    ax = plt.gca()
    ax.grid(which='major', color='black', axis='y', linestyle='--')
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)

    # Ticks: no tick marks on the y-axis, keep only the left labels
    plt.tick_params(
        axis='y',            # changes apply to the y-axis
        which='both',        # both major and minor ticks are affected
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labeltop=False,
        labelleft=True,
        labelright=False)

    # --------
    # Legend
    # --------
    legend = plt.legend(
        ncol=2,
        prop={'size': 10},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.1),
        frameon=False)
    legend.get_title().set_fontsize(8)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            os.path.join(result_path, "{}__legend.pdf".format(fig_name)))
        legend.remove()

    # --------
    # Labeling
    # --------
    plt.ylabel("national peak demand (GW)")

    plt.tight_layout()
    plt.savefig(os.path.join(result_path, fig_name))
    plt.close()

    # Write info to txt
    write_data.write_list_to_txt(
        os.path.join(result_path, fig_name).replace(".pdf", ".txt"),
        statistics_to_print)
def total_annual_demand(
        df_data_input,
        path_shapefile_input,
        regions,
        pop_data,
        simulation_yr_to_plot,
        result_path,
        fig_name,
        field_to_plot,
        unit='GW',
        seperate_legend=True,
        bins=False
):
    """Plot regional statistics of annual demand as a choropleth map
    """
    if unit == 'GW':
        conversion_factor = 1
    elif unit == 'kW':
        conversion_factor = conversions.gwh_to_kwh(gwh=1)  # GWh to kWh
    elif unit == 'percentage':
        conversion_factor = 1
    else:
        raise Exception("Unit not defined: {}".format(unit))

    df_data_input = df_data_input * conversion_factor

    # Load uk shapefile
    uk_shapefile = gpd.read_file(path_shapefile_input)

    # Population of simulation year
    pop_sim_yr = pop_data[simulation_yr_to_plot]

    regions = list(df_data_input.columns)
    nr_of_regions = df_data_input.shape[1]
    nr_of_realisations = df_data_input.shape[0]

    # Mean over all realisations
    mean = df_data_input.mean(axis=0)

    # Mean normalised with population
    mean_norm_pop = df_data_input.mean(axis=0) / pop_sim_yr

    # Standard deviation over all realisations
    std_dev = df_data_input.std(axis=0)

    max_entry = df_data_input.max(axis=0)  # maximum entry for every region
    min_entry = df_data_input.min(axis=0)  # minimum entry for every region

    print("---- Calculate average per person")
    tot_person = sum(pop_sim_yr)
    tot_demand = sum(df_data_input.iloc[0])
    print("TOT PERSON: " + str(tot_person))
    print("TOT DEMAND: " + str(tot_demand))
    print("AVERAGE {} PER PERSON: ".format(unit) + str(tot_demand / tot_person))

    regional_statistics_columns = ['name', 'mean', 'mean_norm_pop', 'std_dev']
        #'diff_av_max',
        #'mean_pp',
        #'diff_av_max_pp',
        #'std_dev_average_every_h',
        #'std_dev_peak_h_norm_pop'

    df_stats = pd.DataFrame(columns=regional_statistics_columns)

    for region_name in regions:
        line_entry = [[
            str(region_name),
            mean[region_name],
            mean_norm_pop[region_name],
            std_dev[region_name]
            #diff_av_max,
            #mean_peak_h_pp,
            #diff_av_max_pp,
            #std_dev_average_every_h,
            #std_dev_peak_h_norm_pop
        ]]

        line_df = pd.DataFrame(line_entry, columns=regional_statistics_columns)
        df_stats = df_stats.append(line_df)

    # ---------------
    # Create spatial maps
    # http://darribas.org/gds15/content/labs/lab_03.html
    # http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
    # ---------------
    # Merge stats to geopanda
    shp_gdp_merged = uk_shapefile.merge(df_stats, on='name')

    # Assign projection
    crs = {'init': 'epsg:27700'}  # 27700: OSGB_1936_British_National_Grid
    uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)

    ax = uk_gdf.plot()

    # Assign bin colors according to defined cmap and whether
    # plot with min_max values or only min/max values
    #bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
    nr_of_intervals = 6

    if bins:
        bin_values = bins
    else:
        bin_values = result_mapping.get_reasonable_bin_values_II(
            data_to_plot=list(uk_gdf[field_to_plot]),
            nr_of_intervals=nr_of_intervals)

    #print("field_to_plot: {} BINS: {}".format(field_to_plot, bin_values))
    uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = fig_p2_weather_val.user_defined_bin_classification(
        uk_gdf,
        field_to_plot,
        bin_values=bin_values)

    # Plot with face color attribute
    uk_gdf.plot(ax=ax, facecolor=uk_gdf['bin_color'], edgecolor='black', linewidth=0.5)

    # TODO IMPROVE: MAKE CORRECT ONE FOR NEW PROCESSING
    legend_handles = result_mapping.get_legend_handles(
        bin_values[1:-1],
        cmap_rgb_colors,
        color_zero,
        min_value,
        max_value)

    legend = plt.legend(
        handles=legend_handles,
        title="Unit: {} field: {}".format(unit, field_to_plot),
        prop={'size': 8},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.05),
        frameon=False)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            os.path.join(result_path, "{}__legend.pdf".format(fig_name)))
        legend.remove()

    # Remove coordinates from figure
    ax.set_yticklabels([])
    ax.set_xticklabels([])

    legend.get_title().set_fontsize(8)

    # Plot bins on plot
    #plt.text(0, -20, bin_values[:-1], fontsize=8)

    # --------
    # Labeling
    # --------
    plt.tight_layout()
    plt.savefig(os.path.join(result_path, fig_name))
    plt.close()
def plotting_weather_data(path):
    """Plot annual weather data of all realisations

    Things to plot:
      - annual t_min of all realizations
      - annual t_max of all realizations
      - annual maximum t_max
      - annual minimum t_min
    """
    sim_yrs = range(2015, 2051, 5)
    weather_realisations = ["NF{}".format(i) for i in range(1, 101, 1)]

    container_weather_stations = {}
    container_temp_data = {}

    # All used weather stations from model run
    used_stations = [
        'station_id_253', 'station_id_252', 'station_id_253', 'station_id_252', 'station_id_252', 'station_id_328', 'station_id_329', 'station_id_305', 'station_id_282', 'station_id_335', 'station_id_335',
        'station_id_359', 'station_id_358', 'station_id_309', 'station_id_388', 'station_id_418', 'station_id_420', 'station_id_389', 'station_id_433', 'station_id_385', 'station_id_374',
        'station_id_481', 'station_id_481', 'station_id_480', 'station_id_466', 'station_id_531', 'station_id_532', 'station_id_535', 'station_id_535', 'station_id_484', 'station_id_421', 'station_id_472',
        'station_id_526', 'station_id_525', 'station_id_526', 'station_id_504', 'station_id_503', 'station_id_504', 'station_id_505', 'station_id_504', 'station_id_504', 'station_id_455',
        'station_id_548', 'station_id_546', 'station_id_537', 'station_id_545', 'station_id_236', 'station_id_353', 'station_id_352', 'station_id_384', 'station_id_510', 'station_id_527', 'station_id_550', 'station_id_501',
        'station_id_456', 'station_id_472', 'station_id_201', 'station_id_470', 'station_id_487', 'station_id_505', 'station_id_486', 'station_id_457', 'station_id_533', 'station_id_458', 'station_id_441', 'station_id_440',
        'station_id_473', 'station_id_217', 'station_id_247', 'station_id_199', 'station_id_232', 'station_id_234', 'station_id_478', 'station_id_248',
        'station_id_388', 'station_id_377', 'station_id_376', 'station_id_376', 'station_id_388', 'station_id_354', 'station_id_376', 'station_id_388',
        'station_id_515', 'station_id_514', 'station_id_514', 'station_id_531', 'station_id_532', 'station_id_494', 'station_id_512', 'station_id_535', 'station_id_535', 'station_id_517', 'station_id_534', 'station_id_533',
        'station_id_549', 'station_id_549', 'station_id_550', 'station_id_549', 'station_id_508', 'station_id_490', 'station_id_507', 'station_id_526', 'station_id_508', 'station_id_491', 'station_id_507', 'station_id_489',
        'station_id_509', 'station_id_526', 'station_id_545', 'station_id_492', 'station_id_490', 'station_id_451', 'station_id_467', 'station_id_450', 'station_id_451', 'station_id_466', 'station_id_451',
        'station_id_521', 'station_id_538', 'station_id_537', 'station_id_537', 'station_id_522', 'station_id_546', 'station_id_536', 'station_id_522', 'station_id_520', 'station_id_537',
        'station_id_488', 'station_id_487', 'station_id_488', 'station_id_472', 'station_id_487', 'station_id_487', 'station_id_551', 'station_id_544', 'station_id_419', 'station_id_525', 'station_id_552', 'station_id_525',
        'station_id_543', 'station_id_542', 'station_id_552', 'station_id_543', 'station_id_544', 'station_id_542', 'station_id_542',
        'station_id_306', 'station_id_305', 'station_id_282', 'station_id_306', 'station_id_406', 'station_id_264', 'station_id_306', 'station_id_283', 'station_id_284', 'station_id_306', 'station_id_305', 'station_id_304', 'station_id_283',
        'station_id_418', 'station_id_403', 'station_id_419', 'station_id_418', 'station_id_403', 'station_id_402', 'station_id_392', 'station_id_379', 'station_id_391', 'station_id_422', 'station_id_404', 'station_id_379',
        'station_id_443', 'station_id_444', 'station_id_445', 'station_id_423',
'station_id_469', 'station_id_425', 'station_id_444', 'station_id_461', 'station_id_438', 'station_id_437', 'station_id_439', 'station_id_438', 'station_id_455', 'station_id_454', 'station_id_438', 'station_id_284', 'station_id_268', 'station_id_286', 'station_id_266', 'station_id_288', 'station_id_270', 'station_id_333', 'station_id_389', 'station_id_378', 'station_id_389', 'station_id_389', 'station_id_377', 'station_id_390', 'station_id_403', 'station_id_469', 'station_id_485', 'station_id_484', 'station_id_469', 'station_id_499', 'station_id_498', 'station_id_516', 'station_id_497', 'station_id_479', 'station_id_400', 'station_id_387', 'station_id_401', 'station_id_374', 'station_id_400', 'station_id_386', 'station_id_375', 'station_id_401', 'station_id_491', 'station_id_459', 'station_id_492', 'station_id_476', 'station_id_475', 'station_id_477', 'station_id_462', 'station_id_523', 'station_id_523', 'station_id_522', 'station_id_523', 'station_id_523', 'station_id_523', 'station_id_505', 'station_id_522', 'station_id_541', 'station_id_539', 'station_id_523', 'station_id_417', 'station_id_417', 'station_id_437', 'station_id_436', 'station_id_436', 'station_id_548', 'station_id_547', 'station_id_488', 'station_id_539', 'station_id_540', 'station_id_540', 'station_id_540', 'station_id_547', 'station_id_416', 'station_id_434', 'station_id_435', 'station_id_434', 'station_id_434', 'station_id_415', 'station_id_488', 'station_id_488', 'station_id_489', 'station_id_329', 'station_id_330', 'station_id_330', 'station_id_330', 'station_id_330', 'station_id_329', 'station_id_354', 'station_id_330', 'station_id_329', 'station_id_329', 'station_id_328', 'station_id_328', 'station_id_328', 'station_id_304', 'station_id_327', 'station_id_331', 'station_id_357', 'station_id_356', 'station_id_355', 'station_id_221', 'station_id_221', 'station_id_221', 'station_id_237', 'station_id_416', 'station_id_417', 'station_id_416', 'station_id_416', 'station_id_417', 'station_id_400', 'station_id_400', 'station_id_307', 'station_id_307', 'station_id_331', 'station_id_308', 'station_id_332', 'station_id_221', 'station_id_506', 'station_id_507', 'station_id_506', 'station_id_525', 'station_id_506', 'station_id_524', 'station_id_506', 'station_id_524', 'station_id_505', 'station_id_506', 'station_id_524', 'station_id_506', 'station_id_506', 'station_id_506', 'station_id_505', 'station_id_507', 'station_id_505', 'station_id_505', 'station_id_506', 'station_id_506', 'station_id_523', 'station_id_524', 'station_id_524', 'station_id_524', 'station_id_506', 'station_id_507', 'station_id_505', 'station_id_524', 'station_id_524', 'station_id_506', 'station_id_506', 'station_id_524', 'station_id_506', 'station_id_172', 'station_id_193', 'station_id_173', 'station_id_133', 'station_id_130', 'station_id_168', 'station_id_194', 'station_id_152', 'station_id_170', 'station_id_214', 'station_id_195', 'station_id_103', 'station_id_124', 'station_id_110', 'station_id_176', 'station_id_136', 'station_id_125', 'station_id_121', 'station_id_9', 'station_id_111', 'station_id_105', 'station_id_36', 'station_id_108', 'station_id_52', 'station_id_119', 'station_id_4', 'station_id_94', 'station_id_159', 'station_id_137', 'station_id_102', 'station_id_77', 'station_id_303', 'station_id_2', 'station_id_154', 'station_id_64', 'station_id_89', 'station_id_124', 'station_id_109', 'station_id_109', 'station_id_123', 'station_id_86', 'station_id_105', 'station_id_110', 'station_id_110', 'station_id_448', 'station_id_348', 'station_id_349', 
        'station_id_350', 'station_id_351', 'station_id_372', 'station_id_394', 'station_id_449', 'station_id_464', 'station_id_407', 'station_id_428', 'station_id_446',
        'station_id_446', 'station_id_463', 'station_id_463', 'station_id_464', 'station_id_447', 'station_id_448', 'station_id_448', 'station_id_396', 'station_id_447']

    # Load full data
    for weather_realisation in weather_realisations:
        print("weather_realisation: " + str(weather_realisation))
        path_weather_data = path

        weather_stations, temp_data = data_loader.load_temp_data(
            {},
            sim_yrs=sim_yrs,
            weather_realisation=weather_realisation,
            path_weather_data=path_weather_data,
            same_base_year_weather=False,
            crit_temp_min_max=True,
            load_np=False,
            load_parquet=False,
            load_csv=True)

        # Load only data from selected weather stations
        temp_data_used = {}
        for year in sim_yrs:
            temp_data_used[year] = {}
            all_station_data = temp_data[year].keys()

            for station in all_station_data:
                if station in used_stations:
                    temp_data_used[year][station] = temp_data[year][station]

        container_weather_stations[weather_realisation] = weather_stations
        container_temp_data[weather_realisation] = temp_data_used

    # Create plot with annual min and max temperatures
    print("... creating min max plot")

    t_min_average_every_day = []
    t_max_average_every_day = []
    t_min_min_every_day = []
    t_max_max_every_day = []

    std_dev_t_min = []
    std_dev_t_max = []
    std_dev_t_min_min = []
    std_dev_t_max_max = []

    for year in sim_yrs:
        for realization in container_weather_stations.keys():
            t_min_average_stations = []
            t_max_average_stations = []
            t_min_min_average_stations = []
            t_max_max_average_stations = []

            stations_data = container_temp_data[realization][year]

            for station in stations_data.keys():
                t_min_annual_average = np.average(stations_data[station]['t_min'])
                t_max_annual_average = np.average(stations_data[station]['t_max'])
                t_min_min_stations = np.min(stations_data[station]['t_min'])
                t_max_max_stations = np.max(stations_data[station]['t_max'])

                t_min_average_stations.append(t_min_annual_average)  # average across all stations
                t_max_average_stations.append(t_max_annual_average)  # average across all stations
                t_min_min_average_stations.append(t_min_min_stations)
                t_max_max_average_stations.append(t_max_max_stations)

        av_t_min = np.average(t_min_average_stations)           # average across all realizations
        av_t_max = np.average(t_max_average_stations)           # average across all realizations
        av_min_t_min = np.average(t_min_min_average_stations)   # average across all realizations
        av_max_t_max = np.average(t_max_max_average_stations)   # average across all realizations

        std_t_min = np.std(t_min_average_stations)
        std_t_max = np.std(t_max_average_stations)
        std_t_min_min = np.std(t_min_min_average_stations)
        std_t_max_max = np.std(t_max_max_average_stations)

        t_min_average_every_day.append(av_t_min)
        t_max_average_every_day.append(av_t_max)
        t_min_min_every_day.append(av_min_t_min)
        t_max_max_every_day.append(av_max_t_max)

        std_dev_t_min.append(std_t_min)
        std_dev_t_max.append(std_t_max)
        std_dev_t_min_min.append(std_t_min_min)
        std_dev_t_max_max.append(std_t_max_max)

    # Plot variability
    fig = plt.figure(figsize=basic_plot_functions.cm2inch(9, 6))  # width, height

    colors = {
        't_min': 'steelblue',
        't_max': 'tomato',
        't_min_min': 'peru',
        't_max_max': 'r'}

    # Plot lines
    plt.plot(sim_yrs, t_min_average_every_day, color=colors['t_min'], label="t_min")
    plt.plot(sim_yrs, t_max_average_every_day, color=colors['t_max'], label="t_max")
    plt.plot(sim_yrs, t_min_min_every_day, color=colors['t_min_min'], label="t_min_min")
    plt.plot(sim_yrs, t_max_max_every_day, color=colors['t_max_max'], label="t_max_max")

    # Variations (+/- 2 standard deviations)
    plt.fill_between(
        sim_yrs,
        list(np.array(t_min_average_every_day) - (2 * np.array(std_dev_t_min))),
        list(np.array(t_min_average_every_day) + (2 * np.array(std_dev_t_min))),
        color=colors['t_min'],
        alpha=0.25)

    plt.fill_between(
        sim_yrs,
        list(np.array(t_max_average_every_day) - (2 * np.array(std_dev_t_max))),
        list(np.array(t_max_average_every_day) + (2 * np.array(std_dev_t_max))),
        color=colors['t_max'],
        alpha=0.25)

    plt.fill_between(
        sim_yrs,
        list(np.array(t_min_min_every_day) - (2 * np.array(std_dev_t_min_min))),
        list(np.array(t_min_min_every_day) + (2 * np.array(std_dev_t_min_min))),
        color=colors['t_min_min'],
        alpha=0.25)

    plt.fill_between(
        sim_yrs,
        list(np.array(t_max_max_every_day) - (2 * np.array(std_dev_t_max_max))),
        list(np.array(t_max_max_every_day) + (2 * np.array(std_dev_t_max_max))),
        color=colors['t_max_max'],
        alpha=0.25)

    # Legend
    legend = plt.legend(
        ncol=2,
        prop={'size': 10},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.1),
        frameon=False)
    legend.get_title().set_fontsize(8)

    result_path = "C:/_scrap/"
    seperate_legend = True

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            os.path.join(result_path, "test__legend.pdf"))
        legend.remove()

    plt.legend(ncol=2)
    plt.xlabel("Year")
    plt.ylabel("Temperature (°C)")
    plt.tight_layout()
    plt.margins(x=0)

    fig.savefig(os.path.join(result_path, "test.pdf"))
def scenario_over_time(
        scenario_result_container,
        field_name,
        sim_yrs,
        fig_name,
        result_path,
        plot_points,
        crit_smooth_line=True,
        seperate_legend=False
):
    """Plot national peak demand over time

    Note: this definition shadows the earlier ``scenario_over_time``
    function above, which it supersedes.
    """
    statistics_to_print = []

    fig = plt.figure(figsize=basic_plot_functions.cm2inch(10, 10))  # width, height
    ax = fig.add_subplot(1, 1, 1)

    for cnt_scenario, i in enumerate(scenario_result_container):
        scenario_name = i['scenario_name']
        national_peak = i[field_name]  # DataFrame with national peak (columns: simulation year, rows: realisations)

        # Calculate quantiles
        #quantile_95 = 0.68  #0.95
        #quantile_05 = 0.32  #0.05

        try:
            color = colors[scenario_name]
            marker = marker_styles[scenario_name]
        except KeyError:
            color = list(colors.values())[cnt_scenario]
            try:
                marker = marker_styles[scenario_name]
            except KeyError:
                marker = list(marker_styles.values())[cnt_scenario]

        #print("SCENARIO NAME {} {}".format(scenario_name, color))

        # Calculate average across all weather scenarios
        mean_national_peak = national_peak.mean(axis=0)
        mean_national_peak_sim_yrs = copy.copy(mean_national_peak)

        statistics_to_print.append("scenario: {} values over years: {}".format(
            scenario_name, mean_national_peak_sim_yrs))

        # Quantiles over all realisations
        #df_q_05 = national_peak.quantile(quantile_05)
        #df_q_95 = national_peak.quantile(quantile_95)

        # Number of sigma
        nr_of_sigma = 1
        std_dev = national_peak.std(axis=0)
        two_std_line_pos = mean_national_peak + (nr_of_sigma * std_dev)
        two_std_line_neg = mean_national_peak - (nr_of_sigma * std_dev)

        # Maximum and minimum values
        max_values = national_peak.max()
        min_values = national_peak.min()
        median_values = national_peak.median()

        statistics_to_print.append("scenario: {} two_sigma_pos: {}".format(scenario_name, two_std_line_pos))
        statistics_to_print.append("scenario: {} two_sigma_neg: {}".format(scenario_name, two_std_line_neg))
        statistics_to_print.append("--------min-------------- {}".format(scenario_name))
        statistics_to_print.append("{}".format(min_values))  # minimum value for every simulation year over all realisations
        statistics_to_print.append("--------max-------------- {}".format(scenario_name))
        statistics_to_print.append("{}".format(max_values))
        statistics_to_print.append("--------median-------------- {}".format(scenario_name))
        statistics_to_print.append("{}".format(median_values))

        # --------------------
        # Try to smooth lines
        # --------------------
        sim_yrs_smoothed = sim_yrs
        if crit_smooth_line:
            try:
                sim_yrs_smoothed, mean_national_peak_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, mean_national_peak, num=40000)
                _, two_std_line_pos_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, two_std_line_pos, num=40000)
                _, two_std_line_neg_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, two_std_line_neg, num=40000)
                _, max_values_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, max_values, num=40000)
                _, min_values_smoothed = basic_plot_functions.smooth_data(
                    sim_yrs, min_values, num=40000)

                mean_national_peak = pd.Series(mean_national_peak_smoothed, sim_yrs_smoothed)
                two_std_line_pos = pd.Series(two_std_line_pos_smoothed, sim_yrs_smoothed)
                two_std_line_neg = pd.Series(two_std_line_neg_smoothed, sim_yrs_smoothed)
                max_values = pd.Series(max_values_smoothed, sim_yrs_smoothed).values
                min_values = pd.Series(min_values_smoothed, sim_yrs_smoothed).values
            except Exception:
                sim_yrs_smoothed = sim_yrs

        # -----------------------
        # Plot lines
        # ------------------------
        plt.plot(
            mean_national_peak,
            label="{} (mean)".format(scenario_name),
            zorder=1,
            color=color)

        # ------------------------
        # Plot markers
        # ------------------------
        if plot_points:
            plt.scatter(
                sim_yrs,
                mean_national_peak_sim_yrs,
                c=color,
                marker=marker,
                edgecolor='black',
                linewidth=0.5,
                zorder=2,
                s=15,
                clip_on=False)  # do not clip points on axis

        # ------------------
        # Start with uncertainty one model step later (=> 2020)
        # ------------------
        start_yr_uncertainty = 2020

        if crit_smooth_line:
            # Get position in array of start year of uncertainty
            pos_unc_yr = len(np.where(sim_yrs_smoothed < start_yr_uncertainty)[0])
        else:
            pos_unc_yr = 0
            for cnt, year in enumerate(sim_yrs_smoothed):
                if year == start_yr_uncertainty:
                    pos_unc_yr = cnt

        # Select based on index (which is the simulation year)
        two_std_line_pos = two_std_line_pos.loc[start_yr_uncertainty:]
        two_std_line_neg = two_std_line_neg.loc[start_yr_uncertainty:]

        sim_yrs_smoothed = sim_yrs_smoothed[pos_unc_yr:]
        min_values = min_values[pos_unc_yr:]
        max_values = max_values[pos_unc_yr:]

        # --------------------------------------
        # Plot uncertainty bands and average scenario
        # --------------------------------------
        # Plot minimum and maximum values
        plt.plot(sim_yrs_smoothed, min_values, color=color, linestyle='--', linewidth=0.3, label='_nolegend_')
        plt.plot(sim_yrs_smoothed, max_values, color=color, linestyle='--', linewidth=0.3, label='_nolegend_')

        plt.fill_between(
            sim_yrs_smoothed,
            list(two_std_line_pos),
            list(two_std_line_neg),
            alpha=0.25,
            facecolor=color)

    plt.xlim(2015, 2050)
    plt.ylim(0)

    # --------
    # Styling
    # --------
    ax = plt.gca()
    ax.grid(which='major', color='black', axis='y', linestyle='--')
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['top'].set_visible(False)

    # Ticks: no tick marks on the y-axis, keep only the left labels
    plt.tick_params(
        axis='y',            # changes apply to the y-axis
        which='both',        # both major and minor ticks are affected
        bottom=False,
        top=False,
        left=False,
        right=False,
        labelbottom=False,
        labeltop=False,
        labelleft=True,
        labelright=False)

    # --------
    # Legend
    # --------
    legend = plt.legend(
        ncol=2,
        prop={'size': 10},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.1),
        frameon=False)
    legend.get_title().set_fontsize(8)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            os.path.join(result_path, "{}__legend.pdf".format(fig_name[:-4])))
        legend.remove()

    # --------
    # Labeling
    # --------
    plt.ylabel("national peak demand (GW)")

    plt.tight_layout()
    plt.savefig(os.path.join(result_path, fig_name))
    plt.close()

    # Write info to txt
    write_data.write_list_to_txt(
        os.path.join(result_path, fig_name).replace(".pdf", ".txt"),
        statistics_to_print)
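
# ----------------------------------------------------------------------------
# Example (sketch): expected input for the scenario_over_time function above.
# The layout of the peak DataFrame (weather realisations as rows, simulation
# years as columns, values in GW) is an assumption inferred from the code, not
# a documented interface; it also assumes the module-level `colors` and
# `marker_styles` dicts and the repo helper basic_plot_functions.smooth_data.
# ----------------------------------------------------------------------------
def _example_scenario_over_time(result_path='.'):
    """Hypothetical demo helper, not called anywhere in the module."""
    sim_yrs = [2015, 2020, 2025, 2030, 2035, 2040, 2045, 2050]

    # Synthetic national peak demand: 10 weather realisations x simulation years
    peak_gw = pd.DataFrame(
        np.random.uniform(50, 70, size=(10, len(sim_yrs))),
        columns=sim_yrs)

    scenario_over_time(
        scenario_result_container=[{'scenario_name': 'h_max', 'national_peak': peak_gw}],
        field_name='national_peak',
        sim_yrs=sim_yrs,
        fig_name="national_peak.pdf",
        result_path=result_path,
        plot_points=True)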
def plot_4_cross_map_OLD(
        cmap_rgb_colors,
        reclassified,
        result_path,
        path_shapefile_input,
        threshold=None,
        seperate_legend=False
):
    """Plot classified 4-cross map
    """
    # Load uk shapefile
    uk_shapefile = gpd.read_file(path_shapefile_input)

    # Merge stats to geopanda
    shp_gdp_merged = uk_shapefile.merge(reclassified, on='name')

    # Assign projection
    crs = {'init': 'epsg:27700'}  # 27700: OSGB_1936_British_National_Grid
    uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)

    ax = uk_gdf.plot()

    uk_gdf['facecolor'] = 'white'
    for region in uk_gdf.index:
        reclassified_value = uk_gdf.loc[region]['reclassified']
        uk_gdf.loc[region, 'facecolor'] = cmap_rgb_colors[reclassified_value]

    # Plot with face color attribute
    uk_gdf.plot(ax=ax, facecolor=uk_gdf['facecolor'], edgecolor='black', linewidth=0.1)

    legend_handles = [
        mpatches.Patch(color=cmap_rgb_colors[0], label=str("+- thr {}".format(threshold))),
        mpatches.Patch(color=cmap_rgb_colors[1], label=str("a")),
        mpatches.Patch(color=cmap_rgb_colors[2], label=str("b")),
        mpatches.Patch(color=cmap_rgb_colors[3], label=str("c")),
        mpatches.Patch(color=cmap_rgb_colors[4], label=str("d"))]

    legend = plt.legend(
        handles=legend_handles,
        prop={'size': 8},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.05),
        frameon=False)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            "{}__legend.pdf".format(result_path))
        legend.remove()

    # Remove coordinates from figure
    ax.set_yticklabels([])
    ax.set_xticklabels([])

    legend.get_title().set_fontsize(8)

    # --------
    # Labeling
    # --------
    plt.tight_layout()
    plt.savefig(result_path)
    plt.close()
def plot_4_cross_map(
        cmap_rgb_colors,
        reclassified,
        result_path,
        path_shapefile_input,
        threshold=None,
        seperate_legend=False
):
    """Plot classified 4-cross map
    """
    # --------------
    # Use Cartopy to plot geometries with reclassified facecolor
    # --------------
    plt.figure(figsize=basic_plot_functions.cm2inch(10, 10))  #, dpi=150

    proj = ccrs.OSGB()  # 'epsg:27700'
    ax = plt.axes(projection=proj)
    ax.outline_patch.set_visible(False)

    # Set up a dict to hold geometries keyed by region name
    geoms_by_key = defaultdict(list)

    # For each record, pick out the region name
    # and store the geometry in the relevant list under geoms_by_key
    for record in shpreader.Reader(path_shapefile_input).records():
        region_name = record.attributes['name']
        geoms_by_key[region_name].append(record.geometry)

    # Now we have all the geometries in lists for each region name.
    # Add them to the axis, using the reclassified color as facecolor
    for key, geoms in geoms_by_key.items():
        region_reclassified_value = reclassified.loc[key]['reclassified']
        facecolor = cmap_rgb_colors[region_reclassified_value]
        ax.add_geometries(geoms, crs=proj, edgecolor='black', facecolor=facecolor, linewidth=0.1)

    # --------------
    # Create Legend
    # --------------
    legend_handles = [
        mpatches.Patch(color=cmap_rgb_colors[0], label=str("+- threshold {}".format(threshold))),
        mpatches.Patch(color=cmap_rgb_colors[1], label=str("a")),
        mpatches.Patch(color=cmap_rgb_colors[2], label=str("b")),
        mpatches.Patch(color=cmap_rgb_colors[3], label=str("c")),
        mpatches.Patch(color=cmap_rgb_colors[4], label=str("d"))]

    legend = plt.legend(
        handles=legend_handles,
        prop={'size': 8},
        loc='upper center',
        bbox_to_anchor=(0.5, -0.05),
        frameon=False)

    if seperate_legend:
        basic_plot_functions.export_legend(
            legend,
            "{}__legend.pdf".format(result_path))
        legend.remove()

    # Remove coordinates from figure
    ax.set_yticklabels([])
    ax.set_xticklabels([])

    legend.get_title().set_fontsize(8)

    # --------
    # Labeling
    # --------
    plt.tight_layout()
    plt.savefig(result_path)
    plt.close()
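
# ----------------------------------------------------------------------------
# Example (sketch): expected inputs for plot_4_cross_map. The shape of
# `reclassified` (indexed by region 'name', carrying an integer 'reclassified'
# class in 0..4) and of `cmap_rgb_colors` (class -> color) is an assumption
# inferred from the lookups above; the shapefile path, region names and colors
# are placeholders.
# ----------------------------------------------------------------------------
def _example_plot_4_cross_map(path_shapefile_input, out_fig_path):
    """Hypothetical demo helper, not called anywhere in the module."""
    cmap_rgb_colors = {0: 'lightgrey', 1: '#a6cee3', 2: '#1f78b4', 3: '#b2df8a', 4: '#33a02c'}

    # One classification value per region, indexed by the shapefile 'name' attribute
    reclassified = pd.DataFrame(
        {'reclassified': [0, 1, 2, 3, 4]},
        index=['E06000001', 'E06000002', 'E06000003', 'E06000004', 'E06000005'])

    plot_4_cross_map(
        cmap_rgb_colors=cmap_rgb_colors,
        reclassified=reclassified,
        result_path=out_fig_path,
        path_shapefile_input=path_shapefile_input,
        threshold=0.01,
        seperate_legend=False)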