def plot_irena_poweroutput_validation(p_out_eia, p_out_irena):
    """Compare annual power output according to EIA vs. IRENA.

    Top panel: both time series in TWh/year; bottom panel: relative
    difference in percent. Also writes the maximum relative deviation
    via write_data_value().

    Parameters
    ----------
    p_out_eia : power output time series according to EIA (presumably in
        GWh/year, since the factor 1e-3 yields the TWh/year axis — TODO confirm)
    p_out_irena : power output time series according to IRENA (same unit)

    Returns
    -------
    matplotlib Figure

    """
    fig, axes = plt.subplots(2, figsize=FIGSIZE, sharex=True)

    (1e-3 * p_out_eia).plot.line(label="EIA", ax=axes[0], color=TURBINE_COLORS[3], marker="o")
    (1e-3 * p_out_irena).plot.line(label="IRENA", ax=axes[0], color=TURBINE_COLORS[4], marker="o")
    axes[0].set_ylabel("Power output (TWh/Year)")
    axes[0].set_xlabel("")
    axes[0].grid()
    axes[0].legend()

    rel_difference = 100 * p_out_irena / p_out_eia - 100

    # bugfix: plot explicitly on the second axis instead of relying on
    # matplotlib's implicit "current axes" state
    rel_difference.plot.line(
        label="Relative difference (IRENA - EIA)", color="k", marker="o", ax=axes[1])
    axes[1].set_ylabel("Relative difference (%)")
    axes[1].set_xlabel("")
    axes[1].grid()
    axes[1].legend()
    axes[1].xaxis.set_major_locator(MaxNLocator(integer=True))

    write_data_value(
        "irena_poweroutput_max_deviation",
        f"{float(rel_difference.max()):.1f}",
    )

    return fig
def capacity_growth():
    """Write the total installed capacity in GW for the years 2010 and 2019."""
    turbines = load_turbines()
    for year in (2010, 2019):
        built_until_year = turbines.sel(turbines=turbines.p_year <= year)
        # factor 1e-6 converts the summed capacity to GW (t_cap presumably
        # in kW — TODO confirm against load_turbines())
        capacity_gw = built_until_year.t_cap.sum().values * 1e-6
        write_data_value(
            f"installed_capacity_gw_{year}",
            f"{capacity_gw:.0f}",
        )
def number_of_turbines():
    """Write the cumulative number of turbines installed by 2010 and 2019.

    Counts turbines whose commissioning year ``p_year`` is at or before the
    given year and stores the counts via write_data_value().
    """
    turbines = load_turbines()

    # bugfix: the original computed `(turbines.p_year <= 2010).sum().compute()`
    # and discarded the result — a dead (and potentially expensive) computation.
    for year, key in ((2010, "number-of-turbines-start"),
                      (2019, "number-of-turbines-end")):
        write_data_value(
            key,
            f"{(turbines.p_year <= year).sum().values:,d}",
        )
def specific_power():
    """Write the yearly mean specific power of turbines for 2010 and 2019.

    Specific power is the turbine capacity divided by its rotor swept area,
    averaged over all turbines grouped by commissioning year.
    """
    turbines = load_turbines()

    # rotor swept area from the rotor diameter: pi * (d/2)^2
    swept_area = turbines.t_rd**2 / 4 * np.pi

    specific_power_per_year = (
        (turbines.t_cap * KILO_TO_ONE / swept_area)
        .groupby(turbines.p_year)
        .mean()
    )

    for year, suffix in ((2010, "start"), (2019, "end")):
        write_data_value(
            f"specific-power-{suffix}",
            f"{specific_power_per_year.sel(p_year=year).values:.0f}",
        )
def plot_missing_uswtdb_data():
    """Plot the cumulative share of turbines with missing metadata.

    For hub height, rotor diameter and capacity, plots the percentage of all
    turbines built up to each year for which the respective field is NaN in
    the raw (non-imputed) turbine data. The capacity percentages for the
    years 2000 and 2010 are additionally stored via write_data_value().

    Returns
    -------
    matplotlib Figure

    """
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)
    turbines = load_turbines(replace_nan_values="")

    turbines_per_year = turbines.p_year.groupby(turbines.p_year).count()
    cumulative_turbines = turbines_per_year.cumsum()

    def cumulative_missing_percent(field):
        # cumulative share (%) of turbines with NaN in `field`;
        # note: this assumes that a turbine with installation year x is
        # already operating in year x
        missing_per_year = np.isnan(field).groupby(turbines.p_year).sum()
        return 100 * missing_per_year.cumsum() / cumulative_turbines

    cumulative_missing_percent(turbines.t_hh).plot.line(
        label="Hub height",
        color=TURBINE_COLORS[1],
        ax=ax,
    )
    cumulative_missing_percent(turbines.t_rd).plot(
        label="Rotor diameter",
        color=TURBINE_COLORS[3],
        ax=ax,
    )

    percent_missing_cap_per_year = cumulative_missing_percent(turbines.t_cap)
    percent_missing_cap_per_year.plot(
        label="Capacity",
        color=TURBINE_COLORS[4],
        ax=ax,
    )

    for year in (2000, 2010):
        write_data_value(
            f"percent_missing_capacity_per_year{year}",
            f"{percent_missing_cap_per_year.sel(p_year=year).values:.0f}",
        )

    plt.legend()
    plt.ylabel("Turbines with missing metadata (%)")
    plt.xlabel("")
    plt.grid()

    return fig
def plot_effect_trends_pin(datasets, baseline, labels, colors):
    """Plot stacked effect trends for input power density.

    Each dataset is plotted as its difference to the previous one (the first
    relative to *baseline*), so each line shows the incremental effect of one
    component. If the time axis starts in 2010, several derived values are
    additionally stored via write_data_value().

    Parameters
    ----------
    datasets : sequence of time series, all of the same length
    baseline : time series the first dataset is compared against
    labels : one label per dataset; the labels "Wind change due to new
        locations" and "Annual variations" trigger extra data-value writes
    colors : one plot color per dataset

    Returns
    -------
    (matplotlib Figure, Axes)

    """
    assert np.all(
        len(datasets[0]) == np.array(
            [len(dataset) for dataset in datasets])), "all datasets must be of same length"
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)

    # this was used only for the one-slide-presentation for the EGU
    # ax.yaxis.set_label_position("right")
    # ax.yaxis.tick_right()

    previous = baseline
    for i, (label, dataset, color) in enumerate(zip(labels, datasets, colors)):
        # plot the increment relative to the previously plotted dataset
        dataset_relative = dataset - previous
        dataset_relative.plot.line("o-", label=label, color=color, zorder=25)
        previous = dataset

        # this is a bit ugly :-/
        if dataset.isel(time=0).time.dt.year == 2010:
            label_no_space = label.replace(" ", "-").lower()
            # total change over the whole period
            write_data_value(
                f"inputpowerdensity_{label_no_space}",
                f"{(dataset_relative[-1] - dataset_relative[0]).values:.1f}",
            )
            if label == "Wind change due to new locations":
                # change until 2013 (absolute value) and since 2013
                write_data_value(
                    f"inputpowerdensity_{label_no_space}_until2013_abs",
                    f"{abs(float(dataset_relative.sel(time='2013') - dataset_relative[0])):.1f}",
                )
                write_data_value(
                    f"inputpowerdensity_{label_no_space}_since2013",
                    f"{float(dataset_relative[-1] - dataset_relative.sel(time='2013')):.1f}",
                )
            if label == "Annual variations":
                # extreme values of the annual-variation increment
                for extremum in ("min", "max"):
                    write_data_value(
                        f"inputpowerdensity_{label_no_space}_{extremum}",
                        f"{getattr(dataset_relative, extremum)().values:.1f}",
                    )

    # keep x tick labels horizontal
    for label in ax.get_xmajorticklabels():
        label.set_rotation(0)
        label.set_horizontalalignment("center")

    plt.axhline(0, color="k", linewidth=1, zorder=5)
    plt.legend()
    ax.grid(zorder=0)
    plt.ylabel("Change in input power density (W/m²)")
    plt.xlabel("")
    return fig, ax
def rotor_swept_area_avg():
    """Write the average rotor swept area per built turbine for 2010 and 2019.

    The total rotor swept area is divided by the number of turbines built at
    each time step; start/end values are stored via write_data_value().
    """
    # only the time axis of this file is used below
    rotor_swept_area = xr.load_dataarray(OUTPUT_DIR / "turbine-time-series" /
                                         "rotor_swept_area_yearly.nc")
    turbines = load_turbines()
    time = rotor_swept_area.time

    is_built = calc_is_built(turbines, time)

    # bugfix: calc_rotor_swept_area() was previously called twice with the
    # first (expensive) result discarded — call it only once
    rotor_swept_area_avg = (calc_rotor_swept_area(turbines, time) /
                            is_built.sum(dim="turbines")).compute()

    for year, suffix in (("2010", "start"), ("2019", "end")):
        write_data_value(
            f"rotor_swept_area_avg-{suffix}",
            f"{int(rotor_swept_area_avg.sel(time=year).values.round()):,d}",
        )
def calc_correlation_efficiency_vs_input_power_density():
    """Write the correlation between efficiency and input power density.

    Loads the monthly input power and generated energy time series,
    restricts them to the period covered by filter2010() and stores the
    Pearson correlation coefficient via write_data_value().
    """
    rotor_swept_area = xr.load_dataarray(OUTPUT_DIR / "turbine-time-series" /
                                         "rotor_swept_area.nc")

    p_in = xr.load_dataarray(OUTPUT_DIR / "power_in_wind" / "p_in_monthly.nc")
    p_in = p_in.sortby("time")

    # convert monthly energy to average power (divide by hours per month);
    # presumably GWh -> GW — TODO confirm unit of load_generated_energy_gwh()
    p_out = load_generated_energy_gwh()
    p_out = (p_out / p_out.time.dt.days_in_month / 24).sortby("time")

    p_in = filter2010(p_in)
    p_out = filter2010(p_out)
    rotor_swept_area = filter2010(rotor_swept_area)

    efficiency = p_out / p_in
    p_in_density = p_in / rotor_swept_area * 1e9

    correlation = np.corrcoef(p_in_density, efficiency)[0, 1]

    write_data_value(
        "correlation-efficiency-vs-input-power-density",
        f"{correlation:.3f}",
    )
def missing_commissioning_year():
    """Write statistics about missing commissioning/decommissioning metadata.

    Compares the imputed turbine dataset against the raw one (NaNs kept) and
    stores several ratios and counts via write_data_value().
    """
    turbines = load_turbines()
    turbines_with_nans = load_turbines(replace_nan_values="")
    p_year_raw = turbines_with_nans.p_year

    write_data_value(
        "percentage_missing_commissioning_year",
        f"{nanratio(p_year_raw).values * 100:.1f}",
    )

    # NaNs relative to the number of turbines commissioned by 2010
    missing2010 = (np.isnan(p_year_raw).sum() / (p_year_raw <= 2010).sum()).values
    write_data_value(
        "percentage_missing_commissioning_year_2010",
        f"{missing2010 * 100:.1f}",
    )

    write_data_value(
        "num_available_decommissioning_year",
        f"{(~np.isnan(turbines.d_year)).sum().values:,d}",
    )
    write_data_value(
        "num_decommissioned_turbines",
        f"{turbines.is_decomissioned.sum().values:,d}",
    )

    # turbines older than `lifetime` years (relative to 2019) which are not
    # marked as decommissioned
    lifetime = 25
    still_operating = turbines.sel(turbines=~turbines.is_decomissioned)
    num_further_old_turbines = (
        (still_operating.p_year < (2019 - lifetime)).sum().values)
    write_data_value(
        "num_further_old_turbines",
        f"{num_further_old_turbines:,d}",
    )

    # adding hh + rd yields NaN if either field is NaN
    write_data_value(
        "missing_ratio_rd_hh",
        f"{100 * nanratio(turbines_with_nans.t_hh + turbines_with_nans.t_rd).values:.1f}",
    )
def calculate_slopes():
    """Write linear-trend and growth statistics for the main time series.

    Loads yearly input power, generated energy, rotor swept area and the
    number of built turbines, restricts everything to the filter2010()
    period and stores slopes, start/end values and relative growth via
    write_data_value().
    """
    p_in = filter2010(
        xr.open_dataarray(OUTPUT_DIR / "power_in_wind" / "p_in.nc"))
    generated_energy_gwh_yearly = filter2010(
        load_generated_energy_gwh_yearly())
    rotor_swept_area = filter2010(
        xr.load_dataarray(OUTPUT_DIR / "turbine-time-series" /
                          "rotor_swept_area_yearly.nc"))
    is_built_yearly = xr.open_dataarray(OUTPUT_DIR / "turbine-time-series" /
                                        "is_built_yearly.nc")
    num_turbines_built = filter2010(is_built_yearly.sum(dim="turbines"))

    # do not forget to filter2010!
    data = {
        "outputpowerdensity": (1e9 * generated_energy_gwh_yearly /
                               HOURS_PER_YEAR / rotor_swept_area),
        "inputpowerdensity": 1e9 * p_in / rotor_swept_area,
        "rotor_swept_area_avg": rotor_swept_area / num_turbines_built,
        "efficiency": 100 * generated_energy_gwh_yearly / p_in / HOURS_PER_YEAR,
    }

    for key, values in data.items():
        # slope of the series normalized to 100% at its first time step
        relative_to_2010 = 100 * values / values[0]
        write_data_value(
            f"{key}_relative_abs_slope",
            f"{calc_abs_slope(relative_to_2010):.1f}",
        )

    outputpowerdensity = data["outputpowerdensity"].values
    write_data_value(
        "outputpowerdensity-start",
        f"{outputpowerdensity[0]:.0f}",
    )
    write_data_value(
        "outputpowerdensity-end",
        f"{outputpowerdensity[-1]:.0f}",
    )
    write_data_value(
        "outputpowerdensity_abs_slope",
        f"{calc_abs_slope(outputpowerdensity):.1f}",
    )

    percentage_poweroutput_per_area = (
        100 * outputpowerdensity[-1] / outputpowerdensity[0])
    write_data_value(
        "percentage_poweroutput_per_area",
        f"{percentage_poweroutput_per_area:.0f}",
    )
    write_data_value(
        "less_poweroutput_per_area",
        f"{100 - percentage_poweroutput_per_area:.0f}",
    )

    growth_num_turbines_built = (
        num_turbines_built[-1] / num_turbines_built[0] * 100)
    write_data_value(
        "growth_num_turbines_built",
        f"{growth_num_turbines_built.values:.0f}",
    )

    rotor_swept_area_avg = data["rotor_swept_area_avg"]
    growth_rotor_swept_area_avg = (
        rotor_swept_area_avg[-1] / rotor_swept_area_avg[0] * 100)
    write_data_value(
        "growth_rotor_swept_area_avg",
        f"{growth_rotor_swept_area_avg.values:.0f}",
    )
def plot_system_effiency(
    efficiency,
    efficiency_without_pin,
    efficiency_monthly=None,
    efficiency_without_pin_monthly=None,
):
    """Plot the system efficiency time series in percent.

    Plots the yearly efficiency and the constant-input-power-density
    scenario; if the optional monthly series are given, they are added to
    the plot. If no monthly series are given, start/end values and slopes
    of the yearly series are stored via write_data_value().

    Returns
    -------
    matplotlib Figure

    """
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)

    (100 * efficiency).plot.line("o-", color=TURBINE_COLORS[3], label="System efficiency")
    (100 * efficiency_without_pin).plot.line(
        "--o",
        color=TURBINE_COLORS[3],
        label="Scenario with constant input power density")

    if efficiency_monthly is not None:
        (100 * efficiency_monthly).plot.line(
            "o-",
            color=TURBINE_COLORS[4],
            label="Monthly system efficiency, aggregated yearly")
        (100 * efficiency_without_pin_monthly).plot.line(
            "--o",
            color=TURBINE_COLORS[4],
            label=("Scenario using monthly time series, aggregated yearly"),
        )

    # keep x tick labels horizontal
    for ticklabel in ax.get_xmajorticklabels():
        ticklabel.set_rotation(0)
        ticklabel.set_horizontalalignment("center")

    plt.grid()
    plt.legend()
    plt.xlabel("")
    plt.ylabel("System efficiency (%)")

    # this should be better placed in scripts/calc_data_values.py but would cause a lot of code
    # duplication without large re-organization, so let's keep it here
    if efficiency_monthly is None:
        start_end_values = (
            ("efficiency_without_pin_yearly_start", efficiency_without_pin[0]),
            ("efficiency_without_pin_yearly_end", efficiency_without_pin[-1]),
            ("efficiency_yearly_start", efficiency[0]),
            ("efficiency_yearly_end", efficiency[-1]),
        )
        for name, value in start_end_values:
            write_data_value(name, f"{100 * value.values:.1f}")

        write_data_value(
            "efficiency_without_pin_yearly_slope",
            f"{100 * calc_abs_slope(efficiency_without_pin):.2f}",
        )
        write_data_value(
            "efficiency_yearly_slope",
            f"{100 * calc_abs_slope(efficiency):.2f}",
        )

    return fig
def plot_irena_capacity_validation(turbines, turbines_with_nans):
    """Compare installed capacity derived from turbine data against IRENA.

    Plots the relative difference between per-year capacity computed from the
    turbine dataset and IRENA's capacity numbers, for several scenarios:
    fixed lifetimes of 15-35 years (with and without capacity data
    imputation, the latter dashed), excluding turbines marked as
    decommissioned, and including all turbines. The maximum absolute
    relative error over the years >= 2010 is stored via write_data_value().

    Parameters
    ----------
    turbines : turbine dataset with imputed (NaN-replaced) values
    turbines_with_nans : turbine dataset with raw capacity data (NaNs kept)

    Returns
    -------
    matplotlib Figure

    """
    fig, ax = plt.subplots(1, 1, figsize=FIGSIZE)

    capacity_irena = load_capacity_irena()
    capacity_uswtdb = calc_capacity_per_year(turbines)
    capacity_uswtdb_no_capnans = calc_capacity_per_year(turbines_with_nans)

    # collects the relative-error series of every scenario plotted below
    rel_errors = []

    def compare_to_irena(capacity_uswtdb, label, **kwargs):
        # plot the relative difference to IRENA and remember it for the
        # max-error computation at the end
        rel_error = 100 * (capacity_uswtdb - capacity_irena) / capacity_irena
        rel_error.plot.line(label=label, ax=ax, **kwargs)
        rel_errors.append(rel_error)

    capacity_uswtdb_no_decom = calc_capacity_per_year(
        turbines.sel(turbines=~turbines.is_decomissioned))

    # more scenarios
    # COLORS = ("#0f4241", "#273738", "#136663", "#246b71", "#6a9395", "#84bcbf", "#9bdade")
    # LIFETIMES = (15, 18, 19, 20, 25, 30, 35)
    COLORS = ("#273738", "#246b71", "#6a9395", "#84bcbf", "#9bdade")
    LIFETIMES = (15, 20, 25, 30, 35)

    for lifetime, color in zip(LIFETIMES, COLORS):
        # subtract capacity installed `lifetime` years earlier, i.e. assume
        # it is decommissioned by now
        capacity_uswtdb_no_old = capacity_uswtdb - capacity_uswtdb.shift(
            p_year=lifetime).fillna(0.0)
        compare_to_irena(capacity_uswtdb_no_old, f"{lifetime} years lifetime", color=color)

    for lifetime, color in zip(LIFETIMES, COLORS):
        # the same thing again without the t_cap NaN replacement
        capacity_uswtdb_no_old = capacity_uswtdb_no_capnans - capacity_uswtdb_no_capnans.shift(
            p_year=lifetime).fillna(0.0)
        compare_to_irena(
            capacity_uswtdb_no_old,
            "",  # f"lifetime {lifetime} (without capacity data imputation)",
            linestyle="--",
            color=color,
        )

    compare_to_irena(
        capacity_uswtdb_no_decom,
        "exclude decommissioned turbines",
        linewidth=4,
        color="#ffde65",
    )
    compare_to_irena(capacity_uswtdb, "include decommissioned turbines", linewidth=4, color="#c42528")

    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.grid()
    xlim = ax.get_xlim()
    plt.xlim(*xlim)
    # shade the years before 2010
    plt.axvspan(xlim[0] - 10, 2010, facecolor="k", alpha=0.07)

    # add an explanatory legend entry for the dashed lines, inserted before
    # the two thick scenario lines
    handles, labels = plt.gca().get_legend_handles_labels()
    line = Line2D([0], [0], label="without data imputation", linestyle="--", color="k")
    handles.insert(-2, line)
    plt.legend(handles=handles)

    plt.tight_layout()
    plt.axhline(0.0, color="k")
    plt.xlabel("")
    plt.ylabel("Relative difference (%)")

    rel_errors = xr.concat(rel_errors, dim="scenarios")
    max_abs_error = (np.abs(
        rel_errors.isel(
            p_year=(rel_errors.p_year >= 2010).values)).max().compute())

    # note: using ceil, because text says "less than"
    write_data_value(
        "irena_uswtdb_validation_max_abs_error",
        f"{float(np.ceil(max_abs_error)):.0f}",
    )

    return fig