def moo_optimization(locator, weather_file, gv, config):
    '''
    This function optimizes the conversion, storage and distribution systems of a heating distribution for the
    case study. It requires that the energy demand, technology potential and thermal networks are simulated, as follows:

        - energy demand simulation: run cea/demand/demand_main.py
        - PV potential: run cea/technologies/solar/photovoltaic.py
        - PVT potential: run cea/technologies/solar/photovoltaic_thermal.py
        - flat plate solar collector potential: run cea/technologies/solar/solar_collector.py with
          config.solar.type_scpanel = 'FP'
        - evacuated tube solar collector potential: run cea/technologies/solar/solar_collector.py with
          config.solar.type_scpanel = 'ET'
        - waste water heat recovery: run cea/resources/sewage_heat_exchanger.py
        - lake water potential: run cea/resources/lake_potential.py
        - thermal network simulation: run cea/technologies/thermal_network/thermal_network_matrix.py
          if no network is currently present in the case study, consider running network_layout/main.py first
        - decentralized building simulation: run cea/optimization/preprocessing/decentralized_building_main.py

    :param locator: path to input locator
    :param weather_file: path to weather file
    :param gv: global variables class
    :type locator: string
    :type weather_file: string
    :type gv: class
    :returns: None
    :rtype: Nonetype

    NOTE(review): a second definition of ``moo_optimization`` appears later in this file and, being defined
    after this one, shadows it at import time — confirm which version is intended to survive.
    '''

    # read total demand file and names and number of all buildings
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()  # side effect: stores the building count on the globals object
    lca = lca_calculations(locator, config)
    prices = Prices(locator, config)

    # pre-process information regarding resources and technologies (they are treated before the optimization)
    # optimize best systems for every individual building (they will compete against a district distribution solution)
    print "PRE-PROCESSING"
    extra_costs, extra_CO2, extra_primary_energy, solar_features = preproccessing(locator, total_demand,
                                                                                  building_names, weather_file, gv,
                                                                                  config, prices, lca)

    # optimize the distribution and linearize the results(at the moment, there is only a linearization of values in Zug)
    print "NETWORK OPTIMIZATION"
    network_features = network_opt_main.network_opt_main(config, locator)

    # optimize conversion systems
    print "CONVERSION AND STORAGE OPTIMIZATION"
    master_main.non_dominated_sorting_genetic_algorithm(locator, building_names, extra_costs, extra_CO2,
                                                        extra_primary_energy, solar_features, network_features, gv,
                                                        config, prices, lca)
def moo_optimization(locator, weather_file, gv, config): ''' This function optimizes the conversion, storage and distribution systems of a heating distribution for the case study. It requires that solar technologies be calculated in advance and nodes of a distribution should have been already generated. :param locator: path to input locator :param weather_file: path to weather file :param gv: global variables class :type locator: string :type weather_file: string :type gv: class :returns: None :rtype: Nonetype ''' # read total demand file and names and number of all buildings total_demand = pd.read_csv(locator.get_total_demand()) building_names = total_demand.Name.values gv.num_tot_buildings = total_demand.Name.count() lca = lca_calculations(locator, config) prices = Prices(locator, config) # pre-process information regarding resources and technologies (they are treated before the optimization) # optimize best systems for every individual building (they will compete against a district distribution solution) print "PRE-PROCESSING" extra_costs, extra_CO2, extra_primary_energy, solarFeat = preproccessing( locator, total_demand, building_names, weather_file, gv, config, prices, lca) # optimize the distribution and linearize the results(at the moment, there is only a linearization of values in Zug) print "NETWORK OPTIMIZATION" network_features = network_opt.network_opt_main(config, locator) # optimize conversion systems print "CONVERSION AND STORAGE OPTIMIZATION" master.evolutionary_algo_main(locator, building_names, extra_costs, extra_CO2, extra_primary_energy, solarFeat, network_features, gv, config, prices, lca)
def run_as_script(scenario_path=None):
    # Entry point for running the sensitivity analysis on a scenario from the command line or tests.
    # NOTE(review): `sensAnalysis` and `gen` are free names resolved at call time from module scope
    # (a `gen = 4` and a sensitivity-analysis body appear elsewhere in this file) — TODO confirm they
    # are defined before this runs.
    import cea.globalvar
    import pandas as pd
    import cea.optimization.distribution.network_opt_main as network_opt
    from cea.optimization.preprocessing.preprocessing_main import preproccessing

    gv = cea.globalvar.GlobalVariables()
    # default to the reference scenario configured in the globals object
    if scenario_path is None:
        scenario_path = gv.scenario_reference

    locator = cea.inputlocator.InputLocator(scenario_path=scenario_path)
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()
    weather_file = locator.get_default_weather()

    # NOTE(review): this 5-argument call to `preproccessing` disagrees with the 8-argument
    # (gv, config, prices, lca) call sites elsewhere in this file — verify against the
    # signature in cea/optimization/preprocessing/preprocessing_main.py.
    extraCosts, extraCO2, extraPrim, solarFeat = preproccessing(locator, total_demand, building_names,
                                                                weather_file, gv)
    # NOTE(review): called with no arguments here, but with (config, locator) in other blocks of this file.
    ntwFeat = network_opt.network_opt_main()

    sensAnalysis(locator, extraCosts, extraCO2, extraPrim, solarFeat, ntwFeat, gen)
    print 'sensitivity analysis succeeded'
def preprocessing_cost_data(locator, data_raw, individual, generations, data_address, config):
    """
    Post-process the optimization results of one individual into a cost summary.

    Reads the activation patterns and investment-cost files written by the optimization slave
    for the generation/individual recorded in `data_address`, then (for district cooling)
    recomputes total and annualized CAPEX/OPEX per technology from the supply-systems database.

    :param locator: input locator used to resolve all result and database paths
    :param data_raw: dict-like with 'network', 'individual_barcode' and 'population' entries,
        indexable by `individual` (presumably loaded from a checkpoint — TODO confirm)
    :param individual: key of the individual to post-process
    :param generations: unused in this body — NOTE(review): consider whether it can be dropped upstream
    :param data_address: frame mapping individuals to generation/individual numbers on disk
    :param config: cea configuration object (mutated below — see "pump" section)
    :return: `data_costs` frame with the added cost columns
        NOTE(review): `data_costs` is only assigned in the 'DC' branch; for network_type == 'DH'
        the final `return data_costs` raises NameError (the DH branch builds `data_processed`
        and never uses it). Verify intended behavior for DH.
    """
    string_network = data_raw['network'].loc[individual].values[0]
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    individual_barcode_list = data_raw['individual_barcode'].loc[individual].values[0]

    # The current structure of CEA has the following columns saved, in future, this will be slightly changed and
    # correspondingly these columns_of_saved_files needs to be changed
    columns_of_saved_files = ['CHP/Furnace', 'CHP/Furnace Share', 'Base Boiler', 'Base Boiler Share',
                              'Peak Boiler', 'Peak Boiler Share', 'Heating Lake', 'Heating Lake Share',
                              'Heating Sewage', 'Heating Sewage Share', 'GHP', 'GHP Share',
                              'Data Centre', 'Compressed Air', 'PV', 'PV Area Share', 'PVT', 'PVT Area Share',
                              'SC_ET', 'SC_ET Area Share', 'SC_FP', 'SC_FP Area Share',
                              'DHN Temperature', 'DHN unit configuration',
                              'Lake Cooling', 'Lake Cooling Share', 'VCC Cooling', 'VCC Cooling Share',
                              'Absorption Chiller', 'Absorption Chiller Share', 'Storage', 'Storage Share',
                              'DCN Temperature', 'DCN unit configuration']
    for i in building_names:  # DHN: one connection flag column per building
        columns_of_saved_files.append(str(i) + ' DHN')
    for i in building_names:  # DCN: one connection flag column per building
        columns_of_saved_files.append(str(i) + ' DCN')

    # single-row frame holding this individual's barcode, keyed by the column names above
    df_current_individual = pd.DataFrame(np.zeros(shape=(1, len(columns_of_saved_files))),
                                         columns=columns_of_saved_files)
    for i, ind in enumerate((columns_of_saved_files)):
        df_current_individual[ind] = individual_barcode_list[i]

    # resolve where on disk this individual's results were saved
    data_address = data_address[data_address['individual_list'] == individual]
    generation_number = data_address['generation_number_address'].values[0]
    individual_number = data_address['individual_number_address'].values[0]

    # get data about the activation patterns of these buildings (main units)
    if config.multi_criteria.network_type == 'DH':
        building_demands_df = pd.read_csv(
            locator.get_optimization_network_results_summary(string_network)).set_index("DATE")
        data_activation_path = os.path.join(
            locator.get_optimization_slave_heating_activation_pattern(individual_number, generation_number))
        df_heating = pd.read_csv(data_activation_path).set_index("DATE")
        data_activation_path = os.path.join(
            locator.get_optimization_slave_electricity_activation_pattern_heating(individual_number,
                                                                                  generation_number))
        df_electricity = pd.read_csv(data_activation_path).set_index("DATE")

        # get data about the activation patterns of these buildings (storage)
        data_storage_path = os.path.join(
            locator.get_optimization_slave_storage_operation_data(individual_number, generation_number))
        df_SO = pd.read_csv(data_storage_path).set_index("DATE")

        # join into one database
        # NOTE(review): `data_processed` is never used or returned — the DH branch appears incomplete.
        data_processed = df_heating.join(df_electricity).join(df_SO).join(building_demands_df)

    elif config.multi_criteria.network_type == 'DC':
        data_costs = pd.read_csv(os.path.join(
            locator.get_optimization_slave_investment_cost_detailed_cooling(individual_number, generation_number)))
        data_cooling = pd.read_csv(os.path.join(
            locator.get_optimization_slave_cooling_activation_pattern(individual_number, generation_number)))
        data_electricity = pd.read_csv(os.path.join(
            locator.get_optimization_slave_electricity_activation_pattern_cooling(individual_number,
                                                                                  generation_number)))

        # Total CAPEX calculations
        # Absorption Chiller: size chillers from the peak load (plus margin), then convert the
        # annualized capex back to total capex via the annuity factor (Inv_IR, Inv_LT).
        Absorption_chiller_cost_data = pd.read_excel(locator.get_supply_systems(config.region),
                                                     sheetname="Absorption_chiller",
                                                     usecols=['type', 'code', 'cap_min', 'cap_max', 'a', 'b', 'c',
                                                              'd', 'e', 'IR_%', 'LT_yr', 'O&M_%'])
        Absorption_chiller_cost_data = Absorption_chiller_cost_data[
            Absorption_chiller_cost_data['type'] == 'double']
        max_ACH_chiller_size = max(Absorption_chiller_cost_data['cap_max'].values)
        Inv_IR = (Absorption_chiller_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
        Q_ACH_max_W = data_cooling['Q_from_ACH_W'].max()
        Q_ACH_max_W = Q_ACH_max_W * (1 + SIZING_MARGIN)
        # at least one unit; additional units when the load exceeds the largest catalogue size
        number_of_ACH_chillers = max(int(ceil(Q_ACH_max_W / max_ACH_chiller_size)), 1)
        Q_nom_ACH_W = Q_ACH_max_W / number_of_ACH_chillers
        Capex_a_ACH, Opex_fixed_ACH = calc_Cinv(Q_nom_ACH_W, locator, 'double', config)
        Capex_total_ACH = (Capex_a_ACH * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) *
                           (1 + Inv_IR) ** Inv_LT) * number_of_ACH_chillers
        data_costs['Capex_total_ACH'] = Capex_total_ACH
        data_costs['Opex_total_ACH'] = np.sum(data_cooling['Opex_var_ACH']) + data_costs['Opex_fixed_ACH']

        # VCC (vapor compression chiller), same sizing/annuity scheme as the ACH block above
        VCC_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="Chiller")
        VCC_cost_data = VCC_cost_data[VCC_cost_data['code'] == 'CH3']
        max_VCC_chiller_size = max(VCC_cost_data['cap_max'].values)
        Inv_IR = (VCC_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = VCC_cost_data.iloc[0]['LT_yr']
        Q_VCC_max_W = data_cooling['Q_from_VCC_W'].max()
        Q_VCC_max_W = Q_VCC_max_W * (1 + SIZING_MARGIN)
        number_of_VCC_chillers = max(int(ceil(Q_VCC_max_W / max_VCC_chiller_size)), 1)
        Q_nom_VCC_W = Q_VCC_max_W / number_of_VCC_chillers
        Capex_a_VCC, Opex_fixed_VCC = calc_Cinv_VCC(Q_nom_VCC_W, locator, config, 'CH3')
        Capex_total_VCC = (Capex_a_VCC * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) *
                           (1 + Inv_IR) ** Inv_LT) * number_of_VCC_chillers
        data_costs['Capex_total_VCC'] = Capex_total_VCC
        data_costs['Opex_total_VCC'] = np.sum(data_cooling['Opex_var_VCC']) + data_costs['Opex_fixed_VCC']

        # VCC Backup (reuses the CH3 catalogue row and its Inv_IR/Inv_LT from the block above)
        Q_VCC_backup_max_W = data_cooling['Q_from_VCC_backup_W'].max()
        Q_VCC_backup_max_W = Q_VCC_backup_max_W * (1 + SIZING_MARGIN)
        number_of_VCC_backup_chillers = max(int(ceil(Q_VCC_backup_max_W / max_VCC_chiller_size)), 1)
        Q_nom_VCC_backup_W = Q_VCC_backup_max_W / number_of_VCC_backup_chillers
        Capex_a_VCC_backup, Opex_fixed_VCC_backup = calc_Cinv_VCC(Q_nom_VCC_backup_W, locator, config, 'CH3')
        Capex_total_VCC_backup = (Capex_a_VCC_backup * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) *
                                  (1 + Inv_IR) ** Inv_LT) * number_of_VCC_backup_chillers
        data_costs['Capex_total_VCC_backup'] = Capex_total_VCC_backup
        data_costs['Opex_total_VCC_backup'] = np.sum(data_cooling['Opex_var_VCC_backup']) + \
                                              data_costs['Opex_fixed_VCC_backup']

        # Storage Tank
        storage_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="TES")
        storage_cost_data = storage_cost_data[storage_cost_data['code'] == 'TES2']
        Inv_IR = (storage_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = storage_cost_data.iloc[0]['LT_yr']
        Capex_a_storage_tank = data_costs['Capex_a_Tank'][0]
        Capex_total_storage_tank = (Capex_a_storage_tank * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) *
                                    (1 + Inv_IR) ** Inv_LT)
        data_costs['Capex_total_storage_tank'] = Capex_total_storage_tank
        # NOTE(review): storage-tank OPEX sums `Opex_var_VCC_backup`, not a tank-specific column —
        # looks like a copy-paste from the VCC-backup block; confirm the intended column.
        data_costs['Opex_total_storage_tank'] = np.sum(data_cooling['Opex_var_VCC_backup']) + \
                                                data_costs['Opex_fixed_Tank']

        # Cooling Tower (no SIZING_MARGIN applied here, unlike the chiller blocks — TODO confirm intended)
        CT_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="CT")
        CT_cost_data = CT_cost_data[CT_cost_data['code'] == 'CT1']
        max_CT_size = max(CT_cost_data['cap_max'].values)
        Inv_IR = (CT_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = CT_cost_data.iloc[0]['LT_yr']
        Qc_CT_max_W = data_cooling['Qc_CT_associated_with_all_chillers_W'].max()
        number_of_CT = max(int(ceil(Qc_CT_max_W / max_CT_size)), 1)
        Qnom_CT_W = Qc_CT_max_W / number_of_CT
        Capex_a_CT, Opex_fixed_CT = calc_Cinv_CT(Qnom_CT_W, locator, config, 'CT1')
        Capex_total_CT = (Capex_a_CT * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) *
                          (1 + Inv_IR) ** Inv_LT) * number_of_CT
        data_costs['Capex_total_CT'] = Capex_total_CT
        data_costs['Opex_total_CT'] = np.sum(data_cooling['Opex_var_CT']) + data_costs['Opex_fixed_CT']

        # CCGT (combined cycle gas turbine); first catalogue code is used — set() ordering is arbitrary,
        # NOTE(review): `technology_code[0]` is non-deterministic if the sheet has several codes.
        CCGT_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="CCGT")
        technology_code = list(set(CCGT_cost_data['code']))
        CCGT_cost_data = CCGT_cost_data[CCGT_cost_data['code'] == technology_code[0]]
        Inv_IR = (CCGT_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']
        Capex_a_CCGT = data_costs['Capex_a_CCGT'][0]
        Capex_total_CCGT = (Capex_a_CCGT * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)
        data_costs['Capex_total_CCGT'] = Capex_total_CCGT
        data_costs['Opex_total_CCGT'] = np.sum(data_cooling['Opex_var_CCGT']) + data_costs['Opex_fixed_CCGT']

        # pump: size network pumps from peak mass flow and pressure drop, then cost them per catalogue
        config.restricted_to = None  # FIXME: remove this later
        # NOTE(review): mutating `config` here leaks state to later callers of this function's config object.
        config.thermal_network.network_type = config.multi_criteria.network_type
        config.thermal_network.network_names = []
        network_features = network_opt.network_opt_main(config, locator)
        # rebuild the DCN connection barcode (one char per building) from the individual's frame
        DCN_barcode = ""
        for name in building_names:
            DCN_barcode += str(df_current_individual[name + ' DCN'][0])
        # pick the mass-flow column depending on whether a data centre is connected
        if df_current_individual['Data Centre'][0] == 1:
            df = pd.read_csv(locator.get_optimization_network_data_folder(
                "Network_summary_result_" + hex(int(str(DCN_barcode), 2)) + ".csv"),
                usecols=["mdot_cool_space_cooling_and_refrigeration_netw_all_kgpers"])
        else:
            df = pd.read_csv(locator.get_optimization_network_data_folder(
                "Network_summary_result_" + hex(int(str(DCN_barcode), 2)) + ".csv"),
                usecols=["mdot_cool_space_cooling_data_center_and_refrigeration_netw_all_kgpers"])
        mdotA_kgpers = np.array(df)
        mdotnMax_kgpers = np.amax(mdotA_kgpers)
        deltaPmax = np.max((network_features.DeltaP_DCN) * DCN_barcode.count("1") / len(DCN_barcode))
        E_pumping_required_W = mdotnMax_kgpers * deltaPmax / DENSITY_OF_WATER_AT_60_DEGREES_KGPERM3
        P_motor_tot_W = E_pumping_required_W / PUMP_ETA  # electricty to run the motor
        Pump_max_kW = 375.0
        Pump_min_kW = 0.5
        nPumps = int(np.ceil(P_motor_tot_W / 1000.0 / Pump_max_kW))
        # if the nominal load (electric) > 375kW, a new pump is installed
        Pump_Array_W = np.zeros((nPumps))
        Pump_Remain_W = P_motor_tot_W
        Capex_total_pumps = 0
        Capex_a_total_pumps = 0
        for pump_i in range(nPumps):
            # calculate pump nominal capacity
            Pump_Array_W[pump_i] = min(Pump_Remain_W, Pump_max_kW * 1000)
            if Pump_Array_W[pump_i] < Pump_min_kW * 1000:
                Pump_Array_W[pump_i] = Pump_min_kW * 1000
            Pump_Remain_W -= Pump_Array_W[pump_i]
            # NOTE(review): the Excel sheet is re-read on every loop iteration; could be hoisted.
            pump_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="Pump")
            pump_cost_data = pump_cost_data[pump_cost_data['code'] == 'PU1']
            # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
            # capacity for the corresponding technology from the database
            if Pump_Array_W[pump_i] < pump_cost_data.iloc[0]['cap_min']:
                Pump_Array_W[pump_i] = pump_cost_data.iloc[0]['cap_min']
            pump_cost_data = pump_cost_data[(pump_cost_data['cap_min'] <= Pump_Array_W[pump_i]) &
                                            (pump_cost_data['cap_max'] > Pump_Array_W[pump_i])]
            Inv_a = pump_cost_data.iloc[0]['a']
            Inv_b = pump_cost_data.iloc[0]['b']
            Inv_c = pump_cost_data.iloc[0]['c']
            Inv_d = pump_cost_data.iloc[0]['d']
            Inv_e = pump_cost_data.iloc[0]['e']
            Inv_IR = (pump_cost_data.iloc[0]['IR_%']) / 100
            Inv_LT = pump_cost_data.iloc[0]['LT_yr']
            Inv_OM = pump_cost_data.iloc[0]['O&M_%'] / 100
            # investment cost curve: a + b*x^c + (d + e*x)*log(x)
            InvC = Inv_a + Inv_b * (Pump_Array_W[pump_i]) ** Inv_c + (
                    Inv_d + Inv_e * Pump_Array_W[pump_i]) * log(Pump_Array_W[pump_i])
            Capex_total_pumps += InvC
            Capex_a_total_pumps += InvC * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)
        data_costs['Capex_total_pumps'] = Capex_total_pumps
        # NOTE(review): `Opex_fixed_pump` is added to itself here — probably one term should be a
        # variable-opex column; confirm against the slave cost writer.
        data_costs['Opex_total_pumps'] = data_costs['Opex_fixed_pump'] + data_costs['Opex_fixed_pump']

        # PV
        pv_installed_area = data_electricity['Area_PV_m2'].max()
        Capex_a_PV, Opex_fixed_PV = calc_Cinv_pv(pv_installed_area, locator, config)
        pv_annual_production_kWh = (data_electricity['E_PV_W'].sum()) / 1000
        Opex_a_PV = calc_opex_PV(pv_annual_production_kWh, pv_installed_area)
        PV_cost_data = pd.read_excel(locator.get_supply_systems(config.region), sheetname="PV")
        technology_code = list(set(PV_cost_data['code']))
        # NOTE(review): the filter result below is discarded — likely meant to be re-assigned to PV_cost_data.
        PV_cost_data[PV_cost_data['code'] == technology_code[0]]
        Inv_IR = (PV_cost_data.iloc[0]['IR_%']) / 100
        Inv_LT = PV_cost_data.iloc[0]['LT_yr']
        Capex_total_PV = (Capex_a_PV * ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)
        data_costs['Capex_total_PV'] = Capex_total_PV
        data_costs['Opex_total_PV'] = Opex_a_PV + Opex_fixed_PV

        # Disconnected Buildings: barcode char '0' means the building is NOT connected to the DCN
        Capex_total_disconnected = 0
        Opex_total_disconnected = 0
        Capex_a_total_disconnected = 0
        for (index, building_name) in zip(DCN_barcode, building_names):
            # NOTE(review): `is '0'` compares identity, not equality — works only because CPython
            # interns short strings; should be `== '0'`.
            if index is '0':
                df = pd.read_csv(locator.get_optimization_disconnected_folder_building_result_cooling(
                    building_name, configuration='AHU_ARU_SCU'))
                dfBest = df[df["Best configuration"] == 1]
                if dfBest['VCC to AHU_ARU_SCU Share'].iloc[0] == 1:  # FIXME: Check for other options
                    Inv_IR = (VCC_cost_data.iloc[0]['IR_%']) / 100
                    Inv_LT = VCC_cost_data.iloc[0]['LT_yr']
                if dfBest['single effect ACH to AHU_ARU_SCU Share (FP)'].iloc[0] == 1:
                    Inv_IR = (Absorption_chiller_cost_data.iloc[0]['IR_%']) / 100
                    Inv_LT = Absorption_chiller_cost_data.iloc[0]['LT_yr']
                Opex_total_disconnected += dfBest["Operation Costs [CHF]"].iloc[0]
                Capex_a_total_disconnected += dfBest["Annualized Investment Costs [CHF]"].iloc[0]
                Capex_total_disconnected += (dfBest["Annualized Investment Costs [CHF]"].iloc[0] *
                                             ((1 + Inv_IR) ** Inv_LT - 1) / (Inv_IR) * (1 + Inv_IR) ** Inv_LT)
        data_costs['Capex_total_disconnected_Mio'] = Capex_total_disconnected / 1000000
        data_costs['Opex_total_disconnected_Mio'] = Opex_total_disconnected / 1000000
        data_costs['Capex_a_disconnected_Mio'] = Capex_a_total_disconnected / 1000000

        # copy the population-level objectives for this individual
        data_costs['costs_Mio'] = data_raw['population']['costs_Mio'][individual]
        data_costs['emissions_kiloton'] = data_raw['population']['emissions_kiloton'][individual]
        data_costs['prim_energy_TJ'] = data_raw['population']['prim_energy_TJ'][individual]

        # Electricity Details/Renewable Share (8760 = hours in a non-leap year)
        total_electricity_demand_decentralized_W = np.zeros(8760)
        DCN_barcode = ""
        for name in building_names:  # identifying the DCN code
            DCN_barcode += str(int(df_current_individual[name + ' DCN'].values[0]))
        for i, name in zip(DCN_barcode, building_names):
            # adding the electricity demand from the decentralized buildings
            if i is '0':  # NOTE(review): same `is` vs `==` caveat as above
                building_demand = pd.read_csv(locator.get_demand_results_folder() + '//' + name + ".csv",
                                              usecols=['E_sys_kWh'])
                total_electricity_demand_decentralized_W += building_demand['E_sys_kWh'] * 1000

        lca = lca_calculations(locator, config)
        data_electricity_processed = electricity_import_and_exports(generation_number, individual_number,
                                                                    locator, config)
        data_costs['Network_electricity_demand_GW'] = (data_electricity['E_total_req_W'].sum()) / 1000000000  # GW
        data_costs['Decentralized_electricity_demand_GW'] = (
            data_electricity_processed['E_decentralized_appliances_W'].sum()) / 1000000000  # GW
        data_costs['Total_electricity_demand_GW'] = (
            data_electricity_processed['E_total_req_W'].sum()) / 1000000000  # GW
        renewable_share_electricity = (data_electricity_processed['E_PV_to_directload_W'].sum() +
                                       data_electricity_processed['E_PV_to_grid_W'].sum()) * 100 / \
                                      (data_costs['Total_electricity_demand_GW'] * 1000000000)
        data_costs['renewable_share_electricity'] = renewable_share_electricity

        data_costs['Electricity_Costs_Mio'] = ((data_electricity_processed['E_from_grid_W'].sum() +
                                                data_electricity_processed['E_total_to_grid_W_negative'].sum()) *
                                               lca.ELEC_PRICE) / 1000000

        # roll up annualized, total and operating costs (in millions)
        data_costs['Capex_a_total_Mio'] = (Capex_a_ACH * number_of_ACH_chillers + Capex_a_VCC * number_of_VCC_chillers + \
                                           Capex_a_VCC_backup * number_of_VCC_backup_chillers + Capex_a_CT * number_of_CT + Capex_a_storage_tank + \
                                           Capex_a_total_pumps + Capex_a_CCGT + Capex_a_PV + Capex_a_total_disconnected) / 1000000
        data_costs['Capex_a_ACH'] = Capex_a_ACH * number_of_ACH_chillers
        data_costs['Capex_a_VCC'] = Capex_a_VCC * number_of_VCC_chillers
        data_costs['Capex_a_VCC_backup'] = Capex_a_VCC_backup * number_of_VCC_backup_chillers
        data_costs['Capex_a_CT'] = Capex_a_CT * number_of_CT
        data_costs['Capex_a_storage_tank'] = Capex_a_storage_tank
        data_costs['Capex_a_total_pumps'] = Capex_a_total_pumps
        data_costs['Capex_a_CCGT'] = Capex_a_CCGT
        data_costs['Capex_a_PV'] = Capex_a_PV

        data_costs['Capex_total_Mio'] = (data_costs['Capex_total_ACH'] + data_costs['Capex_total_VCC'] + data_costs['Capex_total_VCC_backup'] + \
                                         data_costs['Capex_total_storage_tank'] + data_costs['Capex_total_CT'] + data_costs['Capex_total_CCGT'] + \
                                         data_costs['Capex_total_pumps'] + data_costs['Capex_total_PV'] + Capex_total_disconnected) / 1000000

        data_costs['Opex_total_Mio'] = ((data_costs['Opex_total_ACH'] + data_costs['Opex_total_VCC'] + data_costs['Opex_total_VCC_backup'] + \
                                         data_costs['Opex_total_storage_tank'] + data_costs['Opex_total_CT'] + data_costs['Opex_total_CCGT'] + \
                                         data_costs['Opex_total_pumps'] + Opex_total_disconnected) / 1000000) + data_costs['Electricity_Costs_Mio']

        # total annualized cost = annualized capex + total opex
        data_costs['TAC_Mio'] = data_costs['Capex_a_total_Mio'] + data_costs['Opex_total_Mio']

    return data_costs
# NOTE(review): the two prints and the `return` below are the tail of a metric function
# (combined euclidean distance / spread) whose `def` line is not visible in this chunk;
# indentation is reconstructed — confirm against the original file.
print ('combined euclidean distance = ' + str(combined_euclidean_distance))
print ('spread = ' + str(spread_final))

return combined_euclidean_distance, spread_final


if __name__ == "__main__":
    # Script entry point: run pre-processing, network optimization and the NSGA search
    # for the scenario configured in the cea config.
    config = cea.config.Configuration()
    gv = cea.globalvar.GlobalVariables()
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    weather_file = config.weather

    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()
    lca = lca_calculations(locator, config)
    prices = Prices(locator, config)

    extra_costs, extra_CO2, extra_primary_energy, solar_features = preproccessing(locator, total_demand,
                                                                                  building_names, weather_file,
                                                                                  gv, config, prices, lca)

    # optimize the distribution and linearize the results(at the moment, there is only a linearization of values in Zug)
    print "NETWORK OPTIMIZATION"
    nBuildings = len(building_names)  # NOTE(review): unused below — candidate for removal
    network_features = network_opt_main.network_opt_main(config, locator)

    non_dominated_sorting_genetic_algorithm(locator, building_names, extra_costs, extra_CO2,
                                            extra_primary_energy, solar_features, network_features, gv,
                                            config, prices, lca)
def main(config):
    """
    run the whole optimization routine

    Validates that every prerequisite result file exists (demand, PV/PVT/SC potentials,
    sewage potential, network edge list), then builds a single supply-system individual
    from the `supply_system_simulation` config section and evaluates it with
    `supply_calculation`. Exits the process with status 1 if any prerequisite is missing.
    """
    gv = cea.globalvar.GlobalVariables()
    locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    weather_file = config.weather

    # fail early with an actionable message if an upstream script has not been run
    try:
        if not demand_files_exist(config, locator):
            raise ValueError("Missing demand data of the scenario. Consider running demand script first")

        if not os.path.exists(locator.get_total_demand()):
            raise ValueError("Missing total demand of the scenario. Consider running demand script first")

        if not os.path.exists(locator.PV_totals()):
            raise ValueError("Missing PV potential of the scenario. Consider running photovoltaic script first")

        if config.district_heating_network:
            if not os.path.exists(locator.PVT_totals()):
                raise ValueError(
                    "Missing PVT potential of the scenario. Consider running photovoltaic-thermal script first")

            if not os.path.exists(locator.SC_totals(panel_type='FP')):
                raise ValueError(
                    "Missing SC potential of panel type 'FP' of the scenario. Consider running solar-collector script first with panel_type as SC1 and t-in-SC as 75")

            if not os.path.exists(locator.SC_totals(panel_type='ET')):
                raise ValueError(
                    "Missing SC potential of panel type 'ET' of the scenario. Consider running solar-collector script first with panel_type as SC2 and t-in-SC as 150")

        if not os.path.exists(locator.get_sewage_heat_potential()):
            raise ValueError(
                "Missing sewage potential of the scenario. Consider running sewage heat exchanger script first")

        if not os.path.exists(locator.get_optimization_network_edge_list_file(
                config.thermal_network.network_type, '')):
            raise ValueError("Missing network edge list. Consider running thermal network script first")

    except ValueError as err:
        import sys
        print(err.message)  # Python-2 only attribute; fine in this Py2 code base
        sys.exit(1)

    # read total demand file and names and number of all buildings
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()
    prices = Prices(locator, config)
    lca = lca_calculations(locator, config)

    # pre-process information regarding resources and technologies (they are treated before the optimization)
    # optimize best systems for every individual building (they will compete against a district distribution solution)
    extra_costs, extra_CO2, extra_primary_energy, solarFeat = preproccessing(locator, total_demand,
                                                                             building_names, weather_file, gv,
                                                                             config, prices, lca)

    # optimize the distribution and linearize the results(at the moment, there is only a linearization of values in Zug)
    network_features = network_opt_main.network_opt_main(config, locator)

    ## generate individual from config
    # heating technologies at the centralized plant (all off; last two entries presumably are
    # DHN temperature and unit configuration — TODO confirm the gene layout)
    heating_block = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 90.0, 6]
    # FIXME: connect PV to config
    # cooling technologies at the centralized plant
    centralized_vcc_size = config.supply_system_simulation.centralized_vcc
    centralized_ach_size = config.supply_system_simulation.centralized_ach
    centralized_storage_size = config.supply_system_simulation.centralized_storage
    cooling_block = [0, 0, 1, 0.3, 1, 0.4, 1, 0.2, 6, 7]
    # each technology gene pair is (on/off flag, share); zeroed when the configured size is 0
    cooling_block[2:4] = [1, centralized_vcc_size] if (centralized_vcc_size != 0) else [0, 0]
    cooling_block[4:6] = [1, centralized_ach_size] if (centralized_ach_size != 0) else [0, 0]
    cooling_block[6:8] = [1, centralized_storage_size] if (centralized_storage_size != 0) else [0, 0]

    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values

    # read list of buildings connected to DC from config
    if len(config.supply_system_simulation.dc_connected_buildings) == 0:
        dc_connected_buildings = building_names  # default, all connected
    else:
        dc_connected_buildings = config.supply_system_simulation.dc_connected_buildings
        # dc_connected_buildings = building_names  # default, all connected

    # buildings connected to networks: one 0/1 flag per building, heating all off here
    heating_network = [0] * building_names.size
    cooling_network = [0] * building_names.size
    for building in dc_connected_buildings:
        index = np.where(building_names == building)[0][0]
        cooling_network[index] = 1

    individual = heating_block + cooling_block + heating_network + cooling_network
    # individual = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0.01,1,0.535812211,0,0,0,0,10,7,1,0,1,1,0,1,0,0,0,0,1,1,1,1,0,1,1,0,1,1]
    supply_calculation(individual, building_names, total_demand, locator, extra_costs, extra_CO2,
                       extra_primary_energy, solarFeat, network_features, gv, config, prices, lca)

    print 'Buildings connected to thermal network:', dc_connected_buildings
    print 'Centralized systems:', centralized_vcc_size, 'VCC', centralized_ach_size, 'ACH', centralized_storage_size
    print 'Decentralized systems:', config.supply_system_simulation.decentralized_systems
    print 'supply calculation succeeded!'
# NOTE(review): everything down to `return mostSensitive` is the interior of a
# sensitivity-analysis function (presumably `sensAnalysis`) whose `def` line is not
# visible in this chunk — `step`, `FactorResults`, `pop`, `toolbox`, `buildList`,
# `solarFeat`, `ntwFeat`, `bandwidth`, `gV` and `eI` are its closure/outer names.
# Indentation here is reconstructed; verify against the original file.
def sensOneFactor(obj, factor_name, mini, maxi, colNumber):
    # Perturb one attribute of `obj` over [mini, maxi] and re-evaluate the population,
    # recording each perturbation delta into column `colNumber` of FactorResults.
    iniValue = getattr(obj, factor_name)
    index = 0

    for delta in np.arange(mini, maxi + 1E-5, (maxi - mini) / step):
        FactorResults[index][colNumber + 0] = delta
        if abs(delta) > 1E-10:  # skip the unperturbed (delta ~ 0) point
            setattr(obj, factor_name, iniValue * (1 + delta))
            newpop = []
            for ind in pop:
                newInd = toolbox.clone(ind)
                newpop.append(newInd)
                # NOTE(review): the next statement is corrupted in the source (double comma,
                # truncated call) — presumably it should read
                #   (costs, CO2, prim) = eI.evaluation_main(newInd, buildList, locator, solarFeat, ntwFeat, obj, ...)
                # reconstruct from version control before shipping.
                (costs, CO2, prim) = eI.evaluation_main(newInd, buildList, locator, solarFeat, ntwFeat, obj,,
                newInd.fitness.values = (costs, CO2, prim)
        index += 1
    # restore the attribute so later factors start from the unperturbed value
    setattr(obj, factor_name, iniValue)

sensOneFactor(gV, 'ELEC_PRICE', bandwidth.minElec, bandwidth.maxElec, 0)
sensOneFactor(gV, 'NG_PRICE', bandwidth.minNG, bandwidth.maxNG, 2)
sensOneFactor(gV, 'BG_PRICE', bandwidth.minBG, bandwidth.maxBG, 4)

# pick the factor whose perturbation produced the largest objective change
indexSensible = FactorResults.argmax() % (2 * bandwidth.nFactors)
if indexSensible == 1:
    mostSensitive = 'Electricity price'
elif indexSensible == 3:
    mostSensitive = 'NG price'
else:
    mostSensitive = 'BG price'

print FactorResults
print mostSensitive
return mostSensitive

# number of the generation (checkpoint) on which the sensitivity analysis is run
gen = 4

def run_as_script(scenario_path=None):
    # Second copy of `run_as_script` in this file (see the earlier one) — this definition
    # wins at import time. Runs the sensitivity analysis for the given scenario.
    import cea.globalvar
    import pandas as pd
    import cea.optimization.distribution.network_opt_main as network_opt
    from cea.optimization.preprocessing.preprocessing_main import preproccessing

    gv = cea.globalvar.GlobalVariables()
    if scenario_path is None:
        scenario_path = gv.scenario_reference

    locator = cea.inputlocator.InputLocator(scenario_path=scenario_path)
    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()
    weather_file = locator.get_default_weather()

    # NOTE(review): 5-argument call; other call sites in this file pass
    # (..., gv, config, prices, lca) — confirm against the current signature.
    extraCosts, extraCO2, extraPrim, solarFeat = preproccessing(locator, total_demand, building_names,
                                                                weather_file, gv)
    ntwFeat = network_opt.network_opt_main()

    sensAnalysis(locator, extraCosts, extraCO2, extraPrim, solarFeat, ntwFeat, gen)
    print 'sensitivity analysis succeeded'

if __name__ == '__main__':
    run_as_script(r'C:\reference-case-zug\baseline')
def individual_evaluation(generation, level, size, variable_groups):
    """
    :param generation: Generation of the optimization in which the individual evaluation is to be done
    :type generation: int
    :param level: Number of the uncertain scenario. For each scenario, the objectives are calculated
    :type level: int
    :param size: Total uncertain scenarios developed. See 'uncertainty.csv'
    :type size: int
    :param variable_groups: unused in this body — NOTE(review): confirm whether it can be dropped
    :return: Function saves the new objectives in a json file
    """
    from cea.optimization.preprocessing.preprocessing_main import preproccessing
    gv = cea.globalvar.GlobalVariables()
    scenario_path = gv.scenario_reference
    locator = cea.inputlocator.InputLocator(scenario_path)
    config = cea.config.Configuration()
    weather_file = locator.get_default_weather()

    # load the checkpointed population of the requested generation
    # NOTE(review): "\C" is a literal backslash-C (not an escape) — Windows-only path separator;
    # consider os.path.join for portability.
    with open(locator.get_optimization_master_results_folder() + "\CheckPoint_" + str(generation),
              "rb") as fp:
        data = json.load(fp)

    pop = data['population']
    ntwList = data['networkList']

    # # Uncertainty Part
    # read scenario `level` from uncertainty.csv and overwrite the matching gv attributes
    # NOTE(review): '\u' in a Py2 byte string is a literal backslash-u; this breaks under Python 3.
    row = []
    with open(locator.get_uncertainty_results_folder() + '\uncertainty.csv') as f:
        reader = csv.reader(f)
        for i in xrange(size + 1):
            row.append(next(reader))

    j = level + 1
    for i in xrange(len(row[0]) - 1):
        # row[0] holds the header (attribute names); row[j] the values for this scenario
        setattr(gv, row[0][i + 1], float(row[j][i + 1]))

    total_demand = pd.read_csv(locator.get_total_demand())
    building_names = total_demand.Name.values
    gv.num_tot_buildings = total_demand.Name.count()
    lca = lca_calculations(locator, config)
    prices = Prices(locator, config)

    # NOTE(review): `lca` and `prices` are built above but NOT passed here, unlike the
    # 8-argument `preproccessing(..., config, prices, lca)` call sites elsewhere in this
    # file; likewise `network_opt_main()` is called with no arguments vs (config, locator)
    # elsewhere. Verify against the current signatures.
    extra_costs, extra_CO2, extra_primary_energy, solarFeat = preproccessing(locator, total_demand,
                                                                             building_names, weather_file, gv)
    network_features = network_opt.network_opt_main()

    def objective_function(ind, ind_num):
        # Evaluate one individual and return its (cost, CO2, primary-energy) objectives.
        (costs, CO2, prim) = evaluation.evaluation_main(ind, building_names, locator, solarFeat,
                                                        network_features, gv, config, prices, lca,
                                                        ind_num, generation)
        # print (costs, CO2, prim)
        return (costs, CO2, prim)

    # re-evaluate the checkpointed population under the perturbed gv attributes
    fitness = []
    for i in xrange(gv.initialInd):
        evaluation.checkNtw(pop[i], ntwList, locator, gv)
        fitness.append(objective_function(pop[i], i))

    # save population and fitness of this uncertainty level as a new checkpoint
    with open(locator.get_uncertainty_checkpoint(level), "wb") as fp:
        cp = dict(population=pop, uncertainty_level=level, population_fitness=fitness)
        json.dump(cp, fp)