def test_calc_thermal_loads(self):
    """Run ``calc_thermal_loads`` for building B1011 and compare the demand-result
    column sums against the reference values stored in the test config."""
    bpr = self.building_properties['B1011']
    # run sequentially with deterministic schedules so the results are reproducible
    self.config.general.multiprocessing = False
    self.config.schedule_maker.schedule_model = "deterministic"
    schedule_maker_main(self.locator, self.config, building='B1011')

    result = calc_thermal_loads('B1011', bpr, self.weather_data, self.date_range, self.locator,
                                self.use_dynamic_infiltration_calculation, self.resolution_output,
                                self.loads_output, self.massflows_output, self.temperatures_output,
                                self.config, self.debug)
    self.assertIsNone(result)

    demand_file = self.locator.get_demand_results_file('B1011')
    self.assertTrue(os.path.exists(demand_file), 'Building csv not produced')
    self.assertTrue(os.path.exists(self.locator.get_temporary_file('B1011T.csv')),
                    'Building temp file not produced')

    # test the building csv file (output of the `calc_thermal_loads` call above)
    df = pd.read_csv(demand_file)
    value_columns = json.loads(self.test_config.get('test_calc_thermal_loads', 'value_columns'))
    values = json.loads(self.test_config.get('test_calc_thermal_loads', 'values'))
    for expected, column in zip(values, value_columns):
        actual = df[column].sum()
        self.assertAlmostEqual(expected, actual,
                               msg='Sum of column %s differs, %f != %f' % (column, expected, actual),
                               places=3)
def simulate_demand_sample(locator, config, output_parameters):
    """
    Run a demand simulation for a single sample. This function expects a locator that is already initialized to the
    simulation folder, that has already been prepared with `apply_sample_parameters`.

    (Docstring fixed: the previous version documented a non-existent ``weather`` parameter, did not document
    ``config``, and claimed a DataFrame return although a tuple is returned.)

    :param locator: The InputLocator to use for the simulation
    :type locator: InputLocator
    :param config: the configuration to use for the simulation. The demand and schedule-maker sections are
        overridden below to produce a sequential, monthly-resolution, csv-format run with deterministic schedules.
    :type config: cea.config.Configuration
    :param output_parameters: The list of output parameters to save to disk. This is a column-wise subset of the
        output of `cea.demand.demand_main.demand_calculation`.
    :type output_parameters: list of str
    :return: tuple of (totals restricted to ``output_parameters``, time_series) as produced by
        `cea.demand.demand_main.demand_calculation`
    :rtype: tuple
    """
    # MODIFY CONFIG FILE TO RUN THE DEMAND FOR ONLY SPECIFIC QUANTITIES
    config.demand.resolution_output = "monthly"
    config.multiprocessing = False  # force simulation to be sequential
    config.demand.massflows_output = []
    config.demand.temperatures_output = []
    config.demand.format_output = "csv"
    config.demand.override_variables = True
    config.schedule_maker.schedule_model = "deterministic"
    config.schedule_maker.buildings = config.demand.buildings

    schedule_maker_main(locator, config)
    totals, time_series = demand_main.demand_calculation(locator, config)
    return totals[output_parameters], time_series
def test_mixed_use_schedules(self):
    """Check the deterministic schedules produced for a mixed-use building (OFFICE/SERVERROOM)
    against the reference values stored in the test config.

    Fixes: the string-comparison branch referenced the Python-2-only name ``unicode`` (a NameError on
    Python 3, which this codebase requires); the deprecated ``SafeConfigParser`` is replaced by
    ``ConfigParser``; the test-config parser no longer shadows the cea ``config`` local.
    """
    locator = ReferenceCaseOpenLocator()
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    config.scenario = locator.scenario

    building_properties = BuildingProperties(locator, False)
    bpr = building_properties['B1011']
    bpr.occupancy = {'OFFICE': 0.5, 'SERVERROOM': 0.5}
    bpr.comfort['mainuse'] = 'OFFICE'

    # calculate schedules
    schedule_maker_main(locator, config)
    calculated_schedules = pd.read_csv(locator.get_schedule_model_file('B1011')).set_index('DATE')

    # read the stored reference results (separate name so the cea config above is not shadowed)
    test_config = ConfigParser.ConfigParser()
    test_config.read(get_test_config_path())
    reference_results = json.loads(test_config.get('test_mixed_use_schedules', 'reference_results'))

    for schedule in reference_results:
        calculated = calculated_schedules[schedule][REFERENCE_TIME]
        reference = reference_results[schedule]
        if isinstance(calculated, str) and isinstance(reference, str):
            # string-valued schedules (e.g. system on/off flags) must match exactly
            self.assertEqual(calculated, reference,
                             msg="Schedule '{}' at time {}, {} != {}".format(schedule, str(REFERENCE_TIME),
                                                                             calculated, reference))
        else:
            # numeric schedules are compared to 4 decimal places
            self.assertAlmostEqual(calculated, reference, places=4,
                                   msg="Schedule '{}' at time {}, {} != {}".format(schedule, str(REFERENCE_TIME),
                                                                                   calculated, reference))
def run_for_single_building(building, bpr, weather_data, date_range, locator,
                            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                            massflows_output, temperatures_output, config, debug):
    """
    Run the schedule maker and the thermal-loads calculation for a single building, then read the
    resulting demand csv and return the annual sums of the main demand columns.

    :return: tuple (building name, annual Qhs_sys_kWh, annual Qcs_sys_kWh, annual Qww_sys_kWh),
        all three sums cast to float
    """
    config.multiprocessing = False  # force a sequential run for this single building
    schedule_maker_main(locator, config, building=building)
    calc_thermal_loads(building, bpr, weather_data, date_range, locator,
                       use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                       massflows_output, temperatures_output, config, debug)
    df = pd.read_csv(locator.get_demand_results_file(building))
    # cast all three sums to float for a consistent return type (previously Qcs was left as a numpy scalar)
    return (building,
            float(df['Qhs_sys_kWh'].sum()),
            float(df['Qcs_sys_kWh'].sum()),
            float(df['Qww_sys_kWh'].sum()))
def create_data():
    """Create test data to compare against - run this the first time you make changes that affect the results.
    Note, this will overwrite the previous test data.

    Fixes: the deprecated ``SafeConfigParser`` (removed in Python 3.12) is replaced by ``ConfigParser``;
    dead locals were removed (an unused ``list_uses`` list containing the typo 'SERVERRROOM', and an
    unused weather-file year / date-range computation).
    """
    test_config = ConfigParser.ConfigParser()
    test_config.read(get_test_config_path())

    # reference values for test_mixed_use_archetype_values
    if not test_config.has_section('test_mixed_use_archetype_values'):
        test_config.add_section('test_mixed_use_archetype_values')
    locator = ReferenceCaseOpenLocator()
    expected_results = calculate_mixed_use_archetype_values_results(locator)
    test_config.set('test_mixed_use_archetype_values', 'expected_results', expected_results.to_json())

    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    locator = ReferenceCaseOpenLocator()

    # calculate schedules for a mixed-use building
    building_properties = BuildingProperties(locator, False)
    bpr = building_properties['B1011']
    # NOTE(review): mutates the bpr in place before running the schedule maker - presumably the schedule
    # maker picks the occupancy mix up from the scenario files rather than this object; verify if results differ.
    bpr.occupancy = {'OFFICE': 0.5, 'SERVERROOM': 0.5}

    calculated_schedules = schedule_maker_main(locator, config)

    # reference values for test_mixed_use_schedules: one value per schedule at REFERENCE_TIME
    if not test_config.has_section('test_mixed_use_schedules'):
        test_config.add_section('test_mixed_use_schedules')
    test_config.set('test_mixed_use_schedules', 'reference_results', json.dumps(
        {schedule: calculated_schedules[schedule][REFERENCE_TIME]
         for schedule in calculated_schedules.keys()}))

    with open(get_test_config_path(), 'w') as f:
        test_config.write(f)
def run_bigmacc(config):
    """
    This is the main script for the bigmacc process. It iteartes through various CEA and bigmacc operations
    for each key (i.e. 01011101). It ends by saving a sample of the hourly results across the key for each
    building in a netcdf and then wiping the project files to reset them for the next iteration.

    :param config: cea configuration object; the ``bigmacc`` section drives the experiment key, data paths
        and rerun behaviour
    :type config: cea.config.Configuration
    :return: None
    """
    locator = cea.inputlocator.InputLocator(config.scenario)

    # set the key (i.e. 01010100) - each digit of the key toggles one experiment dimension
    print('Key in run')
    i = config.bigmacc.key
    print(i)

    # SCENARIO SETUP ---
    cea.datamanagement.data_initializer.main(config)
    # use the scenario code to set the year for the lca and other operations that need the current year
    # (the parent scenario name is expected to look like '<name>_<year>' - TODO confirm)
    pathway_code = config.general.parent
    pathway_items = pathway_code.split('_')
    scenario_year = int(pathway_items[1])
    config.emissions.year_to_calculate = scenario_year

    bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out',
                                        config.bigmacc.round)

    # skip experiments already logged as completed in logger.csv
    scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')
    experiment_key = 'exp_{}'.format(i)
    print(experiment_key)
    keys = [int(x) for x in str(i)]  # per-digit toggles of the experiment key
    if experiment_key in scen_check['Experiments'].values.tolist():
        print('Experiment was finished previously, moving to next.')
        pass
    else:
        print('START: experiment {}.'.format(i))

        # INITIALIZE TIMER ---
        t0 = time.perf_counter()

        # run the archetype mapper to leverage the newly loaded typology file and set parameters
        print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'
              .format(i))
        cea.datamanagement.archetypes_mapper.main(config)

        # run the rule checker to set the scenario parameters
        print(' - Running rule checker for experiment {}.'.format(i))
        cea.bigmacc.bigmacc_rules.main(config)

        # SIMULATIONS ---
        print(' - Run radiation is {}.'.format(config.bigmacc.runrad))
        print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))

        # SIMULATION 1 checking on need for radiation simulation
        unique_rad_files = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_in',
                                        util.change_key(i), 'solar-radiation')
        if i in config.bigmacc.runradiation:
            # everytime the runradiation list is triggered a new set of files is loaded in
            shutil.rmtree(locator.get_solar_radiation_folder())
            if config.bigmacc.rerun == True:
                # rerun mode: reuse radiation results archived by a previous round
                print(' - Rerun mode, copying radiation files for experiment {}.'.format(i))
                distutils.dir_util.copy_tree(unique_rad_files, locator.get_solar_radiation_folder())
            else:
                print(' - Radiation running for experiment {}.'.format(i))
                cea.resources.radiation_daysim.radiation_main.main(config)
                # archive the fresh radiation results so later reruns can copy them back
                print(' - Copying radiation files to repo for experiment {}.'.format(i))
                distutils.dir_util.copy_tree(locator.get_solar_radiation_folder(), unique_rad_files)
        else:
            print(' - Previous iteration radiation files are equivalent for experiment {}.'.format(i))
        # safety net: if no radiation results exist at all, run the radiation script anyway
        if not os.path.exists(locator.get_solar_radiation_folder()):
            print(' - Radiation files for experiment {} not found, running radiation script.'
                  .format(i))
            cea.resources.radiation_daysim.radiation_main.main(config)

        # SIMULATION 2 check to see if schedules need to be made
        # NOTE(review): schedule_maker_main(locator, config) appears to run for the whole scenario,
        # so one missing building triggers a full schedule run - confirm intent.
        bldg_names = locator.get_zone_building_names()
        for name in bldg_names:
            if not os.path.exists(locator.get_schedule_model_file(name)):
                print(' - Schedule maker running for building {}.'.format(name))
                schedule_maker.schedule_maker_main(locator, config)
            else:
                print(' - Schedules exist for building {}.'.format(name))
        print(' - Schedules exist for experiment {}.'.format(i))

        # SIMULATION 3 run demand
        # cea.demand.demand_main.main(config)
        if config.bigmacc.rerun != True:
            print(' - Running demand simulation for experiment {}.'.format(i))
            cea.demand.demand_main.main(config)
        else:
            # rerun mode: only re-simulate when key digit 0 or 6 is set; otherwise copy stored results
            if keys[0] == 1:
                print(' - Running demand simulation for experiment {}.'.format(i))
                cea.demand.demand_main.main(config)
            elif keys[6] == 1:
                print(' - Running demand simulation for experiment {}.'.format(i))
                cea.demand.demand_main.main(config)
            else:
                print(' - Copying demand results for experiment {}.'.format(i))
                # hard-coded archive location of previously computed demand results
                old_demand_files = os.path.join(r'D:\BIGMACC_WESBROOK\Projects', config.general.parent,
                                                config.bigmacc.key, 'initial', 'outputs', 'data', 'demand')
                distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())
        # safety net (message says "radiation script" but it runs the demand script)
        if not os.path.exists(locator.get_demand_results_folder()):
            print(' - Demand results for experiment {} not found, running radiation script.'
                  .format(i))
            cea.demand.demand_main.main(config)

        # SIMULATION 4 check to see if pv needs to run
        if config.bigmacc.pv == True:
            unique_pv_files = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_in',
                                           util.change_key(i), 'potentials', 'solar')
            # PV reuse follows the same runradiation trigger as the radiation files above
            if i in config.bigmacc.runradiation:
                shutil.rmtree(locator.solar_potential_folder())
                if config.bigmacc.rerun == True:
                    print(' - Rerun mode, copying PV files for experiment {}.'.format(i))
                    distutils.dir_util.copy_tree(unique_pv_files, locator.solar_potential_folder())
                else:
                    print(' - Radiation running for experiment {}.'.format(i))
                    photovoltaic.main(config)
                    print(' - Copying PV files to repo for experiment {}.'.format(i))
                    distutils.dir_util.copy_tree(locator.solar_potential_folder(), unique_pv_files)
            else:
                print(' - Previous iteration PV results files are equivalent for experiment {}.'.format(i))
            # last check for the PV files
            if not os.path.exists(locator.solar_potential_folder()):
                print(' - PV results do not exist running simulation for experiment {}.'.format(i))
                photovoltaic.main(config)
        else:
            print(f' - PV does not exist in scenario {i}.')

        # SIMULATION 5 recalculating the supply split between grid and ng in the wesbrook DH
        if keys[4] == 1:
            print(' - Do not run district heat recalculation.')
        else:
            print(' - Run district heat recalculation.')
            cea.bigmacc.wesbrook_DH_multi.main(config)

        # include PV results in demand results files for costing and emissions
        if keys[7] == 1:
            print(' - PV use detected. Adding PV generation to demand files.')
            util.write_pv_to_demand_multi(config)
        else:
            print(' - No PV use detected.')
        print(f' - Writing annual results for {i}.')
        util.rewrite_annual_demand(config)

        # SIMULATION 6 & 7 running the emissions and costing calculations
        print(' - Run cost and emissions scripts.')
        cea.analysis.costs.system_costs.main(config)
        cea.analysis.lca.main.main(config)

        # FILE MANAGEMENT ---
        # clone out the simulation inputs and outputs directory
        print(' - Transferring results directory for experiment {}.'.format(i))
        new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out',
                                       config.bigmacc.key, 'inputs')
        # new_outputs_path_demand = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out',
        #                                        config.bigmacc.key, 'demand')
        # NOTE(review): both branches below are identical (the demand copy is commented out in each) -
        # this if/else is redundant as written; confirm whether rerun mode was meant to copy more.
        if config.bigmacc.rerun != True:
            distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)
            # distutils.dir_util.copy_tree(locator.get_demand_results_folder(), new_outputs_path_demand)
        else:
            distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)
            # distutils.dir_util.copy_tree(locator.get_demand_results_folder(), new_outputs_path_demand)

        log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')

        # write netcdf of hourly_results
        print('Writing the hourly results to zarr.')
        netcdf_writer.main(config, time_scale='hourly')
        # only write the whole-run annual results once all keys have been completed
        if len(log_df['Completed']) < (len(util.generate_key_list(config)) - 1):
            print('Writing the annual results to netcdf.')
        else:
            print('Writing the annual results to zarr.')
            netcdf_writer.main(config, time_scale='whole')

        time_elapsed = time.perf_counter() - t0

        # save log information (appends one row per completed experiment)
        log_df = log_df.append(pd.DataFrame(
            {
                'Experiments': 'exp_{}'.format(i),
                'Completed': 'True',
                'Experiment Time': time_elapsed,
                'Unique Radiation': config.bigmacc.runrad
            }, index=[0]), ignore_index=True)
        log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))
        # hard-coded local backup of the logger - machine-specific path
        log_df.to_csv(r"C:\Users\justi\Desktop\126logger_backup.csv", )

        # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach
        # so reset it here
        if keys[0] == 1:
            print(' - Rerun data initializer.')
            cea.datamanagement.data_initializer.main(config)
        else:
            pass
        print('END: experiment {}. \n'.format(i))
def main(output_file):
    """Regenerate the reference data for the thermal-loads tests and write it to ``output_file``.

    Extracts the reference-case-open scenario to a temp folder, runs the archetypes mapper, computes the
    thermal loads of B1011 (for test_calc_thermal_loads) and of every building (for
    test_calc_thermal_loads_other_buildings), and stores the resulting column sums in the config file.

    :param output_file: path of the .config file to (re)write the reference sections into
    """
    import cea.examples
    # unpack the bundled reference scenario next to the cea.examples package into the temp dir
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')

    locator = InputLocator(reference_case)
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)

    weather_path = locator.get_weather('Zug_inducity_2009')
    weather_data = epwreader.epw_reader(weather_path)[[
        'year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C'
    ]]

    # run properties script
    import cea.datamanagement.archetypes_mapper
    cea.datamanagement.archetypes_mapper.archetypes_mapper(
        locator, True, True, True, True, True, True, [])

    # build the hourly date range from the weather file's year
    year = weather_data['year'][0]
    date_range = get_date_range_hours_from_year(year)

    # demand-calculation options taken from the default configuration
    resolution_outputs = config.demand.resolution_output
    loads_output = config.demand.loads_output
    massflows_output = config.demand.massflows_output
    temperatures_output = config.demand.temperatures_output
    use_dynamic_infiltration_calculation = config.demand.use_dynamic_infiltration_calculation
    debug = config.debug

    building_properties = BuildingProperties(locator)

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())

    schedule_maker_main(locator, config, building='B1011')
    bpr = building_properties['B1011']
    result = calc_thermal_loads('B1011', bpr, weather_data, date_range, locator,
                                use_dynamic_infiltration_calculation, resolution_outputs,
                                loads_output, massflows_output, temperatures_output, config,
                                debug)

    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B1011'))

    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    test_config = configparser.ConfigParser()
    test_config.read(output_file)

    # the subset of demand-result columns whose sums are pinned by test_calc_thermal_loads
    value_columns = [
        u"E_sys_kWh", u"Qcdata_sys_kWh", u"Qcre_sys_kWh", u"Qcs_sys_kWh", u"Qhs_sys_kWh",
        u"Qww_sys_kWh", u"Tcs_sys_re_C", u"Ths_sys_re_C", u"Tww_sys_re_C",
        u"Tcs_sys_sup_C", u"Ths_sys_sup_C", u"Tww_sys_sup_C"
    ]

    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not test_config.has_section("test_calc_thermal_loads"):
        test_config.add_section("test_calc_thermal_loads")
    test_config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    print(values)
    test_config.set("test_calc_thermal_loads", "values", json.dumps(values))

    # per-building reference sums for test_calc_thermal_loads_other_buildings
    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = [
        'B1013', 'B1012', 'B1010', 'B1000', 'B1009', 'B1011', 'B1006', 'B1003', 'B1004',
        'B1001', 'B1002', 'B1005', 'B1008', 'B1007', 'B1014'
    ]
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh = run_for_single_building(
            building, bpr, weather_data, date_range, locator,
            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
            massflows_output, temperatures_output, config, debug)
        # print in a copy-pasteable form for manual inspection
        print("'%(b)s': (%(qhs_sys_kwh).5f, %(qcs_sys_kwh).5f, %(qww_sys_kwh).5f)," % locals())
        results[building] = (qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh)

    if not test_config.has_section("test_calc_thermal_loads_other_buildings"):
        test_config.add_section("test_calc_thermal_loads_other_buildings")
    test_config.set("test_calc_thermal_loads_other_buildings", "results", json.dumps(results))

    with open(output_file, 'w') as f:
        test_config.write(f)
    print("Wrote output to %(output_file)s" % locals())