def main():
    """Simulate the reference case and print the reference values used by the
    test_calc_thermal_loads_* tests (expected columns, column sums and per-building
    QCf/QHf totals) to stdout so they can be pasted into the test module."""
    locator = InputLocator(REFERENCE_CASE)
    gv = GlobalVariables()
    weather_path = locator.get_default_weather()
    weather_data = epwreader.epw_reader(weather_path)[['drybulb_C', 'relhum_percent',
                                                       'windspd_ms', 'skytemp_C']]
    building_properties = BuildingProperties(locator, gv)
    # one timestamp per hour of a (non-leap) year
    date = pd.date_range(gv.date_start, periods=8760, freq='H')
    list_uses = building_properties.list_uses()
    schedules = schedule_maker(date, locator, list_uses)
    usage_schedules = {'list_uses': list_uses, 'schedules': schedules}
    print("data for test_calc_thermal_loads_new_ventilation:")
    # FIX: was a Python 2 `print` statement; use the function form like the rest of this script
    # (single-argument print(...) behaves identically on Python 2 and 3)
    print(building_properties.list_building_names())
    bpr = building_properties['B01']
    result = calc_thermal_loads('B01', bpr, weather_data, usage_schedules, date, gv, locator)
    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B01'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))
    value_columns = [u'Ealf_kWh', u'Eauxf_kWh', u'Edataf_kWh', u'Ef_kWh', u'QCf_kWh', u'QHf_kWh',
                     u'Qcdataf_kWh', u'Qcref_kWh', u'Qcs_kWh', u'Qcsf_kWh', u'Qhs_kWh', u'Qhsf_kWh',
                     u'Qww_kWh', u'Qwwf_kWh', u'Tcsf_re_C', u'Thsf_re_C', u'Twwf_re_C',
                     u'Tcsf_sup_C', u'Thsf_sup_C', u'Twwf_sup_C']
    print("values = %s " % repr([df[column].sum() for column in value_columns]))
    print("data for test_calc_thermal_loads_other_buildings:")
    # randomly selected except for B302006716, which has `Af == 0`
    buildings = {'B01': (81124.39400, 150471.05200),
                 'B03': (81255.09200, 150520.01000),
                 'B02': (82176.15300, 150604.85100),
                 'B05': (84058.72400, 150841.56200),
                 'B04': (82356.22600, 150598.43400),
                 'B07': (81052.19000, 150490.94800),
                 'B06': (83108.45600, 150657.24900),
                 'B09': (84491.58100, 150853.54000),
                 'B08': (88572.59000, 151020.09300), }
    for building in buildings.keys():
        bpr = building_properties[building]
        b, qcf_kwh, qhf_kwh = run_for_single_building(building, bpr, weather_data, usage_schedules,
                                                      date, gv, locator)
        print("'%(b)s': (%(qcf_kwh).5f, %(qhf_kwh).5f)," % locals())
def create_demand_samples(method='morris', num_samples=1000, variable_groups=('ENVELOPE',),
                          sampler_parameters=None):
    """
    Create the samples to simulate using the specified method (`method`), the sampling method
    parameter N (`num_samples`) and any additional sampling method-specific parameters specified in
    `sampler_parameters` for each variable defined in the uncertainty database worksheets referenced
    in `variable_groups`.

    :param method: The method to use. Valid values are 'morris' (default) and 'sobol'.
    :type method: str
    :param num_samples: The parameter `N` for the sampling methods (sobol defines this as "The number
        of samples to generate", but in reality, for both methods, the actual number of samples is a
        multiple of `num_samples`).
    :type num_samples: int
    :param sampler_parameters: additional, sampler-specific parameters. For `method='morris'` these
        are: [grid_jump, num_levels], for `method='sobol'` these are: [calc_second_order].
        Defaults to no extra parameters.
    :type sampler_parameters: dict of (str, _) or None
    :param variable_groups: list of names of groups of variables to analyse. Possible values are:
        'THERMAL', 'ARCHITECTURE', 'INDOOR_COMFORT', 'INTERNAL_LOADS'. This list links to the
        probability density functions of the variables contained in locator.get_uncertainty_db()
        and refers to the Excel worksheet names.
    :return: (samples, problem) - samples is a list of configurations for each simulation to run, a
        configuration being a list of values for each variable in the problem. The problem is a
        dictionary with the keys 'num_vars', 'names' and 'bounds' and describes the variables being
        sampled: 'names' is a list of variable names of length 'num_vars' and 'bounds' is a list of
        tuples (lower-bound, upper-bound) for each of these variables. Further, the keys 'N'
        (`num_samples`) and 'method' (`method`) are set and the sampler_parameters are also added
        to `problem`.
    """
    # FIX: the default used to be a mutable `{}` shared between calls; use a None sentinel
    # so each call gets its own empty dict
    if sampler_parameters is None:
        sampler_parameters = {}
    locator = InputLocator(None)
    # get probability density functions (pdf) of all variable_groups from the uncertainty database
    pdf = pd.concat([pd.read_excel(locator.get_uncertainty_db(), group, axis=1)
                     for group in variable_groups])
    # a list of tuples containing the lower-bound and upper-bound of each variable
    bounds = list(zip(pdf['min'], pdf['max']))
    # define the problem
    problem = {'num_vars': pdf.name.count(), 'names': pdf.name.values, 'bounds': bounds,
               'groups': None, 'N': num_samples, 'method': method}
    problem.update(sampler_parameters)
    return sampler(method, problem, num_samples, sampler_parameters), problem
def main():
    """Simulate the reference case and print the reference values used by the
    test_calc_thermal_loads_* tests (expected columns, column sums and per-building
    QCf/QHf totals) to stdout so they can be pasted into the test module."""
    locator = InputLocator(REFERENCE_CASE)
    gv = GlobalVariables()
    weather_path = locator.get_default_weather()
    weather_data = epwreader.epw_reader(weather_path)[['drybulb_C', 'relhum_percent',
                                                       'windspd_ms', 'skytemp_C']]
    building_properties = BuildingProperties(locator, gv)
    # one timestamp per hour of a (non-leap) year
    date = pd.date_range(gv.date_start, periods=8760, freq='H')
    list_uses = building_properties.list_uses()
    schedules = schedule_maker(date, locator, list_uses)
    usage_schedules = {'list_uses': list_uses, 'schedules': schedules}
    print("data for test_calc_thermal_loads_new_ventilation:")
    # FIX: was a Python 2 `print` statement; use the function form like the rest of this script
    # (single-argument print(...) behaves identically on Python 2 and 3)
    print(building_properties.list_building_names())
    bpr = building_properties['B01']
    result = calc_thermal_loads('B01', bpr, weather_data, usage_schedules, date, gv, locator)
    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B01'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))
    value_columns = [u'Ealf_kWh', u'Eauxf_kWh', u'Edataf_kWh', u'Ef_kWh', u'QCf_kWh', u'QHf_kWh',
                     u'Qcdataf_kWh', u'Qcref_kWh', u'Qcs_kWh', u'Qcsf_kWh', u'Qhs_kWh', u'Qhsf_kWh',
                     u'Qww_kWh', u'Qwwf_kWh', u'Tcsf_re_C', u'Thsf_re_C', u'Twwf_re_C',
                     u'Tcsf_sup_C', u'Thsf_sup_C', u'Twwf_sup_C']
    print("values = %s " % repr([df[column].sum() for column in value_columns]))
    print("data for test_calc_thermal_loads_other_buildings:")
    # randomly selected except for B302006716, which has `Af == 0`
    buildings = {'B01': (81124.39400, 150471.05200),
                 'B03': (81255.09200, 150520.01000),
                 'B02': (82176.15300, 150604.85100),
                 'B05': (84058.72400, 150841.56200),
                 'B04': (82356.22600, 150598.43400),
                 'B07': (81052.19000, 150490.94800),
                 'B06': (83108.45600, 150657.24900),
                 'B09': (84491.58100, 150853.54000),
                 'B08': (88572.59000, 151020.09300), }
    for building in buildings.keys():
        bpr = building_properties[building]
        b, qcf_kwh, qhf_kwh = run_for_single_building(building, bpr, weather_data, usage_schedules,
                                                      date, gv, locator)
        print("'%(b)s': (%(qcf_kwh).5f, %(qhf_kwh).5f)," % locals())
def main(scenario_path, output_path, buildings=None):
    """Dump all BuildingProperties tables for a scenario to a CSV file with one column per
    building and one row per property.

    :param scenario_path: path to the scenario to read the building properties from
    :param output_path: path of the CSV file to write
    :param buildings: optional list of building names to dump; defaults to all buildings
        found in the RC-model table
    """
    locator = InputLocator(scenario_path)
    gv = GlobalVariables()
    bp = BuildingProperties(locator, gv)
    if not buildings:
        buildings = list(bp._prop_RC_model.index)
    # FIX: the row-index construction and the per-building dict merging used to repeat the same
    # nine statements for each property table; iterate one ordered list instead (same order,
    # same output)
    property_frames = [bp._prop_thermal, bp._prop_geometry, bp._prop_architecture,
                       bp._prop_occupancy, bp._prop_HVAC_result, bp._prop_RC_model,
                       bp._prop_comfort, bp._prop_internal_loads, bp._prop_age]
    row_index = []
    for frame in property_frames:
        row_index.extend(frame.columns)
    df = pd.DataFrame(columns=buildings, index=row_index)
    for building in buildings:
        # merge the building's row from every property table into one flat mapping
        bdata = {}
        for frame in property_frames:
            bdata.update(frame.T[building].to_dict())
        bseries = pd.Series(bdata, index=row_index)
        df[building] = bseries
    df.to_csv(output_path)
def setUpClass(cls):
    """Build the shared, expensive test fixtures once per test class: the input locator
    (overridable via the REFERENCE_CASE environment variable), global variables, weather
    data, building properties and occupancy schedules."""
    # FIX: dict.has_key() was removed in Python 3; the `in` operator is equivalent and
    # works on both Python 2 and 3
    if 'REFERENCE_CASE' in os.environ:
        cls.locator = InputLocator(os.environ['REFERENCE_CASE'])
    else:
        cls.locator = InputLocator(REFERENCE_CASE)
    cls.gv = GlobalVariables()
    weather_path = cls.locator.get_default_weather()
    cls.weather_data = epwreader.epw_reader(weather_path)[['drybulb_C', 'relhum_percent',
                                                           'windspd_ms', 'skytemp_C']]
    cls.building_properties = BuildingProperties(cls.locator, cls.gv)
    # one timestamp per hour of a (non-leap) year
    cls.date = pd.date_range(cls.gv.date_start, periods=8760, freq='H')
    cls.list_uses = cls.building_properties.list_uses()
    cls.schedules = schedule_maker(cls.date, cls.locator, cls.list_uses)
    cls.usage_schedules = {'list_uses': cls.list_uses, 'schedules': cls.schedules}
def setUpClass(cls):
    """Extract the bundled reference case to a temp folder and precompute the fixtures
    shared by all tests in this class (locator, globals, weather, properties, schedules)."""
    import zipfile
    import tempfile
    import cea.examples
    examples_dir = os.path.dirname(cea.examples.__file__)
    zip_path = os.path.join(examples_dir, 'reference-case-open.zip')
    zipfile.ZipFile(zip_path).extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    cls.locator = InputLocator(reference_case)
    cls.gv = GlobalVariables()
    epw_columns = ['drybulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C']
    cls.weather_data = epwreader.epw_reader(cls.locator.get_default_weather())[epw_columns]
    # run properties script
    import cea.demand.preprocessing.properties
    cea.demand.preprocessing.properties.properties(cls.locator, True, True, True, True)
    cls.building_properties = BuildingProperties(cls.locator, cls.gv)
    cls.date = pd.date_range(cls.gv.date_start, periods=8760, freq='H')
    cls.list_uses = cls.building_properties.list_uses()
    cls.archetype_schedules, cls.archetype_values = schedule_maker(cls.date, cls.locator,
                                                                   cls.list_uses)
    cls.occupancy_densities = cls.archetype_values['people']
    cls.usage_schedules = {'list_uses': cls.list_uses,
                           'archetype_schedules': cls.archetype_schedules,
                           'occupancy_densities': cls.occupancy_densities,
                           'archetype_values': cls.archetype_values}
def test_mixed_use_archetype_values(self):
    """Check that mixed-use buildings get the expected occupant-weighted archetype values
    (calculate_average_multiuse) and the expected share of heated area (correct_archetype_areas)."""
    # test if a sample mixed use building gets standard results
    # get reference case to be tested
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    locator = InputLocator(reference_case)

    # create test results
    # occupant density (m2/person) read from the first row of each use-type worksheet
    office_occ = float(
        pd.read_excel(locator.get_archetypes_schedules(),
                      'OFFICE').T['density'].values[:1][0])
    gym_occ = float(
        pd.read_excel(locator.get_archetypes_schedules(),
                      'GYM').T['density'].values[:1][0])
    # two synthetic buildings: B1 is 50/50 office/gym, B2 is 25/75
    calculated_results = calculate_average_multiuse(
        properties_df=pd.DataFrame(
            data=[['B1', 0.5, 0.5, 0.0, 0.0], ['B2', 0.25, 0.75, 0.0, 0.0]],
            columns=['Name', 'OFFICE', 'GYM', 'X_ghp', 'El_Wm2']),
        occupant_densities={
            'OFFICE': 1 / office_occ,  # persons per m2
            'GYM': 1 / gym_occ
        },
        list_uses=['OFFICE', 'GYM'],
        properties_DB=pd.read_excel(locator.get_archetypes_properties(),
                                    'INTERNAL_LOADS'))

    # compare to reference values
    expected_results = pd.DataFrame(
        data=[['B1', 0.5, 0.5, 208.947368, 12.9],
              ['B2', 0.25, 0.75, 236.382979, 11.4]],
        columns=['Name', 'OFFICE', 'GYM', 'X_ghp', 'El_Wm2'])
    assert_frame_equal(calculated_results, expected_results)

    architecture_DB = get_database(locator.get_archetypes_properties(), 'ARCHITECTURE')
    # archetype lookup code: use + construction period + standard, e.g. 'OFFICE20062020C'
    architecture_DB['Code'] = architecture_DB.apply(
        lambda x: x['building_use'] + str(x['year_start']) + str(x[
            'year_end']) + x['standard'], axis=1)

    # Hs (share of heated area) should come from the dominant use's archetype
    self.assertEqual(
        correct_archetype_areas(prop_architecture_df=pd.DataFrame(
            data=[['B1', 0.5, 0.5, 0.0, 2006, 2020, 'C'],
                  ['B2', 0.2, 0.8, 0.0, 1300, 1920, 'R']],
            columns=[
                'Name', 'SERVERROOM', 'PARKING', 'Hs', 'year_start',
                'year_end', 'standard'
            ]),
            architecture_DB=architecture_DB,
            list_uses=['SERVERROOM', 'PARKING']),
        [0.5, 0.2])
def apply_sample_parameters(sample_index, samples_path, scenario_path, simulation_path):
    """
    Copy the scenario from the `scenario_path` to the `simulation_path`. Patch the parameters from
    the problem statement. Return an `InputLocator` implementation that can be used to simulate the
    demand of the resulting scenario.

    The `simulation_path` is modified by the demand calculation. For the purposes of the sensitivity
    analysis, these changes can be viewed as temporary and deleted / overwritten after each
    simulation.

    :param sample_index: zero-based index into the samples list, which is read from the file
        `$samples_path/samples.npy`
    :type sample_index: int
    :param samples_path: path to the pre-calculated samples and problem statement (created by
        `sensitivity_demand_samples.py`)
    :type samples_path: str
    :param scenario_path: path to the scenario template
    :type scenario_path: str
    :param simulation_path: a (temporary) path for simulating a scenario that has been patched
        with a sample
        NOTE: When simulating in parallel, special care must be taken that each process has a
        unique `simulation_path` value. For the Euler cluster, this is solved by ensuring the
        simulation is done with `gv.multiprocessing = False` and setting the `simulation_path` to
        the special folder `$TMPDIR` that is set to a local scratch folder for each job by the job
        scheduler of the Euler cluster. Other setups will need to adopt an equivalent strategy.
    :type simulation_path: str
    :return: InputLocator that can be used to simulate the demand in the `simulation_path`, or
        None if `sample_index` is out of range
    """
    # start from a fresh copy of the scenario template
    if os.path.exists(simulation_path):
        shutil.rmtree(simulation_path)
    shutil.copytree(scenario_path, simulation_path)
    locator = InputLocator(scenario_path=simulation_path)

    # FIX: pickle files must be opened in binary mode ('rb', not 'r') - text mode fails
    # outright on Python 3 and can corrupt the stream on Windows Python 2
    with open(os.path.join(samples_path, 'problem.pickle'), 'rb') as f:
        problem = pickle.load(f)
    samples = np.load(os.path.join(samples_path, 'samples.npy'))
    try:
        sample = samples[sample_index]
    except IndexError:
        return None

    # write one override value per variable for every building in the scenario
    prop = Gdf.from_file(locator.get_building_geometry()).set_index('Name')
    prop_overrides = pd.DataFrame(index=prop.index)
    for i, key in enumerate(problem['names']):
        print("Setting prop_overrides['%s'] to %s" % (key, sample[i]))
        prop_overrides[key] = sample[i]

    sample_locator = InputLocator(scenario_path=simulation_path)
    prop_overrides.to_csv(sample_locator.get_building_overrides())
    return sample_locator
def setUpClass(cls):
    """Extract the bundled reference case to a temp folder and precompute everything the
    thermal-loads tests share: locator, global variables + config, weather data, reference
    values from the .config file, building properties/schedules and the demand settings."""
    import zipfile
    import tempfile
    import cea.examples
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__),
                     'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open',
                                  'baseline')
    cls.locator = InputLocator(reference_case)
    cls.gv = GlobalVariables()
    # use the shipped default configuration so results are reproducible
    cls.gv.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    weather_path = cls.locator.get_weather('Zug')
    cls.weather_data = epwreader.epw_reader(weather_path)[[
        'year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C'
    ]]
    year = cls.weather_data['year'][0]
    cls.region = cls.gv.config.region
    # reference values the tests compare against live next to this module
    cls.test_config = ConfigParser.SafeConfigParser()
    cls.test_config.read(
        os.path.join(os.path.dirname(__file__),
                     'test_calc_thermal_loads.config'))

    # run properties script
    import cea.datamanagement.data_helper
    cea.datamanagement.data_helper.data_helper(cls.locator, cls.gv.config, True,
                                               True, True, True, True, True)

    use_daysim_radiation = cls.gv.config.demand.use_daysim_radiation
    cls.building_properties, cls.usage_schedules, cls.date = properties_and_schedule(
        cls.gv, cls.locator, cls.region, year, use_daysim_radiation)
    # cache the demand-simulation settings used by the individual tests
    cls.use_dynamic_infiltration_calculation = cls.gv.config.demand.use_dynamic_infiltration_calculation
    cls.use_stochastic_occupancy = cls.gv.config.demand.use_stochastic_occupancy
    cls.resolution_output = cls.gv.config.demand.resolution_output
    cls.loads_output = cls.gv.config.demand.loads_output
    cls.massflows_output = cls.gv.config.demand.massflows_output
    cls.temperatures_output = cls.gv.config.demand.temperatures_output
    cls.format_output = cls.gv.config.demand.format_output
def test_mixed_use_schedules(self):
    """Check that the schedules computed for a 50/50 OFFICE+INDUSTRIAL building match
    known reference values at a fixed hour of the year."""
    # get reference case to be tested
    zip_path = os.path.join(os.path.dirname(cea.examples.__file__),
                            'reference-case-open.zip')
    zipfile.ZipFile(zip_path).extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open',
                                  'baseline')
    locator = InputLocator(reference_case)

    # calculate schedules
    list_uses = ['OFFICE', 'INDUSTRIAL']
    occupancy = {'OFFICE': 0.5, 'INDUSTRIAL': 0.5}
    gv = GlobalVariables()
    date = pd.date_range(gv.date_start, periods=8760, freq='H')
    archetype_schedules, archetype_values = schedule_maker(date, locator, list_uses)
    calculated_schedules = calc_schedules(list_uses, archetype_schedules, occupancy,
                                          archetype_values)

    # expected values at hour 3456 of the year
    reference_time = 3456
    reference_results = {'El': 0.1080392156862745,
                         'Qs': 0.0088163265306122462,
                         've': 0.01114606741573034,
                         'Epro': 0.17661721828842394,
                         'people': 0.0080000000000000019,
                         'Ed': 0.0,
                         'Vww': 0.0,
                         'Ea': 0.1340740740740741,
                         'Ere': 0.0,
                         'Vw': 0.0,
                         'X': 0.010264150943396229}

    for schedule, expected in reference_results.items():
        actual = calculated_schedules[schedule][reference_time]
        self.assertEqual(actual, expected,
                         msg="Schedule '%s' at time %s, %f != %f" %
                             (schedule, str(reference_time), actual, expected))
def main(output_file):
    """Run the demand simulation on the bundled reference case and record the resulting
    reference values (value columns, their sums and per-building load totals) in
    `output_file` for the test_calc_thermal_loads* tests to compare against."""
    import cea.examples
    examples_dir = os.path.dirname(cea.examples.__file__)
    zipfile.ZipFile(os.path.join(examples_dir, 'reference-case-open.zip')).extractall(
        tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    locator = InputLocator(reference_case)
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    weather_data = epwreader.epw_reader(locator.get_weather('Zug_inducity_2009'))[[
        'year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C'
    ]]

    # run properties script
    import cea.datamanagement.archetypes_mapper
    cea.datamanagement.archetypes_mapper.archetypes_mapper(locator, True, True, True, True,
                                                           True, True, [])

    year = weather_data['year'][0]
    date_range = get_date_range_hours_from_year(year)
    resolution_outputs = config.demand.resolution_output
    loads_output = config.demand.loads_output
    massflows_output = config.demand.massflows_output
    temperatures_output = config.demand.temperatures_output
    use_dynamic_infiltration_calculation = config.demand.use_dynamic_infiltration_calculation
    debug = config.debug

    building_properties = BuildingProperties(locator)

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())

    schedule_maker_main(locator, config, building='B1011')
    bpr = building_properties['B1011']
    result = calc_thermal_loads('B1011', bpr, weather_data, date_range, locator,
                                use_dynamic_infiltration_calculation, resolution_outputs,
                                loads_output, massflows_output, temperatures_output, config,
                                debug)

    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B1011'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    test_config = configparser.ConfigParser()
    test_config.read(output_file)

    value_columns = [u"E_sys_kWh", u"Qcdata_sys_kWh", u"Qcre_sys_kWh", u"Qcs_sys_kWh",
                     u"Qhs_sys_kWh", u"Qww_sys_kWh", u"Tcs_sys_re_C", u"Ths_sys_re_C",
                     u"Tww_sys_re_C", u"Tcs_sys_sup_C", u"Ths_sys_sup_C", u"Tww_sys_sup_C"]
    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not test_config.has_section("test_calc_thermal_loads"):
        test_config.add_section("test_calc_thermal_loads")
    test_config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    print(values)
    test_config.set("test_calc_thermal_loads", "values", json.dumps(values))

    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = ['B1013', 'B1012', 'B1010', 'B1000', 'B1009', 'B1011', 'B1006', 'B1003',
                 'B1004', 'B1001', 'B1002', 'B1005', 'B1008', 'B1007', 'B1014']
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh = run_for_single_building(
            building, bpr, weather_data, date_range, locator,
            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
            massflows_output, temperatures_output, config, debug)
        print("'%(b)s': (%(qhs_sys_kwh).5f, %(qcs_sys_kwh).5f, %(qww_sys_kwh).5f)," % locals())
        results[building] = (qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh)

    if not test_config.has_section("test_calc_thermal_loads_other_buildings"):
        test_config.add_section("test_calc_thermal_loads_other_buildings")
    test_config.set("test_calc_thermal_loads_other_buildings", "results",
                    json.dumps(results))
    with open(output_file, 'w') as f:
        test_config.write(f)
    print("Wrote output to %(output_file)s" % locals())
def main(output_file):
    """Run the demand simulation on the bundled reference case and record the resulting
    reference values (value columns, their sums and per-building QCf/QHf totals) in
    `output_file` for the test_calc_thermal_loads* tests to compare against."""
    import cea.examples
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    locator = InputLocator(reference_case)
    gv = GlobalVariables()
    weather_path = locator.get_default_weather()
    weather_data = epwreader.epw_reader(weather_path)[['drybulb_C', 'relhum_percent',
                                                       'windspd_ms', 'skytemp_C']]

    # run properties script
    import cea.demand.preprocessing.properties
    cea.demand.preprocessing.properties.properties(locator, True, True, True, True)

    building_properties = BuildingProperties(locator, gv)
    date = pd.date_range(gv.date_start, periods=8760, freq='H')
    list_uses = building_properties.list_uses()
    archetype_schedules, archetype_values = schedule_maker(date, locator, list_uses)
    usage_schedules = {'list_uses': list_uses,
                       'archetype_schedules': archetype_schedules,
                       'occupancy_densities': archetype_values['people'],
                       'archetype_values': archetype_values}

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())
    bpr = building_properties['B01']
    result = calc_thermal_loads('B01', bpr, weather_data, usage_schedules, date, gv, locator)

    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B01'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    config = ConfigParser.SafeConfigParser()
    config.read(output_file)
    value_columns = [u'Ealf_kWh', u'Eauxf_kWh', u'Edataf_kWh', u'Ef_kWh', u'QCf_kWh', u'QHf_kWh',
                     u'Qcdataf_kWh', u'Qcref_kWh', u'Qcs_kWh', u'Qcsf_kWh', u'Qhs_kWh', u'Qhsf_kWh',
                     u'Qww_kWh', u'Qwwf_kWh', u'Tcsf_re_C', u'Thsf_re_C', u'Twwf_re_C',
                     u'Tcsf_sup_C', u'Thsf_sup_C', u'Twwf_sup_C']
    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not config.has_section("test_calc_thermal_loads"):
        config.add_section("test_calc_thermal_loads")
    config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    # FIX: was a Python 2 `print values` statement; use the function form like the rest of
    # this script (single-argument print(...) behaves identically on Python 2 and 3)
    print(values)
    config.set("test_calc_thermal_loads", "values", json.dumps(values))

    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = ['B01', 'B03', 'B02', 'B05', 'B04', 'B07', 'B06', 'B09', 'B08']
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qcf_kwh, qhf_kwh = run_for_single_building(building, bpr, weather_data,
                                                      usage_schedules, date, gv, locator)
        print("'%(b)s': (%(qcf_kwh).5f, %(qhf_kwh).5f)," % locals())
        results[building] = (qcf_kwh, qhf_kwh)

    if not config.has_section("test_calc_thermal_loads_other_buildings"):
        config.add_section("test_calc_thermal_loads_other_buildings")
    config.set("test_calc_thermal_loads_other_buildings", "results", json.dumps(results))
    with open(output_file, 'w') as f:
        config.write(f)
    print("Wrote output to %(output_file)s" % locals())
def main():
    """Simulate the bundled reference case and print the reference values used by the
    test_calc_thermal_loads* tests (expected columns, column sums and per-building
    QCf/QHf totals) to stdout so they can be pasted into the test module."""
    import zipfile
    import cea.examples
    import tempfile
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    locator = InputLocator(reference_case)
    gv = GlobalVariables()
    weather_path = locator.get_default_weather()
    weather_data = epwreader.epw_reader(weather_path)[['drybulb_C', 'relhum_percent',
                                                       'windspd_ms', 'skytemp_C']]

    # run properties script
    import cea.demand.preprocessing.properties
    cea.demand.preprocessing.properties.properties(locator, True, True, True, True)

    building_properties = BuildingProperties(locator, gv)
    date = pd.date_range(gv.date_start, periods=8760, freq='H')
    list_uses = building_properties.list_uses()
    archetype_schedules, archetype_values = schedule_maker(date, locator, list_uses)
    usage_schedules = {'list_uses': list_uses,
                       'archetype_schedules': archetype_schedules,
                       'occupancy_densities': archetype_values['people'],
                       'archetype_values': archetype_values}

    print("data for test_calc_thermal_loads:")
    # FIX: was a Python 2 `print` statement; use the function form like the rest of this
    # script (single-argument print(...) behaves identically on Python 2 and 3)
    print(building_properties.list_building_names())
    bpr = building_properties['B01']
    result = calc_thermal_loads('B01', bpr, weather_data, usage_schedules, date, gv, locator)

    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B01'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))
    value_columns = [u'Ealf_kWh', u'Eauxf_kWh', u'Edataf_kWh', u'Ef_kWh', u'QCf_kWh', u'QHf_kWh',
                     u'Qcdataf_kWh', u'Qcref_kWh', u'Qcs_kWh', u'Qcsf_kWh', u'Qhs_kWh', u'Qhsf_kWh',
                     u'Qww_kWh', u'Qwwf_kWh', u'Tcsf_re_C', u'Thsf_re_C', u'Twwf_re_C',
                     u'Tcsf_sup_C', u'Thsf_sup_C', u'Twwf_sup_C']
    print("values = %s " % repr([df[column].sum() for column in value_columns]))
    print("data for test_calc_thermal_loads_other_buildings:")
    # randomly selected except for B302006716, which has `Af == 0`
    buildings = ['B01', 'B03', 'B02', 'B05', 'B04', 'B07', 'B06', 'B09', 'B08']
    for building in buildings:
        bpr = building_properties[building]
        b, qcf_kwh, qhf_kwh = run_for_single_building(building, bpr, weather_data,
                                                      usage_schedules, date, gv, locator)
        print("'%(b)s': (%(qcf_kwh).5f, %(qhf_kwh).5f)," % locals())
def main(output_file):
    """Run the demand simulation on the bundled reference case and record the resulting
    reference values (value columns, their sums and per-building system-load totals) in
    `output_file` for the test_calc_thermal_loads* tests to compare against."""
    import cea.examples
    archive = zipfile.ZipFile(
        os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')
    locator = InputLocator(reference_case)
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    weather_path = locator.get_weather('Zug')
    weather_data = epwreader.epw_reader(weather_path)[['year', 'drybulb_C', 'wetbulb_C',
                                                       'relhum_percent', 'windspd_ms',
                                                       'skytemp_C']]

    # run properties script
    import cea.datamanagement.data_helper
    cea.datamanagement.data_helper.data_helper(locator, config, True, True, True, True,
                                               True, True)

    region = config.region
    year = weather_data['year'][0]
    use_daysim_radiation = config.demand.use_daysim_radiation
    resolution_outputs = config.demand.resolution_output
    loads_output = config.demand.loads_output
    massflows_output = config.demand.massflows_output
    temperatures_output = config.demand.temperatures_output
    format_output = config.demand.format_output
    use_dynamic_infiltration_calculation = config.demand.use_dynamic_infiltration_calculation
    use_stochastic_occupancy = config.demand.use_stochastic_occupancy
    building_properties, schedules_dict, date = properties_and_schedule(
        locator, region, year, use_daysim_radiation)

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())
    bpr = building_properties['B01']
    result = calc_thermal_loads('B01', bpr, weather_data, schedules_dict, date, locator,
                                use_stochastic_occupancy,
                                use_dynamic_infiltration_calculation, resolution_outputs,
                                loads_output, massflows_output, temperatures_output,
                                format_output, region)

    # test the building csv file
    df = pd.read_csv(locator.get_demand_results_file('B01'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    test_config = ConfigParser.SafeConfigParser()
    test_config.read(output_file)
    value_columns = [u"E_sys_kWh", u"Qcdata_sys_kWh", u"Qcre_sys_kWh", u"Qcs_sys_kWh",
                     u"Qhs_sys_kWh", u"Qww_sys_kWh", u"Tcs_sys_re_C", u"Ths_sys_re_C",
                     u"Tww_sys_re_C", u"Tcs_sys_sup_C", u"Ths_sys_sup_C", u"Tww_sys_sup_C"]
    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not test_config.has_section("test_calc_thermal_loads"):
        test_config.add_section("test_calc_thermal_loads")
    test_config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    # FIX: was a Python 2 `print values` statement; use the function form like the rest of
    # this script (single-argument print(...) behaves identically on Python 2 and 3)
    print(values)
    test_config.set("test_calc_thermal_loads", "values", json.dumps(values))

    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = ['B01', 'B03', 'B02', 'B05', 'B04', 'B07', 'B06', 'B09', 'B08']
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh = run_for_single_building(
            building, bpr, weather_data, schedules_dict, date, locator,
            use_stochastic_occupancy, use_dynamic_infiltration_calculation,
            resolution_outputs, loads_output, massflows_output, temperatures_output,
            format_output, region)
        print("'%(b)s': (%(qhs_sys_kwh).5f, %(qcs_sys_kwh).5f, %(qww_sys_kwh).5f)," % locals())
        results[building] = (qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh)

    if not test_config.has_section("test_calc_thermal_loads_other_buildings"):
        test_config.add_section("test_calc_thermal_loads_other_buildings")
    test_config.set("test_calc_thermal_loads_other_buildings", "results",
                    json.dumps(results))
    with open(output_file, 'w') as f:
        test_config.write(f)
    print("Wrote output to %(output_file)s" % locals())