def test_monthlychart_two_axes():
    """Test the MonthlyChart with two Y-axes."""
    months = list(range(1, 13))
    temp_header = Header(Temperature(), 'C', AnalysisPeriod())
    temp_data = MonthlyCollection(temp_header, [i for i in range(12)], months)
    rh_header = Header(RelativeHumidity(), '%', AnalysisPeriod())
    rh_data = MonthlyCollection(rh_header, [i for i in range(10, 70, 5)], months)
    chart = MonthlyChart([temp_data, rh_data])

    # the second axis should get its own labels, points and title
    labels2 = chart.y_axis_labels2
    assert all(isinstance(lbl, str) for lbl in labels2)
    axis_lines = chart.y_axis_lines
    label_pts2 = chart.y_axis_label_points2
    assert len(axis_lines) == len(labels2) == len(label_pts2) == 11
    assert all(isinstance(seg, LineSegment2D) for seg in axis_lines)
    assert all(isinstance(pt, Point2D) for pt in label_pts2)
    assert isinstance(chart.y_axis_title_text2, str)
    assert 'Fraction' in chart.y_axis_title_text2
    assert isinstance(chart.y_axis_title_location2, Plane)

    # ensure the first axis was not affected
    labels1 = chart.y_axis_labels1
    assert all(isinstance(lbl, str) for lbl in labels1)
    label_pts1 = chart.y_axis_label_points1
    assert len(axis_lines) == len(labels1) == len(label_pts1) == 11
    assert all(isinstance(pt, Point2D) for pt in label_pts1)
    assert isinstance(chart.y_axis_title_text1, str)
    assert 'Temperature' in chart.y_axis_title_text1
    assert isinstance(chart.y_axis_title_location1, Plane)
def test_monthlychart_monthly():
    """Test the initialization of MonthlyChart with monthly data collections."""
    months = list(range(1, 13))
    temp_header = Header(Temperature(), 'C', AnalysisPeriod())
    temp_data = MonthlyCollection(temp_header, [i for i in range(12)], months)

    # a single collection should yield one mesh with one face per month
    chart = MonthlyChart([temp_data])
    meshes = chart.data_meshes
    assert len(meshes) == 1
    assert isinstance(meshes[0], Mesh2D)
    assert len(meshes[0].faces) == 12
    assert chart.data_polylines is None

    # two collections should yield two meshes
    rh_header = Header(RelativeHumidity(), '%', AnalysisPeriod())
    rh_data = MonthlyCollection(rh_header, [i for i in range(10, 70, 5)], months)
    chart = MonthlyChart([temp_data, rh_data])
    meshes = chart.data_meshes
    assert len(meshes) == 2
    assert isinstance(meshes[1], Mesh2D)
    assert len(meshes[1].faces) == 12

    # stacking should not change the mesh count or face count
    chart = MonthlyChart([temp_data, rh_data], stack=True)
    meshes = chart.data_meshes
    assert len(meshes) == 2
    assert isinstance(meshes[1], Mesh2D)
    assert len(meshes[1].faces) == 12
def test_monthlychart_monthly_stack():
    """Test the initialization of MonthlyChart with monthly stacked data collections."""
    months = list(range(1, 13))
    energy_header = Header(Energy(), 'kWh', AnalysisPeriod())
    energy_data = MonthlyCollection(energy_header, [i for i in range(12, 24)], months)

    # a single collection should yield one mesh with one face per month
    chart = MonthlyChart([energy_data])
    meshes = chart.data_meshes
    assert len(meshes) == 1
    assert isinstance(meshes[0], Mesh2D)
    assert len(meshes[0].faces) == 12
    assert chart.y_axis_labels1[0] == '0.00'  # energy axis starts at zero

    # two collections should yield two meshes
    energy_header2 = Header(Energy(), 'kWh', AnalysisPeriod())
    energy_data2 = MonthlyCollection(energy_header2, [i for i in range(24, 36)], months)
    chart = MonthlyChart([energy_data, energy_data2])
    meshes = chart.data_meshes
    assert len(meshes) == 2
    assert isinstance(meshes[1], Mesh2D)
    assert len(meshes[1].faces) == 12

    # stacking should not change the mesh count or face count
    chart = MonthlyChart([energy_data, energy_data2], stack=True)
    meshes = chart.data_meshes
    assert len(meshes) == 2
    assert isinstance(meshes[1], Mesh2D)
    assert len(meshes[1].faces) == 12
def test_filter_by_analysis_period_monthly():
    """Test filtering by analysis period on monthly collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    # `xrange` is Python-2-only and raises NameError on Python 3; use range
    values = list(range(12))
    dc = MonthlyCollection(header, values, AnalysisPeriod().months_int)
    a_per = AnalysisPeriod(st_month=3, end_month=6)
    filt_dc = dc.filter_by_analysis_period(a_per)
    # March through June inclusive is 4 months
    assert len(filt_dc) == 4
    assert filt_dc.header.analysis_period == a_per
    assert filt_dc.datetimes[0] == 3
    assert filt_dc.datetimes[-1] == 6
def data_to_load_intensity(data_colls, floor_area, data_type, cop=1, mults=None):
    """Convert data collections output by EnergyPlus to a single load intensity collection.

    Args:
        data_colls: A list of monthly data collections for an energy term.
        floor_area: The total floor area of the rooms, used to compute EUI.
        data_type: Text for the data type of the collections (eg. "Cooling").
        cop: Optional number for the COP, which the results will be divided by.
        mults: Optional dictionary of multipliers keyed by the 'Zone' metadata
            value of each collection. When supplied (and the collections carry
            'Zone' metadata), each collection is scaled by its multiplier
            before the monthly totals are computed.

    Returns:
        A MonthlyCollection of energy intensity (kWh/m2) whose metadata 'type'
        key carries the given data_type. If data_colls is empty, a "filler"
        collection of twelve zero values is returned.
    """
    if len(data_colls) != 0:
        # apply zone multipliers before totaling, if they are available
        if mults is not None and 'Zone' in data_colls[0].header.metadata:
            rel_mults = [mults[data.header.metadata['Zone']] for data in data_colls]
            data_colls = [dat * mul for dat, mul in zip(data_colls, rel_mults)]
        # sum across the collections for each month and normalize by floor area
        total_vals = [sum(month_vals) / floor_area for month_vals in zip(*data_colls)]
        if cop != 1:  # convert thermal loads to electricity/fuel via the COP
            total_vals = [val / cop for val in total_vals]
    else:  # just make a "filler" collection of 0 values
        total_vals = [0] * 12
    meta_dat = {'type': data_type}
    total_head = Header(EnergyIntensity(), 'kWh/m2', AnalysisPeriod(), meta_dat)
    # NOTE(review): datetimes here are range(12) (0-11) rather than the 1-12
    # months_int used elsewhere in this codebase — confirm this is intentional
    return MonthlyCollection(total_head, total_vals, range(12))
def monthly_prevailing_temperature(self):
    """MonthlyCollection of prevailing outdoor temperature in C."""
    if self._monthly_prevail != []:
        # pre-computed monthly values exist; wrap them in a collection
        return MonthlyCollection(
            self._get_header(), self._monthly_prevail,
            self._head.analysis_period.months_int)
    # otherwise derive the monthly values from the hourly collection
    return self.hourly_prevailing_temperature.average_monthly()
def serialize_data(data_dicts):
    """Reserialize a list of collection dictionaries."""
    if not data_dicts:
        return []
    # dispatch on the 'type' key of the first dictionary
    coll_classes = {
        'HourlyContinuousCollection': HourlyContinuousCollection,
        'MonthlyCollection': MonthlyCollection,
        'DailyCollection': DailyCollection
    }
    coll_class = coll_classes.get(data_dicts[0]['type'])
    if coll_class is None:  # unknown type; mirror the original implicit None
        return None
    return [coll_class.from_dict(data) for data in data_dicts]
def test_monthlychart_set_min_max_by_index():
    """Test the set_minimum_by_index and set_maximum_by_index methods."""
    months = list(range(1, 13))
    temp_data = MonthlyCollection(
        Header(Temperature(), 'C', AnalysisPeriod()),
        [i for i in range(12)], months)
    rh_data = MonthlyCollection(
        Header(RelativeHumidity(), '%', AnalysisPeriod()),
        [i for i in range(10, 70, 5)], months)
    l_par = LegendParameters(min=-20, max=40)
    l_par.decimal_count = 0
    chart = MonthlyChart([temp_data, rh_data], legend_parameters=l_par)

    # the first axis should reflect the legend parameter min/max
    assert chart.y_axis_labels1[0] == '-20'
    assert chart.y_axis_labels1[-1] == '40'

    # setting min/max on the second data set should update the second axis
    chart.set_minimum_by_index(0, 1)
    assert chart.y_axis_labels2[0] == '0'
    chart.set_maximum_by_index(100, 1)
    assert chart.y_axis_labels2[-1] == '100'
def test_init_prevailing_temperature_monthly():
    """Test the PrevailingTemperature object with monthly inputs."""
    out_header = Header(Temperature(), 'C', AnalysisPeriod())
    out_temp = MonthlyCollection(out_header, range(12), AnalysisPeriod().months_int)
    out_temp = out_temp.validate_analysis_period()
    prevail = PrevailingTemperature(out_temp, True)

    # every time interval should be derivable from the monthly input
    assert isinstance(prevail.hourly_prevailing_temperature,
                      HourlyContinuousCollection)
    assert len(prevail.hourly_prevailing_temperature.values) == 8760
    assert isinstance(prevail.daily_prevailing_temperature, DailyCollection)
    assert len(prevail.daily_prevailing_temperature.values) == 365
    assert isinstance(prevail.monthly_prevailing_temperature, MonthlyCollection)
    assert len(prevail.monthly_prevailing_temperature.values) == 12
    assert isinstance(prevail.monthly_per_hour_prevailing_temperature,
                      MonthlyPerHourCollection)
    assert len(prevail.monthly_per_hour_prevailing_temperature.values) == 288

    # monthly input without interpolation should not be accepted
    with pytest.raises(Exception):
        prevail = PrevailingTemperature(out_temp, False)
def test_init_monthly():
    """Test the init methods for monthly collections."""
    a_per = AnalysisPeriod(6, 1, 0, 7, 31, 23)
    val_1, val_2 = 20, 25
    expected_avg = (val_1 + val_2) / 2

    # Setup data collection
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per), [val_1, val_2], a_per.months_int)

    assert coll.datetimes == tuple(a_per.months_int)
    assert coll.values == (val_1, val_2)
    assert coll.average == expected_avg
    assert coll.is_continuous is False
    str(coll)  # Test the string representation of the collection
    str(coll.header)  # Test the string representation of the header
def serialize_data(data_dicts):
    """Reserialize a list of MonthlyCollection dictionaries."""
    return list(map(MonthlyCollection.from_dict, data_dicts))
    # NOTE(review): this chunk begins inside a try block whose `try:` line is
    # above the visible source; the leading indent reflects that.
    from ladybug.datacollection import HourlyContinuousCollection, DailyCollection, \
        MonthlyCollection, MonthlyPerHourCollection, HourlyDiscontinuousCollection
except ImportError as e:
    raise ImportError('\nFailed to import ladybug:\n\t{}'.format(e))

try:
    from ladybug_rhino.grasshopper import all_required_inputs
except ImportError as e:
    raise ImportError('\nFailed to import ladybug_rhino:\n\t{}'.format(e))


if all_required_inputs(ghenv.Component):
    # default to an hourly interval when none is specified
    inter = _interval_.lower() if _interval_ is not None else 'hourly'
    if inter == 'hourly':
        aper = _header.analysis_period
        # a full 0-23 hour range means the collection covers every hour (continuous)
        if aper.st_hour == 0 and aper.end_hour == 23:
            data = HourlyContinuousCollection(_header, _values)
        else:
            data = HourlyDiscontinuousCollection(_header, _values, aper.datetimes)
    elif inter == 'monthly':
        # monthly collections are indexed by month-of-year integers
        data = MonthlyCollection(
            _header, _values, _header.analysis_period.months_int)
    elif inter == 'daily':
        # daily collections are indexed by day-of-year integers
        data = DailyCollection(
            _header, _values, _header.analysis_period.doys_int)
    elif inter == 'monthly-per-hour':
        data = MonthlyPerHourCollection(
            _header, _values, _header.analysis_period.months_per_hour)
    else:
        raise ValueError('{} is not a recongized interval.'.format(_interval_))
def test_validate_a_period_monthly():
    """Test the validate_a_period methods for monthly collections."""
    a_per = AnalysisPeriod(6, 1, 0, 7, 1, 23)
    val_1, val_2 = 20, 25
    jun, jul = 6, 7

    # Test that the validate method correctly sorts reversed datetimes.
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per), [val_1, val_2], [jul, jun])
    coll_new = coll.validate_analysis_period()
    assert coll.validated_a_period is False
    assert coll_new.validated_a_period is True
    assert coll.datetimes == (jul, jun)
    assert coll_new.datetimes == (jun, jul)

    # Test that the validate method correctly updates analysis_period range.
    a_per_2 = AnalysisPeriod(6, 1, 0, 6, 1, 23)
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per_2), [val_1, val_2], [jun, jul])
    coll_new = coll.validate_analysis_period()
    assert coll.validated_a_period is False
    assert coll_new.validated_a_period is True
    assert coll.header.analysis_period == a_per_2
    assert coll_new.header.analysis_period == AnalysisPeriod(6, 1, 0, 7, 31, 23)

    # Test the validate method with reversed (over-the-year-end) analysis_periods.
    a_per_3 = AnalysisPeriod(6, 1, 0, 2, 28, 23)
    jan = 1
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per_3), [val_1, val_2, val_2],
        [jan, jun, jul])
    coll_new = coll.validate_analysis_period()
    assert coll_new.header.analysis_period == a_per_3
    assert coll_new.datetimes == (jun, jul, jan)

    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per_3), [val_1, val_2], [jun, jul])
    coll_new = coll.validate_analysis_period()
    assert coll_new.header.analysis_period == a_per_3
    assert coll_new.datetimes == (jun, jul)

    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per_3), [val_1, val_2], [jan, 2])
    coll_new = coll.validate_analysis_period()
    assert coll_new.datetimes == (jan, 2)
    assert coll_new.header.analysis_period == a_per_3

    # a datetime outside the reversed period should reset the analysis_period
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per_3), [val_1, val_2], [jan, 4])
    coll_new = coll.validate_analysis_period()
    assert coll_new.header.analysis_period == AnalysisPeriod()
    assert coll_new.datetimes == (jan, 4)

    # Test that duplicated datetimes are caught
    coll = MonthlyCollection(
        Header(Temperature(), 'C', a_per), [val_1, val_2], [jun, jun])
    with pytest.raises(Exception):
        coll_new = coll.validate_analysis_period()
def data_collections_by_output_name(self, output_name):
    """Get an array of Ladybug DataCollections for a specified output.

    Args:
        output_name: The name of an EnergyPlus output to be retrieved from
            the SQLite result file. This can also be an array of output names
            for which all data collections should be retrieved.

    Returns:
        An array of data collections of the requested output type. This will
        be an empty list if no output of the requested name was found in the
        file.
    """
    conn = sqlite3.connect(self.file_path)
    try:
        # extract all indices in the ReportDataDictionary with the output_name
        c = conn.cursor()
        if isinstance(output_name, str):
            # single output name; use a parameterized query
            c.execute('SELECT * FROM ReportDataDictionary WHERE Name=?',
                      (output_name,))
        else:
            # multiple names; the tuple repr is formatted into an IN clause
            # NOTE(review): string formatting bypasses SQL parameterization;
            # presumably output_name is trusted internal input — verify
            c.execute('SELECT * FROM ReportDataDictionary WHERE Name IN {}'.format(
                output_name))
        header_rows = c.fetchall()
        # if nothing was found, return an empty list
        if len(header_rows) == 0:
            conn.close()  # ensure connection is always closed
            return []
        # extract all data of the relevant type from ReportData
        rel_indices = tuple(row[0] for row in header_rows)
        if len(rel_indices) == 1:
            c.execute('SELECT Value, TimeIndex FROM ReportData WHERE '
                      'ReportDataDictionaryIndex=?', rel_indices)
        else:
            c.execute('SELECT Value, TimeIndex FROM ReportData WHERE '
                      'ReportDataDictionaryIndex IN {}'.format(rel_indices))
        data = c.fetchall()
        conn.close()  # ensure connection is always closed
    except Exception as e:
        conn.close()  # ensure connection is always closed
        # NOTE(review): re-wrapping as a generic Exception loses the original
        # traceback and exception type
        raise Exception(str(e))
    # get the analysis period and the reporting frequency from the time table
    st_time, end_time = data[0][1], data[-1][1]
    run_period, report_frequency, dday = self._extract_run_period(st_time, end_time)
    if dday:  # there are multiple analysis periods; get them all
        run_period = self._extract_all_run_period(
            report_frequency, run_period.timestep, run_period.is_leap_year)
    # create the header objects to be used for the resulting data collections
    # Joules are reported as kWh here; the values are converted further below
    units = header_rows[0][-1] if header_rows[0][-1] != 'J' else 'kWh'
    data_type = self._data_type_from_unit(units)
    meta_datas = []
    for row in header_rows:
        # surface outputs are labeled 'Surface'; everything else keeps its
        # object type from column 3 of ReportDataDictionary
        obj_type = row[3] if 'Surface' not in output_name else 'Surface'
        meta_datas.append({'type': row[6], obj_type: row[5]})
    headers = []
    if isinstance(run_period, list):  # multiple run periods
        for runper in run_period:
            for m_data in meta_datas:
                headers.append(Header(data_type, units, runper, m_data))
    else:  # just one run period
        for m_data in meta_datas:
            headers.append(Header(data_type, units, run_period, m_data))
    # format the data such that we have one list for each of the header rows
    if isinstance(run_period, list):  # multiple run periods
        chunks = [len(runper) for runper in run_period]
        if units == 'kWh':
            # the _and_convert variants also perform the J -> kWh conversion
            all_values = self._partition_and_convert_timeseries_chunks(data, chunks)
        else:
            all_values = self._partition_timeseries_chunks(data, chunks)
    else:  # just one run period
        n_lists = len(header_rows)
        if units == 'kWh':
            all_values = self._partition_and_convert_timeseries(data, n_lists)
        else:
            all_values = self._partition_timeseries(data, n_lists)
    # create the final data collections
    data_colls = []
    if report_frequency == 'Hourly' or isinstance(report_frequency, int):
        # an integer report_frequency is a sub-hourly timestep count
        for head, values in zip(headers, all_values):
            data_colls.append(HourlyContinuousCollection(head, values))
    elif report_frequency == 'Daily':
        for head, values in zip(headers, all_values):
            data_colls.append(DailyCollection(
                head, values, head.analysis_period.doys_int))
    elif report_frequency == 'Monthly':
        for head, values in zip(headers, all_values):
            data_colls.append(MonthlyCollection(
                head, values, head.analysis_period.months_int))
    else:  # Annual data; just return the values as they are
        return all_values
    # ensure all imported data gets marked as valid; this increases speed elsewhere
    for data in data_colls:
        data._validated_a_period = True
    return data_colls