def test_monthlychart_daily_stack():
    """Test the initialization of MonthlyChart with daily stacked data collections."""
    doys = list(range(1, 366))
    coll_a = DailyCollection(
        Header(Energy(), 'kWh', AnalysisPeriod()),
        [i / 31 for i in range(365)], doys)

    # a single daily collection yields one mesh and no stacking polylines
    chart = MonthlyChart([coll_a])
    assert len(chart.data_meshes) == 1
    assert isinstance(chart.data_meshes[0], Mesh2D)
    assert len(chart.data_meshes[0].faces) == 365
    assert chart.data_polylines is None

    # a second collection of the same type yields a second mesh
    coll_b = DailyCollection(
        Header(Energy(), 'kWh', AnalysisPeriod()),
        [i / 31 for i in range(365)], doys)
    chart = MonthlyChart([coll_a, coll_b])
    assert len(chart.data_meshes) == 2
    assert isinstance(chart.data_meshes[1], Mesh2D)
    assert len(chart.data_meshes[1].faces) == 365

    # stacking the two collections still yields two complete meshes
    chart = MonthlyChart([coll_a, coll_b], stack=True)
    assert len(chart.data_meshes) == 2
    assert isinstance(chart.data_meshes[1], Mesh2D)
    assert len(chart.data_meshes[1].faces) == 365
def test_is_collection_aligned():
    """Test the is_collection_aligned method for discontinuous collections."""
    header = Header(Temperature(), 'C', AnalysisPeriod(end_month=1, end_day=1))
    header3 = Header(Temperature(), 'C', AnalysisPeriod(end_month=1, end_day=2))
    header4 = Header(Temperature(), 'C',
                     AnalysisPeriod(st_day=2, end_month=1, end_day=2))
    header5 = Header(Temperature(), 'C', AnalysisPeriod(end_month=1, end_day=24))
    # NOTE: xrange is Python 2 only and raises NameError on Python 3;
    # range behaves identically once wrapped in list().
    values1 = list(range(24))
    values2 = [12] * 24
    values3 = [12] * 48
    dc1 = HourlyDiscontinuousCollection(
        header, values1, header.analysis_period.datetimes)
    dc2 = HourlyDiscontinuousCollection(
        header, values2, header.analysis_period.datetimes)
    dc3 = HourlyDiscontinuousCollection(
        header3, values3, header3.analysis_period.datetimes)
    dc4 = HourlyDiscontinuousCollection(
        header4, values1, header4.analysis_period.datetimes)
    dc5 = DailyCollection(header5, values1, header5.analysis_period.doys_int)

    # collections with matching headers and lengths are aligned (symmetrically)
    assert dc1.is_collection_aligned(dc2)
    assert dc2.is_collection_aligned(dc1)
    # a different number of values is not aligned
    assert not dc1.is_collection_aligned(dc3)
    assert not dc3.is_collection_aligned(dc1)
    # a different analysis period is not aligned
    assert not dc1.is_collection_aligned(dc4)
    assert not dc4.is_collection_aligned(dc1)
    # a different collection interval (daily vs hourly) is not aligned
    assert not dc1.is_collection_aligned(dc5)
    assert not dc5.is_collection_aligned(dc1)

    assert HourlyDiscontinuousCollection.are_collections_aligned([dc1, dc2])
    assert not HourlyDiscontinuousCollection.are_collections_aligned(
        [dc1, dc2, dc3], False)
def test_percentile_monthly_on_daily_collection():
    """Test the percentile_monthly method on a daily collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    # xrange is Python 2 only (NameError on Python 3); range is equivalent here
    values = list(range(365))
    dc = DailyCollection(header, values, AnalysisPeriod().doys_int)
    new_dc = dc.percentile_monthly(25)
    assert isinstance(new_dc, MonthlyCollection)
    assert len(new_dc) == 12  # one value per month
    assert new_dc.datetimes[0] == 1
    assert new_dc.datetimes[-1] == 12
def test_total_monthly_on_daily_collection():
    """Test the total_monthly method on a daily collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    # xrange is Python 2 only (NameError on Python 3); range is equivalent here
    values = list(range(365))
    dc = DailyCollection(header, values, AnalysisPeriod().doys_int)
    new_dc = dc.total_monthly()
    assert isinstance(new_dc, MonthlyCollection)
    assert len(new_dc) == 12  # one value per month
    assert new_dc.datetimes[0] == 1
    assert new_dc.datetimes[-1] == 12
    # each monthly value must equal the sum of that month's daily values
    for i, val in dc.group_by_month().items():
        assert new_dc[i - 1] == sum(val)
def test_filter_by_analysis_period_daily():
    """Test filtering by analysis period on a daily collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    # xrange is Python 2 only (NameError on Python 3); range is equivalent here
    values = list(range(365))
    dc = DailyCollection(header, values, AnalysisPeriod().doys_int)
    dc = dc.validate_analysis_period()
    a_per = AnalysisPeriod(st_month=3, end_month=4, end_day=30)
    filt_dc = dc.filter_by_analysis_period(a_per)
    assert len(filt_dc) == 31 + 30  # all days of March and April
    assert filt_dc.header.analysis_period == a_per
    assert filt_dc.datetimes[0] == 31 + 28 + 1  # DOY of March 1
    assert filt_dc.datetimes[-1] == 31 + 28 + 31 + 30  # DOY of April 30
def daily_prevailing_temperature(self):
    """DailyCollection of prevailing outdoor temperature in C."""
    if self._daily_prevail == []:  # cache is empty; fill it on first access
        fill_values = self._daily_prevail_from_monthly \
            if self._avg_month is True else self._daily_prevail_from_hourly
        fill_values()
    return DailyCollection(
        self._get_header(), self._daily_prevail,
        self._head.analysis_period.doys_int)
def serialize_data(data_dicts):
    """Re-serialize a list of data collection dictionaries.

    Recognizes hourly-continuous, monthly and daily collection dictionaries;
    an empty input yields an empty list and any unrecognized type falls
    through (returning None), exactly as before.
    """
    if len(data_dicts) == 0:
        return []
    coll_classes = {
        'HourlyContinuousCollection': HourlyContinuousCollection,
        'MonthlyCollection': MonthlyCollection,
        'DailyCollection': DailyCollection
    }
    coll_class = coll_classes.get(data_dicts[0]['type'])
    if coll_class is not None:
        return [coll_class.from_dict(data) for data in data_dicts]
def test_init_prevailing_temperature_daily():
    """Test the PrevailingTemperature object with daily inputs."""
    out_header = Header(Temperature(), 'C', AnalysisPeriod())
    out_temp = DailyCollection(out_header, range(365), AnalysisPeriod().doys_int)
    out_temp = out_temp.validate_analysis_period()

    # the same collection types and lengths must result whether or not
    # monthly averaging is used
    for avg_month in (True, False):
        prevail = PrevailingTemperature(out_temp, avg_month)
        assert isinstance(prevail.hourly_prevailing_temperature,
                          HourlyContinuousCollection)
        assert len(prevail.hourly_prevailing_temperature.values) == 8760
        assert isinstance(prevail.daily_prevailing_temperature, DailyCollection)
        assert len(prevail.daily_prevailing_temperature.values) == 365
        assert isinstance(prevail.monthly_prevailing_temperature,
                          MonthlyCollection)
        assert len(prevail.monthly_prevailing_temperature.values) == 12
        assert isinstance(prevail.monthly_per_hour_prevailing_temperature,
                          MonthlyPerHourCollection)
        assert len(
            prevail.monthly_per_hour_prevailing_temperature.values) == 288
def test_init_daily():
    """Test the init methods for daily collections."""
    a_per = AnalysisPeriod(6, 21, 0, 6, 22, 23)
    val_a, val_b = 20, 25

    # setup the data collection
    daily_coll = DailyCollection(
        Header(Temperature(), 'C', a_per), [val_a, val_b], a_per.doys_int)
    assert daily_coll.datetimes == tuple(a_per.doys_int)
    assert daily_coll.values == (val_a, val_b)
    assert daily_coll.average == (val_a + val_b) / 2
    assert daily_coll.is_continuous is False
    # the string representations should not raise
    str(daily_coll)
    str(daily_coll.header)
from ladybug.datacollection import HourlyContinuousCollection, DailyCollection, \ MonthlyCollection, MonthlyPerHourCollection, HourlyDiscontinuousCollection except ImportError as e: raise ImportError('\nFailed to import ladybug:\n\t{}'.format(e)) try: from ladybug_rhino.grasshopper import all_required_inputs except ImportError as e: raise ImportError('\nFailed to import ladybug_rhino:\n\t{}'.format(e)) if all_required_inputs(ghenv.Component): inter = _interval_.lower() if _interval_ is not None else 'hourly' if inter == 'hourly': aper = _header.analysis_period if aper.st_hour == 0 and aper.end_hour == 23: data = HourlyContinuousCollection(_header, _values) else: data = HourlyDiscontinuousCollection(_header, _values, aper.datetimes) elif inter == 'monthly': data = MonthlyCollection( _header, _values, _header.analysis_period.months_int) elif inter == 'daily': data = DailyCollection( _header, _values, _header.analysis_period.doys_int) elif inter == 'monthly-per-hour': data = MonthlyPerHourCollection( _header, _values, _header.analysis_period.months_per_hour) else: raise ValueError('{} is not a recongized interval.'.format(_interval_))
def test_validate_a_period_daily():
    """Test the validate_a_period methods for daily collections."""
    a_per = AnalysisPeriod(6, 21, 0, 6, 22, 23)
    val_a, val_b = 20, 25
    doy_a, doy_b = 172, 173

    def _coll(a_p, vals, doys):
        """Shorthand for building a daily Temperature collection."""
        return DailyCollection(Header(Temperature(), 'C', a_p), vals, doys)

    # the validate method should sort reversed datetimes
    coll = _coll(a_per, [val_a, val_b], [doy_b, doy_a])
    validated = coll.validate_analysis_period()
    assert coll.validated_a_period is False
    assert validated.validated_a_period is True
    assert coll.datetimes == (doy_b, doy_a)
    assert validated.datetimes == (doy_a, doy_b)

    # the validate method should expand a too-narrow analysis_period range
    a_per_2 = AnalysisPeriod(6, 20, 0, 6, 20, 23)
    coll = _coll(a_per_2, [val_a, val_b], [doy_a, doy_b])
    validated = coll.validate_analysis_period()
    assert coll.validated_a_period is False
    assert validated.validated_a_period is True
    assert coll.header.analysis_period == a_per_2
    assert validated.header.analysis_period == AnalysisPeriod(
        6, 20, 0, 6, 22, 23)

    # the validate method should handle reversed analysis_periods
    a_per_3 = AnalysisPeriod(6, 20, 0, 2, 20, 23)
    doy_jan = 21
    coll = _coll(a_per_3, [val_a, val_b, val_b], [doy_a, doy_b, doy_jan])
    validated = coll.validate_analysis_period()
    assert validated.header.analysis_period == a_per_3
    coll = _coll(a_per_3, [val_a, val_b], [doy_a, doy_b])
    validated = coll.validate_analysis_period()
    assert validated.header.analysis_period == a_per_3
    coll = _coll(a_per_3, [val_a, val_b], [doy_jan, 22])
    validated = coll.validate_analysis_period()
    assert validated.header.analysis_period == a_per_3
    # a datetime outside the reversed period resets to the full year
    coll = _coll(a_per_3, [val_a, val_b], [doy_jan, 60])
    validated = coll.validate_analysis_period()
    assert validated.header.analysis_period == AnalysisPeriod()

    # the validate method should detect leap years from DOY 366
    coll = _coll(a_per, [val_a, val_b, val_b], [doy_a, doy_b, 366])
    validated = coll.validate_analysis_period()
    assert coll.validated_a_period is False
    assert validated.validated_a_period is True
    assert coll.header.analysis_period.is_leap_year is False
    assert validated.header.analysis_period.is_leap_year is True

    # duplicated datetimes should be caught
    coll = _coll(a_per, [val_a, val_b], [doy_a, doy_a])
    with pytest.raises(Exception):
        coll.validate_analysis_period()
def data_collections_by_output_name(self, output_name):
    """Get an array of Ladybug DataCollections for a specified output.

    Args:
        output_name: The name of an EnergyPlus output to be retrieved from
            the SQLite result file. This can also be an array of output names
            for which all data collections should be retrieved.

    Returns:
        An array of data collections of the requested output type. This will
        be an empty list if no output of the requested name was found in the
        file.
    """
    conn = sqlite3.connect(self.file_path)
    try:
        # extract all indices in the ReportDataDictionary with the output_name.
        # use parameterized queries: formatting a Python sequence directly into
        # the SQL produced invalid statements for lists (square brackets) and
        # single-element tuples (trailing comma), and was injection-prone.
        c = conn.cursor()
        if isinstance(output_name, str):
            c.execute('SELECT * FROM ReportDataDictionary WHERE Name=?',
                      (output_name,))
        else:
            marks = ', '.join('?' for _ in output_name)
            c.execute(
                'SELECT * FROM ReportDataDictionary WHERE Name IN '
                '({})'.format(marks), tuple(output_name))
        header_rows = c.fetchall()

        # if nothing was found, return an empty list
        if len(header_rows) == 0:
            return []

        # extract all data of the relevant type from ReportData
        rel_indices = tuple(row[0] for row in header_rows)
        marks = ', '.join('?' for _ in rel_indices)
        c.execute(
            'SELECT Value, TimeIndex FROM ReportData WHERE '
            'ReportDataDictionaryIndex IN ({})'.format(marks), rel_indices)
        data = c.fetchall()
    finally:
        conn.close()  # ensure connection is always closed

    # get the analysis period and the reporting frequency from the time table
    st_time, end_time = data[0][1], data[-1][1]
    run_period, report_frequency, dday = self._extract_run_period(st_time, end_time)
    if dday:  # there are multiple analysis periods; get them all
        run_period = self._extract_all_run_period(
            report_frequency, run_period.timestep, run_period.is_leap_year)

    # create the header objects to be used for the resulting data collections
    units = header_rows[0][-1] if header_rows[0][-1] != 'J' else 'kWh'
    data_type = self._data_type_from_unit(units)
    meta_datas = []
    for row in header_rows:
        obj_type = row[3] if 'Surface' not in output_name else 'Surface'
        meta_datas.append({'type': row[6], obj_type: row[5]})
    headers = []
    if isinstance(run_period, list):  # multiple run periods
        for runper in run_period:
            for m_data in meta_datas:
                headers.append(Header(data_type, units, runper, m_data))
    else:  # just one run period
        for m_data in meta_datas:
            headers.append(Header(data_type, units, run_period, m_data))

    # format the data such that we have one list for each of the header rows
    if isinstance(run_period, list):  # multiple run periods
        chunks = [len(runper) for runper in run_period]
        if units == 'kWh':
            all_values = self._partition_and_convert_timeseries_chunks(data, chunks)
        else:
            all_values = self._partition_timeseries_chunks(data, chunks)
    else:  # just one run period
        n_lists = len(header_rows)
        if units == 'kWh':
            all_values = self._partition_and_convert_timeseries(data, n_lists)
        else:
            all_values = self._partition_timeseries(data, n_lists)

    # create the final data collections
    data_colls = []
    if report_frequency == 'Hourly' or isinstance(report_frequency, int):
        for head, values in zip(headers, all_values):
            data_colls.append(HourlyContinuousCollection(head, values))
    elif report_frequency == 'Daily':
        for head, values in zip(headers, all_values):
            data_colls.append(DailyCollection(
                head, values, head.analysis_period.doys_int))
    elif report_frequency == 'Monthly':
        for head, values in zip(headers, all_values):
            data_colls.append(MonthlyCollection(
                head, values, head.analysis_period.months_int))
    else:  # Annual data; just return the values as they are
        return all_values

    # ensure all imported data gets marked as valid; this increases speed elsewhere
    for data in data_colls:
        data._validated_a_period = True
    return data_colls