def test_interpolate_holes():
    """Test the interpolate_holes method on the discontinuous collection."""
    a_per = AnalysisPeriod(6, 21, 0, 6, 21, 23)
    dt1, dt2 = DateTime(6, 21, 12), DateTime(6, 21, 14)
    v1, v2 = 20, 25
    dc1 = HourlyDiscontinuousCollection(
        Header(Temperature(), 'C', a_per), [v1, v2], [dt1, dt2])

    # interpolation requires a validated analysis period
    with pytest.raises(Exception):
        interp_coll1 = dc1.interpolate_holes()
    dc2 = dc1.validate_analysis_period()
    interp_coll1 = dc2.interpolate_holes()
    assert isinstance(interp_coll1, HourlyContinuousCollection)
    assert len(interp_coll1.values) == 24
    assert interp_coll1[0] == 20
    assert interp_coll1[12] == 20
    assert interp_coll1[13] == 22.5  # linearly interpolated between 20 and 25
    assert interp_coll1[14] == 25
    assert interp_coll1[23] == 25

    # a continuous collection has no holes so the result is unchanged
    values = list(range(24))  # was xrange, which is undefined in Python 3
    test_header = Header(GenericType('Test Type', 'test'), 'test',
                         AnalysisPeriod(end_month=1, end_day=1))
    dc3 = HourlyContinuousCollection(test_header, values)
    interp_coll2 = dc3.interpolate_holes()
    assert isinstance(interp_coll2, HourlyContinuousCollection)
    assert len(interp_coll2.values) == 24
def test_to_from_array():
    """Round-trip a DateTime through to_array / from_array."""
    original = DateTime(6, 21, 12)
    as_array = original.to_array()
    restored = DateTime.from_array(as_array)
    # the rebuilt object must equal the original and serialize identically
    assert restored == original
    assert restored.to_array() == as_array
def test_to_from_date_time_string():
    """Round-trip a DateTime through its string representation."""
    original = DateTime(6, 21, 12)
    as_text = str(original)
    restored = DateTime.from_date_time_string(as_text)
    # the rebuilt object must equal the original and stringify identically
    assert restored == original
    assert str(restored) == as_text
def test_date_time_init():
    """Test the init method for DateTime and its basic properties."""
    base = DateTime(6, 21, 12)
    duplicate = DateTime(6, 21, 12)
    leap = DateTime(6, 21, 12, leap_year=True)
    later = DateTime(6, 21, 13)
    str(base)  # the string representation should not raise

    # basic component properties
    assert base.month == 6
    assert base.day == 21
    assert base.hour == 12
    assert base.minute == 0
    assert not base.leap_year

    # derived hour-of-year / day-of-year properties
    assert base.doy == 172
    assert base.hoy == 4116
    assert base.moy == 246960
    assert isinstance(base.int_hoy, int)
    assert base.int_hoy == 4116
    assert base.float_hour == 12.0
    assert base.date == Date(6, 21)
    assert base.time == Time(12, 0)

    # equality accounts for the leap-year flag and the hour; sorting works
    assert base == duplicate
    assert base != leap
    assert base != later
    assert sorted([later, base]) == [base, later]
def test_date_time_to_from_dict():
    """Round-trip a DateTime through its dictionary representation."""
    original = DateTime(6, 21, 12)
    as_dict = original.to_dict()
    restored = DateTime.from_dict(as_dict)
    # the rebuilt object must equal the original and serialize identically
    assert original == restored
    assert restored.to_dict() == as_dict
def test_filter_by_hoys():
    """Test filter_by_hoys method."""
    a_per = AnalysisPeriod(st_month=3, end_month=3)
    header = Header(Temperature(), 'C', a_per)
    values = list(range(24 * 31))  # was xrange, which is undefined in Python 3
    dc = HourlyDiscontinuousCollection(header, values, a_per.datetimes)

    # filter down to the 9:00-17:00 hours of each day in March
    hoys = AnalysisPeriod(st_hour=9, end_hour=17).hoys
    filt_dc = dc.filter_by_hoys(hoys)
    assert len(filt_dc) == 31 * 9
    assert filt_dc.datetimes[0] == DateTime(3, 1, 9)
    assert filt_dc.datetimes[-1] == DateTime(3, 31, 17)
def test_prevailing_direction(): """Test prevailing direction getter""" # Test with single prevailing dir dir_vals = [0, 3, 10, # 315 - 45 85, 90, 95, # 45 - 135 140, 170, 170, 170, # 135 - 225 230, 285, 288] # 225 - 315 spd_vals = dir_vals # Make into fake data collections a_per = AnalysisPeriod(6, 21, 12, 6, 21, 13) dates = [DateTime(6, 21, i) for i in range(len(dir_vals))] spd_header = Header(Speed(), 'm/s', a_per) dir_header = Header(GenericType('Direction', 'deg'), 'deg', a_per) spd_data = HourlyDiscontinuousCollection(spd_header, spd_vals, dates) dir_data = HourlyDiscontinuousCollection(dir_header, dir_vals, dates) # Init simple dir set divided by 4 w = WindRose(dir_data, spd_data, 4) test_prev_dir = 180 assert w.prevailing_direction[0] == test_prev_dir # Testing with two max prevailing values dir_vals = [3, 3, 10, # 315 - 45 85, 90, 90, 100, # 45 - 135 170, 170, 170, 180, # 135 - 225 230, 285, 288] # 225 - 315 spd_vals = dir_vals # Make into fake data collections a_per = AnalysisPeriod(6, 21, 12, 6, 21, 13) dates = [DateTime(6, 21, i) for i in range(len(dir_vals))] spd_header = Header(Speed(), 'm/s', a_per) dir_header = Header(GenericType('Direction', 'deg'), 'deg', a_per) spd_data = HourlyDiscontinuousCollection(spd_header, spd_vals, dates) dir_data = HourlyDiscontinuousCollection(dir_header, dir_vals, dates) # Init simple dir set divided by 4 w = WindRose(dir_data, spd_data, 4) test_prev_dir = set((90, 180)) assert set(w.prevailing_direction) == test_prev_dir # Test with epw epw_path = os.path.join(os.getcwd(), 'tests/fixtures/epw/chicago.epw') epw = EPW(epw_path) # Test 5 directions w = WindRose(epw.wind_direction, epw.wind_speed, 5) assert w.prevailing_direction[0] == 216.0
def end_date_time(self):
    """Get a ladybug DateTime object for the end time of the schedule's values.

    The end is derived from the start date plus the duration covered by the
    values, wrapping past the end of the year when the schedule runs over it.
    """
    num_hoys = (len(self._values) - 1) / self.timestep
    end_hoy = (self.start_date.doy - 1) * 24 + num_hoys
    if not self.is_leap_year:
        # wrap schedules that run past the end of an 8760-hour year
        end_dt = DateTime.from_hoy(end_hoy) if end_hoy < 8760 else \
            DateTime.from_hoy(end_hoy - 8760)
    else:
        # a leap year has 8784 hours, so wrap by the full leap-year length
        # (the original subtracted 8760 here, landing 24 hours too late)
        end_dt = DateTime.from_hoy(end_hoy, True) if end_hoy < 8784 else \
            DateTime.from_hoy(end_hoy - 8784, True)
    return end_dt
def test_filter_by_analysis_period_continuous_hour_subset():
    """Test filtering hour subset analysis period on hourly continuous collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    values = list(range(8760))  # was xrange, which is undefined in Python 3
    dc = HourlyContinuousCollection(header, values)

    # an hour-of-day subset leaves gaps, so the result is discontinuous
    a_per = AnalysisPeriod(3, 2, 9, 3, 8, 17)
    filt_dc = dc.filter_by_analysis_period(a_per)
    assert len(filt_dc) == 7 * 9
    assert filt_dc.header.analysis_period == a_per
    assert filt_dc.datetimes[0] == DateTime(3, 2, 9)
    assert filt_dc.datetimes[-1] == DateTime(3, 8, 17)
    assert not isinstance(filt_dc, HourlyContinuousCollection)
def test_date_time_add_sub():
    """Test the add and subtract methods for DateTime."""
    start = DateTime(6, 21, 12)
    # hour arithmetic in both directions
    assert start.add_hour(1) == DateTime(6, 21, 13)
    assert start.sub_hour(1) == DateTime(6, 21, 11)
    # minute arithmetic in both directions
    assert start.add_minute(1) == DateTime(6, 21, 12, 1)
    assert start.sub_minute(1) == DateTime(6, 21, 11, 59)
def test_filter_by_analysis_period_continuous_reversed():
    """Test filtering by reversed analysis period on hourly continuous collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    values = list(range(8760))  # was xrange, which is undefined in Python 3
    dc = HourlyContinuousCollection(header, values)

    # a period that wraps over the new year (December through March)
    a_per = AnalysisPeriod(st_month=12, end_month=3)
    filt_dc = dc.filter_by_analysis_period(a_per)
    assert len(filt_dc) == (31 + 31 + 28 + 31) * 24  # Dec + Jan + Feb + Mar
    assert filt_dc.header.analysis_period == a_per
    assert filt_dc.datetimes[0] == DateTime(12, 1, 0)
    assert filt_dc.datetimes[-1] == DateTime(3, 31, 23)
    assert isinstance(filt_dc, HourlyContinuousCollection)
def _extract_run_period(self, st_time, end_time):
    """Extract the run period object and frequency from the SQLite file.

    Args:
        st_time: Index for the start time of the data.
        end_time: Index for the end time of the data.

    Returns:
        A tuple with run_period, reporting_frequency, and a boolean for
        whether the data was for a design day.
    """
    conn = sqlite3.connect(self.file_path)
    try:
        # extract the start and end times from the Time table
        c = conn.cursor()
        c.execute('SELECT * FROM Time WHERE TimeIndex=?', (st_time,))
        start = c.fetchone()
        c.execute('SELECT * FROM Time WHERE TimeIndex=?', (end_time,))
        end = c.fetchone()
    except Exception as e:
        raise Exception(str(e))
    finally:
        conn.close()  # ensure connection is always closed

    # check whether the data was for a design day
    dday_period = start[10] in ('SummerDesignDay', 'WinterDesignDay')

    # set the reporting frequency by the interval type
    interval_typ = start[8]
    if interval_typ <= 1:
        min_per_step = start[7]
        aper_timestep = int(60 / min_per_step)
        reporting_frequency = aper_timestep
    else:
        reporting_frequency = self._interval_codes[interval_typ]
        aper_timestep = 1
        min_per_step = 60

    # convert the extracted data into an AnalysisPeriod object
    # NOTE(review): the divisible-by-4 check is only an approximation of a
    # true leap-year test (fails for e.g. 1900) — confirm acceptable here
    leap_year = end[1] % 4 == 0
    if reporting_frequency == 'Monthly':
        st_date = DateTime(start[2], 1, 0)
    else:
        st_date = DateTime(start[2], start[3], 0)
    end_date = DateTime(end[2], end[3], 0)
    end_date = end_date.add_minute(1440 - min_per_step)
    run_period = AnalysisPeriod(
        st_date.month, st_date.day, st_date.hour, end_date.month,
        end_date.day, end_date.hour, aper_timestep, leap_year)
    return run_period, reporting_frequency, dday_period
def test_daylight_saving(self):
    """Check that daylight saving reports as off for a plain sun path."""
    new_york = Location('New_York', 'USA', latitude=40.72, longitude=-74.02,
                        time_zone=-5)
    sun_path = Sunpath.from_location(new_york)
    summer_noon = DateTime(6, 21, 12, 0)
    winter_noon = DateTime(12, 21, 12, 0)
    # TODO(mostapha): This is not implemented yet
    # assert sun_path.is_daylight_saving_hour(summer_noon) is True
    assert sun_path.is_daylight_saving_hour(summer_noon) is False
    assert sun_path.is_daylight_saving_hour(winter_noon) is False
def test_filter_by_analysis_period_continuous_large():
    """Test filtering large analysis period on hourly continuous collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod(st_month=3, end_month=3))
    values = list(range(24 * 31))  # was xrange, which is undefined in Python 3
    dc = HourlyContinuousCollection(header, values)

    # filtering by hours of day leaves gaps, so the result is discontinuous
    a_per = AnalysisPeriod(st_hour=9, end_hour=17)
    filt_dc = dc.filter_by_analysis_period(a_per)
    assert len(filt_dc) == 31 * 9
    assert filt_dc.header.analysis_period == AnalysisPeriod(3, 1, 9, 3, 31, 17)
    assert filt_dc.datetimes[0] == DateTime(3, 1, 9)
    assert filt_dc.datetimes[-1] == DateTime(3, 31, 17)
    assert not isinstance(filt_dc, HourlyContinuousCollection)
def test_filter_by_analysis_period_hourly():
    """Test filtering by analysis period on hourly discontinuous collection."""
    header = Header(Temperature(), 'C', AnalysisPeriod())
    values = list(range(8760))  # was xrange, which is undefined in Python 3
    dc = HourlyDiscontinuousCollection(header, values,
                                       header.analysis_period.datetimes)
    dc = dc.validate_analysis_period()

    # filter the full year down to March
    a_per = AnalysisPeriod(st_month=3, end_month=3)
    filt_dc = dc.filter_by_analysis_period(a_per)
    assert len(filt_dc) == 31 * 24
    assert filt_dc.header.analysis_period == a_per
    assert filt_dc.datetimes[0] == DateTime(3, 1, 0)
    assert filt_dc.datetimes[-1] == DateTime(3, 31, 23)
def test_daylight_saving():
    """Test the application of daylight saving time."""
    nyc = Location('New_York', country='USA', latitude=40.72, longitude=-74.02,
                   time_zone=-5)
    # saving period: 2 AM March 8 through 2 AM November 1
    daylight_saving = AnalysisPeriod(st_month=3, st_day=8, st_hour=2,
                                     end_month=11, end_day=1, end_hour=2)
    sp = Sunpath.from_location(nyc, daylight_saving_period=daylight_saving)
    dt1 = DateTime(6, 21, 12, 0)
    dt2 = DateTime(12, 21, 12, 0)
    dt3 = DateTime(6, 21, 0)
    dt4 = DateTime(12, 21, 0)
    # summer date-times fall inside the saving period; winter ones do not
    assert sp.is_daylight_saving_hour(dt1)
    assert not sp.is_daylight_saving_hour(dt2)
    assert sp.is_daylight_saving_hour(dt3)
    assert not sp.is_daylight_saving_hour(dt4)
    sun1ds = sp.calculate_sun_from_date_time(dt1)
    sun2ds = sp.calculate_sun_from_date_time(dt2)
    sun3ds = sp.calculate_sun_from_date_time(dt3)
    sun4ds = sp.calculate_sun_from_date_time(dt4)
    # disable daylight saving; summer sun positions should change
    sp.daylight_saving_period = None
    assert sun1ds != sp.calculate_sun_from_date_time(dt1)
    assert sun3ds != sp.calculate_sun_from_date_time(dt3)
    # with DST, the sun matches the non-DST position one hour earlier
    assert sun1ds.altitude == \
        approx(sp.calculate_sun_from_date_time(dt1.sub_hour(1)).altitude, rel=1e-2)
    assert sun3ds.altitude == \
        approx(sp.calculate_sun_from_date_time(dt3.sub_hour(1)).altitude, rel=1e-2)
    # winter date-times were outside DST, so their suns are unchanged
    sun2 = sp.calculate_sun_from_date_time(dt2)
    sun4 = sp.calculate_sun_from_date_time(dt4)
    assert sun2 == sun2ds
    assert sun4 == sun4ds
def test_daylight_saving():
    """Check daylight saving detection with an explicit saving period."""
    new_york = Location('New_York', 'USA', latitude=40.72, longitude=-74.02,
                        time_zone=-5)
    # saving period runs from March 8 through November 1
    saving_period = AnalysisPeriod(st_month=3, st_day=8, end_month=11, end_day=1)
    sun_path = Sunpath.from_location(
        new_york, daylight_saving_period=saving_period)
    # a summer noon is inside the period; a winter noon is not
    assert sun_path.is_daylight_saving_hour(DateTime(6, 21, 12, 0))
    assert not sun_path.is_daylight_saving_hour(DateTime(12, 21, 12, 0))
def test_init():
    """Test the init methods for base collections."""
    period = AnalysisPeriod(6, 21, 12, 6, 21, 13)
    time_a, time_b = DateTime(6, 21, 12), DateTime(6, 21, 13)
    val_a, val_b = 20, 25

    # build the collection and verify its basic properties
    coll = BaseCollection(
        Header(Temperature(), 'C', period), [val_a, val_b], [time_a, time_b])
    assert coll.datetimes == (time_a, time_b)
    assert coll.values == (val_a, val_b)
    assert coll.average == (val_a + val_b) / 2
    str(coll)  # string representation of the collection should not raise
    str(coll.header)  # string representation of the header should not raise
def test_histogram_data_nested():
    """Test nested histogram data averaged by different frequency_hours."""
    # Testing vals
    dir_vals = [0, 0, 0, 10, 10, 10, 85, 90, 90, 90, 95, 170, 285, 288]
    spd_vals = dir_vals  # reuse directions as speeds for a deterministic check
    # Make into fake data collections
    a_per = AnalysisPeriod(6, 21, 12, 6, 21, 13)
    dates = [DateTime(6, 21, i) for i in range(len(dir_vals))]
    spd_header = Header(Speed(), 'm/s', a_per)
    dir_header = Header(GenericType('Direction', 'deg'), 'deg', a_per)
    spd_data = HourlyDiscontinuousCollection(spd_header, spd_vals, dates)
    dir_data = HourlyDiscontinuousCollection(dir_header, dir_vals, dates)

    # Init simple example w segs == bin num
    w = WindRose(dir_data, spd_data, 4)
    #w.legend_parameters = LegendParameters(segment_count=5)
    w.frequency_hours = 1

    # Bin values to divide into colors
    # 315-45:  [10, 10, 10]; 2 intervals, [10, 10, 10]
    # 45-135:  [85, 90, 90, 90, 95]; 3 intervals, [85, 90, 90, 90, 95]
    # 135-225: [170]; 1 intervals, [170];
    # 225-315: [285, 288]; 2 intervals, [285, 288]
    # interval_num: [2, 3, 1, 2]
    chk_histstack = [
        [10, 10, 10],
        [85, 90, 90, 90, 95],
        [170.],
        [285, 288]]

    # Testing: frequency of 1 hour leaves each bin's values untouched
    histstack = WindRose._histogram_data_nested(w.histogram_data, 1)
    for chkh, h in zip(chk_histstack, histstack):
        for c, _h in zip(chkh, h):
            assert abs(c - _h) <= 1e-10

    # Init complex dir set divided by 4
    w = WindRose(dir_data, spd_data, 4)
    w.frequency_hours = 2

    # Bin values to divide into colors
    # 315-45:  [10, 10, 10]; 2 intervals, [10, 10]
    # 45-135:  [85, 90, 90, 90, 95]; 3 intervals, [87.5, 90, 95.]
    # 135-225: [170]; 1 intervals, [170]
    # 225-315: [285, 288]; 2 intervals, [286.5]
    # interval_num: [2, 3, 1, 2]
    chk_histstack = [
        [10, 10],
        [87.5, 90, 95.],
        [170.],
        [286.5]]

    # Testing: frequency of 2 hours averages each pair of values in a bin
    histstack = WindRose._histogram_data_nested(w.histogram_data, 2)
    for chkh, h in zip(chk_histstack, histstack):
        for c, _h in zip(chkh, h):
            assert abs(c - _h) <= 1e-10
def sky_cie(day, month, time, latitude, longitude, time_zone, sky_type, north,
            ground, altitude, azimuth, folder, name):
    """Get a CIE sky file from parameters.

    These can be a minimal representation of the sky through altitude and
    azimuth (eg. "cie -alt 71.6 -az 185.2 -type 0"). Or it can be a detailed
    specification of time and location (eg. "cie 21 Jun 12:00 -lat 41.78 -lon
    -87.75 -type 0"). Both the altitude and azimuth must be specified for the
    minimal representation to be used. Otherwise, this command defaults to
    the detailed specification of time and location.

    \b
    Args:
        day: An integer for the day of the month (between 1 and 28-31).
        month: Text for the 3-letter abbreviation of the month of the year
            (eg. "Mar").
        time: Text for the time of day (from 0:00 to 23:59).
    """
    try:
        if altitude is not None and azimuth is not None:
            # minimal representation: position the sky directly by angles
            sky_obj = hbsky.CIE(altitude, azimuth, sky_type, ground)
        else:
            # detailed representation: derive the sun from time and location
            dtime = DateTime.from_date_time_string('{} {} {}'.format(
                day, month, time))
            sky_obj = hbsky.CIE.from_lat_long(
                latitude, longitude, time_zone, dtime.month, dtime.day,
                dtime.float_hour, sky_type, north, ground)
        sky_obj.to_file(folder, name, True)
    except Exception:
        _logger.exception('Failed to generate sky.')
        sys.exit(1)  # non-zero exit signals failure to the CLI caller
def results(self):
    """Return results for this analysis.

    Returns:
        The analysis grids with values loaded from the result files.
    """
    assert self._isCalculated, \
        "You haven't run the Recipe yet. Use self.run " + \
        "to run the analysis before loading the results."
    print('Unloading the current values from the analysis grids.')
    for ag in self.analysis_grids:
        ag.unload()
    sky = self.sky
    # rebuild the sky's time, splitting its fractional hour into minutes
    dt = DateTime(sky.month, sky.day, int(sky.hour),
                  int(60 * (sky.hour - int(sky.hour))))

    # all the results will be divided by this value to calculated the percentage
    div = self.SKYILLUM / 100.0

    rf = self._result_files
    start_line = 0
    for count, analysisGrid in enumerate(self.analysis_grids):
        if count:
            # offset into the shared result file by the size of prior grids
            start_line += len(self.analysis_grids[count - 1])

        analysisGrid.set_values_from_file(
            rf, (int(dt.hoy),), start_line=start_line, header=False, mode=div)

    return self.analysis_grids
def from_epw_file(cls, epw_file, month=6, day=21, hour=12, sky_density=1,
                  north=0):
    """Generate a climate-based sky vector.

    This method uses Radiance's gendaylit.

    Args:
        epw_file: Full path to epw weather file.
        month: Month [1..12] (default: 6).
        day: Day [1..31] (default: 21).
        hour: Hour [0..23] (default: 12).
        sky_density: A positive integer for sky density. [1] Tregenza Sky,
            [2] Reinhart Sky, etc. (Default: 1)
        north: North angle in degrees (Default: 0).
    """
    epw = EPW(epw_file)
    location = epw.location
    # hoy can be a float; it must be an int to index the radiation values
    hoy = int(DateTime(month, day, hour).hoy)
    dnr = epw.direct_normal_radiation.values()[hoy]
    dhr = epw.diffuse_horizontal_radiation.values()[hoy]

    return cls.from_radiation_values(location, dnr, dhr, month, day, hour,
                                     sky_density, north)
def set_sun(location, hoy, north=0):
    """Set the sun in the Rhino scene to correspond to a given location and DateTime.

    Args:
        location: A Ladybug Location object to set the latitude, longitude and
            time zone of the Rhino sun path.
        hoy: A number between 0 and 8760 that represent the hour of the year at
            which to evaluate the sun position. Note that this does not need to
            be an integer and decimal values can be used to specify date times
            that are not on the hour mark.
        north: A number between -360 and 360 for the counterclockwise difference
            between the North and the positive Y-axis in degrees. 90 is West and
            270 is East. (Default: 0).

    Returns:
        The Rhino sun object.
    """
    # translate the ladybug hour of the year into a .NET date/time
    lb_time = DateTime.from_hoy(hoy)
    net_time = System.DateTime(
        lb_time.year, lb_time.month, lb_time.day, lb_time.hour, lb_time.minute, 0)

    # enable the document sun and position it from the location and date/time
    rhino_sun = doc.Lights.Sun
    sun.Enabled.SetValue(rhino_sun, True)
    sun.TimeZone.SetValue(rhino_sun, location.time_zone)
    sun.SetPosition(rhino_sun, net_time, location.latitude, location.longitude)

    # orient the sun's north so that the y-axis is North
    sun.North.SetValue(rhino_sun, 90 + north)
    return rhino_sun
def test_histogram_data_stacked():
    """Test stacked histogram data with interval midpoints per segment."""
    # Testing vals
    dir_vals = [0, 0, 0, 10, 10, 10, 85, 90, 90, 90, 95, 170, 285, 288]
    spd_vals = dir_vals  # reuse directions as speeds for a deterministic check
    # Make into fake data collections
    a_per = AnalysisPeriod(6, 21, 12, 6, 21, 13)
    dates = [DateTime(6, 21, i) for i in range(len(dir_vals))]
    spd_header = Header(Speed(), 'm/s', a_per)
    dir_header = Header(GenericType('Direction', 'deg'), 'deg', a_per)
    spd_data = HourlyDiscontinuousCollection(spd_header, spd_vals, dates)
    dir_data = HourlyDiscontinuousCollection(dir_header, dir_vals, dates)

    # Init simple dir set divided by 4
    w = WindRose(dir_data, spd_data, 4)
    w.legend_parameters.segment_count = 3

    # Bin values to divide into colors
    # 315-45:  [10, 10, 10]; 2 intervals
    # 45-135:  [85, 90, 90, 90, 95]; 3 intervals, [85. , 88.3, 91.7, 95. ]
    # 135-225: [170]; 1 intervals
    # 225-315: [285, 288]; 2 intervals, [285. , 286.5, 288. ]
    # interval_num: [2, 3, 1, 2]
    # each expected value is the midpoint of its segment's interval
    chk_histstack = [
        [(10 + 10) / 2., (10 + 10) / 2.],
        [(85 + 88.3) / 2., (88.3 + 91.7) / 2., (91.7 + 95) / 2.],
        [170.],
        [(285 + 286.5) / 2., (286.5 + 288) / 2.]]

    # Testing
    histstack = WindRose._histogram_data_stacked(w.histogram_data, 3)
    for chkh, h in zip(chk_histstack, histstack):
        for c, _h in zip(chkh, h):
            assert abs(c - _h) <= 1e-1
def _calculate_solar_values(self):
    """Calculate solar values for requested hours of the year.

    This method is called everytime that output type is set.
    """
    wea = self.wea
    hoys_set = set(wea.hoys)  # O(1) membership test inside the loop
    output_type = self.output_type
    month_date_time = (DateTime.from_hoy(idx) for idx in self.hoys)
    sp = Sunpath.from_location(wea.location, self.north)

    # use gendaylit to calculate radiation values for each hour.
    print('Calculating solar values...')
    for timecount, dt in enumerate(month_date_time):
        if dt.hoy not in hoys_set:
            # requested hour has no weather data; warn and skip it
            print('Warn: Wea data for {} is not available!'.format(dt))
            continue
        month, day, hour = dt.month, dt.day, dt.float_hour
        dnr, dhr = wea.get_radiation_values(month, day, hour)

        sun = sp.calculate_sun(month, day, hour)
        if sun.altitude < 0:
            # the sun is below the horizon; skip this hour entirely
            continue
        if dnr == 0:
            solarradiance = 0
        else:
            solarradiance = \
                int(gendaylit(sun.altitude, month, day, hour, dnr, dhr,
                              output_type))

        self._solar_values.append(solarradiance)
        # keep the number of hour relative to hoys in this sun matrix
        self._sun_up_hours_indices.append(timecount)
def _calculate_solar_values(wea, hoys, output_type, north=0, is_leap_year=False):
    """Calculate solar values for requested hours of the year.

    This method is called everytime that output type is set.
    """
    date_times = (DateTime.from_hoy(h, is_leap_year) for h in hoys)
    sun_path = Sunpath.from_location(wea.location, north)
    sun_path.is_leap_year = is_leap_year

    solar_values = []
    sun_up_hours = []
    # use gendaylit to calculate radiation values for each hour.
    print('Calculating solar values...')
    for dt in date_times:
        month, day, hour = dt.month, dt.day, dt.float_hour
        sun = sun_path.calculate_sun(month, day, hour)
        if sun.altitude < 0:
            continue  # the sun is below the horizon; skip this hour
        dnr, dhr = wea.get_irradiance_value(month, day, hour)
        if dnr == 0:
            rad_value = 0
        else:
            rad_value = int(
                gendaylit(sun.altitude, month, day, hour, dnr, dhr, output_type))
        solar_values.append(rad_value)
        # keep the number of hour relative to hoys in this sun matrix
        sun_up_hours.append(dt.hoy)
    return solar_values, sun_up_hours
def test_schedule_fixedinterval_from_idf():
    """Test the ScheduleFixedInterval from_idf method."""
    # a Schedule:File IDF object pointing at an 8760-value hourly CSV
    idf_str = \
        """Schedule:File,
        Electrochromic Control, !- schedule name
        On-Off, !- schedule type limits
        ./tests/csv/Electrochromic_Control.csv, !- file name
        1, !- column number
        0, !- rows to skip
        8760, !- number of hours of data
        Comma, !- column separator
        No, !- interpolate to timestep
        60; !- minutes per item
        """
    ec_schedule = ScheduleFixedInterval.from_idf(idf_str)
    # identity and values parsed from the referenced CSV
    assert ec_schedule.identifier == 'Electrochromic Control'
    assert len(ec_schedule.values) == 8760
    assert ec_schedule[0] == 0
    assert ec_schedule.schedule_type_limit is None
    # 60 minutes per item translates to a timestep of 1 per hour
    assert ec_schedule.timestep == 1
    assert not ec_schedule.interpolate
    # a full non-leap year of hourly data spans Jan 1 to Dec 31 23:00
    assert ec_schedule.start_date == Date(1, 1)
    assert ec_schedule.end_date_time == DateTime(12, 31, 23)
    assert not ec_schedule.is_leap_year
    assert ec_schedule.placeholder_value == 0
def from_epw(cls, epw, month, day, hour, north_angle=0, ground_reflectance=0.2):
    """Create a standard climate-based sky from a EPW.

    Args:
        epw: A Ladybug EPW object.
        month: An integer between 1-12 for month.
        day: An integer between 1 to 28-31 depending on the input month.
        hour: A float number larger or equal to 0 and smaller than 24.
        north_angle: North angle in degrees. A number between -360 and 360 for
            the counterclockwise difference between the North and the positive
            Y-axis in degrees. 90 is West and 270 is East (Default: 0).
        ground_reflectance: Average ground reflectance (Default: 0.2).
    """
    assert isinstance(epw, EPW), \
        'epw must be from type EPW not {}'.format(type(epw))
    loc = epw.location
    # the hour of the year indexes the EPW radiation data collections
    hour_of_year = int(DateTime(month, day, hour).hoy)
    dir_norm = epw.direct_normal_radiation[hour_of_year]
    dif_horiz = epw.diffuse_horizontal_radiation[hour_of_year]
    return cls.from_lat_long(
        loc.latitude, loc.longitude, loc.time_zone, month, day, hour,
        dir_norm, dif_horiz, north_angle, ground_reflectance)
def test_radial_histogram():
    """Test circular histogram"""
    # test direction values spanning all four quadrant bins
    direction_vals = [0, 0, 0, 10, 85, 90, 95, 170, 285, 288]
    speed_vals = direction_vals

    # make the values into fake data collections
    period = AnalysisPeriod(6, 21, 12, 6, 21, 13)
    times = [DateTime(6, 21, i) for i in range(len(direction_vals))]
    speed_data = HourlyDiscontinuousCollection(
        Header(Speed(), 'm/s', period), speed_vals, times)
    direction_data = HourlyDiscontinuousCollection(
        Header(GenericType('Direction', 'deg'), 'deg', period),
        direction_vals, times)

    # initialize a simple wind rose divided into 4 direction bins
    wind_rose = WindRose(direction_data, speed_data, 4)

    # smoke-test the radial histogram array construction
    bin_vecs = wind_rose.bin_vectors
    center_point = (0, 0)
    radius_range = (0., 1.)
    hist = wind_rose.histogram_data
    stacked = wind_rose._histogram_data_stacked(hist, 1)
    WindRose._histogram_array_radial(
        bin_vecs, center_point, hist, stacked, radius_range, False)
def results(self):
    """Return results for this analysis.

    Returns:
        The analysis grids with values loaded from the result files.
    """
    assert self._isCalculated, \
        "You haven't run the Recipe yet. Use self.run " + \
        "to run the analysis before loading the results."
    print('Unloading the current values from the analysis grids.')
    for ag in self.analysisGrids:
        ag.unload()
    sky = self.sky
    # rebuild the sky's time, splitting its fractional hour into minutes
    dt = DateTime(sky.month, sky.day, int(sky.hour),
                  int(60 * (sky.hour - int(sky.hour))))

    rf = self._resultFiles
    startLine = 0
    # NOTE(review): 179 looks like the standard luminous efficacy factor
    # applied when simulationType is 1 — confirm against the recipe docs
    mode = 179 if self.simulationType == 1 else 0
    for count, analysisGrid in enumerate(self.analysisGrids):
        if count:
            # offset into the shared result file by the size of prior grids
            startLine += len(self.analysisGrids[count - 1])

        analysisGrid.setValuesFromFile(
            rf, (int(dt.hoy),), startLine=startLine, header=False, mode=mode)

    return self.analysisGrids
def hoy(self, v):
    """Set datetime by hour of year.

    Args:
        v: A number for the hour of the year used to rebuild the datetime.
            NOTE(review): from_hoy is called without a leap_year flag here,
            so any prior leap-year state appears to be discarded — confirm.
    """
    self._datetime = DateTime.from_hoy(v)