def _load_solarcal_splines():
    """Load the spline data that gets used in SolarCal.

    Returns:
        A dictionary with 'seated' and 'standing' keys, each mapping to a
        numeric matrix of projection factor splines parsed from CSV files
        in the '_mannequin' sub-folder. If the CSVs cannot be read, an
        empty dictionary is returned and SolarCal falls back to a simpler
        interpolation method.
    """
    try:
        cur_dir = os.path.dirname(__file__)
        solarcal_splines = {
            'seated': csv_to_num_matrix(os.path.join(
                cur_dir, '_mannequin', 'seatedspline.csv')),
            'standing': csv_to_num_matrix(os.path.join(
                cur_dir, '_mannequin', 'standingspline.csv'))}
    except OSError:  # IOError is an alias of OSError in Python 3
        # best-effort fallback: warn and return an empty dict so callers
        # can switch to the simpler interpolation method
        solarcal_splines = {}
        print('Failed to import projection factor splines from CSV.'
              '\nA simpler interpolation method for Solarcal will be used.')
    return solarcal_splines
def test_csv_to_matrix():
    """Test the csv_to_matrix functions."""
    epw_path = './tests/fixtures/epw/tokyo.epw'
    # the plain string matrix parses the whole file (8760 hours + 8 header rows)
    matrix = futil.csv_to_matrix(epw_path)
    assert len(matrix) == 8768
    # the numeric variant must fail on the EPW's non-numeric header text
    with pytest.raises(Exception):
        futil.csv_to_num_matrix(epw_path)
# parse the data header and determine whether the collections are continuous
data_header = Header.from_dict(json.load(json_file))
a_per = data_header.analysis_period
continuous = a_per.st_hour == 0 and a_per.end_hour == 23
if not continuous:
    # datetimes are only needed to build discontinuous collections
    dates = a_per.datetimes

# parse the grids_info.json with the correct order of the grid files
with open(os.path.join(_comf_result, 'grids_info.json')) as json_file:
    grid_list = json.load(json_file)

# loop through the grid CSV files, parse their results, and build data collections
comf_matrix = []
for grid in grid_list:
    # fall back to the 'id' key when 'full_id' is absent
    # (the original used the literal string 'id', which pointed every
    # such grid at a nonexistent 'id.csv' file)
    grid_name = grid['full_id'] if 'full_id' in grid else grid['id']
    metadata = {'grid': grid_name}
    grid_file = os.path.join(_comf_result, '{}.csv'.format(grid_name))
    data_matrix = csv_to_num_matrix(grid_file)
    grid_data = []
    for i, row in enumerate(data_matrix):
        # each CSV row is one sensor; copy metadata so collections stay independent
        header = data_header.duplicate()
        header.metadata = metadata.copy()
        header.metadata['sensor_index'] = i
        data = HourlyContinuousCollection(header, row) if continuous else \
            HourlyDiscontinuousCollection(header, row, dates)
        grid_data.append(data)
    comf_matrix.append(grid_data)

# wrap the matrix into an object so that it does not slow the Grasshopper UI
comf_mtx = objectify_output(
    '{} Matrix'.format(data_header.data_type.name), comf_matrix)