def parse_type46(file_abspath):
    """Parse results from TRNSYS Type 46 generated file.

    Parses monthly integrated results from files generated by TRNSYS
    Type 46, also known as "Printegrator". Also parses min and max
    instantaneous values (and corresponding times) and total values for
    all variables, but discards max and min integrated values, which can
    be easily deduced from monthly integrated values.

    Args:
        file_abspath: absolute path to result file.

    Returns:
        list of dicts (one dict per useful row of result file).

    Raises:
        IOError: problem reading out_file
    """
    # Parse data from Type-46-generated tab separated file and return a list
    # of dicts (one dict per each row of result file).
    # newline='' is the csv-module-recommended way to open input files;
    # the former 'rU' mode was deprecated and removed in Python 3.11.
    with open(file_abspath, 'r', newline='') as out_f:
        next(out_f)  # Skip first line which doesn't hold any useful info
        dr = csv.DictReader(out_f, delimiter='\t')
        fieldnames = dr.fieldnames
        dr_list = list(dr)

    dict_list = []
    months_list = ['January', 'February', 'March', 'April', 'May', 'June',
                   'July', 'August', 'September', 'October', 'November',
                   'December']

    # Verify that the file is a valid Type46 output file
    if fieldnames[0].strip() in ('Month', 'Period'):
        # Check if the given file holds results integrated monthly
        if dr_list[0][fieldnames[0]].strip() in months_list:
            # Rows at these fixed positions hold max/min *integrated*
            # values, which are redundant with the monthly rows and
            # therefore discarded.
            skipped_rows = {16, 21, 25, 26, 30, 31}
            # Build a list of dicts (one row from file = one dict),
            # keeping only rows whose first field is non-blank.
            for idx, row in enumerate(dr_list):
                if row[fieldnames[0]].strip() and idx not in skipped_rows:
                    dict_list.append(row)
        else:
            # Print error message if integration period is not monthly
            print("Invalid integration period."
                  + "\nFunction only parses monthly integrated results.")
    else:
        # Print error message if file format doesn't fit Type 46
        print("Unrecognized file format.")

    # Post-process generated list of dicts to remove unwanted whitespace
    # and convert strings with numbers to float type
    for idx, item in enumerate(dict_list):
        dict_list[idx] = dict_cleanconvert(item)

    return dict_list
def parse_da(file_abspathlist):
    """Parse results from DAYSIM DA results files.

    Parses all results from DA files and put them in a list of dicts,
    each dict corresponding to a particular sensor point.

    Args:
        file_abspathlist: list of absolute paths to DA result files.

    Returns:
        A list of dicts (one dict for each sensor point)

    Raises:
        IOError: problem reading out_file
    """
    da_results = []
    # Maps the data description found in the file header to the short
    # result key used in the output dicts; unknown descriptions are used
    # as the key verbatim.
    key_map = {
        'Daylight Autonomy': 'DA',
        'Continuous Daylight Autonomy': 'DAcon',
        'DA_max': 'DAmax',
        'Daylight Saturation Potential': 'DSP',
    }
    # Parse data from DA files generated by Daysim and return a list of
    # dicts (one dict per each sensor point in result file).
    for file_abspath in file_abspathlist:
        # newline='' is the csv-module-recommended way to open input
        # files; the former 'rU' mode was removed in Python 3.11.
        with open(file_abspath, 'r', newline='') as out_f:
            line_1 = next(out_f)  # First line identifies the data
            # Search for string that describes data contained in file
            match = re.search(r'# (.*) - Active User', line_1)
            da_data = match.group(1)
            # Extract illuminance level from found string,
            # e.g. "Daylight Autonomy (300 lux)" -> "Daylight Autonomy"
            match = re.search(r'(.*) \((\d{3}) lux\)', da_data)
            if match:
                da_data = match.group(1)
            next(out_f)  # Skip second line which doesn't hold useful info
            dr = csv.reader(out_f, delimiter='\t')
            res_key = key_map.get(da_data, da_data)
            # Build a list of dicts (one row from file = one dict)
            dict_list = [{'sens_x': row[0],
                          'sens_y': row[1],
                          'sens_z': row[2],
                          res_key: row[3]} for row in dr]

        # Post-process generated list of dicts to remove unwanted
        # whitespace and convert strings with numbers to float type.
        # BUG FIX: the original rebound the loop variable (which also
        # shadowed the builtin `dict`) instead of storing the cleaned
        # dict back into the list.
        for i, d in enumerate(dict_list):
            dict_list[i] = dict_cleanconvert(d)

        # Group results in list of dicts: first file seeds the list,
        # later files merge their result column into the existing dicts
        # (rows are assumed to describe the same sensor points in order).
        if not da_results:
            da_results.extend(dict_list)
        else:
            for res_dict, new_dict in zip(da_results, dict_list):
                res_dict.update(new_dict)

    return da_results