def create_dataframeBI(bi_path, section='[CONDUITS]'):
    """
    Return a DataFrame of the data found in one section of a build
    instructions file.

    Parameters
    ----------
    bi_path : str
        Path to the build instructions file.
    section : str
        Section header to extract, e.g. '[CONDUITS]'.

    Returns
    -------
    pandas.DataFrame
        Table indexed on the first column of the section, with the
        section's columns plus ';', 'Comment', and 'Origin'.
    """
    hdr_defs = funcs.complete_inp_headers(bi_path)
    cols = hdr_defs['headers'][section].split()
    cols += [';', 'Comment', 'Origin']

    # isolate the requested section into a temporary file for parsing
    temp_path = txt.extract_section_from_inp(bi_path, section,
                                             headerdefs=hdr_defs,
                                             skipheaders=True)
    frame = pd.read_table(temp_path,
                          header=None,
                          delim_whitespace=True,
                          skiprows=[0],
                          index_col=0,
                          names=cols,
                          comment=None)
    # remove the temporary extraction file
    os.remove(temp_path)
    return frame
def create_dataframeINP(inp_path, section='[CONDUITS]', ignore_comments=True,
                        comment_str=';', comment_cols=True):
    """
    Return a DataFrame of the data found in one section of an INP file.

    Parameters
    ----------
    inp_path : str
        Path to the INP file.
    section : str
        Section header to extract, e.g. '[CONDUITS]'.
    ignore_comments : bool
        When True, comment text is stripped (comment_str is forced to None).
    comment_str : str
        Character that starts a comment in the file.
    comment_cols : bool
        When True, append ';', 'Comment', and 'Origin' columns to the
        recognized header list.

    Returns
    -------
    pandas.DataFrame
        Parsed section (index coerced to str), or an empty DataFrame with
        the expected schema when the section is absent.
    """
    # section title definitions with cleaned one-liner column headers
    hdr_defs = funcs.complete_inp_headers(inp_path)

    # isolate the requested section from the inp file into a temp file
    temp_path = txt.extract_section_from_inp(inp_path, section,
                                             headerdefs=hdr_defs,
                                             ignore_comments=ignore_comments)
    if ignore_comments:
        comment_str = None

    if not temp_path:
        # section not present in the text file: return an empty frame
        # that still carries the appropriate schema
        print('header "{}" not found in "{}"'.format(section, inp_path))
        print('returning empty dataframe')
        cols = hdr_defs['headers'].get(section, 'blob').split()
        cols += [';', 'Comment', 'Origin']
        return pd.DataFrame(data=None, columns=cols).set_index(cols[0])

    if hdr_defs['headers'][section] == 'blob':
        # unrecognized layout: keep each row whole, no specific col headers
        frame = pd.read_table(temp_path, delim_whitespace=False,
                              comment=comment_str)
    elif section in ('[CURVES]', '[TIMESERIES]'):
        # keep each row whole, no specific col headers
        frame = pd.read_table(temp_path, delim_whitespace=False)
    else:
        # recognized section: organize data into known columns
        cols = hdr_defs['headers'][section].split()
        if comment_cols:
            cols = cols + [';', 'Comment', 'Origin']
        frame = pd.read_table(temp_path,
                              header=None,
                              delim_whitespace=True,
                              skiprows=[0],
                              index_col=0,
                              names=cols,
                              comment=comment_str)
        if comment_cols:
            # add a fresh semicolon column so comments re-serialize cleanly
            frame[';'] = ';'

    # remove the temporary extraction file
    os.remove(temp_path)
    return frame.rename(index=str)
def generate_inp_from_diffs(basemodel, inpdiffs, target_dir):
    """
    Create a new inp with respect to a baseline inp and changes instructed
    with a list of inp diff files (build instructions). This saves having
    to recalculate the differences of each model from the baseline
    whenever we want to combine versions.

    Parameters
    ----------
    basemodel : model object
        Baseline model; its ``.inp.filePath`` is read as the base INP.
    inpdiffs : list of str
        Paths to inp diff (build instruction) files, applied in order.
    target_dir : str
        Directory where 'build_instructions.txt' and 'new.inp' are written.
    """
    # step 1 --> combine the diff/build instructions
    allheaders = funcs.complete_inp_headers(basemodel.inp.filePath)
    combi_build_instr_file = os.path.join(target_dir, 'build_instructions.txt')
    newinp = os.path.join(target_dir, 'new.inp')
    with open(combi_build_instr_file, 'w') as f:
        for header in allheaders['order']:
            s = ''
            section_header_written = False
            for inp in inpdiffs:
                sect_s = None
                if not section_header_written:
                    # only the first diff file contributes the section header
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=False)
                    section_header_written = True
                else:
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=True)
                if sect_s:
                    # remove the extra space between data in the same table
                    # coming from different models
                    if sect_s[-2:] == '\n\n':  # NOTE Check this section...
                        s += sect_s[:-1]
                    else:
                        s += sect_s
            f.write(s + '\n')

    # step 2 --> clean up the new combined diff instructions
    df_dict = clean_inp_diff_formatting(combi_build_instr_file)  # makes more human readable

    # step 3 --> create a new inp based on the baseline, with the inp_diff
    # instructions applied
    with open(newinp, 'w') as f:
        for section in allheaders['order']:
            # BUG FIX: was Python 2 `print section` statement (SyntaxError
            # on Python 3; rest of file uses the print() function)
            print(section)
            if section not in problem_sections and allheaders['headers'][section] != 'blob':
                # check if a changes-from-baseline spreadsheet exists, and use
                # this information if available to create the changes array.
                # BUG FIX: create_dataframeINP expects a file path, not the
                # inp object itself — pass basemodel.inp.filePath (consistent
                # with the complete_inp_headers call above)
                df = create_dataframeINP(basemodel.inp.filePath, section)
                df['Origin'] = ''  # add the origin column if not there
                if section in df_dict:
                    df_change = df_dict[section]
                    # drop rows the diff marks as removed/altered, then
                    # append the added/altered replacements
                    ids_to_drop = df_change.loc[df_change['Comment'].isin(
                        ['Removed', 'Altered'])].index
                    df = df.drop(ids_to_drop)
                    df = df.append(df_change.loc[df_change['Comment'].isin(
                        ['Added', 'Altered'])])
                new_section = df
            else:
                # blindly copy this section from the base model
                new_section = create_dataframeINP(basemodel.inp.filePath,
                                                  section=section)
            # write the section into the inp file and the excel file
            vc_utils.write_inp_section(f, allheaders, section, new_section)
def generate_inp_from_diffs(basemodel, inpdiffs, target_dir):
    """
    Create a new inp with respect to a baseline inp and changes instructed
    with a list of inp diff files (build instructions). This saves having
    to recalculate the differences of each model from the baseline
    whenever we want to combine versions.

    NOTE THIS ISN'T USED ANYWHERE. DELETE ????

    Parameters
    ----------
    basemodel : model object
        Baseline model; its ``.inp.path`` is read as the base INP.
    inpdiffs : list of str
        Paths to inp diff (build instruction) files, applied in order.
    target_dir : str
        Directory where 'build_instructions.txt' and 'new.inp' are written.
    """
    # step 1 --> combine the diff/build instructions
    allheaders = funcs.complete_inp_headers(basemodel.inp.path)
    combi_build_instr_file = os.path.join(target_dir, 'build_instructions.txt')
    newinp = os.path.join(target_dir, 'new.inp')
    with open(combi_build_instr_file, 'w') as f:
        for header in allheaders['order']:
            s = ''
            section_header_written = False
            for inp in inpdiffs:
                sect_s = None
                if not section_header_written:
                    # only the first diff file contributes the section header
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=False)
                    section_header_written = True
                else:
                    sect_s = text.extract_section_from_inp(inp, header,
                                                           cleanheaders=False,
                                                           return_string=True,
                                                           skipheaders=True)
                if sect_s:
                    # remove the extra space between data in the same table
                    # coming from different models
                    if sect_s[-2:] == '\n\n':  # NOTE Check this section...
                        s += sect_s[:-1]
                    else:
                        s += sect_s
            f.write(s + '\n')

    # step 2 --> clean up the new combined diff instructions
    # BUG FIX: this assignment was commented out, but df_dict is read in
    # step 3 below — running the function raised NameError. Restored.
    df_dict = clean_inp_diff_formatting(combi_build_instr_file)  # makes more human readable

    # step 3 --> create a new inp based on the baseline, with the inp_diff
    # instructions applied
    with open(newinp, 'w') as f:
        for section in allheaders['order']:
            print(section)
            if section not in problem_sections and allheaders['headers'][section] != 'blob':
                # check if a changes-from-baseline spreadsheet exists, and use
                # this information if available to create the changes array
                df = create_dataframeINP(basemodel.inp.path, section)
                df['Origin'] = ''  # add the origin column if not there
                if section in df_dict:
                    df_change = df_dict[section]
                    # drop rows the diff marks as removed/altered, then
                    # append the added/altered replacements
                    ids_to_drop = df_change.loc[df_change['Comment'].isin(
                        ['Removed', 'Altered'])].index
                    df = df.drop(ids_to_drop)
                    # NOTE(review): DataFrame.append was removed in pandas 2.0;
                    # switch to pd.concat if this project upgrades pandas
                    df = df.append(df_change.loc[df_change['Comment'].isin(
                        ['Added', 'Altered'])])
                new_section = df
            else:
                # blindly copy this section from the base model
                new_section = create_dataframeINP(basemodel.inp.path,
                                                  section=section)
            # write the section into the inp file and the excel file
            vc_utils.write_inp_section(f, allheaders, section, new_section)