def _wrap_opener_and_closer_process(self, text):
    """Wrap the opener/closer regions of a letter text.

    Temporarily marks each candidate element with TEMP tags, locates the
    positions of the marked spans, merges contiguous spans, and wraps the
    merged pieces. This is a best-effort transformation: if any step
    fails, the original text is returned unchanged.

    Args:
        text: the letter text (TEI-like markup) to process.

    Returns:
        The text with opener/closer wrapping applied, or the unmodified
        input if the wrapping pipeline raises.
    """
    # Elements that may belong to an opener or closer block.
    tags_in_opener_and_closer = ["salute", "dateline", "date", "address", "signed"]
    letter_text = text
    for tag in tags_in_opener_and_closer:
        letter_text = WU.wrap_element_with_tags(letter_text, tag, "TEMP")
    # Fix: dropped the dead `pieces = []` assignment that was immediately
    # overwritten by this call.
    pieces = WU.find_positions_of_matches(letter_text, pieces=[])
    try:
        contiguous_pieces = WU.find_contiguous_pieces(pieces)
        return WU.wrap_pieces_in_text(letter_text, contiguous_pieces)
    except Exception:
        # Deliberate best effort: fall back to the untouched input rather
        # than failing the whole record.
        # TODO(review): log the exception (or re-raise and handle higher up)
        # instead of discarding it silently.
        return text
# NOTE(review): this method is a byte-for-byte duplicate (quote style aside)
# of the `_wrap_opener_and_closer_process` defined just above it. Python
# keeps the later definition, so this one silently shadows the earlier copy.
# One of the two should be deleted.
def _wrap_opener_and_closer_process(self, text):
    """Best-effort wrapping of opener/closer regions in a letter text.

    Marks candidate elements with TEMP tags, finds the marked spans,
    merges contiguous spans, and wraps them. Returns the input unchanged
    if any step raises.
    """
    # List of tags to consider for opener/closer membership.
    tags_in_opener_and_closer = ['salute', 'dateline', 'date', 'address', 'signed']
    letter_text = text
    for tag in tags_in_opener_and_closer:
        letter_text = WU.wrap_element_with_tags(letter_text, tag, 'TEMP')
    # NOTE(review): this assignment is dead — overwritten on the next line.
    pieces = []
    pieces = WU.find_positions_of_matches(letter_text, pieces=[])
    try:
        contiguous_pieces = WU.find_contiguous_pieces(pieces)
        opener_closer_fixed_text = WU.wrap_pieces_in_text(letter_text, contiguous_pieces)
        return opener_closer_fixed_text
    except Exception as e:
        # Maybe raise error and log higher up?? Currently the exception is
        # swallowed and the original text is returned as a fallback.
        return text
def CollectDynSym(args):
    """Replaces: nm --format=posix -g -D $sofile | cut -f1-2 -d' '

    Runs nm over the shared object and keeps only the first two
    space-separated fields of every output line (symbol name and type).

    Args:
        args: parsed arguments providing `.nm` (path to the nm binary)
            and `.sofile` (the shared object to inspect).

    Returns:
        A `(exit_code, toc)` tuple: nm's exit status and the collected
        table-of-contents text.
    """
    toc = ''
    # Fix: `universal_newlines=True` makes stdout yield `str` lines;
    # without it, Python 3 yields `bytes` and the string operations below
    # raise TypeError.
    nm = subprocess.Popen(WrapperUtils.CommandToRun(
        [args.nm, '--format=posix', '-g', '-D', args.sofile]),
        stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
    for line in nm.stdout:
        toc += ' '.join(line.split(' ', 2)[:2]) + '\n'
    return nm.wait(), toc
def CollectSONAME(args):
    """Replaces: readelf -d $sofile | grep SONAME

    Runs readelf on the shared object and keeps only the dynamic-section
    lines that mention SONAME.

    Args:
        args: parsed arguments providing `.readelf` (path to the readelf
            binary) and `.sofile` (the shared object to inspect).

    Returns:
        A `(exit_code, toc)` tuple: readelf's exit status and the
        collected SONAME lines.
    """
    toc = ''
    # Fix: `universal_newlines=True` makes stdout yield `str` lines;
    # without it, Python 3 yields `bytes` and `'SONAME' in line` raises
    # TypeError.
    readelf = subprocess.Popen(WrapperUtils.CommandToRun(
        [args.readelf, '-d', args.sofile]),
        stdout=subprocess.PIPE, bufsize=-1, universal_newlines=True)
    for line in readelf.stdout:
        if 'SONAME' in line:
            toc += line
    return readelf.wait(), toc
def main():
    """Create a static archive with ar, optionally merging resource whitelists.

    Parses the wrapper's command line, merges resource whitelists when
    requested, deletes any stale output archive, and then invokes ar.

    Returns:
        The exit status of the ar invocation.
    """
    # Local import keeps the fix self-contained; the module's import header
    # is outside this block.
    import errno

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--ar', required=True,
                        help='The ar binary to run', metavar='PATH')
    parser.add_argument('--output', required=True,
                        help='Output archive file', metavar='ARCHIVE')
    parser.add_argument('--plugin', help='Load plugin')
    parser.add_argument('--resource-whitelist',
                        help='Merge all resource whitelists into a single file.',
                        metavar='PATH')
    parser.add_argument('operation', help='Operation on the archive')
    parser.add_argument('inputs', nargs='+', help='Input files')
    args = parser.parse_args()

    if args.resource_whitelist:
        whitelist_candidates = WrapperUtils.ResolveRspLinks(args.inputs)
        WrapperUtils.CombineResourceWhitelists(
            whitelist_candidates, args.resource_whitelist)

    command = [args.ar, args.operation]
    if args.plugin is not None:
        command += ['--plugin', args.plugin]
    command.append(args.output)
    command += args.inputs

    # Remove the output file first so ar builds a fresh archive.
    try:
        os.remove(args.output)
    except OSError as e:
        # Fix: the original checked `os.errno.ENOENT`, but `os.errno` does
        # not exist in modern Python 3 — the errno module is the correct
        # home for ENOENT. A missing output file is fine; re-raise anything else.
        if e.errno != errno.ENOENT:
            raise

    # Now just run the ar command.
    return subprocess.call(WrapperUtils.CommandToRun(command))
def _fix(self, row):
    """Normalize address/dateline markup in one document row.

    For 'Letter' rows: wraps <lb/>-separated address contents of the
    opener and closer in addrLine elements, wraps the opener's date in a
    dateline, strips stray <lb/> from the opener, rebuilds the per-page
    text, and fixes the envelope page's address. For 'PostcardAM' rows:
    fixes only the address-side page. Other row types pass through
    unchanged.

    NOTE(review): `new_row = row` aliases the input dict — the row is
    mutated in place, not copied. TODO confirm callers expect that.
    Most failures below are deliberately swallowed (best effort).

    Args:
        row: a record dict with at least "Type" and "Pages" keys; each
            page dict carries "PageType" and "Translation".

    Returns:
        The (mutated) row with fixed page markup.
    """
    new_row = row
    if row["Type"] == 'Letter':
        # Work on the letter's pages merged into one text blob.
        text = self._merged_pages(row['Pages'])
        if row:
            try:
                # Fix the opener's address: wrap <lb/>-split parts in addrLine.
                extracted_opener = HF.extractTagContents(text, 'opener')
                extracted_address = HF.extractTagContents(extracted_opener[0], 'address')
                try:
                    wrapped = HF.wrapOnEmptyElementSplit(extracted_address[0],'lb','addrLine')
                    text = text.replace(extracted_address[0], wrapped)
                except IndexError:
                    # No address in the opener — nothing to wrap.
                    pass
                # Same treatment for the closer's address.
                extracted_closer = HF.extractTagContents(text, 'closer')
                extracted_address = HF.extractTagContents(extracted_closer[0], 'address')
                try:
                    wrapped = HF.wrapOnEmptyElementSplit(extracted_address[0],'lb','addrLine')
                    text = text.replace(extracted_address[0], wrapped)
                except IndexError:
                    # No address in the closer — nothing to wrap.
                    pass
            except Exception:
                # Opener/closer missing entirely (IndexError from [0]) or
                # malformed markup — skip the address fixes.
                pass
            try:
                # Wrap the opener's date element in a dateline element.
                extracted_opener = HF.extractTagContents(text, 'opener')
                dateline_wrapped = WU.wrap_element_with_tags(extracted_opener[0], 'date', 'dateline')
                text = text.replace(extracted_opener[0], dateline_wrapped)
            except:
                # NOTE(review): bare except — hides all errors, not just a
                # missing opener/date.
                pass
            try:
                # Strip stray <lb/> markers from the opener.
                extracted_opener = HF.extractTagContents(text, 'opener')
                lb_stripped = extracted_opener[0].replace("<lb/>","")
                text = text.replace(extracted_opener[0], lb_stripped)
            except:
                # NOTE(review): bare except — same concern as above.
                pass
            # Split the merged text back into pages and store it.
            split = self._split_pages(text)
            new_row["Pages"] = self._build_new_page_row(row['Pages'], split)
            # Envelope pages are handled separately from the merged body.
            addrPageID = [k for k, p in row["Pages"].items() if p["PageType"] == 'EnvelopeType']
            if addrPageID:
                text = row["Pages"][addrPageID[0]]["Translation"]
                try:
                    extracted_address = HF.extractTagContents(text, 'address')
                    try:
                        wrapped = HF.wrapOnEmptyElementSplit(extracted_address[0],'lb','addrLine')
                        text = text.replace(extracted_address[0], wrapped)
                    except:
                        pass
                    except:
                        pass
                new_row["Pages"][addrPageID[0]]["Translation"] = text
    elif row["Type"] == 'PostcardAM':
        # Postcards: only the address side needs the addrLine wrapping.
        # NOTE(review): unlike the envelope branch, addrPageID is indexed
        # without an emptiness check — a postcard with no 'AddressSide'
        # page would raise IndexError here. TODO confirm that cannot occur.
        addrPageID = [k for k, p in row["Pages"].items() if p["PageType"] == 'AddressSide']
        text = row["Pages"][addrPageID[0]]["Translation"]
        try:
            extracted_address = HF.extractTagContents(text, 'address')
            try:
                wrapped = HF.wrapOnEmptyElementSplit(extracted_address[0],'lb','addrLine')
                text = text.replace(extracted_address[0], wrapped)
            except:
                pass
        except:
            pass
        new_row["Pages"][addrPageID[0]]["Translation"] = text
    return new_row
def main():
    """Link a shared object, write its table-of-contents file, and
    optionally strip the result.

    Returns:
        The exit status of the first failing step, or of the final step
        on success.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--readelf', required=True,
                        help='The readelf binary to run', metavar='PATH')
    parser.add_argument('--nm', required=True,
                        help='The nm binary to run', metavar='PATH')
    parser.add_argument('--strip',
                        help='The strip binary to run', metavar='PATH')
    parser.add_argument('--sofile', required=True,
                        help='Shared object file produced by linking command',
                        metavar='FILE')
    parser.add_argument('--tocfile', required=True,
                        help='Output table-of-contents file', metavar='FILE')
    parser.add_argument('--output', required=True,
                        help='Final output shared object file', metavar='FILE')
    parser.add_argument('--resource-whitelist',
                        help='Merge all resource whitelists into a single file.',
                        metavar='PATH')
    parser.add_argument('command', nargs='+', help='Linking command')
    args = parser.parse_args()

    # Work-around for gold being slow-by-default. http://crbug.com/632230
    link_env = dict(os.environ)
    link_env['LC_ALL'] = 'C'

    if args.resource_whitelist:
        candidates = WrapperUtils.ResolveRspLinks(args.command)
        WrapperUtils.CombineResourceWhitelists(candidates,
                                               args.resource_whitelist)

    # Step 1: run the actual link; bail out on failure.
    exit_code = subprocess.call(WrapperUtils.CommandToRun(args.command),
                                env=link_env)
    if exit_code != 0:
        return exit_code

    # Step 2: generate the contents of the TOC file.
    exit_code, toc = CollectTOC(args)
    if exit_code != 0:
        return exit_code

    # Step 3: rewrite the TOC file only if its contents changed.
    UpdateTOC(args.tocfile, toc)

    # Step 4: strip the linked shared object (if desired).
    if args.strip:
        exit_code = subprocess.call(WrapperUtils.CommandToRun(
            [args.strip, '--strip-unneeded', '-o', args.output, args.sofile]))
    return exit_code