def clean_csv(pathin):
    """Merge wrapped CSV lines and strip empty-quoting artifacts.

    A line that does not end in ';' or '"' is assumed to be a continuation
    and is merged with the following line.  The cleaned contents are written
    to '<pathin>_clean<ext>'.

    Returns (clean_path, clean_list).
    """
    fno, ext = os.path.splitext(pathin)
    clean_path = fno + '_clean' + ext
    myfile = txt_mixin.txt_file_with_list(pathin)
    N = len(myfile.list)
    i = 0
    pat = re.compile('.*[;"]$')
    while i < (N - 1):
        line = myfile.list[i]
        q = pat.match(line)
        if q is None:
            # this line needs to be merged with the next one down
            cur_line = myfile.list.pop(i)  # retrieves line and removes it from the list
            next_line = myfile.list[i]     # retrieve without removing
            myfile.list[i] = cur_line.rstrip() + ' ' + next_line.lstrip()
            N = len(myfile.list)
        else:
            i += 1
    myfile.replaceallre('(;"")+$', '')
    myfile.replaceallre('^""$', '')
    # bug fix: filter() returns a lazy iterator in Python 3; dump() would
    # exhaust it, leaving the returned clean_list empty.  Materialize a list.
    clean_list = [row for row in myfile.list if row]
    txt_mixin.dump(clean_path, clean_list)
    return clean_path, clean_list
def clean_csv(pathin):
    """Merge wrapped CSV lines and strip empty-quoting artifacts.

    A line that does not end in ';' or '"' is assumed to be a continuation
    and is merged with the following line.  The cleaned contents are written
    to '<pathin>_clean<ext>'.

    Returns (clean_path, clean_list).
    """
    fno, ext = os.path.splitext(pathin)
    clean_path = fno + '_clean' + ext
    myfile = txt_mixin.txt_file_with_list(pathin)
    N = len(myfile.list)
    i = 0
    pat = re.compile('.*[;"]$')
    while i < (N - 1):
        line = myfile.list[i]
        q = pat.match(line)
        if q is None:
            # this line needs to be merged with the next one down
            cur_line = myfile.list.pop(i)  # retrieves line and removes it from the list
            next_line = myfile.list[i]     # retrieve without removing
            myfile.list[i] = cur_line.rstrip() + ' ' + next_line.lstrip()
            N = len(myfile.list)
        else:
            i += 1
    myfile.replaceallre('(;"")+$', '')
    myfile.replaceallre('^""$', '')
    # bug fix: filter() returns a lazy iterator in Python 3; dump() would
    # exhaust it, leaving the returned clean_list empty.  Materialize a list.
    clean_list = [row for row in myfile.list if row]
    txt_mixin.dump(clean_path, clean_list)
    return clean_path, clean_list
def get_list(N):
    """Return the starter line list when N == 0, otherwise the lines of the
    previously saved file (module-level ``saved_path``)."""
    if N == 0:
        return ['import control', 'G_list = []', '']
    saved_file = txt_mixin.txt_file_with_list(saved_path)
    return saved_file.list
def _load_raw(self):
    """Read every file in self.csv_files into a list of line lists; cache
    the result on self.raw_data and return it."""
    self.raw_data = [txt_mixin.txt_file_with_list(path).list
                     for path in self.csv_files]
    return self.raw_data
def load_bode_options(filepath):
    """Load a Bode-options file and return (lines, data-set options).

    The raw lines are cleaned, the data-set options are extracted, and
    continued lines are then joined before returning.
    """
    lines = txt_mixin.txt_file_with_list(filepath).list
    lines = _clean(lines)
    ds_opts = get_data_set_options(lines)
    lines = _fix_continued_lines(lines)
    return lines, ds_opts
def fix_one_file(pathin):
    """Migrate deprecated 'IPython.Debugger' imports in pathin to
    'from IPython.core.debugger import Pdb', saving in place when any
    old-style import was found."""
    myfile = txt_mixin.txt_file_with_list(pathin)
    #old_inds = myfile.list.findallre('from +IPython.Debugger', match=0)
    old_inds = myfile.list.findall('IPython.Debugger')
    new_inds = myfile.list.findall('from IPython.core.debugger')
    new_line = 'from IPython.core.debugger import Pdb'
    if old_inds and not new_inds:
        # no modern import present: rewrite each old line in place
        for ind in old_inds:
            myfile.list[ind] = new_line
    elif (len(old_inds) == len(new_inds)):
        # assume I commented out the old and inserted the new.
        # bug fix: delete in reverse so earlier indices stay valid --
        # forward deletion shifted the remaining match indices.
        for ind in reversed(old_inds):
            # bug fix: startswith avoids IndexError on an empty line
            if myfile.list[ind].startswith('#'):
                myfile.list[ind:ind + 1] = []
            else:
                # fixed message typo ("isn not" -> "is not")
                print('found old and new inds, but old is not commented out:')
                print(myfile.list[ind])
    if old_inds:
        myfile.save(pathin)
def open_txt_list(pathin=None):
    """Open a text file (prompting via open_txt() when pathin is None) and
    return its non-empty lines wrapped in a txt_mixin.txt_list."""
    if pathin is None:
        pathin = open_txt()
    print('pathin = %s' % pathin)
    myfile = txt_mixin.txt_file_with_list(pathin)
    # bug fix: filter() is a lazy iterator in Python 3; materialize the
    # non-empty lines so txt_list receives an actual list.
    mylist = [line for line in myfile.list if line]
    return txt_mixin.txt_list(mylist)
def find_and_replace_one_file(inpath, outpath, repdict):
    """Apply every key -> value replacement from repdict to inpath and save
    the result to outpath; an existing outpath is never overwritten."""
    curfile = txt_mixin.txt_file_with_list(inpath)
    for pattern, replacement in repdict.items():
        curfile.replaceall(pattern, replacement)
    if os.path.exists(outpath):
        print('file already exists: %s' % outpath)
    else:
        curfile.save(outpath)
def ToHTML(self, startnum, title, basename='slide', fmt='%0.3d', reveal=True,
           presdir=None, author='Ryan Krauss'):
    """Render this slide as a standalone HTML page and write it to disk.

    Builds container/top/leftnav/content/footer divs, a footer table with
    author/title/navigation links, writes '<basename><fmt % startnum>.html'
    (under presdir when given) and returns the next slide number,
    startnum + 1.
    """
    outlist = []
    out = outlist.append  # shorthand: out(line) appends one HTML line
    out('<div id="container">')
    out('<div id="top">')
    out('<h1>%s</h1>' % self.title)
    out('</div>')
    #out('<div id="slidebody">')
    out('<div id="leftnav">')
    out('<p>')
    out('')
    out('(outline goes here)')
    firstlink = CreateHTMLLink('slide001.html', 'start')
    out(firstlink)
    out('</p>')
    out('</div>')
    out('<div id="content">')
    #out('<h2>Subheading</h2>')
    # each slide object contributes its own HTML fragment
    for item in self.objlist:
        outlist.extend(item.ToHTML(reveal=reveal))
    out('</div>')
    out('<div id="footer">')
    out('<TABLE width=100%>')
    out('<TR>')
    out('<TD width=25% align=left>')
    out(author)
    out('</TD>')
    out('<TD width=50% align=center>')
    out(title)
    out('</TD>')
    # previous/next navigation links; no 'previous' on the first slide
    navstr = ''
    if startnum > 1:
        prevname = CreateSlideName(startnum - 1, basename=basename, fmt=fmt)
        prevlink = CreateHTMLLink(prevname, backwardlink())
        navstr += prevlink + ' '
    nextname = CreateSlideName(startnum + 1, basename=basename, fmt=fmt)
    nextlink = CreateHTMLLink(nextname, forwardlink())
    navstr += nextlink
    out('<TD width=25% align=right>')
    out(navstr)
    out('</TD>')
    out('</TR>')
    out('</TABLE>')
    out('</div>')
    out('</div>')
    mypath = basename + fmt % startnum + '.html'
    if presdir:
        mypath = os.path.join(presdir, mypath)
    #myhtml = textfiles.htmllist.htmllist([], mypath)
    myhtml = txt_mixin.txt_file_with_list([], mypath)
    headerlines = ['<style type="text/css">',
                   'body {background-color: black}',
                   '</style>']
    myhtml.insertheader(self.title, headerlines=headerlines)
    myhtml.extend(outlist)
    myhtml.append('</BODY>')
    myhtml.append('</HTML>')
    myhtml.tofile()
    return startnum + 1
def load_and_parse_opts(pathin):
    """Load an options file, clean its lines, and return the option dict
    with the 'freqs' and 'points' entries evaluated from their string
    representations to Python values."""
    myfile = txt_mixin.txt_file_with_list(pathin)
    mylist = _clean(myfile.list)
    my_dict = get_data_set_options(mylist,
                                   key_list=["load_name", "save_name",
                                             "freqs", "points", "plot"])
    # bug fix: exec("val=...") does not bind function locals in Python 3,
    # so reading 'val' afterwards raised NameError.  eval() returns the
    # value directly.  NOTE(review): eval of file contents is unsafe on
    # untrusted input -- confirm these files are trusted.
    for key in ["freqs", "points"]:
        my_dict[key] = eval(my_dict[key])
    return my_dict
def test_one_file(actual_tex_name, expected_tex_name, cut_header=True):
    """Load actual_tex_name and extract its body, i.e. that which is between
    \\begin{document} and \\end{document}.  Then compare this body to
    expected_tex_name."""
    actual = txt_mixin.txt_file_with_list(actual_tex_name)
    expected = txt_mixin.txt_file_with_list(expected_tex_name)
    if cut_header:
        begin_inds = actual.findall('\\begin{document}')
        assert len(begin_inds) == 1, \
            'Did not find exactly one instance of \\begin{document}'
        end_inds = actual.findall('\\end{document}')
        assert len(end_inds) == 1, \
            'Did not find exactly one instance of \\end{document}'
        actual_body = actual.list[begin_inds[0] + 1:end_inds[0]]
    else:
        actual_body = actual.list
    return compare_two_bodies(actual_body, expected.list)
def seperator_sheet(pathin, line1, line2='', line3='',
                    headerpath='/home/ryan/siue/tenure/header.tex',
                    runlatex=1, space2='1.5in'):
    """Write a LaTeX separator/cover sheet to pathin: the header file plus a
    single \\coverpagevar line, then optionally run LaTeX on it."""
    cover_line = '\\coverpagevar{%s}{%s}{%s}{%s}' % (line1, line2, line3, space2)
    myfile = txt_mixin.txt_file_with_list(pathin=None)
    myfile.append_file_to_list(headerpath)
    for row in ('\\begin{document}', cover_line, '\\end{document}'):
        myfile.list.append(row)
    myfile.writefile(pathin)
    if runlatex:
        return pytexutils.RunLatex(pathin)
def seperator_sheet(pathin, line1, line2='', line3='',
                    headerpath='/Users/rkrauss/siue/dossiers/tenure/header.tex',
                    runlatex=1, space2='1.5in'):
    """Write a LaTeX separator/cover sheet to pathin: the header file plus a
    single \\coverpagevar line, then optionally run LaTeX on it."""
    cover_line = '\\coverpagevar{%s}{%s}{%s}{%s}' % (line1, line2, line3, space2)
    myfile = txt_mixin.txt_file_with_list(pathin=None)
    myfile.append_file_to_list(headerpath)
    for row in ('\\begin{document}', cover_line, '\\end{document}'):
        myfile.list.append(row)
    myfile.writefile(pathin)
    if runlatex:
        return pytexutils.RunLatex(pathin)
def combined_csvs(allfiles, pathout, pat2):
    """Concatenate the files pat2 % 1 .. pat2 % len(allfiles) into pathout,
    removing any pre-existing output file first; return the output object."""
    if os.path.exists(pathout):
        os.remove(pathout)
    outfile = txt_mixin.txt_file_with_list(pathout)
    for index in range(1, len(allfiles) + 1):
        outfile.append_file_to_list(pat2 % index)
    outfile.writefile(pathout)
    return outfile
def add_up_link_to_rst(pathin, uplink_path=None):
    """Append an RST '`up <...>`_' link to pathin unless one already exists;
    warn on stdout when more than one match is present."""
    if uplink_path is None:
        uplink_path = '../index.html'
    myfile = txt_mixin.txt_file_with_list(pathin)
    matches = myfile.findallre('`up <.*index.html>`_')
    #print('inds = '+str(matches))
    if len(matches) > 1:
        print('more than one up match: ' + pathin)
        for row in matches:
            print(' ' + myfile.list[row])
    if not matches:
        myfile.list.append('')
        myfile.list.append('`up <%s>`_' % uplink_path)
        myfile.save(pathin)
def _add_if_needed(pathin, pat, list_to_insert, ind=0):
    """Insert list_to_insert into pathin at index ind (append when ind is
    None or -1), but only when no existing line matches the regex pat.

    Bug fix: the diagnostic loop previously reused the name 'ind' as its
    loop variable, shadowing (and potentially clobbering) the insertion
    index parameter; it now uses a dedicated loop variable.
    """
    myfile = txt_mixin.txt_file_with_list(pathin)
    inds = myfile.findallre(pat)
    #print('inds = '+str(inds))
    if len(inds) > 1:
        print('more than one up match: ' + pathin)
        for match_ind in inds:
            print(' ' + myfile.list[match_ind])
    if not inds:
        if (ind is None) or (ind == -1):
            myfile.list.extend(list_to_insert)
        else:
            myfile.list[ind:ind] = list_to_insert
        myfile.save(pathin)
def test_one_file(actual_tex_name, expected_tex_name, cut_header=True):
    """Load actual_tex_name and extract its body, i.e. that which is between
    \\begin{document} and \\end{document}.  Then compare this body to
    expected_tex_name."""
    actual = txt_mixin.txt_file_with_list(actual_tex_name)
    expected = txt_mixin.txt_file_with_list(expected_tex_name)
    if not cut_header:
        return compare_two_bodies(actual.list, expected.list)
    begin_inds = actual.findall('\\begin{document}')
    assert len(begin_inds) == 1, \
        'Did not find exactly one instance of \\begin{document}'
    end_inds = actual.findall('\\end{document}')
    assert len(end_inds) == 1, \
        'Did not find exactly one instance of \\end{document}'
    actual_body = actual.list[begin_inds[0] + 1:end_inds[0]]
    return compare_two_bodies(actual_body, expected.list)
def ToLatex(self, indent=' '*4):
    """Convert the nested entry list into LaTeX itemize/enumerate source.

    Walks self.entrylist, opening and closing environments as the nesting
    level, enum flag, or section flag changes; the result is stored on
    self.texlist and wrapped in self.latexlist.
    """
    #print('in ToLatex')
    if not hasattr(self, 'entrylist'):
        self.FindNestLevels()
    texlist = []
    preventry = Entry('')  # sentinel "previous" entry before the first item
    openlist = []  # stack of entries whose environments are currently open
    for item in self.entrylist:
        #print('item.string = '+item.string)
        #print('item.ToString() = '+item.ToString())
        if item is not None:
            if item.sec and not preventry.sec:
                # close everybody
                closelist = CloseAll(openlist, indent)
                texlist.extend(closelist)
                texlist.append('')
                openlist = [item]
            else:
                while (item.level != preventry.level) or item.enum != preventry.enum or (preventry.sec and not item.sec):
                    # close and open new itemize or enumerate as necessary
                    if item.level < preventry.level:
                        #for n in range(preventry.level-item.level):
                        curclose = openlist.pop()
                        texlist.append(indent*(curclose.level-1)+curclose.CloseStr())
                        #ind = openlist.index(preventry)
                    elif item.level > preventry.level:
                        texlist.append(indent*(item.level-1)+item.OpenStr())
                        openlist.append(item)
                    elif item.level == preventry.level and ((item.enum != preventry.enum) or (item.sec != preventry.sec)):
                        # assume we need to close and open when switching
                        # from enum to itemize or vice versa
                        curclose = openlist.pop()
                        texlist.append(indent*(curclose.level-1)+curclose.CloseStr())
                        texlist.append(indent*(item.level-1)+item.OpenStr())
                        openlist.append(item)
                    # re-derive "previous" from the top of the open stack so
                    # the while condition is re-evaluated against it
                    if openlist:
                        preventry = openlist[-1]
                    else:
                        preventry = Entry('')
            texlist.append(item.ToString())
            preventry = item
        else:
            texlist.append('')
    # close any environments still open at the end, innermost first
    openlist.reverse()
    for item in openlist:
        texlist.append(indent*(item.level-1)+item.CloseStr())
    self.texlist = texlist
    self.latexlist = txt_mixin.txt_file_with_list()
    self.latexlist.list = []
    self.latexlist.list = texlist
def copy_announcements_forward(self, debug=0):
    """Copy the previous lecture's announcements into this lecture's
    exclude folder as 'reminders.rst', retitled 'Reminders'.

    The first three lines of the announcements file (its title block) are
    replaced by the rst_list template.
    """
    announce_path = os.path.join(self.prev_lecture_path, 'exclude',
                                 'announcements.rst')
    filein = txt_mixin.txt_file_with_list(announce_path)
    listout = copy.copy(rst_list)
    listout.replaceall('@@TITLE@@', 'Reminders')
    if debug:
        print('pathin = ' + announce_path)
        print('listin = ' + str(filein.list))
    if len(filein.list) > 3:
        listout.extend(filein.list[3:])
    pathout = os.path.join(self.lecture_path, 'exclude', 'reminders.rst')
    if debug:
        print('pathout = ' + pathout)
    txt_mixin.dump(pathout, listout)
def OutlineToSkeleton(self):
    """Turn a document outline into a document skeleton, by replacing
    outline markings with section and subsection commands."""
    commands = ['section', 'subsection', 'subsubsection']
    depth = len(commands)
    skeleton = []
    for entry in self.entrylist:
        if entry is None:
            skeleton.append('')
        elif 0 < entry.level <= depth:
            skeleton.append('\\' + commands[entry.level - 1] + '{' + entry.str + '}')
        else:
            skeleton.append(entry.str)
    self.skeleton = txt_mixin.txt_file_with_list()
    self.skeleton.list = skeleton
def process_includes(self):
    """Repeatedly include all lines from included files until '# include'
    is no longer found (capped at 10 passes)."""
    pat = re.compile('^# *[Ii]nclude[ :]*(.*)')
    for _ in range(10):
        inds = self.list.findallre('^# *[iI]nclude')
        if not inds:
            break
        # process only the first match per pass and then re-scan, because
        # inserting the included lines invalidates the remaining indices
        ind0 = inds[0]
        directive = self.list.pop(ind0)
        match = pat.search(directive)
        include_path = match.group(1)
        included = txt_mixin.txt_file_with_list(include_path)
        self.list[ind0:ind0] = copy.copy(included.list)
def seperator_sheet(
    pathin,
    line1,
    line2="",
    line3="",
    headerpath="/Users/rkrauss/siue/dossiers/tenure/header.tex",
    runlatex=1,
    space2="1.5in",
):
    """Write a LaTeX separator/cover sheet to pathin: the header file plus a
    single \\coverpagevar line, then optionally run LaTeX on it."""
    cover_line = "\\coverpagevar{%s}{%s}{%s}{%s}" % (line1, line2, line3, space2)
    myfile = txt_mixin.txt_file_with_list(pathin=None)
    myfile.append_file_to_list(headerpath)
    for row in ("\\begin{document}", cover_line, "\\end{document}"):
        myfile.list.append(row)
    myfile.writefile(pathin)
    if runlatex:
        return pytexutils.RunLatex(pathin)
def get_data_labels(self, arduino_path):
    """Extract the column labels for an Arduino test based on the
    assumption that the printing is handled by a function called
    print_line_serial"""
    myfile = txt_mixin.txt_file_with_list(arduino_path)
    # slice out the body of print_line_serial, from its 'void' line to the
    # next closing brace
    ind1 = myfile.findnext('void print_line_serial')
    ind2 = myfile.findnext('}', ind1)
    self.func_list = myfile.list[ind1:ind2]
    assert (self.func_list[0].find('void') > -1), "list did not start with void"
    self.func_list.pop(0)
    # drop comment lines and blank lines
    p_com = re.compile('[ \t]*//')
    filt1 = [item for item in self.func_list
             if not p_com.search(item)]
    filt2 = [item for item in filt1 if item.strip()]
    # remove trailing newline-printing statements from the last few lines.
    # NOTE(review): pops while indexing from the end (-1 down to -4); the
    # indices shift after each pop -- assumes at most a couple of matches.
    for i in range(-1, -5, -1):
        curline = filt2[i].strip()
        if curline.find("Serial.print('\\n')") == 0:
            filt2.pop(i)
    # get rid of printing commas
    filt3 = []
    for line in filt2:
        lineout = line.strip()
        lineout = lineout.replace('Serial.print(",");', '')
        filt3.append(lineout)
    self.filt_list = filt3
    # get variable names: capture the argument of each Serial.print(...) call
    labels = []
    p_var_name = re.compile("Serial.print\((.*)\);")
    for line in filt3:
        q = p_var_name.match(line)
        labels.append(q.group(1))
    self.labels = labels
    return self.labels
def append_lectures_rst(self):
    """Add the current lecture's date folder to lectures.rst (as
    '<date_folder>/index.rst') unless it is already listed."""
    lectures_dir = self.get_lectures_dir()
    rst_path = os.path.join(lectures_dir, 'lectures.rst')
    if not os.path.exists(rst_path):
        print('rst_path does not exist: ' + rst_path)
        return None
    myfile = txt_mixin.txt_file_with_list(rst_path)
    full_date_path = self.get_lectures_date_folder_from_textbox()
    date_folder = os.path.split(full_date_path)[1]
    if myfile.list.findall(date_folder):
        # already in, do nothing
        return None
    # strip trailing blank lines before appending the new entry
    while not myfile.list[-1]:
        myfile.list.pop(-1)
    myfile.list.append(' ' * 3 + os.path.join(date_folder, 'index.rst'))
    myfile.list.append('')
    myfile.save(rst_path)
def append_lectures_rst(self):
    """Add the current lecture's date folder to lectures.rst (as
    '<date_folder>/index.rst') unless it is already listed."""
    lectures_dir = self.get_lectures_dir()
    rst_path = os.path.join(lectures_dir, 'lectures.rst')
    if not os.path.exists(rst_path):
        print('rst_path does not exist: ' + rst_path)
        return None
    myfile = txt_mixin.txt_file_with_list(rst_path)
    full_date_path = self.get_lectures_date_folder_from_textbox()
    date_folder = os.path.split(full_date_path)[1]
    if myfile.list.findall(date_folder):
        # already in, do nothing
        return None
    # strip trailing blank lines before appending the new entry
    while not myfile.list[-1]:
        myfile.list.pop(-1)
    myfile.list.append(' ' * 3 + os.path.join(date_folder, 'index.rst'))
    myfile.list.append('')
    myfile.save(rst_path)
def fix_one_file(pathin):
    """Migrate deprecated 'IPython.Debugger' imports in pathin to
    'from IPython.core.debugger import Pdb', saving in place when any
    old-style import was found."""
    myfile = txt_mixin.txt_file_with_list(pathin)
    #old_inds = myfile.list.findallre('from +IPython.Debugger', match=0)
    old_inds = myfile.list.findall('IPython.Debugger')
    new_inds = myfile.list.findall('from IPython.core.debugger')
    new_line = 'from IPython.core.debugger import Pdb'
    if old_inds and not new_inds:
        # no modern import present: rewrite each old line in place
        for ind in old_inds:
            myfile.list[ind] = new_line
    elif (len(old_inds) == len(new_inds)):
        # assume I commented out the old and inserted the new.
        # bug fix: delete in reverse so earlier indices stay valid --
        # forward deletion shifted the remaining match indices.
        for ind in reversed(old_inds):
            # bug fix: startswith avoids IndexError on an empty line
            if myfile.list[ind].startswith('#'):
                myfile.list[ind:ind + 1] = []
            else:
                # fixed message typo ("isn not" -> "is not")
                print('found old and new inds, but old is not commented out:')
                print(myfile.list[ind])
    if old_inds:
        myfile.save(pathin)
def Add_Header(self):
    """Prepend the student-evaluation LaTeX header to self.latex and close
    the document, storing the combined lines on self.full_latex."""
    if not hasattr(self, 'latex'):
        self.Build_Latex()
    header = txt_mixin.txt_file_with_list(
        '/home/ryan/siue/tenure/student_evaluations/header.tex')
    self.full_latex = header.list + self.latex + ['\\end{document}']
# NOTE(review): this chunk begins mid-method -- the enclosing def (which
# defines keepgoing and self.list) is outside this view.
n = 0
self.list_out = []
#Pdb().set_trace()
# pop chunks until PopNext returns a falsy value or every line is consumed
while keepgoing and (n < len(self.list)):
    chunk = self.PopNext()
    if chunk:
        clean_chunk = self._CleanChunk(chunk)
        self.list_out.extend(clean_chunk)
    else:
        keepgoing = False
    n += 1
return self.list_out

def save(self, outpath):
    # write the processed lines to outpath
    txt_mixin.dump(outpath, self.list_out)

if __name__ == '__main__':
    # demo/driver: pop the sage chunks out of one rst file and save them
    # next to it with a .sage extension
    filepath = '/home/ryan/siue/Research/DT_TMM/cantilever_beam/two_masses_analysis.rst'
    import txt_mixin
    myfile = txt_mixin.txt_file_with_list(filepath)
    mylist = myfile.list
    mypopper = rst_popper(mylist)
    mypopper.Execute()
    pne, ext = os.path.splitext(filepath)
    outpath = pne + '.sage'
    mypopper.save(outpath)
from optparse import OptionParser

# command-line tool: apply one regex replacement to a file, in place
usage = 'usage: %prog [options] regexp replacement inputfile'
parser = OptionParser(usage)
## parser.add_option("-r", "--runlatex", dest="runlatex", \
##                   help="Run LaTeX after presentation is converted to tex.", \
##                   default=1, type="int")
## parser.add_option("-s", "--sectiond", dest="sections", \
##                   help="Indices of the sections of the document that you want converted to LaTeX.", \
##                   default='', type="string")
## parser.add_option("-o", "--output", dest="output", \
##                   help="Desired output path or filename.", \
##                   default='', type="string")

(options, args) = parser.parse_args()
print('options=' + str(options))
print('args=' + str(args))

pat, replacement, pathin = args[0], args[1], args[2]
myfile = txt_mixin.txt_file_with_list(pathin)
myfile.replaceallre(pat, replacement)
myfile.save(pathin)
## parser.add_option("-c", "--case", dest="case", \ ## help="A string containing the list of cases to run:\n" + ## "1 = siue office ssh \n 2 = home ssh \n" + \ ## "3 = CORSAIR Fall 2010 \n 4 = IOMEGA Fall 2010", \ ## default='12', type="string") ## parser.add_option("-p", action="store_true", dest="set_perms", \ ## help="set website permissions") ## parser.set_defaults(set_perms=False) (options, args) = parser.parse_args() filename = args[0] myfile = txt_mixin.txt_file_with_list(filename) pat = '^\\\\item \\(([0-9]+) points\\)' p = re.compile(pat) inds = myfile.findallre(pat) assert len(inds) > 0, "Did not find any lines that start with \\item (xx points)" total = 0 for i, ind in enumerate(inds): curline = myfile.list[ind] q = p.search(curline) curpoints = int(q.group(1)) print('Problem %i: %s points' % (i+1, curpoints))
from scipy import *
import spreadsheet

# map the spreadsheet's column headers onto short attribute names
mymap = {'Lastname': 'lastname', 'Firstname': 'firstname', 'student ID': 'ID',
         '356': 'ME356'}
mysheet = spreadsheet.CSVSpreadSheet('class_list.csv', colmap=mymap)
labels = ['Lastname', 'Firstname']
mysheet.FindLabelRow(labels)
mysheet.MapCols()

import txt_mixin
emails = txt_mixin.txt_file_with_list('email_addresses.csv')

def try_one_email(strin):
    # True only when exactly one line of the email file matches strin
    ind_list = emails.findall(strin)
    if len(ind_list) == 1:
        return True
    else:
        return False

def find_email(first, last, verbosity=1):
    # NOTE(review): this definition is cut off at the end of this view;
    # it appears to try e-mail username guesses of total length N built
    # from a prefix of the first name plus the last name -- confirm.
    N = 7
    nf = len(first)
    nl = len(last)
    for i in range(nf):
        curfirst = first[0:i+1]
        n = N - len(curfirst)
        if n > nl:
            curlast = last
        else:
def load_md(self):
    """Read the file at self.path and store its lines on self.list."""
    self.list = txt_mixin.txt_file_with_list(self.path).list
# NOTE(review): this chunk begins mid-script; exe_file, dest_dir and
# dist_dir are defined earlier, outside this view.
junk, exe_name = os.path.split(exe_file)
# derive the windows installer name from the linux executable name
win_name = re.sub('linux.*\\.exe', 'win32.exe', exe_name)
win_path = os.path.join(dest_dir, win_name)
shutil.copy2(exe_file, win_path)
gz_pat = os.path.join(dist_dir, 'controls*.tar.gz')
gz_files = glob.glob(gz_pat)
assert len(gz_files) == 1, "Found more than one gz file."
gz_path = gz_files[0]
junk, gz_name = os.path.split(gz_path)
shutil.copy2(gz_path, dest_dir)
gz_dest_path = os.path.join(dest_dir, gz_name)
# rewrite the download links in the rst page to point at the new files
exe_re = '<controls-.*\\.exe>`_'
gz_re = '<controls-.*\\.tar\\.gz>`_'
exe_new = '<%s>`_' % win_name
gz_new = '<%s>`_' % gz_name
rst_path = '/home/ryan/siue/classes/mechatronics/2010/python_controls/python.rst'
myfile = txt_mixin.txt_file_with_list(rst_path)
myfile.replaceallre(exe_re, exe_new)
myfile.replaceallre(gz_re, gz_new)
myfile.save(rst_path)
# regenerate the html page from the updated rst
fne, ext = os.path.splitext(rst_path)
html_path = fne + '.html'
rstcmd = 'rst2html %s %s' % (rst_path, html_path)
print(rstcmd)
os.system(rstcmd)
import spreadsheet

# map the spreadsheet's column headers onto short attribute names
mymap = {
    'Lastname': 'lastname',
    'Firstname': 'firstname',
    'student ID': 'ID',
    '356': 'ME356'
}
mysheet = spreadsheet.CSVSpreadSheet('class_list.csv', colmap=mymap)
labels = ['Lastname', 'Firstname']
mysheet.FindLabelRow(labels)
mysheet.MapCols()

import txt_mixin
emails = txt_mixin.txt_file_with_list('email_addresses.csv')

def try_one_email(strin):
    # True only when exactly one line of the email file matches strin
    ind_list = emails.findall(strin)
    if len(ind_list) == 1:
        return True
    else:
        return False

def find_email(first, last, verbosity=1):
    # NOTE(review): this definition is cut off at the end of this view;
    # the loop body continues past what is shown here.
    N = 7
    nf = len(first)
    nl = len(last)
    for i in range(nf):
## parser.add_option("-c", "--case", dest="case", \ ## help="A string containing the list of cases to run:\n" + ## "1 = siue office ssh \n 2 = home ssh \n" + \ ## "3 = CORSAIR Fall 2010 \n 4 = IOMEGA Fall 2010", \ ## default='12', type="string") ## parser.add_option("-p", action="store_true", dest="set_perms", \ ## help="set website permissions") ## parser.set_defaults(set_perms=False) (options, args) = parser.parse_args() filename = args[0] myfile = txt_mixin.txt_file_with_list(filename) pat = '^\\\\item \\(([0-9]+) points\\)' p = re.compile(pat) inds = myfile.findallre(pat) assert len( inds) > 0, "Did not find any lines that start with \\item (xx points)" total = 0 for i, ind in enumerate(inds): curline = myfile.list[ind] q = p.search(curline) curpoints = int(q.group(1))
import os, txt_mixin, copy, re
from IPython.core.debugger import Pdb

# split a literature-review .tex file into one list of lines per
# \subsection chunk
texpath = '/home/ryan/siue/Research/litreview/article_per_day/article_per_day.tex'
texfile = txt_mixin.txt_file_with_list(texpath)
texlist = texfile.list
ssinds = texlist.findall('\\subsection{')
sslists = []
# pair each subsection start with the next one; -3 bounds the final chunk
# (presumably trimming trailing closing lines -- TODO confirm)
nextinds = ssinds[1:] + [-3]
for curind, nextind in zip(ssinds, nextinds):
    curlist = texlist[curind:nextind]
    sslists.append(curlist)
secinds = texlist.findall('\\section{')

## \subsubsection{Labels}
## capstone design, mechatronics, education, TUES 2011
## \subsubsection{Rating}
## \myrating{3}
## \subsubsection{Path}
## \mylink{articles/education/Identifying_barriers_to_and_outcomes_of_interdisciplinarity_in_the_engineering_classroom.pdf}
# NOTE(review): this chunk begins mid-script; exe_file, win_path, win_name,
# dist_dir and dest_dir are defined earlier, outside this view.
shutil.copy2(exe_file, win_path)
gz_pat = os.path.join(dist_dir, 'controls*.tar.gz')
gz_files = glob.glob(gz_pat)
assert len(gz_files)==1, "Found more than one gz file."
gz_path = gz_files[0]
junk, gz_name = os.path.split(gz_path)
shutil.copy2(gz_path, dest_dir)
gz_dest_path = os.path.join(dest_dir, gz_name)
# rewrite the download links in the rst page to point at the new files
exe_re = '<controls-.*\\.exe>`_'
gz_re = '<controls-.*\\.tar\\.gz>`_'
exe_new = '<%s>`_' % win_name
gz_new = '<%s>`_' % gz_name
rst_path = '/home/ryan/siue/classes/mechatronics/2010/python_controls/python.rst'
myfile = txt_mixin.txt_file_with_list(rst_path)
myfile.replaceallre(exe_re, exe_new)
myfile.replaceallre(gz_re, gz_new)
myfile.save(rst_path)
# regenerate the html page from the updated rst
fne, ext = os.path.splitext(rst_path)
html_path = fne + '.html'
rstcmd = 'rst2html %s %s' % (rst_path, html_path)
print(rstcmd)
os.system(rstcmd)
# command-line tool: apply one regex replacement to a file, in place
usage = 'usage: %prog [options] regexp replacement inputfile'
parser = OptionParser(usage)
## parser.add_option("-r", "--runlatex", dest="runlatex", \
##                   help="Run LaTeX after presentation is converted to tex.", \
##                   default=1, type="int")
## parser.add_option("-s", "--sectiond", dest="sections", \
##                   help="Indices of the sections of the document that you want converted to LaTeX.", \
##                   default='', type="string")
## parser.add_option("-o", "--output", dest="output", \
##                   help="Desired output path or filename.", \
##                   default='', type="string")

(options, args) = parser.parse_args()
print('options='+str(options))
print('args='+str(args))

pat, replacement, pathin = args[0], args[1], args[2]
myfile = txt_mixin.txt_file_with_list(pathin)
myfile.replaceallre(pat, replacement)
myfile.save(pathin)
def _load_emails(self):
    """Parse the comma-separated addresses on the first line of
    self.raw_email_filename into a txt_list stored on self.emails."""
    raw_file = txt_mixin.txt_file_with_list(self.raw_email_filename)
    stripped = [addr.strip() for addr in raw_file.list[0].split(',')]
    self.emails = txt_mixin.txt_list(stripped)