def commit_factory(self, commit_id):
    "Make a Commit object holding data for a specified commit ID."
    commit = Commit(self, commit_id)
    # Strip the "refs/<kind>/" prefix (e.g. "refs/heads/") to get the bare
    # branch name.
    commit.branch = re.sub(r"^refs/[^/]*/", "", self.refname)
    # Compute a description for the revision
    if self.revformat == 'raw':
        commit.rev = commit.commit
    elif self.revformat == 'short':
        commit.rev = ''
    else: # self.revformat == 'describe'
        commit.rev = do("git describe %s 2>/dev/null" % shellquote(commit.commit))
    if not commit.rev:
        # Fall back to an abbreviated hash (12 hex digits) when describe
        # produced nothing, and for the 'short' format.
        commit.rev = commit.commit[:12]
    # Extract the meta-information for the commit
    commit.files = do("git diff-tree -r --name-only " + shellquote(commit.commit))
    # diff-tree's first output line is the commit hash itself; drop it and
    # flatten the remaining per-line paths into one space-separated string.
    commit.files = " ".join(commit.files.strip().split("\n")[1:])
    # Design choice: for git we ship only the first message line, which is
    # conventionally supposed to be a summary of the commit.  Under
    # other VCSes a different choice may be appropriate.
    commit.author_name, commit.mail, commit.logmsg = \
        do("git log -1 '--pretty=format:%an%n%ae%n%s' " + shellquote(commit.commit)).split("\n")
    # This discards the part of the author's address after @.
    # Might be nice to ship the full email address, if not
    # for spammers' address harvesters - getting this wrong
    # would make the freenode #commits channel into harvester heaven.
    commit.author = commit.mail.split("@")[0]
    commit.author_date, commit.commit_date = \
        do("git log -1 '--pretty=format:%ai|%ci' " + shellquote(commit.commit)).split("|")
    return commit
def get_app_path_and_sync_command(viewer, path_pdf, path_tex_file, line_number):
    """Return the viewer's application path and its pdfsync command.

    The returned tuple holds the full path to the viewer application and a
    shell command that makes the viewer display the PDF location matching
    ``line_number`` of ``path_tex_file``.  Either element is ``None`` when it
    could not be determined (only Skim supports the sync command).

    Arguments:

        viewer: The name of the PDF viewer application.
        path_pdf: The path to the generated PDF file.
        path_tex_file: The path to the tex file for which we want to generate
            the pdfsync command.
        line_number: The line in the tex file for which we want to get the
            synchronization command.

    Examples:

        # We assume that Skim is installed
        >>> app_path, sync_command = get_app_path_and_sync_command(
        ...     'Skim', 'test.pdf', 'test.tex', 1)
        >>> print('({}, {})'.format(app_path,
        ...     sync_command)) # doctest:+ELLIPSIS
        (.../Skim.app, .../Skim.app/.../displayline' 1 test.pdf test.tex)

        # Preview has no pdfsync support
        >>> app_path, sync_command = get_app_path_and_sync_command(
        ...     'Preview', 'test.pdf', 'test.tex', 1)
        >>> print('({}, {})'.format(app_path,
        ...     sync_command)) # doctest:+ELLIPSIS
        (/Applications/Preview.app, None)

    """
    path_to_viewer = get_app_path(viewer)
    if not (path_to_viewer and viewer == 'Skim'):
        # Viewer missing, or it has no pdfsync support.
        return path_to_viewer, None
    displayline = "'{}/Contents/SharedSupport/displayline' ".format(
        path_to_viewer)
    arguments = "{} {} {}".format(line_number, shellquote(path_pdf),
                                  shellquote(path_tex_file))
    return path_to_viewer, displayline + arguments
def get_app_path_and_sync_command(viewer, path_pdf, path_tex_file, line_number):
    """Return the viewer's application path and its pdfsync command.

    Returns a tuple ``(app_path, sync_command)``; ``sync_command`` is a shell
    command that tells the viewer to display the PDF output corresponding to
    ``line_number`` of the tex file.  A value is ``None`` when it could not be
    determined — only Skim currently gets a sync command.

    Arguments:

        viewer: The name of the PDF viewer application.
        path_pdf: The path to the generated PDF file.
        path_tex_file: The path to the tex file for which we want to generate
            the pdfsync command.
        line_number: The line in the tex file for which we want to get the
            synchronization command.

    Examples:

        # We assume that Skim is installed
        >>> app_path, sync_command = get_app_path_and_sync_command(
        ...     'Skim', 'test.pdf', 'test.tex', 1)
        >>> print('({}, {})'.format(app_path,
        ...     sync_command)) # doctest:+ELLIPSIS
        (.../Skim.app, .../Skim.app/.../displayline' 1 test.pdf test.tex)

        # Preview has no pdfsync support
        >>> app_path, sync_command = get_app_path_and_sync_command(
        ...     'Preview', 'test.pdf', 'test.tex', 1)
        >>> print('({}, {})'.format(app_path,
        ...     sync_command)) # doctest:+ELLIPSIS
        (/Applications/Preview.app, None)

    """
    path_to_viewer = get_app_path(viewer)
    sync_command = None
    if viewer == 'Skim' and path_to_viewer:
        # Skim ships a "displayline" helper inside its bundle.
        sync_command = " ".join([
            "'{}/Contents/SharedSupport/displayline'".format(path_to_viewer),
            str(line_number), shellquote(path_pdf),
            shellquote(path_tex_file)])
    return path_to_viewer, sync_command
def run_cmd(cmd, env=None, cwd=None):
    """Run *cmd* (an argv list) after echoing it, shell-quoted, to stdout.

    Extra environment variables in *env* are merged on top of the current
    process environment and shown as ``NAME=value`` assignments before the
    command, like a shell trace line.
    """
    extra = env if env else {}
    quoted_cmd = " ".join(shellquote(part) for part in cmd)
    assignments = " ".join("%s=%s" % (name, shellquote(value))
                           for name, value in extra.items())
    # Echo a "+ VAR=val ... command" trace line before executing.
    print("+ " + assignments + " " + quoted_cmd)
    merged_env = os.environ.copy()
    merged_env.update(extra)
    subprocess.check_call(cmd, env=merged_env, cwd=cwd)
def run_cmd(cmd, env=None, cwd=None):
    """Execute *cmd* (argv list), printing a shell-style trace line first.

    *env* supplies extra environment variables layered over ``os.environ``;
    they are echoed as ``NAME=value`` pairs ahead of the quoted command.
    """
    cmd_str = " ".join(map(shellquote, cmd))
    env_extra = env or {}
    prefix = ["%s=%s" % (k, shellquote(v)) for k, v in env_extra.items()]
    print("+ " + " ".join(prefix) + " " + cmd_str)
    merged = dict(os.environ)
    merged.update(env_extra)
    subprocess.check_call(cmd, env=merged, cwd=cwd)
def puppet(command, *args):
    '''Execute puppet command locally'''
    confdir = relative_path('puppet')
    vardir = relative_path('puppet', 'var')
    # Assemble "puppet <command> --confdir=... --vardir=... <args...>" with
    # every user-controlled piece shell-quoted.
    pieces = ['puppet', shellquote(command),
              '--confdir=' + shellquote(confdir),
              '--vardir=' + shellquote(vardir)]
    pieces.extend(shellquote(extra) for extra in args)
    return local(' '.join(pieces), capture=True)
def expand_name(filename, program='pdflatex'):
    """Get the expanded file name for a certain tex file.

    If ``filename`` exists it is returned unchanged; otherwise ``kpsewhich``
    is asked to locate it for the given tex ``program``.  When kpsewhich
    finds nothing, ``filename`` is returned as-is.

    Arguments:

        filename: The name of the file we want to expand.
        program: The name of the tex program for which we want to expand the
            name of the file.

    Returns: ``str``

    Examples:

        >>> print(expand_name('Tests/TeX/text.tex'))
        Tests/TeX/text.tex
        >>> print(expand_name('non_existent_file.tex'))
        non_existent_file.tex

    """
    if isfile(filename):
        return filename
    stdout.flush()
    # Fix 1: shell-quote `program` too instead of splicing it into single
    # quotes by hand (a quote character in `program` would break the command).
    run_object = Popen("kpsewhich -progname={} {}".format(
        shellquote(program), shellquote(filename)), shell=True, stdout=PIPE,
        universal_newlines=True)
    # Fix 2: communicate() both reads stdout and reaps the child; the
    # original read stdout and never wait()ed, leaking a zombie process.
    output, _ = run_object.communicate()
    expanded_filepath = output.strip()
    return expanded_filepath if expanded_filepath else filename
def get_documentation_files(texmf_directory):
    """Get a dictionary containing tex documentation files.

    This function searches all directories under the ``texmf`` root for dvi
    or pdf files that might be documentation. It returns a dictionary
    containing file-paths. The dictionary uses the filenames without their
    extensions as keys.

    Arguments:

        texmf_directory: The location of the main tex and metafont directory.

    Returns: ``{str: str}``

    Examples:

        >>> texmf_directory = check_output(
        ...     "kpsewhich --expand-path '$TEXMFMAIN'", shell=True,
        ...     universal_newlines=True).strip()
        >>> documentation_files = get_documentation_files(texmf_directory)
        >>> print(documentation_files['lastpage']) # doctest:+ELLIPSIS
        /.../lastpage.pdf

    """
    # Fix: the regex is in a raw string now — '\.' in a plain string is an
    # invalid escape sequence (SyntaxWarning on Python 3.12+).  The bytes
    # handed to the shell are unchanged.
    # NOTE(review): `find -E` is BSD find syntax (macOS); this fails on GNU
    # find — presumably intentional for a TextMate bundle.
    doc_files = check_output(
        r"find -E {} -regex '.*\.(pdf|dvi)' -type f".format(
            shellquote(texmf_directory)),
        shell=True, universal_newlines=True).splitlines()
    # Later duplicates of a basename overwrite earlier ones.
    return {basename(splitext(line)[0]): line.strip() for line in doc_files}
def run_biber(filename, verbose=False):
    """Run biber for a certain file.

    The interface for this function is exactly the same as the one for
    ``run_bibtex``. For the list of arguments and return values please take a
    look at the doc string of ``run_bibtex``.

    Examples:

        >>> chdir('Tests/TeX')
        >>> # Generate files for biber
        >>> call('pdflatex external_bibliography_biber.tex > /dev/null',
        ...      shell=True)
        0
        >>> run_biber('external_bibliography_biber') # doctest:+ELLIPSIS
        <...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    command = "biber {}".format(shellquote(filename))
    process = Popen(command, shell=True, stdout=PIPE, stdin=PIPE,
                    stderr=STDOUT, close_fds=True, universal_newlines=True)
    # Parse biber's output for errors/warnings while it runs.
    parser = BiberParser(process.stdout, verbose)
    fatal, errors, warnings = parser.parse_stream()
    status = process.wait()
    return status, fatal, errors, warnings
def expand_name(filename, program='pdflatex'):
    """Get the expanded file name for a certain tex file.

    Arguments:

        filename: The name of the file we want to expand.
        program: The name of the tex program for which we want to expand the
            name of the file.

    Returns: ``str``

    Examples:

        >>> expand_name('Tests/TeX/text.tex')
        './Tests/TeX/text.tex'
        >>> expand_name('non_existent_file.tex')
        ''

    """
    stdout.flush()
    # Fix 1: universal_newlines=True so the result is `str` on Python 3 as
    # the docstring promises (the sibling variant of this function has it).
    run_object = Popen("kpsewhich -progname='{}' {}".format(
        program, shellquote(filename)), shell=True, stdout=PIPE,
        universal_newlines=True)
    # Fix 2: communicate() reaps the child; reading stdout without a wait()
    # left a zombie process behind.
    output, _ = run_object.communicate()
    return output.strip()
def get_documentation_files(texmf_directory):
    """Get a dictionary containing tex documentation files.

    This function searches all directories under the ``texmf`` root for dvi
    or pdf files that might be documentation. It returns a dictionary
    containing file-paths. The dictionary uses the filenames without their
    extensions as keys.

    Arguments:

        texmf_directory: The location of the main tex and metafont directory.

    Returns: ``{str: str}``

    Examples:

        >>> texmf_directory = check_output(
        ...     "kpsewhich --expand-path '$TEXMFMAIN'", shell=True,
        ...     universal_newlines=True).strip()
        >>> documentation_files = get_documentation_files(texmf_directory)
        >>> print(documentation_files['lastpage']) # doctest:+ELLIPSIS
        /.../lastpage.pdf

    """
    # Fix: raw string for the regex — '\.' in a plain string literal is an
    # invalid escape sequence (SyntaxWarning on Python 3.12+); the command
    # actually sent to the shell is byte-identical.
    doc_files = check_output(
        r"find -E {} -regex '.*\.(pdf|dvi)' -type f".format(
            shellquote(texmf_directory)),
        shell=True, universal_newlines=True).splitlines()
    return {basename(splitext(line)[0]): line.strip() for line in doc_files}
def run_bibtex(filename, verbose=False):
    """Run bibtex for a certain file.

    Run bibtex for ``filename`` and return the following values:

    - The return value of the bibtex runs done by this function: ``0`` after
      a successful run; anything else indicates problems.
    - Fatal error: Specifies if there was a fatal error while processing the
      bibliography.
    - Errors: The number of non-fatal errors encountered while processing the
      bibliography.
    - Warnings: The number of warnings found while running this function.

    Arguments:

        filename: Specifies the name of the tex file without its extension.
            This information will be used to find the bibliography.
        verbose: Specifies if the output by this function should be verbose.

    Returns: ``(int, bool, int, int)``

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_bibtex('external_bibliography') # doctest:+ELLIPSIS
        <h4>Processing: ...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    folder = dirname(filename) if dirname(filename) else '.'
    # Process the main aux file plus any bu<N>.aux files (multibib).
    aux_pattern = r'.*/({}|bu\d+)\.aux$'.format(filename)
    aux_files = [candidate for candidate in glob("{}/*.aux".format(folder))
                 if match(aux_pattern, candidate)]
    stat, fatal, errors, warnings = 0, False, 0, 0
    for aux in aux_files:
        print('<h4>Processing: {} </h4>'.format(aux))
        process = Popen("bibtex {}".format(shellquote(aux)), shell=True,
                        stdout=PIPE, stdin=PIPE, stderr=STDOUT,
                        close_fds=True, universal_newlines=True)
        parser = BibTexParser(process.stdout, verbose)
        run_fatal, run_errors, run_warnings = parser.parse_stream()
        # Accumulate results across all aux files.
        fatal |= run_fatal
        errors += run_errors
        warnings += run_warnings
        stat |= process.wait()
    return stat, fatal, errors, warnings
def compile_catalog():
    '''Compile catalog for host'''
    # Fix: do NOT shellquote() a path handed to open() — shell quoting is for
    # shell command lines, and here it would embed literal quote characters
    # into the filename (e.g. writing to "'host.json'" on disk).
    catalog_file = relative_path('catalog', env.host + ".json")
    with open(catalog_file, 'wb') as catalog:
        json = puppet('master', '--compile', env.host)
        # Puppet prints a non-JSON preamble; keep only the JSON document,
        # which starts at the first '{'.
        catalog.write(json[json.index('{'):])
def run_latex(ltxcmd, texfile, cache_filename, verbose=False):
    """Run the flavor of latex specified by ltxcmd on texfile.

    This function returns:

    - the return value of ``ltxcmd``,
    - a value specifying if there were any fatal flaws (``True``) or not
      (``False``), and
    - the number of errors and
    - the number of warnings encountered while processing ``texfile``.

    Arguments:

        cache_filename: The path to the cache file for the current tex
            project. This file is used to store information about gutter
            marks between runs of ``texmate``.
        ltxcmd: The latex command which should be used translate ``texfile``.
        texfile: The path of the tex file which should be translated by
            ``ltxcmd``.

    Returns: ``(int, bool, int, int)``

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_latex(ltxcmd='pdflatex',
        ...           cache_filename='.external_bibliography.lb',
        ...           texfile='external_bibliography.tex') # doctest:+ELLIPSIS
        <h4>...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    command_line = "{} {}".format(ltxcmd, shellquote(texfile))
    process = Popen(command_line, shell=True, stdout=PIPE, stdin=PIPE,
                    stderr=STDOUT, close_fds=True, universal_newlines=True)
    parser = LaTexParser(process.stdout, verbose, texfile)
    fatal, errors, warnings = parser.parse_stream()
    status = process.wait()
    # Persist gutter-mark information for the next texmate run.
    update_marks(cache_filename, parser.marks)
    return status, fatal, errors, warnings
def make_gluster_args(self, *args):
    """Build the ``(args, kwargs)`` pair used to invoke the gluster CLI.

    When ``self.remote_user`` is set, the gluster command is wrapped in an
    ssh invocation targeting ``user@host`` with every argument shell-quoted;
    otherwise it is marked to run locally as root.
    """
    command = ('gluster',) + args
    kwargs = {}
    if self.remote_user:
        target = '@'.join([self.remote_user, self.host])
        quoted = ' '.join(shellquote(part) for part in command)
        command = ('ssh', target, quoted)
    else:
        kwargs['run_as_root'] = True
    return command, kwargs
def run_latex(ltxcmd, texfile, cache_filename, verbose=False):
    """Run the flavor of latex specified by ltxcmd on texfile.

    This function returns:

    - the return value of ``ltxcmd``,
    - a value specifying if there were any fatal flaws (``True``) or not
      (``False``), and
    - the number of errors and
    - the number of warnings encountered while processing ``texfile``.

    Arguments:

        cache_filename: The path to the cache file for the current tex
            project. This file is used to store information about gutter
            marks between runs of ``texmate``.
        ltxcmd: The latex command which should be used translate ``texfile``.
        texfile: The path of the tex file which should be translated by
            ``ltxcmd``.

    Returns: ``(int, bool, int, int)``

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_latex(ltxcmd='pdflatex',
        ...           cache_filename='.external_bibliography.lb',
        ...           texfile='external_bibliography.tex') # doctest:+ELLIPSIS
        <h4>...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    process = Popen("{} {}".format(ltxcmd, shellquote(texfile)), shell=True,
                    stdout=PIPE, stdin=PIPE, stderr=STDOUT, close_fds=True,
                    universal_newlines=True)
    stream_parser = LaTexParser(process.stdout, verbose, texfile)
    fatal, errors, warnings = stream_parser.parse_stream()
    exit_status = process.wait()
    # Remember gutter marks for the next run of texmate.
    update_marks(cache_filename, stream_parser.marks)
    return exit_status, fatal, errors, warnings
def refresh_viewer(viewer, pdf_path,
                   tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
    """Tell the specified PDF viewer to refresh the PDF output.

    If the viewer does not support refreshing PDFs (e.g. “Preview”) then this
    command will do nothing.  A non-zero value is returned when the viewer
    could not be found or does not support a “manual” refresh.  The viewer
    needs to be open beforehand for this to work.

    Arguments:

        viewer: The viewer for which we want to refresh the output of the PDF
            file specified in ``pdf_path``.
        pdf_path: The path to the PDF file for which we want to refresh the
            output.
        tm_bundle_support: The location of the “LaTeX Bundle” support folder.

    Returns: ``int``

    Examples:

        >>> # The viewer application needs to be open before we call the
        >>> # function
        >>> call('open -a Skim', shell=True)
        0
        >>> refresh_viewer('Skim', 'test.pdf',
        ...                tm_bundle_support=realpath('Support'))
        <p class="info">Tell Skim to refresh 'test.pdf'</p>
        0

    """
    print('<p class="info">Tell {} to refresh \'{}\'</p>'.format(
        viewer, pdf_path))
    if viewer not in ('Skim', 'TeXShop'):
        # Only Skim and TeXShop expose a scriptable refresh.
        return 1
    script = "osascript '{}/bin/refresh_viewer.scpt' {} {} ".format(
        tm_bundle_support, viewer, shellquote(pdf_path))
    return call(script, shell=True)
def refresh_viewer(viewer, pdf_path,
                   tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
    """Tell the specified PDF viewer to refresh the PDF output.

    Viewers without refresh support (e.g. “Preview”) are left alone and a
    non-zero value is returned; the same happens when the viewer could not be
    found.  ``viewer`` has to be running already for the refresh to land.

    Arguments:

        viewer: The viewer for which we want to refresh the output of the PDF
            file specified in ``pdf_path``.
        pdf_path: The path to the PDF file for which we want to refresh the
            output.
        tm_bundle_support: The location of the “LaTeX Bundle” support folder.

    Returns: ``int``

    Examples:

        >>> # The viewer application needs to be open before we call the
        >>> # function
        >>> call('open -a Skim', shell=True)
        0
        >>> refresh_viewer('Skim', 'test.pdf',
        ...                tm_bundle_support=realpath('Support'))
        <p class="info">Tell Skim to refresh 'test.pdf'</p>
        0

    """
    info = '<p class="info">Tell {} to refresh \'{}\'</p>'
    print(info.format(viewer, pdf_path))
    if viewer in ('Skim', 'TeXShop'):
        return call("osascript '{}/bin/refresh_viewer.scpt' {} {} ".format(
            tm_bundle_support, viewer, shellquote(pdf_path)), shell=True)
    return 1
def getRegions(self, username, api_key, project_id, auth_url, collector):
    """Get a list of available regions, given a keystone endpoint and
    credentials."""
    cmd = [
        _helper, "getRegions",
        "--username=%s" % username,
        "--api_key=%s" % api_key,
        "--project_id=%s" % project_id,
        "--auth_url=%s" % auth_url,
    ]
    if os.path.exists(zenPath("bin", "zminion")):
        # Escape shell characters in command
        quoted = [shellquote(piece) for piece in cmd]
        # Route the command through the collector's zminion daemon.
        cmd = ['zminion', '--minion-name', 'zminion_%s' % collector,
               'run', '--'] + quoted
    return _runcommand(cmd)
def getRegions(self, username, api_key, project_id, auth_url, collector):
    """Get a list of available regions, given a keystone endpoint and
    credentials."""
    options = [("username", username), ("api_key", api_key),
               ("project_id", project_id), ("auth_url", auth_url)]
    cmd = [_helper, "getRegions"]
    cmd += ["--%s=%s" % option for option in options]
    if os.path.exists(zenPath("bin", "zminion")):
        # Escape shell characters in command, then hand it to the
        # collector's zminion daemon.
        cmd = ['zminion', '--minion-name', 'zminion_%s' % collector,
               'run', '--'] + [shellquote(x) for x in cmd]
    return _runcommand(cmd)
def run_makeindex(filename, verbose=False):
    """Run the makeindex command.

    Generate the index for the given file returning

    - the return value of ``makeindex``,
    - a value specifying if there were any fatal flaws (``True``) or not
      (``False``), and
    - the number of errors and
    - the number of warnings encountered while processing ``filename``.

    Arguments:

        filename: The name of the tex file for which we want to generate an
            index.

    Returns: ``(int, bool, int, int)``

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_makeindex('makeindex.tex') # doctest:+ELLIPSIS
        <p class="info">Run...Makeindex...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    # makeindex works on the .idx file pdflatex wrote for `filename`.
    index_file = "{}.idx".format(get_filename_without_extension(filename))
    process = Popen("makeindex {}".format(shellquote(index_file)),
                    shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT,
                    close_fds=True, universal_newlines=True)
    parser = MakeIndexParser(process.stdout, verbose)
    fatal, errors, warnings = parser.parse_stream()
    return process.wait(), fatal, errors, warnings
def run_makeglossaries(filename, verbose=False):
    """Run makeglossaries for the given file.

    The interface of this function is exactly the same as the one for
    ``run_makeindex``. For the list of arguments and return values, please
    take a look at ``run_makeindex``.

    Arguments:

        filename: The name of the tex file for which we want to generate an
            index.
        verbose: This value specifies if all output should be printed
            (``verbose=True``) or if only significant messages should be
            printed.

    Examples:

        >>> chdir('Tests/TeX')
        >>> call('pdflatex makeglossaries.tex > /dev/null', shell=True)
        0
        >>> run_makeglossaries('makeglossaries.tex') # doctest:+ELLIPSIS
        <h2>Make Glossaries...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    # makeglossaries wants the filename without its extension.
    base_name = splitext(filename)[0]
    process = Popen("makeglossaries {}".format(shellquote(base_name)),
                    shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT,
                    close_fds=True, universal_newlines=True)
    parser = MakeGlossariesParser(process.stdout, verbose)
    fatal, errors, warnings = parser.parse_stream()
    return process.wait(), fatal, errors, warnings
def run_makeindex(filename, verbose=False):
    """Run the makeindex command.

    Generate the index for the given file returning

    - the return value of ``makeindex``,
    - a value specifying if there were any fatal flaws (``True``) or not
      (``False``), and
    - the number of errors and
    - the number of warnings encountered while processing ``filename``.

    Arguments:

        filename: The name of the tex file for which we want to generate an
            index.

    Returns: ``(int, bool, int, int)``

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_makeindex('makeindex.tex') # doctest:+ELLIPSIS
        <p class="info">Run...Makeindex...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    # The index data lives in "<basename>.idx" next to the tex file.
    idx_path = shellquote("{}.idx".format(splitext(filename)[0]))
    process = Popen("makeindex {}".format(idx_path), shell=True,
                    stdout=PIPE, stdin=PIPE, stderr=STDOUT, close_fds=True,
                    universal_newlines=True)
    output_parser = MakeIndexParser(process.stdout, verbose)
    fatal, errors, warnings = output_parser.parse_stream()
    status = process.wait()
    return status, fatal, errors, warnings
def run_makeglossaries(filename, verbose=False):
    """Run makeglossaries for the given file.

    The interface of this function is exactly the same as the one for
    ``run_makeindex``. For the list of arguments and return values, please
    take a look at ``run_makeindex``.

    Arguments:

        filename: The name of the tex file for which we want to generate an
            index.
        verbose: This value specifies if all output should be printed
            (``verbose=True``) or if only significant messages should be
            printed.

    Examples:

        >>> chdir('Tests/TeX')
        >>> run_makeglossaries('makeglossaries.tex') # doctest:+ELLIPSIS
        <h2>Make Glossaries...
        ...
        (0, False, 0, 0)
        >>> chdir('../..')

    """
    command = "makeglossaries {}".format(
        shellquote(get_filename_without_extension(filename)))
    process = Popen(command, shell=True, stdout=PIPE, stdin=PIPE,
                    stderr=STDOUT, close_fds=True, universal_newlines=True)
    glossary_parser = MakeGlossariesParser(process.stdout, verbose)
    fatal, errors, warnings = glossary_parser.parse_stream()
    status = process.wait()
    return status, fatal, errors, warnings
# NOTE(review): this is a fragment of a larger command dispatcher — the
# `elif` below pairs with the filename guard, and further `elif command ==`
# branches presumably follow elsewhere.
problematic_characters = search('[$"]', filename)
if problematic_characters:
    print('''<p class="error"><strong>
             The filename {0} contains a problematic character: {1}<br>
             Please remove all occurrences of {1} in the filename.
             </strong></p>
          '''.format(filename, problematic_characters.group(0)))
# Run the command passed on the command line or modified by preferences
elif command == 'latexmk':
    engine_options = construct_engine_options(typesetting_directives,
                                              tm_engine_options, synctex)
    # Write a temporary latexmkrc holding the engine configuration.
    write_latexmkrc(engine, engine_options, '/tmp/latexmkrc')
    latexmkrc_path = "{}/config/latexmkrc".format(tm_bundle_support)
    # Plain `latex` gets -pdfps (PostScript route); everything else -pdf.
    command = "latexmk -pdf{} -f -r /tmp/latexmkrc -r {} {}".format(
        'ps' if engine == 'latex' else '', shellquote(latexmkrc_path),
        shellquote(filename))
    process = Popen(command, shell=True, stdout=PIPE, stdin=PIPE,
                    stderr=STDOUT, close_fds=True, universal_newlines=True)
    command_parser = LaTexMkParser(process.stdout, verbose, filename)
    status = command_parser.parse_stream()
    update_marks(cache_filename, command_parser.marks)
    fatal_error, number_errors, number_warnings = status
    tex_status = process.wait()
    remove("/tmp/latexmkrc")
    if tm_autoview and number_errors < 1 and not suppress_viewer:
        viewer_status = run_viewer(
            viewer, filepath, pdffile_path,
            # NOTE(review): `and` binds tighter than `or`, so this reads as
            # errors > 1 OR (warnings > 0 AND keepLogWin) — confirm that the
            # precedence is intentional.
            number_errors > 1 or number_warnings > 0 and
            tm_preferences['latexKeepLogWin'],
            'pdfsync' in packages or synctex, line_number)
""" return """<a href="javascript: TextMate.system(""" + r"""'\'{}/bin/viewDoc.sh\' {}', null);">{}</a>""".format( tm_bundle_support, file_path, description ) # -- Main --------------------------------------------------------------------- if __name__ == "__main__": # If the caret is right next to or between a word, then we show the # documentation for that word using the the shell command `texdoc` tm_current_word = getenv("TM_CURRENT_WORD") if tm_current_word: output = check_output("texdoc {}".format(shellquote(tm_current_word)), shell=True).strip() # Close the html output window on success if not output: exit(200) # Find all the packages included in the file or its inputs master_file, master_dir = find_file_to_typeset(find_tex_directives(getenv("TM_FILEPATH"))) chdir(master_dir) packages = find_tex_packages(master_file) texmf_directory = check_output("kpsewhich --expand-path '$TEXMFMAIN'", shell=True, universal_newlines=True).strip() docdbpath = "{}/Library/Caches/TextMate".format(expanduser("~")) docdbfile = "{}/latexdocindex".format(docdbpath) if exists(docdbfile) and getmtime(docdbfile) > getmtime(texmf_directory): # Read from cache
def svnlook(self, info):
    """Run `svnlook <info>` against this repository at this revision."""
    command = "svnlook %s %s --revision %s" % (
        shellquote(info), shellquote(self.repository), shellquote(self.id))
    return do(command)
def run_viewer(viewer, texfile_path, pdffile_path,
               suppress_pdf_output_textmate, use_pdfsync, line_number,
               tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
    """Open the PDF viewer containing the PDF generated from ``file_name``.

    If ``use_pdfsync`` is set to ``True`` and the ``viewer`` supports pdfsync
    then the part of the PDF corresponding to ``line_number`` will be opened.
    The function returns the exit value of the shell command used to display
    the PDF file.

    Arguments:

        viewer: Specifies which PDF viewer should be used to display the PDF.
        tex_file_path: The location of the tex file.
        suppress_pdf_output_textmate: This variable is only used when
            ``viewer`` is set to ``TextMate``. If it is set to ``True`` then
            TextMate will not try to display the generated PDF.
        tm_bundle_support: The location of the “LaTeX Bundle” support folder.

    Returns: ``int``

    Examples:

        >>> chdir('Tests/TeX')
        >>> call("xelatex ünicöde.tex > /dev/null", shell=True)
        0
        >>> for viewer in ['Skim', 'TextMate']: # doctest: +ELLIPSIS
        ...     run_viewer(viewer, './ünicöde.tex', './ünicöde.pdf',
        ...                suppress_pdf_output_textmate=None,
        ...                use_pdfsync=True, line_number=10,
        ...                tm_bundle_support=realpath('../../Support'))
        0
        <script type="text/javascript">
        ...
        ...
        0
        >>> chdir('../..')

    """
    status = 0
    if viewer == 'TextMate':
        if not suppress_pdf_output_textmate:
            if isfile(pdffile_path):
                # Redirect TextMate's HTML window to the generated PDF;
                # the path is URL-quoted (UTF-8 encoded first).
                print('''<script type="text/javascript">
                         window.location="file://{}"
                         </script>'''.format(quote(
                    pdffile_path.encode('utf8'))))
            else:
                print("File does not exist: {}".format(pdffile_path))
    else:
        path_to_viewer, sync_command = get_app_path_and_sync_command(
            viewer, pdffile_path, texfile_path, line_number)
        # PDF viewer is installed
        if path_to_viewer:
            if version_info <= (3, 0):
                # If this is not done, the next line will throw an encoding
                # exception when the PDF file contains non-ASCII characters.
                viewer = viewer.encode('utf-8')
            # check_open exits non-zero when the PDF is not already open.
            pdf_already_open = not (bool(
                call("'{}/bin/check_open' '{}' {} > /dev/null".format(
                    tm_bundle_support, viewer, shellquote(pdffile_path)),
                    shell=True)))
            if pdf_already_open:
                refresh_viewer(viewer, pdffile_path)
            else:
                status = call("open -a '{}.app' {}".format(
                    viewer, shellquote(pdffile_path)), shell=True)
            # PDF viewer supports pdfsync
            if sync_command and use_pdfsync:
                call(sync_command, shell=True)
            elif not sync_command and use_pdfsync:
                print("{} does not supported pdfsync".format(viewer))
        # PDF viewer could not be found
        else:
            print('<strong class="error"> {} does not appear '.format(viewer)
                  + 'to be installed on your system.</strong>')
    return status
""" return ("""<a href="javascript: TextMate.system(""" + r"""'\'{}/bin/viewDoc.sh\' {}', null);">{}</a>""".format( tm_bundle_support, file_path, description)) # -- Main --------------------------------------------------------------------- if __name__ == '__main__': # If the caret is right next to or between a word, then we show the # documentation for that word using the the shell command `texdoc` tm_current_word = getenv('TM_CURRENT_WORD') if tm_current_word: output = check_output("texdoc {}".format(shellquote(tm_current_word)), shell=True).strip() # Close the html output window on success if not output: exit(200) # Find all the packages included in the file or its inputs master_file, master_dir = find_file_to_typeset( find_tex_directives(getenv("TM_FILEPATH"))) chdir(master_dir) packages = find_tex_packages(master_file) texmf_directory = check_output("kpsewhich --expand-path '$TEXMFMAIN'", shell=True, universal_newlines=True).strip() docdbpath = "{}/Library/Caches/TextMate".format(expanduser('~'))
        <a href="javascript:...viewDoc.sh...file.pdf\', null);">description</a>

    """
    # Build the clickable javascript: link shown in the example above.
    return ("""<a href="javascript: TextMate.system(""" +
            r"""'\'{}/bin/viewDoc.sh\' {}', null);">{}</a>""".format(
                tm_bundle_support, file_path, description))


# -- Main ---------------------------------------------------------------------

if __name__ == '__main__':
    # If the caret is right next to or between a word, then we show the
    # documentation for that word using the shell command `texdoc`
    tm_current_word = getenv('TM_CURRENT_WORD')
    if tm_current_word:
        output = check_output("texdoc {}".format(shellquote(tm_current_word)),
                              shell=True).strip()
        # Close the html output window on success
        if not output:
            exit(200)
    # Find all the packages included in the file or its inputs
    master_file, master_dir = find_file_to_typeset(
        find_tex_directives(getenv("TM_FILEPATH")))
    chdir(master_dir)
    packages = find_tex_packages(master_file)
    texmf_directory = check_output("kpsewhich --expand-path '$TEXMFMAIN'",
                                   shell=True, universal_newlines=True).strip()
    # NOTE(review): fragment truncated here — cache lookup continues beyond
    # this chunk.
    docdbpath = "{}/Library/Caches/TextMate".format(expanduser('~'))
    docdbfile = "{}/latexdocindex".format(docdbpath)
def svnlook(self, info):
    """Invoke svnlook with the given subcommand for this repo and revision."""
    quoted = tuple(shellquote(value)
                   for value in (info, self.repository, self.id))
    return do("svnlook %s %s --revision %s" % quoted)
def run_viewer(viewer, texfile_path, pdffile_path,
               suppress_pdf_output_textmate, use_pdfsync, line_number,
               tm_bundle_support=getenv('TM_BUNDLE_SUPPORT')):
    """Open the PDF viewer containing the PDF generated from ``file_name``.

    If ``use_pdfsync`` is set to ``True`` and the ``viewer`` supports pdfsync
    then the part of the PDF corresponding to ``line_number`` will be opened.
    The function returns the exit value of the shell command used to display
    the PDF file.

    Arguments:

        viewer: Specifies which PDF viewer should be used to display the PDF.
        tex_file_path: The location of the tex file.
        suppress_pdf_output_textmate: This variable is only used when
            ``viewer`` is set to ``TextMate``. If it is set to ``True`` then
            TextMate will not try to display the generated PDF.
        tm_bundle_support: The location of the “LaTeX Bundle” support folder.

    Returns: ``int``

    Examples:

        >>> chdir('Tests/TeX')
        >>> call("pdflatex makeindex.tex > /dev/null", shell=True)
        0
        >>> run_viewer('Skim', './makeindex.tex', './makeindex.pdf',
        ...            suppress_pdf_output_textmate=None, use_pdfsync=True,
        ...            line_number=10,
        ...            tm_bundle_support=realpath('../../Support'))
        0
        >>> chdir('../..')

    """
    status = 0
    if viewer == 'TextMate':
        if not suppress_pdf_output_textmate:
            if isfile(pdffile_path):
                # Point TextMate's HTML window at the generated PDF
                # (URL-quoted path).
                print('''<script type="text/javascript">
                         window.location="file://{}"
                         </script>'''.format(quote(pdffile_path)))
            else:
                print("File does not exist: {}".format(pdffile_path))
    else:
        path_to_viewer, sync_command = get_app_path_and_sync_command(
            viewer, pdffile_path, texfile_path, line_number)
        # PDF viewer is installed
        if path_to_viewer:
            if version_info <= (3, 0):
                # If this is not done, the next line will throw an encoding
                # exception when the PDF file contains non-ASCII characters.
                viewer = viewer.encode('utf-8')
            # check_open exits non-zero when the PDF is not already open.
            pdf_already_open = not(bool(
                call("'{}/bin/check_open' '{}' {} > /dev/null".format(
                    tm_bundle_support, viewer, shellquote(pdffile_path)),
                    shell=True)))
            if pdf_already_open:
                refresh_viewer(viewer, pdffile_path)
            else:
                status = call("open -a '{}.app' {}".format(
                    viewer, shellquote(pdffile_path)), shell=True)
            # PDF viewer supports pdfsync
            if sync_command and use_pdfsync:
                call(sync_command, shell=True)
            elif not sync_command and use_pdfsync:
                print("{} does not supported pdfsync".format(viewer))
        # PDF viewer could not be found
        else:
            print('<strong class="error"> {} does not appear '.format(viewer)
                  + 'to be installed on your system.</strong>')
    return status
def run_cmd(airship, args):
    """Shell-quote ``args.command`` and run it inside the selected bucket.

    Falls back to the newest bucket when ``args.bucket_id`` is not given.
    """
    quoted = ' '.join(shellquote(piece) for piece in args.command)
    bucket = airship.get_bucket(args.bucket_id or _newest)
    bucket.run(quoted)
# NOTE(review): fragment of a larger dispatcher — the `elif` pairs with the
# filename guard, and the trailing `if` body is truncated in this chunk.
problematic_characters = search('[$"]', filename)
if problematic_characters:
    print('''<p class="error"><strong>
             The filename {0} contains a problematic character: {1}<br>
             Please remove all occurrences of {1} in the filename.
             </strong></p>
          '''.format(filename, problematic_characters.group(0)))
# Run the command passed on the command line or modified by preferences
elif command == 'latexmk':
    engine_options = construct_engine_options(typesetting_directives,
                                              tm_engine_options, synctex)
    # Write a temporary latexmkrc holding the engine configuration.
    write_latexmkrc(engine, engine_options, '/tmp/latexmkrc')
    latexmkrc_path = "{}/config/latexmkrc".format(tm_bundle_support)
    # Plain `latex` takes the PostScript route (-pdfps); others use -pdf.
    command = "latexmk -pdf{} -f -r /tmp/latexmkrc -r {} {}".format(
        'ps' if engine == 'latex' else '', shellquote(latexmkrc_path),
        shellquote(filename))
    process = Popen(command, shell=True, stdout=PIPE, stdin=PIPE,
                    stderr=STDOUT, close_fds=True, universal_newlines=True)
    command_parser = LaTexMkParser(process.stdout, verbose, filename)
    status = command_parser.parse_stream()
    update_marks(cache_filename, command_parser.marks)
    fatal_error, number_errors, number_warnings = status
    tex_status = process.wait()
    remove("/tmp/latexmkrc")
    if tm_autoview and number_errors < 1 and not suppress_viewer:
def ship(extractor, commit, debug):
    "Ship a notification for the specified commit."
    # NOTE(review): this is Python 2 code (`print message`, `unicode`,
    # `except socket.error, e`); it will not run under Python 3 as-is.
    metadata = extractor.commit_factory(commit)
    # This is where we apply filtering
    if extractor.filtercmd:
        # The filter command receives the commit metadata as a JSON
        # argument and is expected to print (possibly modified) JSON
        # back on stdout, which replaces the metadata fields.
        cmd = '%s %s' % (shellquote(extractor.filtercmd),
                         shellquote(json.dumps(metadata.__dict__)))
        data = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE).stdout.read()
        try:
            metadata.__dict__.update(json.loads(data))
        except ValueError:
            sys.stderr.write("irkerhook.py: could not decode JSON: %s\n"
                             % data)
            raise SystemExit(1)
    # Rewrite the file list if too long. The objective here is only
    # to be easier on the eyes.
    if extractor.cialike \
           and extractor.cialike.lower() != "none" \
           and len(metadata.files) > int(extractor.cialike):
        files = metadata.files.split()
        # Directory part of each path; a path without '/' maps to ''.
        dirs = set([d.rpartition('/')[0] for d in files])
        if len(dirs) == 1:
            metadata.files = "(%s files)" % (len(files),)
        else:
            metadata.files = "(%s files in %s dirs)" % (len(files), len(dirs))
    # Message reduction. The assumption here is that IRC can't handle
    # lines more than 510 characters long. If we exceed that length, we
    # try knocking out the file list, on the theory that for notification
    # purposes the commit text is more important. If it's still too long
    # there's nothing much can be done other than ship it expecting the IRC
    # server to truncate.
    privmsg = unicode(metadata)
    if len(privmsg) > 510:
        metadata.files = ""
        privmsg = unicode(metadata)
    # Anti-spamming guard. It's deliberate that we get maxchannels not from
    # the user-filtered metadata but from the extractor data - means repo
    # administrators can lock in that setting.
    channels = metadata.channels.split(",")
    if extractor.maxchannels != 0:
        channels = channels[:extractor.maxchannels]
    # Ready to ship.
    message = json.dumps({"to": channels, "privmsg": privmsg})
    if debug:
        print message
    elif channels:
        try:
            if extractor.email:
                # We can't really figure out what our SF username is without
                # exploring our environment. The mail pipeline doesn't care
                # about who sent the mail, other than being from sourceforge.
                # A better way might be to simply call mail(1)
                sender = "*****@*****.**"
                msg = """From: %(sender)s
Subject: irker json

%(message)s""" % {"sender":sender, "message":message}
                import smtplib
                smtp = smtplib.SMTP()
                smtp.connect()
                smtp.sendmail(sender, extractor.email, msg)
                smtp.quit()
            elif extractor.tcp:
                # Reliable delivery over TCP to the irker daemon.
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.connect((extractor.server or default_server,
                                  IRKER_PORT))
                    sock.sendall(message + "\n")
                finally:
                    sock.close()
            else:
                # Default transport: fire-and-forget UDP datagram.
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    sock.sendto(message + "\n",
                                (extractor.server or default_server,
                                 IRKER_PORT))
                finally:
                    sock.close()
        except socket.error, e:
            sys.stderr.write("%s\n" % e)
print( """<p class="error"><strong> The filename {0} contains a problematic character: {1}<br> Please remove all occurrences of {1} in the filename. </strong></p> """.format( filename, problematic_characters.group(0) ) ) # Run the command passed on the command line or modified by preferences elif command == "latexmk": engine_options = construct_engine_options(typesetting_directives, tm_engine_options, synctex) write_latexmkrc(engine, engine_options, "/tmp/latexmkrc") latexmkrc_path = "{}/config/latexmkrc".format(tm_bundle_support) command = "latexmk -pdf{} -f -r /tmp/latexmkrc -r {} {}".format( "ps" if engine == "latex" else "", shellquote(latexmkrc_path), shellquote(filename) ) process = Popen( command, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT, close_fds=True, universal_newlines=True ) command_parser = LaTexMkParser(process.stdout, verbose, filename) status = command_parser.parse_stream() update_marks(cache_filename, command_parser.marks) fatal_error, number_errors, number_warnings = status tex_status = process.wait() remove("/tmp/latexmkrc") if tm_autoview and number_errors < 1 and not suppress_viewer: viewer_status = run_viewer( viewer, filepath, pdffile_path,
def update_marks(cache_filename, marks_to_set=[]):
    """Set or remove gutter marks.

    This function starts by removing marks from the files specified inside
    the dictionary item ``files_with_guttermarks`` stored inside the
    ``pickle`` file ``cache_filename``. After that it sets all marks
    specified in ``marks_to_set``.

    cache_filename

        The path to the cache file for the current tex project. This file
        stores a dictionary containing the item ``files_with_guttermarks``.
        ``files_with_guttermarks`` stores a list of files, from which we
        need to remove gutter marks.

    marks_to_set

        A list of tuples of the form ``(file_path, line_number, marker_type,
        message)``, where file_path and line_number specify the location
        where a marker of type ``marker_type`` together with an optional
        message should be placed.

    Examples:

        >>> marks_to_set = [('Tests/TeX/lualatex.tex', 1, 'note',
        ...                  'Lua was created in 1993.'),
        ...                 ('Tests/TeX/lualatex.tex', 4, 'warning',
        ...                  'Lua means "Moon" in Portuguese.'),
        ...                 ('Tests/TeX/lualatex.tex', 6, 'error', None)]
        >>> data = {'files_with_guttermarks': {'Tests/TeX/lualatex.tex'}}
        >>> cache_filename = '.test.lb'
        >>> with open(cache_filename, 'wb') as storage:
        ...     dump(data, storage)

        Set marks

        >>> update_marks(cache_filename, marks_to_set)

        Remove marks

        >>> update_marks(cache_filename)
        >>> from os import remove
        >>> remove(cache_filename)

        Working with a non existent file should just set the marks in
        ``marks_to_set``

        >>> update_marks('non_existent_file')
        >>> remove('non_existent_file')

    """
    try:
        # Try to read from cache
        with open(cache_filename, 'rb') as storage:
            typesetting_data = load(storage)
            files_with_guttermarks = typesetting_data['files_with_guttermarks']
            marks_to_remove = []
            for filename in files_with_guttermarks:
                # Clear both marker types for every file marked previously.
                marks_to_remove.extend([(filename, 'error'),
                                        (filename, 'warning')])
    except:
        # NOTE(review): bare except treats a missing/corrupt cache as
        # "nothing to remove" — narrowing to (OSError, KeyError,
        # pickle.UnpicklingError) would be safer; confirm before changing.
        typesetting_data = {}
        marks_to_remove = []
    try:
        # Try to write cache data for next run
        newfiles = {filename for (filename, _, _, _) in marks_to_set}
        if 'files_with_guttermarks' in typesetting_data:
            typesetting_data['files_with_guttermarks'].update(newfiles)
        else:
            typesetting_data['files_with_guttermarks'] = newfiles
        with open(cache_filename, 'wb') as storage:
            dump(typesetting_data, storage)
    except:
        # Output is HTML — presumably rendered in TextMate's output window.
        print('<p class="warning"> Could not write cache file {}!</p>'.format(
              cache_filename))
    # Group the marks to remove by normalized, symlink-resolved path.
    marks_remove = {}
    mate = getenv('TM_MATE')
    for filepath, mark in marks_to_remove:
        path = normpath(realpath(filepath))
        marks = marks_remove.get(path)
        if marks:
            marks.append(mark)
        else:
            marks_remove[path] = [mark]
    # Group the marks to set the same way; shell-quote the optional message
    # since it ends up inside the command line built below.
    marks_add = {}
    for filepath, line, mark, message in marks_to_set:
        path = normpath(realpath(filepath))
        message = shellquote(message) if message else None
        marks = marks_add.get(path)
        if marks:
            marks.append((line, mark, message))
        else:
            marks_add[path] = [(line, mark, message)]
    # Build one `mate` invocation per file: first the `-c` clear options…
    commands = {
        filepath: '{} {}'.format(mate, ' '.join(['-c {}'.format(mark)
                                                 for mark in marks]))
        for filepath, marks in marks_remove.items()
    }
    # …then append the `-l`/`-s` set options (line, type, optional message).
    for filepath, markers in marks_add.items():
        command = ' '.join([
            '-l {} -s {}{}'.format(line, mark,
                                   ":{}".format(content) if content else '')
            for line, mark, content in markers
        ])
        commands[filepath] = '{} {}'.format(commands.get(filepath, mate),
                                            command)
    for filepath, command in commands.items():
        call("{} {}".format(command, shellquote(filepath)), shell=True)
print("") sys.exit(0) # get config, input etc. latexfname = sys.argv[1] dvifname = os.environ["KLF_FN_DVI"] latexcmd = os.environ.get("KLF_ARG_latexcmd", "latex {texfname}") print("User-provided LaTeX command:", repr(latexcmd)) tempdir = os.path.dirname(os.environ["KLF_TEMPFNAME"]) # Run latex custom command # ------------------------ # perform required replacements latexcmd_full = latexcmd.format(latex=shellquote(os.environ["KLF_LATEX"]), texfname=shellquote(latexfname)) # raises exception if error -- will be picked up by Python runtime print("Running `{}' ...\n".format(latexcmd_full)) res = subprocess.call(latexcmd_full, cwd=tempdir, shell=True) if res != 0: print("Failed, res=", res) sys.exit(res>>8)
def update_marks(cache_filename, marks_to_set=[]):
    """Set or remove gutter marks.

    This function starts by removing marks from the files specified inside
    the dictionary item ``files_with_guttermarks`` stored inside the
    ``pickle`` file ``cache_filename``. After that it sets all marks
    specified in ``marks_to_set``.

    cache_filename

        The path to the cache file for the current tex project. This file
        stores a dictionary containing the item ``files_with_guttermarks``.
        ``files_with_guttermarks`` stores a list of files, from which we
        need to remove gutter marks.

    marks_to_set

        A list of tuples of the form ``(file_path, line_number, marker_type,
        message)``, where file_path and line_number specify the location
        where a marker of type ``marker_type`` together with an optional
        message should be placed.

    Examples:

        >>> marks_to_set = [('Tests/TeX/lualatex.tex', 1, 'note',
        ...                  'Lua was created in 1993.'),
        ...                 ('Tests/TeX/lualatex.tex', 4, 'warning',
        ...                  'Lua means "Moon" in Portuguese.'),
        ...                 ('Tests/TeX/lualatex.tex', 6, 'error', None)]
        >>> data = {'files_with_guttermarks': {'Tests/TeX/lualatex.tex'}}
        >>> cache_filename = '.test.lb'
        >>> with open(cache_filename, 'wb') as storage:
        ...     dump(data, storage)

        Set marks

        >>> update_marks(cache_filename, marks_to_set)

        Remove marks

        >>> update_marks(cache_filename)
        >>> from os import remove
        >>> remove(cache_filename)

        Working with a non existent file should just set the marks in
        ``marks_to_set``

        >>> update_marks('non_existent_file')
        >>> remove('non_existent_file')

    """
    try:
        # Try to read from cache
        with open(cache_filename, 'rb') as storage:
            typesetting_data = load(storage)
            files_with_guttermarks = typesetting_data['files_with_guttermarks']
            marks_to_remove = []
            for filename in files_with_guttermarks:
                # Clear both marker types for every file marked previously.
                marks_to_remove.extend([(filename, 'error'),
                                        (filename, 'warning')])
    except:
        # NOTE(review): bare except treats a missing/corrupt cache as
        # "nothing to remove" — narrowing to (OSError, KeyError,
        # pickle.UnpicklingError) would be safer; confirm before changing.
        typesetting_data = {}
        marks_to_remove = []
    try:
        # Try to write cache data for next run
        newfiles = {filename for (filename, _, _, _) in marks_to_set}
        if 'files_with_guttermarks' in typesetting_data:
            typesetting_data['files_with_guttermarks'].update(newfiles)
        else:
            typesetting_data['files_with_guttermarks'] = newfiles
        with open(cache_filename, 'wb') as storage:
            dump(typesetting_data, storage)
    except:
        # Output is HTML — presumably rendered in TextMate's output window.
        print('<p class="warning"> Could not write cache file {}!</p>'.format(
              cache_filename))
    # Group the marks to remove by normalized, symlink-resolved path.
    marks_remove = {}
    for filepath, mark in marks_to_remove:
        path = normpath(realpath(filepath))
        marks = marks_remove.get(path)
        if marks:
            marks.append(mark)
        else:
            marks_remove[path] = [mark]
    # Group the marks to set the same way; shell-quote the optional message
    # since it ends up inside the command line built below.
    marks_add = {}
    for filepath, line, mark, message in marks_to_set:
        path = normpath(realpath(filepath))
        message = shellquote(message) if message else None
        marks = marks_add.get(path)
        if marks:
            marks.append((line, mark, message))
        else:
            marks_add[path] = [(line, mark, message)]
    # Build one `mate` invocation per file: first the `-c` clear options…
    # (this variant hard-codes the `mate` executable name rather than
    # reading TM_MATE from the environment)
    commands = {filepath: 'mate {}'.format(' '.join(['-c {}'.format(mark)
                                                     for mark in marks]))
                for filepath, marks in marks_remove.items()}
    # …then append the `-l`/`-s` set options (line, type, optional message).
    for filepath, markers in marks_add.items():
        command = ' '.join(['-l {} -s {}{}'.format(line, mark,
                            ":{}".format(content) if content else '')
                            for line, mark, content in markers])
        commands[filepath] = '{} {}'.format(commands.get(filepath, 'mate'),
                                            command)
    for filepath, command in commands.items():
        call("{} {}".format(command, shellquote(filepath)), shell=True)
def notify(title='LaTeX Watch', summary='', messages=[], token=None):
    """Show informative messages in a DIALOG notification window.

    When ``token`` refers to a window that is still open, that window is
    updated in place and its token is returned. Otherwise a brand new
    notification window is created.

    Arguments:

        title     The (window) title for the notification window.
        summary   A summary explaining why the notification is shown.
        messages  A list of strings containing informative messages.
        token     Token of an existing notification window to reuse.

    Returns: ``int`` — the token of the (re)used notification window.
    """
    dialog_tool = getenv('DIALOG')
    support_path = getenv('TM_SUPPORT_PATH')
    nib_path = '{}/nibs/SimpleNotificationWindow.nib'.format(support_path)
    # Escape backslashes first, then double quotes, so the text can be
    # embedded safely in the quoted model definition below.
    escaped_log = '\n'.join(messages).replace('\\', '\\\\').replace('"', '\\"')
    nib_command = "{} nib".format(shellquote(dialog_tool))
    model = shellquote(
        """{{ title = "{}"; summary = "{}"; log = "{}"; }}""".format(
            title, summary, escaped_log))
    if token:
        # First try to refresh the window identified by the given token.
        refresh = "{} --update {} --model {}".format(nib_command, token,
                                                     model)
        response = check_output(refresh, stderr=STDOUT, shell=True,
                                universal_newlines=True)
        # An empty response means the window still existed and was
        # updated — hand back the caller's token. Anything else is an
        # error message, so fall through and open a fresh window.
        if not response.strip():
            return int(token)
    # Create new notification window
    create = "{} --load {} --model {}".format(nib_command,
                                              shellquote(nib_path), model)
    response = check_output(create, shell=True, universal_newlines=True)
    return int(response)
def ship(extractor, commit, debug):
    "Ship a notification for the specified commit."
    # NOTE(review): this is Python 2 code (`print message`, `unicode`,
    # `except socket.error, e`); it will not run under Python 3 as-is.
    metadata = extractor.commit_factory(commit)
    # This is where we apply filtering
    if extractor.filtercmd:
        # The filter command receives the commit metadata as a JSON
        # argument and is expected to print (possibly modified) JSON
        # back on stdout, which replaces the metadata fields.
        cmd = '%s %s' % (shellquote(
            extractor.filtercmd), shellquote(json.dumps(metadata.__dict__)))
        data = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE).stdout.read()
        try:
            metadata.__dict__.update(json.loads(data))
        except ValueError:
            sys.stderr.write("irkerhook.py: could not decode JSON: %s\n"
                             % data)
            raise SystemExit(1)
    # Rewrite the file list if too long. The objective here is only
    # to be easier on the eyes.
    if extractor.cialike \
           and extractor.cialike.lower() != "none" \
           and len(metadata.files) > int(extractor.cialike):
        files = metadata.files.split()
        # Directory part of each path; a path without '/' maps to ''.
        dirs = set([d.rpartition('/')[0] for d in files])
        if len(dirs) == 1:
            metadata.files = "(%s files)" % (len(files), )
        else:
            metadata.files = "(%s files in %s dirs)" % (len(files), len(dirs))
    # Message reduction. The assumption here is that IRC can't handle
    # lines more than 510 characters long. If we exceed that length, we
    # try knocking out the file list, on the theory that for notification
    # purposes the commit text is more important. If it's still too long
    # there's nothing much can be done other than ship it expecting the IRC
    # server to truncate.
    privmsg = unicode(metadata)
    if len(privmsg) > 510:
        metadata.files = ""
        privmsg = unicode(metadata)
    # Anti-spamming guard. It's deliberate that we get maxchannels not from
    # the user-filtered metadata but from the extractor data - means repo
    # administrators can lock in that setting.
    channels = metadata.channels.split(",")
    if extractor.maxchannels != 0:
        channels = channels[:extractor.maxchannels]
    # Ready to ship.
    message = json.dumps({"to": channels, "privmsg": privmsg})
    if debug:
        print message
    elif channels:
        try:
            if extractor.email:
                # We can't really figure out what our SF username is without
                # exploring our environment. The mail pipeline doesn't care
                # about who sent the mail, other than being from sourceforge.
                # A better way might be to simply call mail(1)
                sender = "*****@*****.**"
                msg = """From: %(sender)s
Subject: irker json

%(message)s""" % {
                    "sender": sender,
                    "message": message
                }
                import smtplib
                smtp = smtplib.SMTP()
                smtp.connect()
                smtp.sendmail(sender, extractor.email, msg)
                smtp.quit()
            elif extractor.tcp:
                # Reliable delivery over TCP to the irker daemon.
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.connect((extractor.server or default_server,
                                  IRKER_PORT))
                    sock.sendall(message + "\n")
                finally:
                    sock.close()
            else:
                # Default transport: fire-and-forget UDP datagram.
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    sock.sendto(
                        message + "\n",
                        (extractor.server or default_server, IRKER_PORT))
                finally:
                    sock.close()
        except socket.error, e:
            sys.stderr.write("%s\n" % e)
def _encode_command(cmd): if isinstance(cmd, list): cmd = ' '.join(shellquote(c) for c in cmd) return cmd.encode().strip(b'\r').strip(b'\n')
print('</p></div>') #div texActions print('''<script type="text/javascript">runLatexmkpvc()</script>''') #run the second run with -addoutput exit(EXIT_SUCCESS) else: #not first_run engine_options = construct_engine_options(typesetting_directives, tm_engine_options, synctex) write_latexmkrc(engine, engine_options, '/tmp/latexmkrc') latexmkrc_path = "{}/config/latexmkrc".format(tm_bundle_support) command = "latexmk -pdf{} {} -f -r /tmp/latexmkrc -r {} {}".format( 'ps' if engine == 'latex' else '', '-pvc' if use_pvc else '', shellquote(latexmkrc_path), shellquote(filename)) process = Popen(command, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT, close_fds=True) def round_finished(parser, fatal_error, number_errors, number_warnings): update_marks(cache_filename, parser.marks) #don't want sync as it doesn't work with multiple source files use_pdfsync = False; #'pdfsync' in packages or synctex if tm_autoview and number_errors < 1 and not suppress_viewer: viewer_status = run_viewer( viewer, filepath, pdffile_path, number_errors > 1 or number_warnings > 0
def notify(title='LaTeX Watch', summary='', messages=[], token=None):
    """Show informative messages in a DIALOG notification window.

    When ``token`` refers to a window that is still open, that window is
    updated in place and its token is returned. Otherwise a brand new
    notification window is created.

    Arguments:

        title     The (window) title for the notification window.
        summary   A summary explaining why the notification is shown.
        messages  A list of strings containing informative messages.
        token     Token of an existing notification window to reuse.

    Returns: ``int`` — the token of the (re)used notification window.
    """
    dialog_tool = getenv('DIALOG')
    support_path = getenv('TM_SUPPORT_PATH')
    nib_path = '{}/nibs/SimpleNotificationWindow.nib'.format(support_path)
    # Escape backslashes first, then double quotes, so the text can be
    # embedded safely in the quoted model definition below.
    escaped_log = '\n'.join(messages).replace('\\', '\\\\').replace('"', '\\"')
    nib_command = "{} nib".format(shellquote(dialog_tool))
    model = shellquote(
        """{{ title = "{}"; summary = "{}"; log = "{}"; }}""".format(
            title, summary, escaped_log))
    if token:
        # First try to refresh the window identified by the given token.
        refresh = "{} --update {} --model {}".format(nib_command, token,
                                                     model)
        response = check_output(refresh, stderr=STDOUT, shell=True,
                                universal_newlines=True)
        # An empty response means the window still existed and was
        # updated — hand back the caller's token. Anything else is an
        # error message, so fall through and open a fresh window.
        if not response.strip():
            return int(token)
    # Create new notification window
    create = "{} --load {} --model {}".format(nib_command,
                                              shellquote(nib_path), model)
    response = check_output(create, shell=True, universal_newlines=True)
    return int(response)