def generate_config(pkgname='astropy', filename=None, verbose=False):
    """Generates a configuration file, from the list of `ConfigItem`
    objects for each subpackage.

    .. versionadded:: 4.1

    Parameters
    ----------
    pkgname : str or None
        The package for which to retrieve the configuration object.
    filename : str or file object or None
        If None, the default configuration path is taken from `get_config`.
    verbose : bool
        If True, do not silence import output and only ignore
        `AstropyDeprecationWarning`; otherwise silence output and ignore
        all warnings while importing subpackages.
    """
    # Choose how noisy the subpackage import sweep below is allowed to be.
    if verbose:
        verbosity = nullcontext
        filter_warnings = AstropyDeprecationWarning
    else:
        verbosity = silence
        filter_warnings = Warning

    # Import every (non-test, non-private) submodule of the package so that
    # all ConfigNamespace subclasses get defined and can be enumerated below.
    package = importlib.import_module(pkgname)
    with verbosity(), warnings.catch_warnings():
        warnings.simplefilter('ignore', category=filter_warnings)
        for mod in pkgutil.walk_packages(path=package.__path__,
                                         prefix=package.__name__ + '.'):
            if mod.module_finder.path.endswith(('test', 'tests')):
                # Skip test modules
                continue
            if mod.name.split('.')[-1].startswith('_'):
                # Skip private modules
                continue
            # Best effort: a submodule that fails to import simply
            # contributes no config items.
            with contextlib.suppress(ImportError):
                importlib.import_module(mod.name)

    # Item descriptions are emitted as '## '-prefixed comment blocks.
    wrapper = TextWrapper(initial_indent="## ", subsequent_indent='## ',
                          width=78)

    if filename is None:
        filename = get_config(pkgname).filename

    # ExitStack lets us conditionally own the file handle: we only open/close
    # when given a path; a caller-provided file object is left open.
    with contextlib.ExitStack() as stack:
        if isinstance(filename, (str, pathlib.Path)):
            fp = stack.enter_context(open(filename, 'w'))
        else:
            # assume it's a file object, or io.StringIO
            fp = filename

        # Parse the subclasses, ordered by their module name
        subclasses = ConfigNamespace.__subclasses__()
        processed = set()

        for conf in sorted(subclasses, key=lambda x: x.__module__):
            mod = conf.__module__

            # Skip modules for other packages, e.g. astropy modules that
            # would be imported when running the function for astroquery.
            if mod.split('.')[0] != pkgname:
                continue

            # Check that modules are not processed twice, which can happen
            # when they are imported in another module.
            if mod in processed:
                continue
            else:
                processed.add(mod)

            print_module = True
            for item in conf().values():
                if print_module:
                    # If this is the first item of the module, we print the
                    # module name, but not if this is the root package...
                    if item.module != pkgname:
                        modname = item.module.replace(f'{pkgname}.', '')
                        fp.write(f"[{modname}]\n\n")
                    print_module = False

                # Each item becomes a commented description followed by a
                # commented-out "name = default" line.
                fp.write(wrapper.fill(item.description) + '\n')
                fp.write(f'# {item.name} = {item.defaultvalue}\n\n')
def event(self) -> None: wrapper: TextWrapper = TextWrapper(width=self.state['width']) word_list: List[str] = wrapper.wrap(text=self.state['text']) for element in word_list: self._pprint(element, 3) print()
# License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import, print_function r'''This module contains code for managing clobbering of the tree.''' import errno import os import subprocess import sys from mozfile.mozfile import remove as mozfileremove from textwrap import TextWrapper CLOBBER_MESSAGE = ''.join([ TextWrapper().fill(line) + '\n' for line in ''' The CLOBBER file has been updated, indicating that an incremental build since \ your last build will probably not work. A full/clobber build is required. The reason for the clobber is: {clobber_reason} Clobbering can be performed automatically. However, we didn't automatically \ clobber this time because: {no_reason} The easiest and fastest way to clobber is to run: $ mach clobber
def do_list(dbctx, fields, afields, sort_by, ascending, search_text, line_width,
            separator, prefix, limit, for_machine=False):
    """List books from the calibre database.

    Emits JSON on stdout when ``for_machine`` is True, otherwise renders a
    column-aligned text table sized to the terminal (or ``line_width``).

    Raises SystemExit with the backend's message when the 'list' call does
    not return a result mapping.
    """
    if sort_by is None:
        ascending = True
    ans = dbctx.run('list', fields, sort_by, ascending, search_text, limit)
    try:
        book_ids, data, metadata = ans['book_ids'], ans['data'], ans['metadata']
    except TypeError:
        # The backend returned an error string instead of a result dict.
        raise SystemExit(ans)
    fields = list(ans['fields'])
    try:
        fields.remove('id')
    except ValueError:
        pass
    # 'id' is always shown first.
    fields = ['id'] + fields
    stringify(data, metadata, for_machine)
    if for_machine:
        json.dump(list(as_machine_data(book_ids, data, metadata)),
                  sys.stdout, indent=2, sort_keys=True)
        return
    from calibre.utils.terminal import ColoredStream, geometry

    output_table = prepare_output_table(fields, book_ids, data, metadata)
    # Widest cell per column, measured with str_width (wide-char aware).
    widths = [0 for _ in fields]

    for record in output_table:
        for j in range(len(fields)):
            widths[j] = max(widths[j], str_width(record[j]))

    screen_width = geometry()[0] if line_width < 0 else line_width
    if not screen_width:
        screen_width = 80

    field_width = screen_width // len(fields)
    # BUG FIX: map() returns a one-shot iterator on Python 3; the original
    # code both summed it repeatedly and indexed it (base_widths[i]), which
    # fails there. Materializing as a list is also correct on Python 2.
    base_widths = list(map(lambda x: min(x + 1, field_width), widths))

    # Grow columns one at a time until the table fills the screen or no
    # column can grow further.
    while sum(base_widths) < screen_width:
        adjusted = False
        for i in range(len(widths)):
            if base_widths[i] < widths[i]:
                base_widths[i] += min(screen_width - sum(base_widths),
                                      widths[i] - base_widths[i])
                adjusted = True
                break
        if not adjusted:
            break

    widths = list(base_widths)
    titles = map(lambda x, y: '%-*s%s' % (x - len(separator), y, separator),
                 widths, fields)
    with ColoredStream(sys.stdout, fg='green'):
        prints(''.join(titles))

    # One wrapper per column; a 1-wide column is passed through unchanged
    # (the bare string then acts as its own sequence of 1-char "lines").
    wrappers = [TextWrapper(x - 1).wrap if x > 1 else lambda y: y for x in widths]

    for record in output_table:
        text = [wrappers[i](record[i]) for i, field in enumerate(fields)]
        lines = max(map(len, text))
        for l in range(lines):
            for i, field in enumerate(text):
                ft = text[i][l] if l < len(text[i]) else u''
                # NOTE(review): writing encoded bytes assumes a byte-oriented
                # stdout (Python 2 behaviour) — confirm before running on Py3.
                sys.stdout.write(ft.encode('utf-8'))
                if i < len(text) - 1:
                    filler = (u'%*s' % (widths[i] - str_width(ft) - 1, u''))
                    sys.stdout.write((filler + separator).encode('utf-8'))
            print()
def _jclassDoc(cls):
    """Generator for JClass.__doc__ property

    Parameters:
      cls (JClass): class to document.

    Returns:
      The doc string for the class.
    """
    from textwrap import TextWrapper
    jclass = cls.class_
    sections = []
    sections.append("Java class '%s'" % (jclass.getName()))
    sections.append("")

    # Superclass, when one exists.
    sup = jclass.getSuperclass()
    if sup:
        sections.append(" Extends:")
        sections.append(" %s" % sup.getName())
        sections.append("")

    # Implemented interfaces as one wrapped, comma-separated paragraph.
    intfs = jclass.getInterfaces()
    if intfs:
        sections.append(" Interfaces:")
        joined = ", ".join([str(i.getCanonicalName()) for i in intfs])
        wrapper = TextWrapper(initial_indent=' ', subsequent_indent=' ')
        sections.extend(wrapper.wrap(joined))
        sections.append("")

    # Public constructors plus the union of their declared exception types.
    ctors = jclass.getDeclaredConstructors()
    if ctors:
        raised = []
        simple_name = jclass.getSimpleName()
        signatures = []
        for ctor in ctors:
            if not ctor.getModifiers() & 1:
                # Only public constructors (modifier bit 0x1) are documented.
                continue
            params = ", ".join(
                [str(i.getCanonicalName()) for i in ctor.getParameterTypes()])
            signatures.append(" * %s(%s)" % (simple_name, params))
            raised.extend(ctor.getExceptionTypes())
        if signatures:
            sections.append(" Constructors:")
            sections.extend(signatures)
            sections.append("")
            if raised:
                sections.append(" Raises:")
                for exc in set(raised):
                    sections.append(" %s: from java" % exc.getCanonicalName())
                sections.append("")

    # Public fields with final/static/enum-constant qualifiers.
    fields = jclass.getDeclaredFields()
    if fields:
        descriptions = []
        for field in fields:
            modifiers = field.getModifiers()
            if not modifiers & 1:
                continue
            traits = []
            if modifiers & 16:
                traits.append("final")
            if modifiers & 8:
                traits.append("static")
            traits.append("enum constant" if field.isEnumConstant() else "field")
            descriptions.append(" %s (%s): %s" % (field.getName(),
                                                  field.getType().getName(),
                                                  " ".join(traits)))
        if descriptions:
            sections.append(" Attributes:")
            sections.extend(descriptions)
            sections.append("")

    return "\n".join(sections)
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import print_function r'''This module contains code for managing clobbering of the tree.''' import os import sys from mozfile.mozfile import rmtree from textwrap import TextWrapper CLOBBER_MESSAGE = ''.join([TextWrapper().fill(line) + '\n' for line in ''' The CLOBBER file has been updated, indicating that an incremental build since \ your last build will probably not work. A full/clobber build is required. The reason for the clobber is: {clobber_reason} Clobbering can be performed automatically. However, we didn't automatically \ clobber this time because: {no_reason} The easiest and fastest way to clobber is to run:
def help(self):  # @ReservedAssignment
    """Prints this help message and quits.

    Renders, in order: program version, DESCRIPTION, the paragraph-wrapped
    DESCRIPTION_MORE, a usage line derived from main()'s signature, the
    switch table grouped by switch group, and the sub-command table.
    """
    if self._get_prog_version():
        self.version()
        print("")
    if self.DESCRIPTION:
        print(self.DESCRIPTION.strip() + '\n')

    def split_indentation(s):
        """Identifies the initial indentation (all spaces) of the string and
        returns the indentation as well as the remainder of the line.
        """
        i = 0
        while i < len(s) and s[i] == ' ':
            i += 1
        return s[:i], s[i:]

    def paragraphs(text):
        """Yields each paragraph of text along with its initial and
        subsequent indentations to be used by textwrap.TextWrapper.

        Identifies list items from their first non-space character being one
        of bullets '-', '*', and '/'. However, bullet '/' is invisible and
        is removed from the list item.

        :param text: The text to separate into paragraphs
        """
        paragraph = None
        initial_indent = ""
        subsequent_indent = ""

        def current():
            """Yields the current result if present.
            """
            # NOTE: reads the enclosing generator's locals at call time.
            if paragraph:
                yield paragraph, initial_indent, subsequent_indent

        for part in text.lstrip("\n").split("\n"):
            indent, line = split_indentation(part)

            if len(line) == 0:
                # Starting a new paragraph
                for item in current():
                    yield item
                yield "", "", ""

                paragraph = None
                initial_indent = ""
                subsequent_indent = ""
            else:
                # Adding to current paragraph
                def is_list_item(line):
                    """Returns true if the first element of 'line' is a bullet
                    character.
                    """
                    bullets = ['-', '*', '/']
                    return line[0] in bullets

                def has_invisible_bullet(line):
                    """Returns true if the first element of 'line' is the
                    invisible bullet ('/').
                    """
                    return line[0] == '/'

                if is_list_item(line):
                    # Done with current paragraph
                    for item in current():
                        yield item

                    if has_invisible_bullet(line):
                        line = line[1:]

                    paragraph = line
                    initial_indent = indent

                    # Calculate extra indentation for subsequent lines of this list item
                    i = 1
                    while i < len(line) and line[i] == ' ':
                        i += 1
                    subsequent_indent = indent + " " * i
                else:
                    if not paragraph:
                        # Start a new paragraph
                        paragraph = line
                        initial_indent = indent
                        subsequent_indent = indent
                    else:
                        # Add to current paragraph
                        paragraph = paragraph + ' ' + line

        for item in current():
            yield item

    def wrapped_paragraphs(text, width):
        """Yields each line of each paragraph of text after wrapping them on
        'width' number of columns.

        :param text: The text to yield wrapped lines of
        :param width: The width of the wrapped output
        """
        if not text:
            return

        width = max(width, 1)

        for paragraph, initial_indent, subsequent_indent in paragraphs(text):
            wrapper = TextWrapper(width,
                                  initial_indent=initial_indent,
                                  subsequent_indent=subsequent_indent)
            w = wrapper.wrap(paragraph)
            for line in w:
                yield line
            if len(w) == 0:
                # Preserve blank separator paragraphs.
                yield ""

    cols, _ = get_terminal_size()
    for line in wrapped_paragraphs(self.DESCRIPTION_MORE, cols):
        print(line)

    m = six.getfullargspec(self.main)
    tailargs = m.args[1:]  # skip self

    # Show defaulted positionals as "[name=value]".
    if m.defaults:
        for i, d in enumerate(reversed(m.defaults)):
            tailargs[-i - 1] = "[{0}={1}]".format(tailargs[-i - 1], d)
    if m.varargs:
        tailargs.append("{0}...".format(m.varargs, ))
    tailargs = " ".join(tailargs)

    with self.COLOR_USAGE:
        print(T_("Usage:"))
        if not self.USAGE:
            if self._subcommands:
                self.USAGE = T_(" {progname} [SWITCHES] [SUBCOMMAND [SWITCHES]] {tailargs}\n")
            else:
                self.USAGE = T_(" {progname} [SWITCHES] {tailargs}\n")
        print(self.USAGE.format(progname=colors.filter(self.PROGNAME),
                                tailargs=tailargs))

    # Bucket switch infos by their group name.
    by_groups = {}
    for si in self._switches_by_func.values():
        if si.group not in by_groups:
            by_groups[si.group] = []
        by_groups[si.group].append(si)

    def switchs(by_groups, show_groups):
        # Yields (switch_info, rendered_prefix, group_color) per switch;
        # optionally prints the group headers as a side effect.
        for grp, swinfos in sorted(by_groups.items(), key=lambda item: item[0]):
            if show_groups:
                lgrp = T_(grp) if grp in _switch_groups else grp
                print(self.COLOR_GROUPS[grp] | lgrp + ':')
            for si in sorted(swinfos, key=lambda si: si.names):
                swnames = ", ".join(("-" if len(n) == 1 else "--") + n
                                    for n in si.names
                                    if n in self._switches_by_name
                                    and self._switches_by_name[n] == si)
                if si.argtype:
                    if hasattr(si.argtype, '__name__'):
                        typename = si.argtype.__name__
                    else:
                        typename = str(si.argtype)
                    argtype = " {0}:{1}".format(si.argname.upper(), typename)
                else:
                    argtype = ""
                prefix = swnames + argtype
                yield si, prefix, self.COLOR_GROUPS[grp]
            if show_groups:
                print("")

    # First pass (no printing) just measures the widest switch prefix.
    sw_width = max(len(prefix)
                   for si, prefix, color in switchs(by_groups, False)) + 4
    description_indent = " {0}{1}{2}"
    wrapper = TextWrapper(width=max(cols - min(sw_width, 60), 50) - 6)
    indentation = "\n" + " " * (cols - wrapper.width)

    for switch_info, prefix, color in switchs(by_groups, True):
        help = switch_info.help  # @ReservedAssignment
        if switch_info.list:
            help += T_("; may be given multiple times")
        if switch_info.mandatory:
            help += T_("; required")
        if switch_info.requires:
            help += T_("; requires {0}").format(", ".join(
                (("-" if len(switch) == 1 else "--") + switch)
                for switch in switch_info.requires))
        if switch_info.excludes:
            help += T_("; excludes {0}").format(", ".join(
                (("-" if len(switch) == 1 else "--") + switch)
                for switch in switch_info.excludes))

        msg = indentation.join(
            wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))

        if len(prefix) + wrapper.width >= cols:
            padding = indentation
        else:
            padding = " " * max(cols - wrapper.width - len(prefix) - 4, 1)
        print(description_indent.format(color | prefix, padding, color | msg))

    if self._subcommands:
        gc = self.COLOR_GROUPS["Subcommands"]
        print(gc | T_("Sub-commands:"))
        for name, subcls in sorted(self._subcommands.items()):
            with gc:
                subapp = subcls.get()
                doc = subapp.DESCRIPTION if subapp.DESCRIPTION else getdoc(subapp)
                if self.SUBCOMMAND_HELPMSG:
                    help = doc + "; " if doc else ""  # @ReservedAssignment
                    help += self.SUBCOMMAND_HELPMSG.format(
                        parent=self.PROGNAME, sub=name)
                else:
                    help = doc if doc else ""  # @ReservedAssignment

                msg = indentation.join(
                    wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))

                if len(name) + wrapper.width >= cols:
                    padding = indentation
                else:
                    padding = " " * max(cols - wrapper.width - len(name) - 4, 1)
                if colors.contains_colors(subcls.name):
                    bodycolor = colors.extract(subcls.name)
                else:
                    bodycolor = gc

                print(description_indent.format(
                    subcls.name, padding, bodycolor | colors.filter(msg)))
# License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from __future__ import absolute_import, print_function, unicode_literals r"""This module contains code for managing clobbering of the tree.""" import errno import os import subprocess import sys from mozfile.mozfile import remove as mozfileremove from textwrap import TextWrapper CLOBBER_MESSAGE = "".join([ TextWrapper().fill(line) + "\n" for line in """ The CLOBBER file has been updated, indicating that an incremental build since \ your last build will probably not work. A full/clobber build is required. The reason for the clobber is: {clobber_reason} Clobbering can be performed automatically. However, we didn't automatically \ clobber this time because: {no_reason} The easiest and fastest way to clobber is to run: $ mach clobber
def help(self):  # @ReservedAssignment
    """Prints this help message and quits.

    Renders the program version, DESCRIPTION, a usage line built from
    main()'s signature, the grouped switch table, and the sub-command table.
    """
    if self._get_prog_version():
        self.version()
        print("")
    if self.DESCRIPTION:
        print(self.DESCRIPTION.strip() + '\n')

    m = six.getfullargspec(self.main)
    tailargs = m.args[1:]  # skip self

    # Show defaulted positionals as "[name=value]".
    if m.defaults:
        for i, d in enumerate(reversed(m.defaults)):
            tailargs[-i - 1] = "[%s=%r]" % (tailargs[-i - 1], d)
    if m.varargs:
        tailargs.append("%s..." % (m.varargs, ))
    tailargs = " ".join(tailargs)

    with self.COLOR_USAGE:
        print("Usage:")
        if not self.USAGE:
            if self._subcommands:
                self.USAGE = " %(progname)s [SWITCHES] [SUBCOMMAND [SWITCHES]] %(tailargs)s\n"
            else:
                self.USAGE = " %(progname)s [SWITCHES] %(tailargs)s\n"
        print(self.USAGE % {
            "progname": colors.filter(self.PROGNAME),
            "tailargs": tailargs
        })

    # Bucket switch infos by their group name.
    by_groups = {}
    for si in self._switches_by_func.values():
        if si.group not in by_groups:
            by_groups[si.group] = []
        by_groups[si.group].append(si)

    def switchs(by_groups, show_groups):
        # Yields (switch_info, rendered_prefix, group_color) per switch;
        # optionally prints group headers as a side effect.
        for grp, swinfos in sorted(by_groups.items(),
                                   key=lambda item: item[0]):
            if show_groups:
                print(self.COLOR_GROUPS[grp] | grp)
            for si in sorted(swinfos, key=lambda si: si.names):
                swnames = ", ".join(("-" if len(n) == 1 else "--") + n
                                    for n in si.names
                                    if n in self._switches_by_name
                                    and self._switches_by_name[n] == si)
                if si.argtype:
                    if isinstance(si.argtype, type):
                        typename = si.argtype.__name__
                    else:
                        typename = str(si.argtype)
                    argtype = " %s:%s" % (si.argname.upper(), typename)
                else:
                    argtype = ""
                prefix = swnames + argtype
                yield si, prefix, self.COLOR_GROUPS[grp]
            if show_groups:
                print("")

    # First pass (no printing) only measures the widest switch prefix.
    sw_width = max(len(prefix)
                   for si, prefix, color in switchs(by_groups, False)) + 4
    cols, _ = get_terminal_size()
    description_indent = " %s%s%s"
    wrapper = TextWrapper(width=max(cols - min(sw_width, 60), 50) - 6)
    indentation = "\n" + " " * (cols - wrapper.width)

    for si, prefix, color in switchs(by_groups, True):
        help = si.help  # @ReservedAssignment
        if si.list:
            help += "; may be given multiple times"
        if si.mandatory:
            help += "; required"
        if si.requires:
            help += "; requires %s" % (", ".join(
                (("-" if len(s) == 1 else "--") + s) for s in si.requires))
        if si.excludes:
            help += "; excludes %s" % (", ".join(
                (("-" if len(s) == 1 else "--") + s) for s in si.excludes))

        msg = indentation.join(
            wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))

        if len(prefix) + wrapper.width >= cols:
            padding = indentation
        else:
            padding = " " * max(cols - wrapper.width - len(prefix) - 4, 1)
        print(description_indent % (color | prefix, padding, color | msg))

    if self._subcommands:
        gc = self.COLOR_GROUPS["Subcommands"]
        print(gc | "Subcommands:")
        for name, subcls in sorted(self._subcommands.items()):
            with gc:
                subapp = subcls.get()
                doc = subapp.DESCRIPTION if subapp.DESCRIPTION else getdoc(subapp)
                if self.SUBCOMMAND_HELPMSG:
                    help = doc + "; " if doc else ""  # @ReservedAssignment
                    help += self.SUBCOMMAND_HELPMSG.format(
                        parent=self.PROGNAME, sub=name)
                else:
                    help = doc if doc else ""  # @ReservedAssignment

                msg = indentation.join(
                    wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))

                if len(name) + wrapper.width >= cols:
                    padding = indentation
                else:
                    padding = " " * max(cols - wrapper.width - len(name) - 4, 1)
                if colors.contains_colors(subcls.name):
                    bodycolor = colors.extract(subcls.name)
                else:
                    bodycolor = gc

                print(description_indent %
                      (subcls.name, padding, bodycolor | colors.filter(msg)))
def __init__(self):
    """Initialize with an empty line buffer and a default-width TextWrapper."""
    self._wrapper = TextWrapper()
    self._lines = []
def get_wrapper(cls):
    """Return a TextWrapper configured for 79-column output."""
    wrap_width = 79
    return TextWrapper(width=wrap_width)
def _wrap(self, indent, s, width=70):
    """Wrap *s* to *width* columns, indenting continuation lines by *indent* spaces."""
    continuation = " " * indent
    wrapped = TextWrapper(width=width, subsequent_indent=continuation).wrap(s)
    return '\n'.join(wrapped)
class StreamWatcherListener(tweepy.StreamListener):
    """Tweepy stream listener that shows incoming tweets and collects a
    manual YES/NO/NEUTRAL/IGNORE/BLOCK label for each one, feeding labelled
    tweets to the classifier's training set.
    """

    # Wrapper used to display tweet text in the console.
    status_wrapper = TextWrapper(width=60, initial_indent=' ', subsequent_indent=' ')

    def __init__(self, db):
        # db: handle passed through to the classifier module (presumably a
        # MongoDB connection — confirm against caller).
        tweepy.StreamListener.__init__(self)
        self.db = db

    def on_status(self, status):
        try:
            # This will prevent replies and retweets from being submitted to consideration
            if (not status.retweeted) and ('RT @' not in status.text) and (not status.text.startswith("@")):
                print(self.status_wrapper.fill(status.text))
                print('\n %s %s via %s\n' % (status.author.screen_name, status.created_at, status.source))
                self.processTweet(status.text,status.author.screen_name,status.author.id)
                # Tweet should now be submitted for consideration of the user
                # User can select POSITIVE, NEGATIVE, NEUTRAL(??) or BLOCK (to disable tracking of the user who wrote
                # the tweet)
        except KeyboardInterrupt:
            # Catch any unicode errors while printing to console
            # and just ignore them to avoid breaking application.
            # NOTE(review): the comment says "unicode errors" but the handler
            # catches KeyboardInterrupt — likely the wrong exception type.
            pass

    def on_error(self, status_code):
        print('An error has occured! Status code = %s' % status_code)
        return True  # keep stream alive

    def on_timeout(self):
        print('Snoozing Zzzzzz')

    # This method will process a tweet, collect user input and have it all ready to pass it to the database
    def processTweet(self, tweetText, tweetUserScreenName, tweetUserID):
        validReplies = ['YES', 'NO', 'NEUTRAL', 'IGNORE','BLOCK']
        validDecision = False
        # Loop until the operator enters one of the valid replies.
        while not (validDecision):
            print("TEXT: " + tweetText)
            clearText = self.cleanupTweetText(tweetText)
            print("CLEAR TEXT:" + clearText)
            # NOTE(review): the next line is garbled/redacted in the source
            # ("******") and is not valid Python; `prediction` is also never
            # defined in this class — recover the original statement before use.
            print("USER: "******"PREDICTION:" + prediction)
            decision = input("Do you like this tweet?")
            decision = decision.upper()
            decision = decision.strip()
            if decision in validReplies:
                validDecision = True
                if (decision != 'BLOCK')and(decision != 'IGNORE'):
                    tweetWithOpinion = self.__extractWords__(clearText, decision)
                    # Add entry to MongoDB
                    print(tweetWithOpinion[0], tweetWithOpinion[1])
                    classifier.addTrainingTweet(self.db, tweetWithOpinion)
                    print(classifier.get_tweet(self.db))
                    # Retrain the classifier with the new information
                    classifier.runClassifier(self.db)
            else:
                print("ERROR: Invalid reply" + decision)

    # Split tweet's text into words equal or bigger than three words
    def __extractWords__(self, clearText, decision):
        # Keep lowercase words of length >= 3, paired with the operator's label.
        wordsFiltered = [e.lower() for e in clearText.split() if len(e) >= 3]
        result = (wordsFiltered, decision)
        return result

    # This removes any hash signs and links from a Tweets text
    def cleanupTweetText(self, tweetText):
        # NOTE(review): re.sub's 4th positional argument is `count`, not
        # `flags` — re.UNICODE (=32) here limits substitutions to 32 rather
        # than enabling the flag. Should be flags=re.UNICODE.
        clearText = ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",tweetText, re.UNICODE).split())
        return clearText

    # This removes a user from the file and list of users being followed
    def stopFollowing(self, friendsFile):
        pass
from textwrap import TextWrapper presyms = [" 𝑡¹", " "] tw = TextWrapper( width=120, tabsize=2, # initial_indent=presyms[0], subsequent_indent=presyms[1], drop_whitespace=False, ) def implies_iterable_is_str_len1(v): if (v[0] in ("{", "[", "(") and isinstance(v[0], str) and len(v) > 1 and len(v[0]) == 1): return True else: return False header = '' monostr = ( '__init__:(00023|00381)return <= (gen_extractor_classes ' ' <class youtube_dl.extractor.abc.ABCIE>\n' '<class youtube_dl.extractor.abc.ABCIViewIE>,\n ' ' <class youtube_dl.extractor.abcnews.AbcNewsIE>,\n ') monostr = ('(["<class \'youtube_dl.extractor.abc.ABCIE\'>", ' '"<class \'youtube_dl.extractor.abc.ABCIViewIE\'>", ' '"<class \'youtube_dl.extractor.abcnews.AbcNewsIE\'>", ' '"<class \'youtube_dl.extractor.abcnews.AbcNewsVideoIE\'>"])') monostr = ('["<class \'youtube_dl.extractor.abc.ABCIE\'>", ' '"<class \'youtube_dl.extractor.abc.ABCIViewIE\'>", '
class Player:
    """Tracks one connected player: their socket, interpreter, account and
    character, plus the prompt that is re-sent after each batch of output."""

    # Shared wrapper that reflows all outgoing text at 80 columns; embedded
    # whitespace is preserved and hyphenated words are never split.
    wrapper = TextWrapper(width=80, replace_whitespace=False,
                          initial_indent='', break_on_hyphens=False)

    def __init__(self, socket):
        self.socket = socket
        self.socket.player = self
        self.interpreter = None
        self.account = None
        self.character = None
        self.promptInBuffer = False
        self.defaultPrompt = "\n\n> "
        self.prompt = self.defaultPrompt

    def quit(self):
        """Close the underlying socket connection."""
        self.socket.close()

    def interpret(self):
        """Interpret pending input (if non-blank) and re-arm the prompt."""
        command = self.read().strip()
        # Pure whitespace input is skipped; the player still gets a fresh prompt.
        if command:
            self.interpreter.interpret(command)
        if not self.hasPromptInBuffer():
            self.write(self.getPrompt(), wrap=False)
            self.setPromptInBuffer(True)

    def hasInput(self):
        """Report whether the socket has queued input."""
        return self.socket.hasInput()

    def write(self, text, wrap=True):
        """Queue *text* for output; wrapped writes each end in a newline."""
        if wrap:
            # fill() drops trailing whitespace, so re-append the newline that
            # makes each write() call its own line.
            text = self.wrapper.fill(str(text)) + "\n"
        # Output is batched on the socket and flushed on the next cycle.
        self.socket.appendToOutputBuffer(text)
        return self

    def read(self):
        """Pop the next queued input line, or None when nothing is queued."""
        return self.socket.popInput() if self.socket.inputQueue else None

    def hasPromptInBuffer(self):
        return self.promptInBuffer

    def setPromptInBuffer(self, promptInBuffer):
        self.promptInBuffer = promptInBuffer
        return self

    def getPrompt(self):
        return self.prompt

    def setPrompt(self, prompt):
        self.prompt = "\n\n" + prompt
        self.setPromptInBuffer(False)
        return self

    def setDefaultPrompt(self):
        self.prompt = self.defaultPrompt
        self.setPromptInBuffer(False)
        return self

    def disableEcho(self):
        # Echo control at the socket layer is currently switched off.
        #self.socket.disableEcho()
        pass

    def enableEcho(self):
        #self.socket.enableEcho()
        pass
def setUp(self): self.wrapper = TextWrapper() self.text = '''\
def wrap(payload):
    """Wrap *payload* to 70 columns, indenting every output line by one space."""
    pad = ' '
    wrapped_lines = TextWrapper(width=70, initial_indent=pad,
                                subsequent_indent=pad).wrap(payload)
    return u'\n'.join(wrapped_lines)
def setUp(self):
    """Create a fresh 45-column TextWrapper fixture for each test."""
    fixture_width = 45
    self.wrapper = TextWrapper(width=fixture_width)
import yt

# Docs helper: renders every derived-quantity function's docstring into a
# reST include file for the documentation build.
ds = yt.load("RD0005-mine/RedshiftOutput0005")

output = open("source/analyzing/_dq_docstrings.inc", "w")

# reST template for one derived-quantity entry.
template = """
.. function:: %(funcname)s%(sig)s:

   (This is a proxy for :func:`~%(funcproxy)s`.)
%(docstring)s
"""

tw = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=60)

def write_docstring(f, name, func):
    """Render one derived-quantity function as a reST entry into *f*."""
    # NOTE(review): `inspect` is not imported in this chunk — presumably
    # imported elsewhere in the file; confirm.
    docstring = inspect.getdoc(func)
    funcname = name
    # NOTE(review): inspect.getargspec/formatargspec were removed in
    # Python 3.11 — this script assumes an older interpreter.
    sig = inspect.formatargspec(*inspect.getargspec(func))
    # The 'data' parameter is an implementation detail; hide it from docs.
    sig = sig.replace("data, ", "")
    sig = sig.replace("(data)", "()")
    funcproxy = f"yt.data_objects.derived_quantities.{func.__name__}"
    # Indent every docstring line so it nests under the reST directive.
    docstring = "\n".join([" %s" % line for line in docstring.split("\n")])
    f.write(template % dict(funcname=funcname, sig=sig,
                            funcproxy=funcproxy, docstring=docstring))
    # docstring = "\n".join(tw.wrap(docstring))))
from pyfaidx import Fasta, FastaVariant
import pybedtools as pb
from textwrap import TextWrapper
import subprocess

# Wrap FASTA sequence lines at 50 characters when writing output.
wr = TextWrapper()
wr.width = 50

# Reference genome (mm9) and the VCF with variant calls to fold in.
fn = '/home/sergio/media/NAS4/PFlab/TLX3_project/WES-seq/references/mouse_mm9_reference_genome.fa'
vn = 'WES_TLX3.vcf.gz'

fa = Fasta(fn)
bd = pb.BedTool('test.bed')

# Extract each BED interval from the reference into a small FASTA file.
inf = 'in_py.fa'
with open(inf, 'w') as fp:
    for it in bd:
        rg = fa[it.chrom][it.start:it.end]
        fp.write('>' + rg.longname + '\n' + wr.fill(rg.seq) + '\n')

# Apply the VCF variants to the extracted regions with bcftools consensus.
outf = 'out_py.fa'
cons_fa = "bcftools consensus -f {} {} -o {}".format(inf, vn, outf)
print('Running process ........ \n')
print(cons_fa)
# NOTE(review): the command string is interpolated into a shell via
# 'bash -c'; paths with spaces or metacharacters would break it.
subprocess.call(['bash', '-c', cons_fa])

fv = Fasta(outf)

## Only SNP
def _init_project_interactive(self, project_name, min_version=None, element_path="elements"):
    """Interactively collect project name, minimum BuildStream version and
    element path for a new project, validating each answer.

    Returns a (project_name, min_version, element_path) tuple. All prompts
    go to stderr (err=True) so stdout stays clean.
    """
    bst_major, bst_minor = utils._get_bst_api_version()
    if min_version is None:
        # Default to the installed BuildStream API version.
        min_version = "{}.{}".format(bst_major, bst_minor)

    # Validators for click.prompt: each raises UsageError (so click re-asks)
    # on invalid input and returns the input unchanged when valid.
    def project_name_proc(user_input):
        try:
            node._assert_symbol_name(user_input, "project name")
        except LoadError as e:
            message = "{}\n\n{}\n".format(e, e.detail)
            raise UsageError(message) from e
        return user_input

    def min_version_proc(user_input):
        try:
            self._assert_min_version(user_input)
        except AppError as e:
            raise UsageError(str(e)) from e
        return user_input

    def element_path_proc(user_input):
        try:
            self._assert_element_path(user_input)
        except AppError as e:
            raise UsageError(str(e)) from e
        return user_input

    w = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=79)

    # Collect project name
    click.echo("", err=True)
    click.echo(
        self._content_profile.fmt("Choose a unique name for your project"),
        err=True)
    click.echo(
        self._format_profile.fmt("-------------------------------------"),
        err=True)
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "The project name is a unique symbol for your project and will be used "
                "to distinguish your project from others in user preferences, namespacing "
                "of your project's artifacts in shared artifact caches, and in any case where "
                "BuildStream needs to distinguish between multiple projects."
            )),
        err=True,
    )
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "The project name must contain only alphanumeric characters, "
                "may not start with a digit, and may contain dashes or underscores."
            )),
        err=True,
    )
    click.echo("", err=True)
    project_name = click.prompt(self._content_profile.fmt("Project name"),
                                value_proc=project_name_proc,
                                err=True)
    click.echo("", err=True)

    # Collect minimum BuildStream version
    click.echo(self._content_profile.fmt(
        "Select the minimum required BuildStream version for your project"
    ), err=True)
    click.echo(self._format_profile.fmt(
        "----------------------------------------------------------------"
    ), err=True)
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "The minimum version is used to provide users who build your project "
                "with a helpful error message in the case that they do not have a recent "
                "enough version of BuildStream to support all the features which your "
                "project uses.")),
        err=True,
    )
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "The lowest version allowed is {major}.0, the currently installed version of BuildStream is {major}.{minor}"
                .format(major=bst_major, minor=bst_minor))),
        err=True,
    )
    click.echo("", err=True)
    min_version = click.prompt(
        self._content_profile.fmt("Minimum version"),
        value_proc=min_version_proc,
        default=min_version,
        err=True,
    )
    click.echo("", err=True)

    # Collect element path
    click.echo(self._content_profile.fmt("Select the element path"),
               err=True)
    click.echo(self._format_profile.fmt("-----------------------"),
               err=True)
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "The element path is a project subdirectory where element .bst files are stored "
                "within your project.")),
        err=True,
    )
    click.echo("", err=True)
    click.echo(
        self._detail_profile.fmt(
            w.fill(
                "Elements will be displayed in logs as filenames relative to "
                "the element path, and similarly, dependencies must be expressed as filenames "
                "relative to the element path.")),
        err=True,
    )
    click.echo("", err=True)
    element_path = click.prompt(self._content_profile.fmt("Element path"),
                                value_proc=element_path_proc,
                                default=element_path,
                                err=True)

    return (project_name, min_version, element_path)
def __init__(cls, name, bases, dict):
    """Metaclass initializer: harvest `IndexedCollectable` class attributes
    into collection templates and expand the class docstring.

    Note: this is Python 2 code (``iteritems``, ``raise E, msg`` syntax).

    Parameters
    ----------
    name : str
      Name of the class
    bases : iterable
      Base classes
    dict : dict
      Attributes.
    """
    if __debug__:
        debug(
            "COLR",
            "AttributesCollector call for %s.%s, where bases=%s, dict=%s ",
            (cls, name, bases, dict))

    super(AttributesCollector, cls).__init__(name, bases, dict)

    # Gather IndexedCollectable attributes declared on this class, keyed by
    # the collection their type maps to in _known_collections.
    collections = {}
    for name, value in dict.iteritems():
        if isinstance(value, IndexedCollectable):
            baseclassname = value.__class__.__name__
            col = _known_collections[baseclassname][0]
            # XXX should we allow to throw exceptions here?
            if col not in collections:
                collections[col] = {}
            collections[col][name] = value
            # and assign name if not yet was set
            if value.name is None:
                value.name = name
            # !!! We do not keep copy of this attribute static in the class.
            # Due to below traversal of base classes, we should be
            # able to construct proper collections even in derived classes
            delattr(cls, name)

    # XXX can we first collect parent's ca and then populate with ours?
    # TODO

    # Merge in the collection templates inherited from base classes that
    # were themselves built by this metaclass.
    for base in bases:
        if hasattr(base, "__class__") and \
           base.__class__ == AttributesCollector:
            # TODO take care about overriding one from super class
            # for state in base.ca:
            #    if state[0] =
            newcollections = base._collections_template
            if len(newcollections) == 0:
                continue
            if __debug__:  # XXX RF: and "COLR" in debug.active:
                debug("COLR", "Collect collections %s for %s from %s",
                      (newcollections, cls, base))
            for col, collection in newcollections.iteritems():
                if col in collections:
                    collections[col].update(collection)
                else:
                    collections[col] = collection

    if __debug__:
        debug(
            "COLR", "Creating ConditionalAttributesCollection template %s "
            "with collections %s", (cls, collections.keys()))

    # if there is an explicit
    if hasattr(cls, "_ATTRIBUTE_COLLECTIONS"):
        for col in cls._ATTRIBUTE_COLLECTIONS:
            if not col in _col2class:
                raise ValueError, \
                    "Requested collection %s is unknown to collector" % \
                    col
            if not col in collections:
                collections[col] = None

    # TODO: check on conflict in names of Collections' items! since
    # otherwise even order is not definite since we use dict for
    # collections.
    # XXX should we switch to tuple?

    for col, colitems in collections.iteritems():
        # so far we collected the collection items in a dict, but the new
        # API requires to pass a _list_ of collectables instead of a dict.
        # So, whenever there are items, we pass just the values of the dict.
        # There is no information last, since the keys of the dict are the
        # name attributes of each collectable in the list.
        if not colitems is None:
            collections[col] = _col2class[col](items=colitems.values())
        else:
            collections[col] = _col2class[col]()

    setattr(cls, "_collections_template", collections)

    #
    # Expand documentation for the class based on the listed
    # parameters an if it is stateful
    #
    # TODO -- figure nice way on how to alter __init__ doc directly...
    textwrapper = TextWrapper(subsequent_indent=" ",
                              initial_indent=" ",
                              width=70)

    # Parameters
    paramsdoc = []
    paramscols = []
    for col in ('params', 'kernel_params'):
        if col in collections:
            paramscols.append(col)
            # lets at least sort the parameters for consistent output
            col_items = collections[col]
            # Sort by declaration order (_instance_index) for stable docs.
            iparams = [(v._instance_index, k)
                       for k, v in col_items.iteritems()]
            iparams.sort()
            paramsdoc += [(col_items[iparam[1]].name,
                           col_items[iparam[1]]._paramdoc())
                          for iparam in iparams]

    # Parameters collection could be taked hash of to decide if
    # any were changed? XXX may be not needed at all?
    setattr(cls, "_paramscols", paramscols)

    # States doc
    cadoc = ""
    if 'ca' in collections:
        # Document the implicit enable_ca/disable_ca constructor arguments.
        paramsdoc += [('enable_ca',
                       "enable_ca : None or list of str\n "
                       "Names of the conditional attributes which should "
                       "be enabled in addition\n to the default ones"),
                      ('disable_ca',
                       "disable_ca : None or list of str\n "
                       "Names of the conditional attributes which should "
                       "be disabled"
                       "")]
        if len(collections['ca']):
            cadoc += '\n'.join(
                ['* ' + x for x in collections['ca'].listing])
            cadoc += "\n\n(Conditional attributes enabled by default suffixed with `+`)"
        if __debug__:
            debug("COLR", "Assigning __cadoc to be %s", (cadoc, ))
        setattr(cls, "_cadoc", cadoc)

    if paramsdoc != "":
        if __debug__ and 'COLR' in debug.active:
            debug("COLR", "Assigning __paramsdoc to be %s", (paramsdoc, ))
        setattr(cls, "_paramsdoc", paramsdoc)

    if len(paramsdoc) or cadoc != "":
        cls.__doc__ = enhanced_doc_string(cls, *bases)
def getMsg( self ):
    # Builds the body of the notification email sent to the submitter of an
    # abstract after the submission has been processed (see the "successfully
    # processed" line below).  Returns the message as one newline-joined
    # string; every section is followed by an empty line for spacing.

    # Primary authors rendered as "Full Name (Affiliation) <email>".
    primary_authors = []
    for auth in self._abstract.getPrimaryAuthorList():
        primary_authors.append("""%s (%s) <%s>"""%(auth.getFullName(),
                                                   auth.getAffiliation(),
                                                   auth.getEmail()) )
    # Co-authors rendered the same way, but the "<email>" part is only
    # appended when an address is actually set.
    co_authors = []
    for auth in self._abstract.getCoAuthorList():
        email = ""
        if auth.getEmail() != "":
            email = " <%s>"%auth.getEmail()
        co_authors.append( """%s (%s)%s"""%(auth.getFullName(),
                                            auth.getAffiliation(), email) )
    # Presenter full names only.
    speakers = []
    for spk in self._abstract.getSpeakerList():
        speakers.append( spk.getFullName() )
    # Track titles, in the order provided by getTrackListSorted().
    tracks = []
    for track in self._abstract.getTrackListSorted():
        tracks.append( """%s"""%track.getTitle() )
    tw = TextWrapper()
    # Greeting, addressed to the submitter.
    msg = [ i18nformat("""_("Dear") %s,""")%self._abstract.getSubmitter().getStraightFullName() ]
    msg.append( "" )
    msg.append( tw.fill(_("The submission of your abstract has been successfully processed.")) )
    msg.append( "" )
    # Disable long-word breaking so the URLs below are not split mid-token.
    tw.break_long_words = False
    msg.append( tw.fill( i18nformat("""_("Abstract submitted"):\n<%s>.""")%urlHandlers.UHUserAbstracts.getURL( self._conf ) ) )
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Status of your abstract"):\n<%s>.""")%urlHandlers.UHAbstractDisplay.getURL( self._abstract ) ) )
    msg.append( "" )
    tw.subsequent_indent = ""
    msg.append( tw.fill( i18nformat("""_("See below a detailed summary of your submitted abstract"):""") ) )
    msg.append( "" )
    # Indent wrapped continuation lines of the summary fields by 3 spaces.
    tw.subsequent_indent = " "*3
    msg.append( tw.fill( i18nformat("""_("Conference"): %s""")%self._conf.getTitle() ) )
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Submitted by"): %s""")%self._abstract.getSubmitter().getFullName() ) )
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Submitted on"): %s""")%self._abstract.getSubmissionDate().strftime( "%d %B %Y %H:%M" ) ) )
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Title"): %s""")%self._abstract.getTitle() ) )
    msg.append( "" )
    # One caption/value pair per configured abstract field.
    # NOTE(review): assumes the trailing blank-line append belongs inside
    # this loop (one blank after each field) — indentation was ambiguous in
    # the reviewed copy; confirm against upstream.
    for f in self._conf.getAbstractMgr().getAbstractFieldsMgr().getFields():
        msg.append( tw.fill(f.getCaption()) )
        msg.append( self._abstract.getField(f.getId()) )
        msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Primary Authors"):""") ) )
    msg += primary_authors
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Co-authors"):""") ) )
    msg += co_authors
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Abstract presenters"):""") ) )
    msg += speakers
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Track classification"):""") ) )
    msg += tracks
    msg.append( "" )
    # Contribution type falls back to a "--not specified--" placeholder.
    ctype = i18nformat("""--_("not specified")--""")
    if self._abstract.getContribType() is not None:
        ctype = self._abstract.getContribType().getName()
    msg.append( tw.fill( i18nformat("""_("Presentation type"): %s""")%ctype) )
    msg.append( "" )
    msg.append( tw.fill( i18nformat("""_("Comments"): %s""")%self._abstract.getComments() ) )
    msg.append( "" )
    return "\n".join( msg )
def info_columns(search):
    """
    Show all columns that match the given search-text.

    :param search:
        String which must be in the column-name, shortcuts or description.
        Case insensitive.

    :return: `None`
    """
    # Guard clause: reject a missing or blank search-text up front.
    if search is None or len(str(search).strip()) == 0:
        print('Please provide a valid search-string for the columns.')
        return

    # All matching is done case-insensitively.
    search = search.lower()

    # Dict with info about all the columns.
    info = load_info_columns()

    # Indent used to align continuation lines of the wrapped lists.
    space_indent = ' '

    # Text-wrappers that keep the dataset list, shortcut list and the
    # description looking nice when they spill onto multiple lines.
    wrapper_datasets = TextWrapper(width=80, break_on_hyphens=False,
                                   break_long_words=False,
                                   initial_indent='Datasets: ',
                                   subsequent_indent=space_indent)
    wrapper_shortcuts = TextWrapper(width=80, break_on_hyphens=False,
                                    break_long_words=False,
                                    initial_indent='Shortcuts: ',
                                    subsequent_indent=space_indent)
    wrapper_descr = TextWrapper(width=80, initial_indent='Description: ',
                                subsequent_indent=space_indent)

    # How many columns matched the search-text.
    num_found = 0

    for column in info:
        # Full column name, e.g. "Shares (Basic)".
        name = column['name']

        # Comma-separated, sorted list of shortcuts, e.g. SHARES_BASIC.
        shortcuts = ', '.join(sorted(column['shortcuts']))

        # Column description.
        descr = column['description'].strip()

        # Skip columns where the search-text occurs in none of the
        # searchable fields.
        haystacks = (name.lower(), shortcuts.lower(), descr.lower())
        if not any(search in text for text in haystacks):
            continue

        num_found += 1

        # Full column-name.
        print('Name: \"{0}\"'.format(name))

        # Shortcuts, aligned by the text-wrapper.
        print('\n'.join(wrapper_shortcuts.wrap(shortcuts)))

        # Whether the data is free or premium.
        print('Premium: ', column['is_premium'])

        # Datasets this column appears in, aligned by the text-wrapper.
        datasets = ', '.join(sorted(column['datasets']))
        print('\n'.join(wrapper_datasets.wrap(datasets)))

        # Description ('-' when empty), aligned by the text-wrapper.
        print('\n'.join(wrapper_descr.wrap(descr if len(descr) > 0 else '-')))

        # Blank line between columns.
        print()

    # Report when nothing matched.
    if num_found == 0:
        print('Search-text \'{0}\' was not found.'.format(search))
def execute(self, force_required=False, env=None, path=None,
            dry_run=False, forward_x11=False):
    """Execute command on remote host.

    Returns False if remote re-invocation is not needed, True if it is
    needed and executes successfully otherwise aborts.

    NOTE(review): Python 2 code — uses the ``print`` statement and
    ``dict.iteritems``.

    Args:
        force_required: Unused in this body — presumably honoured by the
            caller or a sibling method; confirm before removing.
        env: Mapping of NAME -> VALUE forwarded to the remote command as
            ``--env=NAME=VALUE`` options.
        path: Optional list of path components to the remote "cylc"
            executable; when absent the configured executable is used.
        dry_run: If True, return the constructed command list instead of
            running it.
        forward_x11: If True, add "-Y" (X11 forwarding) to the ssh command.
    """
    # Nothing to do when the target is the local host.
    if not self.is_remote:
        return False
    # Local imports keep startup cheap for the non-remote fast path.
    from cylc.cfgspec.globalcfg import GLOBAL_CFG
    from cylc.version import CYLC_VERSION

    name = os.path.basename(self.argv[0])[5:]  # /path/to/cylc-foo => foo

    # Build the remote command
    command = shlex.split(
        GLOBAL_CFG.get_host_item("ssh command", self.host, self.owner))
    if forward_x11:
        command.append("-Y")

    # Assemble the [owner@]host ssh target; default to localhost.
    user_at_host = ""
    if self.owner:
        user_at_host = self.owner + "@"
    if self.host:
        user_at_host += self.host
    else:
        user_at_host += "localhost"
    command.append(user_at_host)

    # Use bash -l?  Instance setting wins; fall back to global config.
    ssh_login_shell = self.ssh_login_shell
    if ssh_login_shell is None:
        ssh_login_shell = GLOBAL_CFG.get_host_item(
            "use login shell", self.host, self.owner)

    # Pass cylc version through.
    command += ["env", "CYLC_VERSION=%s" % CYLC_VERSION]

    if ssh_login_shell:
        # A login shell will always source /etc/profile and the user's bash
        # profile file. To avoid having to quote the entire remote command
        # it is passed as arguments to the bash script.
        command += ["bash", "--login", "-c", "'exec $0 \"$@\"'"]

    # "cylc" on the remote host
    if path:
        command.append(os.sep.join(path + ["cylc"]))
    else:
        command.append(
            GLOBAL_CFG.get_host_item("cylc executable", self.host,
                                     self.owner))
    command.append(name)

    # Forward environment variables as --env options.
    if env is None:
        env = {}
    for var, val in env.iteritems():
        command.append("--env=%s=%s" % (var, val))
    for arg in self.args:
        command.append("'" + arg + "'")
        # above: args quoted to avoid interpretation by the shell,
        # e.g. for match patterns such as '.*' on the command line.

    if cylc.flags.verbose:
        # Wordwrap the command, quoting arguments so they can be run
        # properly from the command line
        command_str = ' '.join(quote(arg) for arg in command)
        print '\n'.join(
            TextWrapper(subsequent_indent='\t').wrap(command_str))

    if dry_run:
        return command

    # Run the remote command; any failure aborts the whole process.
    try:
        popen = subprocess.Popen(command)
    except OSError as exc:
        sys.exit("ERROR: remote command invocation failed %s" % str(exc))
    res = popen.wait()
    if WIFSIGNALED(res):
        sys.exit("ERROR: remote command terminated by signal %d" % res)
    elif res:
        sys.exit("ERROR: remote command failed %d" % res)
    else:
        return True
def info_datasets(dataset=None, show_columns=True):
    """
    Show a list of all available datasets, or show the details for the
    given dataset.

    :param dataset:
        String with the exact name of a dataset.
        If None then show a list of all available datasets.

    :param show_columns:
        Boolean whether to show the columns of the given dataset.

    :return: `None`
    """
    # Dict with info about all the datasets.
    info = load_info_datasets()

    if dataset is None:
        # No dataset given: just list the names of all available datasets.
        names = ', '.join(sorted(list(info)))

        # Wrapper that keeps the list looking nice on multiple lines.
        wrapper_all = TextWrapper(width=80, break_on_hyphens=False,
                                  break_long_words=False,
                                  initial_indent='All datasets: ',
                                  subsequent_indent=' ')
        print('\n'.join(wrapper_all.wrap(names)))
        return

    # Dataset names are matched in lower-case.
    dataset = dataset.lower()

    # Info for this particular dataset.
    x = info.get(dataset)
    if x is None:
        # Unknown dataset: report and stop.
        print('Dataset \'{0}\' not found.'.format(dataset))
        return

    # Dataset name.
    print('Dataset: ', dataset)

    # Indent used to align continuation lines of the wrapped lists.
    space_indent = ' '

    # Wrappers for the variant and market lists.
    wrapper_variants = TextWrapper(width=80, initial_indent='Variants: ',
                                   subsequent_indent=space_indent)
    wrapper_markets = TextWrapper(width=80, initial_indent='Markets: ',
                                  subsequent_indent=space_indent)

    def _print_wrapped_list(wrapper, values):
        # Print a sorted value-list through the wrapper, '-' when empty.
        text = ', '.join(values) if len(values) > 0 else '-'
        print('\n'.join(wrapper.wrap(text)))

    # Variants and markets available for this dataset.
    _print_wrapped_list(wrapper_variants, sorted(x['variants']))
    _print_wrapped_list(wrapper_markets, sorted(x['markets']))

    if show_columns:
        # Header for the column listing.
        print(
            'Columns: (The * marks data that requires a paid subscription)'
        )

        # Wrapper that keeps each column line looking nice when long.
        wrapper_columns = TextWrapper(width=80, initial_indent='',
                                      subsequent_indent=' ')

        for column in x['columns']:
            # '*' marks premium data, '-' marks free data.
            premium_mark = '*' if column['is_premium'] else '-'

            # Comma-separated, sorted Python shortcuts.
            shortcuts = ', '.join(sorted(column['shortcuts']))

            # Mark, full column-name and shortcuts on (possibly wrapped)
            # lines.
            line = '{0} \"{1}\" {2}'.format(premium_mark, column['name'],
                                            shortcuts)
            print('\n'.join(wrapper_columns.wrap(line)))
# Setup file for the tutorial # # # Required modules # import os from textwrap import TextWrapper from util import get_script_name, write_option, write_section max_chars_per_line = 100 # utility for pretty printing text_wrapper = TextWrapper(replace_whitespace=True, drop_whitespace=False, width=max_chars_per_line) def pretty_print(text, new_lines=0): print(text_wrapper.fill(text) + '\n' * new_lines) # # Setup # script_name = get_script_name() pretty_print('=== This is {} ===\n'.format(script_name), 1) # get this path this_path = os.getcwd() # make an output path for the output of the tutorial tasks os.makedirs(os.path.join(this_path, 'output'), exist_ok=True)
def wrap_text(text, width): wrapper = TextWrapper(width=width, break_long_words=False, break_on_hyphens=False) return chain(*[wrapper.wrap(l) for l in text.split("\n")])
def message_box(message, width=79, padding=3, print_callable=print): """ Prints a message inside a box. Parameters ---------- message : unicode Message to print. width : int, optional Message box width. padding : unicode, optional Padding on each sides of the message. print_callable : callable, optional Callable used to print the message box. Returns ------- bool Definition success. Examples -------- >>> message = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit, ' ... 'sed do eiusmod tempor incididunt ut labore et dolore magna ' ... 'aliqua.') >>> message_box(message, width=75) =========================================================================== * * * Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do * * eiusmod tempor incididunt ut labore et dolore magna aliqua. * * * =========================================================================== True >>> message_box(message, width=60) ============================================================ * * * Lorem ipsum dolor sit amet, consectetur adipiscing * * elit, sed do eiusmod tempor incididunt ut labore et * * dolore magna aliqua. * * * ============================================================ True >>> message_box(message, width=75, padding=16) =========================================================================== * * * Lorem ipsum dolor sit amet, consectetur * * adipiscing elit, sed do eiusmod tempor * * incididunt ut labore et dolore magna * * aliqua. * * * =========================================================================== True """ ideal_width = width - padding * 2 - 2 def inner(text): """ Formats and pads inner text for the message box. 
""" return '*{0}{1}{2}{0}*'.format( ' ' * padding, text, (' ' * (width - len(text) - padding * 2 - 2))) print_callable('=' * width) print_callable(inner('')) wrapper = TextWrapper(width=ideal_width, break_long_words=False, replace_whitespace=False) lines = [wrapper.wrap(line) for line in message.split("\n")] lines = [' ' if len(line) == 0 else line for line in lines] for line in chain(*lines): print_callable(inner(line.expandtabs())) print_callable(inner('')) print_callable('=' * width) return True
def _write_comment(node, out, indent): wrapper = TextWrapper(initial_indent='# ', subsequent_indent=indent * ' ' + '# ', break_long_words=False) out.write(wrapper.fill(node.value))