def format_search_result(number, result):
    """Render one search result as a list of printable lines.

    :param number: ordinal displayed before the result name.
    :param result: dict with at least 'name'; optional keys 'categories',
        'authors', 'last_updated', 'stage', 'summary'.
    :return: list of strings: heading, blank line, wrapped summary lines
        (if any), and a trailing blank line.
    """
    term_width = terminal_size()[0]
    # Leave an 8-column margin so wrapped text stays clear of the edge.
    if term_width > 8:
        term_width = term_width - 8
    wrapper = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=term_width)
    heading = "%s) %s " % (number, result['name'])
    if 'categories' in result and result['categories']:
        heading += "[%s] " % (", ".join(result['categories']),)
    if 'authors' in result and result['authors']:
        heading += "(author: %s) " % (", ".join(result['authors']),)
    # Right-hand column: update date and development stage.
    right = ""
    if 'last_updated' in result:
        right += " Updated %s" % (result['last_updated'])
    if 'stage' in result:
        right += " [%s]" % (result['stage'],)
    # Pad so the right-hand info ends at the (margin-adjusted) terminal edge.
    heading += right.rjust(term_width - len(heading))
    lines = [heading, '']
    if 'summary' in result and result['summary']:
        lines += wrapper.wrap(result['summary'])
    lines.append('')
    return lines
def get_text(elements, itemize=False):
    """Flatten lxml elements into wrapped plain-text paragraphs.

    Inline markup is rewritten in place: footnote simparas are
    parenthesised, varname/parameter are marked with ``*``, and a set of
    purely structural tags is stripped.  Each element becomes one
    paragraph.

    :param elements: iterable of lxml elements (mutated in place).
    :param itemize: when True, prefix each paragraph as a bullet item.
    :return: paragraphs joined by blank lines.
    """
    paragraphs = []
    highlight_elements = ['varname', 'parameter']
    strip_elements = [
        'returnvalue', 'command', 'link', 'footnote', 'simpara',
        'footnoteref', 'function'
    ] + highlight_elements
    for element in elements:
        # put "Since MPD version..." in parentheses
        etree.strip_tags(element, "application")
        for e in element.xpath("footnote/simpara"):
            e.text = "(" + e.text.strip() + ")"
        # Emphasise variable/parameter names before their tags are stripped.
        for e in element.xpath("|".join(highlight_elements)):
            e.text = "*" + e.text.strip() + "*"
        etree.strip_tags(element, *strip_elements)
        if itemize:
            initial_indent = " * "
            subsequent_indent = " "
        else:
            initial_indent = " "
            subsequent_indent = " "
        wrapper = TextWrapper(subsequent_indent=subsequent_indent,
                              initial_indent=initial_indent)
        # Collapse newlines and whitespace runs to single spaces before wrapping.
        text = element.text.replace("\n", " ").strip()
        text = re.subn(r'\s+', ' ', text)[0]
        paragraphs.append(wrapper.fill(text))
    return "\n\n".join(paragraphs)
def __classrepr__(cls):
    """Build a constructor-style repr of *cls* listing its required,
    conditional, and optional fields (only fields lacking class-level
    default values are shown).

    Note: ipython3 doesn't seem to render class reprs correctly -- may be a
    bug in the beta version I used. Looks fine in python3 and ipython2.
    """
    def field_items(field_list):
        # Pair each field name with its class-level value ('' when unset).
        return list((attr, getattr(cls, attr, "")) for attr in field_list)

    def format_fields(field_list):
        s = ", ".join(
            "{field}={value!r}".format(field=field.lower(), value=value)
            for field, value in field_items(field_list)
            if not value  # show only fields without default values
        )
        return s + "," if s else "# <none>"

    textwrapper = TextWrapper(initial_indent=" " * 4, subsequent_indent=" " * 4)
    l = []
    l.append("\n{cls.__name__}(".format(cls=cls))
    l.append(" # Required fields")
    l.append(textwrapper.fill(format_fields(cls._required)))
    if getattr(cls, "_conditional", None):
        for label, fields in cls._conditional.items():
            l.append("\n # Required if using " + label)
            l.append(textwrapper.fill(format_fields(fields)))
    if cls._discretionary_data_allowed is True:
        l.append("\n " "# Customer-defined discretionary data may also be included.")
    l.append("\n # Optional fields")
    l.append(textwrapper.fill(format_fields(cls._optional)))
    l.append(")\n")
    return "\n".join(l)
def text(self, text, offset=None, horiz=None, vert=None, *,
         angle=None, font=None, colour=None, width=None):
    """Emit an SVG <text> element, optionally word-wrapped.

    :param text: a string, or (only when width is None) a sequence of
        segment dicts with 'text' and optional boolean 'overline' keys.
    :param offset: translation, handled by self._offset().
    :param horiz: one of self.CENTRE/LEFT/RIGHT, or None.
    :param vert: one of self.CENTRE/TOP/BOTTOM, or None.
    :param angle: rotation in degrees (sign adjusted by self.flip[1]).
    :param font: CSS class name applied to the element.
    :param colour: colour, handled by self._colour().
    :param width: wrap width; None disables wrapping.
    """
    attrs = dict()
    style = list()
    transform = list()
    if vert is not None:
        baselines = {
            self.CENTRE: "central",
            self.TOP: "text-before-edge",
            self.BOTTOM: "text-after-edge",
        }
        style.append(("dominant-baseline", baselines[vert]))
    if horiz is not None:
        anchors = {
            self.CENTRE: "middle",
            self.LEFT: "start",
            self.RIGHT: "end",
        }
        style.append(("text-anchor", anchors[horiz]))
    transform.extend(self._offset(offset))
    if angle is not None:
        transform.append("rotate({})".format(angle * self.flip[1]))
    if font is not None:
        attrs["class"] = font
    attrs.update(self._colour(colour))
    with self.element("text", attrs, style=style, transform=transform):
        if width is None:
            if isinstance(text, str):
                self.xml.characters(text)
            else:
                # Sequence of segments; each may carry an overline decoration.
                for seg in text:
                    attrs = dict()
                    if seg.get("overline"):
                        attrs["text-decoration"] = "overline"
                    self.tree(("tspan", attrs, (seg["text"],)))
            return
        # Very hacky approximation of the size of each character
        # as one en wide
        width /= self.textsize / 2
        wrapper = TextWrapper(width=width, replace_whitespace=False)
        hardlines = text.splitlines(keepends=True)
        if not hardlines:
            hardlines.append("")
        line = 0
        for hardline in hardlines:
            wrapped = wrapper.wrap(hardline)
            if not wrapped:  # Caused by empty string
                wrapped.append("")
            for softline in wrapped:
                lineattrs = {
                    "x": "0",
                    "y": "{}em".format(line / 0.875),
                    "xml:space": "preserve",
                }
                self.tree(("tspan", lineattrs, (softline,)))
                line += 1
def usage(self):
    """Return a help string for the plugin.

    Uses self.__doc__ as the summary and, when present,
    self.pluginOptions (a mapping of option name -> description) to list
    the supported options.
    """
    tw = TextWrapper(
        width=78,
        drop_whitespace=True,
        expand_tabs=True,
        fix_sentence_endings=True,
        break_long_words=True,
        break_on_hyphens=True,
    )
    text = tw.fill(self.__doc__.strip()) + "\n\n"
    try:
        options = self.pluginOptions
    except AttributeError:
        return text + "This plugin does not support any options.\n"
    # BUG FIX: the original line ended with a stray comma, assigning the
    # 1-tuple ('                ',) to subsequent_indent; tw.fill() then
    # raised TypeError as soon as an option description wrapped onto a
    # second line.  Assign the plain string instead.
    tw.subsequent_indent = ' ' * 16
    text += "Options supported by this plugin:\n"
    for opt in sorted(options.keys()):
        text += "--opt={:<12} ".format(opt)
        text += tw.fill(options[opt].strip()) + "\n"
    text += "\n"
    text += "You can also chain options together, e.g.:\n"
    text += " --opt=systems,stations,csvonly\n"
    return text
def test_whitespace(self):
    # Whitespace munging and end-of-sentence detection
    # (tabs expand, runs collapse, and sentence ends get double spaces).
    text = """\
This is a paragraph that already has
line breaks.  But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
    expect = [
        "This is a paragraph that already has line",
        "breaks.  But some of its lines are much",
        "longer than the others, so it needs to be",
        "wrapped.  Some lines are  tabbed too.  What a",
        "mess!",
    ]
    wrapper = TextWrapper(45, fix_sentence_endings=True)
    result = wrapper.wrap(text)
    self.check(result, expect)
    # fill() must agree with wrap() joined by newlines.
    result = wrapper.fill(text)
    self.check(result, "\n".join(expect))
def test_fix_sentence_endings(self): wrapper = TextWrapper(60, fix_sentence_endings=True) # SF #847346: ensure that fix_sentence_endings=True does the # right thing even on input short enough that it doesn't need to # be wrapped. text = "A short line. Note the single space." expect = ["A short line. Note the single space."] self.check(wrapper.wrap(text), expect) # Test some of the hairy end cases that _fix_sentence_endings() # is supposed to handle (the easy stuff is tested in # test_whitespace() above). text = "Well, Doctor? What do you think?" expect = ["Well, Doctor? What do you think?"] self.check(wrapper.wrap(text), expect) text = "Well, Doctor?\nWhat do you think?" self.check(wrapper.wrap(text), expect) text = 'I say, chaps! Anyone for "tennis?"\nHmmph!' expect = ['I say, chaps! Anyone for "tennis?" Hmmph!'] self.check(wrapper.wrap(text), expect) wrapper.width = 20 expect = ["I say, chaps!", 'Anyone for "tennis?"', "Hmmph!"] self.check(wrapper.wrap(text), expect) text = 'And she said, "Go to hell!"\nCan you believe that?' expect = ['And she said, "Go to', 'hell!" Can you', "believe that?"] self.check(wrapper.wrap(text), expect) wrapper.width = 60 expect = ['And she said, "Go to hell!" Can you believe that?'] self.check(wrapper.wrap(text), expect)
def _split_body(self, text_lines): """Split the body into summary and details. This will assign to self.body_summary the summary text, but it will return the details text for further santization. :return: the raw details text. """ # If there are no non-blank lines, then we're done. if len(text_lines) == 0: self.body_summary = u'' return u'' # If the first line is of a completely arbitrarily chosen reasonable # length, then we'll just use that as the summary. elif len(text_lines[0]) < 60: self.body_summary = text_lines[0] return u'\n'.join(text_lines[1:]) # It could be the case that the text is actually flowed using RFC # 3676 format="flowed" parameters. In that case, just split the line # at the first whitespace after, again, our arbitrarily chosen limit. else: first_line = text_lines.pop(0) wrapper = TextWrapper(width=60) filled_lines = wrapper.fill(first_line).splitlines() self.body_summary = filled_lines[0] text_lines.insert(0, u''.join(filled_lines[1:])) return u'\n'.join(text_lines)
def fill(output_str, indent = " "):
    """Wrap output_str to the console width, prefixing every line with indent."""
    from textwrap import TextWrapper
    w = TextWrapper(width=get_console_width(),
                    initial_indent=indent,
                    subsequent_indent=indent)
    return w.fill(output_str)
def settings(self, short=None):
    """List available settings."""
    # Reverse map: type class -> printable type name.
    types = {v: k for k, v in TYPE_CLASSES.items()}
    # Descriptions are emitted as '# '-prefixed comment blocks.
    wrapper = TextWrapper(initial_indent='# ', subsequent_indent='# ')
    for i, section in enumerate(sorted(self._settings)):
        if not short:
            # Blank line between sections, but not before the first one.
            print('%s[%s]' % ('' if i == 0 else '\n', section))
        for option in sorted(self._settings[section]._settings):
            meta = self._settings[section].get_meta(option)
            desc = meta['description']
            if short:
                # Compact form: "section.option -- first line of description".
                print('%s.%s -- %s' % (section, option, desc.splitlines()[0]))
                continue
            if option == '*':
                option = '<option>'
            # Show the accepted values: a choice set or a type placeholder.
            if 'choices' in meta:
                value = "{%s}" % ', '.join(meta['choices'])
            else:
                value = '<%s>' % types[meta['type_cls']]
            print(wrapper.fill(desc))
            # ';' leaves the sample setting commented out in the output.
            print(';%s=%s' % (option, value))
def _serialize_string(cls, value, level):
    """Serialize a string scalar at the given indentation level.

    Returns either a single string (inline form, quoted when necessary)
    or a list of lines (block form for multi-line or over-long values).
    Relies on the class attributes requires_quotes, indent, line_width
    and the _requires_escaping() helper.
    """
    length = len(value)
    if length == 0:
        return "''"
    elif length <= 5 and value.lower() in cls.requires_quotes:
        # Short reserved words (per cls.requires_quotes) must be quoted.
        return "'%s'" % value
    indent = cls.indent * level
    if '\n' in value:
        # Literal block form: '|' marker, then each line indented;
        # blank lines stay completely empty.
        lines = ['|']
        for line in value.split('\n'):
            if line:
                lines.append(indent + line)
            else:
                lines.append('')
        return lines
    elif length + len(indent) <= cls.line_width:
        # Fits on one line: quote (doubling single quotes) only if needed.
        if cls._requires_escaping(value):
            return "'%s'" % value.replace("'", "''")
        else:
            return value
    else:
        # Too long for one line: quote if needed, then hard-wrap while
        # preserving all whitespace and without splitting words.
        if cls._requires_escaping(value):
            value = "'%s'" % value.replace("'", "''")
        wrapper = TextWrapper(width=cls.line_width, initial_indent=indent,
                              subsequent_indent=indent, break_long_words=False,
                              replace_whitespace=False, drop_whitespace=False)
        lines = []
        for line in value.splitlines():
            lines.extend(wrapper.wrap(line))
        return lines
def print_error(fname, line_number, err_msg):
    """Print a file/line error header followed by the indented message.

    line_number is zero-based; it is reported one-based.
    """
    indent = ' ' * 4
    wrapper = TextWrapper(width=80, initial_indent=indent,
                          subsequent_indent=indent)
    header = "Error in file {0}, line {1}:".format(fname, line_number + 1)
    body = '\n'.join(wrapper.wrap(err_msg))
    print(header + '\n' + body + '\n')
def refill(msg):
    """
    Refill a changelog message.

    Normalize the message reducing multiple spaces and newlines to single
    spaces, recognizing common form of ``bullet lists``, that is paragraphs
    starting with either a dash "-" or an asterisk "*".
    """
    wrapper = TextWrapper()
    res = []
    # itemize_re (module-level) splits the message on bullet markers.
    items = itemize_re.split(msg.strip())
    if len(items)>1:
        # Remove possible first empty split, when the message immediately
        # starts with a bullet
        if not items[0]:
            del items[0]
        if len(items)>1:
            # Still several items: format the whole thing as a bullet list.
            wrapper.initial_indent = '- '
            wrapper.subsequent_indent = ' '*2
    for item in items:
        if item:
            # Collapse newlines/spaces and drop empty fragments.
            words = filter(None, item.strip().replace('\n', ' ').split(' '))
            normalized = ' '.join(words)
            res.append(wrapper.fill(normalized))
    return '\n\n'.join(res)
def quote_text_as_email(text, width=80):
    """Quote the text as if it is an email response.

    Uses '> ' as a line prefix, and breaks long lines.

    Trailing whitespace is stripped.
    """
    # Empty text begets empty text.
    if text is None:
        return ''
    text = text.rstrip()
    if not text:
        return ''
    prefix = '> '
    # The TextWrapper's handling of code is somewhat suspect.
    wrapper = TextWrapper(
        initial_indent=prefix,
        subsequent_indent=prefix,
        width=width,
        replace_whitespace=False)
    quoted = []
    # Wrap each original line independently so hard line breaks survive.
    for raw_line in text.rstrip().split('\n'):
        if raw_line.strip():
            quoted.extend(wrapper.wrap(raw_line))
        else:
            # TextWrapper won't do an indent of an empty string.
            quoted.append(prefix)
    return '\n'.join(quoted)
def _stream_formatter(self, record):
    """The formatter for standard output."""
    # Colour the level name by severity band:
    # DEBUG < INFO < IMPORTANT < WARNING < ERROR; ERROR and above are red.
    if record.levelno < logging.DEBUG:
        print(record.levelname, end='')
    elif(record.levelno < logging.INFO):
        colourPrint(record.levelname, 'green', end='')
    elif(record.levelno < IMPORTANT):
        colourPrint(record.levelname, 'magenta', end='')
    elif(record.levelno < logging.WARNING):
        colourPrint(record.levelname, 'lightblue', end='')
    elif(record.levelno < logging.ERROR):
        colourPrint(record.levelname, 'brown', end='')
    else:
        colourPrint(record.levelname, 'red', end='')
    if record.levelno == logging.WARN:
        # Strip everything up to and including the first ': ' from warnings.
        message = '{0}'.format(record.msg[record.msg.find(':')+2:])
    else:
        message = '{0}'.format(record.msg)
    if len(message) > self.wrapperLength:
        # Wrap long messages; continuation lines line up under the text
        # that follows "LEVELNAME: ".
        tw = TextWrapper()
        tw.width = self.wrapperLength
        tw.subsequent_indent = ' ' * (len(record.levelname)+2)
        tw.break_on_hyphens = False
        message = '\n'.join(tw.wrap(message))
    print(': ' + message)
def display_search_results(self, mixes, s):
    """Print one page of mix search results for query string ``s``.

    Each displayed mix is cached in self.mixes keyed by its display
    number so the user can select it later.
    """
    if self._search_results_page < self.total_pages:
        next_notification = "--Next-- (Enter)"
    else:
        next_notification = ""
    print('Results for "{}":'.format(s))
    wrapper = TextWrapper(width=self.console_width - 5, subsequent_indent=(' ' * 5))
    mix_info_tpl = Template('$name ($trackcount tracks, ${hours}h ${minutes}m, by ${user})')
    page_info_tpl = Template('Page $page on $total_pages. $next_notification')
    # If this is a new query, reset mixes dictionary
    if self._search_results_page == 0:
        self.mixes = {}
    # Store and show new mix results
    # NOTE(review): when _search_results_page == 0 this start index is
    # negative -- presumably pages are 1-based by the time results arrive;
    # verify against the caller.
    start_page_no = (self._search_results_page - 1) * self.config['results_per_page'] + 1
    for i, mix in enumerate(mixes, start_page_no):
        # Cache mix
        self.mixes[i] = mix
        # Print line
        prefix = ' {0})'.format(i).ljust(5)
        hours = mix['duration'] // 60 // 60
        minutes = (mix['duration'] // 60) % 60
        mix_info = mix_info_tpl.substitute(name=bold(mix['name']),
                                           user=mix['user']['login'],
                                           trackcount=mix['tracks_count'],
                                           hours=hours,
                                           minutes=minutes)
        print(prefix + wrapper.fill(mix_info))
        print(wrapper.fill(' Tags: {}'.format(mix['tag_list_cache'])))
    page_info = page_info_tpl.substitute(page=bold(str(self._search_results_page)),
                                         total_pages=bold(str(self.total_pages)),
                                         next_notification=next_notification)
    print(wrapper.fill(page_info))
def wrap(text, width=77, indent='', long_words=False, hyphens=False):
    """
    Wrap text for cleaner output (this is a simple wrapper around
    `textwrap.TextWrapper` in the standard library).

    :param text: The text to wrap
    :param width: The max width of a line before breaking
    :param indent: String to prefix subsequent lines after breaking
    :param long_words: Break on long words
    :param hyphens: Break on hyphens
    :returns: str(text)
    """
    if sys.version_info[0] < 3:   # pragma: no cover  # noqa
        types = (str, unicode)    # pragma: no cover  # noqa
    else:                         # pragma: no cover  # noqa
        types = (str,)            # pragma: no cover  # noqa
    # FIX: use isinstance rather than an exact type() comparison so that
    # str/unicode subclasses -- which wrap perfectly well -- are accepted.
    if not isinstance(text, types):
        raise TypeError("Argument `text` must be one of [str, unicode].")
    wrapper = TextWrapper(subsequent_indent=indent, width=width,
                          break_long_words=long_words,
                          break_on_hyphens=hyphens)
    return wrapper.fill(text)
def dump_recursive_parents(rpc, post_author, post_permlink, limit=1, format="markdown"):
    """Print a post and up to ``limit`` ancestors as frontmatter documents.

    Recurses towards the root first, so ancestors are printed before the
    post itself.  Updates the module-global currentThreadDepth so replies
    dumped afterwards can be indented past the parent chain.

    :param rpc: client exposing get_content / get_content_replies.
    :param limit: how many ancestor levels to include.
    :param format: "markdown" converts post bodies via markdownify().
    """
    global currentThreadDepth
    limit = int(limit)
    # Dead code removed: the original built and configured a TextWrapper
    # (postWrapper) here that was never used.
    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1
    post = rpc.get_content(post_author, post_permlink)
    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            # Ancestors first, one fewer level remaining.
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)
    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
def dump_recursive_comments(rpc, post_author, post_permlink, depth=0, format="markdown"):
    """Print all replies to a post, depth-first, as frontmatter documents.

    :param rpc: client exposing get_content_replies.
    :param depth: current recursion level (kept for the call signature;
        only used for the recursive bookkeeping).
    :param format: "markdown" converts post bodies via markdownify().
    """
    # Dead code removed: the original built and configured a TextWrapper
    # (postWrapper, indented by depth + currentThreadDepth) that was never
    # used, and the matching `global currentThreadDepth` declaration.
    depth = int(depth)
    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
def dict_str(self):
    """Build a human-readable definition for this word, including data for
    each synset"""
    # First synset: prefix carries the word's category name.
    tw = TextWrapper(width=self.LINE_WIDTH_MAX,
                     initial_indent=(self.prefix_fmtf_line_first % self.category_map_rev[self.category]),
                     subsequent_indent=(self.prefix_fmtn_line_first % (len(self.category_map_rev[self.category]), '')))
    lines = (tw.wrap(self.synsets[0].synset_get().dict_str()))
    i = 2
    prefix_fmtn_line_nonfirst = self.prefix_fmtn_line_nonfirst
    pfln_len = 0
    for ss_wrap in self.synsets[1:]:
        # adjust indenting based on index-number width
        pfln_len_new = len('%d' % (i,))
        if (pfln_len_new > pfln_len):
            # Only recompute the continuation prefix when the index gains
            # a digit (9 -> 10, 99 -> 100, ...).
            pfln_len = pfln_len_new
            pfln_str = (self.prefix_fmtn_line_nonfirst % (pfln_len, ''))
        # format data for this synset
        synset = ss_wrap.synset_get()
        tw = TextWrapper(width=self.LINE_WIDTH_MAX,
                         initial_indent=(self.prefix_fmtf_line_nonfirst % i),
                         subsequent_indent=pfln_str)
        lines.extend(tw.wrap(synset.dict_str()))
        i += 1
    return self.linesep.join(lines)
def list_posts(discussions):
    """Print a table of discussions: identifier, title, category, reply
    count, and pending payout.

    :param discussions: iterable of dicts with keys author, permlink,
        title, category, children, pending_payout_value.
    """
    t = PrettyTable([
        "identifier",
        "title",
        "category",
        "replies",
        # "votes",
        "payouts",
    ])
    t.align = "l"
    t.align["payouts"] = "r"
    # t.align["votes"] = "r"
    t.align["replies"] = "c"
    # PERF: build the wrapper once; the original re-created an identical
    # TextWrapper on every loop iteration.
    identifier_wrapper = TextWrapper()
    identifier_wrapper.width = 60
    identifier_wrapper.subsequent_indent = " "
    for d in discussions:
        identifier = "@%s/%s" % (d["author"], d["permlink"])
        t.add_row([
            identifier_wrapper.fill(identifier),
            identifier_wrapper.fill(d["title"]),
            d["category"],
            d["children"],
            # d["net_rshares"],
            d["pending_payout_value"],
        ])
    print(t)
def msg(text, sep=' ', *args, **kwargs):
    '''
    A convenience to neatly format message strings, such as error messages.
    '''
    # Strip, wrap with the caller-supplied TextWrapper options, then glue
    # the wrapped lines back together with sep.
    return sep.join(TextWrapper(*args, **kwargs).wrap(text.strip()))
def format(self, **kargs):
    """Render a Debian-changelog-style entry.

    Expected kargs: author, email; optionally version, package_name, and
    messages (an iterable of bullet strings).  When version is omitted it
    is derived from the most recent parsed log entry by incrementing its
    trailing number, or appending '~1' when it has none; '1.0' is used
    when there are no prior entries.
    """
    self.parse()
    if self._package_name is not None:
        kargs['package_name'] = self._package_name
    if 'version' not in kargs:
        if len(self.logs) > 0:
            version = self.logs[0]['version']
            reobj = re.match(r"""(.*)(\d+)$""", version)
            if reobj is None:
                # No trailing number to bump: mark as a derivative version.
                version += '~1'
            else:
                # Increment the trailing number, keeping the prefix.
                version = reobj.group(1) + str(int(reobj.group(2)) + 1)
        else:
            version = '1.0'
        kargs['version'] = version
    title = '{package_name} ({version})'.format(**kargs)
    # Placeholder body when no messages are supplied.
    messages = ' * '
    wrapper = TextWrapper(width=80, initial_indent=' * ', subsequent_indent=' ')
    if 'messages' in kargs:
        messages = []
        for message in kargs['messages']:
            messages.append(wrapper.fill(message))
        messages = '\n'.join(messages)
    kargs['time'] = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    tailer = ' -- {author} <{email}> {time}'.format(**kargs)
    return title + '\n\n' + messages + '\n\n' + tailer + '\n\n'
def print_arguments(arguments, width=None):
    """Build a help listing for ``arguments``.

    :param arguments: iterable of argument objects exposing .help,
        .default, .optional and .positional (plus whatever
        get_option_names() reads).
    :param width: width of the option-name column; computed from the
        longest option name when None.
    :return: the formatted multi-line string.
    """
    # FIX: compare with None using `is`, not `==` (PEP 8; avoids invoking
    # any custom __eq__ on the argument).
    if width is None:
        width = 0
        for arg in arguments:
            width = max(width, len(get_option_names(arg)))
    help_wrapper = TextWrapper(
        width=terminal_width,
        initial_indent=' ' * (width + 5),
        subsequent_indent=' ' * (width + 5),
    )
    return ('\n'.join(
        ' ' * 2 + '{0:<{width}} {1}'.format(
            get_option_names(arg),
            # The slice drops the initial indent from the first wrapped
            # line, since the name column already occupies that space.
            help_wrapper.fill(
                arg.help + (
                    _('(default: {0})').format(arg.default)
                    if arg.default not in (None, False)
                    else _('(required)')
                    if not (arg.optional or arg.positional)
                    else ''
                )
            )[width + 4:] if arg.help else '',
            width=width,
        ) for arg in arguments))
def CppEvaluations(self, Indent=4):
    """Evaluate all derived variables in C++

    This function uses the `substitution` expressions for the derived
    variables. This output is appropriate for updating the values of the
    variables at each step of an integration, for example.
    """
    from textwrap import TextWrapper
    wrapper = TextWrapper(width=120)
    wrapper.initial_indent = ' '*Indent
    wrapper.subsequent_indent = wrapper.initial_indent + ' '

    def Evaluation(atom):
        # Emit one C++ assignment (or one per element for vector atoms).
        def Ccode(a):
            # Prefer the atom's own ccode(); fall back to sympy's printer.
            try:
                return a.ccode()
            except:
                from sympy.printing import ccode
                return ccode(a)
        if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >'):
            # Vector atoms assign element by element from substitution[i].
            return '\n'.join([wrapper.fill('{0}[{1}] = {2};'.format(self.Variables[atom], i, Ccode(atom.substitution[i])))
                              for i in range(len(atom.substitution))])
        else:
            return wrapper.fill('{0} = {1};'.format(self.Variables[atom], atom.ccode()))

    # Only derived atoms (neither fundamental inputs nor constants) need
    # re-evaluation at each step.
    return '\n'.join([Evaluation(atom) for atom in self.Atoms
                      if not atom.fundamental and not atom.constant])
def wrap_for_make(items):
    """Join items into a backslash-continued, tab-indented Make value."""
    wrapper = TextWrapper(width=60,
                          break_on_hyphens=False,
                          subsequent_indent='\t' * 2)
    # Sort for stable output, wrap, and continue lines Make-style.
    return ' \\\n'.join(wrapper.wrap(join(sorted(items))))
def wrap(text, width=77, indent='', long_words=False, hyphens=False):
    """
    Wrap text for cleaner output (this is a simple wrapper around
    ``textwrap.TextWrapper`` in the standard library).

    Args:
        text (str): The text to wrap

    Keyword Arguments:
        width (int): The max width of a line before breaking
        indent (str): String to prefix subsequent lines after breaking
        long_words (bool): Whether or not to break on long words
        hyphens (bool): Whether or not to break on hyphens

    Returns:
        str: The wrapped string
    """
    # FIX: use isinstance rather than an exact type() comparison so that
    # str subclasses -- which wrap perfectly well -- are accepted.
    if not isinstance(text, str):
        raise TypeError("Argument `text` must be one of [str, unicode].")
    wrapper = TextWrapper(subsequent_indent=indent, width=width,
                          break_long_words=long_words,
                          break_on_hyphens=hyphens)
    return wrapper.fill(text)
def CppInitializations(self, Indent=4):
    """Create initialization list for C++

    For example, if the `Variables` object contains atoms m1, m2, t, and
    x referred to in the `Expressions` object, where m1 and m2 are
    constant, and t and x are variables, the initialization list should be

        m1(m1_i), m2(m2_i), t(t_i), x(x_i)

    The quantities m1_i, etc., appear in the input-argument list output
    by the method `CppInputArguments`.
    """
    from textwrap import TextWrapper
    wrapper = TextWrapper(width=120)
    wrapper.initial_indent = ' '*Indent
    wrapper.subsequent_indent = wrapper.initial_indent

    def Initialization(atom):
        # Member-initializer for one atom: vectors are sized to their
        # substitution length, fundamental inputs copy their `_i`
        # constructor argument, derived atoms evaluate their C code.
        if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >'):
            return '{0}({1})'.format(self.Variables[atom], len(atom.substitution))
        if atom.fundamental:
            return '{0}({0}_i)'.format(self.Variables[atom])
        else:
            return '{0}({1})'.format(self.Variables[atom], atom.ccode())

    Initializations = [Initialization(atom) for atom in self.Atoms]
    return wrapper.fill(', '.join(Initializations))
def report(trans, html=False):
    """Returns a summary report of all of the transactions."""
    invs = inventories(trans)
    # Rank players by total cone count, descending.
    rankings = []
    for player, inv in invs.items():
        rankings.append((player, inv['cones'], inv['magic']))
    rankings.sort(key=lambda x: x[1], reverse=True)
    listings = []
    tw = TextWrapper(width=30)
    mctemp = '{1}x {0} cone{2}'
    for player, cones, magic in rankings:
        # Describe the held magic cones, e.g. "2x fire cones", wrapped
        # to the Magic Cones column width.
        s = ', '.join([mctemp.format(key, value, '' if value == 1 else 's') \
            for key, value in sorted(magic.items()) if value > 0])
        s = '\n'.join(tw.wrap(s))
        # Break the raw cone count into trees / saplings / leftover cones.
        # Zero counts render as '' so the table stays uncluttered, except
        # that saplings show an explicit 0 when the player owns trees.
        listings.append((player,
            cones // CONES_PER_TREE or '',
            (cones // CONES_PER_SAPLING) % (CONES_PER_TREE // CONES_PER_SAPLING) or \
                ('' if cones // CONES_PER_TREE == 0 else 0),
            cones % CONES_PER_SAPLING,
            s,
            ))
    tab = PrettyTable(['Player', 'Trees', 'Saplings', 'Cones', 'Magic Cones'])
    for listing in listings:
        tab.add_row(listing)
    rep = tab.get_html_string(format=True) if html else tab.get_string()
    return rep
# Python 2 script chunk: assemble a FONTLOG on stdout from README,
# AUTHORS, and (via LOG, consumed later) the git history.
try:
    README = open('README.txt')
except IOError:
    # Allow running from one directory below the project root.
    README = open('../README.txt')
try:
    AUTHORS = open('AUTHORS.txt')
except IOError:
    AUTHORS = open('../AUTHORS.txt')
# Full history, oldest commit first.
LOG = Popen(['git','log','--reverse'], stdout=PIPE)

# Setup TextWrap instances
wrapper = TextWrapper(width=82)
commit_msg_wrapper = TextWrapper(subsequent_indent=' ', width=82)

# Print FONTLOG to stdout
for line in README:
    print wrapper.fill(line)
print ""
print "____"
print ""
print "Designers:"
print ""
print AUTHORS.read()
print ""
def __init__(self):
    """Set up the groff renderer: tag replacer plus an 80-column wrapper
    that preserves whitespace and never splits words or hyphens."""
    self.__tags = GroffTagReplacer()
    self.__wrapper = TextWrapper(
        width=80,
        replace_whitespace=False,
        break_long_words=False,
        break_on_hyphens=False,
    )
def setUp(self): self.wrapper = TextWrapper() self.text = '''\
class TopicTreeSpecPrinter:
    """
    Helper class to print the topic tree using the Python class syntax.
    The "printout" can be sent to any file object (object that has a
    write() method). If printed to a module, the module can be imported
    and given to pub.addTopicDefnProvider(module, 'module'). Importing
    the module also provides code completion of topic names
    (rootTopic.subTopic can be given to any pubsub function requiring a
    topic name).
    """

    # Character used for each indentation step of the generated source.
    INDENT_CH = ' '
    # INDENT_CH = '.'

    def __init__(self, rootTopic: Union[str, Topic] = None, fileObj: TextIO = None,
                 width: int = 70, indentStep: int = 4,
                 treeDoc: str = defaultTopicTreeSpecHeader,
                 footer: str = defaultTopicTreeSpecFooter):
        """
        For formatting, can specify the width of output, the indent step,
        the header and footer to print to override defaults. The
        destination is fileObj; if none is given, then sys.stdout is used.
        If rootTopic is given, calls writeAll(rootTopic) at end of
        __init__.
        """
        self.__traverser = TopicTreeTraverser(self)

        import sys
        fileObj = fileObj or sys.stdout

        self.__destination = fileObj
        self.__output = []
        self.__header = self.__toDocString(treeDoc)
        self.__footer = dedent(footer)
        self.__lastWasAll = False  # True when last topic done was the ALL_TOPICS

        self.__width = width
        self.__wrapper = TextWrapper(width)
        self.__indentStep = indentStep
        self.__indent = 0

        args = dict(width=width, indentStep=indentStep, treeDoc=treeDoc,
                    footer=self.__footer, fileObj=fileObj)

        def fmItem(argName, argVal):
            # One "# - name: value" comment line per constructor kwarg;
            # long strings are truncated, fileObj shown by class name.
            if isinstance(argVal, str):
                MIN_OFFSET = 5
                lenAV = width - MIN_OFFSET - len(argName)
                if lenAV > 0:
                    argVal = repr(argVal[:lenAV] + '...')
            elif argName == 'fileObj':
                argVal = fileObj.__class__.__name__
            return '# - %s: %s' % (argName, argVal)

        fmtArgs = [fmItem(key, args[key]) for key in sorted(args.keys())]
        self.__comment = [
            '# Automatically generated by %s(**kwargs).' % self.__class__.__name__,
            '# The kwargs were:',
        ]
        self.__comment.extend(fmtArgs)
        self.__comment.extend([''])  # two empty line after comment

        if rootTopic is not None:
            self.writeAll(rootTopic)

    def getOutput(self) -> str:
        """
        Each line that was sent to fileObj was saved in a list; returns a
        string which is ``'\\n'.join(list)``.
        """
        return '\n'.join(self.__output)

    def writeAll(self, topicObj: Topic):
        """
        Traverse each topic of topic tree, starting at topicObj, printing
        each topic definition as the tree gets traversed.
        """
        self.__traverser.traverse(topicObj)

    def _accept(self, topicObj: Topic):
        # accept every topic
        return True

    def _startTraversal(self):
        # output comment
        self.__wrapper.initial_indent = '# '
        self.__wrapper.subsequent_indent = self.__wrapper.initial_indent
        self.__output.extend(self.__comment)

        # output header:
        if self.__header:
            self.__output.extend([''])
            self.__output.append(self.__header)
            self.__output.extend([''])

    def _doneTraversal(self):
        # Append the footer and flush everything to the destination.
        if self.__footer:
            self.__output.append('')
            self.__output.append('')
            self.__output.append(self.__footer)

        if self.__destination is not None:
            self.__destination.write(self.getOutput())

    def _onTopic(self, topicObj: Topic):
        """This gets called for each topic. Print as per specified
        content."""
        # don't print root of tree, it is the ALL_TOPICS builtin topic
        if topicObj.isAll():
            self.__lastWasAll = True
            return
        self.__lastWasAll = False

        self.__output.append('')  # empty line
        # topic name
        self.__wrapper.width = self.__width
        head = 'class %s:' % topicObj.getNodeName()
        self.__formatItem(head)

        # each extra content (assume constructor verified that chars are valid)
        self.__printTopicDescription(topicObj)
        self.__printTopicArgSpec(topicObj)

    def _startChildren(self):
        """Increase the indent"""
        if not self.__lastWasAll:
            self.__indent += self.__indentStep

    def _endChildren(self):
        """Decrease the indent"""
        if not self.__lastWasAll:
            self.__indent -= self.__indentStep

    def __toDocString(self, msg: str) -> str:
        # Wrap msg in triple quotes unless it already looks like a
        # docstring literal.
        if not msg:
            return msg
        if msg.startswith("'''") or msg.startswith('"""'):
            return msg
        return '"""\n%s\n"""' % msg.strip()

    def __printTopicDescription(self, topicObj: Topic):
        # Emit the topic description as a docstring one indent step in.
        if topicObj.getDescription():
            extraIndent = self.__indentStep
            self.__formatItem('"""', extraIndent)
            self.__formatItem(topicObj.getDescription(), extraIndent)
            self.__formatItem('"""', extraIndent)

    def __printTopicArgSpec(self, topicObj: Topic):
        extraIndent = self.__indentStep

        # generate the message data specification
        reqdArgs, optArgs = topicObj.getArgs()
        argsStr = []
        if reqdArgs:
            argsStr.append(", ".join(reqdArgs))
        if optArgs:
            # Optional args render as keyword parameters defaulting to None.
            optStr = ', '.join([('%s=None' % arg) for arg in optArgs])
            argsStr.append(optStr)
        argsStr = ', '.join(argsStr)

        # print it only if there are args; ie if listener() don't print it
        if argsStr:
            # output a blank line and protocol
            self.__formatItem('\n', extraIndent)
            protoListener = 'def %s(%s):' % (SPEC_METHOD_NAME, argsStr)
            self.__formatItem(protoListener, extraIndent)

            # and finally, the args docs
            extraIndent += self.__indentStep
            self.__formatItem('"""', extraIndent)
            # but ignore the arg keys that are in parent args docs:
            parentMsgKeys = ()
            if topicObj.getParent() is not None:
                parentMsgKeys = topicObj.getParent().getArgDescriptions().keys()  # keys iter ok
            argsDocs = topicObj.getArgDescriptions()
            for key in sorted(argsDocs.keys()):
                if key not in parentMsgKeys:
                    argDesc = argsDocs[key]
                    msg = "- %s: %s" % (key, argDesc)
                    self.__formatItem(msg, extraIndent)
            self.__formatItem('"""', extraIndent)

    def __formatItem(self, item: str, extraIndent: int = 0):
        # Append item to the output, one line at a time, at the current
        # indentation plus extraIndent.
        indent = extraIndent + self.__indent
        indentStr = self.INDENT_CH * indent
        lines = item.splitlines()
        for line in lines:
            self.__output.append('%s%s' % (indentStr, line))

    def __formatBlock(self, text: str, extraIndent: int = 0):
        # Like __formatItem but word-wraps text to the configured width.
        self.__wrapper.initial_indent = self.INDENT_CH * (self.__indent + extraIndent)
        self.__wrapper.subsequent_indent = self.__wrapper.initial_indent
        self.__output.append(self.__wrapper.fill(text))
def text(self, text, offset=None, horiz=None, vert=None, *,
         angle=None, font=None, colour=None, width=None):
    """Emit an SVG <text> element, optionally word-wrapped.

    NOTE(review): this appears to be an exact duplicate of the other
    text() method in this codebase -- consider sharing one definition.

    :param text: a string, or (only when width is None) a sequence of
        segment dicts with 'text' and optional boolean 'overline' keys.
    :param offset: translation, handled by self._offset().
    :param horiz: one of self.CENTRE/LEFT/RIGHT, or None.
    :param vert: one of self.CENTRE/TOP/BOTTOM, or None.
    :param angle: rotation in degrees (sign adjusted by self.flip[1]).
    :param font: CSS class name applied to the element.
    :param colour: colour, handled by self._colour().
    :param width: wrap width; None disables wrapping.
    """
    attrs = dict()
    style = list()
    transform = list()
    if vert is not None:
        baselines = {
            self.CENTRE: "central",
            self.TOP: "text-before-edge",
            self.BOTTOM: "text-after-edge",
        }
        style.append(("dominant-baseline", baselines[vert]))
    if horiz is not None:
        anchors = {
            self.CENTRE: "middle",
            self.LEFT: "start",
            self.RIGHT: "end",
        }
        style.append(("text-anchor", anchors[horiz]))
    transform.extend(self._offset(offset))
    if angle is not None:
        transform.append("rotate({})".format(angle * self.flip[1]))
    if font is not None:
        attrs["class"] = font
    attrs.update(self._colour(colour))
    with self.element("text", attrs, style=style, transform=transform):
        if width is None:
            if isinstance(text, str):
                self.xml.characters(text)
            else:
                # Sequence of segments; each may carry an overline decoration.
                for seg in text:
                    attrs = dict()
                    if seg.get("overline"):
                        attrs["text-decoration"] = "overline"
                    self.tree(("tspan", attrs, (seg["text"], )))
            return
        # Very hacky approximation of the size of each character
        # as one en wide
        width /= self.textsize / 2
        wrapper = TextWrapper(width=width, replace_whitespace=False)
        hardlines = text.splitlines(keepends=True)
        if not hardlines:
            hardlines.append("")
        line = 0
        for hardline in hardlines:
            wrapped = wrapper.wrap(hardline)
            if not wrapped:  # Caused by empty string
                wrapped.append("")
            for softline in wrapped:
                lineattrs = {
                    "x": "0",
                    "y": "{}em".format(line / 0.875),
                    "xml:space": "preserve",
                }
                self.tree(("tspan", lineattrs, (softline, )))
                line += 1
from shutil import *
import yaml
from contextlib import contextmanager
import re
from textwrap import TextWrapper
from glob import glob
import codecs
from typing import Any, Sequence, Dict, Mapping, Generator, Union, TextIO

# Terminal width from the COLUMNS environment variable, falling back to 80
# on any missing/unparseable value.
# NOTE(review): `os` (and `importlib` below) are assumed to be imported
# earlier in the file -- confirm.
try:
    _columns = int(os.getenv('COLUMNS', '80'))
except Exception:
    _columns = 80

# Prefix for alert messages; continuation lines are indented to line up
# under the text that follows the prefix.
_ALERT_PREFIX = '*** '
_alert_wrapper = TextWrapper(width=_columns - 1,
                             subsequent_indent=' ' * len(_ALERT_PREFIX))


def import_from_file(path: str, module_name: str) -> Any:
    """
    Import a file as a module.

    Parameters:

    - path: the path to the Python file
    - module_name: the name to assign the module_from_spec

    Returns: the module object
    """
    spec = importlib.util.spec_from_file_location(module_name, path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    # Bug fix: the docstring promises the module object, but the function
    # previously fell off the end and implicitly returned None.
    return mod
p384_u2 = 0xf3b240751d5d8ed394a4b5bf8e2a4c0e1e21aa51f2620a08b8c55a2bc334c9689923162648f06e5f4659fc526d9c1fd6 p384_v = 0xa0c27ec893092dea1e1bd2ccfed3cf945c8134ed0c9f81311a0f4a05942db8dbed8dd59f267471d5462aa14fe72de856 from textwrap import TextWrapper from os.path import basename from sys import argv from pyasn1.type.univ import Sequence, Choice, Integer, OctetString, ObjectIdentifier, BitString from pyasn1.type.namedtype import NamedTypes, NamedType, OptionalNamedType from pyasn1.type.namedval import NamedValues from pyasn1.type.tag import Tag, tagClassContext, tagFormatSimple from pyasn1.type.constraint import SingleValueConstraint from pyasn1.codec.der.encoder import encode as DER_Encode from pyasn1.codec.der.decoder import decode as DER_Decode wrapper = TextWrapper(width=78, initial_indent=" " * 2, subsequent_indent=" " * 2) def long_to_bytes(number, order): # # This is just plain nasty. # s = "%x" % number s = ("0" * (order / 8 - len(s))) + s return s.decode("hex") def bytes_to_bits(bytes): # # This, on the other hand, is not just plain nasty, this is fancy nasty.
def to_string(self, verbose=0, width=90):
    """
    String representation with verbosity level `verbose`.
    Text is wrapped at `width` columns.

    At verbose > 1, line-count statistics are appended as well.
    """
    # All wrapped sections are indented with a single tab
    w = TextWrapper(initial_indent="\t", subsequent_indent="\t", width=width)
    lines = []
    app = lines.append
    app("%s: %s\n" % (self.__class__.__name__.upper(), self.name))
    app("Directory: %s" % os.path.basename(self.dirname))
    if self.ancestor is not None:
        app("ANCESTOR:\n\t%s (%s)" % (self.ancestor.name, self.ancestor.__class__.__name__))
    if self.uses:
        app("USES:\n%s\n" % w.fill(", ".join(self.uses)))
    # Names used locally but not in self.uses
    diff = sorted(set(self.local_uses) - set(self.uses))
    if diff:
        app("LOCAL USES:\n%s\n" % w.fill(", ".join(diff)))
    if self.includes:
        app("INCLUDES:\n%s\n" % w.fill(", ".join(self.includes)))
    if self.contains:
        app("CONTAINS:\n%s\n" % w.fill(", ".join(c.name for c in self.contains)))
    if self.types:
        app("DATATYPES:\n%s\n" % w.fill(", ".join(d.name for d in self.types)))
    if self.interfaces:
        app("INTERFACES:\n%s\n" % w.fill(", ".join(i.name for i in self.interfaces)))
    app("PARENTS:\n%s\n" % w.fill(", ".join(sorted(p.name for p in self.parents))))
    #if verbose:
    # Add directory of parents
    dirnames = sorted(
        set(os.path.basename(p.dirname) for p in self.parents))
    app("PARENT_DIRS:\n%s\n" % w.fill(", ".join(dirnames)))
    app("CHILDREN:\n%s\n" % w.fill(", ".join(sorted(c for c in self.children))))
    if verbose > 1:
        # Extra statistics only at high verbosity
        app("")
        app("number of Fortran lines:%s" % self.num_f90lines)
        app("number of doc lines: %s" % self.num_doclines)
        app("number of OpenMP statements: %s" % self.num_omp_statements)
    # Add directory of children
    #dirnames = sorted(set(os.path.basename(p.dirname) for p in self.children))
    #app("CHILDREN_DIRS:\n%s\n" % w.fill(", ".join(dirnames)))
    app("PREAMBLE:\n%s" % self.preamble)
    return "\n".join(lines)
def help(self):  # @ReservedAssignment
    """Prints this help message and quits"""
    # Version banner first, if the application declares one
    if self._get_prog_version():
        self.version()
        print("")
    if self.DESCRIPTION:
        print(self.DESCRIPTION.strip() + '\n')

    # Build the positional-argument synopsis from main()'s signature
    m = six.getfullargspec(self.main)
    tailargs = m.args[1:]  # skip self
    if m.defaults:
        # Annotate trailing args that carry defaults as [name=value]
        for i, d in enumerate(reversed(m.defaults)):
            tailargs[-i - 1] = "[%s=%r]" % (tailargs[-i - 1], d)
    if m.varargs:
        tailargs.append("%s..." % (m.varargs,))
    tailargs = " ".join(tailargs)

    with self.COLOR_USAGE:
        print("Usage:")
        if not self.USAGE:
            # Default usage line; mention SUBCOMMAND only when there are any
            if self._subcommands:
                self.USAGE = " %(progname)s [SWITCHES] [SUBCOMMAND [SWITCHES]] %(tailargs)s\n"
            else:
                self.USAGE = " %(progname)s [SWITCHES] %(tailargs)s\n"
        print(self.USAGE % {"progname": colors.filter(self.PROGNAME), "tailargs": tailargs})

    # Group switch-info objects by their group name
    by_groups = {}
    for si in self._switches_by_func.values():
        if si.group not in by_groups:
            by_groups[si.group] = []
        by_groups[si.group].append(si)

    def switchs(by_groups, show_groups):
        # Yield (switch-info, formatted prefix, colour) per switch, sorted by
        # group then by name; optionally print group headers as a side effect.
        for grp, swinfos in sorted(by_groups.items(), key = lambda item: item[0]):
            if show_groups:
                print(self.COLOR_GROUPS[grp] | grp)
            for si in sorted(swinfos, key = lambda si: si.names):
                # Only canonical names (those mapping back to this switch)
                swnames = ", ".join(("-" if len(n) == 1 else "--") + n
                                    for n in si.names
                                    if n in self._switches_by_name and self._switches_by_name[n] == si)
                if si.argtype:
                    if hasattr(si.argtype, '__name__'):
                        typename = si.argtype.__name__
                    else:
                        typename = str(si.argtype)
                    argtype = " %s:%s" % (si.argname.upper(), typename)
                else:
                    argtype = ""
                prefix = swnames + argtype
                yield si, prefix, self.COLOR_GROUPS[grp]
            if show_groups:
                print("")

    # First pass (no output) just measures the widest switch prefix
    sw_width = max(len(prefix) for si, prefix, color in switchs(by_groups, False)) + 4
    cols, _ = get_terminal_size()
    description_indent = " %s%s%s"
    # Description column width, clamped so it stays readable on narrow terminals
    wrapper = TextWrapper(width = max(cols - min(sw_width, 60), 50) - 6)
    indentation = "\n" + " " * (cols - wrapper.width)

    for si, prefix, color in switchs(by_groups, True):
        help = si.help  # @ReservedAssignment
        if si.list:
            help += "; may be given multiple times"
        if si.mandatory:
            help += "; required"
        if si.requires:
            help += "; requires %s" % (", ".join((("-" if len(s) == 1 else "--") + s) for s in si.requires))
        if si.excludes:
            help += "; excludes %s" % (", ".join((("-" if len(s) == 1 else "--") + s) for s in si.excludes))
        # Collapse the help text to one line, then re-wrap to the column
        msg = indentation.join(wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))
        if len(prefix) + wrapper.width >= cols:
            # Prefix too wide: start the description on the next line
            padding = indentation
        else:
            padding = " " * max(cols - wrapper.width - len(prefix) - 4, 1)
        print(description_indent % (color | prefix, padding, color | msg))

    if self._subcommands:
        gc = self.COLOR_GROUPS["Subcommands"]
        print(gc | "Subcommands:")
        for name, subcls in sorted(self._subcommands.items()):
            with gc:
                subapp = subcls.get()
                # Prefer the explicit DESCRIPTION, fall back to the docstring
                doc = subapp.DESCRIPTION if subapp.DESCRIPTION else getdoc(subapp)
                if self.SUBCOMMAND_HELPMSG:
                    help = doc + "; " if doc else ""  # @ReservedAssignment
                    help += self.SUBCOMMAND_HELPMSG.format(parent=self.PROGNAME, sub=name)
                else:
                    help = doc if doc else ""  # @ReservedAssignment
                msg = indentation.join(wrapper.wrap(" ".join(l.strip() for l in help.splitlines())))
                if len(name) + wrapper.width >= cols:
                    padding = indentation
                else:
                    padding = " " * max(cols - wrapper.width - len(name) - 4, 1)
                if colors.contains_colors(subcls.name):
                    # The subcommand name carries its own colouring; reuse it
                    bodycolor = colors.extract(subcls.name)
                else:
                    bodycolor = gc
                print(description_indent % (subcls.name, padding, bodycolor | colors.filter(msg)))
def main(): wrapper = TextWrapper(initial_indent=" - ") extra_info = [] devel = False if DEVEL_OPT in sys.argv: sys.argv = [a for a in sys.argv if a != DEVEL_OPT] devel = True extra_info.append(bold_text("\nADDITIONAL INFORMATION:")) clock_behaviors = [] for x in ClockBehaviorsFactory.get_clockbehaviors(): wrapper.subsequent_indent = " " * (len(" - \"\": " + x.get_name())) clock_behaviors.append("\n".join( wrapper.wrap("\"%s\": %s, parameters (%s)" % (x.get_name(), x.get_desc(), x.get_interface())))) extra_info.append('\nClock behaviors:\n%s' % ("\n".join(clock_behaviors))) sugars = [] for x in SyntacticSugarFactory.get_sugars(): wrapper.subsequent_indent = " " * (len(" - \"\": " + x.get_name())) sugars.append("\n".join( wrapper.wrap("\"%s\": %s, parameters (%s)" % (x.get_name(), x.get_desc(), x.get_interface())))) extra_info.append('\nSpecial operators:\n%s' % ("\n".join(sugars))) generators = [] for x in GeneratorsFactory.get_generators(): wrapper.subsequent_indent = " " * (len(" - \"\": " + x.get_name())) generators.append("\n".join( wrapper.wrap("\"%s\": %s, parameters (%s)" % (x.get_name(), x.get_desc(), x.get_interface())))) extra_info.append('\nModule generators:\n%s' % ("\n".join(generators))) modifiers = [] modifiers.append(" - \"None\": No extension") for x in ModelModifiersFactory.get_modifiers(): wrapper.subsequent_indent = " " * (len(" - \"\": " + x.get_name())) modifiers.append("\n".join( wrapper.wrap("\"%s\": %s" % (x.get_name(), x.get_desc())))) extra_info.append('\nModel modifiers:\n%s' % ("\n".join(modifiers))) parser = argparse.ArgumentParser(description=bold_text('CoSA: CoreIR Symbolic Analyzer\n..an SMT-based Symbolic Model Checker for Hardware Design'), \ #usage='%(prog)s [options]', \ formatter_class=RawTextHelpFormatter, \ epilog="\n".join(extra_info)) config = Config() # Main inputs in_options = parser.add_argument_group('input options') av_input_types = [" - \"%s\": %s"%(x.name, ", ".join(["*.%s"%e for e in x.extensions])) \ for x in 
ModelParsersFactory.get_parsers() if x.is_available()] ua_input_types = [" - \"%s\": %s"%(x.name, ", ".join(["*.%s"%e for e in x.extensions])) \ for x in ModelParsersFactory.get_parsers() if not x.is_available()] in_options.set_defaults(input_files=None) in_options.add_argument('-i', '--input_files', metavar='<input files>', type=str, required=False, help='comma separated list of input files.\nSupported types:\n%s%s'%\ ("\n".join(av_input_types), "\nNot enabled:\n%s"%("\n".join(ua_input_types)) \ if len(ua_input_types) > 0 else "")) in_options.set_defaults(problems=None) in_options.add_argument( '--problems', metavar='<problems file>', type=str, required=False, help='problems file describing the verifications to be performed.') # Verification Options ver_options = parser.add_argument_group('analysis') ver_options.set_defaults(safety=False) ver_options.add_argument('--safety', dest='safety', action='store_true', help='safety verification using BMC.') ver_options.set_defaults(ltl=False) ver_options.add_argument('--ltl', dest='ltl', action='store_true', help='ltl verification using BMC.') ver_options.set_defaults(simulate=False) ver_options.add_argument('--simulate', dest='simulate', action='store_true', help='simulate system using BMC.') ver_options.set_defaults(equivalence=None) ver_options.add_argument('--equivalence', metavar='<input files>', type=str, required=False, help='equivalence checking using BMC.') ver_options.set_defaults(fsm_check=False) ver_options.add_argument( '--fsm-check', dest='fsm_check', action='store_true', help='check if the state machine is deterministic.') ver_options.set_defaults(parametric=False) ver_options.add_argument('--parametric', dest='parametric', action='store_true', help='parametric analysis using BMC.') # Verification parameters ver_params = parser.add_argument_group('verification parameters') ver_params.set_defaults(properties=None) ver_params.add_argument('-p', '--properties', metavar='<invar list>', type=str, required=False, 
help='comma separated list of properties.') ver_params.set_defaults(bmc_length=config.bmc_length) ver_params.add_argument( '-k', '--bmc-length', metavar='<BMC length>', type=int, required=False, help="depth of BMC unrolling. (Default is \"%s\")" % config.bmc_length) ver_params.set_defaults(bmc_length_min=config.bmc_length_min) ver_params.add_argument( '-km', '--bmc-length-min', metavar='<BMC length>', type=int, required=False, help="minimum depth of BMC unrolling. (Default is \"%s\")" % config.bmc_length_min) ver_params.set_defaults(precondition=None) ver_params.add_argument('-r', '--precondition', metavar='<invar>', type=str, required=False, help='invariant properties precondition.') ver_params.set_defaults(lemmas=None) ver_params.add_argument('-l', '--lemmas', metavar='<invar list>', type=str, required=False, help='comma separated list of lemmas.') ver_params.set_defaults(assumptions=None) ver_params.add_argument( '-a', '--assumptions', metavar='<invar assumptions list>', type=str, required=False, help='semi column separated list of invariant assumptions.') ver_params.add_argument( '--generators', metavar='generators', type=str, nargs='?', help='semi column separated list of generators instantiation.') ver_params.add_argument( '--clock-behaviors', metavar='clock_behaviors', type=str, nargs='?', help='semi column separated list of clock behaviors instantiation.') ver_params.set_defaults(prove=False) ver_params.add_argument( '--prove', dest='prove', action='store_true', help= "use indution to prove the satisfiability of the property. (Default is \"%s\")" % config.prove) ver_params.set_defaults(assume_if_true=False) ver_params.add_argument( '--assume-if-true', dest='assume_if_true', action='store_true', help="add true properties as assumptions. (Default is \"%s\")" % config.assume_if_true) ver_params.set_defaults(coi=False) ver_params.add_argument( '--coi', dest='coi', action='store_true', help="enables Cone of Influence. 
(Default is \"%s\")" % config.coi) ver_params.set_defaults(cardinality=config.cardinality) ver_params.add_argument( '--cardinality', dest='cardinality', type=int, required=False, help= "bounds number of active parameters. -1 is unbounded. (Default is \"%s\")" % config.cardinality) strategies = [ " - \"%s\": %s" % (x[0], x[1]) for x in MCConfig.get_strategies() ] defstrategy = MCConfig.get_strategies()[0][0] ver_params.set_defaults(strategy=defstrategy) ver_params.add_argument( '--strategy', metavar='strategy', type=str, nargs='?', help='select the BMC strategy between (Default is \"%s\"):\n%s' % (defstrategy, "\n".join(strategies))) ver_params.set_defaults(processes=config.processes) ver_params.add_argument( '-j', dest='processes', metavar="<integer level>", type=int, help="number of multi-processes for MULTI strategy. (Default is \"%s\")" % config.processes) ver_params.set_defaults(ninc=False) ver_params.add_argument( '--ninc', dest='ninc', action='store_true', help="disables incrementality. (Default is \"%s\")" % (not config.incremental)) ver_params.set_defaults(solver_name=config.solver_name) ver_params.add_argument( '--solver-name', metavar='<Solver Name>', type=str, required=False, help="name of SMT solver to be use. (Default is \"%s\")" % config.solver_name) # Encoding parameters enc_params = parser.add_argument_group('encoding') enc_params.set_defaults(cache_files=False) enc_params.add_argument( '-c', '--cache-files', dest='cache_files', action='store_true', help="caches encoded files to speed-up parsing. (Default is \"%s\")" % config.cache_files) enc_params.set_defaults(add_clock=False) enc_params.add_argument('--add-clock', dest='add_clock', action='store_true', help="adds clock behavior. (Default is \"%s\")" % config.add_clock) enc_params.set_defaults(abstract_clock=False) enc_params.add_argument( '--abstract-clock', dest='abstract_clock', action='store_true', help="abstracts the clock behavior. 
(Default is \"%s\")" % config.abstract_clock) enc_params.set_defaults(symbolic_init=config.symbolic_init) enc_params.add_argument( '--symbolic-init', dest='symbolic_init', action='store_true', help='removes constraints on the initial state. (Default is \"%s\")' % config.symbolic_init) enc_params.set_defaults(zero_init=config.zero_init) enc_params.add_argument( '--zero-init', dest='zero_init', action='store_true', help='sets initial state to zero. (Default is \"%s\")' % config.zero_init) enc_params.set_defaults(boolean=config.boolean) enc_params.add_argument( '--boolean', dest='boolean', action='store_true', help= 'interprets single bits as Booleans instead of 1-bit Bitvector. (Default is \"%s\")' % config.boolean) enc_params.set_defaults(run_passes=config.run_passes) enc_params.add_argument( '--no-run-passes', dest='run_passes', action='store_false', help='does not run CoreIR passes. (Default is \"%s\")' % config.run_passes) enc_params.set_defaults(model_extension=config.model_extension) enc_params.add_argument( '--model-extension', metavar='model_extension', type=str, nargs='?', help='select the model modifier. (Default is \"%s\")' % (config.model_extension)) # Printing parameters print_params = parser.add_argument_group('trace printing') print_params.set_defaults(trace_vars_change=config.trace_vars_change) print_params.add_argument( '--trace-vars-change', dest='trace_vars_change', action='store_true', help= "show variable assignments in the counterexamples even when unchanged. (Default is \"%s\")" % config.trace_vars_change) print_params.set_defaults(trace_all_vars=config.trace_all_vars) print_params.add_argument( '--trace-all-vars', dest='trace_all_vars', action='store_true', help="show all variables in the counterexamples. 
(Default is \"%s\")" % config.trace_all_vars) print_params.set_defaults(full_trace=config.full_trace) print_params.add_argument( '--full-trace', dest='full_trace', action='store_true', help= "sets trace-vars-unchanged and trace-all-vars to True. (Default is \"%s\")" % config.full_trace) print_params.set_defaults(prefix=None) print_params.add_argument( '--prefix', metavar='<prefix location>', type=str, required=False, help='write the counterexamples with a specified location prefix.') print_params.set_defaults(vcd=False) print_params.add_argument( '--vcd', dest='vcd', action='store_true', help="generate traces also in vcd format. (Default is \"%s\")" % config.vcd) # Translation parameters trans_params = parser.add_argument_group('translation') trans_params.set_defaults(translate=None) trans_params.add_argument('--translate', metavar='<output file>', type=str, required=False, help='translate input file.') printers = [ " - \"%s\": %s" % (x.get_name(), x.get_desc()) for x in HTSPrintersFactory.get_printers_by_type(HTSPrinterType.TRANSSYS) ] trans_params.set_defaults(printer=config.printer) trans_params.add_argument( '--printer', metavar='printer', type=str, nargs='?', help='select the printer between (Default is \"%s\"):\n%s' % (config.printer, "\n".join(printers))) trans_params.set_defaults(skip_solving=False) trans_params.add_argument( '--skip-solving', dest='skip_solving', action='store_true', help="does not call the solver. (Default is \"%s\")" % config.skip_solving) # Debugging deb_params = parser.add_argument_group('verbosity') deb_params.set_defaults(verbosity=config.verbosity) deb_params.add_argument('-v', dest='verbosity', metavar="<integer level>", type=int, help="verbosity level. (Default is \"%s\")" % config.verbosity) deb_params.set_defaults(time=False) deb_params.add_argument( '--time', dest='time', action='store_true', help="prints time for every verification. 
(Default is \"%s\")" % config.time) deb_params.set_defaults(devel=False) deb_params.add_argument( '--devel', dest='devel', action='store_true', help="enables developer mode. (Default is \"%s\")" % config.devel) # Developers if devel: config.devel = True devel_params = parser.add_argument_group('developer') devel_params.set_defaults(smt2=None) devel_params.add_argument( '--smt2', metavar='<smt-lib2 file>', type=str, required=False, help='generates the smtlib2 tracing file for each solver call.') args = parser.parse_args() config.strfiles = args.input_files config.simulate = args.simulate config.safety = args.safety config.parametric = args.parametric config.ltl = args.ltl config.properties = args.properties config.lemmas = args.lemmas config.precondition = args.precondition config.assumptions = args.assumptions config.equivalence = args.equivalence config.symbolic_init = args.symbolic_init config.zero_init = args.zero_init config.fsm_check = args.fsm_check config.bmc_length = args.bmc_length config.bmc_length_min = args.bmc_length_min config.full_trace = args.full_trace config.trace_vars_change = args.trace_vars_change config.trace_all_vars = args.trace_all_vars config.prefix = args.prefix config.translate = args.translate config.strategy = args.strategy config.processes = args.processes config.skip_solving = args.skip_solving config.abstract_clock = args.abstract_clock config.boolean = args.boolean config.verbosity = args.verbosity config.vcd = args.vcd config.prove = args.prove config.solver_name = args.solver_name config.incremental = not args.ninc config.time = args.time config.add_clock = args.add_clock config.generators = args.generators config.clock_behaviors = args.clock_behaviors config.assume_if_true = args.assume_if_true config.coi = args.coi config.model_extension = args.model_extension config.cardinality = args.cardinality config.cache_files = args.cache_files if devel: config.smt2file = args.smt2 if len(sys.argv) == 1: parser.print_help() sys.exit(1) 
if args.printer in [ str(x.get_name()) for x in HTSPrintersFactory.get_printers_by_type( HTSPrinterType.TRANSSYS) ]: config.printer = args.printer else: Logger.error("Printer \"%s\" not found" % (args.printer)) if args.problems: if config.devel: sys.exit(run_problems(args.problems, config)) else: try: sys.exit(run_problems(args.problems, config)) except Exception as e: Logger.error(str(e), False) sys.exit(1) Logger.error_raise_exept = False if (args.problems is None) and (args.input_files is None): Logger.error("No input files provided") if args.strategy not in [s[0] for s in MCConfig.get_strategies()]: Logger.error("Strategy \"%s\" not found" % (args.strategy)) if not(config.simulate or \ (config.safety) or \ (config.parametric) or \ (config.ltl) or \ (config.equivalence is not None) or\ (config.translate is not None) or\ (config.fsm_check)): Logger.error("Analysis selection is necessary") Logger.error_raise_exept = True if config.devel: sys.exit(run_verification(config)) else: try: sys.exit(run_verification(config)) except Exception as e: Logger.error(str(e), False) sys.exit(1)
def iprint(*args):
    """Print text with every line indented by a single leading space."""
    indented = TextWrapper(initial_indent=" ", subsequent_indent=" ")
    print(indented.fill(*args))
import re
import typing as t
from textwrap import TextWrapper, dedent, indent

import attr
from tableschema import Field, Schema  # type: ignore

from ._types import TypeInfo, get_type_info

# Regex fragment matching a newline plus any following whitespace.
# Fix: made a raw string -- "\s" in a plain string is an invalid escape
# (DeprecationWarning, SyntaxError in future Pythons); the regex meaning
# is unchanged.
_NL = r"\n\s*"

wrapper = TextWrapper(width=70, break_long_words=False, replace_whitespace=False)


def _clean_newlines(snippet: str, max_empty_lines: int = 3) -> str:
    """Collapse runs of blank lines in `snippet` to at most `max_empty_lines`
    consecutive newlines.

    Fix: `max_empty_lines` was previously accepted but ignored (the limit was
    hard-coded to 3); it is now honoured. The default preserves the old
    behaviour.
    """
    pattern = _NL * (max_empty_lines + 1)
    return re.sub(pattern, "\n" * max_empty_lines, snippet)


def _mapping_block(mapping: dict, join: str, value_join: str = ", ", prefix: str = ""):
    """Render `mapping` one entry per line as `prefix + key` joined to its
    value by `join`; sequence values are flattened with `value_join`.
    """
    return "\n".join(
        [
            join.join(
                [
                    prefix + key,
                    value_join.join(value) if not isinstance(value, str) else value,
                ]
            )
            for key, value in mapping.items()
        ]
    )
def typewriter(text: str, speed: int = 10, method='char', end='\n', wrap_text: bool = True,
               textwrapper_args: dict = None, godspeed=False):
    """Print *text* with a typewriter effect, one fragment at a time.

    Parameters:
    - text: object to print (coerced to str)
    - speed: fragments per second (the inter-fragment delay is 1/speed)
    - method: 'char' to emit character by character, 'line' line by line
    - end: terminator printed after the whole text
    - wrap_text: pre-wrap the text to the terminal width
    - textwrapper_args: extra keyword arguments forwarded to TextWrapper
    - godspeed: print everything instantly and return

    Raises ValueError for an unknown *method*.
    """
    # Coerce the input to str
    text = str(text)

    # godspeed mode: dump the whole text at once
    # (note: this path skips wrapping and uses the default print terminator)
    if godspeed:
        print(text)
        return

    if wrap_text:
        # Use TextWrapper to avoid awkward mid-word line breaks
        textwrapper_args = textwrapper_args or {}
        wrapper = TextWrapper(width=get_terminal_size().columns, **textwrapper_args)
        text = wrapper.fill(text)

    # Split the text into fragments according to the chosen method
    if method == 'char':
        # Character-by-character split
        fragments = list(text)
    elif method == 'line':
        # Line-by-line split
        fragments = [line + '\n' for line in text.split('\n')]
    else:
        # Unrecognised method: raise an error
        raise ValueError('Unknown typewriter method ' + method)

    # Delay applied between two consecutive fragments
    delay = 1 / speed

    # TODO: print all fragments instantly when enter pressed during printing (skip like functionnality)

    # For each fragment...
    for i, fragment in enumerate(fragments):
        # Look up the preceding fragment
        # NOTE(review): `previous` is a helper defined elsewhere in the
        # project -- presumably returns None at index 0; confirm.
        prev_fragment = previous(fragments, i)

        # Emit the fragment without a trailing newline
        print(fragment, end='')

        # Flush stdout so the fragment appears immediately
        sys.stdout.flush()

        # Two consecutive spaces: skip the delay entirely
        if fragment == ' ' and prev_fragment == ' ':
            continue

        # A space after sentence-ending punctuation marks the end of a
        # sentence: pause a little longer
        if fragment == ' ' and prev_fragment in ('.', '?', '!'):
            sleep(0.5)

        # Wait before writing the next fragment
        sleep(delay)

    # Final terminator
    print(end, end='')
class asybot(asychat): def __init__(self, server, port, nickname, targets, bottype, probability, **kwargs): asychat.__init__(self) if bottype == "cleverbot": self.botfactory = factory.create(ChatterBotType.CLEVERBOT) elif bottype == "jabberwacky": self.botfactory = factory.create(ChatterBotType.JABBERWACKY) elif bottype == "pandorabots": self.botfactory = factory.create(ChatterBotType.PANDORABOTS, "c7068fbf3e344abf") elif bottype == "twssbot": self.botfactory = factory.create(ChatterBotType.TWSSBOT) else: log.error("do not know bottype %s" % bottype) exit(1) self.bot = self.botfactory.create_session() self.probability = probability self.server = server self.port = port self.nickname = nickname self.targets = targets self.username = kwargs['username'] if 'username' in kwargs else nickname self.hostname = kwargs['hostname'] if 'hostname' in kwargs else nickname self.ircname = kwargs['ircname'] if 'ircname' in kwargs else nickname self.realname = kwargs['realname'] if 'realname' in kwargs else nickname log.info("This is " + BOT_VERSION) log.info("Probability : " + str(probability)) log.info("nick : " + str(nick)) log.info("Bottype: " + str(bottype)) self.data = '' self.set_terminator('\r\n') self.create_socket(AF_INET, SOCK_STREAM) self.connect((self.server, self.port)) self.wrapper = TextWrapper(subsequent_indent=" ", width=400) # When we don't receive data for alarm_timeout seconds then issue a # PING every hammer_interval seconds until kill_timeout seconds have # passed without a message. Any incoming message will reset alarm. self.alarm_timeout = 300 self.hammer_interval = 10 self.kill_timeout = 360 signal(SIGALRM, lambda signum, frame: self.alarm_handler()) self.reset_alarm() def reset_alarm(self): self.last_activity = date.now() alarm(self.alarm_timeout) def alarm_handler(self): delta = date.now() - self.last_activity if delta > timedelta(seconds=self.kill_timeout): log.error('No data for %s. Giving up...' % delta) exit(2) else: log.error('No data for %s. 
PINGing server...' % delta) self.push('PING :%s' % self.nickname) alarm(self.hammer_interval) def collect_incoming_data(self, data): self.data += data def found_terminator(self): log.debug('<< %s' % self.data) message = self.data self.data = '' _, prefix, command, params, rest, _ = \ split('^(?::(\S+)\s)?(\S+)((?:\s[^:]\S*)*)(?:\s:(.*))?$', message) params = params.split(' ')[1:] #print([prefix, command, params, rest]) if command == 'PING': self.push('PONG :%s' % rest) log.debug("Replying to servers PING with PONG :%s" % rest) elif command == 'PRIVMSG': self.on_privmsg(prefix, command, params, rest) elif command == '433': # ERR_NICKNAMEINUSE, retry with another name _, nickname, int, _ = split('^.*[^0-9]([0-9]+)$', self.nickname) \ if search('[0-9]$', self.nickname) \ else ['', self.nickname, 0, ''] self.nickname = nickname + str(int + 1) self.handle_connect() self.reset_alarm() def push(self, message): log.debug('>> %s' % message) asychat.push(self, message + self.get_terminator()) def handle_connect(self): self.push('NICK %s' % self.nickname) self.push('USER %s %s %s :%s' % (self.username, self.hostname, self.server, self.realname)) self.push('JOIN %s' % ','.join(self.targets)) def on_privmsg(self, prefix, command, params, rest): def PRIVMSG(text): for line in self.wrapper.wrap(text): msg = 'PRIVMSG %s :%s' % (','.join(params), line) self.push(msg) sleep(1) def ME(text): PRIVMSG('ACTION ' + text + '') _from = prefix.split('!', 1)[0] from random import random if random() < self.probability: try: # delay log.info("%s will answer to '%s'" % (self.nickname, rest)) now = time() answer = str(self.bot.think(rest)) timetosleep = now + base_delay - time() + len(answer) / CPS log.debug("sleeping for %f seconds" % timetosleep) if (timetosleep > 0): sleep(timetosleep) PRIVMSG(answer) except Exception as e: log.error(str(e)) else: log.info("%s will not answer to '%s', probability check failed" % (self.nickname, rest))
fin = os.popen( 'git log --summary --stat --no-merges --date=short %s' % rev_range, 'r') # Create a ChangeLog file in the current directory. fout = open('ChangeLog', 'w') # Set up the loop variables in order to locate the blocks we want authorFound = False dateFound = False messageFound = False filesFound = False message = "" messageNL = False files = "" prevAuthorLine = "" wrapper = TextWrapper(initial_indent="\t", subsequent_indent="\t ") # The main part of the loop for line in fin: # The commit line marks the start of a new commit object. if line.startswith('commit'): # Start all over again... authorFound = False dateFound = False messageFound = False messageNL = False message = "" filesFound = False files = "" continue # Match the author line and extract the part we want
def wprint(*args):
    """Print text re-flowed through a default-configured TextWrapper."""
    filled = TextWrapper().fill(*args)
    print(filled)
def wrapText(t):
    """Fill each input line separately at the module-level `width`,
    preserving the original hard line breaks."""
    tw = TextWrapper(width=width)
    filled = [tw.fill(line) for line in t.splitlines()]
    return "\n".join(filled)
def cruReview(obj, htmlLink=None):
    """Decorate a Crucible review dict with formatted fields (fmt_*) and
    print it through the configured review template.

    ANSI colouring: green = completed reviewer / past date, red = pending
    reviewer / future date, blue = comment author.
    """
    obj["htmlURL"] = htmlLink

    # Replace reviewer array with formatted reviewers
    reviewers = []
    reviewers_color = []
    reviewers_completed_color = []
    reviewers_uncompleted_color = []
    for r in obj["reviewers"]["reviewer"]:
        reviewers.append(r["displayName"])
        if r["completed"]:
            # Completed reviewers are shown first (prepended) in green
            name = "\033[32m%s\033[0m" % r["displayName"]
            reviewers_color.insert(0, name)
            reviewers_completed_color.append(name)
        else:
            name = "\033[31m%s\033[0m" % r["displayName"]
            reviewers_color.append(name)
            reviewers_uncompleted_color.append(name)
    obj["fmt_reviewers"] = ", ".join(reviewers)
    obj["fmt_reviewers_color"] = ", ".join(reviewers_color)
    obj["fmt_reviewers_uncompleted_color"] = ", ".join(
        reviewers_uncompleted_color)
    obj["fmt_reviewers_completed_color"] = ", ".join(
        reviewers_completed_color)

    # Replace dates with formatted dates
    def replaceFmtDate(field):
        # Locale-appropriate date/time format; colour by past/future
        fmt = locale.nl_langinfo(locale.D_T_FMT)
        dt = dateutil.parser.parse(obj[field])
        strtime = dt.strftime(fmt)
        obj["fmt_" + field] = strtime
        if dt < datetime.datetime.now(dt.tzinfo):
            obj["fmt_%s_color" % field] = "\033[32m%s\033[0m" % strtime
        else:
            obj["fmt_%s_color" % field] = "\033[31m%s\033[0m" % strtime

    for field in ["dueDate", "createDate"]:
        replaceFmtDate(field)

    # Replace general comments array with formatted comments
    cmtstr = ""
    for cmt in obj["generalComments"]["comments"]:
        if cmt["draft"] or cmt["deleted"]:
            continue
        if cmt["defectRaised"]:
            cmtstr += "\033[31m[defect]\033[0m"
        cmtstr += "\033[34m%s\033[0m:" % cmt["user"]["displayName"]
        cmtstr += "\n"
        cmtstr += cmt["message"]
        cmtstr += "\n\n"
    obj["fmt_general_comments"] = cmtstr.strip()

    def addComment(cmt):
        # Format a single versioned comment; skipped comments yield "".
        cmtstr = ""
        if cmt.get("draft", False) or cmt.get("deleted", False):
            # Bug fix: this previously returned None, which made the string
            # concatenations at the call sites below raise TypeError for any
            # draft or deleted comment.
            return ""
        if cmt.get("defectRaised", False):
            cmtstr += "\033[31m[defect]\033[0m"
        cmtstr += "\033[34m%s\033[0m:" % cmt["user"]["displayName"]
        cmtstr += "\n"
        if "toLineRange" in cmt:
            cmtstr += "Line %s: " % cmt["toLineRange"]
        cmtstr += cmt["message"]
        return cmtstr

    # Replace versioned comments array with formatted comments
    cmtstr = ""
    wrapper = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=120)
    for cmt in obj["versionedComments"]["comments"]:
        cmtstr += addComment(cmt) + "\n"
        for r in cmt["replies"]:
            # Replies are indented by the wrapper
            cmtstr += wrapper.fill(addComment(r)) + "\n"
        cmtstr += "\n\n"
    obj["fmt_versioned_comments"] = cmtstr.strip()

    # Uncomment to edit template:
    # print json_pp(obj)
    print(config.format("templates", "review", obj))
choices=(1, 8), default=8, help="PKCS encoding to use for PEM commented private key") parser.add_argument("output", nargs="?", type=FileType("w"), default=sys.stdout, help="output file") args = parser.parse_args() plaintext = "You can hack anything you want with TECO and DDT." scriptname = os.path.basename(sys.argv[0]) wrapper = TextWrapper(width=78, initial_indent=" " * 2, subsequent_indent=" " * 2) def printlines(*lines, **kwargs): for line in lines: args.output.write(line % kwargs + "\n") def trailing_comma(item, sequence): return "" if item == sequence[-1] else "," def print_hex(name, value, comment): printlines( "static const uint8_t %(name)s[] = { /* %(comment)s, %(length)d bytes */",
class TopicTreePrinter(ITopicTreeVisitor):
    """
    Example topic tree visitor that prints a prettified representation
    of topic tree by doing a depth-first traversal of topic tree and
    print information at each (topic) node of tree. Extra info to be
    printed is specified via the 'extra' kwarg. Its value must be a
    list of characters, the order determines output order:
    - D: print description of topic
    - a: print kwarg names only
    - A: print topic kwargs and their description
    - L: print listeners currently subscribed to topic

    E.g. TopicTreePrinter(extra='LaDA') would print, for each topic,
    the list of subscribed listeners, the topic's list of kwargs, the
    topic description, and the description for each kwarg,

        >>> Topic "delTopic"
           >> Listeners:
              > listener1_2880 (from yourModule)
              > listener2_3450 (from yourModule)
           >> Names of Message arguments:
              > arg1
              > arg2
           >> Description: whenever a topic is deleted
           >> Descriptions of Message arguments:
              > arg1: (required) its description
              > arg2: some other description
    """

    allowedExtras = frozenset('DAaL')  # must NOT change
    ALL_TOPICS_NAME = 'ALL_TOPICS'  # output for name of 'all topics' topic

    def __init__(self, extra=None, width=70, indentStep=4,
                 bulletTopic='\\--', bulletTopicItem='|==',
                 bulletTopicArg='-', fileObj=None):
        """Topic tree printer will print listeners for each topic only
        if printListeners is True. The width will be used to limit
        the width of text output, while indentStep is the number of
        spaces added each time the text is indented further. The
        three bullet parameters define the strings used for each item
        (topic, topic items, and kwargs).
        """
        # Map each 'extra' selector character to its rendering method.
        self.__contentMeth = dict(
            D=self.__printTopicDescription,
            A=self.__printTopicArgsAll,
            a=self.__printTopicArgNames,
            L=self.__printTopicListeners)
        assert self.allowedExtras == set(self.__contentMeth)
        import sys
        # Output is accumulated in __output and flushed to
        # __destination once traversal is done.
        self.__destination = fileObj or sys.stdout
        self.__output = []

        # Reject selector characters outside allowedExtras up front.
        self.__content = extra or ''
        unknownSel = set(self.__content) - self.allowedExtras
        if unknownSel:
            msg = 'These extra chars not known: %s' % ','.join(unknownSel)
            raise ValueError(msg)

        self.__width = width
        # width is TextWrapper's first positional parameter.
        self.__wrapper = TextWrapper(width)
        self.__indent = 0
        self.__indentStep = indentStep

        self.__topicsBullet = bulletTopic
        self.__topicItemsBullet = bulletTopicItem
        self.__topicArgsBullet = bulletTopicArg

    def getOutput(self):
        # Join the accumulated lines into the final printable string.
        return '\n'.join(self.__output)

    def _doneTraversal(self):
        # Traversal hook: flush the accumulated output to destination.
        if self.__destination is not None:
            self.__destination.write(self.getOutput())

    def _onTopic(self, topicObj):
        """This gets called for each topic. Print as per specified content."""
        # topic name
        self.__wrapper.width = self.__width
        indent = self.__indent
        if topicObj.isAll():
            topicName = self.ALL_TOPICS_NAME
        else:
            topicName = topicObj.getNodeName()
        head = '%s Topic "%s"' % (self.__topicsBullet, topicName)
        self.__output.append(self.__formatDefn(indent, head))
        indent += self.__indentStep

        # each extra content (assume constructor verified that chars are valid)
        for item in self.__content:
            function = self.__contentMeth[item]
            function(indent, topicObj)

    def _startChildren(self):
        """Increase the indent"""
        self.__indent += self.__indentStep

    def _endChildren(self):
        """Decrease the indent"""
        self.__indent -= self.__indentStep

    def __formatDefn(self, indent, item, defn='', sep=': '):
        """Print a definition: a block of text at a certain indent,
        has item name, and an optional definition separated from item
        by sep.
        """
        if defn:
            # Abuse initial_indent to carry "indent + item + sep" so
            # the wrapped definition hangs under the item name.
            prefix = '%s%s%s' % (' ' * indent, item, sep)
            self.__wrapper.initial_indent = prefix
            self.__wrapper.subsequent_indent = ' ' * (indent + self.__indentStep)
            return self.__wrapper.fill(defn)
        else:
            return '%s%s' % (' ' * indent, item)

    def __printTopicDescription(self, indent, topicObj):
        # topic description
        defn = '%s Description' % self.__topicItemsBullet
        self.__output.append(
            self.__formatDefn(indent, defn, topicObj.getDescription()))

    def __printTopicArgsAll(self, indent, topicObj, desc=True):
        # topic kwargs; with desc=False only the names are printed
        # (see __printTopicArgNames).
        args = topicObj.getArgDescriptions()
        if args:
            #required, optional, complete = topicObj.getArgs()
            headName = 'Names of Message arguments:'
            if desc:
                headName = 'Descriptions of message arguments:'
            head = '%s %s' % (self.__topicItemsBullet, headName)
            self.__output.append(self.__formatDefn(indent, head))
            tmpIndent = indent + self.__indentStep
            required = topicObj.getArgs()[0]
            for key, arg in args.items():  # iter in 3, list in 2 ok
                if not desc:
                    arg = ''
                elif key in required:
                    arg = '(required) %s' % arg
                msg = '%s %s' % (self.__topicArgsBullet, key)
                self.__output.append(self.__formatDefn(tmpIndent, msg, arg))

    def __printTopicArgNames(self, indent, topicObj):
        self.__printTopicArgsAll(indent, topicObj, False)

    def __printTopicListeners(self, indent, topicObj):
        if topicObj.hasListeners():
            item = '%s Listeners:' % self.__topicItemsBullet
            self.__output.append(self.__formatDefn(indent, item))
            tmpIndent = indent + self.__indentStep
            for listener in topicObj.getListenersIter():
                item = '%s %s (from %s)' % (self.__topicArgsBullet,
                                            listener.name(), listener.module())
                self.__output.append(self.__formatDefn(tmpIndent, item))
def get_wrap(): t_width = int(subprocess.check_output(['tput', 'cols']).strip()) wrap_width = t_width / 3 return TextWrapper(width=wrap_width).wrap
def pretty_print_stmts(stmt_list: List[Statement],
                       stmt_limit: Optional[int] = None,
                       ev_limit: Optional[int] = 5,
                       width: Optional[int] = None) -> None:
    """Print a formatted list of statements along with evidence text.

    Requires the tabulate package (https://pypi.org/project/tabulate).

    Parameters
    ----------
    stmt_list : List[Statement]
        The list of INDRA Statements to be printed.
    stmt_limit : Optional[int]
        The maximum number of INDRA Statements to be printed. If None, all
        Statements are printed. (Default is None)
    ev_limit : Optional[int]
        The maximum number of Evidence to print for each Statement. If None,
        all evidence will be printed for each Statement. (Default is 5)
    width : Optional[int]
        Manually set the width of the table. If `None` the function will try
        to match the current terminal width using `os.get_terminal_size()`.
        If this fails the width defaults to 80 characters. The maximum width
        can be controlled by setting :data:`pretty_print_max_width` using the
        :func:`set_pretty_print_max_width` function. This is useful in
        Jupyter notebooks where the environment returns a terminal size of
        80 characters regardless of the width of the window. (Default is
        None).
    """
    # Import some modules helpful for text formatting.
    from textwrap import TextWrapper
    from tabulate import tabulate
    from os import get_terminal_size

    # Try to get the actual number of columns in the terminal.
    if width is None:
        width = 80
        try:
            width = get_terminal_size().columns
        except Exception as e:
            logger.debug(f"Failed to get terminal size (using default "
                         f"{width}): {e}.")

        # Apply the maximum.  NOTE(review): the cap is applied only on
        # the auto-detected width, not on an explicitly passed one.
        if pretty_print_max_width is not None:
            assert isinstance(pretty_print_max_width, int)
            width = min(width, pretty_print_max_width)

    # Parameterize the text wrappers that format the ev text and the metadata.
    stmt_tr = TextWrapper(width=width)
    metadata_tr = TextWrapper(width=16)
    # The evidence column takes the remaining width next to the 16-col
    # metadata column (minus 2 chars of tabulate padding).
    evidence_tr = TextWrapper(width=width - metadata_tr.width - 2)

    # Print the table.
    for i, s in enumerate(stmt_list[:stmt_limit]):
        # Print the Statement heading.
        stmt_str = f"[LIST INDEX: {i}] " + str(s)
        print(stmt_tr.fill(stmt_str))
        print("=" * width)

        # Print the evidence
        for j, ev in enumerate(s.evidence[:ev_limit]):
            # Gather the metadata we want to display.  Only the first
            # matching text-ref ID type is shown (note the break).
            metadata = [("EV INDEX", j), ("SOURCE", ev.source_api)]
            for id_type in ['PMID', 'PMCID', 'DOI']:
                if id_type in ev.text_refs:
                    metadata.append((id_type, ev.text_refs[id_type]))
                    break

            # Form the metadata string to fill out its allocated space.
            metadata_str = '\n'.join(
                line + ' ' * (metadata_tr.width - len(line))
                for k, v in metadata for line in metadata_tr.wrap(f"{k}: {v}"))

            # Form the evidence string.
            if ev.text:
                text_str = evidence_tr.fill(ev.text)
            else:
                text_str = evidence_tr.fill("(No evidence text)")

            # Print the entire thing
            full_str = tabulate([[metadata_str, text_str]], tablefmt='plain')
            print(full_str)
            print('-' * width)
        print()
'needed_by': set(), 'needs': set(), 'preceded_by': set(), 'precedes': set(), 'error_on_missing_fault': False, 'tags': set(), 'triggered': False, 'triggered_by': set(), 'triggers': set(), 'unless': "", 'when_creating': {}, } wrapper = TextWrapper( break_long_words=False, break_on_hyphens=False, expand_tabs=False, replace_whitespace=False, ) def format_comment(comment): result = "\n\n" for line in wrapper.wrap(cleandoc(comment)): for inlineline in line.split("\n"): result += "# {}\n".format(italic(inlineline)) return result class ItemStatus: """ Holds information on a particular Item such as whether it needs
import inspect from textwrap import TextWrapper ds = load("RD0005-mine/RedshiftOutput0005") output = open("source/analyzing/_obj_docstrings.inc", "w") template = """ .. class:: %(clsname)s%(sig)s: For more information, see :ref:`%(docstring)s` (This is a proxy for :class:`~%(clsproxy)sBase`.) """ tw = TextWrapper(initial_indent=" ", subsequent_indent=" ", width=60) def write_docstring(f, name, cls): for clsi in inspect.getmro(cls): docstring = inspect.getdoc(clsi.__init__) if docstring is not None: break clsname = name sig = inspect.formatargspec(*inspect.getargspec(cls.__init__)) sig = sig.replace("**kwargs", "**field_parameters") clsproxy = "yt.data_objects.data_containers.%s" % (cls.__name__) f.write(template % dict(clsname=clsname, sig=sig, clsproxy=clsproxy, docstring="physical-object-api"))
def setUp(self): self.wrapper = TextWrapper(width=45)
def _exportAnnoFile(abpath_out, anno, verbose=True):
    '''Export annotations in a single PDF

    <abpath_out>: str, absolute path to output txt file.
    <anno>: list, in the form [file_path, highlight_list, note_list].
            highlight_list and note_list are both lists of Anno objs
            (see extracthl.py), containing highlights and notes in
            TEXT format with metadata. To be distinguished with
            FileAnno objs which contains texts coordinates.
            if highlight_list or note_list is [], no such info in
            this PDF.

    Function takes annotations from <anno> and output to the target
    txt file in the following format:

    -----------------------------------------------------
    # Title of PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...

            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time

    -----------------------------------------------------
    # Title of another PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...

            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time

    Use tabs in indention, and markup syntax: ">" for highlights,
    and "-" for notes.

    Update time: 2016-02-24 13:59:56.
    '''
    # NOTE(review): Python 2 code (uses unicode() and byte-encodes
    # before writing).  Also, the docstring says <anno> is a list,
    # but the code accesses anno.highlights / anno.notes -- it is
    # actually an object; verify against the caller.
    # NOTE(review): `verbose` is accepted but never used here.
    conv = lambda x: unicode(x)

    # Highlight-text wrapper: 80 cols, continuation lines tabbed.
    wrapper = TextWrapper()
    wrapper.width = 80
    wrapper.initial_indent = ''
    #wrapper.subsequent_indent='\t'+int(len('> '))*' '
    wrapper.subsequent_indent = '\t'

    # Tag-list wrapper: narrower, double-tabbed continuations.
    wrapper2 = TextWrapper()
    wrapper2.width = 80 - 7
    wrapper2.initial_indent = ''
    #wrapper2.subsequent_indent='\t\t'+int(len('- Tags: '))*' '
    wrapper2.subsequent_indent = '\t\t'

    hlii = anno.highlights
    ntii = anno.notes

    # Take the title from the first highlight, falling back to the
    # first note when there are no highlights.
    try:
        titleii = hlii[0].title
    except:
        titleii = ntii[0].title

    outstr = u'\n\n{0}\n# {1}'.format(int(80) * '-', conv(titleii))

    # Append to the output file; everything is utf-8 encoded (with
    # replacement) before writing.
    with open(abpath_out, mode='a') as fout:
        #outstr=outstr.encode('ascii','replace')
        outstr = outstr.encode('utf8', 'replace')
        fout.write(outstr)

        #-----------------Write highlights-----------------
        if len(hlii) > 0:

            #-------------Loop through highlights-------------
            for hljj in hlii:
                hlstr = wrapper.fill(hljj.text)
                tagstr = ', '.join(['@' + kk for kk in hljj.tags])
                tagstr = wrapper2.fill(tagstr)
                # NOTE(review): the template literal below appears
                # transcription-collapsed (originally multi-line);
                # verify the output format.
                outstr = u''' \n\t> {0} \t\t- @{1} \t\t- Tags: {2} \t\t- Ctime: {3} '''.format(*map(conv, [hlstr, hljj.citationkey,
                    tagstr, hljj.ctime]))
                #outstr=outstr.encode('ascii','replace')
                outstr = outstr.encode('utf8', 'replace')
                fout.write(outstr)

        #-----------------Write notes-----------------
        if len(ntii) > 0:

            #----------------Loop through notes----------------
            for ntjj in ntii:
                ntstr = wrapper.fill(ntjj.text)
                tagstr = ', '.join(['@' + kk for kk in ntjj.tags])
                tagstr = wrapper2.fill(tagstr)
                outstr = u''' \n\t- {0} \t\t- @{1} \t\t- Tags: {2} \t\t- Ctime: {3} '''.format(*map(conv, [ntstr, ntjj.citationkey,
                    tagstr, ntjj.ctime]))
                #outstr=outstr.encode('ascii','replace')
                outstr = outstr.encode('utf8', 'replace')
                fout.write(outstr)
class WrapTestCase(BaseTestCase):
    """Exercises TextWrapper.wrap()/fill() and the private _split()
    helper through BaseTestCase's check/check_wrap/check_split.

    NOTE(review): whitespace inside several expected-string literals
    looks collapsed by transcription (e.g. the fix_sentence_endings
    cases would normally contain double spaces after sentence ends,
    and the test_whitespace source text would be multi-line) --
    verify against the canonical source before relying on these.
    """

    def setUp(self):
        self.wrapper = TextWrapper(width=45)

    def test_simple(self):
        # Simple case: just words, spaces, and a bit of punctuation
        text = "Hello there, how are you this fine day? I'm glad to hear it!"

        self.check_wrap(text, 12, [
            "Hello there,", "how are you", "this fine", "day? I'm",
            "glad to hear", "it!"
        ])
        self.check_wrap(text, 42, [
            "Hello there, how are you this fine day?",
            "I'm glad to hear it!"
        ])
        self.check_wrap(text, 80, [text])

    def test_whitespace(self):
        # Whitespace munging and end-of-sentence detection
        text = """\ This is a paragraph that already has line breaks. But some of its lines are much longer than the others, so it needs to be wrapped. Some lines are \ttabbed too. What a mess! """

        expect = [
            "This is a paragraph that already has line",
            "breaks. But some of its lines are much",
            "longer than the others, so it needs to be",
            "wrapped. Some lines are tabbed too. What a",
            "mess!"
        ]

        wrapper = TextWrapper(45, fix_sentence_endings=True)
        result = wrapper.wrap(text)
        self.check(result, expect)

        result = wrapper.fill(text)
        self.check(result, '\n'.join(expect))

    def test_fix_sentence_endings(self):
        wrapper = TextWrapper(60, fix_sentence_endings=True)

        # SF #847346: ensure that fix_sentence_endings=True does the
        # right thing even on input short enough that it doesn't need to
        # be wrapped.
        text = "A short line. Note the single space."
        expect = ["A short line. Note the single space."]
        self.check(wrapper.wrap(text), expect)

        # Test some of the hairy end cases that _fix_sentence_endings()
        # is supposed to handle (the easy stuff is tested in
        # test_whitespace() above).
        text = "Well, Doctor? What do you think?"
        expect = ["Well, Doctor? What do you think?"]
        self.check(wrapper.wrap(text), expect)

        text = "Well, Doctor?\nWhat do you think?"
        self.check(wrapper.wrap(text), expect)

        text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
        expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 20
        expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
        self.check(wrapper.wrap(text), expect)

        text = 'And she said, "Go to hell!"\nCan you believe that?'
        expect = ['And she said, "Go to', 'hell!" Can you',
                  'believe that?']
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 60
        expect = ['And she said, "Go to hell!" Can you believe that?']
        self.check(wrapper.wrap(text), expect)

        text = 'File stdio.h is nice.'
        expect = ['File stdio.h is nice.']
        self.check(wrapper.wrap(text), expect)

    def test_wrap_short(self):
        # Wrapping to make short lines longer
        text = "This is a\nshort paragraph."

        self.check_wrap(text, 20, ["This is a short", "paragraph."])
        self.check_wrap(text, 40, ["This is a short paragraph."])

    def test_wrap_short_1line(self):
        # Test endcases
        text = "This is a short line."

        self.check_wrap(text, 30, ["This is a short line."])
        self.check_wrap(text, 30, ["(1) This is a short line."],
                        initial_indent="(1) ")

    def test_hyphenated(self):
        # Test breaking hyphenated words
        text = ("this-is-a-useful-feature-for-"
                "reformatting-posts-from-tim-peters'ly")

        self.check_wrap(text, 40, [
            "this-is-a-useful-feature-for-",
            "reformatting-posts-from-tim-peters'ly"
        ])
        self.check_wrap(text, 41, [
            "this-is-a-useful-feature-for-",
            "reformatting-posts-from-tim-peters'ly"
        ])
        self.check_wrap(text, 42, [
            "this-is-a-useful-feature-for-reformatting-",
            "posts-from-tim-peters'ly"
        ])

    def test_hyphenated_numbers(self):
        # Test that hyphenated numbers (eg. dates) are not broken like words.
        text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
                "released on 1994-02-15.")

        self.check_wrap(text, 30, [
            'Python 1.0.0 was released on',
            '1994-01-26. Python 1.0.1 was',
            'released on 1994-02-15.'
        ])
        self.check_wrap(text, 40, [
            'Python 1.0.0 was released on 1994-01-26.',
            'Python 1.0.1 was released on 1994-02-15.'
        ])

        text = "I do all my shopping at 7-11."
        self.check_wrap(text, 25, ["I do all my shopping at", "7-11."])
        self.check_wrap(text, 27, ["I do all my shopping at", "7-11."])
        self.check_wrap(text, 29, ["I do all my shopping at 7-11."])

    def test_em_dash(self):
        # Test text with em-dashes
        text = "Em-dashes should be written -- thus."
        self.check_wrap(text, 25, ["Em-dashes should be", "written -- thus."])

        # Probe the boundaries of the properly written em-dash,
        # ie. " -- ".
        self.check_wrap(text, 29, ["Em-dashes should be written", "-- thus."])
        expect = ["Em-dashes should be written --", "thus."]
        self.check_wrap(text, 30, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 36, ["Em-dashes should be written -- thus."])

        # The improperly written em-dash is handled too, because
        # it's adjacent to non-whitespace on both sides.
        text = "You can also do--this or even---this."
        expect = ["You can also do", "--this or even", "---this."]
        self.check_wrap(text, 15, expect)
        self.check_wrap(text, 16, expect)
        expect = ["You can also do--", "this or even---", "this."]
        self.check_wrap(text, 17, expect)
        self.check_wrap(text, 19, expect)
        expect = ["You can also do--this or even", "---this."]
        self.check_wrap(text, 29, expect)
        self.check_wrap(text, 31, expect)
        expect = ["You can also do--this or even---", "this."]
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 35, expect)

        # All of the above behaviour could be deduced by probing the
        # _split() method.
        text = "Here's an -- em-dash and--here's another---and another!"
        expect = [
            "Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
            "and", "--", "here's", " ", "another", "---", "and", " ",
            "another!"
        ]
        self.check_split(text, expect)

        text = "and then--bam!--he was gone"
        expect = [
            "and", " ", "then", "--", "bam!", "--", "he", " ", "was",
            " ", "gone"
        ]
        self.check_split(text, expect)

    def test_unix_options(self):
        # Test that Unix-style command-line options are wrapped correctly.
        # Both Optik (OptionParser) and Docutils rely on this behaviour!
        text = "You should use the -n option, or --dry-run in its long form."
        self.check_wrap(text, 20, [
            "You should use the", "-n option, or --dry-",
            "run in its long", "form."
        ])
        self.check_wrap(text, 21, [
            "You should use the -n", "option, or --dry-run",
            "in its long form."
        ])
        expect = [
            "You should use the -n option, or", "--dry-run in its long form."
        ]
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 34, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 38, expect)
        expect = [
            "You should use the -n option, or --dry-",
            "run in its long form."
        ]
        self.check_wrap(text, 39, expect)
        self.check_wrap(text, 41, expect)
        expect = [
            "You should use the -n option, or --dry-run",
            "in its long form."
        ]
        self.check_wrap(text, 42, expect)

        # Again, all of the above can be deduced from _split().
        text = "the -n option, or --dry-run or --dryrun"
        expect = [
            "the", " ", "-n", " ", "option,", " ", "or", " ", "--dry-",
            "run", " ", "or", " ", "--dryrun"
        ]
        self.check_split(text, expect)

    def test_funky_hyphens(self):
        # Screwy edge cases cooked up by David Goodger. All reported
        # in SF bug #596434.
        self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
        self.check_split("what the--", ["what", " ", "the--"])
        self.check_split("what the--.", ["what", " ", "the--."])
        self.check_split("--text--.", ["--text--."])

        # When I first read bug #596434, this is what I thought David
        # was talking about. I was wrong; these have always worked
        # fine. The real problem is tested in test_funky_parens()
        # below...
        self.check_split("--option", ["--option"])
        self.check_split("--option-opt", ["--option-", "opt"])
        self.check_split("foo --option-opt bar",
                         ["foo", " ", "--option-", "opt", " ", "bar"])

    def test_punct_hyphens(self):
        # Oh bother, SF #965425 found another problem with hyphens --
        # hyphenated words in single quotes weren't handled correctly.
        # In fact, the bug is that *any* punctuation around a hyphenated
        # word was handled incorrectly, except for a leading "--", which
        # was special-cased for Optik and Docutils. So test a variety
        # of styles of punctuation around a hyphenated word.
        # (Actually this is based on an Optik bug report, #813077).
        self.check_split("the 'wibble-wobble' widget",
                         ['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
        self.check_split('the "wibble-wobble" widget',
                         ['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
        self.check_split("the (wibble-wobble) widget",
                         ['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
        self.check_split("the ['wibble-wobble'] widget",
                         ['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])

    def test_funky_parens(self):
        # Second part of SF bug #596434: long option strings inside
        # parentheses.
        self.check_split("foo (--option) bar",
                         ["foo", " ", "(--option)", " ", "bar"])

        # Related stuff -- make sure parens work in simpler contexts.
        self.check_split("foo (bar) baz",
                         ["foo", " ", "(bar)", " ", "baz"])
        self.check_split("blah (ding dong), wubba",
                         ["blah", " ", "(ding", " ", "dong),", " ",
                          "wubba"])

    def test_initial_whitespace(self):
        # SF bug #622849 reported inconsistent handling of leading
        # whitespace; let's test that a bit, shall we?
        text = " This is a sentence with leading whitespace."
        self.check_wrap(text, 50,
                        [" This is a sentence with leading whitespace."])
        self.check_wrap(text, 30,
                        [" This is a sentence with", "leading whitespace."])

    def test_no_drop_whitespace(self):
        # SF patch #1581073
        text = " This is a sentence with much whitespace."
        self.check_wrap(text, 10, [
            " This is a", " ", "sentence ", "with ", "much white", "space."
        ], drop_whitespace=False)

    def test_split(self):
        # Ensure that the standard _split() method works as advertised
        # in the comments
        text = "Hello there -- you goof-ball, use the -b option!"
        result = self.wrapper._split(text)
        self.check(result, [
            "Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
            "ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"
        ])

    def test_break_on_hyphens(self):
        # Ensure that the break_on_hyphens attributes work
        text = "yaba daba-doo"
        self.check_wrap(text, 10, ["yaba daba-", "doo"],
                        break_on_hyphens=True)
        self.check_wrap(text, 10, ["yaba", "daba-doo"],
                        break_on_hyphens=False)

    def test_bad_width(self):
        # Ensure that width <= 0 is caught.
        text = "Whatever, it doesn't matter."
        self.assertRaises(ValueError, wrap, text, 0)
        self.assertRaises(ValueError, wrap, text, -1)

    def test_no_split_at_umlaut(self):
        text = "Die Empf\xe4nger-Auswahl"
        self.check_wrap(text, 13, ["Die", "Empf\xe4nger-", "Auswahl"])

    def test_umlaut_followed_by_dash(self):
        text = "aa \xe4\xe4-\xe4\xe4"
        self.check_wrap(text, 7, ["aa \xe4\xe4-", "\xe4\xe4"])
def do_list(
    dbctx, fields, afields, sort_by, ascending, search_text, line_width, separator,
    prefix, limit, for_machine=False
):
    """List books from a calibre database: JSON when for_machine is
    true, otherwise a wrapped, colorized table fitted to the screen.

    NOTE(review): `afields` and `prefix` are accepted but unused in
    this body -- verify against callers before removing.
    """
    if sort_by is None:
        ascending = True
    # Run the 'list' backend command via the db context.
    ans = dbctx.run('list', fields, sort_by, ascending, search_text, limit)
    try:
        book_ids, data, metadata = ans['book_ids'], ans['data'], ans['metadata']
    except TypeError:
        # A non-mapping answer is an error payload from the backend.
        raise SystemExit(ans)
    fields = list(ans['fields'])
    try:
        fields.remove('id')
    except ValueError:
        pass
    # 'id' is always the first column.
    fields = ['id'] + fields
    stringify(data, metadata, for_machine)
    if for_machine:
        raw = json.dumps(
            list(as_machine_data(book_ids, data, metadata)),
            indent=2,
            sort_keys=True
        )
        if not isinstance(raw, bytes):
            raw = raw.encode('utf-8')
        getattr(sys.stdout, 'buffer', sys.stdout).write(raw)
        return
    from calibre.utils.terminal import ColoredStream, geometry

    output_table = prepare_output_table(fields, book_ids, data, metadata)
    widths = list(map(lambda x: 0, fields))

    # Natural (content) width of every column.
    for record in output_table:
        for j in range(len(fields)):
            widths[j] = max(widths[j], str_width(record[j]))

    screen_width = geometry()[0] if line_width < 0 else line_width
    if not screen_width:
        screen_width = 80
    field_width = screen_width // len(fields)
    # Start from an even split, then grow columns greedily (left to
    # right) until the screen width is used up or nothing fits.
    base_widths = list(map(lambda x: min(x + 1, field_width), widths))

    while sum(base_widths) < screen_width:
        adjusted = False
        for i in range(len(widths)):
            if base_widths[i] < widths[i]:
                base_widths[i] += min(
                    screen_width - sum(base_widths), widths[i] - base_widths[i]
                )
                adjusted = True
                break
        if not adjusted:
            break

    widths = list(base_widths)

    titles = map(
        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields
    )
    with ColoredStream(sys.stdout, fg='green'):
        prints(''.join(titles))

    stdout = getattr(sys.stdout, 'buffer', sys.stdout)
    linesep = as_bytes(os.linesep)

    # One wrapper per column; 1-wide columns pass text through as-is.
    wrappers = [TextWrapper(x - 1).wrap if x > 1 else lambda y: y for x in widths]

    for record in output_table:
        text = [
            wrappers[i](record[i]) for i, field in enumerate(fields)
        ]
        # A cell may wrap to several lines; pad shorter cells with ''.
        lines = max(map(len, text))
        for l in range(lines):
            for i, field in enumerate(text):
                ft = text[i][l] if l < len(text[i]) else ''
                stdout.write(ft.encode('utf-8'))
                if i < len(text) - 1:
                    filler = ('%*s' % (widths[i] - str_width(ft) - 1, ''))
                    stdout.write((filler + separator).encode('utf-8'))
            stdout.write(linesep)
def test_fix_sentence_endings(self): wrapper = TextWrapper(60, fix_sentence_endings=True) # SF #847346: ensure that fix_sentence_endings=True does the # right thing even on input short enough that it doesn't need to # be wrapped. text = "A short line. Note the single space." expect = ["A short line. Note the single space."] self.check(wrapper.wrap(text), expect) # Test some of the hairy end cases that _fix_sentence_endings() # is supposed to handle (the easy stuff is tested in # test_whitespace() above). text = "Well, Doctor? What do you think?" expect = ["Well, Doctor? What do you think?"] self.check(wrapper.wrap(text), expect) text = "Well, Doctor?\nWhat do you think?" self.check(wrapper.wrap(text), expect) text = 'I say, chaps! Anyone for "tennis?"\nHmmph!' expect = ['I say, chaps! Anyone for "tennis?" Hmmph!'] self.check(wrapper.wrap(text), expect) wrapper.width = 20 expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!'] self.check(wrapper.wrap(text), expect) text = 'And she said, "Go to hell!"\nCan you believe that?' expect = ['And she said, "Go to', 'hell!" Can you', 'believe that?'] self.check(wrapper.wrap(text), expect) wrapper.width = 60 expect = ['And she said, "Go to hell!" Can you believe that?'] self.check(wrapper.wrap(text), expect) text = 'File stdio.h is nice.' expect = ['File stdio.h is nice.'] self.check(wrapper.wrap(text), expect)
def __init__(self, width): TextWrapper.__init__(self, width, expand_tabs=False, replace_whitespace=False, drop_whitespace=False)
def __str__(self): if self.networks and len(self.networks) > 1: lines = ["Nexus dataset '%s' (#%s) with %d networks" % \ (self.sid, self.id, len(self.networks))] else: lines = ["Nexus dataset '%(sid)s' (#%(id)s)" % self.__dict__] lines.append("vertices/edges: %s" % self.vertices_edges) if self.name: lines.append("name: %s" % self.name) if self.tags: lines.append("tags: %s" % "; ".join(self.tags)) if self.rest: wrapper = TextWrapper(width=76, subsequent_indent=' ') keys = sorted(self.rest.iterkeys()) if "attribute" in self.rest: keys.remove("attribute") keys.append("attribute") for key in keys: for value in self.rest.getlist(key): paragraphs = str(value).splitlines() wrapper.initial_indent = "%s: " % key for paragraph in paragraphs: ls = wrapper.wrap(paragraph) if ls: lines.extend(wrapper.wrap(paragraph)) else: lines.append(" .") wrapper.initial_indent = " " return "\n".join(lines)