Example #1
 def _indent(self, text):
     wrapper = textwrap.TextWrapper(initial_indent='    ',
                                    subsequent_indent='        ')
     return '\n'.join([wrapper.fill(line) for line in text.split('\n')])
Example #2
class Sequence:
	_FILL_CHARACTER = '#'
	_EMPTY_CHARACTER = '.'
	_TICK_STEP = 5
	_TEXT_WRAPPER = textwrap.TextWrapper(
		replace_whitespace=False,
		drop_whitespace=False,
		break_on_hyphens=False
	)

	def __init__(self, *sequence_containers, **kwargs):
		# kwargs.get('aa_sequence', )
		aa_sequence = []

		for sequence_container in sequence_containers:
			if isinstance(sequence_container, MDAnalysis.core.groups.AtomGroup):
				aa_sequence.extend(self.get_from_structure(sequence_container))
			elif isinstance(sequence_container, Bio.SeqRecord.SeqRecord):
				aa_sequence.extend(self.get_from_record(sequence_container))
			elif isinstance(sequence_container, (list, np.ndarray)):
				aa_sequence.extend(sequence_container)
			elif isinstance(sequence_container, Sequence):
				aa_sequence.extend(sequence_container.aa_sequence)
			else:
				raise NotImplementedError

		self.aa_sequence = np.array(aa_sequence)

		# Formatter
		self.fill_character = kwargs.get('fill_character', self._FILL_CHARACTER)
		self.empty_character = kwargs.get('empty_character', self._EMPTY_CHARACTER)
		self.tick_step = kwargs.get('tick_step', self._TICK_STEP)

	def __bool__(self):
		return len(self.aa_sequence) > 0

	def __len__(self):
		return len(self.aa_sequence)

	def __getitem__(self, index):
		if isinstance(index, slice):
			return Sequence(self.aa_sequence[index])
		else:
			return self.aa_sequence[index]

	# def __delitem__(self, index):
	#     pdb.set_trace()
	#     for aa in self.aa_sequence[:index]:
	#         aa.index -= 1
	#     del self.aa_sequence[index]

	def remove(self, *indices):
		self.aa_sequence = np.delete(self.aa_sequence, indices)

	def __iter__(self):
		return iter(self.aa_sequence)

	def __repr__(self):
		position_line = self.position_header

		lines = []
		wrappers = zip(
			self._TEXT_WRAPPER.wrap(position_line),
			self._TEXT_WRAPPER.wrap(self.full_string),
		)

		for wrapped_lines in wrappers:
			position_wrapped_line, sequence_wrapped_line = wrapped_lines
			lines.append(position_wrapped_line)
			lines.append(sequence_wrapped_line)
			lines.append('')

		return '\n'.join(lines)

	def __str__(self):
		# Init sequence with gap characters
		full_string_sequence = np.empty(self.n_positions, dtype='<U1')
		full_string_sequence[:] = AminoAcid._GAP_CHARACTER

		# Get stripped amino acids from sequence (no gap) and their corresponding positions
		# Both array lengths are equal
		stripped_string_sequence = np.array([x for x in self.stripped_string], dtype='<U1')
		positions = self.positions

		# Put aa 1-letter code into corresponding position
		full_string_sequence[positions] = stripped_string_sequence

		return ''.join(full_string_sequence)

	@property
	def full_string(self):
		return str(self)
	
	@property
	def stripped_string(self):
		return ''.join([aa.ol for aa in self.aa_sequence])

	@classmethod
	def get_position_ticks(cls, sequence, tick_step):
		return np.arange(0, len(sequence.positions), tick_step)

	@classmethod
	def get_position_format(cls, sequence, tick_step):
		return '{{:<{}d}}'.format(tick_step) * (sequence.positions.shape[0] // tick_step)

	@property
	def position_ticks(self):
		return np.arange(0, len(self.positions), self.tick_step)

	@property
	def position_format(self):
		return '{{:<{}d}}'.format(self.tick_step) * (self.positions.shape[0] // self.tick_step)

	@property
	def position_header(self):
		return self.position_format.format(*self.position_ticks)

	def string_mask(self, masked_array):
		filled = np.invert(masked_array)
		chars = np.empty(len(masked_array), dtype=str)
		chars[filled] = self.fill_character
		chars[~filled] = self.empty_character

		return ''.join(chars)

	@property
	def resid_mask(self):
		# True at every position occupied by a residue; gap positions stay False.
		extended_resid_axis = np.zeros(self.n_positions, dtype=bool)
		extended_resid_axis[self.positions] = True
		return extended_resid_axis

	@property
	def n_positions(self):
		return self.aa_sequence[-1].position + 1

	@property
	def start_resid(self):
		return self.aa_sequence[0].resid
	
	@property
	def end_resid(self):
		return self.aa_sequence[-1].resid

	@property
	def positions(self):
		return np.array([aa.position for aa in self.aa_sequence])

	@property
	def resids(self):
		return np.array([aa.resid for aa in self.aa_sequence])

	@property
	def indices(self):
		return np.array([aa.index for aa in self.aa_sequence])

	def get_by_position(self, *positions):
		return Sequence(self.aa_sequence[np.isin(self.positions, np.array(positions))])

	def get_by_resid(self, *resids):
		return Sequence(self.aa_sequence[np.isin(self.resids, np.array(resids))])

	def get_by_position_range(self, start_position, end_position):
		start_aa, end_aa = self.get_by_position(start_position, end_position)
		if end_aa.index < len(self.aa_sequence) - 1:
			end_index = end_aa.index + 1
		else:
			end_index = None # Take the last index while slicing
		return Sequence(self.aa_sequence[start_aa.index:end_index])

	def get_by_resid_range(self, start_resid, end_resid):
		start_aa, end_aa = self.get_by_resid(start_resid, end_resid)
		if end_aa.index < len(self.aa_sequence) - 1:
			end_index = end_aa.index + 1
		else:
			end_index = None # Take the last index while slicing
		return Sequence(self.aa_sequence[start_aa.index:end_index])

	@classmethod
	def get_from_record(cls, record):
		aa_sequence = []

		if 'start' in record.annotations:
			resid = record.annotations['start']
		else:
			resid = 1

		for position, aa_ol in enumerate(str(record.seq)):
			if aa_ol != AminoAcid._GAP_CHARACTER:
				aa = AminoAcid(aa_ol, position=position, resid=resid, index=len(aa_sequence))
				aa_sequence.append(aa)
				resid += 1

		return np.array(aa_sequence)

	def load_from_record(self, record):
		self.aa_sequence = self.get_from_record(record)

	@classmethod
	def get_from_structure(cls, protein_structure):
		aa_sequence = []

		position = 0
		previous_resid = 0

		for residue in protein_structure.residues:
			resid = residue.resid
			shift = resid - previous_resid

			for i in range(1, shift):
				shifted_resid = previous_resid + i
				aa = AminoAcid(AminoAcid._ANY_CHARACTER, resid=shifted_resid, position=position)
				aa_sequence.append(aa)
				position += 1

			aa = AminoAcid(residue.resname[:3], resid=resid, position=position, index=residue.resindex)
			aa_sequence.append(aa)

			previous_resid = resid
			position += 1

		return np.array(aa_sequence)

	@classmethod
	def identity(cls, sequence_a, sequence_b):
		# Fraction of matching one-letter codes, relative to the longer of the two sequences.
		length = np.max([len(sequence_a), len(sequence_b)])
		matches = sum(aa_a.ol == aa_b.ol for aa_a, aa_b in zip(sequence_a, sequence_b))
		return matches / length

	def load_from_structure(self, protein_structure):
		self.aa_sequence = self.get_from_structure(protein_structure)

	def update_from_alignment(self, aligned_sequence):
		"""
		.. INFO: aligned_sequence is not guaranteed to contain the same residues as the current sequence
		"""
		index = 0

		for position in range(len(aligned_sequence)):
			aligned_aa_ol = aligned_sequence[position]
			if aligned_aa_ol != AminoAcid._GAP_CHARACTER:
				aa = self.aa_sequence[index]
				if aa.ol != aligned_aa_ol:
					raise ValueError(f'{aligned_aa_ol} in aligned sequence does not correspond to {aa.ol} in defined sequence at position {position}')
				aa.position = position
				index += 1
Example #3
 def __init__(self, doc_width):
     self.text_wrapper = textwrap.TextWrapper(width=doc_width)
     self.indented_wrapper = textwrap.TextWrapper(width=doc_width,
             subsequent_indent=r'  ')
     self.whitespace_re = re.compile(r'\n\s*', re.MULTILINE | re.DOTALL)
from script_config import *
import textwrap
import argparse
import sys
import re

# NOTES
# - each line in a paragraph must have an even number of chars
# - use spaces instead of newline for <name> since that has variable length

LINE_LENGTH = 24
PARAGRAPH_LENGTH = LINE_LENGTH * 3

w = textwrap.TextWrapper()
w.drop_whitespace = True
#w.drop_whitespace = False
w.break_on_hyphens = True
w.width = LINE_LENGTH


def fill_bytes(l):
    return w.fill(l.decode('shift-jis')).encode('shift-jis')


def fill_bytes_ja(l):
    dat = [s.encode('shift-jis') for s in w.wrap(l.decode('shift-jis'))]
    return b'\x81\x40\x0A'.join(dat)


def wrap_bytes(p):
    lines = [s.encode('shift-jis') for s in w.wrap(p.decode('shift-jis'))]
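
A minimal self-contained sketch of the same approach as fill_bytes above: decode a Shift-JIS byte string, wrap it with textwrap, and re-encode it. The sample text and width are hypothetical.

import textwrap

# Wrapper configured like the module above (24-column lines, hyphen breaks allowed).
w = textwrap.TextWrapper(width=24, drop_whitespace=True, break_on_hyphens=True)

# Hypothetical Shift-JIS encoded line; decode, wrap, re-encode.
raw = 'これはテスト用のテキストです。折り返しの動作を確認します。'.encode('shift-jis')
wrapped = w.fill(raw.decode('shift-jis')).encode('shift-jis')
print(wrapped.decode('shift-jis'))
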
Example #5
def generate_function(name, called_name, template, **kwargs):
    """
    Create a wrapper function *pyplot_name* calling *call_name*.

    Parameters
    ----------
    name : str
        The function to be created.
    called_name : str
        The function to be wrapped.
    template : str
        The template to be used. The template must contain {}-style format
        placeholders. The following placeholders are filled in:

        - name: The function name.
        - signature: The function signature (including parentheses).
        - called_name: The name of the called function.
        - call: Parameters passed to *called_name* (including parentheses).

    **kwargs
        Additional parameters are passed to ``template.format()``.
    """
    text_wrapper = textwrap.TextWrapper(break_long_words=False,
                                        width=70,
                                        initial_indent=' ' * 8,
                                        subsequent_indent=' ' * 8)

    # Get signature of wrapped function.
    signature = inspect.signature(getattr(Axes, called_name))
    # Replace self argument.
    params = list(signature.parameters.values())[1:]
    signature = str(
        signature.replace(parameters=[
            param.replace(default=value_formatter(param.default)) if param.
            default is not param.empty else param for param in params
        ]))
    if len('def ' + name + signature) >= 80:
        # Move opening parenthesis before newline.
        signature = '(\n' + text_wrapper.fill(signature).replace('(', '', 1)
    # How to call the wrapped function.
    call = '(' + ', '.join((
        # Pass "intended-as-positional" parameters positionally to avoid
        # forcing third-party subclasses to reproduce the parameter names.
        '{0}' if param.kind in [Parameter.POSITIONAL_OR_KEYWORD]
        and param.default is Parameter.empty else
        # Only pass the data kwarg if it is actually set, to avoid forcing
        # third-party subclasses to support it.
        '**({{"data": data}} if data is not None else {{}})' if param.name ==
        "data" else '{0}={0}' if param.kind in
        [Parameter.POSITIONAL_OR_KEYWORD, Parameter.
         KEYWORD_ONLY] else '*{0}' if param.kind is Parameter.
        VAR_POSITIONAL else '**{0}' if param.kind is Parameter.VAR_KEYWORD else
        # Intentionally crash for Parameter.POSITIONAL_ONLY.
        None).format(param.name) for param in params) + ')'
    MAX_CALL_PREFIX = 18  # len('    __ret = gca().')
    if MAX_CALL_PREFIX + max(len(name), len(called_name)) + len(call) >= 80:
        call = '(\n' + text_wrapper.fill(call[1:])
    # Bail out in case of name collision.
    for reserved in ('gca', 'gci', '__ret'):
        if reserved in params:
            raise ValueError(
                f'Axes method {called_name} has kwarg named {reserved}')

    return template.format(name=name,
                           called_name=called_name,
                           signature=signature,
                           call=call,
                           **kwargs)
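
The docstring above lists the {}-style placeholders the template must contain; here is a minimal sketch of how template.format() assembles the wrapper source, using a hypothetical template and hand-written stand-ins for the signature and call strings that generate_function derives via inspect.

import textwrap

# Hypothetical template containing the documented placeholders.
template = textwrap.dedent('''
    def {name}{signature}:
        return gca().{called_name}{call}
    ''')

# Hand-written stand-ins for the derived signature/call strings; the result is
# only printed as source text here, never executed.
source = template.format(
    name='plot',
    called_name='plot',
    signature='(*args, scalex=True, scaley=True, data=None, **kwargs)',
    call='(*args, scalex=scalex, scaley=scaley, '
         '**({"data": data} if data is not None else {}), **kwargs)',
)
print(source)
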
Example #6
def printBenchmarks():
	print "List of benchmarks: "
	wrapper = textwrap.TextWrapper(initial_indent="* ")
	for b in __BENCHMARKS__:
		print wrapper.fill(b)
Example #7
def help_solvers():
    import pyomo.environ
    wrapper = textwrap.TextWrapper(replace_whitespace=False)
    print("")
    print("Pyomo Solvers and Solver Managers")
    print("---------------------------------")

    print(
        wrapper.fill(
            "Pyomo uses 'solver managers' to execute 'solvers' that perform optimization and other forms of model analysis.  A solver directly executes an optimizer, typically using an executable found on the user's PATH environment.  Solver managers support a flexible mechanism for asyncronously executing solvers either locally or remotely.  The following solver managers are available in Pyomo:"
        ))
    print("")
    solvermgr_list = pyomo.opt.SolverManagerFactory.services()
    solvermgr_list = sorted(filter(lambda x: '_' != x[0], solvermgr_list))
    n = max(map(len, solvermgr_list))
    wrapper = textwrap.TextWrapper(subsequent_indent=' ' * (n + 9))
    for s in solvermgr_list:
        format = '    %-' + str(n) + 's     %s'
        print(wrapper.fill(format %
                           (s, pyomo.opt.SolverManagerFactory.doc(s))))
    print("")
    wrapper = textwrap.TextWrapper(subsequent_indent='')
    print(
        wrapper.fill(
            "If no solver manager is specified, Pyomo uses the serial solver manager to execute solvers locally.  The pyro and phpyro solver managers require the installation and configuration of the pyro software.  The neos solver manager is used to execute solvers on the NEOS optimization server."
        ))
    print("")

    print("")
    print("Serial Solver Interfaces")
    print("------------------------")
    print(
        wrapper.fill(
            "The serial, pyro and phpyro solver managers support the following solver interfaces:"
        ))
    print("")
    solver_list = pyomo.opt.SolverFactory.services()
    solver_list = sorted(filter(lambda x: '_' != x[0], solver_list))
    n = max(map(len, solver_list))
    wrapper = textwrap.TextWrapper(subsequent_indent=' ' * (n + 9))
    try:
        # Disable warnings
        logging.disable(logging.WARNING)
        for s in solver_list:
            # Create a solver, and see if it is available
            with pyomo.opt.SolverFactory(s) as opt:
                if s == 'py' or (hasattr(opt, "_metasolver")
                                 and opt._metasolver):
                    # py is a metasolver, but since we don't specify a subsolver
                    # for this test, opt is actually an UnknownSolver, so we
                    # can't try to get the _metasolver attribute from it.
                    # Also, default to False if the attribute isn't implemented
                    msg = '    %-' + str(n) + 's   + %s'
                elif opt.available(False):
                    msg = '    %-' + str(n) + 's   * %s'
                else:
                    msg = '    %-' + str(n) + 's     %s'
                print(wrapper.fill(msg % (s, pyomo.opt.SolverFactory.doc(s))))
    finally:
        # Reset logging level
        logging.disable(logging.NOTSET)
    print("")
    wrapper = textwrap.TextWrapper(subsequent_indent='')
    print(
        wrapper.fill(
            "An asterisk indicates solvers that are currently available to be run from Pyomo with the serial solver manager. A plus indicates meta-solvers, that are always available."
        ))
    print('')
    print(
        wrapper.fill(
            'Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed.  For example, the following indicates that the ipopt solver will be used:'
        ))
    print('')
    print('   asl:ipopt')
    print('')
    print(
        wrapper.fill(
            'The asl interface provides a generic wrapper for all solvers that use the AMPL Solver Library.'
        ))
    print('')
    print(
        wrapper.fill(
            'Note that subsolvers can not be enumerated automatically for these interfaces.  However, if a solver is specified that is not found, Pyomo assumes that the asl solver interface is being used.  Thus the following solver name will launch ipopt if the \'ipopt\' executable is on the user\'s path:'
        ))
    print('')
    print('   ipopt')
    print('')
    try:
        logging.disable(logging.WARNING)
        import pyomo.neos.kestrel
        kestrel = pyomo.neos.kestrel.kestrelAMPL()
        #print "HERE", solver_list
        solver_list = list(
            set([
                name[:-5].lower() for name in kestrel.solvers()
                if name.endswith('AMPL')
            ]))
        #print "HERE", solver_list
        if len(solver_list) > 0:
            print("")
            print("NEOS Solver Interfaces")
            print("----------------------")
            print(
                wrapper.fill(
                    "The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server.  The following solver interfaces are available with your current system configuration:"
                ))
            print("")
            solver_list = sorted(solver_list)
            n = max(map(len, solver_list))
            format = '    %-' + str(n) + 's     %s'
            for name in solver_list:
                print(
                    wrapper.fill(
                        format %
                        (name,
                         pyomo.neos.doc.get(name, 'Unexpected NEOS solver'))))
            print("")
        else:
            print("")
            print("NEOS Solver Interfaces")
            print("----------------------")
            print(
                wrapper.fill(
                    "The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server.  This server is not available with your current system configuration."
                ))
            print("")
    except ImportError:
        pass
    finally:
        logging.disable(logging.NOTSET)
Example #8
    def PrettyPrintNode(self, node, indent=0):
        """Pretty-prints the given XML node at the given indent level.

    Args:
      node: The minidom node to pretty-print.
      indent: The current indent level.

    Returns:
      The pretty-printed string (including embedded newlines).

    Raises:
      Error if the XML has unknown tags or attributes.
    """
        # Handle the top-level document node.
        if node.nodeType == xml.dom.minidom.Node.DOCUMENT_NODE:
            return '\n'.join(
                [self.PrettyPrintNode(n) for n in node.childNodes])

        # Handle text nodes.
        if node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            # Wrap each paragraph in the text to fit in the 80 column limit.
            wrapper = textwrap.TextWrapper()
            wrapper.initial_indent = ' ' * indent
            wrapper.subsequent_indent = ' ' * indent
            wrapper.break_on_hyphens = False
            wrapper.break_long_words = False
            wrapper.width = WRAP_COLUMN
            text = XmlEscape(node.data)
            # Remove any common indent.
            text = textwrap.dedent(text.strip('\n'))
            lines = text.split('\n')
            # Split the text into paragraphs at blank line boundaries.
            paragraphs = [[]]
            for l in lines:
                if len(l.strip()) == 0 and len(paragraphs[-1]) > 0:
                    paragraphs.append([])
                else:
                    paragraphs[-1].append(l)
            # Remove trailing empty paragraph if present.
            if len(paragraphs) > 0 and len(paragraphs[-1]) == 0:
                paragraphs = paragraphs[:-1]
            # Wrap each paragraph and separate with two newlines.
            return '\n\n'.join(
                [wrapper.fill('\n'.join(p)) for p in paragraphs])

        # Handle element nodes.
        if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
            newlines_after_open, newlines_before_close, newlines_after_close = (
                self.tags_that_have_extra_newline.get(node.tagName, (1, 1, 0)))
            # Open the tag.
            s = ' ' * indent + '<' + node.tagName

            # Calculate how much space to allow for the '>' or '/>'.
            closing_chars = 1
            if not node.childNodes:
                closing_chars = 2

            # Pretty-print the attributes.
            attributes = node.attributes.keys()
            if attributes:
                # Reorder the attributes.
                if node.tagName not in self.attribute_order:
                    unrecognized_attributes = attributes
                else:
                    unrecognized_attributes = ([
                        a for a in attributes
                        if a not in self.attribute_order[node.tagName]
                    ])
                    attributes = [
                        a for a in self.attribute_order[node.tagName]
                        if a in attributes
                    ]

                for a in unrecognized_attributes:
                    logging.error('Unrecognized attribute "%s" in tag "%s"' %
                                  (a, node.tagName))
                if unrecognized_attributes:
                    raise Error()

                for a in attributes:
                    value = XmlEscape(node.attributes[a].value)
                    # Replace sequences of whitespace with single spaces.
                    words = value.split()
                    a_str = ' %s="%s"' % (a, ' '.join(words))
                    # Start a new line if the attribute will make this line too long.
                    if LastLineLength(s) + len(
                            a_str) + closing_chars > WRAP_COLUMN:
                        s += '\n' + ' ' * (indent + 3)
                    # Output everything up to the first quote.
                    s += ' %s="' % (a)
                    value_indent_level = LastLineLength(s)
                    # Output one word at a time, splitting to the next line where
                    # necessary.
                    column = value_indent_level
                    for i, word in enumerate(words):
                        # This is slightly too conservative since not every word will be
                        # followed by the closing characters...
                        if i > 0 and (column + len(word) + 1 + closing_chars >
                                      WRAP_COLUMN):
                            s = s.rstrip()  # remove any trailing whitespace
                            s += '\n' + ' ' * value_indent_level
                            column = value_indent_level
                        s += word + ' '
                        column += len(word) + 1
                    s = s.rstrip()  # remove any trailing whitespace
                    s += '"'
                s = s.rstrip()  # remove any trailing whitespace

            # Pretty-print the child nodes.
            if node.childNodes:
                s += '>'
                # Calculate the new indent level for child nodes.
                new_indent = indent
                if node.tagName not in self.tags_that_dont_indent:
                    new_indent += 2
                child_nodes = node.childNodes

                # Recursively pretty-print the child nodes.
                child_nodes = [
                    self.PrettyPrintNode(n, indent=new_indent)
                    for n in child_nodes
                ]
                child_nodes = [c for c in child_nodes if len(c.strip()) > 0]

                # Determine whether we can fit the entire node on a single line.
                close_tag = '</%s>' % node.tagName
                space_left = WRAP_COLUMN - LastLineLength(s) - len(close_tag)
                if (node.tagName in self.tags_that_allow_single_line
                        and len(child_nodes) == 1
                        and len(child_nodes[0].strip()) <= space_left):
                    s += child_nodes[0].strip()
                else:
                    s += '\n' * newlines_after_open + '\n'.join(child_nodes)
                    s += '\n' * newlines_before_close + ' ' * indent
                s += close_tag
            else:
                s += '/>'
            s += '\n' * newlines_after_close
            return s

        # Handle comment nodes.
        if node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
            return '<!--%s-->\n' % node.data

        # Ignore other node types. This could be a processing instruction
        # (<? ... ?>) or cdata section (<![CDATA[...]]!>), neither of which are
        # legal in the histograms XML at present.
        logging.error('Ignoring unrecognized node data: %s' % node.toxml())
        raise Error()
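
A standalone sketch of the text-node handling described above: dedent the text, split it into paragraphs at blank lines, and fill each paragraph at a fixed column with the current indent. The indent level and column width are hypothetical stand-ins for the script's values.

import textwrap

text = '''
    The first paragraph of a description that is long enough to need
    wrapping at the configured column.

    A second paragraph, kept separate by the blank line above.
'''

indent = 4        # hypothetical indent level
wrap_column = 40  # stand-in for WRAP_COLUMN

wrapper = textwrap.TextWrapper(width=wrap_column,
                               initial_indent=' ' * indent,
                               subsequent_indent=' ' * indent,
                               break_on_hyphens=False,
                               break_long_words=False)

# Remove the common indent, split into paragraphs, wrap each one.
dedented = textwrap.dedent(text.strip('\n'))
paragraphs = [p for p in dedented.split('\n\n') if p.strip()]
print('\n\n'.join(wrapper.fill(p) for p in paragraphs))
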
Example #9
File: test_pydoc.py  Project: Gzigithub/-
 def test_bound_python_method(self):
     t = textwrap.TextWrapper()
     self.assertEqual(self._get_summary_line(t.wrap),
         "wrap(text) method of textwrap.TextWrapper instance")
def wrap(string, max_width):
    wrapper = textwrap.TextWrapper(width=max_width)
    s = wrapper.fill(text=string)
    return s
Example #11
    def format_option(self, option):
        """
        A copy of the normal optparse.IndentedHelpFormatter.format_option()
        method.  This has been snarfed so we can modify text wrapping to
        our liking:

        --  add our own regular expression that doesn't break on hyphens
            (so things like --no-print-directory don't get broken);

        --  wrap the list of options themselves when it's too long
            (the wrapper.fill(opts) call below);

        --  set the subsequent_indent when wrapping the help_text.
        """
        # The help for each option consists of two parts:
        #   * the opt strings and metavars
        #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
        #   * the user-supplied help string
        #     eg. ("turn on expert mode", "read data from FILENAME")
        #
        # If possible, we write both of these on the same line:
        #   -x      turn on expert mode
        #
        # But if the opt string list is too long, we put the help
        # string on a second line, indented to the same column it would
        # start in if it fit on the first line.
        #   -fFILENAME, --file=FILENAME
        #           read data from FILENAME
        result = []

        try:
            opts = self.option_strings[option]
        except AttributeError:
            # The Python 2.3 version of optparse attaches this to
            # to the option argument, not to this object.
            opts = option.option_strings

        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            wrapper = textwrap.TextWrapper(width=self.width,
                                           initial_indent='  ',
                                           subsequent_indent='  ')
            wrapper.wordsep_re = no_hyphen_re
            opts = wrapper.fill(opts) + '\n'
            indent_first = self.help_position
        else:  # start help on same line as opts
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
            indent_first = 0
        result.append(opts)
        if option.help:

            try:
                expand_default = self.expand_default
            except AttributeError:
                # The HelpFormatter base class in the Python 2.3 version
                # of optparse has no expand_default() method.
                help_text = option.help
            else:
                help_text = expand_default(option)

            # SCons:  indent every line of the help text but the first.
            wrapper = textwrap.TextWrapper(width=self.help_width,
                                           subsequent_indent='  ')
            wrapper.wordsep_re = no_hyphen_re
            help_lines = wrapper.wrap(help_text)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            for line in help_lines[1:]:
                result.append("%*s%s\n" % (self.help_position, "", line))
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)
Example #12
def reference_text(key, ref):
    '''Convert a single reference to plain text format

    Parameters
    ----------
    key : str
        Reference key (authorname2009a, etc)
    ref : dict
        Information about a single reference
    '''

    # Set up the text wrapping of the data (not the key)
    ref_wrap = textwrap.TextWrapper(initial_indent='',
                                    subsequent_indent=' ' * 8)

    s = ''
    if ref['_entry_type'] == 'unpublished':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        if 'title' in ref:
            s += ref_wrap.fill(ref['title']) + '\n'
        if 'year' in ref:
            s += ref['year'] + ', '
        s += 'unpublished'
    elif ref['_entry_type'] == 'article':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title']) + '\n'
        s += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'],
                                      ref['page'], ref['year'])
        if 'doi' in ref:
            s += '\n' + ref['doi']
    elif ref['_entry_type'] == 'incollection':
        s += ref_wrap.fill(', '.join(ref['authors']))
        s += '\n' + ref_wrap.fill('{}'.format(ref['title']))
        s += '\n' + ref_wrap.fill('in \'{}\''.format(ref['booktitle']))
        if 'editors' in ref:
            s += '\n' + ref_wrap.fill('ed. ' + ', '.join(ref['editors']))
        if 'series' in ref:
            s += '\n{}, {}, {} ({})'.format(ref['series'], ref['volume'],
                                            ref['page'], ref['year'])
        if 'doi' in ref:
            s += '\n' + ref['doi']
    elif ref['_entry_type'] == 'phdthesis':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title']) + '\n'
        s += '{}, {}'.format(ref.get('type', 'Ph.D. Thesis'), ref['school'])
    elif ref['_entry_type'] == 'techreport':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += '\n' + ref_wrap.fill('{}'.format(ref['title']))
        s += '\n\'{}\''.format(ref['institution'])
        s += '\n' + ref.get('type', 'Technical Report')
        if 'number' in ref:
            s += ' ' + ref['number']
        s += ', {}'.format(ref['year'])
        if 'doi' in ref:
            s += '\n' + ref['doi']
    elif ref['_entry_type'] == 'misc':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title'])
        if 'year' in ref:
            s += '\n' + ref['year']
        if 'doi' in ref:
            s += '\n' + ref['doi']
    else:
        raise RuntimeError('Cannot handle reference type {}'.format(
            ref['_entry_type']))
    if 'note' in ref:
        s += '\n' + ref_wrap.fill(ref['note'])

    # The final output has the key on its own line. The rest is indented by 4
    s = '\n'.join(' ' * 4 + x for x in s.splitlines())
    return key + '\n' + s
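
A self-contained sketch of the same formatting idea for a single hypothetical article entry: wrapped fields get an 8-space hanging indent, and the finished body is indented by 4 spaces under the key.

import textwrap

# Hypothetical reference, shaped like the `ref` dict described in the docstring.
key = 'doe2020a'
ref = {
    'authors': ['J. Doe', 'A. N. Other'],
    'title': ('A very long article title that will certainly need to be '
              'wrapped onto a second line when formatted'),
    'journal': 'J. Chem. Phys.', 'volume': '152', 'page': '123456',
    'year': '2020', 'doi': '10.1000/xyz123',
}

ref_wrap = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 8)

s = ref_wrap.fill(', '.join(ref['authors'])) + '\n'
s += ref_wrap.fill(ref['title']) + '\n'
s += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'],
                              ref['page'], ref['year'])
s += '\n' + ref['doi']

# Key on its own line, body indented by four spaces.
s = '\n'.join(' ' * 4 + x for x in s.splitlines())
print(key + '\n' + s)
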
Example #13
def process_comment(comment):
    result = ''

    # Remove C++ comment syntax
    leading_spaces = float('inf')
    for s in comment.expandtabs(tabsize=4).splitlines():
        s = s.strip()
        if s.startswith('/*'):
            s = s[2:].lstrip('*')
        elif s.endswith('*/'):
            s = s[:-2].rstrip('*')
        elif s.startswith('///'):
            s = s[3:]
        if s.startswith('*'):
            s = s[1:]
        if len(s) > 0:
            leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
        result += s + '\n'

    if leading_spaces != float('inf'):
        result2 = ""
        for s in result.splitlines():
            result2 += s[leading_spaces:] + '\n'
        result = result2

    # Doxygen tags
    cpp_group = r'([\w:]+)'
    param_group = r'([\[\w:\]]+)'

    s = result
    s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s)
    s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s)
    s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s)
    s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Parameter ``\2``:\n\n', s)
    s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Template parameter ``\2``:\n\n', s)

    for in_, out_ in {
            'return': 'Returns',
            'author': 'Author',
            'authors': 'Authors',
            'copyright': 'Copyright',
            'date': 'Date',
            'remark': 'Remark',
            'sa': 'See also',
            'see': 'See also',
            'extends': 'Extends',
            'throw': 'Throws',
            'throws': 'Throws'
    }.items():
        s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)

    s = re.sub(r'\\details\s*', r'\n\n', s)
    s = re.sub(r'\\brief\s*', r'', s)
    s = re.sub(r'\\short\s*', r'', s)
    s = re.sub(r'\\ref\s*', r'', s)

    s = re.sub(r'\\code\s?(.*?)\s?\\endcode',
               r"```\n\1\n```\n",
               s,
               flags=re.DOTALL)

    # HTML/TeX tags
    s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, flags=re.DOTALL)
    s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL)
    s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL)
    s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL)
    s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL)
    s = re.sub(r'<li>', r'\n\n* ', s)
    s = re.sub(r'</?ul>', r'', s)
    s = re.sub(r'</li>', r'\n\n', s)

    s = s.replace('``true``', '``True``')
    s = s.replace('``false``', '``False``')

    # Re-flow text
    wrapper = textwrap.TextWrapper()
    wrapper.expand_tabs = True
    wrapper.replace_whitespace = True
    wrapper.drop_whitespace = True
    wrapper.width = 70
    wrapper.initial_indent = wrapper.subsequent_indent = ''

    result = ''
    in_code_segment = False
    for x in re.split(r'(```)', s):
        if x == '```':
            if not in_code_segment:
                result += '```\n'
            else:
                result += '\n```\n\n'
            in_code_segment = not in_code_segment
        elif in_code_segment:
            result += x.strip()
        else:
            for y in re.split(r'(?: *\n *){2,}', x):
                wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
                if len(wrapped) > 0 and wrapped[0] == '$':
                    result += wrapped[1:] + '\n'
                    wrapper.initial_indent = \
                        wrapper.subsequent_indent = ' ' * 4
                else:
                    if len(wrapped) > 0:
                        result += wrapped + '\n\n'
                    wrapper.initial_indent = wrapper.subsequent_indent = ''
    return result.rstrip().lstrip('\n')
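
A self-contained sketch of the re-flow step at the end of process_comment: paragraphs are re-wrapped to 70 columns, and a paragraph starting with the '$' marker becomes a heading whose following paragraphs are indented by four spaces. The input string is a hypothetical, already-converted comment.

import re
import textwrap

# Hypothetical text after the Doxygen/Markdown substitutions above.
s = ('Adds two numbers and returns their sum. The description is deliberately '
     'long enough to be re-flowed across several output lines.\n\n'
     '$Parameter ``a``:\n\nFirst operand.\n\n'
     '$Returns:\n\nThe sum of the operands.')

wrapper = textwrap.TextWrapper(width=70)
wrapper.initial_indent = wrapper.subsequent_indent = ''

result = ''
for y in re.split(r'(?: *\n *){2,}', s):
    wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
    if wrapped.startswith('$'):
        # Heading: drop the marker and indent the paragraphs that follow it.
        result += wrapped[1:] + '\n'
        wrapper.initial_indent = wrapper.subsequent_indent = ' ' * 4
    else:
        if wrapped:
            result += wrapped + '\n\n'
        wrapper.initial_indent = wrapper.subsequent_indent = ''
print(result.rstrip())
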
Example #14
    def __init__(self, *args, **kwargs):

        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
        self.wrapper = textwrap.TextWrapper(width=self.width)
    def _create_greetings(client,
                          custom_names=False,
                          components=True,
                          test=False):
        hello_h = textwrap.dedent("""
            #pragma once
            void hello(std::string noun);
            """)

        hello_cpp = textwrap.dedent("""
            #include <iostream>
            #include <string>

            #include "hello.h"

            void hello(std::string noun) {
                #ifdef NDEBUG
                std::cout << "Hello " << noun << " release!" << std::endl;
                #else
                std::cout << "Hello " << noun << " debug!" << std::endl;
                #endif
            }
            """)

        bye_h = textwrap.dedent("""
            #pragma once
            void bye(std::string noun);
            """)

        bye_cpp = textwrap.dedent("""
            #include <iostream>
            #include <string>

            #include "bye.h"

            void bye(std::string noun) {
                #ifdef NDEBUG
                std::cout << "Bye " << noun << " release!" << std::endl;
                #else
                std::cout << "Bye " << noun << " debug!" << std::endl;
                #endif
            }
            """)

        conanfile_greetings = textwrap.dedent("""
            from conans import ConanFile, CMake

            class GreetingsConan(ConanFile):
                name = "greetings"
                version = "0.0.1"
                settings = "os", "compiler", "build_type", "arch"
                generators = "cmake"
                exports_sources = "src/*"

                def build(self):
                    cmake = CMake(self)
                    cmake.configure(source_folder="src")
                    cmake.build()

                def package(self):
                    self.copy("*.h", dst="include", src="src")
                    self.copy("*.lib", dst="lib", keep_path=False)
                    self.copy("*.a", dst="lib", keep_path=False)

                def package_info(self):
                %s
            """)
        if components:
            info = textwrap.dedent("""
                        self.cpp_info.components["hello"].libs = ["hello"]
                        self.cpp_info.components["bye"].libs = ["bye"]
                        """)
            if custom_names:
                info += textwrap.dedent("""
                        self.cpp_info.names["cmake_find_package_multi"] = "Greetings"
                        self.cpp_info.components["hello"].names["cmake_find_package_multi"] = "Hello"
                        self.cpp_info.components["bye"].names["cmake_find_package_multi"] = "Bye"
                        """)
        else:
            info = textwrap.dedent("""
                        self.cpp_info.libs = ["hello", "bye"]
                        """)
        wrapper = textwrap.TextWrapper(width=85,
                                       initial_indent="   ",
                                       subsequent_indent="        ")
        conanfile_greetings = conanfile_greetings % wrapper.fill(info)

        cmakelists_greetings = textwrap.dedent("""
            cmake_minimum_required(VERSION 3.0)
            project(greetings CXX)

            include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
            conan_basic_setup(NO_OUTPUT_DIRS)
            set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})

            add_library(hello hello.cpp)
            add_library(bye bye.cpp)
            """)

        test_package_greetings_conanfile = textwrap.dedent("""
            import os
            from conans import ConanFile, CMake

            class GreetingsTestConan(ConanFile):
                settings = "os", "compiler", "build_type", "arch"
                generators = "cmake", "cmake_find_package_multi"
                requires = "greetings/0.0.1"

                def build(self):
                    cmake = CMake(self)
                    cmake.configure()
                    cmake.build()

                def test(self):
                    os.chdir("bin")
                    self.run(".%sexample" % os.sep)
            """)
        test_package_greetings_cpp = textwrap.dedent("""
            #include <string>

            #include "hello.h"
            #include "bye.h"

            int main() {
                hello("Moon");
                bye("Moon");
            }
            """)
        test_package_greetings_cmakelists = textwrap.dedent("""
            cmake_minimum_required(VERSION 3.0)
            project(PackageTest CXX)

            include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
            conan_basic_setup(NO_OUTPUT_DIRS)
            set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})

            find_package(greetings)

            add_executable(example example.cpp)
            target_link_libraries(example greetings::greetings)
            """)
        client.save({
            "conanfile.py": conanfile_greetings,
            "src/CMakeLists.txt": cmakelists_greetings,
            "src/hello.h": hello_h,
            "src/hello.cpp": hello_cpp,
            "src/bye.h": bye_h,
            "src/bye.cpp": bye_cpp
        })
        if test:
            client.save({
                "fake_test_package/conanfile.py":
                test_package_greetings_conanfile,
                "fake_test_package/example.cpp":
                test_package_greetings_cpp,
                "fake_test_package/CMakeLists.txt":
                test_package_greetings_cmakelists
            })
        client.run("create . -s build_type=Release")
        client.run("create . -s build_type=Debug")
Example #16
# Implement a class to hold room information. This should have name and
# description attributes.
import textwrap

wrapper = textwrap.TextWrapper(initial_indent="    ", subsequent_indent="    ")


class Room:
    def __init__(self, name, description, is_lit):
        self.name = name
        self.description = description
        self.n_to = None
        self.s_to = None
        self.e_to = None
        self.w_to = None
        self.items = []
        self.is_lit = is_lit

    def __str__(self):
        display_string = ""
        display_string += "\n---------------------------------------------------------------------------\n"
        display_string += f"\n{self.name}\n"
        display_string += f"\n{self.description}\n\n"
        return display_string

    def get_room_in_direction(self, direction):
        if hasattr(self, f"{direction}_to"):
            return getattr(self, f"{direction}_to")
        return None

    def add_item(self, item):
        self.items.append(item)
def wrap(string, max_width):
    wrapper = textwrap.TextWrapper(width=max_width)

    return '\n'.join(wrapper.wrap(text=string))
Example #18
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('url',
                        help="The base URL of the zuul server.  "
                        "E.g., https://zuul.example.com/ or path"
                        " to project public key file. E.g.,"
                        " file:///path/to/key.pub")
    parser.add_argument('project',
                        default=None,
                        nargs="?",
                        help="The name of the project. Required when using"
                        " the Zuul API to fetch the public key.")
    parser.add_argument('--tenant',
                        default=None,
                        help="The name of the Zuul tenant.  This may be "
                        "required in a multi-tenant environment.")
    parser.add_argument('--strip',
                        default=None,
                        help='Unused, kept for backward compatibility.')
    parser.add_argument('--no-strip',
                        action='store_true',
                        default=False,
                        help="Do not strip whitespace from beginning or "
                        "end of input.")
    parser.add_argument('--infile',
                        default=None,
                        help="A filename whose contents will be encrypted.  "
                        "If not supplied, the value will be read from "
                        "standard input.")
    parser.add_argument('--outfile',
                        default=None,
                        help="A filename to which the encrypted value will be "
                        "written.  If not supplied, the value will be written "
                        "to standard output.")
    parser.add_argument('--insecure',
                        action='store_true',
                        default=False,
                        help="Do not verify remote certificate")
    args = parser.parse_args()

    # We should not use unencrypted connections for retrieving the public key.
    # Otherwise our secret can be compromised. The schemes file and https are
    # considered safe.
    url = urlparse(args.url)
    if url.scheme not in ('file', 'https'):
        sys.stderr.write("WARNING: Retrieving encryption key via an "
                         "unencrypted connection. Your secret may get "
                         "compromised.\n")

    ssl_ctx = None
    if url.scheme == 'file':
        req = Request(args.url)
    else:
        if args.insecure:
            ssl_ctx = ssl.create_default_context()
            ssl_ctx.check_hostname = False
            ssl_ctx.verify_mode = ssl.CERT_NONE

        # Check if tenant is white label
        req = Request("%s/api/info" % (args.url.rstrip('/'), ))
        info = json.loads(urlopen(req, context=ssl_ctx).read().decode('utf8'))

        api_tenant = info.get('info', {}).get('tenant')
        if not api_tenant and not args.tenant:
            print("Error: the --tenant argument is required")
            exit(1)

        if api_tenant:
            req = Request("%s/api/key/%s.pub" %
                          (args.url.rstrip('/'), args.project))
        else:
            req = Request("%s/api/tenant/%s/key/%s.pub" %
                          (args.url.rstrip('/'), args.tenant, args.project))
    try:
        pubkey = urlopen(req, context=ssl_ctx)
    except Exception:
        sys.stderr.write("ERROR: Couldn't retrieve project key via %s\n" %
                         req.full_url)
        raise

    if args.infile:
        with open(args.infile) as f:
            plaintext = f.read()
    else:
        plaintext = sys.stdin.read()

    plaintext = plaintext.encode("utf-8")
    if not args.no_strip:
        plaintext = plaintext.strip()

    pubkey_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        pubkey_file.write(pubkey.read())
        pubkey_file.close()

        p = subprocess.Popen(
            ['openssl', 'rsa', '-text', '-pubin', '-in', pubkey_file.name],
            stdout=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        if p.returncode != 0:
            raise Exception("Return code %s from openssl" % p.returncode)
        output = stdout.decode('utf-8')
        openssl_version = subprocess.check_output(['openssl',
                                                   'version']).split()[1]
        if openssl_version.startswith(b'0.'):
            key_length_re = r'^Modulus \((?P<key_length>\d+) bit\):$'
        else:
            key_length_re = r'^(|RSA )Public-Key: \((?P<key_length>\d+) bit\)$'
        m = re.match(key_length_re, output, re.MULTILINE)
        nbits = int(m.group('key_length'))
        nbytes = int(nbits / 8)
        max_bytes = nbytes - 42  # PKCS1-OAEP overhead
        chunks = int(math.ceil(float(len(plaintext)) / max_bytes))

        ciphertext_chunks = []

        print("Public key length: {} bits ({} bytes)".format(nbits, nbytes))
        print("Max plaintext length per chunk: {} bytes".format(max_bytes))
        print("Input plaintext length: {} bytes".format(len(plaintext)))
        print("Number of chunks: {}".format(chunks))

        for count in range(chunks):
            chunk = plaintext[int(count * max_bytes):int((count + 1) *
                                                         max_bytes)]
            p = subprocess.Popen([
                'openssl', 'rsautl', '-encrypt', '-oaep', '-pubin', '-inkey',
                pubkey_file.name
            ],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE)
            (stdout, stderr) = p.communicate(chunk)
            if p.returncode != 0:
                raise Exception("Return code %s from openssl" % p.returncode)
            ciphertext_chunks.append(base64.b64encode(stdout).decode('utf-8'))

    finally:
        os.unlink(pubkey_file.name)

    output = textwrap.dedent('''
        - secret:
            name: <name>
            data:
              <fieldname>: !encrypted/pkcs1-oaep
        ''')

    twrap = textwrap.TextWrapper(width=79,
                                 initial_indent=' ' * 8,
                                 subsequent_indent=' ' * 10)
    for chunk in ciphertext_chunks:
        chunk = twrap.fill('- ' + chunk)
        output += chunk + '\n'

    if args.outfile:
        with open(args.outfile, "w") as f:
            f.write(output)
    else:
        print(output)
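
A self-contained sketch of the final formatting step above: a hypothetical base64 chunk (standing in for the openssl output) is folded into the YAML block, with an 8-space first-line indent and a 10-space continuation indent so wrapped lines stay inside the list item.

import base64
import textwrap

output = textwrap.dedent('''
    - secret:
        name: <name>
        data:
          <fieldname>: !encrypted/pkcs1-oaep
    ''')

# Hypothetical ciphertext chunk standing in for the openssl output.
chunk = base64.b64encode(b'example ciphertext bytes ' * 8).decode('utf-8')

twrap = textwrap.TextWrapper(width=79,
                             initial_indent=' ' * 8,
                             subsequent_indent=' ' * 10)
output += twrap.fill('- ' + chunk) + '\n'
print(output)
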
Example #19
def help_api(options):
    import pyomo.common
    services = pyomo.common.PyomoAPIFactory.services()
    #
    f = {}
    for name in services:
        f[name] = pyomo.common.PyomoAPIFactory(name)
    #
    ns = {}
    for name in services:
        ns_set = ns.setdefault(f[name].__namespace__, set())
        ns_set.add(name)
    #
    if options.asciidoc:
        print("//")
        print("// Pyomo Library API Documentation")
        print("//")
        print("// Generated with 'pyomo api' on ", datetime.date.today())
        print("//")
        print("")
        print("== Pyomo Functor API ==")
        for ns_ in sorted(ns.keys()):
            print("")
            level = ns_ + " Functors"
            print('=== %s ===' % level)
            for name in sorted(ns[ns_]):
                if ns_ != '':
                    tname = name[len(ns_) + 1:]
                else:
                    tname = name
                print("")
                print('==== %s ====' % tname)
                print(f[name].__short_doc__)
                if f[name].__long_doc__ != '':
                    print("")
                    print(f[name].__long_doc__)
                print("")
                flag = False
                print("- [underline]#Required Keyword Arguments:#")
                for port in sorted(f[name].inputs):
                    if f[name].inputs[port].optional:
                        flag = True
                        continue
                    print("")
                    print('*%s*::\n %s' % (port, f[name].inputs[port].doc))
                if flag:
                    # A function may not have optional arguments
                    print("")
                    print("- [underline]#Optional Keyword Arguments:#")
                    for port in sorted(f[name].inputs):
                        if not f[name].inputs[port].optional:
                            continue
                        print("")
                        print('*%s*::\n %s' % (port, f[name].inputs[port].doc))
                print("")
                print("- [underline]#Return Values:#")
                for port in sorted(f[name].outputs):
                    print("")
                    print('*%s*::\n %s' % (port, f[name].outputs[port].doc))
                print("")
    else:
        print("")
        print("Pyomo Functor API")
        print("-----------------")
        wrapper = textwrap.TextWrapper(subsequent_indent='')
        print(
            wrapper.fill(
                "The Pyomo library contains a set of functors that define operations that are likely to be major steps in Pyomo scripts.  This API is defined with functors to ensure a consistent function syntax.  Additionally, these functors can be accessed with a factory, thereby avoiding the need to import modules throughout Pyomo."
            ))
        print("")
        for ns_ in sorted(ns.keys()):
            print("")
            level = ns_ + " Functors"
            print("-" * len(level))
            print(level)
            print("-" * len(level))
            for name in sorted(ns[ns_]):
                if ns_ != '':
                    tname = name[len(ns_) + 1:]
                else:
                    tname = name
                print(tname + ':')
                for line in f[name].__short_doc__.split('\n'):
                    print("    " + line)
Example #20
        print >>sys.stderr, "Missing configuration for " + config_name
        tfile.write(line)
        continue

    skip = ('@configstart' in line)

    if not api_data.methods[config_name].config:
        tfile.write(prefix + '@configempty{' + config_name +
                ', see dist/api_data.py}\n')
        continue

    tfile.write(prefix + '@configstart{' + config_name +
            ', see dist/api_data.py}\n')

    w = textwrap.TextWrapper(width=80-len(prefix.expandtabs()),
            break_on_hyphens=False,
            replace_whitespace=False,
            fix_sentence_endings=True)
    lastname = None
    for c in sorted(api_data.methods[config_name].config):
        name = c.name
        if '.' in name:
            print >>sys.stderr, "Bad config key " + name

        # Deal with duplicates: with complex configurations (like
        # WT_SESSION::create), it's simpler to deal with duplicates here than
        # manually in api_data.py.
        if name == lastname:
            continue
        lastname = name
        if 'undoc' in c.flags:
            continue
Example #21
def process_comment(comment):
    """
    Converts Doxygen-formatted string to look presentable in a Python
    docstring.
    """
    result = ''

    # Remove C++ comment syntax
    leading_spaces = float('inf')
    for s in comment.expandtabs(tabsize=4).splitlines():
        s = s.strip()
        if s.startswith('/*!'):
            s = s[3:]
        if s.startswith('/*'):
            s = s[2:].lstrip('*')
        if s.endswith('*/'):
            s = s[:-2].rstrip('*')
        if s.startswith('///') or s.startswith('//!'):
            s = s[3:]
        if s.startswith('*'):
            s = s[1:]
        if len(s) > 0:
            leading_spaces = min(leading_spaces, len(s) - len(s.lstrip()))
        result += s + '\n'

    if leading_spaces != float('inf'):
        result2 = ""
        for s in result.splitlines():
            result2 += s[leading_spaces:] + '\n'
        result = result2

    s = result

    # Remove HTML comments. Must occur before Doxygen commands are parsed
    # since they may be used to workaround limitations related to line breaks
    # in Doxygen.
    s = re.sub(r'<!--(.*?)-->', r'', s, flags=re.DOTALL)

    # Markdown to reStructuredText.
    # TODO(jamiesnape): Find a third-party library do this?
    # Convert _italics_ to *italics*.
    s = re.sub(r'([\s\-,;:!.()]+)_([^\s_]+|[^\s_].*?[^\s_])_([\s\-,;:!.()]+)',
               r'\1*\2*\3',
               s,
               flags=re.DOTALL)
    # Convert __bold__ to **bold**.
    s = re.sub(
        r'([\s\-,;:!.()]+)__([^\s_]+|[^\s_].*?[^\s_])__([\s\-,;:!.()]+)',
        r'\1**\2**\3',
        s,
        flags=re.DOTALL)
    # Convert `typewriter` to ``typewriter``.
    s = re.sub(r'([\s\-,;:!.()]+)`([^\s`]|[^\s`].*?[^\s`])`([\s\-,;:!.()]+)',
               r'\1``\2``\3',
               s,
               flags=re.DOTALL)
    # Convert [Link](https://example.org) to `Link <https://example.org>`_.
    s = re.sub(r'\[(.*?)\]\(([\w:.?/#]+)\)', r'`\1 <\2>`_', s, flags=re.DOTALL)

    s = re.sub(r'#### (.*?) ####', r'\n*\1*\n', s)
    s = re.sub(r'#### (.*?)', r'\n*\1*\n', s)
    s = re.sub(r'### (.*?) ###', r'\n**\1**\n', s)
    s = re.sub(r'### (.*?)', r'\n**\1**\n', s)

    def replace_with_header(pattern, token, s, **kwargs):
        def repl(match):
            return '\n{}\n{}\n'.format(match.group(1),
                                       token * len(match.group(1)))

        return re.sub(pattern, repl, s, **kwargs)

    s = replace_with_header(r'## (.*?) ##', '-', s)
    s = replace_with_header(r'## (.*?)', '-', s)
    s = replace_with_header(r'# (.*?) #', '=', s)
    s = replace_with_header(r'# (.*?)', '=', s)

    # Doxygen tags
    cpp_group = r'([\w:*()]+)'
    param_group = r'([\[\w,\]]+)'

    s = re.sub(r'[@\\][cp]\s+%s' % cpp_group, r'``\1``', s)
    s = re.sub(r'[@\\](?:a|e|em)\s+%s' % cpp_group, r'*\1*', s)
    s = re.sub(r'[@\\]b\s+%s' % cpp_group, r'**\1**', s)
    s = re.sub(r'[@\\]param%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Parameter ``\2``:\n\n', s)
    s = re.sub(r'[@\\]tparam%s?\s+%s' % (param_group, cpp_group),
               r'\n\n$Template parameter ``\2``:\n\n', s)
    s = re.sub(r'[@\\]retval\s+%s' % cpp_group, r'\n\n$Returns ``\1``:\n\n', s)

    # Ordering is significant for command names with a common prefix.
    for in_, out_ in (
        ('result', 'Returns'),
        ('returns', 'Returns'),
        ('return', 'Returns'),
        ('attention', 'Attention'),
        ('authors', 'Authors'),
        ('author', 'Authors'),
        ('bug', 'Bug report'),
        ('copyright', 'Copyright'),
        ('date', 'Date'),
        ('deprecated', 'Deprecated'),
        ('exception', 'Raises'),
        ('invariant', 'Invariant'),
        ('note', 'Note'),
        ('post', 'Postcondition'),
        ('pre', 'Precondition'),
        ('remarks', 'Remark'),
        ('remark', 'Remark'),
        ('sa', 'See also'),
        ('see', 'See also'),
        ('since', 'Since'),
        ('extends', 'Extends'),
        ('throws', 'Raises'),
        ('throw', 'Raises'),
        ('test', 'Test case'),
        ('todo', 'Todo'),
        ('version', 'Version'),
        ('warning', 'Warning'),
    ):
        s = re.sub(r'[@\\]%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s)

    s = re.sub(r'[@\\]details\s*', r'\n\n', s)
    s = re.sub(r'[@\\](?:brief|short)\s*', r'', s)
    s = re.sub(r'[@\\]ref\s+', r'', s)

    for start_, end_ in (('code', 'endcode'), ('verbatim', 'endverbatim')):
        s = re.sub(r'[@\\]%s(?:\{\.\w+\})?\s?(.*?)\s?[@\\]%s' % (start_, end_),
                   r"```\n\1\n```\n",
                   s,
                   flags=re.DOTALL)

    s = re.sub(r'[@\\](?:end)?htmlonly\s+', r'', s)

    # These commands are always prefixed with an @ sign.
    s = re.sub(r'@[{}]\s*', r'', s)

    # Doxygen list commands.
    s = re.sub(r'[@\\](?:arg|li)\s+', r'\n\n* ', s)

    # Doxygen sectioning commands.
    s = replace_with_header(r'[@\\]section\s+\w+\s+(.*)', '=', s)
    s = replace_with_header(r'[@\\]subsection\s+\w+\s+(.*)', '-', s)
    s = re.sub(r'[@\\]subsubsection\s+\w+\s+(.*)', r'\n**\1**\n', s)

    # Doxygen LaTeX commands.
    s = re.sub(r'[@\\]f\$\s*(.*?)\s*[@\\]f\$',
               r':math:`\1`',
               s,
               flags=re.DOTALL)
    s = re.sub(r'[@\\]f\[\s*(.*?)\s*[@\\]f\]',
               r'\n\n.. math:: \1\n\n',
               s,
               flags=re.DOTALL)
    s = re.sub(r'[@\\]f\{([\w*]+)\}\s*(.*?)\s*[@\\]f\}',
               r'\n\n.. math:: \\begin{\1}\2\\end{\1}\n\n',
               s,
               flags=re.DOTALL)

    # Drake-specific Doxygen aliases.
    s = re.sub(r'[@\\]default\s+', r'\n$*Default:* ', s)

    # Remove these commands that take no argument. Ordering is significant for
    # command names with a common prefix.
    for cmd_ in (
            '~english',
            '~',
            'callergraph',
            'callgraph',
            'hidecallergraph',
            'hidecallgraph',
            'hideinitializer',
            'nosubgrouping',
            'privatesection',
            'private',
            'protectedsection',
            'protected',
            'publicsection',
            'public',
            'pure',
            'showinitializer',
            'static',
            'tableofcontents',
    ):
        s = re.sub(r'[@\\]%s\s+' % cmd_, r'', s)

    # Remove these commands and their one optional single-word argument.
    for cmd_ in [
            'dir',
            'file',
    ]:
        s = re.sub(r'[@\\]%s( +[\w:./]+)?\s+' % cmd_, r'', s)

    # Remove these commands and their one optional single-line argument.
    for cmd_ in [
            'mainpage',
            'name',
            'overload',
    ]:
        s = re.sub(r'[@\\]%s( +.*)?\s+' % cmd_, r'', s)

    # Remove these commands and their one single-word argument. Ordering is
    # significant for command names with a common prefix.
    for cmd_ in [
            'anchor',
            'copybrief',
            'copydetails',
            'copydoc',
            'def',
            'dontinclude',
            'enum',
            'example',
            'extends',
            'htmlinclude',
            'idlexcept',
            'implements',
            'includedoc',
            'includelineno',
            'include',
            'latexinclude',
            'memberof',
            'namespace',
            'package',
            'relatedalso',
            'related',
            'relatesalso',
            'relates',
            'verbinclude',
    ]:
        s = re.sub(r'[@\\]%s\s+[\w:.]+\s+' % cmd_, r'', s)

    # Remove these commands and their one single-line argument. Ordering is
    # significant for command names with a common prefix.
    for cmd_ in [
            'addindex',
            'fn',
            'ingroup',
            'line',
            'property',
            'skipline',
            'skip',
            'typedef',
            'until',
            'var',
    ]:
        s = re.sub(r'[@\\]%s\s+.*\s+' % cmd_, r'', s)

    # Remove this command and its one single-word argument and one
    # optional single-word argument.
    s = re.sub(r'[@\\]headerfile\s+[\w:.]+( +[\w:.]+)?\s+', r'', s)

    # Remove these commands and their one single-word argument and one
    # optional single-line argument.
    for cmd_ in [
            'addtogroup',
            'weakgroup',
    ]:
        s = re.sub(r'[@\\]%s\s+[\w:.]+( +.*)?\s+' % cmd_, r'', s)

    # Remove these commands and their one single-word argument and one
    # single-line argument. Ordering is significant for command names with a
    # common prefix.
    for cmd_ in [
            'snippetdoc',
            'snippetlineno',
            'snippet',
    ]:
        s = re.sub(r'[@\\]%s\s+[\w:.]+\s+.*\s+' % cmd_, r'', s)

    # Remove these commands and their one single-word argument and two
    # optional single-word arguments.
    for cmd_ in [
            'category',
            'class',
            'interface',
            'protocol',
            'struct',
            'union',
    ]:
        s = re.sub(r'[@\\]%s\s+[\w:.]+( +[\w:.]+){0,2}\s+' % cmd_, r'', s)

    # Remove these commands and their one single-word argument, one optional
    # quoted argument, and one optional single-word arguments.
    for cmd_ in [
            'diafile',
            'dotfile',
            'mscfile',
    ]:
        s = re.sub(
            r'[@\\]%s\s+[\w:.]+(\s+".*?")?(\s+[\w:.]+=[\w:.]+)?s+' % cmd_, r'',
            s)

    # Remove these pairs of commands and any text in between.
    for start_, end_ in (
        ('cond', 'endcond'),
        ('docbookonly', 'enddocbookonly'),
        ('dot', 'enddot'),
        ('internal', 'endinternal'),
        ('latexonly', 'endlatexonly'),
        ('manonly', 'endmanonly'),
        ('msc', 'endmsc'),
        ('rtfonly', 'endrtfonly'),
        ('secreflist', 'endsecreflist'),
        ('startuml', 'enduml'),
        ('xmlonly', 'endxmlonly'),
    ):
        s = re.sub(r'[@\\]%s\s?(.*?)\s?[@\\]%s' % (start_, end_),
                   r'',
                   s,
                   flags=re.DOTALL)

        # Some command pairs may bridge multiple comment blocks, so individual
        # start and end commands may appear alone.
        s = re.sub(r'[@\\]%s\s+' % start_, r'', s)
        s = re.sub(r'[@\\]%s\s+' % end_, r'', s)

    # Remove auto-linking character. Be sure to remove only leading % signs.
    s = re.sub(r'(\s+)%(\S+)', r'\1\2', s)

    # HTML tags. Support both lowercase and uppercase tags.
    s = re.sub(r'<tt>(.*?)</tt>',
               r'``\1``',
               s,
               flags=re.DOTALL | re.IGNORECASE)
    s = re.sub(r'<pre>(.*?)</pre>',
               r"```\n\1\n```\n",
               s,
               flags=re.DOTALL | re.IGNORECASE)
    s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL | re.IGNORECASE)
    s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL | re.IGNORECASE)

    s = re.sub(r'<li>', r'\n\n* ', s, flags=re.IGNORECASE)
    s = re.sub(r'</?ol( start=[0-9]+)?>', r'', s, flags=re.IGNORECASE)
    s = re.sub(r'</?ul>', r'', s, flags=re.IGNORECASE)
    s = re.sub(r'</li>', r'\n\n', s, flags=re.IGNORECASE)

    s = re.sub(r'<a href="([\w:.?/#]+)">(.*?)</a>',
               r'`\2 <\1>`_',
               s,
               flags=re.DOTALL | re.IGNORECASE)

    s = re.sub(r'<br/?>', r'\n\n', s, flags=re.IGNORECASE)

    s = replace_with_header(r'<h1>(.*?)</h1>', '=', s, flags=re.IGNORECASE)
    s = replace_with_header(r'<h2>(.*?)</h2>', '-', s, flags=re.IGNORECASE)
    s = re.sub(r'<h3>(.*?)</h3>', r'\n**\1**\n', s, flags=re.IGNORECASE)
    s = re.sub(r'<h4>(.*?)</h4>', r'\n*\1*\n', s, flags=re.IGNORECASE)

    s = s.replace('``true``', '``True``')
    s = s.replace('``false``', '``False``')

    # Exceptions
    s = s.replace('std::bad_alloc', 'MemoryError')
    s = s.replace('std::bad_any_cast', 'RuntimeError')
    s = s.replace('std::bad_array_new_length', 'MemoryError')
    s = s.replace('std::bad_cast', 'RuntimeError')
    s = s.replace('std::bad_exception', 'RuntimeError')
    s = s.replace('std::bad_function_call', 'RuntimeError')
    s = s.replace('std::bad_optional_access', 'RuntimeError')
    s = s.replace('std::bad_typeid', 'RuntimeError')
    s = s.replace('std::bad_variant_access', 'RuntimeError')
    s = s.replace('std::bad_weak_ptr', 'RuntimeError')
    s = s.replace('std::domain_error', 'ValueError')
    s = s.replace('std::exception', 'RuntimeError')
    s = s.replace('std::future_error', 'RuntimeError')
    s = s.replace('std::invalid_argument', 'ValueError')
    s = s.replace('std::length_error', 'ValueError')
    s = s.replace('std::logic_error', 'RuntimeError')
    s = s.replace('std::out_of_range', 'ValueError')
    s = s.replace('std::overflow_error', 'RuntimeError')
    s = s.replace('std::range_error', 'ValueError')
    s = s.replace('std::regex_error', 'RuntimeError')
    s = s.replace('std::runtime_error', 'RuntimeError')
    s = s.replace('std::system_error', 'RuntimeError')
    s = s.replace('std::underflow_error', 'RuntimeError')

    # Doxygen escaped characters.
    s = re.sub(r'[@\\]n\s+', r'\n\n', s)

    # Ordering of ---, --, @, and \ is significant.
    for escaped_ in (
            '---',
            '--',
            '::',
            r'\.',
            '"',
            '&',
            '#',
            '%',
            '<',
            '>',
            r'\$',
            '@',
            '\\\\',
    ):
        s = re.sub(r'[@\\](%s)' % escaped_, r'\1', s)

    # Reflow text where appropriate.
    wrapper = textwrap.TextWrapper()
    wrapper.break_long_words = False
    wrapper.break_on_hyphens = False
    wrapper.drop_whitespace = True
    wrapper.expand_tabs = True
    wrapper.replace_whitespace = True
    wrapper.width = 70
    wrapper.initial_indent = wrapper.subsequent_indent = ''

    result = ''
    in_code_segment = False
    for x in re.split(r'(```)', s):
        if x == '```':
            if not in_code_segment:
                result += '\n::\n'
            else:
                result += '\n\n'
            in_code_segment = not in_code_segment
        elif in_code_segment:
            result += '    '.join(('\n' + x.strip()).splitlines(True))
        else:
            for y in re.split(r'(?: *\n *){2,}', x):
                lines = re.split(r'(?: *\n *)', y)
                # Do not reflow lists or section headings.
                if (re.match(r'^(?:[*+\-]|[0-9]+[.)]) ', lines[0]) or
                    (len(lines) > 1 and (lines[1] == '=' * len(lines[0])
                                         or lines[1] == '-' * len(lines[0])))):
                    result += y + '\n\n'
                else:
                    wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip())
                    if len(wrapped) > 0 and wrapped[0] == '$':
                        result += wrapped[1:] + '\n'
                        wrapper.initial_indent = \
                            wrapper.subsequent_indent = ' ' * 4
                    else:
                        if len(wrapped) > 0:
                            result += wrapped + '\n\n'
                        wrapper.initial_indent = wrapper.subsequent_indent = ''
    return result.rstrip().lstrip('\n')
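
The reflow loop above uses a small sentinel trick: a wrapped paragraph that starts with "$" is treated as a field heading (the "$" is stripped) and the wrapper switches to a four-space hanging indent for the paragraphs that follow. A minimal standalone sketch of that behavior (sample text made up, not part of the original function):

import re
import textwrap

wrapper = textwrap.TextWrapper(width=70)
text = "$Returns:\n\nTrue if the operation succeeded, False otherwise."
result = ''
for paragraph in re.split(r'\n{2,}', text):
    wrapped = wrapper.fill(paragraph.strip())
    if wrapped.startswith('$'):
        # Field heading: drop the sentinel, indent the following paragraphs.
        result += wrapped[1:] + '\n'
        wrapper.initial_indent = wrapper.subsequent_indent = ' ' * 4
    else:
        result += wrapped + '\n\n'
        wrapper.initial_indent = wrapper.subsequent_indent = ''
print(result)
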
Example #22
0
File: azhpc.py Project: Smahane/azurehpc
def _wait_for_deployment(resource_group, deploy_name):
    building = True
    success = True
    del_lines = 1
    while building:
        time.sleep(5)
        res = azutil.get_deployment_status(resource_group, deploy_name)
        log.debug(res)

        print("\033[F" * del_lines)
        del_lines = 1

        for i in res:
            props = i["properties"]
            status_code = props["statusCode"]
            if props.get("targetResource", None):
                resource_name = props["targetResource"]["resourceName"]
                resource_type = props["targetResource"]["resourceType"]
                del_lines += 1
                print(
                    f"{resource_name:15} {resource_type:47} {status_code:15}")
            else:
                provisioning_state = props["provisioningState"]
                del_lines += 1
                building = False
                if provisioning_state != "Succeeded":
                    success = False

    if success:
        log.info("Provising succeeded")
    else:
        log.error("Provisioning failed")
        for i in res:
            props = i["properties"]
            status_code = props["statusCode"]
            if props.get("targetResource", None):
                resource_name = props["targetResource"]["resourceName"]
                if props.get("statusMessage", None):
                    if "error" in props["statusMessage"]:
                        error_code = props["statusMessage"]["error"]["code"]
                        error_message = textwrap.TextWrapper(width=60).wrap(
                            text=props["statusMessage"]["error"]["message"])
                        error_target = props["statusMessage"]["error"].get(
                            "target", None)
                        error_target_str = ""
                        if error_target:
                            error_target_str = f"({error_target})"
                        print(
                            f"  Resource : {resource_name} - {error_code} {error_target_str}"
                        )
                        print(f"  Message  : {error_message[0]}")
                        for line in error_message[1:]:
                            print(f"             {line}")
                        if "details" in props["statusMessage"]["error"]:

                            def pretty_print(d, indent=0):
                                def wrapped_print(indent, text, max_width=80):
                                    lines = textwrap.TextWrapper(
                                        width=max_width -
                                        indent).wrap(text=text)
                                    for line in lines:
                                        print(" " * indent + line)

                                if isinstance(d, list):
                                    for value in d:
                                        pretty_print(value, indent)
                                elif isinstance(d, dict):
                                    for key, value in d.items():
                                        if isinstance(value, dict):
                                            wrapped_print(indent, str(key))
                                            pretty_print(value, indent + 4)
                                        elif isinstance(value, list):
                                            wrapped_print(indent, str(key))
                                            pretty_print(value, indent + 4)
                                        else:
                                            wrapped_print(
                                                indent, f"{key}: {value}")
                                else:
                                    wrapped_print(indent, str(d))

                            pretty_print(
                                props["statusMessage"]["error"]["details"], 13)

        sys.exit(1)
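
For reference, the message wrapping used above can be exercised on its own: the first wrapped line is printed after the "Message  :" label and the remaining lines are aligned under it. A minimal sketch with a made-up error message:

import textwrap

message = ("The resource operation completed with terminal provisioning "
           "state 'Failed'. Check the deployment operations for details.")
error_message = textwrap.TextWrapper(width=60).wrap(text=message)
print(f"  Message  : {error_message[0]}")
for line in error_message[1:]:
    print(f"             {line}")
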
Example #23
0
def do_draw_main(scr):
    global log_line_offset
    global longest_visible_line
    global last_full_redraw
    global auto_scroll
    global size_log_area

    if time.time() - last_full_redraw > FULL_REDRAW_FREQUENCY:
        # Do a full-screen redraw periodically to clear any
        # noise from non-curses text that gets output to the
        # screen (e.g. modules that do a 'print')
        scr.clear()
        last_full_redraw = time.time()
    else:
        scr.erase()

    # Display log output at the top
    cLogs = len(filteredLog) + 1  # +1 for the '--end--'
    size_log_area = curses.LINES - (cy_chat_area + 5)
    start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
    end = cLogs - log_line_offset
    if start < 0:
        end -= start
        start = 0
    if end > cLogs:
        end = cLogs

    auto_scroll = (end == cLogs)

    # adjust the line offset (prevents paging up too far)
    log_line_offset = cLogs - end

    # Top header and line counts
    if find_str:
        scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
        scr.addstr(0, 16, find_str, CLR_FIND)
        scr.addstr(
            0, 16 + len(find_str),
            " ctrl+X to end" + " " * (curses.COLS - 31 - 12 - len(find_str)) +
            str(start) + "-" + str(end) + " of " + str(cLogs), CLR_HEADING)
    else:
        scr.addstr(
            0, 0, "Log Output:" + " " * (curses.COLS - 31) + str(start) + "-" +
            str(end) + " of " + str(cLogs), CLR_HEADING)
    ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
    scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING)
    scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING)

    y = 2
    len_line = 0
    for i in range(start, end):
        if i >= cLogs - 1:
            log = '   ^--- NEWEST ---^ '
        else:
            log = filteredLog[i]
        logid = log[0]
        if len(log) > 25 and log[5] == '-' and log[8] == '-':
            log = log[27:]  # skip logid & date/time at the front of log line
        else:
            log = log[1:]  # just skip the logid

        # Categorize log line
        if " - DEBUG - " in log:
            log = log.replace("Skills ", "")
            clr = CLR_LOG_DEBUG
        elif " - ERROR - " in log:
            clr = CLR_LOG_ERROR
        else:
            if logid == "1":
                clr = CLR_LOG1
            elif logid == "@":
                clr = CLR_LOG_CMDMESSAGE
            else:
                clr = CLR_LOG2

        # limit output line to screen width
        len_line = len(log)
        if len(log) > curses.COLS:
            start = len_line - (curses.COLS - 4) - log_line_lr_scroll
            if start < 0:
                start = 0
            end = start + (curses.COLS - 4)
            if start == 0:
                log = log[start:end] + "~~~~"  # start....
            elif end >= len_line - 1:
                log = "~~~~" + log[start:end]  # ....end
            else:
                log = "~~" + log[start:end] + "~~"  # ..middle..
        if len_line > longest_visible_line:
            longest_visible_line = len_line
        scr.addstr(y, 0, handleNonAscii(log), clr)
        y += 1

    # Log legend in the lower-right
    y_log_legend = curses.LINES - (3 + cy_chat_area)
    scr.addstr(y_log_legend, curses.COLS // 2 + 2,
               make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
               CLR_HEADING)
    scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2, "DEBUG output",
               CLR_LOG_DEBUG)
    if len(log_files) > 0:
        scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
                   os.path.basename(log_files[0]) + ", other", CLR_LOG1)
    if len(log_files) > 1:
        scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
                   os.path.basename(log_files[1]), CLR_LOG2)

    # Meter
    y_meter = y_log_legend
    if show_meter:
        scr.addstr(y_meter, curses.COLS - 14, " Mic Level ", CLR_HEADING)

    # History log in the middle
    y_chat_history = curses.LINES - (3 + cy_chat_area)
    chat_width = curses.COLS // 2 - 2
    chat_out = []
    scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
               CLR_HEADING)

    # Build a nicely wrapped version of the chat log
    idx_chat = len(chat) - 1
    while len(chat_out) < cy_chat_area and idx_chat >= 0:
        if chat[idx_chat][0] == '>':
            wrapper = textwrap.TextWrapper(initial_indent="",
                                           subsequent_indent="   ",
                                           width=chat_width)
        else:
            wrapper = textwrap.TextWrapper(width=chat_width)

        chatlines = wrapper.wrap(chat[idx_chat])
        for txt in reversed(chatlines):
            if len(chat_out) >= cy_chat_area:
                break
            chat_out.insert(0, txt)

        idx_chat -= 1

    # Output the chat
    y = curses.LINES - (2 + cy_chat_area)
    for txt in chat_out:
        if txt.startswith(">> ") or txt.startswith("   "):
            clr = CLR_CHAT_RESP
        else:
            clr = CLR_CHAT_QUERY
        scr.addstr(y, 1, handleNonAscii(txt), clr)
        y += 1

    if show_gui and curses.COLS > 20 and curses.LINES > 20:
        _do_gui(curses.COLS - 20)

    # Command line at the bottom
    ln = line
    if len(line) > 0 and line[0] == ":":
        scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
                   CLR_CMDLINE)
        scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
        ln = line[1:]
    else:
        prompt = "Input (':' for command, Ctrl+C to quit)"
        if show_last_key:
            prompt += " === keycode: " + last_key
        scr.addstr(curses.LINES - 2, 0, make_titlebar(prompt, curses.COLS - 1),
                   CLR_HEADING)
        scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)

    _do_meter(cy_chat_area + 2)
    scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)

    # Curses doesn't actually update the display until refresh() is called
    scr.refresh()
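
The chat-history section above wraps user entries (lines starting with '>') with a hanging indent so continuation lines sit under the text rather than the prompt. A minimal standalone sketch with a made-up chat line and width:

import textwrap

chat_width = 40
entry = "> tell me a long joke about wrapping text in small terminals"
wrapper = textwrap.TextWrapper(initial_indent="",
                               subsequent_indent="   ",
                               width=chat_width)
for txt in wrapper.wrap(entry):
    print(txt)
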
Example #24
0
File: azhpc.py Project: Smahane/azurehpc
def wrapped_print(indent, text, max_width=80):
    lines = textwrap.TextWrapper(width=max_width - indent).wrap(text=text)
    for line in lines:
        print(" " * indent + line)
Example #25
0
class SequenceAligner:
	_ALIGNER = Align.PairwiseAligner()
	_ALIGNER.open_gap_score = -3.0
	_ALIGNER.left_open_gap_score = -2.0
	_ALIGNER.right_open_gap_score = -2.0
	_ALIGNER.extend_gap_score = -1.0
	try:
		_ALIGNER.substitution_matrix = matlist.blosum62
	except ValueError:
		from Bio.Align import substitution_matrices
		_ALIGNER.substitution_matrix = substitution_matrices.load("BLOSUM62")

	_TEXT_WRAPPER = textwrap.TextWrapper(
		replace_whitespace=False,
		drop_whitespace=False,
		break_on_hyphens=False
	)

	def __init__(self):
		self.target = Sequence()
		self.query = Sequence()

	def __repr__(self):
		position_line = self.position_header

		lines = []
		wrappers = zip(
			self._TEXT_WRAPPER.wrap(position_line),
			self._TEXT_WRAPPER.wrap(self.full_string),
		)

		for wrapped_lines in wrappers:
			position_wrapped_line, sequence_wrapped_line = wrapped_lines
			lines.append(position_wrapped_line)
			lines.append(sequence_wrapped_line)
			lines.append('')

		return '\n'.join(lines)

	def align(self, target, query):
		self.target = target
		self.query = query

		self.alignments = self._ALIGNER.align(
			self.target.stripped_string,
			self.query.stripped_string
		)

		aligned_target_string, _, aligned_query_string, _ = str(self.alignments[0]).split('\n')

		self.target.update_from_alignment(aligned_target_string)
		self.query.update_from_alignment(aligned_query_string)

	@classmethod
	def renumber(cls, src, dst):
		"""
		.. note:: Sequences must be aligned, i.e. their positions are coherent.
		"""

		# Renumber residues for which positions are equal
		source_positions = src.positions
		destination_positions = dst.positions

		common_positions, source_indices, destination_indices = np.intersect1d(
			source_positions, destination_positions, assume_unique=True, return_indices=True
		)

		error = False
		for src_index, dst_index in zip(source_indices, destination_indices):
			src_aa = src[src_index]
			dst_aa = dst[dst_index]

			if dst_aa.ol != src_aa.ol and src_aa.ol != AminoAcid._ANY_CHARACTER and dst_aa.ol != AminoAcid._ANY_CHARACTER:
				logger.warning(f'Residue mismatch {src_aa} {dst_aa}')
				error = True

			dst_aa.resid = src_aa.resid
			dst_aa.index = src_aa.index

		if error:
			logger.warning('Mismatches were present after alignment!')

		# Remove residues in destination which are not in source
		unique_source_positions = np.setdiff1d(
			source_positions, destination_positions, assume_unique=True
		)

		unique_destination_positions = np.setdiff1d(
			destination_positions, source_positions, assume_unique=True
		)

		dst_indices = destination_positions[np.isin(destination_positions, unique_destination_positions)]
		dst.remove(*dst_indices)

	def align_reference(self, basic_frame):
		self.align(basic_frame.sequence, basic_frame.structure_sequence)
		self.renumber(basic_frame.structure_sequence, basic_frame.sequence)
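
The __repr__ above wraps two parallel strings (position ruler and sequence) with the same TextWrapper and zips the results, so the pieces are printed as interleaved pairs. A minimal standalone sketch with synthetic strings and a small width:

import textwrap

wrapper = textwrap.TextWrapper(width=20,
                               replace_whitespace=False,
                               drop_whitespace=False,
                               break_on_hyphens=False)
ruler = "0    5    10   15   20   25   30   35  "
sequence = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQAPILSR"
for ruler_line, seq_line in zip(wrapper.wrap(ruler), wrapper.wrap(sequence)):
    print(ruler_line)
    print(seq_line)
    print()
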
Example #26
0
    def __init__(self, logfile_path: str, kubeinfo, verbose: bool) -> None:
        """
        :param logfile_path: Path or string file path or "-" for stdout
        :param kubeinfo: How to run kubectl or equivalent
        :param verbose: Whether subcommand should run in verbose mode.
        """
        self.output = Output(logfile_path)
        self.logfile_path = self.output.logfile_path
        self.kubectl = kubeinfo
        self.verbose = verbose
        self.start_time = time()
        self.current_span = None  # type: typing.Optional[Span]
        self.counter = 0
        self.cleanup_stack = []  # type: typing.List[_CleanupItem]
        self.sudo_held = False
        self.quitting = False
        self.ended = []  # type: typing.List[str]

        if sys.platform.startswith("linux"):
            self.platform = "linux"
        elif sys.platform.startswith("darwin"):
            self.platform = "darwin"
        else:
            # For untested platforms...
            self.platform = sys.platform
        self.output.write("Platform: {}".format(self.platform))

        term_width = 99999
        self.chatty = False
        if sys.stderr.isatty():
            err_fd = sys.stderr.fileno()
            try:
                term_width = os.get_terminal_size(err_fd).columns - 1
                self.chatty = True
            except OSError:
                pass
        if term_width < 25:
            term_width = 99999
        self.wrapper = textwrap.TextWrapper(
            width=term_width,
            initial_indent="T: ",
            subsequent_indent="T: ",
            replace_whitespace=False,
            drop_whitespace=False,
        )
        self.raw_wrapper = textwrap.TextWrapper(
            width=99999,
            initial_indent="T: ",
            subsequent_indent="T: ",
            replace_whitespace=False,
            drop_whitespace=False,
        )
        self.session_id = uuid.uuid4().hex

        # Log some version info
        self.output.write("Python {}".format(sys.version))
        self.check_call(["uname", "-a"])

        cache_dir = os.path.expanduser("~/.cache/telepresence")
        os.makedirs(cache_dir, exist_ok=True)
        self.cache = Cache.load(os.path.join(cache_dir, "cache.json"))
        self.cache.invalidate(12 * 60 * 60)
        self.add_cleanup("Save caches", self.cache.save)

        # Docker for Mac only shares some folders; the default TMPDIR
        # on OS X is not one of them, so make sure we use /tmp:
        self.temp = Path(mkdtemp(prefix="tel-", dir="/tmp"))
        (self.temp / "session_id.txt").write_text(self.session_id)
        self.add_cleanup("Remove temporary directory", rmtree, str(self.temp))

        # Adjust PATH to cover common locations for conntrack, ifconfig, etc.
        # Also maybe prepend Telepresence's libexec directory.
        path = os.environ.get("PATH", os.defpath)
        path_elements = path.split(os.pathsep)
        for additional in "/usr/sbin", "/sbin":
            if additional not in path_elements:
                path += ":" + additional
        try:
            libexec = TELEPRESENCE_BINARY.parents[1] / "libexec"
        except IndexError:
            libexec = TELEPRESENCE_BINARY / "does_not_exist_please"
        if libexec.exists():
            path = "{}:{}".format(libexec, path)
        os.environ["PATH"] = path
Example #27
0
def has_legal_source(fts):
    """Checks whether the source of a function (sometimes module) is legal
    according to the function test setup."""
    module = sys.modules[fts.function.__module__]

    # visit function source
    source = inspect.getsource(fts.function)
    root = ast.parse(source)
    checker = SourceVisitor(module)
    checker.visit(root)

    # collect illegal (toxic) statements and calls
    toxic_statements = checker.get_node_types() & fts.illegal_statements
    toxic_calls = checker.get_callables() & fts.illegal_calls
    if len(fts.legal_calls) > 0:
        toxic_calls |= checker.get_callables() - fts.legal_calls
    toxic_modules = None

    # check module for imports
    imports = set(['Import', 'ImportFrom']) & fts.illegal_statements
    if len(imports) > 0 or len(fts.blacklist) > 0 or len(fts.whitelist) > 0:
        msource = inspect.getsource(module)
        mroot = ast.parse(msource)
        mchecker = SourceVisitor()
        mchecker.visit(mroot)
        if len(fts.blacklist) == 0 and len(fts.whitelist) == 0:
            toxic_statements |= mchecker.get_node_types() & imports
        else:
            toxic_modules = [m for m in mchecker.get_imports() if m not in
                    fts.whitelist or m in fts.blacklist]

    # check for loop
    # loops = set([
    #     'DictComp',
    #     'For',
    #     'GeneratorExp',
    #     'ListComp',
    #     'SetComp',
    #     'While',
    # ])
    # if len(loops & checker.get_node_types()) == 0:
    #     print(ANSI_COLOR['yellow']('WARNING -- no loop(s) found...'))

    # generate report
    report = []
    if len(toxic_statements) > 0:
        report.append('You used {}.'.format(', '.join(STATEMENT.get(stmt, stmt)
                      for stmt in sorted(toxic_statements))))
    if len(toxic_calls) > 0:
        report.append('You called {}.'.format(', '.join(call for call in
                      sorted(toxic_calls))))
    if toxic_modules:
        report.append('You imported illegal module{} {}.'.format('s' if
            len(toxic_modules) > 1 else '', ', '.join(m for m in
                sorted(toxic_modules))))

    # display report
    wrap = textwrap.TextWrapper(initial_indent='legal?  ',
            subsequent_indent='        ', width=80).wrap
    print('\n'.join(wrap(doh() + '  ' + '  '.join(report)
                    if len(report) > 0 else yay())))

    return len(report) == 0
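
The report above is printed through a wrapper whose first line carries the "legal?  " prefix and whose continuation lines are indented to match it. A minimal sketch with fabricated report text and a plain marker standing in for doh()/yay(), which are defined elsewhere:

import textwrap

wrap = textwrap.TextWrapper(initial_indent='legal?  ',
                            subsequent_indent='        ', width=80).wrap
report = ['You used while loops.', 'You called eval.']
marker = 'NO'  # stand-in for doh()/yay() from the surrounding test setup
print('\n'.join(wrap(marker + '  ' + '  '.join(report))))
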
Example #28
0
def wrap_ylabel(text, width=MAX_INSTR_YLABEL_CHARS):
    return textwrap.TextWrapper(width=width).fill(text.replace('_', '\n'))
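
A possible call of wrap_ylabel above (the label is made up): underscores become whitespace, so the wrapper can break the identifier at those points; width is passed explicitly since MAX_INSTR_YLABEL_CHARS is defined elsewhere.

import textwrap  # required by wrap_ylabel above

print(wrap_ylabel("load_store_unit_stalls", width=10))
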
Example #29
0
def feedbackReview(feedback, size):
    return textwrap.TextWrapper(width=size).wrap(feedback)
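
A possible call of feedbackReview above (the feedback text is made up): it returns the wrapped feedback as a list of lines no wider than size.

import textwrap  # required by feedbackReview above

for line in feedbackReview("Great work overall, but please keep your lines "
                           "under the agreed width limit.", 24):
    print(line)
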
Example #30
0
def prettyprint(annots, outlines, mediaboxes, alllinenos):

    tw = textwrap.TextWrapper(width=80,
                              initial_indent=" * ",
                              subsequent_indent="   ")

    def fmtpos(annot):
        apos = annot.getstartpos()
        if apos:
            o = nearest_outline(outlines, annot.pageno,
                                mediaboxes[annot.pageno], apos)
        else:
            o = None
        if o:
            return "Page %d (%s):" % (annot.pageno + 1, o.title)
        else:
            return "Page %d:" % (annot.pageno + 1)

    def fmttext(annot):
        if annot.boxes:
            if annot.gettext():
                return '"%s"' % annot.gettext()
            else:
                return "(XXX: missing text!)"
        else:
            return ''

    def formatitem(*args):
        msg = '- ' + '\n'.join(args)
        # print(tw.fill(msg) + "\n")
        return msg + "\n"

    def printitem(*args):
        print(formatitem(*args))

    nits = [
        a for a in annots
        if a.tagname in ['squiggly', 'strikeout', 'underline']
    ]
    comments = [a for a in annots if a.tagname in ['highlight'] and a.contents]
    # commentslinenos = [alllinenos[ai] for ai,a in enumerate(annots) if a.tagname in ['highlight', 'text'] and a.contents]
    highlights = [
        a for a in annots if a.tagname == 'highlight' and a.contents is None
    ]
    # print(alllinenos)
    allcomments = []
    if comments:
        # if highlights:
        #     print() # blank
        # print("# All comments\n")
        commenti = 0
        for a in comments:
            text = fmttext(a)
            if text:
                # XXX: lowercase the first word, to join it to the "Regarding" sentence
                contents = a.contents
                firstword = contents.split()[0]
                if firstword != 'I' and not firstword.startswith("I'"):
                    contents = contents[0].lower() + contents[1:]
                # printitem(fmtpos(a), alllinenos[commenti] if commenti<len(alllinenos) else '', "Regarding", text + ",", contents)
                # printitem(fmtpos(a), "Regarding "+text, contents)
                allcomments.append(
                    formatitem(fmtpos(a), "Regarding " + text, contents))
                commenti += 1
            # else:
            #     printitem(fmtpos(a), a.contents)
    hashtag2category = {
        '#major': '# Major comments:\n',
        '#minor': '# Minor comments:\n',
        '#checkinabstract': '#checkinabstract\n',
        '#checkinresults': '#checkinresults\n',
        '#checkindiscussion': '#checkindiscussion\n',
        '#checkinmethods': '#checkinmethods\n',
        '#checkinrefs': '#checkinrefs\n',
        '#checkout': '#checkout\n',
    }
    for ctype in ['#major', '#minor']:
        print(hashtag2category[ctype])
        for c in allcomments:
            if '#check' not in c:
                if ctype in c:
                    print(c.replace(ctype, ''))

    for ctype in [k for k in hashtag2category.keys() if '#check' in k]:
        ci = 0
        for c in allcomments:
            if '#check' in c:
                if ci == 0:
                    print(hashtag2category[ctype])
                print(c)
                ci += 1

    ci = 0
    for c in allcomments:
        if '#' not in c:
            if ci == 0:
                print('# Comments without hashtags:')
            print(c)
            ci += 1

    if highlights:
        print("## Just Highlights\n")
        for a in highlights:
            printitem(fmtpos(a), fmttext(a))

    if nits:
        if highlights or comments:
            print()  #  blank
        print("## Nits\n")
        for a in nits:
            text = fmttext(a)
            if a.contents:
                printitem(fmtpos(a), "%s -> %s" % (text, a.contents))
            else:
                printitem(fmtpos(a), "%s" % text)