Code example #1
    def __str__(self):
        if self.networks and len(self.networks) > 1:
            lines = ["Nexus dataset '%s' (#%s) with %d networks" % \
                    (self.sid, self.id, len(self.networks))]
        else:
            lines = ["Nexus dataset '%(sid)s' (#%(id)s)" % self.__dict__]

        lines.append("vertices/edges: %s" % self.vertices_edges)

        if self.name:
            lines.append("name: %s" % self.name)
        if self.tags:
            lines.append("tags: %s" % "; ".join(self.tags))

        if self.rest:
            wrapper = TextWrapper(width=76, subsequent_indent='  ')

            keys = sorted(self.rest.keys())
            if "attribute" in self.rest:
                keys.remove("attribute")
                keys.append("attribute")

            for key in keys:
                for value in self.rest.getlist(key):
                    paragraphs = str(value).splitlines()
                    wrapper.initial_indent = "%s: " % key
                    for paragraph in paragraphs:
                        ls = wrapper.wrap(paragraph)
                        if ls:
                            lines.extend(ls)
                        else:
                            lines.append("  .")
                        wrapper.initial_indent = "  "

        return "\n".join(lines)
Code example #2
def print_terminal_help_topic(topic):
    helpmodule = importlib.import_module('cqlhelp.cql%s' % "".join(w.capitalize() for w in topic.split("_")))

    lines = helpmodule.__doc__.split('\n')

    try:
        rows, columns = subprocess.check_output(['stty', 'size']).split()
    except:
        rows = 25
        columns = 80

    wrapper = TextWrapper()
    wrapper.break_on_hyphens = False
    try:
        wrapper.width = int(columns)
    except ValueError:
        wrapper.width = 80

    for line in lines:
        if re.match(r'\s', line):
            line = line.strip()
            wrapper.initial_indent = '    '
            if line.startswith('-'):
                wrapper.subsequent_indent = '        '
            else:
                wrapper.subsequent_indent = '    '
        else:
            wrapper.initial_indent = ''
            wrapper.subsequent_indent = ''

        print(wrapper.fill(line))
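
The stty call above only works when stdout is a real POSIX terminal. As a sketch of a more portable way to obtain the same width (not part of the original cqlsh code), shutil.get_terminal_size can be used:

import shutil
from textwrap import TextWrapper

# Falls back to 80 columns when no terminal is attached.
columns = shutil.get_terminal_size(fallback=(80, 24)).columns
wrapper = TextWrapper(width=columns, break_on_hyphens=False)
print(wrapper.fill("A long help paragraph that should wrap to the detected terminal width."))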
Code example #3
    def info(cls, _str=True):
        if not _str:
            return PCFGConfig.argNames

        # Auto text wrapper to output the doc.
        tw = TextWrapper()
        tw.initial_indent = "    "
        tw.subsequent_indent = "    "

        retVal = "General Configuration: \n"
        for argName in PCFGConfig.argNames:
            arg = str(argName["arg"])
            argreq = str(argName["req"])
            argtype = str(argName["type"].__name__)
            argdef = str(argName["def"])
            argdoc = str(argName["doc"])
            argex = str(argName["ex"])
            doclines = tw.wrap(argdoc)

            aType = "optional"
            if argreq:
                aType = "required"

            retVal += "  %s (%s, %s):\n" % (arg, argtype, aType)
            retVal += "    Defaults to %s\n" % (argdef)
            for docline in doclines:
                retVal += "%s\n" % docline
            retVal += "    Example: %s\n" % argex
            retVal += "\n"
        return retVal
Code example #4
File: ui.py Project: PixelNoob/piston
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
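
dump_recursive_parents (and dump_recursive_comments in the next example) read and update a module-level counter that is not shown in this excerpt; in ui.py it is assumed to be initialised roughly as:

# Assumed module-level state in ui.py; the starting value here is a guess.
currentThreadDepth = 0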
Code example #5
File: ui.py Project: PixelNoob/piston
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    global currentThreadDepth
    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (depth + currentThreadDepth)
    postWrapper.subsequent_indent = "  " * (depth + currentThreadDepth)

    depth = int(depth)

    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
Code example #6
 def do_me(self, mdig_model):
     from textwrap import TextWrapper
     import re
     models = mdig.repository.get_models()
     title_str = "Models in MDiG GRASS db @ " + mdig.repository.db
     print "-" * len(title_str)
     print title_str
     print "model_name [location]"
     print "    description"
     print "-" * len(title_str)
     ms = models.keys()[:]
     ms.sort()
     for m in ms:
         try:
             dm = DispersalModel(models[m], setup=False)
             tw = TextWrapper(expand_tabs=False, replace_whitespace=True)
             tw.initial_indent = " " * 4
             tw.subsequent_indent = " " * 4
             desc = dm.get_description()
             desc = re.sub("[\\s\\t]+", " ", desc)
             desc = tw.fill(desc)
             loc = dm.get_location()
             if not loc:
                 loc = dm.infer_location()
             if not loc:
                 loc = "unknown"
             print "%s [%s]:\n%s" % (m, loc, desc)
         except mdig.model.ValidationError:
             print "%s [ERROR]" % (m, )
     sys.exit(0)
Code example #7
File: changes.py Project: c0ns0le/cygwin
def refill(msg):
    """
    Refill a changelog message.

    Normalize the message reducing multiple spaces and newlines to single
    spaces, recognizing common form of ``bullet lists``, that is paragraphs
    starting with either a dash "-" or an asterisk "*".
    """

    wrapper = TextWrapper()
    res = []
    items = itemize_re.split(msg.strip())

    if len(items)>1:
        # Remove possible first empty split, when the message immediately
        # starts with a bullet
        if not items[0]:
            del items[0]

        if len(items)>1:
            wrapper.initial_indent = '- '
            wrapper.subsequent_indent = ' '*2

    for item in items:
        if item:
            words = filter(None, item.strip().replace('\n', ' ').split(' '))
            normalized = ' '.join(words)
            res.append(wrapper.fill(normalized))

    return '\n\n'.join(res)
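
itemize_re is a module-level regular expression defined elsewhere in changes.py and not shown here. A hypothetical stand-in, together with a sample call, might look like this (the real pattern may differ):

import re
from textwrap import TextWrapper

# Hypothetical stand-in for the itemize_re used by refill() above.
itemize_re = re.compile(r'^[ \t]*[-*][ \t]+', re.MULTILINE)

print(refill("Fix the parser.\n- handle empty input\n- report line numbers on errors"))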
Code example #8
File: CodeOutput.py Project: moble/PostNewtonian
    def CppInitializations(self, Indent=4):
        """Create initialization list for C++

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the initialization
        list should be

            m1(m1_i), m2(m2_i), t(t_i), x(x_i)

        The quantities m1_i, etc., appear in the input-argument list
        output by the method `CppInputArguments`.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' '*Indent
        wrapper.subsequent_indent = wrapper.initial_indent
        def Initialization(atom):
            if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >'):
                return '{0}({1})'.format(self.Variables[atom], len(atom.substitution))
            if atom.fundamental:
                return '{0}({0}_i)'.format(self.Variables[atom])
            else:
                return '{0}({1})'.format(self.Variables[atom], atom.ccode())
        Initializations  = [Initialization(atom) for atom in self.Atoms]
        return wrapper.fill(', '.join(Initializations))
Code example #9
File: tools.py Project: timtangcoding/Menotexport
def printHeader(s, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs = {1: '=', 2: '-', 3: '.'}
    indents = {1: 0, 2: 4, 3: 8}

    dec = decs[level]
    ind = indents[level]
    indstr = ' ' * int(ind)

    wrapper = TextWrapper()
    wrapper.width = length - ind
    wrapper.initial_indent = indstr
    wrapper.subsequent_indent = indstr

    #-------------Get delimiter line-------------
    hline = '%s%s' % (' ' * int(ind), dec * int(length - ind))

    #--------------------Wrap texts--------------------
    strings = wrapper.wrap('%s %s' % (prefix, s))

    #----------------------Print----------------------
    try:
        print('\n' + hline)
    except:
        print('\n' + hline.encode('ascii', 'replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii', 'replace'))
    #print(hline)

    return
Code example #10
File: changes.py Project: yut148/tailor
def refill(msg):
    """
    Refill a changelog message.

    Normalize the message reducing multiple spaces and newlines to single
    spaces, recognizing common form of ``bullet lists``, that is paragraphs
    starting with either a dash "-" or an asterisk "*".
    """

    wrapper = TextWrapper()
    res = []
    items = itemize_re.split(msg.strip())

    if len(items) > 1:
        # Remove possible first empty split, when the message immediately
        # starts with a bullet
        if not items[0]:
            del items[0]

        if len(items) > 1:
            wrapper.initial_indent = '- '
            wrapper.subsequent_indent = ' ' * 2

    for item in items:
        if item:
            words = filter(None, item.strip().replace('\n', ' ').split(' '))
            normalized = ' '.join(words)
            res.append(wrapper.fill(normalized))

    return '\n\n'.join(res)
Code example #11
File: ui.py Project: dpays/dpaycli
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    global currentThreadDepth
    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (depth + currentThreadDepth)
    postWrapper.subsequent_indent = "  " * (depth + currentThreadDepth)

    depth = int(depth)

    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"],
                                    depth + 1)
Code example #12
    def CppEvaluateExpressions(self, Indent=4, Expressions=None):
        """Declare and define the `Expressions` for C++

        The output of this function declares are defines the
        `Expressions` as individual variables.  An optional dictionary
        of expressions allows just a subset of this object's
        expressions to be output; if this argument is not present, all
        will be output.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' ' * Indent
        wrapper.subsequent_indent = wrapper.initial_indent + '  '
        Evaluations = []
        if not Expressions:
            Expressions = self.Expressions
        for Expression in Expressions:
            try:
                Evaluations.append(
                    wrapper.fill('{0}{1} {2} = {3};'.format(
                        self.const(Expression), self.dtype(Expression),
                        Expressions[Expression], Expression.ccode())))
            except TypeError:
                pass
        return '\n'.join(Evaluations)
Code example #13
    def _print_list_helper(cls, list_msg, message_list, helper_info=None):
        def kwargs_fn(msg):
            fieldnames = [
                fname for _, fname, _, _ in Formatter().parse(list_msg)
                if fname
            ]
            dic = dict()
            for fieldname in fieldnames:
                val = getattr(msg, fieldname)
                if isinstance(val, pathlib.Path):
                    val = val.name
                dic[fieldname] = val
            return dic

        final_msg = "%s:\n" % cls.STATIC_MSG

        final_msg += "\t%s" % ("\n\t".join(
            "- " + list_msg.format(**kwargs_fn(msg)) for msg in message_list))
        if helper_info:
            wrapper = TextWrapper()
            wrapper.initial_indent = "\t\t"
            wrapper.subsequent_indent = "\t\t"
            final_msg += "\n\tRemarks:\n%s" % wrapper.fill(helper_info)

        return final_msg
Code example #14
    def CppInitializations(self, Indent=4):
        """Create initialization list for C++

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the initialization
        list should be

            m1(m1_i), m2(m2_i), t(t_i), x(x_i)

        The quantities m1_i, etc., appear in the input-argument list
        output by the method `CppInputArguments`.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' ' * Indent
        wrapper.subsequent_indent = wrapper.initial_indent

        def Initialization(atom):
            if atom.datatype and (atom.datatype == 'std::vector<double>'
                                  or atom.datatype
                                  == 'std::vector<std::complex<double> >'):
                return '{0}({1})'.format(self.Variables[atom],
                                         len(atom.substitution))
            if atom.fundamental:
                return '{0}({0}_i)'.format(self.Variables[atom])
            else:
                return '{0}({1})'.format(self.Variables[atom], atom.ccode())

        Initializations = [Initialization(atom) for atom in self.Atoms]
        return wrapper.fill(', '.join(Initializations))
Code example #15
File: ui.py Project: sanbir/piston
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
Code example #16
    def CppInputArguments(self, Indent=12):
        """Create basic input arguments for C++

        The fundamental variables are listed, along with their data
        types and `const` if the variable is constant.  This would be
        an appropriate string to represent the input arguments for a
        function or class constructor to calculate the `Expressions`
        of this CodeConstructor object.

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the input argument
        list should be

            const double m1_i, const double m2_i, double t_i, double x_i

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' ' * Indent
        wrapper.subsequent_indent = wrapper.initial_indent
        InputArguments = [
            'const {0} {1}_i'.format(self.dtype(atom), self.Variables[atom])
            for atom in self.Atoms if atom.fundamental
        ]
        return wrapper.fill(', '.join(InputArguments)).lstrip()
Code example #17
File: tools.py Project: Xunius/Menotexport
def printHeader(s, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs={1: '=', 2: '-', 3: '.'}
    indents={1: 0, 2: 4, 3: 8}

    dec=decs[level]
    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length-ind
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    #-------------Get delimiter line-------------
    hline='%s%s' %(' '*int(ind),dec*int(length-ind)) 

    #--------------------Wrap texts--------------------
    strings=wrapper.wrap('%s %s' %(prefix,s))

    #----------------------Print----------------------
    try:
        print('\n'+hline)
    except:
        print('\n'+hline.encode('ascii','replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii','replace'))
    #print(hline)

    return
Code example #18
File: CodeOutput.py Project: moble/PostNewtonian
    def CppEvaluations(self, Indent=4):
        """Evaluate all derived variables in C++

        This function uses the `substitution` expressions for the
        derived variables.  This output is appropriate for updating
        the values of the variables at each step of an integration,
        for example.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' '*Indent
        wrapper.subsequent_indent = wrapper.initial_indent + '  '
        def Evaluation(atom):
            def Ccode(a) :
                try:
                    return a.ccode()
                except :
                    from sympy.printing import ccode
                    return ccode(a)
            if atom.datatype and (atom.datatype=='std::vector<double>' or atom.datatype=='std::vector<std::complex<double> >') :
                return '\n'.join([wrapper.fill('{0}[{1}] = {2};'.format(self.Variables[atom], i, Ccode(atom.substitution[i])))
                                  for i in range(len(atom.substitution))])
            else:
                return wrapper.fill('{0} = {1};'.format(self.Variables[atom], atom.ccode()))
        return '\n'.join([Evaluation(atom) for atom in self.Atoms if not atom.fundamental and not atom.constant])
Code example #19
File: admin.py Project: dgpreatoni/mdig
 def do_me(self,mdig_model):
     from textwrap import TextWrapper
     import re
     models = mdig.repository.get_models()
     title_str = "Models in MDiG GRASS db @ " + mdig.repository.db
     print "-"*len(title_str)
     print title_str
     print "model_name [location]"
     print "    description"
     print "-"*len(title_str)
     ms=models.keys()[:]
     ms.sort()
     for m in ms:
         try:
             dm = DispersalModel(models[m],setup=False)
             tw = TextWrapper(expand_tabs = False, replace_whitespace = True )
             tw.initial_indent = " "*4
             tw.subsequent_indent = " "*4
             desc = dm.get_description()
             desc = re.sub("[\\s\\t]+"," ",desc)
             desc = tw.fill(desc)
             loc = dm.get_location()
             if not loc:
                 loc = dm.infer_location()
             if not loc:
                 loc = "unknown"
             print "%s [%s]:\n%s" % (m,loc,desc)
         except mdig.model.ValidationError:
             print "%s [ERROR]" % (m,)
     sys.exit(0)
Code example #20
File: plot.py Project: paccanarolab/ConSAT
 def plot_list(self):
     """Lists the names of the available figures"""
     wrapper = TextWrapper(subsequent_indent=" " * 22,
                           width=78)
     for method, func in self.get_available_figures():
         if method != "list":
             wrapper.initial_indent = ("%-20s " % method).ljust(22)
             print(wrapper.fill(func.figure_name))
Code example #21
File: EMBLContig.py Project: JTumelty/gff3toembl
 def default_attribute_formatter(self, key, value):
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   attribute_text_template='/{attribute_key}="{attribute_value}"'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
Code example #22
File: EMBLContig.py Project: JTumelty/gff3toembl
 def number_attribute_formatter(self, key, value):
   # transl_table attributes do not have their values in quotes
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   attribute_text_template='/{attribute_key}={attribute_value}'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
Code example #23
File: EMBLContig.py Project: luoluo690/gff3toembl
 def default_attribute_formatter(self, key, value):
     wrapper = TextWrapper()
     wrapper.initial_indent = 'FT                   '
     wrapper.subsequent_indent = 'FT                   '
     wrapper.width = 80  # can use 80 characters plus the new line
     attribute_text_template = '/{attribute_key}="{attribute_value}"'
     attribute_text = attribute_text_template.format(attribute_key=key,
                                                     attribute_value=value)
     return wrapper.fill(attribute_text)
Code example #24
File: EMBLContig.py Project: JTumelty/gff3toembl
 def header_attribute_formatter(self, key, header_text, quote_character, final_character):
   wrapper = TextWrapper()
   wrapper.initial_indent=key + '   '
   wrapper.subsequent_indent=key + '   '
   wrapper.width=79
   attribute_text_template='{attribute_quote_character}{attribute_header_text}{attribute_quote_character}{attribute_final_character}'
   attribute_text=attribute_text_template.format(attribute_header_text = header_text, 
                                                 attribute_quote_character = quote_character, 
                                                 attribute_final_character = final_character)
   return wrapper.fill(attribute_text)
Code example #25
File: EMBLContig.py Project: JTumelty/gff3toembl
 def product_attribute_formatter(self, key, value):
   # Products can include very long enzyme names which we don't want to break
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   wrapper.break_on_hyphens=True
   attribute_text_template='/{attribute_key}="{attribute_value}"'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
Code example #26
File: EMBLContig.py Project: luoluo690/gff3toembl
 def number_attribute_formatter(self, key, value):
     # transl_table attributes do not have their values in quotes
     wrapper = TextWrapper()
     wrapper.initial_indent = 'FT                   '
     wrapper.subsequent_indent = 'FT                   '
     wrapper.width = 80  # can use 80 characters plus the new line
     attribute_text_template = '/{attribute_key}={attribute_value}'
     attribute_text = attribute_text_template.format(attribute_key=key,
                                                     attribute_value=value)
     return wrapper.fill(attribute_text)
Code example #27
File: EMBLContig.py Project: luoluo690/gff3toembl
 def product_attribute_formatter(self, key, value):
     # Products can include very long enzyme names which we don't want to break
     wrapper = TextWrapper()
     wrapper.initial_indent = 'FT                   '
     wrapper.subsequent_indent = 'FT                   '
     wrapper.width = 80  # can use 80 characters plus the new line
     wrapper.break_on_hyphens = True
     attribute_text_template = '/{attribute_key}="{attribute_value}"'
     attribute_text = attribute_text_template.format(attribute_key=key,
                                                     attribute_value=value)
     return wrapper.fill(attribute_text)
Code example #28
File: EMBLContig.py Project: luoluo690/gff3toembl
 def header_attribute_formatter(self, key, header_text, quote_character,
                                final_character):
     wrapper = TextWrapper()
     wrapper.initial_indent = key + '   '
     wrapper.subsequent_indent = key + '   '
     wrapper.width = 80  # can use 80 characters plus the new line
     attribute_text_template = '{attribute_quote_character}{attribute_header_text}{attribute_quote_character}{attribute_final_character}'
     attribute_text = attribute_text_template.format(
         attribute_header_text=header_text,
         attribute_quote_character=quote_character,
         attribute_final_character=final_character)
     return wrapper.fill(attribute_text)
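
All of these EMBL formatters rely on the same trick: the fixed "FT" column prefix is used as both initial_indent and subsequent_indent, so wrapped qualifier lines stay inside the feature-table layout. A minimal standalone sketch with a hypothetical qualifier value:

from textwrap import TextWrapper

wrapper = TextWrapper(width=79)
wrapper.initial_indent = 'FT                   '
wrapper.subsequent_indent = 'FT                   '
# Hypothetical /product value, purely for illustration.
print(wrapper.fill('/product="putative 3-oxoacyl-[acyl-carrier-protein] synthase III"'))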
Code example #29
    def CppDeclarations(self, Indent=4):
        """Create declaration statements for C++

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the declaration
        list should be

            const double m1, m2;
            double t, x;

        The code knows which atoms need to be declared at the
        beginning, and which ones should be `const`, for example.  For
        C++, the default datatype is `double`; if the atom was created
        with a different datatype, that will be used appropriately.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ''
        wrapper.subsequent_indent = ''
        datatype = ''
        Declarations = ''
        Names = []
        for atom in self.Atoms:
            thisdatatype = CodeConstructor.const(atom) + CodeConstructor.dtype(
                atom) + ' '
            if thisdatatype != datatype:
                if Names:
                    Declarations += wrapper.fill(', '.join(Names)) + ";\n"
                Names = []
                datatype = thisdatatype
                wrapper.initial_indent = ' ' * Indent + thisdatatype
                wrapper.subsequent_indent = ' ' * len(wrapper.initial_indent)
            Names.append(self.Variables[atom])
        if Names:
            Declarations += wrapper.fill(', '.join(Names)) + ";\n"
        return Declarations.rstrip()
Code example #30
File: CodeOutput.py Project: moble/PostNewtonian
    def CppDeclarations(self, Indent=4):
        """Create declaration statements for C++

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the declaration
        list should be

            const double m1, m2;
            double t, x;

        The code knows which atoms need to be declared at the
        beginning, and which ones should be `const`, for example.  For
        C++, the default datatype is `double`; if the atom was created
        with a different datatype, that will be used appropriately.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ''
        wrapper.subsequent_indent = ''
        datatype = ''
        Declarations = ''
        Names = []
        for atom in self.Atoms:
            thisdatatype = CodeConstructor.const(atom) + CodeConstructor.dtype(atom) + ' '
            if thisdatatype != datatype:
                if Names:
                    Declarations += wrapper.fill(', '.join(Names)) + ";\n"
                Names = []
                datatype = thisdatatype
                wrapper.initial_indent = ' '*Indent + thisdatatype
                wrapper.subsequent_indent = ' '*len(wrapper.initial_indent)
            Names.append(self.Variables[atom])
        if Names:
            Declarations += wrapper.fill(', '.join(Names)) + ";\n"
        return Declarations.rstrip()
Code example #31
    def usage(self, argv):
        """
            Generate the outlying usage text for TD.
            This tells the user the list of current
            commands, generated programatically,
            and the outlying command functionality.
        """

        text = (
            "Usage: {prog} <command>\n\n"
            "Where <command> is one of:\n\n"
                .format(prog=argv[0])
        )

        # Figure out the pre-indentation
        cmdFmt = '  {:<12s}  '
        cmdFmtLen = len(cmdFmt.format(''))
        # Generate a formatter which will produce nicely formatted text
        # that wraps at column 78 but puts continuation text one character
        # indented from where the previous text started, e.g
        #   cmd1    Cmd1 help text stuff
        #            continued cmd1 text
        #   cmd2    Cmd2 help text
        tw = TextWrapper(
                subsequent_indent=' '*(cmdFmtLen + 1),
                width=78,
                drop_whitespace=True,
                expand_tabs=True,
                fix_sentence_endings=True,
                break_long_words=False,
                break_on_hyphens=True,
                )

        # List each command with its help text
        lastCmdName = None
        for cmdName, cmd in sorted(commandIndex.items()):
            tw.initial_indent = cmdFmt.format(cmdName)
            text += tw.fill(cmd.help) + "\n"
            lastCmdName = cmdName

        # Epilog
        text += (
            "\n"
            "For additional help on a specific command, such as '{cmd}' use\n"
            "  {prog} {cmd} -h"
                .format(prog=argv[0], cmd=lastCmdName)
            )
        return text
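
commandIndex is the project's command registry and is not shown in this excerpt. A small self-contained sketch of the same two-column help layout, with hypothetical commands and help texts, is:

from textwrap import TextWrapper

# Hypothetical command -> help-text mapping standing in for commandIndex.
commands = {
    "buy": "Find the cheapest station selling an item, ranked by price and distance.",
    "run": "Compute a multi-hop trade route that maximises profit for the ship's capacity.",
}

cmdFmt = '  {:<12s}  '
tw = TextWrapper(width=78, subsequent_indent=' ' * (len(cmdFmt.format('')) + 1))
for name, help_text in sorted(commands.items()):
    tw.initial_indent = cmdFmt.format(name)
    print(tw.fill(help_text))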
Code example #33
    def CppExpressionsAsFunctions(self, Indent=4, Expressions=None):
        """Define functions to calculate the `Expressions` in C++

        The output of this function gives C++ functions to calculate
        the `Expressions`, assuming the functions are member methods
        in a class, and so can access the atoms of the expression
        without explicit arguments.  An optional dictionary of
        expressions allows just a subset of this object's expressions
        to be output; if this argument is not present, all will be
        output.

        """
        def dtype(e):
            if e.datatype:
                return e.datatype
            else:
                return 'double'

        from textwrap import TextWrapper
        from PNObjects import PNCollection
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' ' * Indent + '  return '
        wrapper.subsequent_indent = ' ' * Indent + '    '
        Evaluations = []
        if not Expressions:
            Expressions = self.Expressions
        for Expression in Expressions:
            ExprColl = PNCollection()
            for atom in Expression.substitution_atoms:
                if atom not in self.Variables:
                    try:
                        ExprColl.AddDerivedVariable(
                            str(atom),
                            atom.substitution,
                            substitution_atoms=atom.substitution_atoms,
                            datatype=atom.datatype)
                    except TypeError:
                        pass
            MiniConstructor = CodeConstructor(self.Variables, ExprColl)
            Evaluations.append(' ' * Indent + dtype(Expression) + ' ' +
                               Expressions[Expression] + '() {\n' +
                               MiniConstructor.CppEvaluateExpressions(Indent +
                                                                      2) +
                               '\n' + wrapper.fill(Expression.ccode()) +
                               ';\n' + ' ' * Indent + '}')
        return '\n'.join(Evaluations)
Code example #34
File: tools.py Project: Xunius/Menotexport
def printInd(s, level=1, length=70, prefix=''):
    from textwrap import TextWrapper
    indents={1: 0, 2: 4, 3: 8, 4: 12, 5: 16}

    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    string=wrapper.fill('%s %s' %(prefix,s))
    try:
        print('\n'+string)
    except:
        print('\n'+string.encode('ascii','replace'))

    return 
Code example #35
File: CodeOutput.py Project: moble/PostNewtonian
    def CppExpressionsAsFunctions(self, Indent=4, Expressions=None):
        """Define functions to calculate the `Expressions` in C++

        The output of this function gives C++ functions to calculate
        the `Expressions`, assuming the functions are member methods
        in a class, and so can access the atoms of the expression
        without explicit arguments.  An optional dictionary of
        expressions allows just a subset of this object's expressions
        to be output; if this argument is not present, all will be
        output.

        """
        def dtype(e):
            if e.datatype:
                return e.datatype
            else:
                return 'double'
        from textwrap import TextWrapper
        from PNObjects import PNCollection
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' '*Indent + '  return '
        wrapper.subsequent_indent = ' '*Indent + '    '
        Evaluations = []
        if not Expressions:
            Expressions=self.Expressions
        for Expression in Expressions:
            ExprColl = PNCollection()
            for atom in Expression.substitution_atoms:
                if atom not in self.Variables:
                    try:
                        ExprColl.AddDerivedVariable(str(atom), atom.substitution,
                                                    substitution_atoms=atom.substitution_atoms,
                                                    datatype=atom.datatype)
                    except TypeError:
                        pass
            MiniConstructor = CodeConstructor(self.Variables, ExprColl)
            Evaluations.append(
                ' '*Indent + dtype(Expression) + ' ' + Expressions[Expression] + '() {\n'
                + MiniConstructor.CppEvaluateExpressions(Indent+2) + '\n'
                + wrapper.fill(Expression.ccode())
                + ';\n' + ' '*Indent + '}'
            )
        return '\n'.join(Evaluations)
Code example #36
File: tools.py Project: timtangcoding/Menotexport
def printInd(s, level=1, length=70, prefix=''):
    from textwrap import TextWrapper
    indents = {1: 0, 2: 4, 3: 8, 4: 12, 5: 16}

    ind = indents[level]
    indstr = ' ' * int(ind)

    wrapper = TextWrapper()
    wrapper.width = length
    wrapper.initial_indent = indstr
    wrapper.subsequent_indent = indstr

    string = wrapper.fill('%s %s' % (prefix, s))
    try:
        print('\n' + string)
    except:
        print('\n' + string.encode('ascii', 'replace'))

    return
Code example #37
def main():
    ugly = False
    if os.sys.platform[0:3] == 'win':
        ugly = True

    response = urllib2.urlopen(sys.argv[1])
    encoding = response.headers.getparam('charset')
    html = response.read().decode(encoding)

    f = StringIO(html)
    parser = etree.HTMLParser()

    #create SAX tree
    tree = etree.parse(f, parser)

    handler = BoilerpipeHTMLContentHandler()
    sax.saxify(tree, handler)

    a = ArticleExtractor()

    #parses our data and creates TextDocument with TextBlocks
    doc = handler.toTextDocument()

    tw = TextWrapper()
    tw.width = 80
    tw.initial_indent = os.linesep + os.linesep
    parsed_url = urllib2.urlparse.urlparse(sys.argv[1])
    filename = parsed_url.netloc + "-" + "".join([
        c for c in parsed_url.path if c.isalpha() or c.isdigit() or c == ' '
    ]).rstrip() + '.txt'
    output = []
    for line in a.getText(doc).splitlines():
        output.append(tw.fill(line))
    i = 0
    with codecs.open(filename, 'w', encoding='utf8') as f:
        for line in output:
            if ugly:
                line = line.replace('\n', os.linesep)
            f.write(line)
    print "Article saved. Lines: %s. Filename: %s" % (len(output), filename)
Code example #38
File: tools.py Project: Xunius/Menotexport
def printNumHeader(s, idx, num, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs={1: '=', 2: '-', 3: '.'}
    indents={1: 0, 2: 4, 3: 8}

    dec=decs[level]
    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length-ind
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    #-------------Get delimiter line-------------
    decl=int((length-ind-2-len(str(idx))-len(str(num)))/2.)
    decl=decl*dec

    hline1='%s%s %d/%d %s' %(' '*int(ind),decl,idx,num,decl) 
    #hline2='%s%s' %(' '*int(ind),dec*int(length-ind)) 

    #--------------------Wrap texts--------------------
    strings=wrapper.wrap('%s %s' %(prefix,s))

    #----------------------Print----------------------
    try:
        print('\n'+hline1)
    except:
        print('\n'+hline1.encode('ascii','replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii','replace'))
    #print(hline2)

    return
Code example #39
    def CppEvaluations(self, Indent=4):
        """Evaluate all derived variables in C++

        This function uses the `substitution` expressions for the
        derived variables.  This output is appropriate for updating
        the values of the variables at each step of an integration,
        for example.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' ' * Indent
        wrapper.subsequent_indent = wrapper.initial_indent + '  '

        def Evaluation(atom):
            def Ccode(a):
                try:
                    return a.ccode()
                except:
                    from sympy.printing import ccode
                    return ccode(a)

            if atom.datatype and (atom.datatype == 'std::vector<double>'
                                  or atom.datatype
                                  == 'std::vector<std::complex<double> >'):
                return '\n'.join([
                    wrapper.fill('{0}[{1}] = {2};'.format(
                        self.Variables[atom], i, Ccode(atom.substitution[i])))
                    for i in range(len(atom.substitution))
                ])
            else:
                return wrapper.fill('{0} = {1};'.format(
                    self.Variables[atom], atom.ccode()))

        return '\n'.join([
            Evaluation(atom) for atom in self.Atoms
            if not atom.fundamental and not atom.constant
        ])
Code example #40
File: tools.py Project: timtangcoding/Menotexport
def printNumHeader(s, idx, num, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs = {1: '=', 2: '-', 3: '.'}
    indents = {1: 0, 2: 4, 3: 8}

    dec = decs[level]
    ind = indents[level]
    indstr = ' ' * int(ind)

    wrapper = TextWrapper()
    wrapper.width = length - ind
    wrapper.initial_indent = indstr
    wrapper.subsequent_indent = indstr

    #-------------Get delimiter line-------------
    decl = int((length - ind - 2 - len(str(idx)) - len(str(num))) / 2.)
    decl = decl * dec

    hline1 = '%s%s %d/%d %s' % (' ' * int(ind), decl, idx, num, decl)
    #hline2='%s%s' %(' '*int(ind),dec*int(length-ind))

    #--------------------Wrap texts--------------------
    strings = wrapper.wrap('%s %s' % (prefix, s))

    #----------------------Print----------------------
    try:
        print('\n' + hline1)
    except:
        print('\n' + hline1.encode('ascii', 'replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii', 'replace'))
    #print(hline2)

    return
Code example #41
def report_to_display(display, banner, banner_color, hint, hint_wrap,
                      group_index, group_items):
    gutter = '[R:{0}] '.format(group_index + 1)
    indent = ' '.ljust(len(gutter))

    display.display('{0}{1}\n'.format(gutter, banner), color=banner_color)

    for item in group_items:
        display.display('{0}  - {1}'.format(indent, item['path']),
                        color=C.COLOR_HIGHLIGHT)

    if hint and not hint_wrap:
        display.display('\n{0}HINT: {1}\n'.format(indent, hint),
                        color=C.COLOR_HIGHLIGHT)
    elif hint:
        wrapper = TextWrapper()
        wrapper.initial_indent = indent
        wrapper.subsequent_indent = indent
        wrapper.drop_whitespace = False
        wrapper.width = 70 - len(indent)
        wrapped = '\n'.join(wrapper.wrap('HINT: {0}'.format(hint)))

        display.display('\n{0}\n'.format(wrapped), color=C.COLOR_HIGHLIGHT)
Code example #42
File: CodeOutput.py Project: moble/PostNewtonian
    def CppEvaluateExpressions(self, Indent=4, Expressions=None):
        """Declare and define the `Expressions` for C++

        The output of this function declares are defines the
        `Expressions` as individual variables.  An optional dictionary
        of expressions allows just a subset of this object's
        expressions to be output; if this argument is not present, all
        will be output.

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' '*Indent
        wrapper.subsequent_indent = wrapper.initial_indent+'  '
        Evaluations = []
        if not Expressions:
            Expressions=self.Expressions
        for Expression in Expressions:
            try:
                Evaluations.append(wrapper.fill('{0}{1} {2} = {3};'.format(self.const(Expression), self.dtype(Expression),
                                                                           Expressions[Expression], Expression.ccode())))
            except TypeError:
                pass
        return '\n'.join(Evaluations)
Code example #43
File: CodeOutput.py Project: moble/PostNewtonian
    def CppInputArguments(self, Indent=12):
        """Create basic input arguments for C++

        The fundamental variables are listed, along with their data
        types and `const` if the variable is constant.  This would be
        an appropriate string to represent the input arguments for a
        function or class constructor to calculate the `Expressions`
        of this CodeConstructor object.

        For example, if the `Variables` object contains atoms m1, m2,
        t, and x referred to in the `Expressions` object, where m1 and
        m2 are constant, and t and x are variables, the input argument
        list should be

            const double m1_i, const double m2_i, double t_i, double x_i

        """
        from textwrap import TextWrapper
        wrapper = TextWrapper(width=120)
        wrapper.initial_indent = ' '*Indent
        wrapper.subsequent_indent = wrapper.initial_indent
        InputArguments = ['const {0} {1}_i'.format(self.dtype(atom), self.Variables[atom])
                          for atom in self.Atoms if atom.fundamental]
        return wrapper.fill(', '.join(InputArguments)).lstrip()
Code example #44
File: create_client_class.py Project: Godzil/gdemu
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with NEVRAX NEL; see the file COPYING.  If not, see
# <http://www.gnu.org/licenses/>.
# 
# $Id: create_client_class.py 979 2011-05-29 22:24:40Z kaetemi $
# 

import time, sys
from textwrap import TextWrapper

wrapper = TextWrapper()
indent = " * "
wrapper.initial_indent = indent
wrapper.subsequent_indent = indent
gmtime = time.gmtime(time.time())
filename = "NEEDED_FOR_buildLine"
newline = "\n"

print ""

print "This script will create .cpp and .h files for your class."
print "To use the defaults, simply hit ENTER, else type in the new value."

print ""

def askVar(name, default):
	sys.stdout.write(name + " (" + default + "): ")
	line = sys.stdin.readline()
Code example #45
File: plugin.py Project: jlorieau/mollib
    def process(self, molecules, args):
        """Process the SVD of molecules."""
        # Setup the configuration options
        if 'project_methyls' in args and args.project_methyls:
            settings.project_methyls = True
        if 'methyl_scale' in args and args.methyl_scale is not None:
            settings.methyl_order_parameter = args.methyl_scale
        if 'fix_sign' in args and args.fix_sign:
            settings.enable_signfixer = True
        if 'nofix_sign' in args and args.nofix_sign:
            settings.enable_signfixer = False
        if 'fix_nh_scale' in args and args.fix_nh_scale:
            settings.enable_nhscalefixer = True
        if 'nofix_nh_scale' in args and args.nofix_nh_scale:
            settings.enable_nhscalefixer = False
        if 'fix_outliers' in args and args.fix_outliers:
            settings.enable_outlierfixer = True
        if 'nofix_outliers' in args and args.nofix_outliers:
            settings.enable_outlierfixer = False

        # If specified, get the identifier for the dataset to use.
        set_id = args.set if 'set' in args else None

        # Process the partial alignment calculation
        if args.command == 'pa':
            # Get the alignment data
            data = {}
            for data_filename in args.data[0]:
                # verify that the file exists
                file_path = get_or_fetch(data_filename, extensions='mr.gz',
                                         urls=settings.mr_urls,
                                         critical=True)

                # Read the data from the file.
                data_dict = read_pa_file(file_path, set_id)
                data.update(data_dict)

            # If excluded interactions are specified, remove these.
            if args.exclude:
                data = {k:v for k, v in data.items()
                        if interaction_type(k) not in args.exclude}

            # verify that there is data in the data dict
            msg = "Could not find data in alignment data."
            check_not_empty(data=data, msg=msg, critical=True)

            # Prepare the magnetic interactions for the molecules
            labels = data.keys()
            process = Process(molecules)
            magnetic_interactions = process.process(labels=labels)

            # Apply the fixers to see if the input data can be improved
            fixer = Fixer(molecules)
            data_fixed, fixes = fixer.fix(data)
            data = data_fixed if data_fixed is not None else data

            # Conduct the SVD on the data
            (data_pred, Saupe_components,
             stats) = calc_pa_SVD(magnetic_interactions, data)

            # Prepare table of stats and fit values
            table = stats_table(stats)

            # Prepare a table of the observed and predicted data
            tables = report_tables(data, data_pred)

            if len(molecules) > 1:
                # Make title for stats table
                title = "Summary SVD Statistics for Molecules "
                title += word_list([m.fullname for m in molecules])
                table.title = title

                # Make title for the fit data table
                title = "Observed and Predicted RDCs and RACS for Molecules "
                title += word_list([m.fullname for m in molecules])
                tables['fit'].title = title

                # Make title for the back-calculated predicted data
                title = "Back-calculated RDCs and RACS for Molecules "
                title += word_list([m.fullname for m in molecules])
                tables['pred'].title = title
            else:
                # Make title for stats table
                title = "Summary SVD Statistics for Molecule "
                title += molecules[0].fullname
                table.title = title

                # Make title for the fit data table
                title = "Observed and Predicted RDCs and RACS for Molecule "
                title += molecules[0].fullname
                tables['fit'].title = title

                # Make title for the back-calculated predicted data
                title = "Back-calculated RDCs and RACS for Molecule "
                title += molecules[0].fullname
                tables['pred'].title = title

            # Prepare the standard output
            summary = table.content()
            output = tables['fit'].content()

            # Prepare and format the fixes listing
            if fixes:
                # Setup the text wrapper so that the lines of fixes do not
                # exceed the set maximum number of columns.
                wrapper = TextWrapper()
                wrapper.initial_indent = '* '
                wrapper.subsequent_indent = '  '
                wrapper.width = utils_settings.default_max_width

                fixes_wrapped = ['\n'.join(wrapper.wrap(fix)) for fix in fixes]
                fixes_output = '\n'.join(fixes_wrapped)
            else:
                fixes_output = ''

            # Print or write the report(s)
            print(summary)
            if args.out:
                output += fixes_output
                write_file('\n'.join((summary, output)), args.out)
            elif not args.summary:
                print(output)

            if fixes:
                print(fixes_output)

            # Write the predicted data
            if args.pred:
                write_file(tables['pred'].content(), args.pred)
Code example #46
File: create_class.py Project: kaetemi/draw_triangle
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import time, sys
from textwrap import TextWrapper

wrapper = TextWrapper()
indent = " * "
wrapper.initial_indent = indent
wrapper.subsequent_indent = indent
gmtime = time.gmtime(time.time())
filename = "NEEDED_FOR_buildLine"
newline = "\n"

print ""

print "This script will create .cpp and .h files for your class."
print "To use the defaults, simply hit ENTER, else type in the new value."

print ""


def askVar(name, default):
    sys.stdout.write(name + " (" + default + "): ")
Code example #47
File: string.py Project: bksim/mailman
def wrap(text, column=70, honor_leading_ws=True):
    """Wrap and fill the text to the specified column.

    The input text is wrapped and filled as done by the standard library
    textwrap module.  The differences here being that this function is capable
    of filling multiple paragraphs (as defined by text separated by blank
    lines).  Also, when `honor_leading_ws` is True (the default), paragraphs
    that being with whitespace are not wrapped.  This is the algorithm that
    the Python FAQ wizard used.
    """
    # First, split the original text into paragraph, keeping all blank lines
    # between them.
    paragraphs = []
    paragraph = []
    last_indented = False
    for line in text.splitlines(True):
        is_indented = (len(line) > 0 and line[0] in whitespace)
        if line == NL:
            if len(paragraph) > 0:
                paragraphs.append(EMPTYSTRING.join(paragraph))
            paragraphs.append(line)
            last_indented = False
            paragraph = []
        elif last_indented != is_indented:
            # The indentation level changed.  We treat this as a paragraph
            # break but no blank line will be issued between paragraphs.
            if len(paragraph) > 0:
                paragraphs.append(EMPTYSTRING.join(paragraph))
            # The next paragraph starts with this line.
            paragraph = [line]
            last_indented = is_indented
        else:
            # This line does not constitute a paragraph break.
            paragraph.append(line)
    # We've consumed all the lines in the original text.  Transfer the last
    # paragraph we were collecting to the full set of paragraphs.
    paragraphs.append(EMPTYSTRING.join(paragraph))
    # Now iterate through all paragraphs, wrapping as necessary.
    wrapped_paragraphs = []
    # The dedented wrapper.
    wrapper = TextWrapper(width=column,
                          fix_sentence_endings=True)
    # The indented wrapper.  For this one, we'll clobber initial_indent and
    # subsequent_indent as needed per indented chunk of text.
    iwrapper = TextWrapper(width=column,
                           fix_sentence_endings=True,
                           )
    add_paragraph_break = False
    for paragraph in paragraphs:
        if add_paragraph_break:
            wrapped_paragraphs.append(NL)
            add_paragraph_break = False
        paragraph_text = EMPTYSTRING.join(paragraph)
        # Just copy the blank lines to the final set of paragraphs.
        if len(paragraph) == 0 or paragraph == NL:
            wrapped_paragraphs.append(NL)
        # Choose the wrapper based on whether the paragraph is indented or
        # not.  Also, do not wrap indented paragraphs if honor_leading_ws is
        # set.
        elif paragraph[0] in whitespace:
            if honor_leading_ws:
                # Leave the indented paragraph verbatim.
                wrapped_paragraphs.append(paragraph_text)
            else:
                # The paragraph should be wrapped, but it must first be
                # dedented.  The leading whitespace on the first line of the
                # original text will be used as the indentation for all lines
                # in the wrapped text.
                for i, ch in enumerate(paragraph_text):
                    if ch not in whitespace:
                        break
                leading_ws = paragraph[:i]
                iwrapper.initial_indent = leading_ws
                iwrapper.subsequent_indent = leading_ws
                paragraph_text = dedent(paragraph_text)
                wrapped_paragraphs.append(iwrapper.fill(paragraph_text))
                add_paragraph_break = True
        else:
            # Fill this paragraph.  fill() consumes the trailing newline.
            wrapped_paragraphs.append(wrapper.fill(paragraph_text))
            add_paragraph_break = True
    return EMPTYSTRING.join(wrapped_paragraphs)
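
A quick usage sketch of the wrap() helper above (not part of the original module). It assumes the module-level constants the function relies on come from the standard string module, i.e. NL = '\n', EMPTYSTRING = '' and whitespace = string.whitespace, which are not shown in this excerpt:

from string import whitespace

NL = '\n'
EMPTYSTRING = ''

sample = (
    "This first paragraph is long enough that it gets re-filled "
    "to the requested column width by wrap().\n"
    "\n"
    "    This indented paragraph is copied verbatim because "
    "honor_leading_ws defaults to True.\n"
)
# Fills the first paragraph at 40 columns and leaves the indented one alone.
print(wrap(sample, column=40))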
Code example #48
0
File: mentags.py Project: Xunius/txt2evernote
def export2Evernote(annodf,verbose=True):
    '''Organize annotations by tags and send them to Evernote.

    <annodf>: pandas DataFrame. Annotations.
    '''

    geeknote=send2ever.GeekNoteConnector()
    geeknote.connectToEvertone()

    if verbose:
        print('\n# <export2Evernote>: Exporting all tagged annotations to Evernote')

    wrapper=TextWrapper()
    wrapper.width=70
    wrapper.initial_indent=''
    wrapper.subsequent_indent=int(len('> '))*' '

    wrapper2=TextWrapper()
    wrapper2.width=70
    wrapper2.initial_indent=''
    wrapper2.subsequent_indent='\t'+int(len('- Title: '))*' '

    taggroup=annodf.groupby('tags')
    tags=getField(annodf,'tags')
    
    #---------------------Get tags---------------------
    if len(tags)==0:
        print('\n# <export2Evernote>: No tags found in data.')
        return
    tags.sort()

    #---------------Put @None at the end---------------
    if '@None' in tags:
        tags.remove('@None')
        tags.append('@None')

    #----------------Loop through tags----------------
    for tagii in tags:

        if verbose:
            print('# <export2Evernote>: Get tag: %s.' %tagii)

        groupii=taggroup.get_group(tagii)
        citesii=getField(groupii,'cite')
        evercontentii=[]

        #--------------Loop through cite keys--------------
        for citejj in citesii:

            outstr=u'''\n## {0}:\n'''.format(conv(citejj))
            evercontentii.append(outstr)

            notesjj=groupii[groupii.cite==citejj]

            #-------------Loop through annotations-------------
            for kk in range(notesjj.shape[0]):
                notekk=notesjj.iloc[kk]
                #strkk=wrapper.fill(notekk.text)
                strkk=notekk.text
                title=wrapper2.fill(notekk.title)
                if notekk.type=='quote':
                    outstr=\
                    u'\n> {0}\n\n\t- Title: {1}\n\t- Ctime: {2}\n'\
                    .format(*map(conv,[strkk, title,notekk.ctime]))
                else:
                    outstr=\
                    u'\n- {0}\n\n\t- Title: {1}\n\t- Ctime: {2}\n'\
                    .format(*map(conv,[strkk, title,notekk.ctime]))

                evercontentii.append(outstr)

        #-----------------Send to Evernote-----------------
        send2ever.createNote(tagii,\
            ''.join(evercontentii),\
            tagii,'Tags summary',geeknote,skipnotebook=True)
        

    return  
Code example #49
0
File: string.py Project: TommyLike/mailman
def wrap(text, column=70, honor_leading_ws=True):
    """Wrap and fill the text to the specified column.

    The input text is wrapped and filled as done by the standard library
    textwrap module.  The differences here being that this function is capable
    of filling multiple paragraphs (as defined by text separated by blank
    lines).  Also, when `honor_leading_ws` is True (the default), paragraphs
    that begin with whitespace are not wrapped.  This is the algorithm that
    the Python FAQ wizard used.
    """
    # First, split the original text into paragraphs, keeping all blank lines
    # between them.
    paragraphs = []
    paragraph = []
    last_indented = False
    for line in text.splitlines(True):
        is_indented = (len(line) > 0 and line[0] in whitespace)
        if line == NL:
            if len(paragraph) > 0:
                paragraphs.append(EMPTYSTRING.join(paragraph))
            paragraphs.append(line)
            last_indented = False
            paragraph = []
        elif last_indented != is_indented:
            # The indentation level changed.  We treat this as a paragraph
            # break but no blank line will be issued between paragraphs.
            if len(paragraph) > 0:
                paragraphs.append(EMPTYSTRING.join(paragraph))
            # The next paragraph starts with this line.
            paragraph = [line]
            last_indented = is_indented
        else:
            # This line does not constitute a paragraph break.
            paragraph.append(line)
    # We've consumed all the lines in the original text.  Transfer the last
    # paragraph we were collecting to the full set of paragraphs, but only if
    # it's not empty.
    if len(paragraph) > 0:
        paragraphs.append(EMPTYSTRING.join(paragraph))
    # Now iterate through all paragraphs, wrapping as necessary.
    wrapped_paragraphs = []
    # The dedented wrapper.
    wrapper = TextWrapper(width=column,
                          fix_sentence_endings=True)
    # The indented wrapper.  For this one, we'll clobber initial_indent and
    # subsequent_indent as needed per indented chunk of text.
    iwrapper = TextWrapper(width=column,
                           fix_sentence_endings=True,
                           )
    add_paragraph_break = False
    for paragraph in paragraphs:
        if add_paragraph_break:
            wrapped_paragraphs.append(NL)
            add_paragraph_break = False
        paragraph_text = EMPTYSTRING.join(paragraph)
        # Just copy the blank lines to the final set of paragraphs.
        if len(paragraph) == 0 or paragraph == NL:
            wrapped_paragraphs.append(NL)
        # Choose the wrapper based on whether the paragraph is indented or
        # not.  Also, do not wrap indented paragraphs if honor_leading_ws is
        # set.
        elif paragraph[0] in whitespace:
            if honor_leading_ws:
                # Leave the indented paragraph verbatim.
                wrapped_paragraphs.append(paragraph_text)
            else:
                # The paragraph should be wrapped, but it must first be
                # dedented.  The leading whitespace on the first line of the
                # original text will be used as the indentation for all lines
                # in the wrapped text.
                for i, ch in enumerate(paragraph_text):   # pragma: no branch
                    if ch not in whitespace:
                        break
                leading_ws = paragraph[:i]
                iwrapper.initial_indent = leading_ws
                iwrapper.subsequent_indent = leading_ws
                paragraph_text = dedent(paragraph_text)
                wrapped_paragraphs.append(iwrapper.fill(paragraph_text))
                add_paragraph_break = True
        else:
            # Fill this paragraph.  fill() consumes the trailing newline.
            wrapped_paragraphs.append(wrapper.fill(paragraph_text))
            add_paragraph_break = True
    return EMPTYSTRING.join(wrapped_paragraphs)
Code example #50
0
    def _update_package_repo(self):
        ret = True
        mkdir_p(self._repo_dir)
        for name, details in package_list.items():
            if name not in self._packages:
                continue
            if details.get('disable', False):
                continue
            pkgrepo = details.get('pkgrepo', None)
            wheel = details.get('wheel', None)
            no_repo = details.get('no_repo', False)
            package_name = details.get('package_name', name)
            no_download = False
            repo_dir = None
            repo_ok = False
            if pkgrepo is None:
                if no_repo:
                    repo_dir = os.path.join(self._repo_dir, name.lower())
                    repo_ok = True
                    no_download = True
            elif pkgrepo == 'git':
                no_download = True
                pkgrepo_dir = details.get('pkgrepo_dir', None)
                if pkgrepo_dir:
                    repo_dir = os.path.join(self._repo_dir, pkgrepo_dir)
                    repo_ok = os.path.isdir(repo_dir)
                else:
                    pkgrepo_url = details.get('pkgrepo_url', None)
                    repo_dir = os.path.join(self._repo_dir, name.lower())
                    if os.path.isdir(repo_dir):
                        try:
                            (sts, stdoutdata, stderrdata) = runcmdAndGetData(args=['git', 'pull', 'origin'], cwd=repo_dir)
                            print(stdoutdata, stderrdata)
                        except FileNotFoundError as ex:
                            print('Cannot execute git.', file=sys.stderr)
                            sts = -1
                    else:
                        try:
                            (sts, stdoutdata, stderrdata) = runcmdAndGetData(args=['git', 'clone', pkgrepo_url, repo_dir], cwd=repo_dir)
                            print(stdoutdata, stderrdata)
                        except FileNotFoundError as ex:
                            print('Cannot execute git.', file=sys.stderr)
                            sts = -1
                    repo_ok = True if sts == 0 else False

            if repo_ok:
                print('Repository %s ok' % repo_dir)

                rev = None
                url = None
                if no_download:
                    download_ok = True
                else:
                    download_ok = False
                    download_subdir = details.get('repo_subdir', name.lower())
                    if '/' in download_subdir:
                        e = download_subdir.split('/')
                        download_subdir = e[-1]
                    pkg_download_tag_file = os.path.join(self._download_dir, '.' + name.lower() + '.tag')
                    archive = None
                    if os.path.isfile(pkg_download_tag_file):
                        f = IniFile(pkg_download_tag_file)
                        rev = f.getAsInteger(None, 'rev', None)
                        archive = f.get(None, 'archive', None)
                        url = f.get(None, 'url', None)
                        f.close()
                    if archive is not None:
                        download_subdir = os.path.basename(url)
                        i = download_subdir.find(archive)
                        if i >= 0:
                            download_subdir = download_subdir[0:i]
                            if download_subdir[-1] == '.':
                                download_subdir = download_subdir[0:-1]

                    if self._verbose:
                        print(self._download_dir, name.lower(), '' if download_subdir is None else download_subdir)
                    pkg_download_dir = os.path.join(self._download_dir, name.lower(), '' if download_subdir is None else download_subdir)
                    if os.path.isdir(pkg_download_dir):
                        print('Update %s from %s' % (name.lower(), pkg_download_dir))

                        if copy_and_overwrite(pkg_download_dir, repo_dir):
                            download_ok = True
                            delete_files = details.get('delete-files', [])
                            if delete_files:
                                for f in delete_files:
                                    full = os.path.join(repo_dir, f)
                                    if os.path.exists(full):
                                        try:
                                            os.unlink(full)
                                        except IOError as e:
                                            download_ok = False
                                            print('Unable to delete %s: %s' % (full, e), file=sys.stderr)
                    else:
                        print('Download directory %s missing' % pkg_download_dir, file=sys.stderr)

                if download_ok:
                    if self._wheel:
                        if wheel == 'skip':
                            print('Skip wheel for %s' % repo_dir)
                            ret = True
                        else:
                            if no_repo:
                                if not self._docker_build_wheel(package_name, pip_package=package_name, without_depends=False, force=self._force):
                                    print('Failed to create wheel for %s' % repo_dir, file=sys.stderr)
                                    ret = False
                                else:
                                    ret = True
                            else:
                                if not self._docker_build_wheel(package_name, repo_dir, force=self._force):
                                    print('Failed to create wheel for %s' % repo_dir, file=sys.stderr)
                                    ret = False
                                else:
                                    ret = True
                    else:
                        pc_dir = os.path.join(repo_dir, '.pc')
                        if os.path.isdir(pc_dir):
                            if self._verbose:
                                print('Delete directory %s' % (pc_dir))
                            rmdir_p(pc_dir)

                        debian_package_name = None
                        debian_package_version = None
                        debian_package_orig_version = None
                        debian_package_update_ok = False
                        debian_revision = None
                        setup_py_version = None
                        setup_py = os.path.join(repo_dir, 'setup.py')
                        try:
                            f = open(setup_py, 'r')
                            for line in f:
                                m = re_setup_py_version.search(line)
                                if m:
                                    setup_py_version = m.group(1)
                                    break
                            f.close()
                        except IOError as e:
                            print('Unable to open %s: %s' % (setup_py, e), file=sys.stderr)
                            pass

                        if setup_py_version:

                            if rev and url:
                                commit_msg = 'Automatic update %s from %s revision %i' % (setup_py_version, url, rev)
                            else:
                                commit_msg = 'Automatic update %s' % setup_py_version

                            source_format = None
                            source_format_version = None
                            source_format_filename = os.path.join(repo_dir, 'debian/source/format')
                            try:
                                f = open(source_format_filename, 'r')
                                line = f.readline().strip()
                                f.close()
                                m = re_source_format.search(line)
                                if m:
                                    source_format_version = m.group(1)
                                    source_format = m.group(2)
                            except IOError as e:
                                print('Unable to open %s: %s' % (source_format_filename, e), file=sys.stderr)
                                pass

                            dch_filename = os.path.join(repo_dir, 'debian/changelog')
                            dch_version = None
                            try:
                                import debian.changelog
                                from textwrap import TextWrapper
                                f = open(dch_filename, 'r')
                                dch = debian.changelog.Changelog(f)
                                f.close()
                                debian_package_name = dch.package
                                old_version = str(dch.version)
                                is_dfsg = old_version.find('dfsg') != -1
                                if rev:
                                    debian_package_orig_version = setup_py_version + '+svn%i' % rev
                                elif is_dfsg:
                                    debian_package_orig_version = setup_py_version + '+dfsg'
                                else:
                                    debian_package_orig_version = setup_py_version
                                new_version = debian_package_orig_version + '-'
                                if old_version.startswith(new_version):
                                    i = old_version.find('-')
                                    if i:
                                        debian_revision = old_version[i+1:] if i else 0
                                else:
                                    debian_revision = '0'

                                debian_revision = increment_debian_revision(debian_revision, strategy=details.get('debian-revision', 'major'))
                                new_version = new_version + debian_revision

                                debian_package_version = new_version
                                dch.new_block(
                                    package=debian_package_name,
                                    version=debian_package_version,
                                    distributions=self._distribution,
                                    urgency=dch.urgency,
                                    author="%s <%s>" % debian.changelog.get_maintainer(),
                                    date=debian.changelog.format_date()
                                )
                                wrapper = TextWrapper()
                                wrapper.initial_indent    = "  * "
                                wrapper.subsequent_indent = "    "
                                dch.add_change('')
                                for l in wrapper.wrap(commit_msg):
                                    dch.add_change(l)
                                dch.add_change('')
                                f = open(dch_filename, 'w')
                                f.write(str(dch))
                                #print(dch)
                                f.close()
                                debian_package_update_ok = True
                            except IOError as e:
                                print('Unable to open %s: %s' % (dch_filename, e), file=sys.stderr)
                                pass
                        else:
                            print('Failed to get version from %s.' % setup_py, file=sys.stderr)
                            ret = False

                        if debian_package_update_ok:

                            if pkgrepo == 'git':
                                try:
                                    (sts, stdoutdata, stderrdata) = runcmdAndGetData(args=['git', 'commit', '-am', commit_msg], cwd=repo_dir)
                                except FileNotFoundError as ex:
                                    print('Cannot execute git.', file=sys.stderr)
                                    ret = False
                                    sts = -1

                                orig_archive_format = details.get('orig-archive-format', 'tar.xz')
                                if source_format == 'native':
                                    orig_archive_source = details.get('orig-archive-source', 'git')
                                else:
                                    orig_archive_source = details.get('orig-archive-source', 'directory')

                                prefix = debian_package_name + '-' + debian_package_orig_version
                                pkgfile = os.path.join(repo_dir, '..', debian_package_name + '_' + debian_package_orig_version + '.orig.' + orig_archive_format)
                                if orig_archive_source == 'git':
                                    if orig_archive_format == 'tar.xz':
                                        if not git_archive_xz(repo_dir, pkgfile, prefix):
                                            print('Failed to create %s.' % pkgfile, file=sys.stderr)
                                            ret = False
                                    elif orig_archive_format == 'tar.gz':
                                        if not git_archive_gz(repo_dir, pkgfile, prefix):
                                            print('Failed to create %s.' % pkgfile, file=sys.stderr)
                                            ret = False
                                    else:
                                        print('Invalid package orig archive format \'%s\' specified.' % orig_archive_format, file=sys.stderr)
                                        ret = False
                                elif orig_archive_source == 'directory':
                                    i = orig_archive_format.find('.')
                                    if i > 0:
                                        format = orig_archive_format[i+1:]
                                    else:
                                        format = 'xz'
                                    make_tarfile(repo_dir, pkgfile, prefix, format=format)

                                else:
                                    print('Invalid package orig archive source \'%s\' specified.' % orig_archive_source, file=sys.stderr)
                                    ret = False
                        else:
                            if self._verbose:
                                print('Debian package update failed.', file=sys.stderr)
                            ret = False
                else:
                    print('Download for %s failed' % repo_dir, file=sys.stderr)
                    ret = False
            else:
                print('Repository %s failed' % repo_dir, file=sys.stderr)
                ret = False
        return ret
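
The changelog handling above boils down to: take the upstream version from setup.py, append '+svn<rev>' or '+dfsg' when applicable, and bump the Debian revision that follows the '-'. A rough standalone sketch of that version arithmetic, with a plain +1 standing in for the project-specific increment_debian_revision helper (an assumption; the real helper also takes a strategy argument):

def bump_debian_version(old_version, upstream_version, rev=None):
    # Mirror the orig-version rules used above (sketch only).
    if rev is not None:
        orig = '%s+svn%i' % (upstream_version, rev)
    elif 'dfsg' in old_version:
        orig = upstream_version + '+dfsg'
    else:
        orig = upstream_version
    prefix = orig + '-'
    if old_version.startswith(prefix):
        revision = old_version[len(prefix):]
    else:
        revision = '0'
    # Stand-in for increment_debian_revision(revision, strategy=...).
    revision = str(int(revision.split('.')[0]) + 1)
    return prefix + revision

print(bump_debian_version('1.4.2+svn100-3', '1.4.2', rev=105))  # 1.4.2+svn105-1
print(bump_debian_version('1.4.2+svn100-3', '1.4.2', rev=100))  # 1.4.2+svn100-4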
Code example #51
0
File: extracttags.py Project: Xunius/Menotexport
def exportAnno(annodict,outdir,action,verbose=True):
    '''Export annotations grouped by tags

    '''

    #-----------Export all to a single file-----------
    if 'm' in action and 'n' not in action:
        fileout='Mendeley_highlights_by_tags.txt'
    elif 'n' in action and 'm' not in action:
        fileout='Mendeley_notes_by_tags.txt'
    elif 'm' in action and 'n' in action:
        fileout='Mendeley_annotations_by_tags.txt'

    abpath_out=os.path.join(outdir,fileout)
    if os.path.isfile(abpath_out):
        os.remove(abpath_out)

    if verbose:
        printHeader('Exporting all tagged annotations to:',3)
        printInd(abpath_out,4)

    conv=lambda x:unicode(x)

    wrapper=TextWrapper()
    wrapper.width=70
    wrapper.initial_indent=''
    #wrapper.subsequent_indent='\t\t'+int(len('> '))*' '
    wrapper.subsequent_indent='\t\t'

    wrapper2=TextWrapper()
    wrapper2.width=60
    wrapper2.initial_indent=''
    #wrapper2.subsequent_indent='\t\t\t'+int(len('Title: '))*' '
    wrapper2.subsequent_indent='\t\t\t'

    with open(abpath_out, mode='a') as fout:

        #----------------Loop through tags----------------
        tags=annodict.keys()
        if len(tags)==0:
            return
        tags.sort()
        #---------------Put @None at the end---------------
        if '@None' in tags:
            tags.remove('@None')
            tags.append('@None')

        for tagii in tags:

            citedictii=annodict[tagii]
            outstr=u'''\n\n{0}\n# {1}'''.format(int(80)*'-', conv(tagii))
            outstr=outstr.encode('ascii','replace')
            fout.write(outstr)

            #--------------Loop through cite keys--------------
            for citejj, annosjj in citedictii.items():
                hljj=annosjj['highlights']
                ntjj=annosjj['notes']

                outstr=u'''\n\n\t@{0}:'''.format(conv(citejj))
                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)

                #-----------------Write highlights-----------------
                if len(hljj)>0:

                    #-------------Loop through highlights-------------
                    for hlkk in hljj:
                        hlstr=wrapper.fill(hlkk.text)
                        title=wrapper2.fill(hlkk.title)
                        outstr=u'''
\n\t\t> {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[hlstr, title,\
                      hlkk.ctime]))

                        outstr=outstr.encode('ascii','replace')
                        fout.write(outstr)

                #-----------------Write notes-----------------
                if len(ntjj)>0:

                    #----------------Loop through notes----------------
                    for ntkk in ntjj:
                        ntstr=wrapper.fill(ntkk.text)
                        title=wrapper2.fill(ntkk.title)
                        outstr=u'''
\n\t\t- {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[ntstr, title,\
                    ntkk.ctime]))

                        outstr=outstr.encode('ascii','replace')
                        fout.write(outstr)
Code example #52
0
File: utils.py Project: watchdogpolska/feder
def email_wrapper(text):
    wrapper = TextWrapper()
    wrapper.subsequent_indent = '> '
    wrapper.initial_indent = '> '
    return "\n".join(wrapper.wrap(text))
Code example #53
0
File: mentags.py Project: Xunius/txt2evernote
def export2Txt(annodf,abpath_out,verbose=True):
    '''Organize annotations by tags and save to txt.

    <annodf>: pandas DataFrame. Annotations.
    <abpath_out>: str, absolute path to output txt.
    '''

    if os.path.isfile(abpath_out):
        os.remove(abpath_out)

    if verbose:
        print('\n# <export2Txt>: Exporting all tagged annotations to:')
        print(abpath_out)

    wrapper=TextWrapper()
    wrapper.width=70
    wrapper.initial_indent=''
    #wrapper.subsequent_indent='\t\t'+int(len('> '))*' '
    wrapper.subsequent_indent='\t\t'

    wrapper2=TextWrapper()
    wrapper2.width=60
    wrapper2.initial_indent=''
    #wrapper2.subsequent_indent='\t\t\t'+int(len('- Title: '))*' '
    wrapper2.subsequent_indent='\t\t\t'

    taggroup=annodf.groupby('tags')
    tags=getField(annodf,'tags')
    
    #---------------------Get tags---------------------
    if len(tags)==0:
        print('\n# <export2Txt>: No tags found in data.')
        return
    tags.sort()

    #---------------Put @None at the end---------------
    if '@None' in tags:
        tags.remove('@None')
        tags.append('@None')

    with open(abpath_out, mode='a') as fout:

        #----------------Loop through tags----------------
        for tagii in tags:

            if verbose:
                print('# <export2Txt>: Get tag: %s.' %tagii)

            outstr=u'''\n\n{0}\n# {1}'''.format(int(80)*'-', conv(tagii))
            outstr=outstr.encode('ascii','replace')
            fout.write(outstr)

            groupii=taggroup.get_group(tagii)
            citesii=getField(groupii,'cite')

            #--------------Loop through cite keys--------------
            for citejj in citesii:

                outstr=u'''\n\n\t{0}:'''.format(conv(citejj))
                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)
                notesjj=groupii[groupii.cite==citejj]

                #-------------Loop through annotations-------------
                for kk in range(notesjj.shape[0]):
                    notekk=notesjj.iloc[kk]
                    strkk=wrapper.fill(notekk.text)
                    title=wrapper2.fill(notekk.title)
                    if notekk.type=='quote':
                        outstr=u'''
\n\t\t> {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[strkk, title,\
                  notekk.ctime]))
                    else:
                        outstr=u'''
\n\t\t- {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[strkk, title,\
                  notekk.ctime]))


                    outstr=outstr.encode('ascii','replace')
                    fout.write(outstr)

            

    return  
Code example #54
0
File: setup.py Project: aroth-arsoft/cef-debian-pkgs
    def _update_package_repo(self):
        ret = True
        mkdir_p(self._repo_dir)
        for name, details in package_list.items():
            if name not in self._packages:
                continue
            if details.get('disable', False):
                continue
            site = site_list.get(details.get('site', None), None)
            url = details.get('site_download_url')
            version = details.get('version', None)
            last_build = details.get('last_build', None)
            if url:
                if version is not None:
                    url = url.replace('${version}',
                                      urllib.parse.quote_plus(str(version)))
                if last_build is not None:
                    url = url.replace('${last_build}',
                                      urllib.parse.quote_plus(str(last_build)))
                basename = urllib.parse.unquote(os.path.basename(url))
            else:
                basename = None
            site_download = site.get('download', None)
            site_archive = site.get('archive', None)

            repo_dir = os.path.join(self._repo_dir, name.lower())
            repo_ok = os.path.isdir(repo_dir)

            repo_debian_dir = os.path.join(repo_dir, 'debian')

            values = {'cef:ABI': version}

            copy_and_configure(self._debian_dir,
                               repo_debian_dir,
                               values=values,
                               ignore=shutil.ignore_patterns(
                                   'changelog', '.git*'))

            if basename is None:
                if site_archive is None:
                    filename = name.lower()
                    filename += '.zip'
                elif last_build is not None:
                    filename = name.lower() + '_%s.%s' % (last_build,
                                                          site_archive)
                else:
                    filename = name.lower() + '_%s.%s' % (version,
                                                          site_archive)
            else:
                filename = basename
            download_file = os.path.join(self._download_dir, filename)
            download_ok = os.path.isfile(download_file)

            if not download_ok:
                print('Download file %s is missing' % (download_file),
                      file=sys.stderr)

            elif repo_ok:
                print('Repository %s ok' % repo_dir)
                download_subdir = details.get('repo_subdir', name.lower())

                debian_package_name = 'cef%i' % version

                orig_file = os.path.join(
                    repo_dir, '../%s_%s.orig.%s' %
                    (debian_package_name, last_build, site_archive))
                if self._verbose:
                    print('Use orig archive file: %s' % orig_file)

                if not os.path.isfile(orig_file):
                    if self._verbose:
                        print('Copy %s to %s' % (download_file, orig_file))
                    if not copyfile(download_file, orig_file):
                        print('Failed to copy file %s to %s' %
                              (download_file, orig_file),
                              file=sys.stderr)
                        repo_ok = False
                    else:
                        # Extract all the contents of zip file in different directory
                        prefix = basename
                        if site_archive and prefix.endswith(site_archive):
                            prefix = prefix[:-len(site_archive) - 1]
                        if self._verbose:
                            print('Extract %s to %s (prefix %s)' %
                                  (orig_file, repo_dir, prefix))
                        if not extract_archive(
                                orig_file, repo_dir, prefix=prefix):
                            print('Failed to extract %s to %s' %
                                  (orig_file, repo_dir),
                                  file=sys.stderr)
                            repo_ok = False

                if repo_ok:
                    print('Prepare build of %s' % (name.lower()))

                    pc_dir = os.path.join(repo_dir, '.pc')
                    if os.path.isdir(pc_dir):
                        if self._verbose:
                            print('Delete directory %s' % (pc_dir))
                        rmdir_p(pc_dir)

                    debian_package_version = None
                    debian_package_orig_version = None
                    debian_package_update_ok = False
                    debian_revision = None
                    cef_version = None
                    cef_version_h = os.path.join(repo_dir,
                                                 'include/cef_version.h')
                    try:
                        f = open(cef_version_h, 'r')
                        for line in f:
                            m = re_cef_version_h.search(line)
                            if m:
                                cef_version = m.group(1)
                                break
                        f.close()
                    except IOError as e:
                        print('Unable to open %s: %s' % (cef_version_h, e),
                              file=sys.stderr)
                        pass

                    if cef_version:

                        commit_msg = 'Automatic update %s' % cef_version

                        source_format = None
                        source_format_version = None
                        source_format_filename = os.path.join(
                            repo_dir, 'debian/source/format')
                        try:
                            f = open(source_format_filename, 'r')
                            line = f.readline().strip()
                            f.close()
                            m = re_source_format.search(line)
                            if m:
                                source_format_version = m.group(1)
                                source_format = m.group(2)
                        except IOError as e:
                            print('Unable to open %s: %s' %
                                  (source_format_filename, e),
                                  file=sys.stderr)
                            pass

                        dch_filename = os.path.join(repo_dir,
                                                    'debian/changelog')
                        dch_version = None
                        try:
                            import debian.changelog
                            from textwrap import TextWrapper
                            f = open(dch_filename, 'r')
                            dch = debian.changelog.Changelog(f)
                            f.close()
                            old_version = str(dch.version)
                            debian_package_orig_version = cef_version
                            new_version = debian_package_orig_version + '-'
                            #print('old_version %s' % old_version)
                            #print('new_version %s' % new_version)
                            if old_version.startswith(new_version):
                                i = old_version.rfind('-')
                                if i:
                                    debian_revision = old_version[
                                        i + 1:] if i else 0
                            else:
                                debian_revision = '0'

                            debian_revision = increment_debian_revision(
                                debian_revision,
                                strategy=details.get('debian-revision',
                                                     'major'))
                            #print('debian_revision %s' % debian_revision)
                            new_version = new_version + debian_revision
                            #print('new_version %s' % new_version)

                            debian_package_version = new_version
                            dch.new_block(package=debian_package_name,
                                          version=debian_package_version,
                                          distributions=self._distribution,
                                          urgency=dch.urgency,
                                          author="%s <%s>" %
                                          debian.changelog.get_maintainer(),
                                          date=debian.changelog.format_date())
                            wrapper = TextWrapper()
                            wrapper.initial_indent = "  * "
                            wrapper.subsequent_indent = "    "
                            dch.add_change('')
                            for l in wrapper.wrap(commit_msg):
                                dch.add_change(l)
                            dch.add_change('')
                            f = open(dch_filename, 'w')
                            f.write(str(dch))
                            #print(dch)
                            f.close()
                            debian_package_update_ok = True
                        except IOError as e:
                            print('Unable to open %s: %s' % (dch_filename, e),
                                  file=sys.stderr)
                            pass
                    else:
                        print('Failed to get version from %s.' % cef_version_h,
                              file=sys.stderr)
                        ret = False

                else:
                    print('Failed to prepare repository %s' % repo_dir,
                          file=sys.stderr)
            else:
                print('Repository %s failed' % repo_dir, file=sys.stderr)
                ret = False
        return ret
Code example #55
0
    def process_javadoc(self, side):
        """Add CSV descriptions to methods and fields as javadoc"""
        pathsrclk = {0: self.srcclient, 1: self.srcserver}

        # HINT: We read the relevant CSVs
        with open(self.csvmethods, 'r') as methods_file, open(self.csvfields, 'r') as fields_file:
            methodsreader = csv.DictReader(methods_file)
            fieldsreader = csv.DictReader(fields_file)

            methods = {}
            for row in methodsreader:
                # HINT: Only include methods that have a non-empty description
                if int(row['side']) == side and 'desc' in row and row['desc']:
                    methods[row['searge']] = row['desc'].replace('*/', '* /')

            fields = {}
            for row in fieldsreader:
                # HINT: Only include fields that have a non-empty description
                if int(row['side']) == side and 'desc' in row and row['desc']:
                    fields[row['searge']] = row['desc'].replace('*/', '* /')

        regexps = {
            'field': re.compile(r'^ {4}(?:[\w$.[\]]+ )*(?P<name>field_[0-9]+_[a-zA-Z_]+) *(?:=|;)'),
            'method': re.compile(r'^ {4}(?:[\w$.[\]]+ )*(?P<name>func_[0-9]+_[a-zA-Z_]+)\('),
        }
        wrapper = TextWrapper(width=120)

        # HINT: We pathwalk the sources
        for path, _, filelist in os.walk(pathsrclk[side], followlinks=True):
            for cur_file in fnmatch.filter(filelist, '*.java'):
                src_file = os.path.normpath(os.path.join(path, cur_file))
                tmp_file = src_file + '.tmp'
                with open(src_file, 'r') as fh:
                    buf_in = fh.readlines()

                buf_out = []
                # HINT: Look for method/field declarations in this file
                for line in buf_in:
                    fielddecl = regexps['field'].match(line)
                    methoddecl = regexps['method'].match(line)
                    if fielddecl:
                        prev_line = buf_out[-1].strip()
                        indent = '    '
                        name = fielddecl.group('name')
                        if name in fields:
                            desc = fields[name]
                            if len(desc) < 70:
                                if prev_line != '' and prev_line != '{':
                                    buf_out.append('\n')
                                buf_out.append(indent + '/** ')
                                buf_out.append(desc)
                                buf_out.append(' */\n')
                            else:
                                wrapper.initial_indent = indent + ' * '
                                wrapper.subsequent_indent = indent + ' * '
                                if prev_line != '' and prev_line != '{':
                                    buf_out.append('\n')
                                buf_out.append(indent + '/**\n')
                                buf_out.append(wrapper.fill(desc) + '\n')
                                buf_out.append(indent + ' */\n')
                    elif methoddecl:
                        prev_line = buf_out[-1].strip()
                        indent = '    '
                        name = methoddecl.group('name')
                        if name in methods:
                            desc = methods[name]
                            wrapper.initial_indent = indent + ' * '
                            wrapper.subsequent_indent = indent + ' * '
                            if prev_line != '' and prev_line != '{':
                                buf_out.append('\n')
                            buf_out.append(indent + '/**\n')
                            buf_out.append(wrapper.fill(desc) + '\n')
                            buf_out.append(indent + ' */\n')
                    buf_out.append(line)

                with open(tmp_file, 'w') as fh:
                    fh.writelines(buf_out)
                shutil.move(tmp_file, src_file)
        return True
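
The javadoc emission above comes down to pointing both initial_indent and subsequent_indent at the same indent + ' * ' string, so every continuation line keeps the comment marker. A standalone sketch of that formatting step, with an invented description string:

from textwrap import TextWrapper

indent = '    '
wrapper = TextWrapper(width=120)
wrapper.initial_indent = indent + ' * '
wrapper.subsequent_indent = indent + ' * '

desc = ('Returns the metadata value associated with this block, clamped to the '
        'valid range and recomputed whenever a neighbouring block changes.')
# Emits a javadoc block: opening line, wrapped body lines, closing line.
print(indent + '/**')
print(wrapper.fill(desc))
print(indent + ' */')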
Code example #56
0
File: menotexport.py Project: samuell/Menotexport
def _exportAnnoFile(abpath_out,anno,verbose=True):
    '''Export annotations in a single PDF

    <abpath_out>: str, absolute path to output txt file.
    <anno>: list, in the form [highlight_list, note_list].
            highlight_list and note_list are both lists of
            Anno objs (see extracthl.py), containing highlights
            and notes in TEXT format with metadata. To be distinguished
            from FileAnno objs, which contain text coordinates.
            if highlight_list or note_list is [], no such info
            in this PDF.

    Function takes annotations from <anno> and output to the target txt file
    in the following format:

    -----------------------------------------------------
    # Title of PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time
    
    -----------------------------------------------------
    # Title of another PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time

    Use tabs in indention, and markup syntax: ">" for highlights, and "-" for notes.

    Update time: 2016-02-24 13:59:56.
    '''

    conv=lambda x:unicode(x)

    wrapper=TextWrapper()
    wrapper.width=80
    wrapper.initial_indent=''
    wrapper.subsequent_indent='\t'+int(len('> '))*' '

    wrapper2=TextWrapper()
    wrapper2.width=80-7
    wrapper2.initial_indent=''
    wrapper2.subsequent_indent='\t\t'+int(len('- Tags: '))*' '

    hlii,ntii=anno
    try:
        titleii=hlii[0].title
    except:
        titleii=ntii[0].title

    outstr=u'\n\n{0}\n# {1}'.format(int(80)*'-',conv(titleii))

    with open(abpath_out, mode='a') as fout:
        outstr=outstr.encode('ascii','replace')
        fout.write(outstr)

        #-----------------Write highlights-----------------
        if len(hlii)>0:

            if verbose:
                print('\n# <mennoteexport>: Exporting highlights in:')
                print(titleii)

            #-------------Loop through highlights-------------
            for hljj in hlii:
                hlstr=wrapper.fill(hljj.text)
                tagstr=', '.join(['@'+kk for kk in hljj.tags])
                tagstr=wrapper2.fill(tagstr)
                outstr=u'''
\n\t> {0}

\t\t- @{1}
\t\t- Tags: {2}
\t\t- Ctime: {3}
'''.format(*map(conv,[hlstr, hljj.citationkey,\
    tagstr, hljj.ctime]))

                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)

        #-----------------Write notes-----------------
        if len(ntii)>0:

            if verbose:
                print('\n# <mennoteexport>: Exporting notes in:')
                print(titleii)

            #----------------Loop through notes----------------
            for ntjj in ntii:
                ntstr=wrapper.fill(ntjj.text)
                tagstr=', '.join(['@'+kk for kk in ntjj.tags])
                tagstr=wrapper2.fill(tagstr)
                outstr=u'''
\n\t- {0}

\t\t- @{1}
\t\t\t- Tags: {2}
\t\t\t- Ctime: {3}
'''.format(*map(conv,[ntstr, ntjj.citationkey,\
    tagstr, ntjj.ctime]))

                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)
Code example #57
0
File: utils.py Project: rwakulszowa/feder
def email_wrapper(text):
    wrapper = TextWrapper()
    wrapper.subsequent_indent = "> "
    wrapper.initial_indent = "> "
    return "\n".join(wrapper.wrap(text))
Code example #58
-19
    def __str__(self):
        if self.networks and len(self.networks) > 1:
            lines = ["Nexus dataset '%s' (#%s) with %d networks" % \
                    (self.sid, self.id, len(self.networks))]
        else:
            lines = ["Nexus dataset '%(sid)s' (#%(id)s)" % self.__dict__]

        lines.append("vertices/edges: %s" % self.vertices_edges)

        if self.name:
            lines.append("name: %s" % self.name)
        if self.tags:
            lines.append("tags: %s" % "; ".join(self.tags))

        if self.rest:
            wrapper = TextWrapper(width=76, subsequent_indent='  ')

            keys = sorted(self.rest.iterkeys())
            if "attribute" in self.rest:
                keys.remove("attribute")
                keys.append("attribute")

            for key in keys:
                for value in self.rest.getlist(key):
                    paragraphs = str(value).splitlines()
                    wrapper.initial_indent = "%s: " % key
                    for paragraph in paragraphs:
                        ls = wrapper.wrap(paragraph)
                        if ls:
                            lines.extend(wrapper.wrap(paragraph))
                        else:
                            lines.append("  .")
                        wrapper.initial_indent = "  "

        return "\n".join(lines)