Code example #1
    def test_fix_sentence_endings(self):
        wrapper = TextWrapper(60, fix_sentence_endings=True)

        # SF #847346: ensure that fix_sentence_endings=True does the
        # right thing even on input short enough that it doesn't need to
        # be wrapped.
        text = "A short line. Note the single space."
        expect = ["A short line.  Note the single space."]
        self.check(wrapper.wrap(text), expect)

        # Test some of the hairy end cases that _fix_sentence_endings()
        # is supposed to handle (the easy stuff is tested in
        # test_whitespace() above).
        text = "Well, Doctor? What do you think?"
        expect = ["Well, Doctor?  What do you think?"]
        self.check(wrapper.wrap(text), expect)

        text = "Well, Doctor?\nWhat do you think?"
        self.check(wrapper.wrap(text), expect)

        text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
        expect = ['I say, chaps!  Anyone for "tennis?"  Hmmph!']
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 20
        expect = ["I say, chaps!", 'Anyone for "tennis?"', "Hmmph!"]
        self.check(wrapper.wrap(text), expect)

        text = 'And she said, "Go to hell!"\nCan you believe that?'
        expect = ['And she said, "Go to', 'hell!"  Can you', "believe that?"]
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 60
        expect = ['And she said, "Go to hell!"  Can you believe that?']
        self.check(wrapper.wrap(text), expect)
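
A minimal stand-alone sketch of the option being tested (using only the standard textwrap module): with fix_sentence_endings=True the wrapper inserts two spaces after sentence-ending punctuation.

from textwrap import TextWrapper

# Not taken from the test suite above; a small illustration of the option.
wrapper = TextWrapper(width=60, fix_sentence_endings=True)
print(wrapper.fill("Well, Doctor? What do you think?"))
# prints: Well, Doctor?  What do you think?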
Code example #2
File: ui.py Project: PixelNoob/piston
def dump_recursive_comments(rpc,
                            post_author,
                            post_permlink,
                            depth=0,
                            format="markdown"):
    global currentThreadDepth
    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (depth + currentThreadDepth)
    postWrapper.subsequent_indent = "  " * (depth + currentThreadDepth)

    depth = int(depth)

    posts = rpc.get_content_replies(post_author, post_permlink)
    for post in posts:
        meta = {}
        for key in ["author", "permlink"]:
            meta[key] = post[key]
        meta["reply"] = "@{author}/{permlink}".format(**post)
        if format == "markdown":
            body = markdownify(post["body"])
        else:
            body = post["body"]
        yaml = frontmatter.Post(body, **meta)
        print(frontmatter.dumps(yaml))
        reply = rpc.get_content_replies(post["author"], post["permlink"])
        if len(reply):
            dump_recursive_comments(rpc, post["author"], post["permlink"], depth + 1)
Code example #3
def wrap_for_make(items):
    line = join(sorted(items))
    wrapper = TextWrapper()
    wrapper.width = 60
    wrapper.break_on_hyphens = False
    wrapper.subsequent_indent = '\t' * 2
    return ' \\\n'.join(wrapper.wrap(line))
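
The bare join() above presumably comes from an import not shown. A hedged, self-contained version of the same pattern (the item names are made up) wraps a long dependency list and glues the wrapped pieces together with make-style backslash continuations:

from textwrap import TextWrapper

def wrap_for_make(items):
    line = ' '.join(sorted(items))      # assumes join() in the original is a plain space join
    wrapper = TextWrapper()
    wrapper.width = 60
    wrapper.break_on_hyphens = False    # keep names like long-target-name.o intact
    wrapper.subsequent_indent = '\t' * 2
    return ' \\\n'.join(wrapper.wrap(line))

print(wrap_for_make(['lib%02d.o' % i for i in range(12)]))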
Code example #4
File: ui.py Project: PixelNoob/piston
def list_posts(discussions):
        t = PrettyTable([
            "identifier",
            "title",
            "category",
            "replies",
            # "votes",
            "payouts",
        ])
        t.align = "l"
        t.align["payouts"] = "r"
        # t.align["votes"] = "r"
        t.align["replies"] = "c"
        for d in discussions:
            identifier = "@%s/%s" % (d["author"], d["permlink"])
            identifier_wrapper = TextWrapper()
            identifier_wrapper.width = 60
            identifier_wrapper.subsequent_indent = " "

            t.add_row([
                identifier_wrapper.fill(identifier),
                identifier_wrapper.fill(d["title"]),
                d["category"],
                d["children"],
                # d["net_rshares"],
                d["pending_payout_value"],
            ])
        print(t)
Code example #5
File: tools.py Project: Xunius/Menotexport
def printHeader(s, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs={1: '=', 2: '-', 3: '.'}
    indents={1: 0, 2: 4, 3: 8}

    dec=decs[level]
    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length-ind
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    #-------------Get delimiter line-------------
    hline='%s%s' %(' '*int(ind),dec*int(length-ind)) 

    #--------------------Wrap texts--------------------
    strings=wrapper.wrap('%s %s' %(prefix,s))

    #----------------------Print----------------------
    try:
        print('\n'+hline)
    except:
        print('\n'+hline.encode('ascii','replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii','replace'))
    #print(hline)

    return
Code example #6
    def _stream_formatter(self, record):
        """The formatter for standard output."""
        if record.levelno < logging.DEBUG:
            print(record.levelname, end='')
        elif(record.levelno < logging.INFO):
            colourPrint(record.levelname, 'green', end='')
        elif(record.levelno < IMPORTANT):
            colourPrint(record.levelname, 'magenta', end='')
        elif(record.levelno < logging.WARNING):
            colourPrint(record.levelname, 'lightblue', end='')
        elif(record.levelno < logging.ERROR):
            colourPrint(record.levelname, 'brown', end='')
        else:
            colourPrint(record.levelname, 'red', end='')

        if record.levelno == logging.WARN:
            message = '{0}'.format(record.msg[record.msg.find(':')+2:])
        else:
            message = '{0}'.format(record.msg)

        if len(message) > self.wrapperLength:
            tw = TextWrapper()
            tw.width = self.wrapperLength
            tw.subsequent_indent = ' ' * (len(record.levelname)+2)
            tw.break_on_hyphens = False
            message = '\n'.join(tw.wrap(message))

        print(': ' + message)
Code example #7
File: ui.py Project: PixelNoob/piston
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"], post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"], post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
Code example #8
File: EMBLContig.py Project: JTumelty/gff3toembl
 def default_attribute_formatter(self, key, value):
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   attribute_text_template='/{attribute_key}="{attribute_value}"'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
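
The formatter above keeps every continuation line inside the EMBL feature table by indenting with the "FT" prefix. A minimal sketch of just the wrapping step, with a hypothetical qualifier value:

from textwrap import TextWrapper

wrapper = TextWrapper(width=79,
                      initial_indent='FT                   ',
                      subsequent_indent='FT                   ')
# Hypothetical qualifier, used only to show the wrapped layout.
attribute_text = '/product="hypothetical protein with an unusually long descriptive name"'
print(wrapper.fill(attribute_text))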
Code example #9
File: asciioutput.py Project: jmtoball/StudICLI
	def __init__(self, width=80):
		""" Sets up the class and configures the utilized Textwrapper-object"""
		self.width = width
		wrapper = TextWrapper()
		wrapper.width = width	
		wrapper.replace_whitespace = False	
		wrapper.drop_whitespace = False	
		self.wrapper = wrapper
Code example #10
File: EMBLContig.py Project: JTumelty/gff3toembl
 def number_attribute_formatter(self, key, value):
   # transl_table attributes do not have their values in quotes
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   attribute_text_template='/{attribute_key}={attribute_value}'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
Code example #11
File: EMBLContig.py Project: JTumelty/gff3toembl
 def header_attribute_formatter(self, key, header_text, quote_character, final_character):
   wrapper = TextWrapper()
   wrapper.initial_indent=key + '   '
   wrapper.subsequent_indent=key + '   '
   wrapper.width=79
   attribute_text_template='{attribute_quote_character}{attribute_header_text}{attribute_quote_character}{attribute_final_character}'
   attribute_text=attribute_text_template.format(attribute_header_text = header_text, 
                                                 attribute_quote_character = quote_character, 
                                                 attribute_final_character = final_character)
   return wrapper.fill(attribute_text)
Code example #12
File: EMBLContig.py Project: JTumelty/gff3toembl
 def product_attribute_formatter(self, key, value):
   # Products can include very long enzyme names which we don't want to break
   wrapper = TextWrapper()
   wrapper.initial_indent='FT                   '
   wrapper.subsequent_indent='FT                   '
   wrapper.width=79
   wrapper.break_on_hyphens=True
   attribute_text_template='/{attribute_key}="{attribute_value}"'
   attribute_text=attribute_text_template.format(attribute_key=key, attribute_value=value)
   return wrapper.fill(attribute_text)
Code example #13
def linewrap(width = None):
    """Returns a function that wraps long lines to a max of 251 characters.
	Note that this function returns a list of lines, which is suitable as the
	argument for the spss.Submit function.
    """
    wrapper = TextWrapper()
    wrapper.width = width or 251
    wrapper.replace_whitespace = True
    wrapper.break_long_words = False
    wrapper.break_on_hyphens = False
    return wrapper.wrap
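
A usage sketch for the factory above (the command text is made up); calling the returned function gives the list of lines expected by spss.Submit:

from textwrap import TextWrapper

def linewrap(width=None):
    # same settings as above: never break long tokens or hyphenated words
    wrapper = TextWrapper()
    wrapper.width = width or 251
    wrapper.replace_whitespace = True
    wrapper.break_long_words = False
    wrapper.break_on_hyphens = False
    return wrapper.wrap

# Hypothetical SPSS-style command, long enough to need wrapping at width 60.
command = "COMPUTE total = " + " + ".join("var%02d" % i for i in range(1, 30)) + " ."
for line in linewrap(60)(command):
    print(line)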
Code example #14
File: pydylogger.py Project: erelson/PyDyPackets
def main():
    """Parse command line options
    
    """
    
    usage = "usage: %(prog)s [options]"
    parser = ArgumentParser(prog='pydylogger', usage=usage,
                            )#formatter_class=RawTextHelpFormatter)
    
    tw = TextWrapper()
    mywrap = lambda x: "\n".join(tw.wrap(x))
    tw.width = 80 - 25
    
    #
    parser.add_argument('arglist', nargs='*', default=list(), help='N/A')
    parser.add_argument('-t', '--translate', action="store_true", \
            dest="translate", default=False, help="Print human readable " \
            "packets. Default: %(default)s")
    parser.add_argument('-a', '--all', action="store_true", \
            dest="saveall", default=False, help="Optionally save all bytes/" \
            "packets, including malformed packets. Default: %(default)s")
    parser.add_argument('-o', '--output', action="store", \
            dest="output", default="logging_output.txt", help="Specify output " \
            "file for log of packets. Default: %(default)s")
    #
    
    options = parser.parse_args()
    args = options.arglist
    
    # TODO get rid of these globals...
    global saveall
    global translate
    saveall = options.saveall
    translate = options.translate
    
    global port
    global baud
    global timing
    global id_dict
    cfg = PyDyConfigParser()
    cfg.read()
    port, baud, __, timing, __ = cfg.get_params()
    id_dict = cfg.get_id_to_device_dict()

    if saveall:
        print "All packets (including bad packets) will be saved in " \
                "{0}".format(options.output)
    if translate:
        print "Packets will be translated for listing in this window."
    
    logger_method(outputfile=options.output)
    
    return
Code example #15
File: ui.py Project: creativechain/creacli
def __get_text_wrapper(width=60):
    """
    Get a text wrapper with a fixed width.

    :param width: width of the wrapper. Default 60.
    :return: text wrapper
    :rtype: :py:class:`TextWrapper`
    """
    wrapper = TextWrapper()
    wrapper.width = width
    wrapper.subsequent_indent = " "

    return wrapper
Code example #16
    def test_fix_sentence_endings(self):
        wrapper = TextWrapper(60, fix_sentence_endings=True)

        # SF #847346: ensure that fix_sentence_endings=True does the
        # right thing even on input short enough that it doesn't need to
        # be wrapped.
        text = "A short line. Note the single space."
        expect = ["A short line.  Note the single space."]
        self.check(wrapper.wrap(text), expect)

        # Test some of the hairy end cases that _fix_sentence_endings()
        # is supposed to handle (the easy stuff is tested in
        # test_whitespace() above).
        text = "Well, Doctor? What do you think?"
        expect = ["Well, Doctor?  What do you think?"]
        self.check(wrapper.wrap(text), expect)

        text = "Well, Doctor?\nWhat do you think?"
        self.check(wrapper.wrap(text), expect)

        text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
        expect = ['I say, chaps!  Anyone for "tennis?"  Hmmph!']
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 20
        expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
        self.check(wrapper.wrap(text), expect)

        text = 'And she said, "Go to hell!"\nCan you believe that?'
        expect = ['And she said, "Go to', 'hell!"  Can you', 'believe that?']
        self.check(wrapper.wrap(text), expect)

        wrapper.width = 60
        expect = ['And she said, "Go to hell!"  Can you believe that?']
        self.check(wrapper.wrap(text), expect)

        text = 'File stdio.h is nice.'
        expect = ['File stdio.h is nice.']
        self.check(wrapper.wrap(text), expect)
Code example #17
File: html_reader.py Project: ramil350/pm_reader
    def get_text(self):
        text = ''.join(self.data)
        text = text.replace('  ', ' ')
        text = text.splitlines()

        wrapper = TextWrapper()
        wrapper.replace_whitespace = False
        wrapper.width = int(self.config.get('FormatOptions', 'line_width'))
        wrapped_text = ''

        for line in text:
            wrapped_text += wrapper.fill(line) + self.LINE_SEPARATOR

        return wrapped_text
Code example #18
def draw_body(card, raw_text, center_y, card_width, chars_per_line):
	text = raw_text.decode('utf-8')
	wrapper = TextWrapper()
	wrapper.width = chars_per_line
	wrapper.replace_whitespace = True
	wrapper.drop_whitespace = False
	lines = wrapper.wrap(text)
	line_width, line_height = body_font.getsize(lines[0])
	y = center_y - (line_height * (float(len(lines)) / 2.0))
	for line in lines:
		line_width, line_height = body_font.getsize(line)
		draw = ImageDraw.Draw(card)
		draw.text(((card_width - line_width) / 2, y), line, font = body_font, fill = (0, 0, 0))
		y += line_height
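
Only the wrapping step matters for TextWrapper here; a minimal sketch without PIL (the width stands in for a hypothetical chars_per_line) shows what the two flags do: replace_whitespace folds embedded newlines into spaces before wrapping, and drop_whitespace=False keeps the spaces at the wrap points.

from textwrap import TextWrapper

wrapper = TextWrapper()
wrapper.width = 24                  # hypothetical chars_per_line
wrapper.replace_whitespace = True   # turn '\n' and '\t' in the body into spaces
wrapper.drop_whitespace = False     # keep whitespace at the line boundaries
for line in wrapper.wrap("A body of card text\nthat spans several lines."):
    print(repr(line))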
Code example #19
def download():
    try:
        myVar.set("")
        root.update()
        pytube.YouTube(link.get()).streams.first().download(
            skip_existing=True, output_path="./Your Downloads")
        w = TextWrapper()
        w.width = 50
        title_wrapped = "\n".join(w.wrap(str(pytube.YouTube(
            link.get()).title)))
        myVar.set("Download was successfully!\n\n{}".format(title_wrapped))
    except:
        myVar.set(
            "You specified an unvalid link, try again please.\nIt should look like: www.youtube.com/watch?v=7zpxgyG7eGk"
        )
Code example #20
File: pygdbdis.py Project: stephenbradshaw/pygdbdis
def DisassemblyPrinter( instructions, fifoname='', width=int(ExtensionSettings.getValue('main_termal_width')), wrap=False):
	if (fifoname):
		output=fifoname
	else:
		output=sys.stdout
	print >>output, "=" * 16
	data = gdb.execute("x/" + str(instructions) + "i $pc", False, True)
	if wrap:
		print >>output, data
	else:
		wrapper = TextWrapper() # use TextWrapper to properly deal with tab lengths in output
		wrapper.width = width
		wrapper.drop_whitespace = False
		for line in data.split('\n'):
			out = wrapper.wrap(line)
			if len(out) > 0:
				print >>output, wrapper.wrap(line)[0]
Code example #21
    def random_quote_selector(self):
        """A function that selects a random quote from the quote file.

        Returns:
            string: A quote
        """
        quotes = open(f"data/{self.quotes_file}.txt", "r").readlines()
        for no, line in enumerate(quotes, 1):
            tw = TextWrapper()
            tw.width = 50

            if self.rand_pos == no:
                if len(line) > 50:
                    self.random_quote = "\n".join(tw.wrap(line))
                else:
                    self.random_quote = line
                return self.random_quote
Code example #22
File: bocca_command.py Project: csdms/bocca-tools
    def dry_run(self):
        from textwrap import TextWrapper
        wrapper = TextWrapper()
        wrapper.subsequent_indent = '  '
        wrapper.break_long_words = False
        wrapper.break_on_hyphens = False
        wrapper.width = 78

        commands = [
            '# Create %s' % self._object.full_name(),
            self.create_cmd(),
            self._hook.__str__(self._object)
        ]
        for command in commands:
            command.strip()
            if len(command) > 0:
                print ' \\\n'.join(wrapper.wrap(command))
        print ''
Code example #23
File: tools.py Project: timtangcoding/Menotexport
def printInd(s, level=1, length=70, prefix=''):
    from textwrap import TextWrapper
    indents = {1: 0, 2: 4, 3: 8, 4: 12, 5: 16}

    ind = indents[level]
    indstr = ' ' * int(ind)

    wrapper = TextWrapper()
    wrapper.width = length
    wrapper.initial_indent = indstr
    wrapper.subsequent_indent = indstr

    string = wrapper.fill('%s %s' % (prefix, s))
    try:
        print('\n' + string)
    except:
        print('\n' + string.encode('ascii', 'replace'))

    return
Code example #24
File: tools.py Project: Xunius/Menotexport
def printInd(s, level=1, length=70, prefix=''):
    from textwrap import TextWrapper
    indents={1: 0, 2: 4, 3: 8, 4: 12, 5: 16}

    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    string=wrapper.fill('%s %s' %(prefix,s))
    try:
        print('\n'+string)
    except:
        print('\n'+string.encode('ascii','replace'))

    return 
Code example #25
File: LMIFormatter.py Project: jsafrane/openlmi-doc
    def format(self, indent=0, width=80, f=sys.stdout,
               separator=True):
        """
        Formats a block of text and prints it to the output stream.

        :param int indent: number of spaces to indent the text block
        :param int width: total text block width
        :param f: output stream
        :param dictionary kwargs: supported keyword arguments
        :param bool separator: if True, there will be a new line appended after
            the formatted text; default value is True
        """
        if indent > width:
            return  # NOTE: this is wrong!
        wrapper = TextWrapper()
        wrapper.width = width - indent
        wrapper.subsequent_indent = " " * LMITextFormatter.SUB_INDENT
        for l in wrapper.wrap(self._text):
            f.write("%s%s\n" % (" " * indent, l))
        if separator:
            f.write("\n")
Code example #26
def main():
    ugly = False
    if os.sys.platform[0:3] == 'win':
        ugly = True

    response = urllib2.urlopen(sys.argv[1])
    encoding = response.headers.getparam('charset')
    html = response.read().decode(encoding)

    f = StringIO(html)
    parser = etree.HTMLParser()

    #create SAX tree
    tree = etree.parse(f, parser)

    handler = BoilerpipeHTMLContentHandler()
    sax.saxify(tree, handler)

    a = ArticleExtractor()

    #parses our data and creates TextDocument with TextBlocks
    doc = handler.toTextDocument()

    tw = TextWrapper()
    tw.width = 80
    tw.initial_indent = os.linesep + os.linesep
    parsed_url = urllib2.urlparse.urlparse(sys.argv[1])
    filename = parsed_url.netloc + "-" + "".join([
        c for c in parsed_url.path if c.isalpha() or c.isdigit() or c == ' '
    ]).rstrip() + '.txt'
    output = []
    for line in a.getText(doc).splitlines():
        output.append(tw.fill(line))
    i = 0
    with codecs.open(filename, 'w', encoding='utf8') as f:
        for line in output:
            if ugly:
                line = line.replace('\n', os.linesep)
            f.write(line)
    print "Article saved. Lines: %s. Filename: %s" % (len(output), filename)
Code example #27
File: fancy_exporter.py Project: shazron/jrnl
 def export_entry(cls, entry):
     """Returns a fancy unicode representation of a single entry."""
     date_str = entry.date.strftime(entry.journal.config["timeformat"])
     linewrap = entry.journal.config["linewrap"] or 78
     initial_linewrap = linewrap - len(date_str) - 2
     body_linewrap = linewrap - 2
     card = [
         cls.border_a + cls.border_b * (initial_linewrap) + cls.border_c + date_str
     ]
     w = TextWrapper(
         width=initial_linewrap,
         initial_indent=cls.border_g + " ",
         subsequent_indent=cls.border_g + " ",
     )
     title_lines = w.wrap(entry.title)
     card.append(
         title_lines[0].ljust(initial_linewrap + 1)
         + cls.border_d
         + cls.border_e * (len(date_str) - 1)
         + cls.border_f
     )
     w.width = body_linewrap
     if len(title_lines) > 1:
         for line in w.wrap(
             " ".join(
                 [
                     title_line[len(w.subsequent_indent) :]
                     for title_line in title_lines[1:]
                 ]
             )
         ):
             card.append(line.ljust(body_linewrap + 1) + cls.border_h)
     if entry.body:
         card.append(cls.border_i + cls.border_j * body_linewrap + cls.border_k)
         for line in entry.body.splitlines():
             body_lines = w.wrap(line) or [cls.border_g]
             for body_line in body_lines:
                 card.append(body_line.ljust(body_linewrap + 1) + cls.border_h)
     card.append(cls.border_l + cls.border_b * body_linewrap + cls.border_m)
     return "\n".join(card)
Code example #28
File: mailutils.py Project: jrmi/byemail
def reflow_quoted_text(text, width=72):
    """Reflow text with 'soft' (SP CR LF) newlines according to rfc2646

    Text paragraphs containing 'soft' newlines are reflowed for a maximum
    line length of @width characters.
    Only non-quoted text is reflowed (Lines not starting with '>').
    
    >>> reflow_quoted_text('Foo \\nBar')
    'Foo Bar'
    >>> reflow_quoted_text('> Foo \\n> Bar')
    '> Foo \\r\\n> Bar'
    >>> reflow_quoted_text('> Foo \\nBar ')
    '> Foo \\r\\nBar'
    >>> reflow_quoted_text('> Foo\\n\\n> Bar')
    '> Foo\\r\\n\\r\\n> Bar'
    >>> reflow_quoted_text('> Foo \\n' \
                     'a b \\n' \
                     'c d e g h i j k l m\\n' \
                     '> Bar', width=10)
    '> Foo \\r\\na b c d e \\r\\ng h i j k \\r\\nl m\\r\\n> Bar'
    """
    wrapper = TextWrapper()
    wrapper.width = width
    lines = []
    paragraph = []
    for line in text.splitlines():
        if line.startswith('>'):
            if paragraph:
                lines.append(' \r\n'.join(wrapper.wrap(''.join(paragraph))))
                paragraph = []
            lines.append(line)
            continue
        paragraph.append(line)
        if not line.endswith(' '):
            if paragraph:
                lines.append(' \r\n'.join(wrapper.wrap(''.join(paragraph))))
                paragraph = []
    if paragraph:
        lines.append(' \r\n'.join(wrapper.wrap(''.join(paragraph))))
    return '\r\n'.join(lines)
Code example #29
File: tools.py Project: Xunius/Menotexport
def printNumHeader(s, idx, num, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs={1: '=', 2: '-', 3: '.'}
    indents={1: 0, 2: 4, 3: 8}

    dec=decs[level]
    ind=indents[level]
    indstr=' '*int(ind)

    wrapper=TextWrapper()
    wrapper.width=length-ind
    wrapper.initial_indent=indstr
    wrapper.subsequent_indent=indstr

    #-------------Get delimiter line-------------
    decl=int((length-ind-2-len(str(idx))-len(str(num)))/2.)
    decl=decl*dec

    hline1='%s%s %d/%d %s' %(' '*int(ind),decl,idx,num,decl) 
    #hline2='%s%s' %(' '*int(ind),dec*int(length-ind)) 

    #--------------------Wrap texts--------------------
    strings=wrapper.wrap('%s %s' %(prefix,s))

    #----------------------Print----------------------
    try:
        print('\n'+hline1)
    except:
        print('\n'+hline1.encode('ascii','replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii','replace'))
    #print(hline2)

    return
Code example #30
File: tools.py Project: timtangcoding/Menotexport
def printNumHeader(s, idx, num, level=1, length=70, prefix='# <Menotexport>:'):
    from textwrap import TextWrapper

    decs = {1: '=', 2: '-', 3: '.'}
    indents = {1: 0, 2: 4, 3: 8}

    dec = decs[level]
    ind = indents[level]
    indstr = ' ' * int(ind)

    wrapper = TextWrapper()
    wrapper.width = length - ind
    wrapper.initial_indent = indstr
    wrapper.subsequent_indent = indstr

    #-------------Get delimiter line-------------
    decl = int((length - ind - 2 - len(str(idx)) - len(str(num))) / 2.)
    decl = decl * dec

    hline1 = '%s%s %d/%d %s' % (' ' * int(ind), decl, idx, num, decl)
    #hline2='%s%s' %(' '*int(ind),dec*int(length-ind))

    #--------------------Wrap texts--------------------
    strings = wrapper.wrap('%s %s' % (prefix, s))

    #----------------------Print----------------------
    try:
        print('\n' + hline1)
    except:
        print('\n' + hline1.encode('ascii', 'replace'))
    for ss in strings:
        try:
            print(ss)
        except:
            print(ss.encode('ascii', 'replace'))
    #print(hline2)

    return
Code example #31
def report_to_display(display, banner, banner_color, hint, hint_wrap,
                      group_index, group_items):
    gutter = '[R:{0}] '.format(group_index + 1)
    indent = ' '.ljust(len(gutter))

    display.display('{0}{1}\n'.format(gutter, banner), color=banner_color)

    for item in group_items:
        display.display('{0}  - {1}'.format(indent, item['path']),
                        color=C.COLOR_HIGHLIGHT)

    if hint and not hint_wrap:
        display.display('\n{0}HINT: {1}\n'.format(indent, hint),
                        color=C.COLOR_HIGHLIGHT)
    elif hint:
        wrapper = TextWrapper()
        wrapper.initial_indent = indent
        wrapper.subsequent_indent = indent
        wrapper.drop_whitespace = False
        wrapper.width = 70 - len(indent)
        wrapped = '\n'.join(wrapper.wrap('HINT: {0}'.format(hint)))

        display.display('\n{0}\n'.format(wrapped), color=C.COLOR_HIGHLIGHT)
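
The hint branch above applies the gutter-sized indent to both the first and the continuation lines, so the wrapped HINT text stays aligned under the report gutter. A stand-alone sketch with a made-up gutter width:

from textwrap import TextWrapper

indent = ' ' * 7                    # hypothetical gutter width
wrapper = TextWrapper()
wrapper.initial_indent = indent
wrapper.subsequent_indent = indent
wrapper.drop_whitespace = False
wrapper.width = 70 - len(indent)
print('\n'.join(wrapper.wrap('HINT: run the task again with increased verbosity to see more details.')))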
Code example #32
File: Formatting.py Project: spattersongt/vippy
def wordWrap(lines, rows, cols):
    """ Format an array of text to a specific number of 
        visible rows and columns """
    wrapped = []

    # Set up a TextWrapper
    wrapper = TextWrapper()
    wrapper.width = cols
    wrapper.expand_tabs = False
    wrapper.replace_whitespace = False
    wrapper.drop_whitespace = False

    for line in lines:
        if len(line) > cols:
            wrapped.extend(wrapper.wrap(line))
        else:
            wrapped.append(line)

        if len(wrapped) >= rows:
            break

    # Return only "rows" in case a word wrap added extra
    return wrapped[:rows]
Code example #33
    def __init__(self, message, *args):

        tw = TextWrapper()
        tw.width = 79
        tw.subsequent_indent = ''
        tw.break_on_hyphens = False

        # Adds custom error
        message += '\n\n'
        message += '*' * 79 + '\n'

        addenda = ('If you are not sure of how to solve this problem '
                   'please copy this error message and email to Jose '
                   'Sanchez-Gallego <*****@*****.**> and Drew '
                   'Chojnowski <*****@*****.**> and CC Demitri Muna '
                   '<*****@*****.**> and John Parejko '
                   '<*****@*****.**>.\n')
        addenda = '\n'.join(tw.wrap(addenda))
        message += addenda + '\n'

        message += '*' * 79 + '\n'

        super(PluggingException, self).__init__(message)
Code example #34
File: ui.py Project: dpays/dpaycli
def dump_recursive_parents(rpc,
                           post_author,
                           post_permlink,
                           limit=1,
                           format="markdown"):
    global currentThreadDepth

    limit = int(limit)

    postWrapper = TextWrapper()
    postWrapper.width = 120
    postWrapper.initial_indent = "  " * (limit)
    postWrapper.subsequent_indent = "  " * (limit)

    if limit > currentThreadDepth:
        currentThreadDepth = limit + 1

    post = rpc.get_content(post_author, post_permlink)

    if limit and post["parent_author"]:
        parent = rpc.get_content_replies(post["parent_author"],
                                         post["parent_permlink"])
        if len(parent):
            dump_recursive_parents(rpc, post["parent_author"],
                                   post["parent_permlink"], limit - 1)

    meta = {}
    for key in ["author", "permlink"]:
        meta[key] = post[key]
    meta["reply"] = "@{author}/{permlink}".format(**post)
    if format == "markdown":
        body = markdownify(post["body"])
    else:
        body = post["body"]
    yaml = frontmatter.Post(body, **meta)
    print(frontmatter.dumps(yaml))
Code example #35
File: mentags.py Project: rdweitzman/txt2evernote
def export2Evernote(annodf, verbose=True):
    '''Organize annotations by tags and send them to Evernote.

    <annodf>: pandas DataFrame. Annotations.
    '''

    geeknote = send2ever.GeekNoteConnector()
    geeknote.connectToEvertone()

    if verbose:
        print('\n# <export2Txt>: Exporting all taged annotations to Evernote')

    wrapper = TextWrapper()
    wrapper.width = 70
    wrapper.initial_indent = ''
    wrapper.subsequent_indent = int(len('> ')) * ' '

    wrapper2 = TextWrapper()
    wrapper2.width = 70
    wrapper2.initial_indent = ''
    wrapper2.subsequent_indent = '\t' + int(len('- Title: ')) * ' '

    taggroup = annodf.groupby('tags')
    tags = getField(annodf, 'tags')

    #---------------------Get tags---------------------
    if len(tags) == 0:
        print('\n# <export2Evernote>: No tags found in data.')
        return
    tags.sort()

    #---------------Put @None at the end---------------
    if '@None' in tags:
        tags.remove('@None')
        tags.append('@None')

    #----------------Loop through tags----------------
    for tagii in tags:

        if verbose:
            print('# <export2Evernote>: Get tag: %s.' % tagii)

        groupii = taggroup.get_group(tagii)
        citesii = getField(groupii, 'cite')
        evercontentii = []

        #--------------Loop through cite keys--------------
        for citejj in citesii:

            outstr = u'''\n## {0}:\n'''.format(conv(citejj))
            evercontentii.append(outstr)

            notesjj = groupii[groupii.cite == citejj]

            #-------------Loop through annotations-------------
            for kk in range(notesjj.shape[0]):
                notekk = notesjj.iloc[kk]
                #strkk=wrapper.fill(notekk.text)
                strkk = notekk.text
                title = wrapper2.fill(notekk.title)
                if notekk.type == 'quote':
                    outstr=\
                    u'\n> {0}\n\n\t- Title: {1}\n\t- Ctime: {2}\n'\
                    .format(*map(conv,[strkk, title,notekk.ctime]))
                else:
                    outstr=\
                    u'\n- {0}\n\n\t- Title: {1}\n\t- Ctime: {2}\n'\
                    .format(*map(conv,[strkk, title,notekk.ctime]))

                evercontentii.append(outstr)

        #-----------------Send to Evernote-----------------
        send2ever.createNote(tagii,\
            ''.join(evercontentii),\
            tagii,'Tags summary',geeknote,skipnotebook=True)

    return
Code example #36
File: batchRename.py Project: sotw/hmDictUtils
#from uniseg import Wrapper
from curses import panel

global DB
global tTarget
global args
global ARGUDB
global _wrap
global LINKS
global UIflag

UIflag = 1
LINKS = []
ARGUDB = []
_wrap = TextWrapper()
_wrap.width = 34

class Menu(object):

    def __init__(self, items, stdscreen):
        self.window = stdscreen.subwin(0,0)
        self.window.keypad(1)
        self.panel = panel.new_panel(self.window)
        self.panel.hide()
        panel.update_panels()

        self.position = 0
        self.items = items
        self.items.append(('==== OUTPUT ==== (press "q" for exit)','nothing'))
        self.items.append(('>','nothing'))
Code example #37
from pyfaidx import Fasta, FastaVariant
import pybedtools as pb
from textwrap import TextWrapper
import subprocess

wr = TextWrapper()
wr.width = 50

fn = '/home/sergio/media/NAS4/PFlab/TLX3_project/WES-seq/references/mouse_mm9_reference_genome.fa'
vn = 'WES_TLX3.vcf.gz'

fa = Fasta(fn)

bd = pb.BedTool('test.bed')

inf = 'in_py.fa'
with open(inf, 'w') as fp:
    for it in bd:
        rg = fa[it.chrom][it.start:it.end]
        fp.write('>' + rg.longname + '\n' + wr.fill(rg.seq) + '\n')

outf = 'out_py.fa'
cons_fa = "bcftools consensus -f {} {} -o {}".format(inf, vn, outf)

print('Running process ........ \n')
print(cons_fa)
subprocess.call(['bash', '-c', cons_fa])

fv = Fasta(outf)

## Only SNP
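
The FASTA-writing snippet relies on the wrapper's default break_long_words=True: a sequence contains no spaces, so fill() simply cuts it into fixed 50-character lines. A self-contained sketch with a made-up sequence:

from textwrap import TextWrapper

wr = TextWrapper()
wr.width = 50
sequence = "ACGT" * 40              # hypothetical 160 bp sequence
print(">example_region")
print(wr.fill(sequence))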
Code example #38
File: npc.py Project: xiaoxiae/TerrariaNPCHappiness
from datetime import datetime
from textwrap import TextWrapper
from os import path
from sys import exit

from config import Restrictions, Configuration

start_time = time()
current_dir = path.dirname(path.abspath(__file__))
process = psutil.Process(os.getpid())

Biome = NewType("Biome", str)

tw = TextWrapper()
tw.subsequent_indent = "  "
tw.width = Configuration.output_width


def rounded_price(number: float) -> int:
    """Round a number to the nearest multiple of 0.05."""
    # special case for infinities
    if number == float("inf"):
        return number

    multiple = 0.05

    return round(multiple * round(number / multiple), 3)


def rounded_sum(*args):
    """sum(), but with the price rounding."""
Code example #39
File: ui.py Project: dpays/dpaycli
 def wrapText(t):
     postWrapper = TextWrapper()
     postWrapper.width = width
     return ("\n".join(postWrapper.fill(l) for l in t.splitlines()))
Code example #40
File: extracttags.py Project: Xunius/Menotexport
def exportAnno(annodict,outdir,action,verbose=True):
    '''Export annotations grouped by tags

    '''

    #-----------Export all to a single file-----------
    if 'm' in action and 'n' not in action:
        fileout='Mendeley_highlights_by_tags.txt'
    elif 'n' in action and 'm' not in action:
        fileout='Mendeley_notes_by_tags.txt'
    elif 'm' in action and 'n' in action:
        fileout='Mendeley_annotations_by_tags.txt'

    abpath_out=os.path.join(outdir,fileout)
    if os.path.isfile(abpath_out):
        os.remove(abpath_out)

    if verbose:
        printHeader('Exporting all taged annotations to:',3)
        printInd(abpath_out,4)

    conv=lambda x:unicode(x)

    wrapper=TextWrapper()
    wrapper.width=70
    wrapper.initial_indent=''
    #wrapper.subsequent_indent='\t\t'+int(len('> '))*' '
    wrapper.subsequent_indent='\t\t'

    wrapper2=TextWrapper()
    wrapper2.width=60
    wrapper2.initial_indent=''
    #wrapper2.subsequent_indent='\t\t\t'+int(len('Title: '))*' '
    wrapper2.subsequent_indent='\t\t\t'

    with open(abpath_out, mode='a') as fout:

        #----------------Loop through tags----------------
        tags=annodict.keys()
        if len(tags)==0:
            return
        tags.sort()
        #---------------Put @None at the end---------------
        if '@None' in tags:
            tags.remove('@None')
            tags.append('@None')

        for tagii in tags:

            citedictii=annodict[tagii]
            outstr=u'''\n\n{0}\n# {1}'''.format(int(80)*'-', conv(tagii))
            outstr=outstr.encode('ascii','replace')
            fout.write(outstr)

            #--------------Loop through cite keys--------------
            for citejj, annosjj in citedictii.items():
                hljj=annosjj['highlights']
                ntjj=annosjj['notes']

                outstr=u'''\n\n\t@{0}:'''.format(conv(citejj))
                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)

                #-----------------Write highlights-----------------
                if len(hljj)>0:

                    #-------------Loop through highlights-------------
                    for hlkk in hljj:
                        hlstr=wrapper.fill(hlkk.text)
                        title=wrapper2.fill(hlkk.title)
                        outstr=u'''
\n\t\t> {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[hlstr, title,\
                      hlkk.ctime]))

                        outstr=outstr.encode('ascii','replace')
                        fout.write(outstr)

                #-----------------Write notes-----------------
                if len(ntjj)>0:

                    #----------------Loop through notes----------------
                    for ntkk in ntjj:
                        ntstr=wrapper.fill(ntkk.text)
                        title=wrapper2.fill(ntkk.title)
                        outstr=u'''
\n\t\t- {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[ntstr, title,\
                    ntkk.ctime]))

                        outstr=outstr.encode('ascii','replace')
                        fout.write(outstr)
Code example #41
File: hoursjson.py Project: RyanBaten/hoursjson
# Sets up argparse with flags for use with the command line
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--edit", action="store_true", help="Allows the user to edit a specific entry")
parser.add_argument("-w", "--weeks", action="store", default=-1, help="Expects the number of weeks in the log to \
 send in an email as an argument. If it is not Friday, it will look back the specified amount of weeks from last \
Friday")
parser.add_argument("-s", "--send", action="store_true", help="Prompts the user for an email to send to and a range of dates. Sends the hours log information to that email address")
parser.add_argument("-f", "--formatted", action="store_true", help="Prompts the user for a date and prints the formatted entry for that date")
parser.add_argument("-a", "--add", action="store_true", help="Allows the user to add to a specific entry")

args = parser.parse_args()

# Text wrapper configuration
wrapper = TextWrapper()
wrapper.width = 80
wrapper.subsequent_indent = "                "

home = environ['HOME']
logFile = home+"/workhours.log"
#logFile = "jsonlog.json"
tempFile = "tempfile.txt"
messageFile = "message.txt"

# Exits the program if invalid date data is given
def checkDate(date):
    if re.search('^\d{2}\/\d{2}\/\d{2}$',date)==None:
        print("Invalid date input, entries must be in the form 'mm/dd/yy'")
        sys.exit(1)

# Asks if the user meant to enter the email that they entered and then uses regex to
Code example #42
File: hours.py Project: RyanBaten/hours-log
            line = f.readline()
            while line != '':
                if line[0:5] == "+++++": 
                    if line[20:25] == entry:
                        read = True
                    else:
                        read = False
                if read == True:
                    print(line,end="")
                line = f.readline()
        print("Done")
        sys.exit(0)

# Text wrapper configuration
wrapper = TextWrapper()
wrapper.width = int(user.get("textWidth", fallback="70"))
indent = int(user.get("indentSize"))*" "
wrapper.subsequent_indent = indent

# Gets today's date
todayEntry = strftime("%m/%d",gmtime())

yearNum = strftime("%y",gmtime())
weekNum = strftime("%W",gmtime())
lastWeek = weekNum
if path.isfile(logFile) is True:
    # If the file already exists don't add the date when logging
    if ctime()[0:10] == ctime(path.getmtime(logFile))[0:10]:
        todayEntry =''
    # Gets previous week number
    with open(logFile,"r") as f:
					
				words+=word+" "
			
		
		# FORMAT FOR DISPLAY
		words.strip()

		#get longest string 
		sent=words.split(".")	
		words =max(sent, key=len)+"."
		words.strip()

		# for display in Cathode
		tw = TextWrapper()
		tw.width = 42
		words = "\t"+"\n\t".join(tw.wrap(words))


		# Tidy up 
		words = words.replace('“', '')
		words = words.replace('”', '')
		words = words.replace('"', '')
		words = words.replace('(', '')
		words = words.replace(')', '')


		# SCREEN OUTPUT
		for char in words:
			time.sleep(0.001)
			sys.stdout.write(char)
Code example #44
File: ui.py Project: PixelNoob/piston
 def wrapText(t):
     postWrapper = TextWrapper()
     postWrapper.width = width
     return ("\n".join(postWrapper.fill(l) for l in t.splitlines()))
Code example #45
def _exportAnnoFile(abpath_out, anno, verbose=True):
    '''Export annotations in a single PDF

    <abpath_out>: str, absolute path to output txt file.
    <anno>: list, in the form [file_path, highlight_list, note_list].
            highlight_list and note_list are both lists of
            Anno objs (see extracthl.py), containing highlights
            and notes in TEXT format with metadata. To be distinguished
            with FileAnno objs which contains texts coordinates.
            if highlight_list or note_list is [], no such info
            in this PDF.

    Function takes annotations from <anno> and output to the target txt file
    in the following format:

    -----------------------------------------------------
    # Title of PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time
    
    -----------------------------------------------------
    # Title of another PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time

    Use tabs in indention, and markup syntax: ">" for highlights, and "-" for notes.

    Update time: 2016-02-24 13:59:56.
    '''

    conv = lambda x: unicode(x)

    wrapper = TextWrapper()
    wrapper.width = 80
    wrapper.initial_indent = ''
    #wrapper.subsequent_indent='\t'+int(len('> '))*' '
    wrapper.subsequent_indent = '\t'

    wrapper2 = TextWrapper()
    wrapper2.width = 80 - 7
    wrapper2.initial_indent = ''
    #wrapper2.subsequent_indent='\t\t'+int(len('- Tags: '))*' '
    wrapper2.subsequent_indent = '\t\t'

    hlii = anno.highlights
    ntii = anno.notes

    try:
        titleii = hlii[0].title
    except:
        titleii = ntii[0].title

    outstr = u'\n\n{0}\n# {1}'.format(int(80) * '-', conv(titleii))

    with open(abpath_out, mode='a') as fout:
        #outstr=outstr.encode('ascii','replace')
        outstr = outstr.encode('utf8', 'replace')
        fout.write(outstr)

        #-----------------Write highlights-----------------
        if len(hlii) > 0:

            #-------------Loop through highlights-------------
            for hljj in hlii:
                hlstr = wrapper.fill(hljj.text)
                tagstr = ', '.join(['@' + kk for kk in hljj.tags])
                tagstr = wrapper2.fill(tagstr)
                outstr=u'''
\n\t> {0}

\t\t- @{1}
\t\t- Tags: {2}
\t\t- Ctime: {3}
'''.format(*map(conv,[hlstr, hljj.citationkey,\
    tagstr, hljj.ctime]))

                #outstr=outstr.encode('ascii','replace')
                outstr = outstr.encode('utf8', 'replace')
                fout.write(outstr)

        #-----------------Write notes-----------------
        if len(ntii) > 0:

            #----------------Loop through notes----------------
            for ntjj in ntii:
                ntstr = wrapper.fill(ntjj.text)
                tagstr = ', '.join(['@' + kk for kk in ntjj.tags])
                tagstr = wrapper2.fill(tagstr)
                outstr=u'''
\n\t- {0}

\t\t- @{1}
\t\t- Tags: {2}
\t\t- Ctime: {3}
'''.format(*map(conv,[ntstr, ntjj.citationkey,\
    tagstr, ntjj.ctime]))

                #outstr=outstr.encode('ascii','replace')
                outstr = outstr.encode('utf8', 'replace')
                fout.write(outstr)
Code example #46
def pprint_table(out, table, justs=None, separator=None, outer_seps=False, \
        widths=None, blank_row=False, default_just=None, hanging_indent=0):
    """
    Prints out a table of data, padded for alignment.
    Each row must have the same number of columns. 
    
    Cells may include line breaks.
    
    @param out: output stream
    @type out: file-like object
    @param table: table to print.
    @type table: list of lists
    @param outer_seps: Prints separators at the start and end of each row 
        if true.
    @type outer_seps: bool
    @type widths: list of ints
    @param widths: maximum width for each column. None means no maximum is 
        imposed. Words are wrapped if the width exceeds the maximum
    @type default_just: bool
    @param default_just: the default justification to use for all columns 
        if C{justs} is not given or where a column's justification is not 
        given. Default False
    @type hanging_indent: int
    @param hanging_indent: hanging indent to apply to the column if a cell 
        is wrapped (number of spaces)
    
    """
    col_paddings = []
    wrapper = TextWrapper()

    if hanging_indent:
        wrapper.initial_indent = ''
        wrapper.subsequent_indent = ' ' * hanging_indent

    # Format any numbers in the table
    table = [[format_num(cell) for cell in row] for row in table]

    # Work out the maximum width of each column so we know how much to pad
    for i in range(len(table[0])):
        if widths is not None and widths[i] is not None:
            col_paddings.append(widths[i])
        else:
            col_paddings.append(get_max_width(table, i))

    # Work out justification of each column
    coljusts = []
    if default_just is None:
        default_just = False
    for col in range(len(table[0])):
        if justs:
            if justs[col] is not None:
                coljust = justs[col]
            else:
                coljust = default_just
        else:
            coljust = default_just
        coljusts.append(coljust)

    # Wrap the long cells that have a max width
    multiline = []
    for row in table:
        mlrow = []
        for col, cell in enumerate(row):
            # If this cell exceeds its max width, put it on multiple lines
            if widths is not None and \
                    widths[col] is not None and \
                    len(cell) > widths[col]:
                wrapper.width = widths[col]
                lines = []
                # Split on manual line breaks in the input as well
                for input_line in cell.split("\n"):
                    lines.extend(wrapper.wrap(input_line))
            else:
                lines = cell.split("\n")
            mlrow.append(lines)
        multiline.append(mlrow)

    for row in multiline:
        if outer_seps:
            print >> out, separator,
        # Find out the cell with the most lines in this row
        max_lines = max(len(cell) for cell in row)
        # Each line of the row
        for line in range(max_lines):
            for col in range(len(row)):
                # If this cell doesn't have this many lines, just pad
                padsize = col_paddings[col] + 2
                if line >= len(row[col]):
                    text = " " * padsize
                else:
                    # There's text: justify it
                    if coljusts[col]:
                        text = row[col][line].ljust(padsize)
                    else:
                        text = row[col][line].rjust(padsize)
                if col != 0 and separator:
                    print >> out, separator,
                print >> out, text,
            if outer_seps:
                print >> out, separator,
            print >> out
        # Add an extra blank line between rows
        if blank_row:
            print >> out
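
The hanging indent used for over-wide cells above boils down to an empty initial_indent plus a spaces-only subsequent_indent. A minimal sketch with hypothetical numbers:

from textwrap import TextWrapper

wrapper = TextWrapper()
wrapper.initial_indent = ''
wrapper.subsequent_indent = ' ' * 4   # hypothetical hanging_indent of 4
wrapper.width = 30
for line in wrapper.wrap("a table cell whose contents are wider than the column allows"):
    print(line)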
Code example #47
File: mentags.py Project: rdweitzman/txt2evernote
def export2Txt(annodf, abpath_out, verbose=True):
    '''Organize annotations by tags and save to txt.

    <annodf>: pandas DataFrame. Annotations.
    <abpath_out>: str, absolute path to output txt.
    '''

    if os.path.isfile(abpath_out):
        os.remove(abpath_out)

    if verbose:
        print('\n# <export2Txt>: Exporting all taged annotations to:')
        print(abpath_out)

    wrapper = TextWrapper()
    wrapper.width = 70
    wrapper.initial_indent = ''
    #wrapper.subsequent_indent='\t\t'+int(len('> '))*' '
    wrapper.subsequent_indent = '\t\t'

    wrapper2 = TextWrapper()
    wrapper2.width = 60
    wrapper2.initial_indent = ''
    #wrapper2.subsequent_indent='\t\t\t'+int(len('- Title: '))*' '
    wrapper2.subsequent_indent = '\t\t\t'

    taggroup = annodf.groupby('tags')
    tags = getField(annodf, 'tags')

    #---------------------Get tags---------------------
    if len(tags) == 0:
        print('\n# <export2Txt>: No tags found in data.')
        return
    tags.sort()

    #---------------Put @None at the end---------------
    if '@None' in tags:
        tags.remove('@None')
        tags.append('@None')

    with open(abpath_out, mode='a') as fout:

        #----------------Loop through tags----------------
        for tagii in tags:

            if verbose:
                print('# <export2Txt>: Get tag: %s.' % tagii)

            outstr = u'''\n\n{0}\n# {1}'''.format(int(80) * '-', conv(tagii))
            outstr = outstr.encode('ascii', 'replace')
            fout.write(outstr)

            groupii = taggroup.get_group(tagii)
            citesii = getField(groupii, 'cite')

            #--------------Loop through cite keys--------------
            for citejj in citesii:

                outstr = u'''\n\n\t{0}:'''.format(conv(citejj))
                outstr = outstr.encode('ascii', 'replace')
                fout.write(outstr)
                notesjj = groupii[groupii.cite == citejj]

                #-------------Loop through annotations-------------
                for kk in range(notesjj.shape[0]):
                    notekk = notesjj.iloc[kk]
                    strkk = wrapper.fill(notekk.text)
                    title = wrapper2.fill(notekk.title)
                    if notekk.type == 'quote':
                        outstr=u'''
\n\t\t> {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[strkk, title,\
                  notekk.ctime]))
                    else:
                        outstr=u'''
\n\t\t- {0}

\t\t\t- Title: {1}
\t\t\t- Ctime: {2}'''.format(*map(conv,[strkk, title,\
                  notekk.ctime]))

                    outstr = outstr.encode('ascii', 'replace')
                    fout.write(outstr)

    return
Code example #48
		#erase the output '88/88 words' line
		print('                                                                                                                       ', end='\r')


		# if not titl:
		#     words = "\n"+"\n".join(words.splitlines()[1:])
		# else:
		#     words = "\n"+titl+"\n\n"+"\n".join(words.splitlines()[1:])

		#words = "\n"+"\n".join(words.splitlines()[1:])
		#words = insertNewlines(words, 48)

		# for display in Cathode
		tw = TextWrapper()
		tw.width = 64
		words = "\n".join(tw.wrap(words))

		words = ".".join(words.split(".")[:-1])+ "."

		# Tidy up 
		words = words.replace('“', '')
		words = words.replace('”', '')
		words = words.replace('"', '')
		words = words.replace('(', '')
		words = words.replace(')', '')



		words = capitalize(words)
Code example #49
            end='\r')

        # if not titl:
        #     words = "\n"+"\n".join(words.splitlines()[1:])
        # else:
        #     words = "\n"+titl+"\n\n"+"\n".join(words.splitlines()[1:])

        #words = "\n"+"\n".join(words.splitlines()[1:])
        #words = insertNewlines(words, 48)

        words = "".join(
            words.split(".")[0]) + "."  #".".join(words.split(".")[:-1])+ "."

        # for display in Cathode
        tw = TextWrapper()
        tw.width = 30
        words = "\t" + "\n\t".join(tw.wrap(words))

        # Tidy up
        words = words.replace('“', '')
        words = words.replace('”', '')
        words = words.replace('"', '')
        words = words.replace('(', '')
        words = words.replace(')', '')

        words = capitalize(words)  #+"\n\n"+str(args.temperature)

        # SCREEN OUTPUT
        for char in words:
            time.sleep(0.001)
            sys.stdout.write(char)
Code example #50
File: tableprint.py Project: johndpope/jazzparser
def pprint_table(out, table, justs=None, separator=None, outer_seps=False, \
        widths=None, blank_row=False, default_just=None, hanging_indent=0):
    """
    Prints out a table of data, padded for alignment.
    Each row must have the same number of columns. 
    
    Cells may include line breaks.
    
    @param out: output stream
    @type out: file-like object
    @param table: table to print.
    @type table: list of lists
    @param outer_seps: Prints separators at the start and end of each row 
        if true.
    @type outer_seps: bool
    @type widths: list of ints
    @param widths: maximum width for each column. None means no maximum is 
        imposed. Words are wrapped if the width exceeds the maximum
    @type default_just: bool
    @param default_just: the default justification to use for all columns 
        if C{justs} is not given or where a column's justification is not 
        given. Default False
    @type hanging_indent: int
    @param hanging_indent: hanging indent to apply to the column if a cell 
        is wrapped (number of spaces)
    
    """
    col_paddings = []
    wrapper = TextWrapper()
    
    if hanging_indent:
        wrapper.initial_indent = ''
        wrapper.subsequent_indent = ' '*hanging_indent
    
    # Format any numbers in the table
    table = [
        [format_num(cell) for cell in row]
            for row in table]

    # Work out the maximum width of each column so we know how much to pad
    for i in range(len(table[0])):
        if widths is not None and widths[i] is not None:
            col_paddings.append(widths[i])
        else:
            col_paddings.append(get_max_width(table, i))
    
    # Work out justification of each column
    coljusts = []
    if default_just is None:
        default_just = False
    for col in range(len(table[0])):
        if justs:
            if justs[col] is not None:
                coljust = justs[col]
            else:
                coljust = default_just
        else:
            coljust = default_just
        coljusts.append(coljust)
    
    # Wrap the long cells that have a max width
    multiline = []
    for row in table:
        mlrow = []
        for col,cell in enumerate(row):
            # If this cell exceeds its max width, put it on multiple lines
            if widths is not None and \
                    widths[col] is not None and \
                    len(cell) > widths[col]:
                wrapper.width = widths[col]
                lines = []
                # Split on manual line breaks in the input as well
                for input_line in cell.split("\n"):
                    lines.extend(wrapper.wrap(input_line))
            else:
                lines = cell.split("\n")
            mlrow.append(lines)
        multiline.append(mlrow)

    for row in multiline:
        if outer_seps:
            print >> out, separator,
        # Find out the cell with the most lines in this row
        max_lines = max(len(cell) for cell in row)
        # Each line of the row
        for line in range(max_lines):
            for col in range(len(row)):
                # If this cell doesn't have this many lines, just pad
                padsize = col_paddings[col] + 2
                if line >= len(row[col]):
                    text = " " * padsize
                else:
                    # There's text: justify it
                    if coljusts[col]:
                        text = row[col][line].ljust(padsize)
                    else:
                        text = row[col][line].rjust(padsize)
                if col != 0 and separator:
                    print >> out, separator,
                print >> out, text,
            if outer_seps:
                print >> out, separator,
            print >>out
        # Add an extra blank line between rows
        if blank_row:
            print >>out
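For reference, a minimal standalone sketch of the hanging-indent wrapping this function relies on (the cell text and the 4-space indent are made up):

from textwrap import TextWrapper

# No indent on the first line of a wrapped cell, a 4-space hanging indent
# on its continuation lines, as set up at the top of the function above.
wrapper = TextWrapper()
wrapper.initial_indent = ''
wrapper.subsequent_indent = ' ' * 4
wrapper.width = 20

cell = "A fairly long cell value that does not fit in twenty columns"
lines = []
for input_line in cell.split("\n"):    # honour manual line breaks first
    lines.extend(wrapper.wrap(input_line))
print("\n".join(lines))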
Code example #51
File: pydyparser.py  Project: erelson/PyDyPackets
def main():
    """Parse command line options
    
    """
    usage = "usage: %(prog)s [raw-input-file [options] ]\n" \
            "Program will filter and/or translate supplied raw packets.\n"
    parser = ArgumentParser(prog='pydyparser', usage=usage,
                            formatter_class=RawTextHelpFormatter)

    tw = TextWrapper()
    mywrap = lambda x: "\n".join(tw.wrap(x))
    tw.width = 80 - 25
    quicktext = "\n".join(["\n".join(tw.wrap(_)) for _ in (
            "Arg(s) will be concatenated and treated as "
            "a single packet and then parsed. Input should be space-delimited "
            "bytes. 0xff, 255, and \\xff styles are all supported. "
            "If using the latter, space delimiting is optional, but you must "
            "wrap the sequence of bytes in quotes "
            "(or escape the backslashes).\n\n"
            "Example usage:"
            "\n$ pydyparser -q 255 255 12 7 3 30 0 2 0 2 195"
            "\n$ pydyparser -q 0xFF 0xFF 0x0C 0x07 0x03 0x1E 0x00 0x02 0x00 0x02 0xC3"
            "\n$ pydyparser -q \"\\xFF\\xFF\\x0C\\x07\\x03\\x1E\\x00\\x02\\x00\\x02\\xC3\""
            "\n$ pydyparser -q \"\\xFF \\xFF \\x0C \\x07 \\x03 \\x1E \\x00 \\x02 \\x00 \\x02 \\xC3\""
            "\n\nThese all produce output:\n"
            " ['ID: 12', 'write data', 'GOAL_POSITION_L       ', 'Val:     512', "
            "'GOAL_SPEED_L', 'Val:     512', 'invalid checksum c3 (actual c9)']").splitlines()])

    #
    parser.add_argument('arglist', nargs='*', default=list(),
            help=mywrap("Path to a file to parse/translate, or list of bytes "
            "to parse/translate if using -q flag."))
    parser.add_argument('-q', '--quick', action="store_true", dest="quick",
            default=False, help=quicktext)
    parser.add_argument('-s', '--servos', action="store",
            dest="my_f_id", default=None,
            help=mywrap("A single integer "
            "or set of comma separated integers for servo IDs to keep "
            "when filtering; e.g. '-s 1,2,3'.\nDefault: %(default)s"))
    parser.add_argument('-i', '--instructions', action="store",
            dest="my_f_instr", default=None, help=mywrap("A single integer "
            "or set of comma separated integers for instructions to keep "
            "when filtering; e.g. '-i 1,2,3'.\nDefault: %(default)s"))
    parser.add_argument('-c', '--commands', action="store",
            dest="my_f_cmd", default=None, help=mywrap("A single integer "
            "or set of comma separated integers for commands to keep "
            "when filtering; e.g. '-c 1,2,3'.\nDefault: %(default)s"))
    parser.add_argument('-o', '--output', action="store",
            dest="output", default="filtered_out.txt", help=mywrap("Specify "
            "output file for filtered list of packets. (do `-o ''` to prevent "
            "output creation.) Default: %(default)s"))
    parser.add_argument('-t', '--translate', action="store_true",
            dest="translate", default=False, help=mywrap("Write filtered "
            "packets in human-readable form.\nDefault: %(default)s"))
    parser.add_argument('--time', action="store_true",
            dest="timestamp", default=None, help=mywrap("Appends timestamps "
            "to end of each translated packet (if timestamps exist). "
            "Default: %(default)s"))
    parser.add_argument('-T', '--Tally', action="store",
            dest="my_tally_by", default=None, help=mywrap("Tally filtered "
            "packets by command (cmd), instruction (instr) or servo ID (id). "
            "E.g.: '-T id'. Default: %(default)s"))
    parser.add_argument('-S', '--SyncWrite', action="store_true",
            dest="sync_split", default=None, help=mywrap("Split up sync-write "
            "packets when filtering to look for contents satisfying other "
            "criteria. Can also be used just to create individual packets. "
            "Default: %(default)s"))
    #
    
    options = parser.parse_args()
    args = options.arglist
    
    if len(args) == 0:
        print "Command line use requires the name of a file with a packet " \
                "log. (Or a string of bytes if using --quick option.)\n" \
                "Use the -h option for more help."
        return

    cfg = PyDyConfigParser()
    cfg.read()
    __, __, __, __, itit = cfg.get_params()
    id_dict = cfg.get_id_to_device_dict()

    if options.timestamp is None:
        options.timestamp = itit

    do_filtering(options, args, id_dict)
    return
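A trimmed-down, standard-library-only sketch of the help-wrapping pattern above (the prog name and option text are illustrative):

from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import TextWrapper

# Pre-wrap long help strings so RawTextHelpFormatter prints them at a fixed
# width instead of one long line.
tw = TextWrapper()
tw.width = 80 - 25                      # leave room for argparse's option column
mywrap = lambda x: "\n".join(tw.wrap(x))

parser = ArgumentParser(prog='demo', formatter_class=RawTextHelpFormatter)
parser.add_argument('-s', '--servos', default=None,
                    help=mywrap("A single integer or set of comma separated "
                                "integers for servo IDs to keep when filtering."))
parser.print_help()                     # shows the wrapped help text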
Code example #52
File: main.py  Project: ductri/quotes_image
    number_image = 300
    for i in range(number_image):
        color_picked = np.random.randint(0, color.shape[0])
        quote_picked = np.random.randint(0, quote.shape[0])

        new_image = Image.new('RGB', (1366, 768), color.loc[color_picked, 'background'])
        d = ImageDraw.Draw(new_image)
        font_size = FONT_SIZE

        fnt = ImageFont.truetype(resources_path + 'font/Merriweather-Light.ttf', font_size)

        text_origin = quote.loc[quote_picked, 'quote']
        text_size = d.textsize(text=text_origin, font=fnt)

        wrapper = TextWrapper()
        wrapper.width = int(NUMBER_CHARACTERS_PER_LINE)
        text = wrapper.fill(quote.loc[quote_picked, 'quote'])
        text_size = d.textsize(text=text, font=fnt)
        X = (1366 - text_size[0]) / 2
        Y = (768 - text_size[1]) / 2
        number_character_per_line = NUMBER_CHARACTERS_PER_LINE
        while Y < Y_MIN:
            wrapper.width += 1
            text = wrapper.fill(text_origin)
            text_size = d.textsize(text=text, font=fnt)
            X = (1366 - text_size[0]) / 2
            Y = (768 - text_size[1]) / 2
            if X < X_MIN:
                break

        while (X < X_MIN) or (Y < Y_MIN):
            font_size -= 1
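For a self-contained view of the fitting loop above, a sketch with Pillow's d.textsize() measurement replaced by a rough character-cell estimate (all constants here are made up):

from textwrap import TextWrapper

# Stand-in constants for the pixel measurements in the original example.
CHAR_W, CHAR_H = 20, 40
IMG_W, IMG_H = 1366, 768
X_MIN, Y_MIN = 100, 100

quote = "The quick brown fox jumps over the lazy dog. " * 6
wrapper = TextWrapper()
wrapper.width = 10                       # start narrow, then widen

def estimate(text):
    # Rough pixel size of the wrapped block: characters times cell size.
    lines = text.split("\n")
    return max(len(l) for l in lines) * CHAR_W, len(lines) * CHAR_H

text = wrapper.fill(quote)
w, h = estimate(text)
x, y = (IMG_W - w) / 2, (IMG_H - h) / 2
while y < Y_MIN:                         # widen the lines until the block
    wrapper.width += 1                   # is short enough to centre vertically
    text = wrapper.fill(quote)
    w, h = estimate(text)
    x, y = (IMG_W - w) / 2, (IMG_H - h) / 2
    if x < X_MIN:                        # but stop if it becomes too wide
        break

print(text)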
Code example #53
import cStringIO
from os.path import join
from textwrap import TextWrapper

from metaArray.misc import filePath

# Environmental variables
demo_dir = join(filePath(__file__).baseDir, 'example')
tty_width = 72
partition = '-' * tty_width
prompt = '>>> '

# Current dir
# current_dir = dirPath('./')


wrapper = TextWrapper()
wrapper.width = tty_width
wrapper.replace_whitespace = False
# wrapper.drop_whitespace = False
wrapper.initial_indent = "- "
wrapper.subsequent_indent = '- '

comment_wrapper = TextWrapper()
comment_wrapper.replace_whitespace = False
# comment_wrapper.drop_whitespace = False
comment_wrapper.width = tty_width
comment_wrapper.subsequent_indent = '# '

class demo_menu(object):
    """
    Menu object
Code example #54
                                                   if item[0].isdigit() and item[1].isdigit() else float('inf'),item)) #stolen from:http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
    for ticker in sortedTickers:
        print(ticker)
    input.close()

def usage():
    print("python StockTrackerJSON  [ -c/--comparison, -i/--input=portfolio.json -s/--stocktable -e/--email -w/--web-html]")
    print("-i/--input=portfolio.json")

"""
############ Main 
"""

from textwrap import TextWrapper

# Set the wrapping width manually; otherwise text is not written across the
# full terminal width when the terminal is dragged wider.
wrapper = TextWrapper()
wrapper.width = 190

import getopt 
import sys


print(sys.argv[1:])
try:
    options, remainder = getopt.gnu_getopt(sys.argv[1:], 'i:', ['input='])
except getopt.GetoptError as err:
    # print help information and exit:
    print(str(err))  # will print something like "option -a not recognized"
    usage()
    sys.exit(2)
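A minimal sketch of the gnu_getopt call above, with a made-up argument list:

import getopt

# Parse a single -i/--input option; anything unrecognised ends up in remainder.
argv = ['-i', 'portfolio.json', 'extra.txt']
options, remainder = getopt.gnu_getopt(argv, 'i:', ['input='])
for opt, val in options:
    if opt in ('-i', '--input'):
        print('input file:', val)        # -> input file: portfolio.json
print('remainder:', remainder)           # -> remainder: ['extra.txt']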
Code example #55
    sys.stdout.write(bcolors.YELLOW + '[%s] %s%s %s\r' %
                     (bar, percents, '%', suffix) + bcolors.ENDC)

    sys.stdout.flush()
    if count == total:
        print(bcolors.BOLD + '\n' + '\nDONE!\n' + bcolors.ENDC)


time.sleep(.5)

# text wrapper instance


wrapper = TextWrapper()

wrapper.width = 80

# make list of files to protect

all_files = os.listdir('.')

# empty list for pdfs

files_to_encrypt = []

# fill list with only pdfs

for file in all_files:
    if isPDFfile(file):
        files_to_encrypt.append(file)
    else:
Code example #56
File: plugin.py  Project: jlorieau/mollib
    def process(self, molecules, args):
        """Process the SVD of molecules."""
        # Setup the configuration options
        if 'project_methyls' in args and args.project_methyls:
            settings.project_methyls = True
        if 'methyl_scale' in args and args.methyl_scale is not None:
            settings.methyl_order_parameter = args.methyl_scale
        if 'fix_sign' in args and args.fix_sign:
            settings.enable_signfixer = True
        if 'nofix_sign' in args and args.nofix_sign:
            settings.enable_signfixer = False
        if 'fix_nh_scale' in args and args.fix_nh_scale:
            settings.enable_nhscalefixer = True
        if 'nofix_nh_scale' in args and args.nofix_nh_scale:
            settings.enable_nhscalefixer = False
        if 'fix_outliers' in args and args.fix_outliers:
            settings.enable_outlierfixer = True
        if 'nofix_outliers' in args and args.nofix_outliers:
            settings.enable_outlierfixer = False

        # If specified, get the identifier for the dataset to use.
        set_id = args.set if 'set' in args else None

        # Process the partial alignment calculation
        if args.command == 'pa':
            # Get the alignment data
            data = {}
            for data_filename in args.data[0]:
                # verify that the file exists
                file_path = get_or_fetch(data_filename, extensions='mr.gz',
                                         urls=settings.mr_urls,
                                         critical=True)

                # Read the data from the file.
                data_dict = read_pa_file(file_path, set_id)
                data.update(data_dict)

            # If excluded interactions are specified, remove these.
            if args.exclude:
                data = {k:v for k, v in data.items()
                        if interaction_type(k) not in args.exclude}

            # verify that there is data in the data dict
            msg = "Could not find data in alignment data."
            check_not_empty(data=data, msg=msg, critical=True)

            # Prepare the magnetic interactions for the molecules
            labels = data.keys()
            process = Process(molecules)
            magnetic_interactions = process.process(labels=labels)

            # Apply the fixers to see if the input data can be improved
            fixer = Fixer(molecules)
            data_fixed, fixes = fixer.fix(data)
            data = data_fixed if data_fixed is not None else data

            # Conduct the SVD on the data
            (data_pred, Saupe_components,
             stats) = calc_pa_SVD(magnetic_interactions, data)

            # Prepare table of stats and fit values
            table = stats_table(stats)

            # Prepare a table of the observed and predicted data
            tables = report_tables(data, data_pred)

            if len(molecules) > 1:
                # Make title for stats table
                title = "Summary SVD Statistics for Molecules "
                title += word_list([m.fullname for m in molecules])
                table.title = title

                # Make title for the fit data table
                title = "Observed and Predicted RDCs and RACS for Molecules "
                title += word_list([m.fullname for m in molecules])
                tables['fit'].title = title

                # Make title for the back-calculated predicted data
                title = "Back-calculated RDCs and RACS for Molecules "
                title += word_list([m.fullname for m in molecules])
                tables['pred'].title = title
            else:
                # Make title for stats table
                title = "Summary SVD Statistics for Molecule "
                title += molecules[0].fullname
                table.title = title

                # Make title for the fit data table
                title = "Observed and Predicted RDCs and RACS for Molecule "
                title += molecules[0].fullname
                tables['fit'].title = title

                # Make title for the back-calculated predicted data
                title = "Back-calculated RDCs and RACS for Molecule "
                title += molecules[0].fullname
                tables['pred'].title = title

            # Prepare the standard output
            summary = table.content()
            output = tables['fit'].content()

            # Prepare and format the fixes listing
            if fixes:
                # Setup the text wrapper so that the lines of fixes do not
                # exceed the set maximum number of columns.
                wrapper = TextWrapper()
                wrapper.initial_indent = '* '
                wrapper.subsequent_indent = '  '
                wrapper.width = utils_settings.default_max_width

                fixes_wrapped = ['\n'.join(wrapper.wrap(fix)) for fix in fixes]
                fixes_output = '\n'.join(fixes_wrapped)
            else:
                fixes_output = ''

            # Print or write the report(s)
            print(summary)
            if args.out:
                output += fixes_output
                write_file('\n'.join((summary, output)), args.out)
            elif not args.summary:
                print(output)

            if fixes:
                print(fixes_output)

            # Write the predicted data
            if args.pred:
                write_file(tables['pred'].content(), args.pred)
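A minimal sketch of the fixes formatting above; the two fix messages are invented examples:

from textwrap import TextWrapper

# Each message becomes a wrapped bullet whose continuation lines are
# indented under the "* " marker.
wrapper = TextWrapper()
wrapper.initial_indent = '* '
wrapper.subsequent_indent = '  '
wrapper.width = 40

fixes = ["Fixed the sign of the Czz component based on the fit residuals.",
         "Removed 2 outlier data points before repeating the SVD fit."]
fixes_wrapped = ['\n'.join(wrapper.wrap(fix)) for fix in fixes]
print('\n'.join(fixes_wrapped))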
Code example #57
File: menotexport.py  Project: samuell/Menotexport
def _exportAnnoFile(abpath_out,anno,verbose=True):
    '''Export annotations in a single PDF

    <abpath_out>: str, absolute path to output txt file.
    <anno>: list, in the form [highlight_list, note_list].
            highlight_list and note_list are both lists of
            Anno objs (see extracthl.py), containing highlights
            and notes in TEXT format with metadata. To be distinguished
            with FileAnno objs which contains texts coordinates.
            if highlight_list or note_list is [], no such info
            in this PDF.

    Function takes annotations from <anno> and output to the target txt file
    in the following format:

    -----------------------------------------------------
    # Title of PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time
    
    -----------------------------------------------------
    # Title of another PDF

        > Highlighted text line 1
          Highlighted text line 2
          Highlighted text line 3
          ...
            
            - @citationkey
            - Tags: @tag1, @tag2, @tag3...
            - Ctime: creation time

    Use tabs in indention, and markup syntax: ">" for highlights, and "-" for notes.

    Update time: 2016-02-24 13:59:56.
    '''

    conv=lambda x:unicode(x)

    wrapper=TextWrapper()
    wrapper.width=80
    wrapper.initial_indent=''
    wrapper.subsequent_indent='\t'+len('> ')*' '

    wrapper2=TextWrapper()
    wrapper2.width=80-7
    wrapper2.initial_indent=''
    wrapper2.subsequent_indent='\t\t'+len('- Tags: ')*' '

    hlii,ntii=anno
    try:
        titleii=hlii[0].title
    except:
        titleii=ntii[0].title

    outstr=u'\n\n{0}\n# {1}'.format(int(80)*'-',conv(titleii))

    with open(abpath_out, mode='a') as fout:
        outstr=outstr.encode('ascii','replace')
        fout.write(outstr)

        #-----------------Write highlights-----------------
        if len(hlii)>0:

            if verbose:
                print('\n# <menotexport>: Exporting highlights in:')
                print(titleii)

            #-------------Loop through highlights-------------
            for hljj in hlii:
                hlstr=wrapper.fill(hljj.text)
                tagstr=', '.join(['@'+kk for kk in hljj.tags])
                tagstr=wrapper2.fill(tagstr)
                outstr=u'''
\n\t> {0}

\t\t- @{1}
\t\t- Tags: {2}
\t\t- Ctime: {3}
'''.format(*map(conv,[hlstr, hljj.citationkey,\
    tagstr, hljj.ctime]))

                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)

        #-----------------Write notes-----------------
        if len(ntii)>0:

            if verbose:
                print('\n# <menotexport>: Exporting notes in:')
                print(titleii)

            #----------------Loop through notes----------------
            for ntjj in ntii:
                ntstr=wrapper.fill(ntjj.text)
                tagstr=', '.join(['@'+kk for kk in ntjj.tags])
                tagstr=wrapper2.fill(tagstr)
                outstr=u'''
\n\t- {0}

\t\t- @{1}
\t\t\t- Tags: {2}
\t\t\t- Ctime: {3}
'''.format(*map(conv,[ntstr, ntjj.citationkey,\
    tagstr, ntjj.ctime]))

                outstr=outstr.encode('ascii','replace')
                fout.write(outstr)
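A minimal sketch of the highlight wrapping above (the sample text is made up): continuation lines get a tab plus two spaces so they line up under the "> " marker.

from textwrap import TextWrapper

# Same indent scheme as wrapper in the function above.
wrapper = TextWrapper()
wrapper.width = 80
wrapper.initial_indent = ''
wrapper.subsequent_indent = '\t' + len('> ') * ' '

highlight = ("Text wrapping is the process of breaking a long string into "
             "lines that fit within a fixed display width.")
print('\t> ' + wrapper.fill(highlight))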