def __init__(self, **kwargs):
    """Gate a destructive Firefox OS test run behind two prompts.

    First requires the user to have acknowledged the risks (testvar or
    environment variable), then shows a 30-second abortable warning
    countdown unless explicitly skipped.
    """
    # Width (in characters) of the banner boxes printed below.
    width = 80
    # Risk-acknowledgement gate: either the 'acknowledged_risks' testvar is
    # True or GAIATEST_ACKNOWLEDGED_RISKS is set; otherwise refuse to run.
    if not (self.testvars.get('acknowledged_risks') is True or os.environ.get('GAIATEST_ACKNOWLEDGED_RISKS')):
        url = 'https://developer.mozilla.org/en-US/docs/Gaia_Test_Runner#Risks'
        heading = 'Acknowledge risks'
        message = 'These tests are destructive and may remove data from the target Firefox OS instance as well ' \
                  'as using services that may incur costs! Before you can run these tests you must follow the ' \
                  'steps to indicate you have acknowledged the risks detailed at the following address:'
        # Banner: 5 leading stars, the heading, then stars padding to `width`
        # (7 = the stars/spaces surrounding the heading text).
        print '\n' + '*' * 5 + ' %s ' % heading.upper() + '*' * (width - len(heading) - 7)
        print '\n'.join(textwrap.wrap(message, width))
        print url
        print '*' * width + '\n'
        sys.exit(1)
    # Warning countdown, skippable via testvar or GAIATEST_SKIP_WARNING.
    if not (self.testvars.get('skip_warning') is True or os.environ.get('GAIATEST_SKIP_WARNING')):
        delay = 30
        heading = 'Warning'
        message = 'You are about to run destructive tests against a Firefox OS instance. These tests ' \
                  'will restore the target to a clean state, meaning any personal data such as contacts, ' \
                  'messages, photos, videos, music, etc. will be removed. This may include data on the ' \
                  'microSD card. The tests may also attempt to initiate outgoing calls, or connect to ' \
                  'services such as cellular data, wifi, gps, bluetooth, etc.'
        try:
            print '\n' + '*' * 5 + ' %s ' % heading.upper() + '*' * (width - len(heading) - 7)
            print '\n'.join(textwrap.wrap(message, width))
            print '*' * width + '\n'
            print 'To abort the test run hit Ctrl+C on your keyboard.'
            print 'The test run will continue in %d seconds.' % delay
            # Give the user a window to abort; Ctrl+C lands in the handler below.
            time.sleep(delay)
        except KeyboardInterrupt:
            print '\nTest run aborted by user.'
            sys.exit(1)
        print 'Continuing with test run...\n'
def helpEvent(self, event, view, option, index):
    """Show a rich-text tooltip (title, authors, series, on-device note)
    for the book under the cursor; return False to defer to the default
    handling in every other case."""
    if event is None or view is None or event.type() != QEvent.ToolTip:
        return False
    try:
        legacy_db = index.model().db
    except AttributeError:
        return False
    try:
        book_id = legacy_db.id(index.row())
    except (ValueError, IndexError, KeyError):
        return False
    api = legacy_db.new_api
    device = self.parent().gui.device_connected
    on_device = device is not None and api.field_for('ondevice', book_id)
    esc = prepare_string_for_xml
    title = api.field_for('title', book_id)
    authors = api.field_for('authors', book_id)
    if not (title and authors):
        return False
    # Assemble tooltip sections; they are joined with blank lines below.
    parts = [
        '<b>%s</b>' % ('<br>'.join(wrap(esc(title), 120))),
        '<br>'.join(wrap(esc(' & '.join(authors)), 120)),
    ]
    series = api.field_for('series', book_id)
    if series:
        use_roman_numbers = config['use_roman_numerals_for_series_number']
        parts.append(
            _('Book %(sidx)s of <span class="series_name">%(series)s</span>') % dict(
                sidx=fmt_sidx(api.field_for('series_index', book_id),
                              use_roman=use_roman_numbers),
                series=esc(series)))
    if on_device:
        parts.append(_('This book is on the device in %s') % on_device)
    QToolTip.showText(event.globalPos(), '<br><br>'.join(parts), view)
    return True
def format(self, p):
    """Render the compliance report as aligned fixed-width text and write
    it to *p* via self.write_file.

    Layout: a header (title, timestamp, profile, vendor, device, overall
    verdict, notes), then one numbered section per suite with numbered,
    word-wrapped test-case rows.
    """
    # Header: title plus generation timestamp, then an underline row.
    report = "%-50s Produced at %16s\n" % (self.report.title, strftime("%Y-%m-%d %H:%M"))
    report += "%-50s ----------------------------\n" % self.__underline(self.report.title)
    report += "%18s %-30s\n" % ("profile", self.report.klass)
    report += "%18s %-30s" % ("vendor", self.report.vendor)
    report += " %-28s\n" % ("Session Result".center(28))
    report += "%18s %-30s" % ("device", self.report.device)
    # Overall verdict, centered in the right-hand column.
    report += " %-28s\n" % ((self.report.is_compliant() and "Compliant" or "Not Compliant").center(28))
    report += "%18s %-30s\n" % ("notes", self.report.notes)
    for idx, suite_result in enumerate(self.report.results()):
        report += "\n"
        # Suite heading carries its own compliance verdict.
        report += "%3d. %-46s %1s %-24s\n" % (idx+1, suite_result.test_suite.title(), "", (suite_result.is_compliant() and "Compliant" or "Not Compliant"))
        report += " %-46s\n" % (self.__underline(suite_result.test_suite.title()))
        for jdx, case_result in enumerate(suite_result.results()):
            # Wrap title and optional outcome message into parallel columns.
            title = wrap(case_result.test_case.title(), 41)
            message = case_result.outcome.message != None and wrap(case_result.outcome.message, 24) or []
            # Emit as many physical rows as the longer column needs; the
            # message column starts one row below the title column.
            for kdx in range(0, max(len(title), len(message) + 1)):
                if kdx == 0:
                    # First row: number, first title line, '*' for optional
                    # cases, and the result string.
                    report += " %3s. %-41s %1s %-24s\n" % (jdx+1, title[kdx], case_result.test_case.is_optional() and "*" or "", case_result.outcome.result_string())
                else:
                    # Continuation rows: remaining title/message lines.
                    report += " %-41s %-24s\n" % (kdx < len(title) and title[kdx] or "", kdx <= len(message) and message[kdx-1] or "")
    self.write_file(p, report)
def formatDocumentation(self, node, indent=''):
    """Generate a Python docstring from a Node.

    The returned value includes quotes begin and end double-quotes.
    Multi-line documentation will be wrapped and combined with raw '\n'
    characters as separators (so newlines will be interpreted by the C++
    compiler not the code generator).
    """
    lines = []
    # Brief section: code items are emitted verbatim, prose is word-wrapped.
    if node.brief:
        for item in node.brief:
            if hasattr(item, "tag"):
                lines.extend(self.formatCode(item))
            else:
                lines.extend(textwrap.wrap(item, width=settings.docwidth))
        lines.append("")
    # Parameter section: align descriptions after the longest name.
    if hasattr(node, "params") and node.params:
        name_width = 0
        for param in node.params:
            if param.name and param.brief and len(param.name) > name_width:
                name_width = len(param.name)
        if name_width > 0:
            lines.append("Arguments:")
            # Continuation lines line up under the description column
            # (name + separator + surrounding spaces = name_width + 5).
            wrapper = textwrap.TextWrapper(
                initial_indent=" ",
                subsequent_indent=(" " * (name_width + 5)),
                width=settings.docwidth
            )
            for param in node.params:
                if not param.name or len(param.name) == 0:
                    continue
                # Dashes pad shorter names so descriptions stay aligned.
                sep = "-" * (name_width + 1 - len(param.name))
                if param.brief:
                    lines.extend(
                        wrapper.wrap(
                            "{name} {sep} {descr}".format(
                                name=param.name, sep=sep, descr=param.brief[0])
                        )
                    )
                # Any further brief items follow without the name column.
                if len(param.brief) > 1:
                    for item in param.brief[1:]:
                        if hasattr(item, "tag"):
                            lines.extend(self.formatCode(item))
                        else:
                            lines.extend(textwrap.wrap(item, width=settings.docwidth))
            lines.append("")
    # Detailed section, same formatting rules as the brief section.
    if node.detailed:
        for item in node.detailed:
            if hasattr(item, "tag"):
                lines.extend(self.formatCode(item))
            else:
                lines.extend(textwrap.wrap(item, width=settings.docwidth))
        lines.append("")
    if not lines:
        return '""'
    # Escape backslashes and quotes so each line is a valid C string literal.
    lines = [line.replace('\\', r'\\') for line in lines]
    lines = [line.replace('"', r'\"') for line in lines]
    # One quoted fragment per line; adjacent C string literals concatenate,
    # and each fragment ends with an escaped newline.
    template = '{indent}"{line}\\n"'
    return "\n".join(
        [template.format(indent="", line=lines[0])] +
        [template.format(indent=indent, line=line) for line in lines[1:]]
    )
def _CheckAndHandleCredentialException(e, args):
    """Emit a tailored help message for credential-related gsutil failures.

    Distinguishes the no-configured-credentials case from Google Cloud
    account problems and hands an explanatory message to _OutputAndExit.
    """
    # Provide detail to users who have no boto config file (who might
    # previously have been using gsutil only for accessing publicly
    # readable buckets and objects).
    # pylint: disable=g-import-not-at-top
    from gslib.util import HasConfiguredCredentials
    if not HasConfiguredCredentials():
        # The bypass flag lets tests assert a particular expected failure
        # instead of always hitting this message when no credentials are
        # configured -- it simulates a second user without permissions
        # without actually configuring two separate users.
        if not boto.config.get_value('Tests', 'bypass_anonymous_access_warning', False):
            _OutputAndExit('\n'.join(textwrap.wrap(
                'You are attempting to access protected data with no configured '
                'credentials. Please visit '
                'https://cloud.google.com/console#/project and sign up for an '
                'account, and then run the "gsutil config" command to configure '
                'gsutil to use these credentials.')))
            return
    reason = e.reason
    looks_like_account_problem = reason and (
        reason == 'AccountProblem' or
        reason == 'Account disabled.' or
        'account for the specified project has been disabled' in reason)
    if looks_like_account_problem and ','.join(args).find('gs://') != -1:
        _OutputAndExit('\n'.join(textwrap.wrap(
            _ConstructAccountProblemHelp(reason))))
def _get_trait_desc(self, inputs, name, spec):
    """Build the wrapped, tab-indented help text for one input trait.

    Returns a list of lines: the trait name with its accepted-values
    summary, then the description, mutually-exclusive names and required
    fields, each wrapped to 90 columns.
    """
    desc = spec.desc
    xor = spec.xor
    requires = spec.requires
    manhelpstr = ["\t%s" % name]
    # Deliberately assign None to trigger a TraitError whose `info` string
    # describes the values this trait accepts.
    try:
        setattr(inputs, name, None)
    except TraitError as excp:
        def_val = ""
        if getattr(spec, "usedefault"):
            def_val = ", nipype default value: %s" % str(getattr(spec, "default_value")()[1])
        line = "(%s%s)" % (excp.info, def_val)
        manhelpstr = wrap(line, 90, initial_indent=manhelpstr[0] + ": ",
                          subsequent_indent="\t\t  ")
    if desc:
        for line in desc.split("\n"):
            manhelpstr += wrap(line, 90, initial_indent="\t\t",
                               subsequent_indent="\t\t")
    if xor:
        line = "%s" % ", ".join(xor)
        manhelpstr += wrap(line, 90, initial_indent="\t\tmutually_exclusive: ",
                           subsequent_indent="\t\t  ")
    if requires:  # and name not in xor_done:
        # List the other fields this trait requires (excluding itself).
        others = [field for field in requires if field != name]
        line = "%s" % ", ".join(others)
        manhelpstr += wrap(line, 90, initial_indent="\t\trequires: ",
                           subsequent_indent="\t\t  ")
    return manhelpstr
def format_private_key(key, heads=True):
    """
    Returns a private key (adding header & footer if required).

    :param key A private key
    :type: string

    :param heads: True if we want to include head and footer
    :type: boolean

    :returns: Formated private key
    :rtype: string
    """
    # Flatten the key to a single line first (CR and LF removed).
    body = key.replace('\x0D', '').replace('\r', '').replace('\n', '')
    if not body:
        return body
    # Choose the PEM label: PKCS#8 if a PRIVATE KEY header is present,
    # otherwise assume a traditional RSA key.
    if body.find('-----BEGIN PRIVATE KEY-----') != -1:
        begin, end = '-----BEGIN PRIVATE KEY-----', '-----END PRIVATE KEY-----'
    else:
        begin, end = '-----BEGIN RSA PRIVATE KEY-----', '-----END RSA PRIVATE KEY-----'
    body = body.replace(begin, '').replace(end, '').replace(' ', '')
    if heads:
        # Re-emit as standard PEM: header, 64-char base64 lines, footer.
        body = begin + '\n' + '\n'.join(wrap(body, 64)) + '\n' + end + '\n'
    return body
def __call__(self, parser, namespace, values, option_string=None): fname = os.path.abspath(values[0]) if not os.access(fname, os.R_OK): print textwrap.wrap("Error: Unable to read from file {0}. Check "\ "the input file and try again.".format(fname)) sys.exit(0) raise NeedToParseInFileException(fname)
def formatMessage(msgline, width=70):
    """Format a long single line message so that it is easier to read.

    *msgline* is a single message string.  A plain message is simply
    word-wrapped to *width*.  A message of the form ``<decl>;<msg>``
    (exactly one ';') keeps the declaration unbroken on its own line and
    wraps the message part, prefixing every wrapped line with '> '.
    """
    parts = msgline.split(";")
    if len(parts) != 2:
        # Not in <decl>;<msg> form -- plain word-wrap.
        return os.linesep.join(textwrap.wrap(msgline, width))
    decl, msg = parts
    out = [decl]  # keep the declaration string on a single line
    out.extend("> " + chunk.strip() for chunk in textwrap.wrap(msg, width))
    return os.linesep.join(out)
def main(tag, old_tag, report_type, line_limit):
    """Script to assist in generating the release changelog

    This script will generate a simple or full diff of PRs that are merged
    and present the data in a way that is easy to copy/paste into an email
    or git tag.
    """
    repo = git.Repo(".")
    # Default the diff base to the most recent reachable tag.
    if old_tag is None:
        old_tag = repo.git.describe(tags=True, abbrev=0)
    commits = list(repo.iter_commits("{}..master".format(old_tag)))
    print("integration_tests {} Released".format(tag))
    print("")
    print("Includes: {} -> {}".format(old_tag, tag))
    # Column width for the label field, sized to the longest known label.
    max_len_labels = max(map(len, VALID_LABELS))
    prs = get_prs()
    if report_type in ("full", "brief"):
        # Width of the widest PR number, for alignment.
        max_len_pr = len(str(max(prs)))
        # Blank-fields line used to indent wrapped title continuation lines.
        followup_line = first_line = LINE_FMT.format(
            pr="", pr_len=max_len_pr, label="", label_len=max_len_labels
        )
        for pr_number in pr_numbers_in_commit_order(commits, prs):
            label = prs[pr_number].label
            title = clean_commit(prs[pr_number].title)
            first_line = LINE_FMT.format(
                pr=pr_number, pr_len=max_len_pr, label=label, label_len=max_len_labels
            )
            if report_type == "full":
                print("=" * line_limit)
            # Wrap the title to fit after the PR/label prefix.
            title = textwrap.wrap(title, line_limit - len(followup_line))
            print(first_line + title[0])
            for line in title[1:]:
                print(followup_line + line)
            if report_type == "full":
                # Full report also prints the wrapped PR body.
                print("-" * line_limit)
                string = clean_body(prs[pr_number].body)
                print(
                    "\n".join(
                        textwrap.wrap(string, line_limit, replace_whitespace=False)
                    )
                )
                print("=" * line_limit)
                print("")
    elif report_type == "stats":
        # Stats mode: just count PRs per label.
        labels = defaultdict(int)
        for pr_number in pr_numbers_in_commit_order(commits, prs):
            labels[prs[pr_number].label] += 1
        print(tabulate.tabulate(sorted(labels.items()), headers=["Label", "Number"]))
def wrap_columns(col1, col2, width1=24, width2=40, indent=31):
    """
    Takes two strings of text and turns them into nicely formatted column
    output. Used by display_module()
    """
    # Wrap each column's text independently to its own width.
    lines1 = textwrap.wrap(textwrap.dedent(col1).strip(), width=width1)
    lines2 = textwrap.wrap(textwrap.dedent(col2).strip(), width=width2)
    result = ''
    # Emit as many rows as the taller of the two columns needs.
    limit = max(len(lines1), len(lines2))
    for x in xrange(limit):  # NOTE(review): xrange -- Python 2 only code
        if x < len(lines1):
            # Continuation rows of column 1 are pushed right by `indent`.
            if x != 0:
                result += ' '*indent
            result += '{line: <0{width}s}'.format(width=width1, line=lines1[x])
        else:
            # Column 1 exhausted: pad with blanks so column 2 stays aligned.
            if x == 0:
                result += ' '*width1
            else:
                result += ' '*(indent + width1)
        if x < len(lines2):
            result += ' ' + '{line: <0{width}s}'.format(width=width2, line=lines2[x])
        # No trailing newline after the final row.
        if x != limit-1:
            result += "\n"
    return result
def draw_book_title(file_path, book_title):
    """Overlay *book_title*, word-wrapped, onto the image at *file_path*
    and save the result in place."""
    import PIL
    from PIL import ImageFont
    from PIL import Image
    from PIL import ImageDraw
    import textwrap

    left_margin = 41
    y = 315
    # Try a narrow wrap first; if the title needs more than three lines,
    # re-wrap wider and drop to a smaller point size.
    lines = textwrap.wrap(book_title, width=12,
                          break_long_words=False, break_on_hyphens=False)
    if len(lines) > 3:
        lines = textwrap.wrap(book_title, width=16,
                              break_long_words=False, break_on_hyphens=False)
        point_size = 69
    else:
        point_size = 81
    font = ImageFont.truetype("/Library/Fonts/Arial Narrow Bold.ttf", point_size)

    image = Image.open(file_path)
    pen = ImageDraw.Draw(image)
    for text_line in lines:
        pen.text((left_margin, y), text_line, font=font)
        # Advance by the rendered height of this line.
        y += font.getsize(text_line)[1]
    image.save(file_path)
def __get_docstr(text, indent=0):
    """ Format a docstring. Take the first sentence (. followed by a space)
    and use it for the brief. Then put the rest of the text after a blank
    line if there is text there """
    # NOTE(review): .encode() turns `text` into bytes under Python 3, which
    # would break the str operations below -- this code assumes Python 2.
    text = text.strip().encode("utf-8")
    dotpos = text.find(". ")
    if dotpos > 0:
        # Split at the first sentence boundary; brief keeps the period.
        brief = text[:dotpos+1]
        content = text[dotpos+2:]
    else:
        brief = text
        content = ""
    if indent == 0:
        istr = ""
    else:
        # A run of `indent` spaces prefixed to every emitted line.
        istr = "{0:{1}}".format(" ", indent)
    # Continuation lines of the Doxygen comment start with ' * '.
    brief = "\n{0} * ".format(istr).join(textwrap.wrap(brief, 80))
    content = "\n{0} * ".format(istr).join(textwrap.wrap(content, 80))
    docstr = "{0}/** \\brief {1}".format(istr, brief)
    if len(content) > 0:
        # Detailed text goes after a blank comment line.
        docstr += "\n{0} * \n{0} * {1}".format(istr, content)
    docstr += "\n{0} */".format(istr)
    return docstr
def _splitit(self, line, isheader):
    """Split each element of line to fit the column width

    Each element is turned into a list, result of the wrapping of the
    string to the desired width
    """
    # NOTE(review): no return statement is visible in this excerpt; if the
    # method should produce `line_wrapped`, confirm against the full file.
    line_wrapped = []
    for cell, width in zip(line, self._width):
        array = []
        original_cell = cell
        # Strip bcolors escape sequences from the cell, remembering which
        # color was removed so it can be re-applied after wrapping.
        lost_color = bcolors.WHITE
        for attr in bcolors_public_props():
            cell = cell.replace(
                getattr(bcolors, attr), '').replace(bcolors.ENDC, '')
            if cell.replace(bcolors.ENDC, '') != original_cell.replace(
                    bcolors.ENDC, '') and attr != 'ENDC':
                # NOTE(review): lost_color starts as bcolors.WHITE; if that
                # value is truthy this assignment can never fire -- verify.
                if not lost_color:
                    lost_color = attr
        for c in cell.split('\n'):
            # Python 2 unicode coercion, falling back to replacement chars.
            try:
                c = unicode(c, 'utf')
            except UnicodeDecodeError, strerror:
                sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
                c = unicode(c, 'utf', 'replace')
            try:
                # Re-colorize each wrapped chunk with the stripped color.
                array.extend(
                    [get_color_string(getattr(bcolors, lost_color), x)
                     for x in textwrap.wrap(c, width)])
            except AttributeError:
                # lost_color is not a valid bcolors name: wrap uncolored.
                array.extend(textwrap.wrap(c, width))
        line_wrapped.append(array)
def __init__(self, prog='', description=None, epilog=None):
    """Initialize the parser shell: store the program name and the
    description/epilog texts pre-wrapped to MAX_WIDTH columns."""
    self.prog = prog
    # A None description/epilog becomes the empty string before wrapping.
    self.description = '\n'.join(textwrap.wrap(description or '', MAX_WIDTH))
    self.epilog = '\n'.join(textwrap.wrap(epilog or '', MAX_WIDTH))
    self.__arguments__ = {}    # option name -> spec mapping
    self.__positionals__ = []  # ordered positional argument specs
    self.ignore_urls = True
def print_scrape_results_http(results, verbosity=1, view=False):
    """Print the results obtained by "http" method."""
    # `results` is an iterable of batches, each yielding result dicts.
    for t in results:
        for result in t:
            logger.info('{} links found! The search with the keyword "{}" yielded the result:{}'.format(
                len(result['results']), result['search_keyword'], result['num_results_for_kw']))
            if view:
                # Open the cached SERP page in the default browser.
                import webbrowser
                webbrowser.open(result['cache_file'])
            import textwrap
            # Organic results plus the two ad blocks, when present.
            for result_set in ('results', 'ads_main', 'ads_aside'):
                if result_set in result.keys():
                    print('### {} link results for "{}" ###'.format(len(result[result_set]), result_set))
                    for link_title, link_snippet, link_url, link_position in result[result_set]:
                        try:
                            print(' Link: {}'.format(urllib.parse.unquote(link_url.geturl())))
                        except AttributeError as ae:
                            # link_url is not a parsed URL object.
                            print(ae)
                        if verbosity > 1:
                            # Verbose mode: tab-indented, wrapped title/snippet.
                            print(' Title: \n{}'.format(textwrap.indent('\n'.join(textwrap.wrap(link_title, 50)), '\t')))
                            print(' Description: \n{}\n'.format(textwrap.indent('\n'.join(textwrap.wrap(link_snippet, 70)), '\t')))
                        # Visual separator between individual links.
                        print('*' * 70)
                        print()
def showPlugins(self):
    """Print list of available plugins.
    """
    import textwrap

    class DummyParser:
        # Minimal stand-in for an optparse parser: records each option's
        # flag strings and help text so they can be printed below.
        def __init__(self):
            self.options = []

        def add_option(self, *arg, **kw):
            self.options.append((arg, kw.pop('help', '')))

    v = self.config.verbosity
    self.config.plugins.sort()
    for p in self.config.plugins:
        print "Plugin %s" % p.name
        if v >= 2:
            print " score: %s" % p.score
        # Plugin help text, wrapped and indented one level.
        print '\n'.join(textwrap.wrap(p.help().strip(),
                                      initial_indent=' ',
                                      subsequent_indent=' '))
        if v >= 3:
            print
            print " Options:"
            # Ask the plugin to register its options into the dummy
            # parser, then pretty-print whatever it recorded.
            parser = DummyParser()
            p.addOptions(parser)
            for opts, help in parser.options:
                print ' %s' % (', '.join(opts))
                if help:
                    print '\n'.join(
                        textwrap.wrap(help.strip(),
                                      initial_indent=' ',
                                      subsequent_indent=' '))
            print
def wrap_attr(self, name):
    """Render attribute *name* as rows of a fixed-width two-column table.

    The first row carries the attribute name; continuation rows (for
    values that wrap past 70 columns) leave the name column blank.
    Returns the formatted lines as a list of strings.
    """
    row = '| {:30.30} | {:70.70} |'
    raw = self.__dict__[name]
    # Normalize the value into a list of display chunks.
    if raw is None:
        chunks = ['']
    elif isinstance(raw, str):
        chunks = wrap(raw, 70)
    elif isinstance(raw, list):
        chunks = wrap(str(raw), 70) if raw else ['']
    else:
        chunks = raw  # other types pass through and are iterated as-is
    return [row.format(name if pos == 0 else '', chunk)
            for pos, chunk in enumerate(chunks)]
def __init__(self, vars, path, raise_warnings=False): # Defaults self.path = path self.dependencies = set() self.experimental = False self.run_verify = True self.sync_count = 1 self.test_parameters = set() diff = REQUIRED_VARS - set(vars) if len(diff) > 0: warning = ("WARNING: Not all required control " "variables were specified in %s. Please define " "%s.") % (self.path, ', '.join(diff)) if raise_warnings: raise ControlVariableException(warning) print textwrap.wrap(warning, 80) for key, val in vars.iteritems(): try: self.set_attr(key, val, raise_warnings) except Exception, e: if raise_warnings: raise print "WARNING: %s; skipping" % e
def format_commands_for_rst_table(title, command_list):
    """Render *command_list* as an RST grid table (list of lines).

    Single-cell rows become bold full-width headings; two-cell rows are
    a bolded ``-- command args`` column beside a wrapped description
    column.  *title* is currently unused.
    """
    col1 = WIDTH          # internal width of the command column
    col2 = 79 - col1 - 3  # internal width of the description column
    horiz = '+-' + '-' * col1 + '-+-' + '-' * col2 + '-+'

    out = [horiz]  # top border
    for cells in _command_list_to_table_cells(command_list):
        if len(cells) == 1:
            # Heading row spanning both columns, rendered bold.
            out.append('| ' + ('**' + cells[0] + '**').ljust(col1 + col2 + 3) + ' |')
        elif len(cells) == 2:
            command, _, args = cells[0].partition(' ')
            left = textwrap.wrap('**-- %s** %s' % (command, args), col1)
            right = textwrap.wrap(cells[1], col2)
            # Pair up wrapped lines; pad whichever column runs out first.
            while left or right:
                out.append('| ' +
                           (left.pop(0) if left else '').ljust(col1) +
                           ' | ' +
                           (right.pop(0) if right else '').ljust(col2) +
                           ' |')
        else:
            assert False
        out.append(horiz)
    return out
def _compile_scripts_txt():
    """Download Unicode's Scripts.txt and print a compact `script_data`
    Python literal: script names, categories and sorted codepoint ranges
    as (lo, hi, name_index, cat_index) tuples."""
    # build indexes from 'scripts.txt'
    idx = []
    names = []
    cats = []
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    import re
    import textwrap

    url = 'http://www.unicode.org/Public/UNIDATA/Scripts.txt'
    # Matches "LO..HI ; Name # Cat" entries; the "..HI" part is optional
    # for single codepoints.  Compiled once, outside the loop.
    line_re = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)')
    for ln in urlopen(url):
        # BUG FIX: urlopen yields bytes on Python 3; decode before matching,
        # otherwise re.findall(str_pattern, bytes) raises TypeError.
        if isinstance(ln, bytes):
            ln = ln.decode('utf-8')
        p = line_re.findall(ln)
        if p:
            a, b, name, cat = p[0]
            if name not in names:
                names.append(name)
            if cat not in cats:
                cats.append(cat)
            # Single codepoints (no '..HI') become 1-length ranges.
            idx.append((int(a, 16), int(b or a, 16), names.index(name), cats.index(cat)))
    idx.sort()
    print('script_data = {\n"names":%s,\n"cats":%s,\n"idx":[\n%s\n]}' % (
        '\n'.join(textwrap.wrap(repr(names), 80)),
        '\n'.join(textwrap.wrap(repr(cats), 80)),
        '\n'.join(textwrap.wrap(', '.join('(0x%x,0x%x,%d,%d)' % c for c in idx), 80))))
def print_defines():
    '''Print the #defines for the wiredtiger.in file.'''
    # Connection statistics: a doxygen comment plus a #define per stat,
    # numbered by position in connection_stats.  Output goes to the
    # module-level file handle `f`.
    f.write('''
/*!
 * @name Connection statistics
 * @anchor statistics_keys
 * @anchor statistics_conn
 * Statistics are accessed through cursors with \c "statistics:" URIs.
 * Individual statistics can be queried through the cursor using the following
 * keys.  See @ref data_statistics for more information.
 * @{
 */
''')
    for v, l in enumerate(connection_stats):
        # Wrap the description into the doxygen comment body.
        f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
        # Pad with tabs so the numeric value lines up at tab stop 6
        # regardless of the macro name's length.
        f.write('#define\tWT_STAT_CONN_' + l.name.upper() + "\t" *
                max(1, 6 - int((len('WT_STAT_CONN_' + l.name)) / 8)) +
                str(v) + '\n')
    # Data-source statistics, same layout as above.
    f.write('''
/*!
 * @}
 * @name Statistics for data sources
 * @anchor statistics_dsrc
 * @{
 */
''')
    for v, l in enumerate(dsrc_stats):
        f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
        f.write('#define\tWT_STAT_DSRC_' + l.name.upper() + "\t" *
                max(1, 6 - int((len('WT_STAT_DSRC_' + l.name)) / 8)) +
                str(v) + '\n')
    f.write('/*! @} */\n')
def create_image(message, user, location):
    """Render *message* onto a tweet-style PNG with a " -- user, location"
    attribution line and an optional watermark logo, then save the
    result to test_tweet_reply.png.

    Relies on module globals: font_path, Font, font_size, watermark_logo,
    background_color, font_color and the wrap_text() helper.
    """
    font = ImageFont.truetype(os.path.join(font_path, Font), int(font_size))
    margin = offset = 20
    if watermark_logo:
        wrap = textwrap.wrap(message, width=50)
    else:
        # No logo then make the lines a bit wider
        wrap = textwrap.wrap(message, width=55)
    # Height of one rendered text line, measured from the first line.
    line_height = font.getsize(wrap[0])[1]
    print "line height: " + str(line_height)
    print wrap
    # Make the image double the size to start with, so that we can apply a anti-alias function when we scale down. Text looks better (I think)
    # enough space for "margin" at the top and bottom and also a .5 margin between the comment and the attribution
    img = Image.new("RGBA", (900, int(2.5*margin+line_height*(len(wrap)+1))), (background_color))
    draw = ImageDraw.Draw(img)
    wrap_text(wrap, font, draw)
    # Attribution line: em-dash, user, location, below the message block.
    draw.text((margin, int(1.5*margin+line_height*len(wrap))),
              u" \u2014 " + user + ", " + location, font=font, fill=font_color)
    if watermark_logo:
        # If there's a logo file provided then make space for it and paste it into the upper right corner.
        logo = Image.open(watermark_logo)
        box = (800, 0, 900, 100)
        img.paste(logo, box)
    # Scale down 2:1 with anti-aliasing for smoother text.
    img_resized = img.resize((450, int(2.5*.5*margin+(line_height * .5)*(len(wrap)+1))), Image.ANTIALIAS)
    draw = ImageDraw.Draw(img_resized)
    img_resized.save("test_tweet_reply.png")
def save_fig(y, summary_index, total_matrix, gene_matrix, t, ARS, plottitle, filetitle, tumor_name):
    """Draw a Kaplan-Meier survival plot comparing high vs. low expression
    of *ARS* in tumor cohort *y*, save it as a PNG, and append a summary
    row (counts, 5y/10y favorability, p-value) to *summary_index*.

    Returns the updated summary_index DataFrame.
    """
    # Split samples into high/low expression groups at threshold t.
    df_high, df_low, total_number = divide_into_two(gene_matrix, total_matrix, t)
    fig = plt.figure()
    fig.suptitle("\n".join(wrap(('Expression of ' + str(ARS) + ' in ' + tumor_name))), fontsize = 13)
    # These two cohorts need extra headroom for their longer titles.
    if (y == "TCGA-CESC") or (y == "TCGA-DLBC"):
        fig.subplots_adjust(top = 0.80)
    else:
        fig.subplots_adjust(top = 0.85)
    ax = plt.subplot(111)
    ax.set_title("\n".join(wrap(plottitle)), fontsize = 11)
    plt.ylim(0, 1)
    # KM curves; hm*/lm* are the survival rates at 5 and 10 years for the
    # high- and low-expression groups respectively.
    p_value, hm5, hm10, lm5, lm10 = kmplot(df_high, df_low, ax)
    favorable5y = survival_compare(hm5, lm5)
    favorable10y = survival_compare(hm10, lm10)
    df = pd.DataFrame()
    rows = []
    rows.append([ARS, plottitle, total_number, favorable5y, favorable10y, p_value])
    # `cols` is a module-level column list shared by all summary rows.
    df = pd.DataFrame(rows, columns = cols)
    summary_index = summary_index.append(df)
    # NOTE(review): hard-coded Windows path -- consider os.path.join and a
    # configurable output root.
    fig.savefig('X:\\Su Lab\\TCGA\\Data\\Plot\\' + y + "\\" + ARS + "\\" + y + '-' + ARS + '-' + filetitle + '.png')
    plt.close("all")
    return(summary_index)
def render_article(term, html_text):
    """ Render and return html text of article as text. """
    renderer = html2text.HTML2Text(bodywidth=term.width - 1)
    renderer.ignore_links = True
    renderer.ignore_images = True

    collected = []
    for line in renderer.handle(html_text).splitlines():
        if len(line) < term.width:
            collected.append(line)
            continue
        # html2text does not always honor `bodywidth': measure the leading
        # whitespace so continuation lines keep the original indentation,
        # then re-wrap the line ourselves.
        lead = 0
        for lead, ch in enumerate(line):
            if not ch.isspace():
                break
        collected.extend(textwrap.wrap(line, term.width - 1,
                                       subsequent_indent=u' ' * lead))

    final = [chunk.rstrip() for chunk in collected]
    if not final or not any(chunk for chunk in final):
        # html2text produced nothing: show a vertically centered notice.
        final = [''] * (term.height // 2)
        final.extend(textwrap.wrap(MSG_NOTEXT, term.width - 1))
    return final
def _get_trait_desc(self, inputs, name, spec):
    """Build the wrapped, tab-indented help lines for one input trait:
    accepted values, description, mutually-exclusive names and required
    fields, each wrapped to 90 columns."""
    desc = spec.desc
    xor = spec.xor
    requires = spec.requires
    manhelpstr = ['\t%s' % name]
    # Assigning None deliberately provokes a TraitError whose `info`
    # attribute describes the values this trait accepts.
    try:
        setattr(inputs, name, None)
    except TraitError as excp:
        def_val = ''
        if getattr(spec, 'usedefault'):
            def_val = ', nipype default value: %s' % str(getattr(spec, 'default_value')()[1])
        manhelpstr = wrap('(%s%s)' % (excp.info, def_val), 90,
                          initial_indent=manhelpstr[0] + ': ',
                          subsequent_indent='\t\t  ')
    if desc:
        for desc_line in desc.split('\n'):
            manhelpstr += wrap(desc_line, 90,
                               initial_indent='\t\t',
                               subsequent_indent='\t\t')
    if xor:
        manhelpstr += wrap('%s' % ', '.join(xor), 90,
                           initial_indent='\t\tmutually_exclusive: ',
                           subsequent_indent='\t\t  ')
    if requires:  # and name not in xor_done:
        # List the other fields this trait requires (excluding itself).
        others = [field for field in requires if field != name]
        manhelpstr += wrap('%s' % ', '.join(others), 90,
                           initial_indent='\t\trequires: ',
                           subsequent_indent='\t\t  ')
    return manhelpstr
def test_encoding_detection(file_name, encoding):
    """Check chardet's detected encoding for *file_name* against *encoding*.

    A name mismatch only fails when the detected encoding actually decodes
    the bytes differently from the expected one; in that case the assertion
    message carries a character-level diff.
    """
    with open(file_name, 'rb') as f:
        raw = f.read()
    result = chardet.detect(raw)

    # Decode under the expected encoding (unknown codec -> '').
    try:
        expected_unicode = raw.decode(encoding)
    except LookupError:
        expected_unicode = ''
    # Decode under the detected encoding; detection may be None or wrong,
    # so tolerate a wider set of failures here.
    try:
        detected_unicode = raw.decode(result['encoding'])
    except (LookupError, UnicodeDecodeError, TypeError):
        detected_unicode = ''

    encoding_match = bool(result) and (result['encoding'] or '').lower() == encoding

    diff = ''
    if not encoding_match and expected_unicode != detected_unicode:
        # Behavioral mismatch: build a line diff of the two decodings.
        exp_block = '\n'.join(textwrap.wrap(expected_unicode, 100)) + '\n'
        det_block = '\n'.join(textwrap.wrap(detected_unicode, 100)) + '\n'
        diff = ''.join(ndiff(exp_block.splitlines(True),
                             det_block.splitlines(True)))
    else:
        # Same decoded text (or names match): treat as a pass.
        encoding_match = True
    assert encoding_match, ("Expected %s, but got %s for %s. Character "
                            "differences: \n%s" % (encoding, result, file_name, diff))
def format_bundle_info(log, descriptor):
    """
    Formats a release notes summary output for an app, engine or core
    """
    # yay we can install! - get release notes
    summary, url = descriptor.get_changelog()
    if summary is None:
        summary = "No details provided."

    log.info("/%s" % ("-" * 70))
    log.info("| Item: %s" % descriptor)
    log.info("|")

    def _emit_wrapped(text):
        # Wrap long text into the 70-column box, prefixing each line.
        for boxed_line in textwrap.wrap(text, width=68,
                                        initial_indent="| ",
                                        subsequent_indent="| "):
            log.info(boxed_line)

    _emit_wrapped("Description: %s" % descriptor.get_description())
    log.info("|")
    _emit_wrapped("Change Log: %s" % summary)
    log.info("\%s" % ("-" * 70))
def hanging_indent(text, intro, termwidth=None, change_spaces=True, introwidth=None):
    """Produce text with a hanging indent.

    .. versionadded:: 3.3.0

    .. versionchanged:: 4.0.0
    """
    if termwidth is None:
        termwidth = get_termwidth() or 9001
    if introwidth is None:
        introwidth = len(intro)
    combined = intro + text
    # First pass: with a non-empty intro the first line may use the full
    # width; otherwise everything is wrapped to the reduced width.
    first_width = termwidth if intro else termwidth - introwidth
    pieces = textwrap.wrap(combined, first_width, break_on_hyphens=False)
    head = pieces[0]
    # Second pass: re-wrap everything after the first line to the reduced
    # width so it can be indented under the intro.
    tail = textwrap.wrap('\n'.join(pieces[1:]), termwidth - introwidth,
                         break_on_hyphens=False)
    if change_spaces:
        tail = [piece.replace(' ', ' ').replace(' ', ' ') for piece in tail]
    return '\n'.join([head] + [introwidth * ' ' + piece for piece in tail])
def format_command_help(command_list):
    """Render *command_list* as indented two-column help text.

    Single-cell rows become bold section headings; two-cell rows are laid
    out as ``command : description`` with both columns word-wrapped and
    the ':' shown only on the first physical line of each row.
    """
    out = []
    for cells in _command_list_to_table_cells(command_list):
        if len(cells) == 1:
            # Heading: blank line, bold title, blank line.
            out.extend(['', bold(cells[0]), ''])
        elif len(cells) == 2:
            left = textwrap.wrap(cells[0], WIDTH, subsequent_indent='  ')
            right = textwrap.wrap(cells[1], 79 - INDENT - 3 - WIDTH)
            sep = ' : '  # continuation rows get a plain space instead
            while left or right:
                chunk = ' ' * INDENT
                chunk += left.pop(0).ljust(WIDTH) if left else ' ' * (WIDTH)
                chunk += sep
                sep = ' '
                if right:
                    chunk += right.pop(0)
                out.append(chunk)
    return '\n'.join(out)
def unwrap(txt):
    """Collapse *txt* onto a single line: dedent, strip, then re-join the
    word-wrapped pieces with single spaces."""
    cleaned = textwrap.dedent(txt).strip()
    return ' '.join(textwrap.wrap(cleaned))
'comma-separated list of HLA alleles (e.g. A*01:01,A*11:01,...)\narcasHLA output genotype.json or genotypes.json \nor tsv with format specified in README.md', metavar='', type=str) parser.add_argument('-s', '--subject', help='subject name, only required for list of alleles', default='', metavar='', type=str) parser.add_argument( '-g', '--genes', help='comma separated list of HLA genes\n' + 'default: all\n' + '\n'.join(wrap('options: ' + ', '.join(sorted(genes)), 60)) + '\n\n', default='', metavar='', type=str) parser.add_argument( '--transcriptome', type=str, help= 'transcripts to include besides input HLAs\n options: full, chr6, none\n default: full\n\n', default='full') parser.add_argument( '--resolution', type=int, help=
def download(url, output=None, quiet=False, proxy=None, speed=None, use_cookies=True):
    """Download file from URL.

    Parameters
    ----------
    url: str
        URL. Google Drive URL is also supported.
    output: str, optional
        Output filename. Default is basename of URL.
    quiet: bool
        Suppress terminal output. Default is False.
    proxy: str
        Proxy.
    speed: float
        Download byte size per second (e.g., 256KB/s = 256 * 1024).
    use_cookies: bool
        Flag to use cookies. Default is True.

    Returns
    -------
    output: str
        Output filename.
    """
    url_origin = url
    sess = requests.session()

    # Load cookies persisted from previous runs (Google Drive's large-file
    # confirmation flow depends on them).
    cache_dir = osp.join(home, ".cache", "gdown")
    if not osp.exists(cache_dir):
        os.makedirs(cache_dir)
    cookies_file = osp.join(cache_dir, "cookies.json")
    if osp.exists(cookies_file) and use_cookies:
        with open(cookies_file) as f:
            cookies = json.load(f)
        for k, v in cookies:
            sess.cookies[k] = v
    if proxy is not None:
        sess.proxies = {"http": proxy, "https": proxy}
        print("Using proxy:", proxy, file=sys.stderr)

    file_id, is_download_link = parse_url(url)

    # Keep following Google Drive's confirmation redirects until the
    # response actually carries the file payload.
    while True:
        try:
            res = sess.get(url, stream=True)
        except requests.exceptions.ProxyError as e:
            print("An error has occurred using proxy:", proxy, file=sys.stderr)
            print(e, file=sys.stderr)
            return

        # Save cookies
        with open(cookies_file, "w") as f:
            json.dump(sess.cookies.items(), f, indent=2)

        if "Content-Disposition" in res.headers:
            # This is the file
            break
        if not (file_id and is_download_link):
            break

        # Need to redirect with confirmation
        try:
            url = get_url_from_gdrive_confirmation(res.text)
        except RuntimeError as e:
            print("Access denied with the following error:")
            error = "\n".join(textwrap.wrap(str(e)))
            error = indent_func(error, "\t")
            print("\n", error, "\n", file=sys.stderr)
            print(
                "You may still be able to access the file from the browser:",
                file=sys.stderr,
            )
            print("\n\t", url_origin, "\n", file=sys.stderr)
            return

        if url is None:
            print("Permission denied:", url_origin, file=sys.stderr)
            print(
                "Maybe you need to change permission over "
                "'Anyone with the link'?",
                file=sys.stderr,
            )
            return

    if output is None:
        # Prefer the server-provided filename when we have one.
        if file_id and is_download_link:
            m = re.search('filename="(.*)"', res.headers["Content-Disposition"])
            output = m.groups()[0]
        else:
            output = osp.basename(url)

    # `output` may also be an already-open writable file-like object.
    output_is_path = isinstance(output, six.string_types)

    if not quiet:
        print("Downloading...", file=sys.stderr)
        print("From:", url_origin, file=sys.stderr)
        print(
            "To:",
            osp.abspath(output) if output_is_path else output,
            file=sys.stderr,
        )

    if output_is_path:
        # Download into a sibling temp file first and move it into place at
        # the end, so an interrupted run never leaves a partial `output`.
        tmp_file = tempfile.mktemp(
            suffix=tempfile.template,
            prefix=osp.basename(output),
            dir=osp.dirname(output),
        )
        f = open(tmp_file, "wb")
    else:
        tmp_file = None
        f = output

    try:
        total = res.headers.get("Content-Length")
        if total is not None:
            total = int(total)
        if not quiet:
            pbar = tqdm.tqdm(total=total, unit="B", unit_scale=True)
        t_start = time.time()
        for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
            f.write(chunk)
            if not quiet:
                pbar.update(len(chunk))
            if speed is not None:
                # Simple rate limiting: sleep until real elapsed time
                # catches up with the expected time at `speed` bytes/sec.
                # NOTE(review): `pbar` is undefined here when quiet=True
                # and speed is set -- verify against upstream.
                elapsed_time_expected = 1.0 * pbar.n / speed
                elapsed_time = time.time() - t_start
                if elapsed_time < elapsed_time_expected:
                    time.sleep(elapsed_time_expected - elapsed_time)
        if not quiet:
            pbar.close()
        if tmp_file:
            f.close()
            shutil.move(tmp_file, output)
    except IOError as e:
        print(e, file=sys.stderr)
        return
    finally:
        # Always clean up the temp file on error paths.
        try:
            if tmp_file:
                os.remove(tmp_file)
        except OSError:
            pass

    return output
# Build a color-frequency table from the counted pixel data and optionally
# render a palette image.  NOTE(review): this fragment uses names defined
# earlier in the script (a, k, args, pt, tqdm, wrap) -- documented as far
# as visible here.
m = list(Counter(a).keys())    # distinct values (hex color strings '#rrggbb')
n = list(Counter(a).values())  # matching occurrence counts
num = {'maxval': n}
# Select the N most frequent colors (user-specified count, default 10).
if args.nlargest:
    lst = pd.DataFrame(num)
    t = lst.nlargest(int(abs(args.nlargest)), 'maxval')
    res = t.index.values
else:
    lst = pd.DataFrame(num)
    t = lst.nlargest(10, 'maxval')
    res = t.index.values
ty = ''
# Accumulate the total count over the selected colors for the percentage
# column below (k is pre-initialized earlier in the script).
for i in res:
    k = k + int(n[i])
for i in res:
    # Split '#rrggbb' (sans '#') into two-character hex pairs.
    e = wrap(str(m[i][1:]), 2)
    #ty+=str(int(e[0],16))+','+str(int(e[1],16))+','+str(int(e[2],16))
    pt.add_row([
        m[i], (str((float(n[i]) / k) * 100))[0:4],
        (int(e[0], 16), int(e[1], 16), int(e[2], 16))
    ])
print("Palette generated")
print(pt)
# 'v' palette mode: render each selected color as a swatch.
if str(args.palette) == 'v':
    x = 100
    y = 200
    # img = Image.new('RGB', (y, x), 'white')
    for i in tqdm(res, desc='Generating Palette', unit='unit'):
        e = wrap(str(m[i][1:]), 2)
        aa, bb, cc = int(e[0], 16), int(e[1], 16), int(e[2], 16)
#!/bin/python3
# Problem: find the k adjacent digits in an n-digit number that have the
# greatest product, for t test cases read from stdin.
#
# Sample Input:           Sample Output:
#   2
#   10 5                    3150
#   3675356291
#   10 5                    0
#   2709360626


def max_window_product(num, n, k):
    """Return the greatest product of any k adjacent digits of num.

    :param num: string of n decimal digits
    :param n: length of num
    :param k: window size, 1 <= k <= n
    :return: the maximum product over all length-k windows (0 when every
             window contains a zero digit, or when n < k)
    """
    best = 0
    for i in range(n - k + 1):
        # Multiply the k digits of the current window directly as ints.
        # This replaces the original eval("*".join(textwrap.wrap(...))),
        # which evaluated a string built from user input -- needlessly
        # unsafe and slow; digit products are never negative, so 0 is a
        # valid identity for the running maximum.
        prod = 1
        for d in num[i:i + k]:
            prod *= int(d)
        best = max(best, prod)
    return best


if __name__ == "__main__":
    # Guarding the I/O loop keeps this module importable without blocking
    # on input(); the original ran the loop unconditionally at import time
    # and also abused a list comprehension purely for its append side effect.
    t = int(input().strip())
    for _ in range(t):
        n, k = [int(x) for x in input().split()]
        num = input().strip()
        print(max_window_product(num, n, k))
height=barsep, width=0.5, bottom=baraug + barjul + barjun + barmay + barapr + barmar + barfeb + barjan) graphoct = plt.bar(x=indx, height=baroct, width=0.5, bottom=barsep + baraug + barjul + barjun + barmay + barapr + barmar + barfeb + barjan) graphnov = plt.bar(x=indx, height=barnov, width=0.5, bottom=baroct + barsep + baraug + barjul + barjun + barmay + barapr + barmar + barfeb + barjan) graphdec = plt.bar(x=indx, height=bardec, width=0.5, bottom=barnov + baroct + barsep + baraug + barjul + barjun + barmay + barapr + barmar + barfeb + barjan) plt.xlabel('States') plt.ylabel('FAAC Allocation (in 100 Billion)') states = ['\n'.join(wrap(state, 15)) for state in states] plt.xticks(indx, states) plt.title('2016 FAAC ALLOCATION PER MONTH') plt.show()
def wrap(string, width=50):
    """Word-wrap *string* to at most *width* columns per line.

    Lines are joined with newline characters. A ``None`` input yields
    the empty string, so callers never need a null check.
    """
    return "" if string is None else "\n".join(textwrap.wrap(string, width=width))
Fcal=info['Fcal'] iavesfr=info['iavesfr'] timestep=info['timestep'] haveB=info['haveB'] newlabel=info['newlabel'] cosmo=info['cosmo'] halostr=info['halostr'] firever=info['firever'] maindir=info['maindir'] multifile=info['multifile'] usepep=info['usepep'] snumadd=info['snumadd'] labelneed=dclabel print 'dclabel', dclabel if newlabelneed==1: labelneed="\n".join(wrap(newlabel,17)) plotit.append(labelneed) if runtitle=='SMC': ptitle='Dwarf' elif runtitle=='SBC': ptitle='Starburst' elif runtitle=='MW': ptitle=r'$L\star$ Galaxy' if i==1 and cosmo==0: plotupt.append(ptitle) else: plotupt.append('') print 'labelneed', labelneed if multifile=='y': fname=the_snapdir+'/snapdir_'+Nsnapstring+'/snapshot_'+Nsnapstring+'.0.hdf5' else:
def label_ims(ims_batch,
              labels=None,
              inverse_normalize=False,
              normalize=False,
              clip_flow=10,
              display_h=128,
              pad_top=None,
              clip_norm=None,
              padding_size=0,
              padding_color=255,
              border_size=0,
              border_color=0,
              color_space='rgb',
              combine_from_axis=0,
              concat_axis=0,
              interp=cv2.INTER_LINEAR):
    '''
    Displays a batch of matrices as a single labeled image strip.

    :param ims_batch: n_batches x h x w x c array of images.
    :param labels: optional labels. Can be an n_batches length list of tuples,
        floats or strings. A single non-list label is broadcast to the batch;
        a one-element list labels only the first image.
    :param inverse_normalize: boolean to do normalization from [-1, 1] to [0, 255]
        via image_utils.inverse_normalize.
    :param normalize: boolean to normalize any [min, max] to [0, 255]; the
        per-channel original min/max are appended to each label.
    :param clip_flow: float for the min, max absolute flow magnitude to display
        (used only when the input has 2 channels and is treated as x,y flow).
    :param display_h: integer number of pixels for the height of each image to display.
    :param pad_top: integer number of pixels to pad each image at the top with
        (for more readable labels).
    :param clip_norm: when set with normalize, values are mapped from
        [-clip_norm, clip_norm] to [0, 1] instead of per-image min/max.
    :param padding_size: pixels of padding inserted between images along concat_axis.
    :param padding_color: fill value for that padding.
    :param border_size: pixels of constant-color border added around every image.
    :param border_color: fill value for that border.
    :param color_space: string of either 'rgb' or 'ycbcr' to do color space
        conversion before displaying.
    :param combine_from_axis: axis of ims_batch that indexes the batch.
    :param concat_axis: integer axis number to concatenate batch along
        (default is 0 for rows); None splits the result back into a list.
    :param interp: cv2 interpolation flag used when resizing to display_h.
    :return: a single np.uint8 image (or a list of images when concat_axis
        is None).
    '''
    if isinstance(ims_batch, np.ndarray) and len(
            ims_batch.shape) == 3 and ims_batch.shape[-1] == 3:
        # already an image
        return ims_batch

    # transpose the image until batches are in the 0th axis
    if not combine_from_axis == 0:
        # compute all remaining axes
        all_axes = list(range(len(ims_batch.shape)))
        del all_axes[combine_from_axis]
        ims_batch = np.transpose(ims_batch,
                                 (combine_from_axis, ) + tuple(all_axes))

    batch_size = len(ims_batch)  # works for lists and np arrays
    h = ims_batch[0].shape[0]
    w = ims_batch[0].shape[1]
    if len(ims_batch[0].shape) == 2:
        n_chans = 1
    else:
        n_chans = ims_batch[0].shape[-1]

    if type(labels) == list and len(labels) == 1:  # only label the first image
        labels = labels + [''] * (batch_size - 1)
    elif labels is not None and not type(labels) == list and not type(
            labels) == np.ndarray:
        # broadcast a scalar label to every image in the batch
        labels = [labels] * batch_size

    scale_factor = display_h / float(h)

    if pad_top:
        im_h = int(display_h + pad_top)
    else:
        im_h = display_h

    im_w = round(scale_factor * float(w))

    # make sure we have a channels dimension
    if len(ims_batch.shape) < 4:
        ims_batch = np.expand_dims(ims_batch, 3)

    if ims_batch.shape[-1] == 2:  # assume to be x,y flow; map to color im
        X_fullcolor = np.concatenate(
            [ims_batch.copy(),
             np.zeros(ims_batch.shape[:-1] + (1, ))], axis=3)

        if labels is not None:
            # NOTE(review): this discards any caller-provided labels and
            # replaces them with flow min/max annotations below.
            labels = [''] * batch_size

        for i in range(batch_size):
            X_fullcolor[i], min_flow, max_flow = flow_to_im(
                ims_batch[i], clip_flow=clip_flow)

            # also include the min and max flow in the label
            if labels[i] is not None:
                labels[i] = '{},'.format(labels[i])
            else:
                labels[i] = ''
            for c in range(len(min_flow)):
                labels[i] += '({}, {})'.format(round(min_flow[c], 1),
                                               round(max_flow[c], 1))
        ims_batch = X_fullcolor.copy()
    elif ims_batch.shape[-1] > 3:  # not an image, probably labels
        # treat channels as one-hot class maps and render each class with a
        # color from a rainbow colormap
        n_labels = ims_batch.shape[-1]
        cmap = make_cmap_rainbow(n_labels)

        labels_im = classification_utils.onehot_to_labels(
            ims_batch, n_classes=ims_batch.shape[-1])
        labels_im_flat = labels_im.flatten()
        labeled_im_flat = np.tile(labels_im_flat[..., np.newaxis],
                                  (1, 3)).astype(np.float32)

        #for ei in range(batch_size):
        for l in range(n_labels):
            labeled_im_flat[labels_im_flat == l, :] = cmap[l]
        ims_batch = labeled_im_flat.reshape((-1, ) + ims_batch.shape[1:-1] +
                                            (3, ))
    elif inverse_normalize:
        ims_batch = image_utils.inverse_normalize(ims_batch)
    elif normalize:
        flattened_dims = np.prod(ims_batch.shape[1:])

        # per-image, per-channel min/max recorded for label annotation
        X_spatially_flat = np.reshape(ims_batch, (batch_size, -1, n_chans))
        X_orig_min = np.min(X_spatially_flat, axis=1)
        X_orig_max = np.max(X_spatially_flat, axis=1)

        # now actually flatten and normalize across channels
        X_flat = np.reshape(ims_batch, (batch_size, -1))
        if clip_norm is None:
            X_flat = X_flat - np.tile(np.min(X_flat, axis=1, keepdims=True),
                                      (1, flattened_dims))
            # avoid dividing by 0
            X_flat = X_flat / np.clip(
                np.tile(np.max(X_flat, axis=1, keepdims=True),
                        (1, flattened_dims)), 1e-5, None)
        else:
            # map [-clip_norm, clip_norm] to [0, 1]
            X_flat = X_flat - (-float(clip_norm))
            # avoid dividing by 0
            X_flat = X_flat / (2. * clip_norm)

        ims_batch = np.reshape(X_flat, ims_batch.shape)
        ims_batch = np.clip(ims_batch.astype(np.float32), 0., 1.)
        for i in range(batch_size):
            if labels is not None and len(labels) > 0:
                if labels[i] is not None:
                    labels[i] = '{},'.format(labels[i])
                else:
                    labels[i] = ''
                # show the min, max of each channel
                for c in range(n_chans):
                    labels[i] += '({:.2f}, {:.2f})'.format(
                        round(X_orig_min[i, c], 2), round(X_orig_max[i, c],
                                                          2))
    else:
        ims_batch = np.clip(ims_batch, 0., 1.)

    if color_space == 'ycbcr':
        for i in range(batch_size):
            ims_batch[i] = cv2.cvtColor(ims_batch[i], cv2.COLOR_YCR_CB2BGR)

    # scale [0, 1] images up to [0, 255] for display
    if np.max(ims_batch) <= 1.0:
        ims_batch = ims_batch * 255.0

    out_im = []
    for i in range(batch_size):
        # convert grayscale to rgb if needed
        if len(ims_batch[i].shape) == 2:
            curr_im = np.tile(np.expand_dims(ims_batch[i], axis=-1), (1, 1, 3))
        elif ims_batch.shape[-1] == 1:
            curr_im = np.tile(ims_batch[i], (1, 1, 3))
        else:
            curr_im = ims_batch[i]

        # scale to specified display size
        if not scale_factor == 1:
            curr_im = cv2.resize(curr_im,
                                 None,
                                 fx=scale_factor,
                                 fy=scale_factor,
                                 interpolation=interp)

        if pad_top:
            curr_im = np.concatenate([
                np.zeros((pad_top, curr_im.shape[1], curr_im.shape[2])),
                curr_im
            ],
                                     axis=0)

        if border_size > 0:
            # add a border all around the image
            curr_im = cv2.copyMakeBorder(curr_im,
                                         border_size,
                                         border_size,
                                         border_size,
                                         border_size,
                                         borderType=cv2.BORDER_CONSTANT,
                                         value=border_color)

        if padding_size > 0 and i < batch_size - 1:
            # include a border between images
            padding_shape = list(curr_im.shape[:3])
            padding_shape[concat_axis] = padding_size
            curr_im = np.concatenate(
                [curr_im, np.ones(padding_shape) * padding_color],
                axis=concat_axis)
        out_im.append(curr_im)

    # font size chosen empirically relative to the display height
    if display_h > 50:
        font_size = 15
    else:
        font_size = 10

    if concat_axis is not None:
        out_im = np.concatenate(out_im, axis=concat_axis).astype(np.uint8)
    else:
        out_im = np.concatenate(out_im, axis=0).astype(np.uint8)

    max_text_width = int(17 * display_h / 128.)  # empirically determined
    if labels is not None and len(labels) > 0:
        im_pil = Image.fromarray(out_im)
        draw = ImageDraw.Draw(im_pil)

        for i in range(batch_size):
            if len(labels) > i:  # if we have a label for this image
                if type(labels[i]) == tuple or type(labels[i]) == list:
                    # format tuple or list nicely
                    formatted_text = ', '.join([
                        labels[i][j].decode('UTF-8') if type(labels[i][j]) == np.unicode_ \
                            else labels[i][j] if type(labels[i][j]) == str \
                            else str(round(labels[i][j], 2)) if isinstance(labels[i][j], float) \
                            else str(labels[i][j]) for j in range(len(labels[i]))])
                elif type(labels[i]) == float or type(labels[i]) == np.float32:
                    formatted_text = str(round(labels[i],
                                               2))  # round floats to 2 digits
                elif isinstance(labels[i], np.ndarray):
                    # assume that this is a 1D array
                    curr_labels = np.squeeze(labels[i]).astype(np.float32)
                    formatted_text = np.array2string(curr_labels,
                                                     precision=2,
                                                     separator=',')
                else:
                    formatted_text = '{}'.format(labels[i])

                if display_h > 30:  # only print label if we have room
                    # NOTE(review): bare except here deliberately falls back
                    # to arial when the Ubuntu font is unavailable.
                    try:
                        font = ImageFont.truetype('Ubuntu-M.ttf', font_size)
                    except:
                        font = ImageFont.truetype('arial.ttf', font_size)

                    # wrap the text so it fits
                    formatted_text = textwrap.wrap(formatted_text,
                                                   width=max_text_width)

                    for li, line in enumerate(formatted_text):
                        # anchor each label at the top-left of its image slot
                        if concat_axis == 0:
                            draw.text((5, i * im_h + 5 + 14 * li),
                                      line,
                                      font=font,
                                      fill=(50, 50, 255))
                        elif concat_axis == 1:
                            draw.text((5 + i * im_w, 5 + 14 * li),
                                      line,
                                      font=font,
                                      fill=(50, 50, 255))
        out_im = np.asarray(im_pil)

    if concat_axis is None:
        # un-concat the image. faster this way
        out_im = np.split(out_im, batch_size, axis=combine_from_axis)

    return out_im
def _unmerge_display(root_config, myopts, unmerge_action,
    unmerge_files, clean_delay=1, ordered=0,
    writemsg_level=portage.util.writemsg_level):
    """
    Resolve and display the packages that an unmerge-type action
    ("unmerge", "prune", "clean" or "rage-clean") would remove.

    Returns a tuple of (returncode, pkgmap) where returncode is
    os.EX_OK if no errors occur, and 1 otherwise.

    pkgmap is a list of dicts (one per processed atom, or one per
    category/package when not ordered) with "protected", "selected"
    and "omitted" sets of package versions.
    """

    quiet = "--quiet" in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs = []
    global_unmerge = 0
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        # Memoized construction of installed-Package objects; raises
        # KeyError (from aux_get) if cpv vanished concurrently.
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(built=True, cpv=cpv, installed=True,
                metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                operation="uninstall", root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vartree.dbapi.lock()
            vdb_lock = True

        # Expand the system set into concrete atoms, remembering which
        # concrete cp came from which virtual (for warning messages later).
        realsyslist = []
        sys_virt_map = {}
        for x in sets["system"].getAtoms():
            for atom in expand_new_virt(vartree.dbapi, x):
                if not atom.blocker:
                    realsyslist.append(atom)
                    if atom.cp != x.cp:
                        sys_virt_map[atom.cp] = x.cp

        syslist = []
        for x in realsyslist:
            mycp = x.cp
            # Since Gentoo stopped using old-style virtuals in
            # 2011, typically it's possible to avoid getvirtuals()
            # calls entirely. It will not be triggered here by
            # new-style virtuals since those are expanded to
            # non-virtual atoms above by expand_new_virt().
            if mycp.startswith("virtual/") and \
                mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)
        syslist = frozenset(syslist)

        if not unmerge_files:
            if unmerge_action in ["rage-clean", "unmerge"]:
                print()
                print(bold("emerge %s" % unmerge_action) +
                    " can only be used with specific package names")
                print()
                return 1, {}

            global_unmerge = 1

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to %s have been provided.\n" %
                    unmerge_action)
                return 1, {}
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune", "clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '" + x + "' doesn't exist.\n")
                        return 1, {}

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx + "/CONTENTS"):
                        print("!!! Not a valid db dir: " + str(absx))
                        return 1, {}

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!",x,"cannot be inside "+ \
                            vdb_path+"; aborting.\n")
                        return 1, {}

                    for idx in range(0, sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                                vdb_path+"; aborting.\n")
                            return 1, {}

                    print("=" + "/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append(
                        "=" + "/".join(sp_absx[sp_vdb_len:]))

        newline = ""
        if not "--quiet" in myopts:
            newline = "\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
                ">>> Using system located in ROOT tree %s\n" % \
                settings["ROOT"]))

        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
            not "--quiet" in myopts:
            writemsg_level(darkgreen(newline+\
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous. Please specify")
                print("!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n")
                for i in errpkgs[0]:
                    print(" " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x.replace("null/", ""), unmerge_action), noiselevel=-1)
                continue

            pkgmap.append({
                "protected": set(),
                "selected": set(),
                "omitted": set()
            })
            mykey = len(pkgmap) - 1
            if unmerge_action in ["rage-clean", "unmerge"]:
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                # Keep only the best version per prune semantics; everything
                # else in mymatch becomes selected for removal.
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap = {}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                    portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    # protect the most recently merged pkg in each slot
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 1, {}

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
            return 1, {}
    finally:
        # Always release the vdb lock taken above, flushing first.
        if vdb_lock:
            vartree.dbapi.flush_cache()
            vartree.dbapi.unlock()

    # generate a list of package sets that are directly or indirectly listed in "selected",
    # as there is no persistent list of "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [
                x[len(SETPREFIX):] for x in sets[s].getNonAtoms()
                if x.startswith(SETPREFIX)
            ]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [
        x for x in installed_sets if x not in root_config.setconfig.active
    ]
    del stop, pos

    # we don't want to unmerge packages that are still listed in user-editable package sets
    # listed in "world" as they would be remerged on the next update of "world" or the
    # relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and root_config.root == "/":
                # Never let the action remove portage itself or the running
                # Python interpreter's owner package.
                skip_pkg = False
                if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM,
                    [pkg]):
                    msg = ("Not unmerging package %s "
                        "since there is no valid reason for Portage to "
                        "%s itself.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                elif vartree.dbapi._dblink(cpv).isowner(
                    portage._python_interpreter):
                    msg = ("Not unmerging package %s since there is no valid "
                        "reason for Portage to %s currently used Python "
                        "interpreter.") % (pkg.cpv, unmerge_action)
                    skip_pkg = True
                if skip_pkg:
                    for line in textwrap.wrap(msg, 75):
                        out.eerror(line)
                    # adjust pkgmap so the display output is correct
                    pkgmap[cp]["selected"].remove(cpv)
                    all_selected.remove(cpv)
                    pkgmap[cp]["protected"].add(cpv)
                    continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                        (s, root_config.settings['EROOT'],
                        portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other classes are generally used for
                # special purposes and can be ignored here (and are usually generated dynamically, so the
                # user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(
                    colorize("WARN",
                        "Package %s is going to be unmerged," % cpv))
                print(
                    colorize(
                        "WARN",
                        "but still listed in the following package sets:"))
                print(" %s\n" % ", ".join(parents))

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
        return 1, {}

    # Unmerge order only matters in some cases
    if not ordered:
        # Merge per-atom dicts into one dict per category/package and sort.
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
            virt_cp = sys_virt_map.get(cp)
            if virt_cp is None:
                cp_info = "'%s'" % (cp, )
            else:
                cp_info = "'%s' (%s)" % (cp, virt_cp)
            writemsg_level(colorize("BAD","\n\n!!! " + \
                "%s is part of your system profile.\n" % (cp_info,)),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp), ), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected", "protected", "omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = []
                for mypkg in pkgmap[x][mytype]:
                    try:
                        sorted_pkgs.append(mypkg.cpv)
                    except AttributeError:
                        sorted_pkgs.append(_pkg_str(mypkg))
                sorted_pkgs.sort(key=cpv_sort_key())
                for mypkg in sorted_pkgs:
                    if mytype == "selected":
                        writemsg_level(colorize("UNMERGE_WARN",
                            mypkg.version + " "),
                            noiselevel=-1)
                    else:
                        writemsg_level(colorize("GOOD",
                            mypkg.version + " "),
                            noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\nAll selected packages: %s\n" %
        " ".join('=%s' % x for x in all_selected), noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    return os.EX_OK, pkgmap
def int_to_macaddress(integer):
    """Render *integer* as a colon-separated MAC address string.

    The value is formatted as (at least) 12 zero-padded lowercase hex
    digits and split into two-character octets, e.g.
    ``0x123456789abc`` -> ``'12:34:56:78:9a:bc'``.
    """
    hex_digits = "%012x" % (integer)
    octets = [hex_digits[i:i + 2] for i in range(0, len(hex_digits), 2)]
    return ":".join(octets)
def __init__(self, sizepage=A4, list_xml=None, recibo=True, orientation='portrait', logo=None, cce_xml=None):
    """Build a DANFE PDF (and optional CC-e pages) into an in-memory buffer.

    :param sizepage: reportlab page size (default A4)
    :param list_xml: iterable of parsed NF-e XML documents to render
    :param recibo: when True, draw the delivery receipt block on page 1
    :param orientation: only 'portrait' is implemented; 'landscape' raises
    :param logo: issuer logo passed through to the drawing routines
    :param cce_xml: optional iterable of CC-e (correction letter) XMLs,
        each rendered on its own page after the invoices
    """
    # Page geometry in millimetres (A4 = 21 x 29.7 cm) and margins.
    self.width = 210  # 21 x 29,7cm
    self.height = 297
    self.nLeft = 10
    self.nRight = 10
    self.nTop = 7
    self.nBottom = 8
    self.nlin = self.nTop  # current line cursor, starts at the top margin
    self.logo = logo
    # Freight responsibility codes (modFrete) -> display text.
    self.oFrete = {
        '0': '0 - Emitente',
        '1': '1 - Destinatário',
        '2': '2 - Terceiros',
        '9': '9 - Sem Frete'
    }
    self.oPDF_IO = IO()  # PDF is written to this in-memory buffer
    if orientation == 'landscape':
        raise NameError('Rotina não implementada')
    else:
        size = sizepage

    self.canvas = canvas.Canvas(self.oPDF_IO, pagesize=size)
    self.canvas.setTitle('DANFE')
    self.canvas.setStrokeColor(black)

    for oXML in list_xml:
        # <cobr> holds billing/invoice data; its presence costs item rows
        # on the first page.
        oXML_cobr = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}cobr")
        self.NrPages = 1
        self.Page = 1

        # Compute the total number of lines used by the item descriptions.
        # With the invoice block present, only 29 lines fit items on the
        # first page.
        nNr_Lin_Pg_1 = 34 if oXML_cobr is None else 30
        # Each paginator entry is [ rec_ini , rec_fim , lines , limit_lines ]
        oPaginator = [[0, 0, 0, nNr_Lin_Pg_1]]
        el_det = oXML.findall(".//{http://www.portalfiscal.inf.br/nfe}det")
        if el_det is not None:
            list_desc = []
            list_cod_prod = []
            nPg = 0
            for nId, item in enumerate(el_det):
                el_prod = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}prod")
                infAdProd = item.find(
                    ".//{http://www.portalfiscal.inf.br/nfe}infAdProd")
                # Wrap product description (plus any additional info) to
                # the 56-column item description field.
                list_ = wrap(tagtext(oNode=el_prod, cTag='xProd'), 56)
                if infAdProd is not None:
                    list_.extend(wrap(infAdProd.text, 56))
                list_desc.append(list_)
                list_cProd = wrap(tagtext(oNode=el_prod, cTag='cProd'), 14)
                list_cod_prod.append(list_cProd)
                # Number of lines needed for this item's description.
                nLin_Itens = len(list_)
                if (oPaginator[nPg][2] + nLin_Itens) >= oPaginator[nPg][3]:
                    # Current page is full: open a new page able to hold
                    # 77 item lines and start it with this item.
                    oPaginator.append([0, 0, 0, 77])
                    nPg += 1
                    oPaginator[nPg][0] = nId
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] = nLin_Itens
                else:
                    # End index is exclusive (xrange-style), hence nId + 1.
                    oPaginator[nPg][1] = nId + 1
                    oPaginator[nPg][2] += nLin_Itens
            self.NrPages = len(oPaginator)  # computing the page count

        # First page: fixed blocks in layout order.
        if recibo:
            self.recibo_entrega(oXML=oXML)
        self.ide_emit(oXML=oXML)
        self.destinatario(oXML=oXML)
        if oXML_cobr is not None:
            self.faturas(oXML=oXML_cobr)
        self.impostos(oXML=oXML)
        self.transportes(oXML=oXML)
        self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPaginator[0],
                      list_desc=list_desc, list_cod_prod=list_cod_prod)
        self.adicionais(oXML=oXML)
        # Generate the remaining (continuation) pages of this XML.
        for oPag in oPaginator[1:]:
            self.newpage()
            self.ide_emit(oXML=oXML)
            self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPag,
                          list_desc=list_desc, nHeight=77,
                          list_cod_prod=list_cod_prod)
        self.newpage()

    if cce_xml:
        # NOTE(review): oXML here is the last invoice from the loop above;
        # each CC-e is rendered against it — confirm this pairing is intended.
        for xml in cce_xml:
            self._generate_cce(cce_xml=xml, oXML=oXML)
            self.newpage()
    self.canvas.save()
async def process(msg, user, client, reply, replied=None):
    """Render a Telegram message as a quote-style image (PIL canvas).

    :param msg: plain text of the message being quoted.
    :param user: telethon user object of the message author.
    :param client: telethon client (used for participant info, photos, media).
    :param reply: the message object being quoted (entities, media, chat id).
    :param replied: optional message that *reply* itself replied to; rendered
        as a small header above the quote.
    :return: tuple (True, PIL.Image) — the composed image.
    """
    # One-time download of the fonts used below (only when ./resources is missing).
    if not os.path.isdir("resources"):
        os.mkdir("resources", 0o755)
        urllib.request.urlretrieve(
            'https://github.com/erenmetesar/modules-repo/raw/master/Roboto-Regular.ttf',
            'resources/Roboto-Regular.ttf')
        urllib.request.urlretrieve(
            'https://github.com/erenmetesar/modules-repo/raw/master/Quivira.otf',
            'resources/Quivira.otf')
        urllib.request.urlretrieve(
            'https://github.com/erenmetesar/modules-repo/raw/master/Roboto-Medium.ttf',
            'resources/Roboto-Medium.ttf')
        urllib.request.urlretrieve(
            'https://github.com/erenmetesar/modules-repo/raw/master/DroidSansMono.ttf',
            'resources/DroidSansMono.ttf')
        urllib.request.urlretrieve(
            'https://github.com/erenmetesar/modules-repo/raw/master/Roboto-Italic.ttf',
            'resources/Roboto-Italic.ttf')
    # Importing fonts and getting the size of text.
    font = ImageFont.truetype("resources/Roboto-Medium.ttf", 43, encoding="utf-16")
    font2 = ImageFont.truetype("resources/Roboto-Regular.ttf", 33, encoding="utf-16")
    mono = ImageFont.truetype("resources/DroidSansMono.ttf", 30, encoding="utf-16")
    italic = ImageFont.truetype("resources/Roboto-Italic.ttf", 33, encoding="utf-16")
    fallback = ImageFont.truetype("resources/Quivira.otf", 43, encoding="utf-16")
    # Split the text into lines of at most 43 chars while tracking the widest
    # rendered line (mono lines get extra padding).
    maxlength = 0
    width = 0
    text = []
    for line in msg.split("\n"):
        length = len(line)
        if length > 43:
            text += textwrap.wrap(line, 43)
            maxlength = 43
            if width < fallback.getsize(line[:43])[0]:
                if "MessageEntityCode" in str(reply.entities):
                    width = mono.getsize(line[:43])[0] + 30
                else:
                    width = fallback.getsize(line[:43])[0]
            # NOTE(review): bare `next` is a no-op expression here (it does NOT
            # continue the loop); probably meant `continue` — harmless either way.
            next
        else:
            text.append(line + "\n")
            if width < fallback.getsize(line)[0]:
                if "MessageEntityCode" in str(reply.entities):
                    width = mono.getsize(line)[0] + 30
                else:
                    width = fallback.getsize(line)[0]
            if maxlength < length:
                maxlength = length
    # Admin/creator rank shown next to the name, when available.
    title = ""
    try:
        details = await client(
            functions.channels.GetParticipantRequest(reply.chat_id, user.id))
        if isinstance(details.participant, types.ChannelParticipantCreator):
            title = details.participant.rank if details.participant.rank else "Creator"
        elif isinstance(details.participant, types.ChannelParticipantAdmin):
            title = details.participant.rank if details.participant.rank else "Admin"
    except TypeError:
        # Not a channel/supergroup — no rank to show.
        pass
    titlewidth = font2.getsize(title)[0]
    # Get user name.
    lname = "" if not user.last_name else user.last_name
    tot = user.first_name + " " + lname
    namewidth = fallback.getsize(tot)[0] + 10
    if namewidth > width:
        width = namewidth
    # Reserve horizontal room for the title (or reclaim it when it fits).
    width += titlewidth + 30 if titlewidth > width - namewidth else -(titlewidth - 30)
    height = len(text) * 40  # 40 px per text line
    # Profile Photo BG (left column the avatar is pasted onto).
    pfpbg = Image.new("RGBA", (125, 600), (0, 0, 0, 0))
    # Draw Template: rounded bubble split into top/middle/bottom strips.
    top, middle, bottom = await drawer(width, height)
    # Profile Photo Check and Fetch.
    yes = False
    color = random.choice(COLORS)
    async for photo in client.iter_profile_photos(user, limit=1):
        yes = True
    if yes:
        pfp = await client.download_profile_photo(user)
        paste = Image.open(pfp)
        os.remove(pfp)
        paste.thumbnail((105, 105))
        # Circular mask for the avatar.
        mask_im = Image.new("L", paste.size, 0)
        draw = ImageDraw.Draw(mask_im)
        draw.ellipse((0, 0, 105, 105), fill=255)
        # Apply Mask.
        pfpbg.paste(paste, (0, 0), mask_im)
    else:
        # No photo: draw an initials placeholder instead.
        paste, color = await no_photo(user, tot)
        pfpbg.paste(paste, (0, 0))
    # Creating a big canvas to gather all the elements.
    canvassize = (middle.width + pfpbg.width,
                  top.height + middle.height + bottom.height)
    canvas = Image.new('RGBA', canvassize)
    draw = ImageDraw.Draw(canvas)
    y = 80
    if replied:
        # Header quoting the message that *reply* answered.
        replname = "" if not replied.sender.last_name else replied.sender.last_name
        reptot = replied.sender.first_name + " " + replname
        # NOTE(review): replywidth is computed but never used.
        replywidth = font2.getsize(reptot)[0]
        if reply.sticker:
            # Sticker reply: compose sticker + reply header and return early.
            sticker = await reply.download_media()
            stimg = Image.open(sticker)
            canvas = canvas.resize(
                (stimg.width + pfpbg.width, stimg.height + 160))
            top = Image.new("RGBA", (200 + stimg.width, 300), (29, 29, 29, 255))
            draw = ImageDraw.Draw(top)
            await replied_user(draw, reptot, replied.message.replace("\n", " "), 20)
            top = top.crop((135, 70, top.width, 300))
            canvas.paste(pfpbg, (0, 0))
            canvas.paste(top, (pfpbg.width + 10, 0))
            canvas.paste(stimg, (pfpbg.width + 10, 140))
            os.remove(sticker)
            return True, canvas
        # Text reply: enlarge the bubble to make room for the header.
        canvas = canvas.resize((canvas.width + 60, canvas.height + 120))
        top, middle, bottom = await drawer(middle.width + 60, height + 105)
        canvas.paste(pfpbg, (0, 0))
        canvas.paste(top, (pfpbg.width, 0))
        canvas.paste(middle, (pfpbg.width, top.height))
        canvas.paste(bottom, (pfpbg.width, top.height + middle.height))
        draw = ImageDraw.Draw(canvas)
        # Replace media messages with a type caption in the header.
        if replied.sticker:
            replied.text = "Sticker"
        elif replied.photo:
            replied.text = "Photo"
        elif replied.audio:
            replied.text = "Audio"
        elif replied.voice:
            replied.text = "Voice Message"
        elif replied.document:
            replied.text = "Document"
        await replied_user(draw, reptot, replied.message.replace("\n", " "),
                           maxlength + len(title), len(title))
        y = 200
    elif reply.sticker:
        # Plain sticker message: avatar + sticker only.
        sticker = await reply.download_media()
        stimg = Image.open(sticker)
        canvas = canvas.resize(
            (stimg.width + pfpbg.width + 30, stimg.height + 10))
        canvas.paste(pfpbg, (0, 0))
        canvas.paste(stimg, (pfpbg.width + 10, 10))
        os.remove(sticker)
        return True, canvas
    elif reply.document and not reply.audio and not reply.audio:
        # NOTE(review): `not reply.audio` is tested twice — the second test is
        # presumably meant to exclude another media kind (voice/video); confirm.
        docname = ".".join(
            reply.document.attributes[-1].file_name.split(".")[:-1])
        doctype = reply.document.attributes[-1].file_name.split(
            ".")[-1].upper()
        # Human-readable file size.
        if reply.document.size < 1024:
            docsize = str(reply.document.size) + " Bytes"
        elif reply.document.size < 1048576:
            docsize = str(round(reply.document.size / 1024, 2)) + " KB "
        elif reply.document.size < 1073741824:
            docsize = str(round(reply.document.size / 1024**2, 2)) + " MB "
        else:
            docsize = str(round(reply.document.size / 1024**3, 2)) + " GB "
        docbglen = font.getsize(docsize)[0] if font.getsize(
            docsize)[0] > font.getsize(docname)[0] else font.getsize(
                docname)[0]
        canvas = canvas.resize((pfpbg.width + width + docbglen, 160 + height))
        top, middle, bottom = await drawer(width + docbglen, height + 30)
        canvas.paste(pfpbg, (0, 0))
        canvas.paste(top, (pfpbg.width, 0))
        canvas.paste(middle, (pfpbg.width, top.height))
        canvas.paste(bottom, (pfpbg.width, top.height + middle.height))
        # NOTE(review): BUG — `doctype` was rebound to a string above, so this
        # call will raise TypeError; it presumably shadows a helper function
        # of the same name defined elsewhere. Needs a rename to fix.
        canvas = await doctype(docname, docsize, doctype, canvas)
        y = 80 if text else 0
    else:
        # Plain text message.
        canvas.paste(pfpbg, (0, 0))
        canvas.paste(top, (pfpbg.width, 0))
        canvas.paste(middle, (pfpbg.width, top.height))
        canvas.paste(bottom, (pfpbg.width, top.height + middle.height))
        y = 85
    # Writing User's Name (emoji pasted as images, glyphs missing from the
    # main font drawn with the Quivira fallback).
    space = pfpbg.width + 30
    namefallback = ImageFont.truetype("resources/Quivira.otf", 43, encoding="utf-16")
    for letter in tot:
        if letter in emoji.UNICODE_EMOJI:
            newemoji, mask = await emoji_fetch(letter)
            canvas.paste(newemoji, (space, 24), mask)
            space += 40
        else:
            if not await fontTest(letter):
                draw.text((space, 20), letter, font=namefallback, fill=color)
                space += namefallback.getsize(letter)[0]
            else:
                draw.text((space, 20), letter, font=font, fill=color)
                space += font.getsize(letter)[0]
    if title:
        draw.text((canvas.width - titlewidth - 20, 25), title, font=font2, fill="#898989")
    # Writing all separating emojis and regular texts. Entity styling (bold,
    # italic, mono, link) is applied per character by offset lookup.
    x = pfpbg.width + 30
    bold, mono, italic, link = await get_entity(reply)
    # NOTE(review): mdlength is never used after this point.
    mdlength = 0
    index = 0
    emojicount = 0
    textfallback = ImageFont.truetype("resources/Quivira.otf", 33, encoding="utf-16")
    textcolor = "white"
    for line in text:
        for letter in line:
            # Locate this character's offset in the original message; consumed
            # characters are blanked to "¶" below so find() advances.
            index = msg.find(
                letter) if emojicount == 0 else msg.find(letter) + emojicount
            for offset, length in bold.items():
                if index in range(offset, length):
                    font2 = ImageFont.truetype("resources/Roboto-Medium.ttf", 33, encoding="utf-16")
                    textcolor = "white"
            for offset, length in italic.items():
                if index in range(offset, length):
                    font2 = ImageFont.truetype("resources/Roboto-Italic.ttf", 33, encoding="utf-16")
                    textcolor = "white"
            for offset, length in mono.items():
                if index in range(offset, length):
                    font2 = ImageFont.truetype("resources/DroidSansMono.ttf", 30, encoding="utf-16")
                    textcolor = "white"
            for offset, length in link.items():
                if index in range(offset, length):
                    font2 = ImageFont.truetype("resources/Roboto-Regular.ttf", 30, encoding="utf-16")
                    textcolor = "#898989"
            if letter in emoji.UNICODE_EMOJI:
                newemoji, mask = await emoji_fetch(letter)
                canvas.paste(newemoji, (x, y - 2), mask)
                x += 45
                emojicount += 1
            else:
                if not await fontTest(letter):
                    draw.text((x, y), letter, font=textfallback, fill=textcolor)
                    x += textfallback.getsize(letter)[0]
                else:
                    draw.text((x, y), letter, font=font2, fill=textcolor)
                    x += font2.getsize(letter)[0]
            msg = msg.replace(letter, "¶", 1)
        y += 40
        x = pfpbg.width + 30
    return True, canvas
def get_arg_text(ob):
    '''Return a string describing the signature of a callable object, or ''.
    For Python-coded functions and methods, the first line is introspected.
    Delete 'self' parameter for classes (.__init__) and bound methods.
    The next lines are the first lines of the doc string up to the first
    empty line or _MAX_LINES.
    For builtins, this typically includes the arguments in addition to the
    return value.

    NOTE: this is Python 2 code (types.ClassType/TypeType, im_func,
    im_self, func_code, func_defaults do not exist on Python 3).
    '''
    argspec = ""
    try:
        ob_call = ob.__call__
    except BaseException:
        # Not callable through __call__; old-style classes are still
        # constructible, everything else yields an empty signature.
        if type(ob) is types.ClassType:  # old-style
            ob_call = ob
        else:
            return argspec
    arg_offset = 0  # how many leading params (i.e. 'self') to drop
    if type(ob) in (types.ClassType, types.TypeType):
        # Look for the first __init__ in the class chain with .im_func.
        # Slot wrappers (builtins, classes defined in funcs) do not.
        fob = _find_constructor(ob)
        if fob is None:
            fob = lambda: None  # no introspectable constructor -> "()"
        else:
            arg_offset = 1
    elif type(ob) == types.MethodType:
        # bit of a hack for methods - turn it into a function
        # and drop the "self" param for bound methods
        fob = ob.im_func
        if ob.im_self is not None:
            arg_offset = 1
    elif type(ob_call) == types.MethodType and \
            hasattr(ob_call.im_func.func_code, 'co_code'):
        # a callable class instance
        fob = ob_call.im_func
        arg_offset = 1
    else:
        fob = ob
    # Try to build one for Python defined functions
    if type(fob) in [types.FunctionType, types.LambdaType] and \
            hasattr(fob.func_code, 'co_code'):
        argcount = fob.func_code.co_argcount
        real_args = fob.func_code.co_varnames[arg_offset:argcount]
        defaults = fob.func_defaults or []
        # Render defaults as "=<repr>" and left-pad so they align with the
        # trailing parameters they belong to.
        defaults = list(map(lambda name: "=%s" % repr(name), defaults))
        defaults = [""] * (len(real_args) - len(defaults)) + defaults
        items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
        # CO_VARARGS (0x4) and CO_VARKEYWORDS (0x8) flags add *args/**kwargs;
        # if those names are taken, append a numeric suffix to disambiguate.
        for flag, pre, name in ((0x4, '*', 'args'), (0x8, '**', 'kwargs')):
            if fob.func_code.co_flags & flag:
                pre_name = pre + name
                if name not in real_args:
                    items.append(pre_name)
                else:
                    i = 1
                    while ((name + '%s') % i) in real_args:
                        i += 1
                    items.append((pre_name + '%s') % i)
        argspec = ", ".join(items)
        # Replace compiler-generated tuple-arg names like ".1" with <tuple>.
        argspec = "(%s)" % re.sub("(?<!\d)\.\d+", "<tuple>", argspec)
    lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
             if len(argspec) > _MAX_COLS else [argspec] if argspec else [])
    # Prefer the __call__ docstring for callable instances.
    if isinstance(ob_call, types.MethodType) and \
            hasattr(ob_call.im_func.func_code, 'co_code'):
        doc = ob_call.__doc__
    else:
        doc = getattr(ob, "__doc__", "")
    if doc:
        # Append up to _MAX_LINES doc lines, stopping at the first blank one.
        for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
            line = line.strip()
            if not line:
                break
            if len(line) > _MAX_COLS:
                line = line[:_MAX_COLS - 3] + '...'
            lines.append(line)
    argspec = '\n'.join(lines)
    return argspec
def process_hadith(self, hadith_list): if self.ref.type == 'normal': hadith_list = [hadith for hadith in hadith_list['data']] hadith = hadith_list[int(self.ref.hadith_number) - 1] self.hadith_number = hadith['hadithNumber'] if self.lang == 'en': self.text = hadith["hadith"][0]["body"] self.chapter_name = hadith["hadith"][0]["chapterTitle"] try: self.grading = hadith["hadith"][0]["grades"][0]["grade"] self.graded_by = hadith["hadith"][0]["grades"][0][ "graded_by"] except IndexError: pass else: self.text = hadith["hadith"][1]["body"] self.chapter_name = hadith["hadith"][1]["chapterTitle"] try: self.grading = hadith["hadith"][1]["grades"][0]["grade"] self.graded_by = hadith["hadith"][1]["grades"][0][ "graded_by"] except IndexError: pass else: self.hadith_number = hadith_list['hadithNumber'] if self.lang == 'en': self.text = hadith_list['hadith'][0]['body'] self.chapter_name = hadith_list['hadith'][0]['chapterTitle'] try: self.grading = hadith_list['hadith'][0]["grades"][0][ "grade"] self.graded_by = hadith_list['hadith'][0]["grades"][0][ "graded_by"] except IndexError: pass else: self.text = hadith_list['hadith'][1]['body'] self.chapter_name = hadith_list['hadith'][1]['chapterTitle'] try: self.grading = hadith_list['hadith'][1]["grades"][0][ "grade"] self.graded_by = hadith_list['hadith'][1]["grades"][0][ "graded_by"] except IndexError: pass self.text = self.format_hadith_text(self.text) self.pages = textwrap.wrap(self.text, 1024) if self.lang == 'en': self.formatted_collection = self.format_english_collection_name( self.collection) else: self.formatted_collection = self.format_arabic_collection_name( self.collection) em = self.make_embed() return em
def message_event(evt): dt = datetime.fromtimestamp(evt['timestamp'] / 1000.0) return "%s: %s" % (dt.ctime(), "\n".join(textwrap.wrap(evt['message'], 80)))
def comment(self, text): for line in textwrap.wrap(text, self.width - 2): self.output.write('# ' + line + '\n')
challenge_level = 0 for line in sys.stdin: line = line.rstrip() if line.startswith("```"): print in_code = not in_code if in_code: show_code = "echo=FALSE" not in line assert in_code or line == "```", line elif in_code: if show_code: print line elif line.startswith("#"): print "#" if in_challenge else "" n = line.count("#") if not in_challenge or n <= challenge_level: in_challenge = "{.challenge}" in line if in_challenge: challenge_level = n if in_challenge: line = line.replace("{.challenge}", "").rstrip() banner = "#" * n + " " + (" " if n > 3 else ("-" if n > 2 else "=")) * (len(line) - n - 1) print banner print line print banner elif in_challenge: for line2 in textwrap.wrap(line) or [""]: print "# " + line2
def text_wrap_formatter(d): return '\n'.join(textwrap.wrap(d or '', 55))
def do_entity_extraction(self, parsed):
    """ Somewhat complex function to actually do the entity extraction.

    Runs the NER model over each chunk of *parsed*, collects MISC entities,
    builds a co-occurrence graph and returns graph-level statistics.

    NOTE: Python 2 code (`count_obj.keys()[0]`, `dict.iteritems`).
    :return: dict with 'graph', 'edges', 'total_nodes', 'clustering',
        'total_edges' and 'average_degree'.
    """
    import networkx as nx
    import textwrap
    from numpy import mean
    chunks = self.get_chunks(parsed)

    def total_edge_count(count_obj, total_counter=0):
        """ Sub-function to calculate total number of edges in a container. """
        # Recursively pops one entity at a time and pairs it with the rest;
        # NOTE(review): destructive — mutates (empties) count_obj.
        if count_obj:
            ceiling = count_obj.pop(count_obj.keys()[0])
            total_counter += sum(
                [min(ceiling, count_obj[c]) for c in count_obj])
            total_counter = total_edge_count(count_obj, total_counter)
        return total_counter

    def observed_edge_count(raw_obj):
        """ Sub-function to calculate the observed number of edges in a container. """
        observed_counter = 0
        for chunk_obj in raw_obj:
            chunk_entities = {
                e: chunk_obj.count(e)
                for e in set(chunk_obj)
            }
            observed_counter += total_edge_count(chunk_entities)
        return observed_counter

    # container to store all entities extracted, for matching use in-string
    # maybe consider shifting this inside the loop to only match in-chunk?
    # though note that the output generator currently depends on this
    all_entities = []
    # output container
    out = []
    # iterate over units of analysis, as defined in country-specific functions
    for chunk in chunks:
        entity_strings = []
        sentences = self.process_doc(chunk)
        for sent in sentences:
            entities = []
            tags = self.model.predict(sent)
            # BIO decoding: B-MISC opens an entity, I-MISC extends the last one.
            for i, t in enumerate(tags):
                if t == 'B-MISC':
                    entities.append([sent[i]])
                elif t == 'I-MISC' and len(entities) > 0:
                    # this condition shouldn't be necessary - need to figure out why this is happening
                    entities[-1].append(sent[i])
                #elif sent[i] in self.white_list and any([sent[i] in e.split() for e in all_entities]):
                #    matches = [e for e in all_entities if sent[i] in e.split()]
                #    entities.append([matches[-1]])
            # Join tokens, then hard-wrap long entity names for display (20 cols).
            new_entities = [' '.join(e) for e in entities]
            new_entities = [
                '\n'.join(textwrap.wrap(e.strip(), 20))
                for e in new_entities
            ]
            entity_strings += new_entities
            all_entities += new_entities
        out.append(entity_strings)
    # get the actual output
    entities_count = {e: all_entities.count(e) for e in set(all_entities)}
    out = [[e for e in row if e in entities_count] for row in out]
    # Build weighted co-occurrence edges: weight is the number of times the
    # rarer of the two entities appears in the same chunk.
    edges = {}
    for chunk in out:
        if len(set(chunk)) > 1:
            entities = list(set(chunk))
            for i in range(len(entities)):
                for j in range(i + 1, len(entities)):
                    e1 = entities[i]
                    e2 = entities[j]
                    # Edge keys are unordered: check both orientations.
                    if (e1, e2) in edges:
                        edges[(e1, e2)] += min(chunk.count(e1), chunk.count(e2))
                    elif (e2, e1) in edges:
                        edges[(e2, e1)] += min(chunk.count(e1), chunk.count(e2))
                    else:
                        edges[(e1, e2)] = min(chunk.count(e1), chunk.count(e2))
    edges = [k + (w, ) for k, w in edges.iteritems()]
    if entities_count:
        graph = nx.Graph()
        for u, v, w in edges:
            graph.add_edge(u, v, weight=w)
        degree = list(graph.degree(weight='weight').values())
        if degree:
            average_degree = mean(
                list(graph.degree(weight='weight').values()))
        else:
            average_degree = 0
        # count_zeroes?
        try:
            clustering_coeff = nx.average_clustering(graph,
                                                     weight='weight',
                                                     count_zeros=True)
        except ZeroDivisionError:
            clustering_coeff = 0
    else:
        # No entities at all: statistics are undefined.
        graph = None
        clustering_coeff = None
        average_degree = None
    total_nodes = len(set(all_entities))
    total_edges = sum([e[2] for e in edges])
    return {
        'graph': graph,
        'edges': edges,
        'total_nodes': total_nodes,
        'clustering': clustering_coeff,
        'total_edges': total_edges,
        'average_degree': average_degree
    }
print('=' * 72) print(text_prompt, end='') gen_text = '' for _ in range(nsamples // batch_size): out = sess.run(output, feed_dict={ context: [context_tokens for _ in range(batch_size)] })[:, len(context_tokens):] for i in range(batch_size): generated += 1 text = enc.decode(out[i]) gen_text += text try: idx = gen_text.index('<|endoftext|>') except: idx = -1 if idx > 0: gen_text = gen_text[:idx] print(gen_text) print('=' * 72) if __name__ == '__main__': prompt = input('Bill prompt ("To require/reward/provide..."): ') prompt_formatted = [ line.center(72).rstrip() for line in textwrap.wrap(prompt, 72) ] model_input = TEMPLATE.replace('DESC_HERE', '\n'.join(prompt_formatted)) run_model(model_input, 'gov')
def display_pipeline(pipeline, plot_width=800):
    """Render a Spark ML Pipeline/PipelineModel as a vertical bokeh diagram.

    Each stage becomes a colored box (color keyed by stage type) with a hover
    tooltip listing its params; arrows connect consecutive stages.

    :param pipeline: a pyspark.ml Pipeline or PipelineModel.
    :param plot_width: requested plot width in pixels (boxes take half of it).
    :return: the bokeh figure.
    """
    p = pipeline
    # NOTE(review): if p is neither type, `stages` is unbound and the
    # len() below raises NameError.
    if isinstance(p, Pipeline):
        stages = p.getStages()
    elif isinstance(p, PipelineModel):
        stages = p.stages
    # Layout constants: box/arrow geometry scales the canvas with stage count.
    arrow_len = 20
    box_height = 30
    box_width = int(plot_width / 2)
    num_boxes = len(stages)
    plot_height = int(300 + num_boxes * (box_height + arrow_len))
    plot_width = int(2 * box_width)
    bottom_pos = plot_height - box_height - 200  # y of the first stage box
    left_pos = box_width / 3
    # see https://bokeh.pydata.org/en/latest/docs/user_guide/tools.html#custom-tooltip
    hover = HoverTool(tooltips='<html><head><style> \
        table { \
          font-family: arial, sans-serif; \
          border-collapse: collapse; \
          table-layout: fixed; \
          width: ' + str(box_width - 20) + '; \
        } \
        td, th { \
          border: 1px solid #dddddd; font-size:10px; \
          text-align: left; \
          padding: 1px; \
        } \
        tr:nth-child(even) { \
          background-color: #dddddd; font-size:10px; \
        } \
        </style></head><div style="width:' + str(box_width - 20) + '"><p style="font-size:10px"><font color="#26AAE1">Stage: </font>@name <br> \
        <font color="#26aae1">Type: </font>@type <br> \
        @desc{safe}</p></div>',
        formatters={"desc": "printf"})
    plot = figure(plot_width=plot_width, plot_height=plot_height,
                  tools=[hover], y_range=[0, plot_height],
                  x_range=[0, plot_width])
    plot.xgrid.visible = False
    plot.ygrid.visible = False
    plot.axis.visible = False
    # define colours for each type
    d = {'Estimator': "#FF6969",      # red
         'Transformer': "#26AAE1",    # blue
         'Pipeline': "#B3DE69",       # green
         'PipelineModel': "#FFFF69",  # yellow
         }
    bottom = plot_height - box_height - 20
    # plot the legend
    # NOTE(review): the loop variable `type` shadows the builtin within
    # this loop body.
    for type, box_color in d.items():
        plot.quad(top=bottom + box_height, bottom=bottom, left=20,
                  right=120, color=box_color, line_color='black')
        plot.add_layout(Label(x=25, y=bottom + 5, text=type,
                              render_mode='css', text_font_size='10pt',
                              border_line_color='black', border_line_alpha=0,
                              background_fill_color='#FFFFFF',
                              background_fill_alpha=0))
        bottom -= box_height + 5
    first_box = True
    rows_list = []  # this will be a list of the rows in the final table - each row being a dictionary
    for stage in stages:
        if isinstance(stage, Pipeline):
            stage_type = "Pipeline"
        elif isinstance(stage, PipelineModel):
            stage_type = "PipelineModel"
        elif isinstance(stage, Estimator):
            stage_type = "Estimator"
        elif isinstance(stage, Transformer):
            stage_type = "Transformer"
        box_color = d[stage_type]
        # set up the table to display on hover with the Params and their descriptions
        values_html = '<table><tr><th>Param</th><th>Value</th></tr>'
        if stage_type == "PipelineModel":
            # NOTE(review): this row is missing its closing </tr>.
            values_html += '<tr><td>stages</td><td>%s</td>' % stage.stages
        else:
            for param in stage.params:
                values_html += '<tr><td>' + param.name + '</td>'
                try:
                    value_str = "%s" % stage.getOrDefault(param.name)
                except:
                    # Param has no value and no default.
                    value_str = "None"
                wrapped_value_str = "<br>".join(textwrap.wrap(value_str))
                values_html += '<td>' + wrapped_value_str + '</td></tr>'
        values_html += '</table>'
        # create the row in the plot table with box positions and hover information
        plot_dd = {'top': bottom_pos + box_height,
                   'bottom': bottom_pos,
                   'left': left_pos,
                   'right': left_pos + box_width,
                   'color': box_color,
                   'type': stage_type,
                   'name': stage.uid,
                   'text_x': left_pos + 5,
                   'text_y': bottom_pos + 5,
                   'x_start': left_pos + (box_width / 2),
                   'y_start': bottom_pos + box_height + arrow_len,
                   'x_end': left_pos + (box_width / 2),
                   'y_end': bottom_pos + box_height,
                   'desc': values_html}
        if first_box:
            # we don't want an arrow (None seems to plot at 0,0 so plotting off the screen at -100)
            plot_dd['x_start'] = -100
            plot_dd['y_start'] = -100
            plot_dd['x_end'] = -100
            plot_dd['y_end'] = -100
        rows_list.append(plot_dd)
        bottom_pos = bottom_pos - box_height - arrow_len
        first_box = False
    # now we can create the plot items - box, text and arrows
    plot_data = pd.DataFrame(rows_list)
    source = ColumnDataSource(data=plot_data)
    boxes = plot.quad(source=source, top='top', bottom='bottom',
                      left='left', right='right', color='color',
                      line_color='black')
    plot.add_layout(LabelSet(source=source, x='text_x', y='text_y',
                             text='name', render_mode='css',
                             text_font_size='10pt', border_line_alpha=0,
                             background_fill_alpha=0))
    plot.add_layout(Arrow(end=VeeHead(size=15), source=source,
                          x_start='x_start', y_start='y_start',
                          x_end='x_end', y_end='y_end'))
    # this is for showing in a Databricks Notebook - create HTML then show it
    html = file_html(plot, CDN, "Pipeline plot")
    displayHTML(html)
    # this is for showing in Zeppelin Notebook
    show(plot)
    # return the plot object
    return plot
def extract_randoms(self):
    """
    Extracts the randomness data from the list of arrays provided by the
    MongoDB entry. The randomness data is then prepared into distinct
    lists and written into temporary files for analysis with dieharder.

    :return: False when the document carries no random data at all,
        True after the files have been written.
    """
    # The three random sources captured from the TLS handshake report.
    self.hello_random = self.document['result']['report'][
        'extractedRandomList']
    self.session_id_random = self.document['result']['report'][
        'extractedSessionIDList']
    self.iv_random = self.document['result']['report']['extractedIVList']
    if (self.hello_random is None or len(self.hello_random) == 0) and \
            (self.session_id_random is None or len(self.session_id_random) == 0) and \
            (self.iv_random is None or len(self.iv_random) == 0):
        # Nothing to extract.
        return False
    # extracting the random data from the document
    if self.hello_random is not None and not len(self.hello_random) == 0:
        # self.hello_random = []
        integer_file_random = open(self.hello_random_filename, "w")
        # Each 8 hex chars become one 32-bit integer in the dieharder file.
        # NOTE(review): len(...) / 8 is float division on Python 3 —
        # presumably this targets Python 2; confirm write_header's expectation.
        number_of_integers = len(self.hello_random) * (
            len(self.hello_random[0].get('array')) / 8)
        self.write_header(integer_file_random, number_of_integers)
        for rand in self.hello_random:
            # self.hello_random.append(rand.get('array'))
            splitter = textwrap.wrap(rand.get('array'), 8)
            for package in splitter:
                converted_int = int(package, 16)
                integer_file_random.write(str(converted_int) + "\n")
        integer_file_random.close()
    if self.session_id_random is not None and not len(
            self.session_id_random) == 0:
        # self.session_id_random = []
        integer_file_session = open(self.session_random_filename, "w")
        number_of_integers = len(self.session_id_random) * (
            len(self.session_id_random[0].get('array')) / 8)
        self.write_header(integer_file_session, number_of_integers)
        for rand in self.session_id_random:
            # self.session_id_random.append(rand.get('array'))
            splitter = textwrap.wrap(rand.get('array'), 8)
            for package in splitter:
                converted_int = int(package, 16)
                integer_file_session.write(str(converted_int) + "\n")
        integer_file_session.close()
    if self.iv_random is not None and not len(self.iv_random) == 0:
        # self.iv_random = []
        integer_file_iv = open(self.iv_random_filename, "w")
        number_of_integers = len(
            self.iv_random) * (len(self.iv_random[0].get('array')) / 8)
        self.write_header(integer_file_iv, number_of_integers)
        for rand in self.iv_random:
            # self.iv_random.append(rand.get('array'))
            splitter = textwrap.wrap(rand.get('array'), 8)
            for package in splitter:
                converted_int = int(package, 16)
                integer_file_iv.write(str(converted_int) + "\n")
        integer_file_iv.close()
    # Build one combined sequence by interleaving hello randoms and session
    # ids pairwise, then appending all IVs.
    self.complete_sequence = ""
    hello_random_length = 0
    session_id_length = 0
    if self.hello_random is not None:
        hello_random_length = len(self.hello_random)
    if self.session_id_random is not None:
        session_id_length = len(self.session_id_random)
    for i in range(0, max(hello_random_length, session_id_length)):
        if i < hello_random_length and self.hello_random[i] is not None:
            self.complete_sequence = self.complete_sequence + self.hello_random[
                i].get('array')
        if i < session_id_length and self.session_id_random[i] is not None:
            self.complete_sequence = self.complete_sequence + self.session_id_random[
                i].get('array')
    if self.iv_random is not None:
        for iv in self.iv_random:
            if iv is not None:
                self.complete_sequence = self.complete_sequence + iv.get(
                    'array')
    # Write File in pack of 4 Bytes
    complete_file = open(self.complete_random_filename, "w")
    splitter = textwrap.wrap(self.complete_sequence, 8)
    self.write_header(complete_file, len(splitter))
    for package in splitter:
        converted_int = int(package, 16)
        complete_file.write(str(converted_int) + '\n')
    complete_file.close()
    return True
import speech_recognition as sr import textwrap # import json r = sr.Recognizer() with sr.AudioFile("audioo.wav") as source: r.adjust_for_ambient_noise(source, duration=1) audio = r.listen(source) #print(r.recognize_google(audio,show_all=True)) try: text = r.recognize_google(audio, show_all=True) print("stupid") file2 = open('myfile3.txt', 'w') s = text['alternative'][0]['transcript'] file2.writelines("\n".join(textwrap.wrap(s, 56))) # file2.writelines(text['alternative'][0]['transcript']) # file2.close() # print(text) print(text['alternative'][0]['transcript']) except: print("NOt working") # #text=r.recognize_google(audio,key="AIzaSyDRdSN1VaRW27HxA68rZW5FesS2qoPD8", language="fr-FR",show_all=True) # sFinalResult = r.recognize_google(audio,language='en',show_all=True) # text = json.dumps(sFinalResult, ensure_ascii=False).encode('utf8') # print("stupid") # print(text) # # print("Not being able to read")
def _split_lines(self, text, width): text = self._whitespace_matcher.sub(' ', text).strip() return _textwrap.wrap(text, 80)
# Turn all of the parts into a single string wad = (''.join(str(x) for x in wadparts) + Config.get('main', 'end')) if Config.get('main', 'debug') == str(1): print wad #raise Exception(wad) if Config.get('main', 'readaloud') == str(1): # strip any quotation marks wad = wad.replace('"', '').replace("'", '').strip() if Config.get('main', 'trygoogle') == str(1): # Google voice only accepts 100 characters or less, so split into chunks shorts = [] for chunk in wad.split('. '): shorts.extend(textwrap.wrap(chunk, 100)) # Send shorts to Google and return mp3s try: for sentence in shorts: sendthis = sentence.join([ '"http://translate.google.com/translate_tts?tl=en&q=', '" -O /mnt/ram/' ]) print(head + sendthis + str(count).zfill(2) + str(tail)) print subprocess.check_output(head + sendthis + str(count).zfill(2) + str(tail), shell=True) count = count + 1 print subprocess.call('python lighton.py', shell=True)
def confusion_matrix(export_path,
                     classes_predictions,
                     classes_trues,
                     classes: list = None,
                     normalize=False,
                     title='Confusion matrix',
                     cmap=plt.cm.Greens,
                     display_numbers=True,
                     maximum_chars_per_line=50,
                     rotate_x=None,
                     rotate_y=None,
                     display_names_x=True,
                     sort_by_decreasing_sample_size=True,
                     excludes_classes_with_samples_less_than=None,
                     main_font_size=16,
                     sub_font_size=8,
                     normalize_unit_percentage=False,
                     max_size_x_label=10):
    """
    Plot the confusion matrix of a predicted class versus the true class

    :param export_path: the folder where the confusion matrix will be exported
    :param classes_predictions: the classes that were predicted by the classifier
    :param classes_trues: the true classes
    :param classes: a list of labels. Label 0 for class 0, label 1 for class 1...
    :param normalize: if True, the confusion matrix will be normalized to 1.0 per row
    :param title: the title of the plot
    :param cmap: the color map to use
    :param display_numbers: if True, display the numbers within each cell of the confusion matrix
    :param maximum_chars_per_line: the title will be split every `maximum_chars_per_line` characters to avoid display issues
    :param rotate_x: if not None, indicates the rotation of the label on x axis
    :param rotate_y: if not None, indicates the rotation of the label on y axis
    :param display_names_x: if True, the class name, if specified, will also be displayed on the x axis
    :param sort_by_decreasing_sample_size: if True, the confusion matrix will be sorted by decreasing number of samples. This can be useful to show if the errors may be due to low number of samples
    :param excludes_classes_with_samples_less_than: if not None, the classes with less than `excludes_classes_with_samples_less_than` samples will be excluded
    :param normalize_unit_percentage: if True, use 100% base as unit instead of 1.0
    :param main_font_size: the font size of the text
    :param sub_font_size: the font size of the sub-elements (e.g., ticks)
    :param max_size_x_label: the maximum length of a label on the x-axis
    :return: None; the figure is exported to `export_path` and closed
    """
    # Coerce to ndarrays up front: the shape check and fancy indexing below
    # need array semantics even when sorting/exclusion are disabled (the
    # original only converted inside the sort branch, crashing on lists).
    classes_predictions = np.asarray(classes_predictions)
    classes_trues = np.asarray(classes_trues)

    if classes is not None:
        # Class ids index into `classes`, so the largest id must be a valid
        # index. (Fixed off-by-one: `<=` let an id equal to len(classes)
        # through, which raised IndexError later during remapping.)
        assert max(classes_trues) < len(
            classes), 'there are more classes than class names!'

    if sort_by_decreasing_sample_size:
        def remap(class_ids, mapping):
            return [mapping[class_id] for class_id in class_ids]

        # first, calculate the most classes with the highest number of samples
        # we need to keep track of the 2 trues & truths: these may be different
        class_samples = collections.Counter(
            np.concatenate(
                [np.asarray(classes_trues), np.asarray(classes_predictions)]))
        sorted_classes = sorted(list(class_samples.items()),
                                key=lambda t: t[1],
                                reverse=True)
        new_mappings = {}
        for new_mapping, (old_mapping, nb_samples) in enumerate(sorted_classes):
            new_mappings[old_mapping] = new_mapping

        if classes is not None:
            new_classes = []
            for new_mapping, (old_mapping, nb_samples) in enumerate(sorted_classes):
                new_classes.append(classes[old_mapping])
            classes = new_classes

        # now re-map the original classes
        classes_predictions = remap(classes_predictions, new_mappings)
        classes_trues = remap(classes_trues, new_mappings)
        classes_predictions = np.asarray(classes_predictions)
        classes_trues = np.asarray(classes_trues)

    if sort_by_decreasing_sample_size and excludes_classes_with_samples_less_than is not None:
        # IMPORTANT: we must have sorted by size before excluding the classes with low number of samples!
        # else the `classes` will not be consitent with the class id
        class_samples = collections.Counter(classes_trues)
        indices_to_keep = set()
        classes_to_keep = []
        for class_id, num_samples in class_samples.items():
            if num_samples >= excludes_classes_with_samples_less_than:
                indices = np.where(classes_trues == class_id)
                if len(indices) != 0:
                    indices_to_keep = indices_to_keep.union(indices[0])
                    classes_to_keep.append(class_id)
        # keep only the classes that satisfy the criteria
        indices_to_keep = list(indices_to_keep)
        classes_predictions = classes_predictions[indices_to_keep]
        classes_trues = classes_trues[indices_to_keep]
        if classes is not None:
            classes = np.asarray(classes)[classes_to_keep]

    if len(classes_predictions.shape) != 1:
        # Only 1-D label vectors are supported.
        return

    cm = sklearn.metrics.confusion_matrix(y_pred=classes_predictions,
                                          y_true=classes_trues)
    cm_orig = cm.copy()
    if normalize:
        unit = 1.0
        if normalize_unit_percentage:
            unit = 100.0
        # Per-row normalization; +1e-3 avoids division by zero on empty rows.
        cm = unit * cm.astype('float') / (cm.sum(axis=1)[:, np.newaxis] + 1e-3)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.imshow(cm, interpolation='nearest', cmap=cmap,
                    vmax=np.max(cm), vmin=0.0000001)
    ax.set_title('\n'.join(wrap(title, maximum_chars_per_line)),
                 fontsize=main_font_size)
    fig.colorbar(cax)

    if classes is not None:
        tick_marks = np.arange(len(classes))
        if display_names_x:
            # Truncate long names on the x axis to keep ticks readable.
            classes_short_names = [c[:max_size_x_label] for c in classes]
            plt.xticks(tick_marks, classes_short_names,
                       rotation=rotate_x, fontsize=sub_font_size)
        plt.yticks(tick_marks, classes, rotation=rotate_y, fontsize=sub_font_size)

    if display_numbers:
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            if cm_orig[i, j] != 0:
                # Show raw (un-normalized) counts, colored for contrast.
                ax.text(j, i, cm_orig[i, j],
                        horizontalalignment="center",
                        color="white" if cm[i, j] > thresh else "black",
                        fontsize=5)

    # sklearn's confusion matrix is indexed [true, predicted]: rows (y axis)
    # are the true labels, columns (x axis) the predictions. The original
    # code had these two axis captions swapped.
    ax.set_ylabel('True label', fontsize=main_font_size)
    ax.set_xlabel('Predicted label', fontsize=main_font_size)
    fig_tight_layout(fig)
    export_figure(export_path, title)
    plt.close()
for line in open('../src/include/wiredtiger.in', 'r'): if not skip: tfile.write(line) if line.count('Error return section: END'): tfile.write(line) skip = 0 elif line.count('Error return section: BEGIN'): tfile.write(' */\n') skip = 1 for err in errors: if 'undoc' in err.flags: tfile.write('/*! @cond internal */\n') tfile.write( '/*!%s.%s */\n' % (('\n * ' if err.long_desc else ' ') + err.desc[0].upper() + err.desc[1:], ''.join('\n * ' + l for l in textwrap.wrap( textwrap.dedent(err.long_desc).strip(), 77)) + '\n' if err.long_desc else '')) tfile.write('#define\t%s\t%d\n' % (err.name, err.value)) if 'undoc' in err.flags: tfile.write('/*! @endcond */\n') tfile.write('/*\n') tfile.close() compare_srcfile(tmp_file, '../src/include/wiredtiger.in') # Output the wiredtiger_strerror and wiredtiger_sterror_r code. tmp_file = '__tmp' tfile = open(tmp_file, 'w') tfile.write('''/* DO NOT EDIT: automatically built by dist/api_err.py. */ #include "wt_internal.h"
def main(programAsString=None):
    """Drive the HMMM (Harvey Mudd Miniature Machine) assembler.

    If `programAsString` is given, it is assembled directly and the result is
    written to 'out.b'.  Otherwise the command line is consulted
    (-f <input>, -o <output>, -h/--help) and, for anything still missing,
    the user is prompted interactively.

    On success the machine code is written to the output file; on failure an
    annotated listing of the assembly results is printed instead.
    """
    filename = ""
    outputname = "out.b"

    # here, we check in case there were command-line inputs
    if not programAsString:
        # expect_input / expect_output mark that the *next* argument is the
        # value for a preceding bare "-f" / "-o" flag
        expect_input = False
        expect_output = False
        for arg in sys.argv:
            if expect_input:
                filename = arg
                expect_input = False
                continue
            if expect_output:
                outputname = arg
                expect_output = False
                continue
            # the flag value may be glued to the flag ("-ffile") or follow it
            if arg[:2] == "-f":
                if arg[2:]:
                    filename = arg[2:]
                else:
                    expect_input = True
            elif arg[:2] == "-o":
                if arg[2:]:
                    outputname = arg[2:]
                else:
                    expect_output = True
            elif arg in ("-h", "--help"):
                print("hmmmAssembler.py")
                print(" Python program for assembling Harvey Mudd Miniature Machine code.")
                print(" Options:")
                print(" -f filename use filename as the input file")
                print(" -o filename use filename as the output file")
                print(" -h, --help print this help message\n")
                sys.exit()

        # the optional input prompt
        if filename == "":
            filename = input("Enter input file name: ")
        # to read from stdin instead we would use: program = sys.stdin.readlines()
        program = readfile(filename)

        # the optional output prompt
        if outputname == "":
            outputname = input("Enter output file name: ")

    if programAsString:
        outputname = "out.b"
        program = readstring(programAsString)

    machinecode = assemble(program)

    # check whether there are any errors: a triplet whose second field starts
    # with '*' is an error marker emitted by the assembler
    failure = any(triplet[1][0] == '*' for triplet in machinecode)

    if machinecode and not failure:
        writefile(machinecode, outputname)
    else:
        print("\n***** ASSEMBLY TERMINATED UNSUCCESSFULLY *****")
        print(" ASSEMBLY RESULTS:\n")
        try:
            # width of the widest address column, for alignment
            nwidth = max(len(str(triplet[0])) for triplet in machinecode)
        except ValueError:
            # max() on an empty sequence: nothing was assembled at all
            print(" <EMPTY FILE>\n")
            return
        for triplet in machinecode:
            # wrap returns a list of strings limited in length
            # this should give a 76 character line
            print(textwrap.wrap((str(triplet[0])).ljust(nwidth) + " : "
                                + (triplet[1]).ljust(31) + triplet[2], 76)[0])
        print("")
def boxplots(export_path, features_trials, title, xlabel, ylabel, meanline=False, plot_trials=True, scale='linear', y_range=None,
             rotate_x=None, showfliers=False, maximum_chars_per_line=50, title_line_height=0.055):
    """
    Compare different histories: e.g., compare 2 configuration, which one has the best results for a given
    measure?

    :param export_path: where to export the figure
    :param features_trials: a dictionary of list. Each list representing a feature
    :param title: the title of the plot
    :param ylabel: the label for axis y
    :param xlabel: the label for axis x
    :param meanline: if True, draw a line from the center of the plot for each history name to the next
    :param plot_trials: if True, each trial of a feature will be plotted
    :param scale: the axis scale to be used
    :param y_range: if not None, the (min, max) of the y-axis
    :param rotate_x: if not None, the rotation of the x axis labels in degree
    :param showfliers: if True, plot the outliers
    :param maximum_chars_per_line: the maximum number of characters of the title per line. If exceeded,
        the title is wrapped onto additional lines
    :param title_line_height: the height of the title lines
    """
    # `collections.Mapping` / `collections.Iterable` were removed in Python 3.10;
    # the ABCs live in the `collections.abc` module
    import collections.abc
    assert isinstance(features_trials, collections.abc.Mapping), 'must be a dictionary of list'
    # guard against an empty mapping: `next(iter(...))` below would otherwise
    # raise a bare StopIteration
    assert len(features_trials) > 0, 'expected at least one feature'
    assert isinstance(next(iter(features_trials.keys())), collections.abc.Iterable), 'each feature must be iterable'

    labels = []
    series = []
    for features_name, trials in features_trials.items():
        labels.append(features_name)
        series.append(trials)

    # for very long title, split it!
    title_lines = list(wrap(title, maximum_chars_per_line))

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.boxplot(series, labels=labels, positions=range(0, len(features_trials)), showfliers=showfliers, widths=0.4)
    ax.set_ylabel(ylabel, fontsize=16)
    ax.set_xlabel(xlabel, fontsize=16)
    # reuse the already-wrapped title instead of wrapping it a second time
    ax.set_title('\n'.join(title_lines), fontsize=20)
    ax.set_yscale(scale)
    if y_range is not None:
        ax.set_ylim(y_range)
    ax.grid(which='both', axis='y', linestyle='--')

    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(12)
        if rotate_x is not None:
            tick.label1.set_rotation(rotate_x)

    if plot_trials:
        for index, values in enumerate(series):
            # add some random "jitter" to the x-axis so overlapping trials stay visible
            x = np.random.normal(index, 0.01, size=len(values))
            plt.plot(x, values, 'r.')

    if meanline:
        # join the mean of each feature to the next with line segments
        means = [np.mean(values) for values in features_trials.values()]
        lines_x = []
        lines_y = []
        for index in range(len(means) - 1):
            lines_x.append(index)
            lines_y.append(means[index])
            lines_x.append(index + 1)
            lines_y.append(means[index + 1])
        ax.plot(lines_x, lines_y)

    fig_tight_layout(fig)
    # leave room at the top for the (possibly multi-line) title
    fig.subplots_adjust(top=1.0 - len(title_lines) * title_line_height)
    export_figure(export_path, title)
    plt.close()