Beispiel #1
0
def parse(commit):
    '''
    Parse the commit object into a JSON.
    '''
    payload, messages = {}, []
    cat_file = ['git', 'cat-file', 'commit', commit]
    execute = Popen(cat_file, stdout=PIPE, stdin=PIPE)
    stdout, _ = execute.communicate()

    for line in stdout.split('\n'):
        if not re.search(r'^(author|committer|tree|parent|commit)', line):
            messages.append(line)

    if execute.returncode == 0:
        payload.update({'length': len(stdout)})
        payload.update({
            'raw': stdout,
            'length': len(stdout),
            'commit': commit,
            'message': '\n'.join(messages),
        })

    if DEBUG_FLAG:
        print fill('[parse] Contents of {commit}:\n'.format(commit=commit))
        print payload['raw']
        print DELIMITER

    return payload
Beispiel #2
0
    def generatePlainText(self):
        """Build the plain-text announcement for the next film night:
        a wrapped intro paragraph followed by the synopsis split into
        wrapped paragraphs."""
        intro = "".join([
            "Join the ", self.city, " Sunday Night Film Club ",
            "(", self.clubURL, ") this ",
            self.nextSunday.strftime("%A, %b %e"), self.daySuffix,
            " at ", self.showTime, " for ", self.film,
            " at the ", self.location, ". ",
            "Look for ", self.host, " wearing ", self.wearing,
            " in the theatre lobby about 15 minutes before the film. "
            "As always, after the film we will descend on a local "
            "establishment for dinner/drinks/discussion.\n\n",
        ])

        resultText = textwrap.fill(intro, 70) + "\n\n"

        # hacky attempt at breaking synopsis up into paragraphs: split on
        # lines that consist of a single carriage return.
        splitter = re.compile("^\r$", re.MULTILINE)
        for chunk in splitter.split(self.synopsis):
            cleaned = re.sub("\r", "", chunk)
            cleaned = re.sub("\n", "", cleaned)
            cleaned = textwrap.dedent(cleaned).strip()
            resultText += textwrap.fill(cleaned, 70) + "\n\n"

        return resultText
Beispiel #3
0
def WrapStr(myStr):
	"""Wrap myStr to the configured line length when wrapping is enabled;
	otherwise return it unchanged."""
	width = int(Prefs['Line_Length'])
	if not Prefs['Line_Wrap']:
		return myStr
	wrapped = fill(myStr, width)
	Log.Debug('Wrapped Output is: %s' %(wrapped))
	return wrapped
def display_pull_request(pull_request):
	"""Nicely display info about a given pull request.

	Prints the minimal summary line, the pull request URL, and the
	(re-wrapped) body text, indented by the configured description indent.
	"""

	display_pull_request_minimal(pull_request)

	description_indent = options['description-indent']

	print "%s%s" % (description_indent, color_text(pull_request.get('html_url'), 'display-title-url'))

	pr_body = pull_request.get('body')

	if pr_body and pr_body.strip():
		# Convert HTML line breaks into real newlines before wrapping.
		pr_body = re.sub('(<br\s?/?>)', '\n', pr_body.strip())

		if options['description-strip-newlines']:
			# Re-flow the whole body as one indented paragraph.
			pr_body = fill(pr_body, initial_indent=description_indent, subsequent_indent=description_indent, width=80)
		else:
			# Normalize newlines
			pr_body = re.sub('\r?\n', '\n', pr_body)

			pr_body = pr_body.splitlines()

			# Wrap each line separately so intentional line breaks survive.
			pr_body = [fill(line.strip(), initial_indent=description_indent, subsequent_indent=description_indent, width=80) for line in pr_body]

			pr_body = '\n'.join(pr_body)

		print pr_body

	print
Beispiel #5
0
def get_env(suppress_warning=False):
    '''
    :returns env_vars: mapping of environment variable names to resolved values
    :type env_vars: dict

    This method looks up the known environment variables, and if they
    are not found, then attempts to resolve them by looking in the
    file ~/.dnanexus_config/environment, followed by the installed
    defaults in /opt/dnanexus/environment.
    '''

    # Later updates win: global conf < user conf < session conf.
    env_vars = read_conf_dir(get_global_conf_dir())
    for conf_dir in (get_user_conf_dir(), get_session_conf_dir()):
        env_vars.update(read_conf_dir(conf_dir))

    env_overrides = []
    for name in VAR_NAMES:
        if name in os.environ:
            # Shell environment wins; remember disagreements with the
            # stored value so we can warn about them below.
            if name in env_vars and env_vars.get(name) != os.environ[name]:
                env_overrides.append(name)
            env_vars[name] = os.environ[name]
        elif name not in env_vars:
            env_vars[name] = None

    if sys.stdout.isatty():
        if not suppress_warning and env_overrides:
            sys.stderr.write(textwrap.fill("WARNING: The following environment variables were found to be different than the values last stored by dx: " + ", ".join(env_overrides), width=80) + '\n')
            sys.stderr.write(textwrap.fill('To use the values stored by dx, unset the environment variables in your shell by running "source ~/.dnanexus_config/unsetenv".  To clear the dx-stored values, run "dx clearenv".', width=80) + '\n')

    return env_vars
Beispiel #6
0
    def pprint(self, short=False):
        """Returns a pretty-printed version of the entry.
        If short is true, only print the title."""
        date_str = self.date.strftime(self.journal.config['timeformat'])
        linewrap = self.journal.config['linewrap']
        if short or not linewrap:
            title = date_str + " " + self.title
            body = self.body.strip()
        else:
            title = textwrap.fill(date_str + " " + self.title, linewrap)
            quoted_lines = []
            for line in self.body.strip().splitlines():
                wrapped = textwrap.fill(
                    line + " ",
                    linewrap,
                    initial_indent="| ",
                    subsequent_indent="| ",
                    drop_whitespace=False,
                ).replace('  ', ' ')
                quoted_lines.append(wrapped)
            body = "\n".join(quoted_lines)

        # Suppress bodies that are just blanks and new lines.
        has_body = len(self.body) > 20 or not all(
            char in (" ", "\n") for char in self.body)

        if short:
            return title
        return u"{title}{sep}{body}\n".format(
            title=title,
            sep="\n" if has_body else "",
            body=body if has_body else "",
        )
Beispiel #7
0
    def __init__(self, parent, authors_edit, autogen_button, db,
            copy_a_to_as_action, copy_as_to_a_action, a_to_as, as_to_a):
        """Line edit for the author-sort value, kept in sync with the
        authors edit widget via the signal connections made below."""
        EnLineEdit.__init__(self, parent)
        self.authors_edit = authors_edit
        self.db = db

        # Pre-build the two tooltips shown depending on whether the current
        # author sort matches the current author (green) or not (red).
        base = self.TOOLTIP
        ok_tooltip = '<p>' + textwrap.fill(base+'<br><br>'+
                _(' The green color indicates that the current '
                    'author sort matches the current author'))
        bad_tooltip = '<p>'+textwrap.fill(base + '<br><br>'+
                _(' The red color indicates that the current '
                    'author sort does not match the current author. '
                    'No action is required if this is what you want.'))
        self.tooltips = (ok_tooltip, bad_tooltip)

        # Keep the sort value and tooltip state updated as either field changes.
        self.authors_edit.editTextChanged.connect(self.update_state_and_val)
        self.textChanged.connect(self.update_state)

        self.autogen_button = autogen_button
        self.copy_a_to_as_action = copy_a_to_as_action
        self.copy_as_to_a_action = copy_as_to_a_action

        # Wire the auxiliary buttons/actions to their generate/copy handlers.
        autogen_button.clicked.connect(self.auto_generate)
        copy_a_to_as_action.triggered.connect(self.auto_generate)
        copy_as_to_a_action.triggered.connect(self.copy_to_authors)
        a_to_as.triggered.connect(self.author_to_sort)
        as_to_a.triggered.connect(self.sort_to_author)
        self.update_state()
Beispiel #8
0
    def __init__(self, name, plugins, gui_name, parent=None):
        """Category widget: a bold section header over a toolbar holding
        one action per plugin in ``plugins``.

        NOTE(review): ``name`` is not used in this excerpt — presumably
        used by the enclosing class elsewhere; confirm before removing.
        """
        QWidget.__init__(self, parent)
        self._layout = QVBoxLayout()
        self.setLayout(self._layout)
        # Bold section label followed by a horizontal separator line.
        self.label = QLabel(gui_name)
        self.sep = QFrame(self)
        self.bf = QFont()
        self.bf.setBold(True)
        self.label.setFont(self.bf)
        self.sep.setFrameShape(QFrame.HLine)
        self._layout.addWidget(self.label)
        self._layout.addWidget(self.sep)

        self.plugins = plugins

        # Flat, immovable toolbar with 32px icons and text under the icons.
        self.bar = QToolBar(self)
        self.bar.setStyleSheet(
                'QToolBar { border: none; background: none }')
        self.bar.setIconSize(QSize(32, 32))
        self.bar.setMovable(False)
        self.bar.setFloatable(False)
        self.bar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        self._layout.addWidget(self.bar)
        self.actions = []
        for p in plugins:
            # Each plugin becomes a toolbar action that triggers it.
            target = partial(self.triggered, p)
            ac = self.bar.addAction(QIcon(p.icon), p.gui_name, target)
            ac.setToolTip(textwrap.fill(p.description))
            ac.setWhatsThis(textwrap.fill(p.description))
            ac.setStatusTip(p.description)
            self.actions.append(ac)
            w = self.bar.widgetForAction(ac)
            w.setCursor(Qt.PointingHandCursor)
            w.setAutoRaise(True)
            w.setMinimumWidth(100)
Beispiel #9
0
def repo_status(repo, tracking_branch, remote_ref, indent=0):
  """Print the status of ``repo``'s tracking branch relative to its remote.

  Warns when the checked-out branch differs from ``tracking_branch`` or
  when either ref is missing; otherwise prints an indented, wrapped
  summary of the ahead/behind counts.
  """
  # Make sure the right branch is checked out
  if repo.current_branch() != tracking_branch:
    print " " * indent + ("Checked out branch is %s instead of %s" %
                         (repo.current_branch(), tracking_branch))

  # Make sure the branches exist
  has_tracking = repo.has_ref(tracking_branch)
  has_remote = repo.has_ref(remote_ref)

  if not has_tracking:
    print " " * indent + "You appear to be missing the tracking branch " + \
          tracking_branch
  if not has_remote:
    print " " * indent + "You appear to be missing the remote branch " + \
          remote_ref

  # Without both refs the ahead/behind comparison below is impossible.
  if not has_tracking or not has_remote:
    return

  # Print tracking branch status
  (left, right) = repo.tracking_status(tracking_branch, remote_ref)
  text = _format_tracking(tracking_branch, remote_ref, left, right)
  indent_str = " " * indent
  print textwrap.fill(text, initial_indent=indent_str, subsequent_indent=indent_str)
def see(ms):
    """Display topic number ``ms`` from the home-page list, pausing after
    each wrapped paragraph until Enter is pressed."""
    clear_screen()
    if LinkTopicHomeRaw[ms] == "":
        update_data_home()

    # Split the page content to be viewed into a list of lines.
    source_linebyline = convert_unicode_2_ascii(sourcetext(LinkTopicHomeRaw[ms])).splitlines()

    print "\n\n\n"
    print "===================================================="
    print "===================================================="
    print "=========        Enter de xem tiep!!      =========="
    print "\n\n\n"
    print textwrap.fill("    " + "[" + str(ms) + "]:" + TenTopicHome[ms])
    print "\n"

    for i in range(0, source_linebyline.__len__()):
       if source_linebyline[i] != "":
            print '\n' 
            # Wrap the text, then print line by line to keep the left margin aligned.
            for element in textwrap.wrap(source_linebyline[i], 75):
                print "  " + element
            # Substitute for a do-while loop: wait until Enter (code 13) is pressed.
            while True:
                key = msvcrt.getch()
                if ord(key) == 13:
                    break
            continue

    print "\n\n\n"
    print "===================================================="
    print "===================================================="
    print "===================================================="
    print "\n\n\n"
Beispiel #11
0
 def show_plugin(self, plugin):
     """Create and display the configuration widget for ``plugin`` in the
     preferences dialog, updating the window title, actions and bars."""
     self.showing_widget = plugin.create_widget(self.scroll_area)
     self.showing_widget.genesis(self.gui)
     self.showing_widget.initialize()
     self.set_tooltips_for_labels()
     self.scroll_area.setWidget(self.showing_widget)
     self.stack.setCurrentIndex(1)
     self.showing_widget.show()
     self.setWindowTitle(__appname__ + ' - ' + _('Preferences') + ' - ' +
             plugin.gui_name)
     # Keep "apply" disabled until the widget reports a change.
     self.apply_action.setEnabled(False)
     self.showing_widget.changed_signal.connect(lambda :
             self.apply_action.setEnabled(True))
     self.restore_action.setEnabled(self.showing_widget.supports_restoring_to_defaults)
     tt = self.showing_widget.restore_defaults_desc
     if not self.restore_action.isEnabled():
         tt = _('Restoring to defaults not supported for') + ' ' + \
             plugin.gui_name
     self.restore_action.setToolTip(textwrap.fill(tt))
     self.restore_action.setWhatsThis(textwrap.fill(tt))
     self.restore_action.setStatusTip(tt)
     self.bar_title.show_plugin(plugin)
     self.setWindowIcon(QIcon(plugin.icon))
     self.bar.setVisible(True)
     self.bb.setVisible(False)
Beispiel #12
0
def main():
    """Scan the given paths (default: current directory) for uses of
    deprecated NLTK functions/classes and print suggested replacements."""
    paths = sys.argv[1:] or ['.']

    print 'Importing nltk...'
    try:
        import nltk
    except ImportError:
        print 'Unable to import nltk -- check your PYTHONPATH.'
        sys.exit(-1)

    print 'Finding definitions of deprecated funtions & classes in nltk...'
    find_deprecated_defs(nltk.__path__[0])

    print 'Looking for possible uses of deprecated funcs & classes...'
    dep_names = print_deprecated_uses(paths)

    if not dep_names:
        print 'No deprecated funcs or classes found!'
    else:
        print "\n"+term.BOLD+"What you should use instead:"+term.NORMAL
        for name in sorted(dep_names):
            # A name may be deprecated as a function, class and/or method;
            # merge all recorded replacement messages for it.
            msgs = deprecated_funcs[name].union(
                deprecated_classes[name]).union(
                deprecated_methods[name])
            for msg, prefix, suffix in msgs:
                print textwrap.fill(term.RED+prefix+name+suffix+
                                    term.NORMAL+': '+msg,
                                    width=75, initial_indent=' '*2,
                                    subsequent_indent=' '*6)
Beispiel #13
0
 def _tooltip(self, widget, x, y, keyboard_mode, tooltip):
     """ Show current event in tooltip.

     Builds a box with the station name, the event icon, and wrapped
     title/host/time labels, installs it as the custom tooltip widget and
     returns True so it is displayed.
     """
     # Silver Rain
     silver = Gtk.Label()
     silver.set_markup("<b>{0}</b>".format(_("Silver Rain")))
     # Icon
     img = Gtk.Image.new_from_pixbuf(self._event_icon)
     # Program.  The wrapped strings get their own names instead of
     # rebinding ``str``, which shadowed the builtin in the original.
     title = Gtk.Label()
     title_text = textwrap.fill(self._event_title, 21)
     title.set_markup("<b>" + title_text + "</b>")
     title.set_alignment(0, 0.5)
     host = Gtk.Label()
     host_text = textwrap.fill(self._event_host, 21)
     host.set_text(host_text)
     host.set_alignment(0, 0.5)
     time = Gtk.Label()
     time.set_text(self._event_time)
     time.set_alignment(0, 0.5)
     # Pack
     box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=15)
     box.set_border_width(10)
     grid = Gtk.Grid()
     grid.set_column_spacing(20)
     grid.attach(img,    0, 0, 1, 3)
     grid.attach(title,  1, 0, 1, 1)
     grid.attach(host,   1, 1, 1, 1)
     grid.attach(time,   1, 2, 1, 1)
     box.pack_start(silver, False, False, 0)
     box.pack_start(grid, False, False, 0)
     # Show
     box.show_all()
     tooltip.set_custom(box)
     return True
Beispiel #14
0
def print_options(option_list):
    """Pretty-print the available config options, grouped by section.

    Each option is a 4-tuple ``(section, name, type, default)`` or a
    5-tuple with a trailing doc string, which is wrapped to the terminal
    width when present.
    """
    import textwrap
    from auxcodes import bcolors
    # expected to be called if a config file is not specified. Print a
    # list of options
    option_list=sorted(option_list,key=lambda x:x[1])
    width,height=_get_terminal_size_linux()
    sections=sorted(set(x[0] for x in option_list))
    # Column widths: longest option name and longest type name.
    klen=max([len(x[1]) for x in option_list])
    tlen=max([len(typename(x[2])) for x in option_list])
    fstring='%-'+str(klen)+'s = %-'+str(tlen)+'s (default %s)'
    indent=' '*(klen+3)
    for s in sections:
        print bcolors.OKBLUE+'\n[%s]' % s+bcolors.ENDC
        for o in option_list:
            # Options come as 4-tuples (no doc) or 5-tuples (with doc).
            if len(o)==4:
                (section, name, otype, default)=o
                doc=None
            elif len(o)==5:
                (section, name, otype, default, doc)=o
            else:
                print 'Oops!',o
                continue
            if section==s:
                
                print bcolors.BOLD+fstring % (name, typename(otype), str(default))+bcolors.ENDC
                if doc is not None:
                    print textwrap.fill(doc,width-1,initial_indent=indent,subsequent_indent=indent)
Beispiel #15
0
def promptForLogicalInfo():
    """Interactively prompt for the logical notary information and return
    a bundle dict with ``name``, ``hosts``, ``bundle_location`` and
    ``version`` keys."""
    bundle = {"name" : "", "hosts" : [], "bundle_location" : "", "version" : 1}
    
    print "\n" + textwrap.fill("A notary is a 'logical' entity that represents an " \
                                 "arbitrary number of physical hosts.  To create a " \
                                 "notary bundle, this script will prompt you for general " \
                                 "information about the logical notary entity, and then for " \
                                 "information about the physical notary hosts.", 78)


    print "\n\n" + textwrap.fill("First, please enter the name of the entity managing this notary. " \
                                 "For an individual, this would be an individual's " \
                                 "name (eg: John Smith). For an organization, this " \
                                 "would be the organization's name (eg: Acme).", 78) + "\n"

    bundle['name'] = loopingPrompt("Notary name: ")

    print "\n\n" + textwrap.fill("Next, please enter the complete URL for where this bundle will " \
                                 "be hosted (eg: https://thoughtcrime.org/thoughtcrime.notary).  It must " \
                                 "be an https URL, and the file must have a '.notary' " \
                                 "extension. This location will be periodically checked by clients for " \
                                 "updates to your notary configuration.", 78) + "\n"

    bundle['bundle_location'] = loopingPrompt("Bundle location: ")

    # Keep prompting until the URL is HTTPS and ends in '.notary'.
    while (not bundle['bundle_location'].startswith("https://")) or (not bundle['bundle_location'].endswith(".notary")):
        print textwrap.fill("Sorry, the bundle location must be an HTTPS URL and have a '.notary' file extension.", 78)
        bundle['bundle_location'] = loopingPrompt("Bundle location: ")
    
    return bundle
Beispiel #16
0
def print_dict(d, dict_property="Property", wrap=0):
    """Print ``d`` as a two-column pretty table, optionally wrapping
    values to ``wrap`` columns.  Values containing a literal '\\n' marker
    (e.g. fault stacktraces) are expanded into multiple rows."""
    table = prettytable.PrettyTable([dict_property, 'Value'], print_empty=False)
    table.align = 'l'
    for key, value in sorted(six.iteritems(d)):
        # convert dict to str to check length
        if isinstance(value, dict):
            value = jsonutils.dumps(value)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        if value and isinstance(value, six.string_types) and r'\n' in value:
            label = key
            for piece in value.strip().split(r'\n'):
                if wrap > 0:
                    piece = textwrap.fill(six.text_type(piece), wrap)
                table.add_row([label, piece])
                # Only the first row carries the key.
                label = ''
        else:
            if wrap > 0:
                value = textwrap.fill(six.text_type(value), wrap)
            table.add_row([key, value])
    encoded = encodeutils.safe_encode(table.get_string())
    # FIXME(gordc): https://bugs.launchpad.net/oslo-incubator/+bug/1370710
    if six.PY3:
        encoded = encoded.decode()
    print(encoded)
Beispiel #17
0
 def show_info(self):
     """Print this module's metadata: meta fields, description, options,
     source options (if supported) and comments."""
     self.info['Path'] = os.path.join('modules', self._modulename) + '.py'
     print('')
     # meta info
     for item in ['Name', 'Path', 'Author', 'Version']:
         if item in self.info:
             print('%s: %s' % (item.rjust(10), self.info[item]))
     #dirs = self._modulename.split('/')
     #if dirs[0] == 'recon':
     #    print('%s: %s => %s' % ('Transform'.rjust(10), dirs[1].upper(), dirs[2].upper()))
     print('')
     # description
     if 'Description' in self.info:
         print('Description:')
         print('%s%s' % (self.spacer, textwrap.fill(self.info['Description'], 100, subsequent_indent=self.spacer)))
         print('')
     # options
     print('Options:', end='')
     self.show_options()
     # sources (only modules that define a default source accept them)
     if hasattr(self, '_default_source'):
         print('Source Options:')
         print('%s%s%s' % (self.spacer, 'default'.ljust(15), self._default_source))
         print('%s%sstring representing a single input' % (self.spacer, '<string>'.ljust(15)))
         print('%s%spath to a file containing a list of inputs' % (self.spacer, '<path>'.ljust(15)))
         print('%s%sdatabase query returning one column of inputs' % (self.spacer, 'query <sql>'.ljust(15)))
         print('')
     # comments
     if 'Comments' in self.info and self.info['Comments']:
         print('Comments:')
         for comment in self.info['Comments']:
             print('%s%s' % (self.spacer, textwrap.fill('* %s' % (comment), 100, subsequent_indent=self.spacer)))
         print('')
Beispiel #18
0
def setup(config_class, node_factory):
    """Interactive first-run setup: ensure node id, recert code, optional
    encryption passphrase, and a client certificate are configured."""
    from .client.node import ClientNodeConfig
    config = config_class.get_config()
    print
    msg = 'Your configuration file is located at %s. ' % config.config_path
    msg += 'You can edit the configuration file with a text editor.'
    print textwrap.fill(msg)
    print
    # Prompt for credentials that are still missing from the config.
    if not config.get('node_id'):
        if not prompt_register(config):
            raw_node_id = raw_input('Enter the Node ID assigned to you: ')
            config.set('node_id', raw_node_id.strip())
    if not config.get('recert_code'):
        raw_recert_code = raw_input('Enter the Recert Code assigned to you: ')
        config.set('recert_code', raw_recert_code.strip())
    if config_class is ClientNodeConfig and config.encrypt_passphrase:
        set_encryption_passphrase(config, save=False)
    config.save()
    # See if we have a cert.
    if not os.path.exists(config.cert_path):
        print 'Fetching your certificate.'
        node = node_factory(config=config)
        node.request_new_certificate()
    if not config.encrypt_passphrase:
        set_encryption_passphrase(config)
Beispiel #19
0
def setup_basic(config):
    """Prompt for (or confirm) the basic config values: node id and
    certificate code."""
    msg = 'Your configuration file is located at %s. ' % config.config_path
    msg += 'You can edit the configuration file with a text editor.'
    print textwrap.fill(msg)
    print

    def setup_value(value_name, value_text):
        # Prompt for one value, offering to keep any existing setting.
        change = False
        if not config.get(value_name):
            change = True
        else:
            print 'Current value for "%s": %s' % \
                (value_text, config.get(value_name))
            response = input_yesno('Change %s?' % value_text, 'no')
            if response == 'yes':
                change = True
        if change:
            raw_node_id = raw_input('Enter value for %s: ' % value_text)
            config.set(value_name, raw_node_id.strip())
            config.save()

    # Node ID
    setup_value('node_id', 'Node ID')
    print

    # Cert Code
    setup_value('recert_code', 'Certificate Code')
    print
Beispiel #20
0
def demo(train_size=100, test_size=100, java_home=None, mallet_home=None):
    """Train a MalletCRF tagger on a slice of the Brown corpus, print its
    accuracy and a sample tagging, then delete the model file.

    :param train_size: number of 'news' sentences to train on
    :param test_size: number of 'editorial' sentences to evaluate on
    :param java_home: java installation path, passed to nltk
    :param mallet_home: mallet installation path, passed to nltk
    :returns: the trained CRF tagger
    """
    from nltk.corpus import brown
    import textwrap

    # Define a very simple feature detector
    def fd(sentence, index):
        word = sentence[index]
        return dict(word=word, suffix=word[-2:], len=len(word))

    # Let nltk know where java & mallet are.
    nltk.internals.config_java(java_home)
    nltk.classify.mallet.config_mallet(mallet_home)

    # Get the training & test corpus.  We simplify the tagset a little:
    # just the first 2 chars.
    def strip(corpus): return [[(w, t[:2]) for (w,t) in sent]
                               for sent in corpus]
    brown_train = strip(brown.tagged_sents(categories='news')[:train_size])
    brown_test = strip(brown.tagged_sents(categories='editorial')[:test_size])

    crf = MalletCRF.train(fd, brown_train, #'/tmp/crf-model',
                          transduction_type='VITERBI')
    sample_output = crf.tag([w for (w,t) in brown_test[5]])
    acc = nltk.tag.accuracy(crf, brown_test)
    print '\nAccuracy: %.1f%%' % (acc*100)
    print 'Sample output:'
    print textwrap.fill(' '.join('%s/%s' % w for w in sample_output),
                        initial_indent='  ', subsequent_indent='  ')+'\n'

    # Clean up
    print 'Clean-up: deleting', crf.filename
    os.remove(crf.filename)

    return crf
Beispiel #21
0
def paragraphs(text):
    """Yield wrapped paragraphs from *text*.

    loosely based on brett cannon's recipe:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/358228

    *text* is either a plain string or a list of email message parts; for
    a list, the first text/plain payload is yielded.  For a string, runs
    of non-blank lines are re-wrapped with textwrap.fill, blank lines
    yield "\\n", indented lines pass through verbatim, and quoted lines
    (starting with ">") are dropped.
    """
    accum = []
    blank = ""

    if type(text) is list:
        # Email parts: emit the first text/plain payload.
        for msg in text:
            if msg.get_content_type() == "text/plain":
                yield msg.get_payload()
                # PEP 479: 'raise StopIteration' inside a generator becomes
                # RuntimeError on Python 3.7+; 'return' ends it cleanly.
                return
        else:
            # No text/plain part found.
            # NOTE(review): str(msg)[0] yields only the first character of
            # the last part -- looks suspicious, but preserved as-is.
            yield str(msg)[0]
            return

    for line in text.split("\n"):
        if line.strip() == blank:
            if accum:
                yield textwrap.fill(" ".join(accum))
                accum = []
            yield "\n"
        elif line.startswith(">"):
            # Quoted material: flush any pending paragraph, drop the quote.
            if accum:
                yield textwrap.fill(" ".join(accum))
                accum = []
        elif line.startswith("\t") or line.startswith(" "):
            # Preformatted/indented lines pass through untouched.
            yield line
        else:
            accum.append(line)

    # Flush the final paragraph (the original silently dropped it when the
    # text did not end with a blank line).
    if accum:
        yield textwrap.fill(" ".join(accum))
Beispiel #22
0
def hasher(filename, blocksize=-1):
    """Hash the file, returning (md5_hexdigest, sha1_hexdigest) in an
    optimal manner.  Most of this code is error handling.

    On MemoryError the read is retried with the smaller SMALLBLOCK size;
    on IOError (or a second MemoryError) an error is printed and the
    process exits.
    """
    hashmd5 = hashlib.md5()
    hashsha1 = hashlib.sha1()
    try:
        with open(filename, "rb") as f:
            # Binary mode: read() yields bytes, so the loop sentinel must
            # be b"" (a str "" sentinel never matches on Python 3 and the
            # loop would spin forever; b"" == "" still holds on Python 2).
            for block in iter(lambda: f.read(blocksize), b""):
                hashmd5.update(block)
                hashsha1.update(block)

    except IOError:
        err = "Error: Unable to read the file \"%s\". Make sure you have permission to read this file.  You may have " \
                "to disable or uninstall anti-virus to stop it from denying access to the file.  Correct this " \
                "problem and try again." % filename
        sys.stderr.write(textwrap.fill(err, width=term_width()) + "\n")
        sys.exit(-1)

    except MemoryError:
        # OOM, revert to the smaller blocksize for this file
        #print "DEBUG: Reducing block size for the file %s"%filename
        if blocksize != -1:
            # Blocksize is already small - bail
            err = "Error: Unable to read the file \"%s\" into memory. This could be caused by anti-virus, or by " \
                    "other system instability issues. Kill some running processes before trying again." % filename
            sys.stderr.write(textwrap.fill(err, width=term_width()) + "\n")
            sys.exit(-1)

        # We can't recover if this fails, so no try/except block.
        with open(filename, "rb") as f:
            for block in iter(lambda: f.read(SMALLBLOCK), b""):
                hashmd5.update(block)
                hashsha1.update(block)

    return (hashmd5.hexdigest(), hashsha1.hexdigest())
Beispiel #23
0
def get_spell_info(spell, width=70):
    """Yield human-readable info lines for the named spell: name,
    description, required configs and (when a test exists) example usage.

    Raises RuntimeError when the spell is not found in the registry.
    """
    spellDict = lib.registry.lookup_by_name(spell=spell)
    spellObj = spellDict['spell']
    if not spellDict['spell']:
        raise RuntimeError()

    yield '  * Name: %s' % spellObj.__name__
    yield textwrap.fill(
        '  * Description: %s' % spellObj.__doc__,
        width=width, subsequent_indent=' ' * 4
    )
    if spellObj.config:
        yield '  * Required configs:'
        for config in spellObj.config:
            yield '      - %s' % config
    else:
        yield '  * Required configs: None'

    if spellDict['test']:
        queries = spellDict['test'].collectQueries()
        yield '  * Example usage:'
        # NOTE(review): iteritems() is Python 2 only — confirm the target
        # interpreter before porting this to items().
        for query, result in queries.iteritems():
            yield textwrap.fill(
                '      >>> %s' % query,
                width=width, subsequent_indent=' ' * 10
            )
            yield textwrap.fill(
                '      ... %s' % result,
                width=width, subsequent_indent=' ' * 10
            )
	def look(self):
		"""Print a banner with this object's name and its long description
		wrapped to 42 columns."""
		print "-" * 42
		print self.name.center(42, " ")
		print "-" * 42
		print ""
		print textwrap.fill(self.ldesc, 42)
		print ""
Beispiel #25
0
def main_parser():
    """Build the top-level argparse parser for the ``oe`` executable,
    registering all server-side and client-side sub-commands."""
    parser = argparse.ArgumentParser(
        usage=argparse.SUPPRESS,
        description=textwrap.fill(textwrap.dedent("""\
                    OpenERP Command provides a set of command-line tools around
                    the OpenERP framework: openobject-server. All the tools are
                    sub-commands of a single oe executable.""")),
        epilog="""Use <command> --help to get information about the command.""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Assemble the "Available commands" text: one entry per server command,
    # with its (wrapped) docstring when present.
    parts = []
    prefix_len = len(__package__) + 1
    for cmd in command_list_server:
        parts.append(cmd.__name__[prefix_len:])
        if cmd.__doc__:
            parts.append(":\n")
            parts.append(textwrap.fill(str(cmd.__doc__).strip(),
                                       subsequent_indent='  ',
                                       initial_indent='  '))
        parts.append("\n\n")
    subparsers = parser.add_subparsers(
        title="Available commands",
        help=argparse.SUPPRESS,
        description="".join(parts[:-1]),
    )
    # Server-side commands.
    for cmd in command_list_server:
        cmd.add_parser(subparsers)
    # Client-side commands. TODO one per .py file.
    for cmd in command_list_client:
        cmd(subparsers)
    return parser
Beispiel #26
0
  def _extensionIndexCommitMessage(self, name, description, update, wrap=True):
    args = description.__dict__
    args["name"] = name

    if update:
      template = textwrap.dedent("""\
        ENH: Update %(name)s extension

        This updates the %(name)s extension to %(scmrevision)s.
        """)
      if wrap:
        paragraphs = (template % args).split("\n")
        return "\n".join([textwrap.fill(p, width=76) for p in paragraphs])
      else:
        return template % args

    else:
      template = textwrap.dedent("""\
        ENH: Add %(name)s extension

        Description:
        %(description)s

        Contributors:
        %(contributors)s
        """)

      if wrap:
        for key in args:
          args[key] = textwrap.fill(args[key], width=72)

      return template % args
Beispiel #27
0
    def toolTip(self, column):
        """Return tooltip for column, or None when the dataset is gone or
        the column has no tooltip."""
        # Look up the dataset this row refers to.
        try:
            ds = self.doc.data[self.data[0]]
        except KeyError:
            return None

        c = self.cols[column]
        if c == "name":
            # Dataset description plus any tags, wrapped to 40 columns.
            text = ds.description()
            if text is None:
                text = ''

            if ds.tags:
                text += '\n\n' + _('Tags: %s') % (' '.join(sorted(ds.tags)))

            return textwrap.fill(text, 40)
        elif c == "size" or (c == 'type' and 'size' not in self.cols):
            text = ds.userPreview()
            # add preview of dataset if possible
            pix = self.getPreviewPixmap(ds)
            if pix:
                text = text.replace("\n", "<br>")
                text = "<html>%s<br>%s</html>" % (text, utils.pixmapAsHtml(pix))
            return text
        elif c == "linkfile" or c == "type":
            return textwrap.fill(ds.linkedInformation(), 40)
        return None
Beispiel #28
0
def print_dict(d, dict_property="Property", wrap=0):
    """Print ``d`` as a two-column PrettyTable, optionally wrapping values
    to ``wrap`` columns.  Values containing a literal '\\n' marker (e.g.
    fault stacktraces) are expanded into multiple rows."""
    table = prettytable.PrettyTable([dict_property, 'Value'],
                                    caching=False, print_empty=False)
    table.align = 'l'
    for key, value in sorted(six.iteritems(d)):
        # convert dict to str to check length
        if isinstance(value, dict):
            value = str(value)
        if isinstance(value, six.string_types):
            value = strutils.safe_encode(value)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        if value and isinstance(value, six.string_types) and r'\n' in value:
            label = key
            for chunk in value.strip().split(r'\n'):
                if wrap > 0:
                    chunk = textwrap.fill(str(chunk), wrap)
                table.add_row([label, chunk])
                # Only the first row carries the key.
                label = ''
        else:
            if wrap > 0:
                value = textwrap.fill(str(value), wrap)
            table.add_row([key, value])
    print(table.get_string())
def main():
    """Build and run a composite-rating MySQL report from the command line.

    NOTE(review): Python 2 code (print statements, mixed tab/space indent).
    Relies on module-level ``args``/``options`` (CLI parsing results) and on
    helpers ``prep_composite_rating``, ``get_tables``, ``build_query`` and
    ``produce_report`` defined elsewhere in this file.
    """
    # require at least one positional argument: the composite rating string
    if len(args) < 1:
	sys.stderr.write("No rating string provided! Exiting...\n")
	sys.exit(1)
    rating_string = ' '.join(args)
    print "Input composite rating string: ", rating_string
    
    # normalize the rating expression and find the tables it references
    prepped_string = prep_composite_rating(rating_string)
    print "Prepped composite rating string:", prepped_string
    rating_tables = get_tables(prepped_string)
   
    # the optional WHERE clause may reference additional tables
    where_clause = prep_composite_rating(options.where_clause)
    print "Where clause:", where_clause
    where_tables = get_tables(where_clause)
  
    # union of tables needed by the rating expression and the WHERE clause
    tables = sorted(list(set(where_tables+rating_tables)))
    print "MySQL DB tables to use:", ', '.join(tables)

    query = build_query(prepped_string, where=where_clause)
    print "MySQL query to compute composite rating:"
    print textwrap.fill(query, width=72, initial_indent='\t', subsequent_indent='\t')

    produce_report(prepped_string, numbins=options.numbins, \
			where=where_clause, norm=options.norm, \
			log=options.log)
Beispiel #30
0
    def getHelpTopic(self, topic, app):
        """Assemble the help text for the 'prepare' command.

        Collects template names/descriptions from all 'prepare' command
        extensions that support *app* and appends a note about user-defined
        templates.
        """
        import io
        import textwrap
        from piecrust.chefutil import print_help_item

        # Gather one help line per template offered by 'prepare' extensions.
        with io.StringIO() as tplh:
            for ext in app.plugin_loader.getCommandExtensions():
                if ext.command_name == 'prepare' and ext.supports(app):
                    for tpl_name in ext.getTemplateNames(app):
                        tpl_desc = ext.getTemplateDescription(app, tpl_name)
                        print_help_item(tplh, tpl_name, tpl_desc)
            help_list = tplh.getvalue()

        intro = textwrap.fill(
            "Running the 'prepare' command will let "
            "PieCrust setup a page for you in the correct place, with "
            "some hopefully useful default text.")
        listing_header = textwrap.fill(
            "The following templates are available:")
        outro = ("You can add user-defined templates by creating pages in a "
                 "`scaffold/pages` sub-directory in your website.")
        return (intro + "\n\n" + listing_header + "\n\n" +
                help_list + "\n" + outro)
Beispiel #31
0
def write_text(text_file, output):
    """Write *output*, wrapped to 80 columns, to ``<base>_trigrams.txt``.

    ``<base>`` is *text_file* without its ``.txt`` suffix.  Suffix-safe
    handling (instead of ``str.replace``) avoids mangling names that contain
    ``.txt`` elsewhere in the path and never silently overwrites the input
    file when it lacks a ``.txt`` suffix.

    Args:
        text_file (str): path of the source text file.
        output (str): text to wrap and write.
    """
    if text_file.endswith('.txt'):
        base = text_file[:-len('.txt')]
    else:
        base = text_file
    output_filename = base + '_trigrams.txt'
    with open(output_filename, 'w') as outfile:
        outfile.write(textwrap.fill(output, 80))
Beispiel #32
0
def wrap(string, max_width):
    """Return *string* re-flowed so that no line exceeds *max_width* chars."""
    # textwrap.fill(s, w) is defined as '\n'.join(textwrap.wrap(s, w))
    return '\n'.join(textwrap.wrap(string, max_width))
Beispiel #33
0
def prompt(text,
           validator,
           confirm=False,
           helptext='No help text provided',
           errortext='Invalid response!'):
    """Prompts for and validates text provided by user

    Args:
        text (str): the prompt to present to the user
        validator (mixed): the dict, list, or string used to validate the
            response
        confirm (bool): if true, user will be prompted to confirm value
        helptext (str): text to show if user response is "?"
        errortext (str): text to show if user response does not validate

    Return:
        Validated response to prompt
    """
    # Prepare string (single trailing space after the prompt)
    text = '{} '.format(text.rstrip())
    # Prepare validator.  Strings become compiled regexes; a y/n dict is
    # shown inline in the prompt; other dicts and lists become option menus.
    if isinstance(validator, str):
        validator = re.compile(validator, re.U)
    elif isinstance(validator, dict) and sorted(
            validator.keys()) == ['n', 'y']:
        # FIX: the second format argument was previously dropped by a
        # '{} ' format string, so the y/n choices never appeared
        text = '{}({}) '.format(text, '/'.join(list(validator.keys())))
    elif isinstance(validator, dict):
        keys = list(validator.keys())
        keys.sort(key=lambda s: s.zfill(100))
        # FIX: show each key together with its description (both format
        # arguments were previously dropped by a '{} ' format string)
        options = ['{}. {}'.format(key, validator[key]) for key in keys]
    elif isinstance(validator, list):
        options = ['{}. {}'.format(i + 1, val) for i, val in enumerate(validator)]
    else:
        raise ValueError('Validator must be dict, list, or str')
    # Validate response
    loop = True
    while loop:
        # Print options, if a menu was built above
        try:
            options
        except UnboundLocalError:
            pass
        else:
            print('-' * 60 + '\nOPTIONS\n-------')
            for option in options:
                print(option)
            print('-' * 60)
        # Prompt for value; 'q' quits, '?' shows help
        val = input(text)
        if val.lower() == 'q':
            print('User exited prompt')
            sys.exit()
        elif val.lower() == '?':
            print(fill(helptext))
            # FIX: keep looping so the user can answer after reading the
            # help (previously the loop ended with `result` unbound,
            # raising NameError on return)
            continue
        elif isinstance(validator, list):
            # FIX: also catch ValueError for non-numeric input
            try:
                result = validator[int(val) - 1]
            except (IndexError, ValueError):
                pass
            else:
                loop = False
        elif isinstance(validator, dict):
            try:
                result = validator[val]
            except KeyError:
                pass
            else:
                loop = False
        else:
            # regex validator: accept only if the pattern matches
            try:
                validator.search(val).group()
            except AttributeError:
                pass
            else:
                result = val
                loop = False
        # Confirm value, if required
        if confirm and not loop:
            result = str(result)
            loop = prompt('Is this value correct: "{}"?'.format(result), {
                'y': False,
                'n': True
            },
                          confirm=False)
        elif loop:
            print(fill(errortext))
    # Return validated value
    return result
Beispiel #34
0
class ConstraintToVarBoundTransform(IsomorphicTransformation):
    """Change constraints to be a bound on the variable.

    Looks for constraints of form k*v + c1 <= c2. Changes bound on v to match
    (c2 - c1)/k if it results in a tighter bound. Also does the same thing for
    lower bounds.

    """

    # Register the transformation; its docstring doubles as the registered
    # documentation string.
    alias('core.constraints_to_var_bounds',
          doc=textwrap.fill(textwrap.dedent(__doc__.strip())))

    def __init__(self, *args, **kwargs):
        """Initialize the transformation."""
        super(ConstraintToVarBoundTransform, self).__init__(*args, **kwargs)

    def _create_using(self, model):
        """Create new model, applying transformation."""
        # Clone first so the caller's model is left untouched.
        m = model.clone()
        self._apply_to(m)
        return m

    def _apply_to(self, model):
        """Apply the transformation to the given model (in place)."""
        m = model

        for constr in m.component_data_objects(ctype=Constraint,
                                               active=True,
                                               descend_into=True):
            # Check if the constraint is k * x + c1 <= c2 or c2 <= k * x + c1
            if constr.body.polynomial_degree() == 1:
                repn = generate_canonical_repn(constr.body)
                # Only single-variable linear constraints can be folded into
                # a variable bound.
                if repn.variables is not None and len(repn.variables) == 1:
                    var = repn.variables[0]
                    const = repn.constant if repn.constant is not None else 0
                    coef = float(repn.linear[0])
                    if coef == 0:
                        # This can happen when a one element of a bilinear term
                        # is fixed to zero. Obviously, do not divide by zero.
                        pass
                    else:
                        if constr.upper is not None:
                            # k*x + c <= ub  =>  x <= (ub - c)/k for k > 0,
                            #                    x >= (ub - c)/k for k < 0;
                            # keep whichever bound is tighter.
                            newbound = (value(constr.upper) - const) / coef
                            if coef > 0:
                                var.setub(
                                    min(var.ub, newbound) if var.
                                    ub is not None else newbound)
                            elif coef < 0:
                                var.setlb(
                                    max(var.lb, newbound) if var.
                                    lb is not None else newbound)
                        if constr.lower is not None:
                            # lb <= k*x + c  =>  x >= (lb - c)/k for k > 0,
                            #                    x <= (lb - c)/k for k < 0;
                            # keep whichever bound is tighter.
                            newbound = (value(constr.lower) - const) / coef
                            if coef > 0:
                                var.setlb(
                                    max(var.lb, newbound) if var.
                                    lb is not None else newbound)
                            elif coef < 0:
                                var.setub(
                                    min(var.ub, newbound) if var.
                                    ub is not None else newbound)
                    constr.deactivate()
                    # Sometimes deactivating the constraint will remove a
                    # variable from all active constraints, so that it won't be
                    # updated during the optimization. Therefore, we need to
                    # shift the value of var as necessary in order to keep it
                    # within its implied bounds, as the constraint we are
                    # deactivating is not an invalid constraint, but rather we
                    # are moving its implied bound directly onto the variable.
                    if (var.has_lb() and var.value is not None
                            and var.value < var.lb):
                        var.set_value(var.lb)
                    if (var.has_ub() and var.value is not None
                            and var.value > var.ub):
                        var.set_value(var.ub)
Beispiel #35
0
 def maybe_fill(t):
     """Wrap *t* only if any of its lines is at least ``width`` chars wide."""
     # uses `width` and `kwargs` from the enclosing scope
     if any(len(line) >= width for line in t.splitlines()):
         return textwrap.fill(t, width, **kwargs)
     return t
 def _hanging_indent(self, paragraph: str, indent="    ", width: int = DEFAULT_WIDTH) -> str:
     """Fill *paragraph* to *width* with every line but the first indented."""
     wrapped = textwrap.fill(paragraph, width=width - len(indent))
     # indent all lines, then strip the indent from the first line only
     return textwrap.indent(wrapped, prefix=indent).lstrip()
# Demo: textwrap.wrap() vs textwrap.fill(), plus max_lines / placeholder /
# initial_indent options.  Each print's expected output is shown below it.
import textwrap

s = "Python can be easy to pick up whether you're a first time programmer or you're experienced with other languages"

# wrap() returns the wrapped text as a list of lines
s_wrap_list = textwrap.wrap(s, 40)
print(s_wrap_list)
# ['Python can be easy to pick up whether', "you're a first time programmer or you're", 'experienced with other languages']

# joining the list with newlines reproduces fill()
print('\n'.join(s_wrap_list))
# Python can be easy to pick up whether
# you're a first time programmer or you're
# experienced with other languages

# fill(s, w) is shorthand for '\n'.join(wrap(s, w))
print(textwrap.fill(s, 40))
# Python can be easy to pick up whether
# you're a first time programmer or you're
# experienced with other languages

# max_lines truncates and marks the cut with a placeholder ('[...]' default)
print(textwrap.wrap(s, 40, max_lines=2))
# ['Python can be easy to pick up whether', "you're a first time programmer or [...]"]

print(textwrap.fill(s, 40, max_lines=2))
# Python can be easy to pick up whether
# you're a first time programmer or [...]

# a custom placeholder replaces the default ' [...]'
print(textwrap.fill(s, 40, max_lines=2, placeholder=' ~'))
# Python can be easy to pick up whether
# you're a first time programmer or ~

# initial_indent prefixes only the first output line
print(textwrap.fill(s, 40, max_lines=2, placeholder=' ~', initial_indent='  '))
#   Python can be easy to pick up whether
# HTTP Package
# Demo: fetch a Google Books API response over HTTP, wrap the raw JSON for
# display, then parse it.  Requires network access at run time.

# https://www.googleapis.com/books/v1/volumes?q=isbn:1101904224

import urllib.request
import json
import textwrap

with urllib.request.urlopen(
        'https://www.googleapis.com/books/v1/volumes?q=isbn:1101904224') as f:
    # read() returns bytes; decode to str before printing/parsing
    text = f.read()
    decoded = text.decode('utf-8')
    # wrap the raw JSON to 50 columns just for readability
    print(textwrap.fill(decoded, width=50))

print()

# parse the JSON document and pull a couple of fields out of it
obj = json.loads(decoded)
print(obj['kind'])

# assumes the query matched at least one volume with searchInfo — a miss
# would raise KeyError/IndexError here
print(obj['items'][0]['searchInfo']['textSnippet'])
Beispiel #39
0
    def send_by_mail(self, to, fmts, delete_from_library, subject='', send_ids=None,
            do_auto_convert=True, specific_format=None):
        """Email selected books (or ``send_ids``) as attachments to ``to``.

        Books that already have one of the preferred formats are attached and
        queued for sending; books that only need conversion are optionally
        auto-converted first; books with no usable format are reported in a
        warning dialog.  If ``delete_from_library`` is true, successfully
        queued books are removed from the library after sending.
        """
        # Default to the current library selection when no ids are given.
        ids = [self.library_view.model().id(r) for r in self.library_view.selectionModel().selectedRows()] if send_ids is None else send_ids
        if not ids or len(ids) == 0:
            return

        # Resolve each id to its preferred available format file;
        # _auto_ids collects books that would need conversion first.
        files, _auto_ids = self.library_view.model().get_preferred_formats_from_ids(ids,
                                    fmts, set_metadata=True,
                                    specific_format=specific_format,
                                    exclude_auto=do_auto_convert,
                                    use_plugboard=plugboard_email_value,
                                    plugboard_formats=plugboard_email_formats)
        if do_auto_convert:
            # Keep only books that don't require conversion in this pass.
            nids = list(set(ids).difference(_auto_ids))
            ids = [i for i in ids if i in nids]
        else:
            _auto_ids = []

        full_metadata = self.library_view.model().metadata_for(ids,
                get_cover=False)

        # Per-book send queues built below (parallel lists).
        bad, remove_ids, jobnames = [], [], []
        texts, subjects, attachments, attachment_names = [], [], [], []
        for f, mi, id in zip(files, full_metadata, ids):
            t = mi.title
            if not t:
                t = _('Unknown')
            if f is None:
                # No suitable format file could be produced for this book.
                bad.append(t)
            else:
                remove_ids.append(id)
                jobnames.append(t)
                attachments.append(f)
                # Subject: either the default "E-book: <title>" or the
                # user-supplied template expanded with the book's metadata.
                if not subject:
                    subjects.append(_('E-book:')+ ' '+t)
                else:
                    components = get_components(subject, mi, id)
                    if not components:
                        components = [mi.title]
                    subjects.append(os.path.join(*components))
                a = authors_to_string(mi.authors if mi.authors else
                        [_('Unknown')])
                # Plain-text body describing the attachment.
                texts.append(_('Attached, you will find the e-book') +
                        '\n\n' + t + '\n\t' + _('by') + ' ' + a + '\n\n' +
                        _('in the %s format.') %
                        os.path.splitext(f)[1][1:].upper())
                if mi.comments and gprefs['add_comments_to_email']:
                    from calibre.utils.html2text import html2text
                    texts[-1] += '\n\n' + _('About this book:') + '\n\n' + textwrap.fill(html2text(mi.comments))
                # Attachment filename: "<title> - <author><ext>", ASCII-safe.
                prefix = ascii_filename(t+' - '+a)
                if not isinstance(prefix, str):
                    prefix = prefix.decode(preferred_encoding, 'replace')
                attachment_names.append(prefix + os.path.splitext(f)[1])
        remove = remove_ids if delete_from_library else []

        # One recipient per attachment (same address repeated).
        to_s = list(repeat(to, len(attachments)))
        if attachments:
            send_mails(jobnames,
                    Dispatcher(partial(self.email_sent, remove=remove)),
                    attachments, to_s, subjects, texts, attachment_names,
                    self.job_manager)
            self.status_bar.show_message(_('Sending email to')+' '+to, 3000)

        # Decide which of the remaining books can be auto-converted and
        # which are unsendable (no convertible input/output format).
        auto = []
        if _auto_ids != []:
            for id in _auto_ids:
                if specific_format is None:
                    dbfmts = self.library_view.model().db.formats(id, index_is_id=True)
                    formats = [f.lower() for f in (dbfmts.split(',') if dbfmts else
                        [])]
                    if list(set(formats).intersection(available_input_formats())) != [] and list(set(fmts).intersection(available_output_formats())) != []:
                        auto.append(id)
                    else:
                        bad.append(self.library_view.model().db.title(id, index_is_id=True))
                else:
                    if specific_format in list(set(fmts).intersection(set(available_output_formats()))):
                        auto.append(id)
                    else:
                        bad.append(self.library_view.model().db.title(id, index_is_id=True))

        if auto != []:
            # Pick the target conversion format: the specific one if usable,
            # otherwise the first preferred format that can be produced.
            format = specific_format if specific_format in list(set(fmts).intersection(set(available_output_formats()))) else None
            if not format:
                for fmt in fmts:
                    if fmt in list(set(fmts).intersection(set(available_output_formats()))):
                        format = fmt
                        break
            if format is None:
                bad += auto
            else:
                autos = [self.library_view.model().db.title(id, index_is_id=True) for id in auto]
                if self.auto_convert_question(
                    _('Auto convert the following books to %s before sending via '
                        'email?') % format.upper(), autos):
                    self.iactions['Convert Books'].auto_convert_mail(to, fmts, delete_from_library, auto, format, subject)

        if bad:
            # Report books that could not be sent in any format.
            bad = '\n'.join('%s'%(i,) for i in bad)
            d = warning_dialog(self, _('No suitable formats'),
                _('Could not email the following books '
                'as no suitable formats were found:'), bad)
            d.exec_()
Beispiel #40
0
    def data_str(self, include_og=True):
        """
        Get description of all data, including information for OG setting.
        :return: str
        """

        # __str__() omits information on OG setting to reduce confusion
        # as to which set of symops are active, this property gives
        # all stored data including OG setting

        desc = {}  # dictionary to hold description strings

        # parse data into strings

        # magnetic space group type (1-4); type 4 has a distinct OG setting
        desc['magtype'] = self._data['magtype']
        desc['bns_number'] = ".".join(map(str, self._data["bns_number"]))
        desc['bns_label'] = self._data["bns_label"]
        desc['og_id'] = ("\t\tOG: " +
                         ".".join(map(str, self._data["og_number"])) + " " +
                         self._data["og_label"] if include_og else '')
        desc['bns_operators'] = ' '.join(
            [op_data['str'] for op_data in self._data['bns_operators']])

        desc['bns_lattice'] = (' '.join([
            lattice_data['str']
            for lattice_data in self._data['bns_lattice'][3:]
        ]) if len(self._data['bns_lattice']) > 3 else ''
                               )  # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+

        # one wrapped line per Wyckoff position, hanging-indented past its
        # label so continuation lines align
        desc['bns_wyckoff'] = '\n'.join([
            textwrap.fill(wyckoff_data['str'],
                          initial_indent=wyckoff_data['label'] + "  ",
                          subsequent_indent=" " *
                          len(wyckoff_data['label'] + "  "),
                          break_long_words=False,
                          break_on_hyphens=False)
            for wyckoff_data in self._data['bns_wyckoff']
        ])

        # OG-BNS transform only applies to type-4 groups
        desc['og_bns_transformation'] = ('OG-BNS Transform: ({})\n'.format(
            self._data['og_bns_transform']) if desc['magtype'] == 4
                                         and include_og else '')

        # mark prefixes with '(BNS)' when an OG section will follow
        bns_operators_prefix = "Operators{}: ".format(
            ' (BNS)' if desc['magtype'] == 4 and include_og else '')
        bns_wyckoff_prefix = "Wyckoff Positions{}: ".format(
            ' (BNS)' if desc['magtype'] == 4 and include_og else '')

        # apply textwrap on long lines
        desc['bns_operators'] = textwrap.fill(
            desc['bns_operators'],
            initial_indent=bns_operators_prefix,
            subsequent_indent=" " * len(bns_operators_prefix),
            break_long_words=False,
            break_on_hyphens=False)

        # note: the Wyckoff header line carries the (non-trivial) lattice
        # translations, with the positions themselves on following lines
        description = ("BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
                       "{d[og_bns_transformation]}"
                       "{d[bns_operators]}\n"
                       "{bns_wyckoff_prefix}{d[bns_lattice]}\n"
                       "{d[bns_wyckoff]}").format(
                           d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)

        if desc['magtype'] == 4 and include_og:

            desc['og_operators'] = ' '.join(
                [op_data['str'] for op_data in self._data['og_operators']])

            # include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
            # not always present in OG setting
            desc['og_lattice'] = ' '.join([
                lattice_data['str']
                for lattice_data in self._data['og_lattice']
            ])

            desc['og_wyckoff'] = '\n'.join([
                textwrap.fill(wyckoff_data['str'],
                              initial_indent=wyckoff_data['label'] + "  ",
                              subsequent_indent=" " *
                              len(wyckoff_data['label'] + "  "),
                              break_long_words=False,
                              break_on_hyphens=False)
                for wyckoff_data in self._data['og_wyckoff']
            ])

            og_operators_prefix = "Operators (OG): "
            # NOTE(review): og_wyckoff_prefix is defined but never used — the
            # format string below hardcodes "Wyckoff Positions (OG): "
            og_wyckoff_prefix = "Wyckoff Positions (OG): "

            # apply textwrap on long lines
            desc['og_operators'] = textwrap.fill(
                desc['og_operators'],
                initial_indent=og_operators_prefix,
                subsequent_indent=" " * len(og_operators_prefix),
                break_long_words=False,
                break_on_hyphens=False)

            description += ("\n{d[og_operators]}\n"
                            "Wyckoff Positions (OG): {d[og_lattice]}\n"
                            "{d[og_wyckoff]}").format(d=desc)
        elif desc['magtype'] == 4:
            description += '\nAlternative OG setting exists for this space group.'

        return description
Beispiel #41
0
 def format_description(self, description):
     """Wrap each paragraph of *description* to ``self.width``, with a
     trailing newline."""
     wrapped = [textwrap.fill(paragraph, self.width)
                for paragraph in description.split('\n')]
     return '\n'.join(wrapped) + '\n'
    def draw_figure(self, *args):
        """Redraw the results plot according to the currently selected
        plot type and time window.

        Reads ``self.dfs`` (dict of result DataFrames keyed by run name)
        and the UI selectors; mutates the current matplotlib figure/axes.
        """
        self._update_selection()

        if not self._validate_inputs():
            return

        start_time, end_time = self.time_selector.get_inputs()
        plot_type = self.vars_button.text

        results = self.dfs

        # box plots carry their labels on the x-axis, so no legend is needed
        if plot_type in ['price of electricity (box)']:
            self._reinit_graph(has_legend=False)
        else:
            self._reinit_graph(has_legend=True)

        fig = self.current_fig
        ax = self.current_ax
        ax.clear()

        plt.xticks(rotation=0)
        plt.grid(True)

        # one branch per plot type; each draws one series per result set,
        # with legend labels wrapped to 50 chars
        if plot_type == 'arbitrage activity':
            for key in results:
                df = results[key]
                # net energy: charged (q_r) minus discharged (q_d)
                ax.plot((df['q_r'] - df['q_d'])[start_time:end_time],
                        drawstyle='steps-post',
                        label=textwrap.fill(key, 50))

            ax.set_ylabel('MWh')
            ax.set_xlabel('ending hour')
            ax.set_title('Energy Charged/Discharged (MWh)')
        elif plot_type == 'regulation capacity offered':
            for key in results:
                df = results[key]

                # prefer the combined q_reg column when populated; otherwise
                # plot regulation-down minus regulation-up
                if (df['q_reg'] != 0).any():
                    ax.plot(df['q_reg'][start_time:end_time],
                            drawstyle='steps-post',
                            label=textwrap.fill(key, 50))
                else:
                    ax.plot((df['q_rd'] - df['q_ru'])[start_time:end_time],
                            drawstyle='steps-post',
                            label=textwrap.fill(key, 50))

            ax.set_ylabel('MWh')
            ax.set_xlabel('ending hour')
            ax.set_title('Regulation Capacity Offered (MWh)')
        elif plot_type == 'revenue':
            # NOTE(review): labels/titles are (re)set inside the loop in the
            # branches below, unlike the branches above — looks unintentional
            # but is harmless; confirm before changing
            for key in results:
                df = results[key]
                ax.plot(df['revenue'][start_time:end_time],
                        drawstyle='steps-post',
                        label=textwrap.fill(key, 50))

                ax.set_ylabel('$')
                ax.set_xlabel('ending hour')
                ax.set_title('Cumulative Revenue Generated ($)')
                # thousands separators on the revenue axis
                ax.get_yaxis().set_major_formatter(
                    mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
        elif plot_type == 'state of charge':
            for key in results:
                df = results[key]
                ax.plot(df['state of charge'][start_time:end_time],
                        drawstyle='steps-post',
                        label=textwrap.fill(key, 50))

                ax.set_ylabel('MWh')
                ax.set_xlabel('ending hour')
                ax.set_title('State of Charge (MWh)')
        elif plot_type == 'price of electricity':
            for key in results:
                df = results[key]
                ax.plot(df['price of electricity'][start_time:end_time],
                        drawstyle='steps-post',
                        label=textwrap.fill(key, 50))

                ax.set_ylabel('$/MWh')
                ax.set_xlabel('ending hour')
                ax.set_title('Price of Electricity ($/MWh)')
        elif plot_type == 'price of electricity (box)':
            # one box per result set; x labels use the first three words of
            # the run name, wrapped to 8 chars
            ax.boxplot([
                results[key]['price of electricity'][start_time:end_time]
                for key in results
            ],
                       labels=[
                           textwrap.fill(' '.join(key.split()[:3]), 8)
                           for key in results
                       ])

            ax.set_ylabel('$/MWh')
            ax.set_title('Price of Electricity ($/MWh)')
        else:
            # fallback: plot the column named by the plot type directly
            for key in results:
                df = results[key]
                ax.plot(df[plot_type][start_time:end_time],
                        drawstyle='steps-post',
                        label=textwrap.fill(key, 50))

                ax.set_title(plot_type)

        # attach the legend for all plot types except the box plot
        if plot_type in ['price of electricity (box)']:
            pass
        else:
            ax.legend(bbox_to_anchor=(1.02, 0.5),
                      loc="center left",
                      borderaxespad=0,
                      shadow=False,
                      labelspacing=1.8)

        self.plotbox.children[0].draw()
Beispiel #43
0
def fill_paragraph( text, width, indent ):
    """Collapse all whitespace runs in *text* to single spaces, then wrap
    to *width* with every line prefixed by *indent*."""
    # `whitespace_regex` is a module-level compiled pattern
    collapsed = whitespace_regex.sub(' ', text).strip()
    return textwrap.fill( collapsed, width,
                          initial_indent=indent,
                          subsequent_indent=indent )
Beispiel #44
0
    Values to consider as True.
false_values : list, default None
    Values to consider as False.
skiprows : list-like, int, or callable, optional
    Line numbers to skip (0-indexed) or number of lines to skip (int) at the
    start of the file. If callable, the callable function will be evaluated
    against the row indices, returning True if the row should be skipped and
    False otherwise. An example of a valid callable argument would be ``lambda
    x: x in [0, 2]``.
nrows : int, default None
    Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
    Additional strings to recognize as NA/NaN. If dict passed, specific
    per-column NA values. By default the following values are interpreted
    as NaN: '"""
    + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent="    ")
    + """'.
keep_default_na : bool, default True
    Whether or not to include the default NaN values when parsing the data.
    Depending on whether `na_values` is passed in, the behavior is as follows:

    * If `keep_default_na` is True, and `na_values` are specified, `na_values`
      is appended to the default NaN values used for parsing.
    * If `keep_default_na` is True, and `na_values` are not specified, only
      the default NaN values are used for parsing.
    * If `keep_default_na` is False, and `na_values` are specified, only
      the NaN values specified `na_values` are used for parsing.
    * If `keep_default_na` is False, and `na_values` are not specified, no
      strings will be parsed as NaN.

    Note that if `na_filter` is passed in as False, the `keep_default_na` and
Beispiel #45
0
                        metavar='SYN2',
                        type=str,
                        help='a Processing syntax definition for Vim')

    args = parser.parse_args()

    if not os.path.isfile(args.syn1):
        sys.exit("Could not open syntax file %s. Does the file exist?" %
                 args.syn1)

    if not os.path.isfile(args.syn2):
        sys.exit("Could not open syntax file %s. Does the file exist?" %
                 args.syn2)

    for key in patterns:
        w1 = get_words(args.syn1, patterns[key])
        w2 = get_words(args.syn2, patterns[key])
        removed = w1 - w2
        added = w2 - w1
        print "- %s" % key
        print "Added:"
        print textwrap.fill(' '.join(added),
                            initial_indent='    ',
                            subsequent_indent='    ')

        print "Removed:"
        print textwrap.fill(' '.join(removed),
                            initial_indent='    ',
                            subsequent_indent='    ')
        print
Beispiel #46
0
def format_job(run_name, job):
    """Render one job's outcome as a text section for the results email.

    Args:
        run_name: name of the run (used to build the results/info URL).
        job: dict describing the job; reads 'job_id', 'status',
            'description', 'duration', 'log_href', 'failure_reason' and
            (optionally) 'sentry_event'.

    Returns:
        str: the filled-in 'running', 'pass' or 'fail' email template.
    """
    job_id = job['job_id']
    status = job['status']
    description = job['description']
    duration = seconds_to_hms(int(job['duration'] or 0))

    # Every job gets a link to e.g. pulpito's pages
    info_url = misc.get_results_url(run_name, job_id)
    if info_url:
        info_line = email_templates['info_url_templ'].format(info=info_url)
    else:
        info_line = ''

    if status in UNFINISHED_STATUSES:
        format_args = dict(
            job_id=job_id,
            desc=description,
            time=duration,
            info_line=info_line,
        )
        return email_templates['running_templ'].format(**format_args)

    if status == 'pass':
        return email_templates['pass_templ'].format(
            job_id=job_id,
            desc=description,
            time=duration,
            info_line=info_line,
        )
    else:
        # Remove the trailing 'teuthology.yaml' filename to get the log
        # directory URL.  FIX: the previous str.rstrip('teuthology.yaml')
        # strips a *character set*, not a suffix, and could eat extra
        # trailing characters from the URL.
        log_dir_url = job['log_href']
        if log_dir_url.endswith('teuthology.yaml'):
            log_dir_url = log_dir_url[:-len('teuthology.yaml')]
        if log_dir_url:
            log_line = email_templates['fail_log_templ'].format(
                log=log_dir_url)
        else:
            log_line = ''
        sentry_event = job.get('sentry_event')
        if sentry_event:
            sentry_line = email_templates['fail_sentry_templ'].format(
                sentry_event=sentry_event)
        else:
            sentry_line = ''

        if job['failure_reason']:
            # 'fill' is from the textwrap module and it collapses a given
            # string into multiple lines of a maximum width as specified.
            # We want 75 characters here so that when we indent by 4 on the
            # next line, we have 79-character exception paragraphs.
            reason = fill(job['failure_reason'], 75)
            reason = '\n'.join('    ' + line for line in reason.splitlines())
            reason_lines = email_templates['fail_reason_templ'].format(
                reason=reason).rstrip()
        else:
            reason_lines = ''

        format_args = dict(
            job_id=job_id,
            desc=description,
            time=duration,
            info_line=info_line,
            log_line=log_line,
            sentry_line=sentry_line,
            reason_lines=reason_lines,
        )
        return email_templates['fail_templ'].format(**format_args)
Beispiel #47
0
async def py_constraints(
    addresses: Addresses,
    console: Console,
    py_constraints_subsystem: PyConstraintsSubsystem,
    python_setup: PythonSetup,
    registered_target_types: RegisteredTargetTypes,
    union_membership: UnionMembership,
) -> PyConstraintsGoal:
    """Report Python interpreter constraints for targets.

    With ``--summary``, write a repo-wide CSV (one row per Python target with
    its own and transitive constraints plus dependency/dependee counts).
    Otherwise, print the merged constraints for the transitive closure of the
    requested ``addresses``, grouped by constraint set.

    Returns a ``PyConstraintsGoal`` whose exit code is 1 only when
    ``--summary`` is combined with explicit target arguments (unsupported).
    """
    if py_constraints_subsystem.summary:
        if addresses:
            console.print_stderr(
                "The `py-constraints --summary` goal does not take file/target arguments. Run "
                "`help py-constraints` for more details.")
            return PyConstraintsGoal(exit_code=1)

        # Gather every target in the repo — both expanded and explicit — so
        # the summary covers all targets carrying interpreter constraints.
        all_expanded_targets, all_explicit_targets = await MultiGet(
            Get(Targets, AddressSpecs([DescendantAddresses("")])),
            Get(UnexpandedTargets, AddressSpecs([DescendantAddresses("")])),
        )
        all_python_targets = sorted(
            {
                t
                for t in (*all_expanded_targets, *all_explicit_targets)
                if t.has_field(InterpreterConstraintsField)
            },
            key=lambda tgt: cast(Address, tgt.address),
        )

        constraints_per_tgt = [
            PexInterpreterConstraints.create_from_targets([tgt], python_setup)
            for tgt in all_python_targets
        ]

        transitive_targets_per_tgt = await MultiGet(
            Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
            for tgt in all_python_targets)
        transitive_constraints_per_tgt = [
            PexInterpreterConstraints.create_from_targets(
                transitive_targets.closure, python_setup)
            for transitive_targets in transitive_targets_per_tgt
        ]

        dependees_per_root = await MultiGet(
            Get(
                Dependees,
                DependeesRequest(
                    [tgt.address], transitive=True, include_roots=False))
            for tgt in all_python_targets)

        data = [{
            "Target": tgt.address.spec,
            "Constraints": str(constraints),
            "Transitive Constraints": str(transitive_constraints),
            "# Dependencies": len(transitive_targets.dependencies),
            "# Dependees": len(dependees),
        } for tgt, constraints, transitive_constraints, transitive_targets,
                dependees in zip(
                    all_python_targets,
                    constraints_per_tgt,
                    transitive_constraints_per_tgt,
                    transitive_targets_per_tgt,
                    dependees_per_root,
                )]

        with py_constraints_subsystem.output_sink(console) as stdout:
            writer = csv.DictWriter(
                stdout,
                fieldnames=[
                    "Target",
                    "Constraints",
                    "Transitive Constraints",
                    "# Dependencies",
                    "# Dependees",
                ],
            )
            writer.writeheader()
            for entry in data:
                writer.writerow(entry)

        return PyConstraintsGoal(exit_code=0)

    transitive_targets = await Get(TransitiveTargets,
                                   TransitiveTargetsRequest(addresses))
    final_constraints = PexInterpreterConstraints.create_from_targets(
        transitive_targets.closure, python_setup)

    if not final_constraints:
        target_types_with_constraints = sorted(
            tgt_type.alias for tgt_type in registered_target_types.types
            if tgt_type.class_has_field(InterpreterConstraintsField,
                                        union_membership))
        logger.warning(
            "No Python files/targets matched for the `py-constraints` goal. All target types with "
            f"Python interpreter constraints: {', '.join(target_types_with_constraints)}"
        )
        return PyConstraintsGoal(exit_code=0)

    # Group addresses by their (non-empty) own constraints for per-constraint
    # reporting below.
    constraints_to_addresses = defaultdict(set)
    for tgt in transitive_targets.closure:
        constraints = PexInterpreterConstraints.create_from_targets(
            [tgt], python_setup)
        if not constraints:
            continue
        constraints_to_addresses[constraints].add(tgt.address)

    with py_constraints_subsystem.output(console) as output_stdout:
        output_stdout(f"Final merged constraints: {final_constraints}\n")
        if len(addresses) > 1:
            # BUGFIX: the suggested command was misspelled "py-constriants".
            merged_constraints_warning = (
                "(These are the constraints used if you were to depend on all of the input "
                "files/targets together, even though they may end up never being used together in "
                "the real world. Consider using a more precise query or running "
                "`./pants py-constraints --summary`.)\n")
            output_stdout(indent(fill(merged_constraints_warning, 80), "  "))

        for constraint, addrs in sorted(constraints_to_addresses.items()):
            output_stdout(f"\n{constraint}\n")
            for addr in sorted(addrs):
                output_stdout(f"  {addr}\n")

    return PyConstraintsGoal(exit_code=0)
Beispiel #48
0
    def print_modules(self):
        """Print documentation for every available module.

        For each module name from ``all_modules()``, locate its source file
        under ``modules/core`` (falling back to ``modules/contrib``), parse the
        file and extract the module docstring. Output is either reStructuredText
        (``self._format == "rst"``, with screenshots linked when present) or
        indented, wrapped plain text. Modules without a file or docstring are
        skipped with a warning.
        """
        # Repository root: one level up from this file's directory.
        basepath = os.path.abspath(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))

        rst = {}
        for m in all_modules():
            try:
                module_type = "core"
                filename = os.path.join(basepath, "modules", "core",
                                        "{}.py".format(m))
                if not os.path.exists(filename):
                    filename = os.path.join(basepath, "modules", "contrib",
                                            "{}.py".format(m))
                    module_type = "contrib"
                if not os.path.exists(filename):
                    log.warning("module {} not found".format(m))
                    continue

                # Extract the docstring without importing the module (avoids
                # executing module-level side effects).
                doc = None
                with open(filename) as f:
                    tree = ast.parse(f.read())
                    doc = ast.get_docstring(tree)

                if not doc:
                    log.warning("failed to find docstring for {}".format(m))
                    continue
                if self._format == "rst":
                    if os.path.exists(
                            os.path.join(basepath, "..", "screenshots",
                                         "{}.png".format(m))):
                        doc = "{}\n\n.. image:: ../screenshots/{}.png".format(
                            doc, m)

                    rst[module_type] = rst.get(module_type, [])
                    rst[module_type].append({"module": m, "content": doc})
                else:
                    print(
                        textwrap.fill(
                            "{}:".format(m),
                            80,
                            initial_indent=self._indent * 2,
                            subsequent_indent=self._indent * 2,
                        ))
                    for line in doc.split("\n"):
                        print(
                            textwrap.fill(
                                line,
                                80,
                                initial_indent=self._indent * 3,
                                subsequent_indent=self._indent * 6,
                            ))
            except Exception as e:
                log.warning(e)

        if self._format == "rst":
            print("List of modules\n===============")
            for k in ["core", "contrib"]:
                print("\n{}\n{}\n".format(k, "-" * len(k)))
                # BUGFIX: rst[k] raised KeyError when a category collected no
                # modules; default to an empty list instead.
                for mod in rst.get(k, []):
                    print("\n{}\n{}\n".format(mod["module"],
                                              "~" * len(mod["module"])))
                    print(mod["content"])
Beispiel #49
0
def find_jar_iter(
        name_pattern,
        path_to_jar=None,
        env_vars=(),
        searchpath=(),
        url=None,
        verbose=False,
        is_regex=False,
):
    """
    Search for a jar that is used by nltk.

    Yields each matching jar path found; raises LookupError (with a helpful,
    wrapped message) if nothing matched anywhere.

    :param name_pattern: The name of the jar file
    :param path_to_jar: The user-supplied jar location, or None.
    :param env_vars: A list of environment variable names to check
                     in addition to the CLASSPATH variable which is
                     checked by default.
    :param searchpath: List of directories to search.
    :param url: A URL to mention in the error message on failure.
    :param verbose: If True, print each location as it is found.
    :param is_regex: Whether name is a regular expression.
    """

    assert isinstance(name_pattern, str)
    assert not isinstance(searchpath, str)
    if isinstance(env_vars, str):
        env_vars = env_vars.split()
    yielded = False

    # Make sure we check the CLASSPATH first
    env_vars = ["CLASSPATH"] + list(env_vars)

    # If an explicit location was given, then check it, and yield it if
    # it's present; otherwise, complain.
    if path_to_jar is not None:
        if os.path.isfile(path_to_jar):
            yielded = True
            yield path_to_jar
        else:
            raise LookupError("Could not find %s jar file at %s" %
                              (name_pattern, path_to_jar))

    # Check environment variables
    for env_var in env_vars:
        if env_var in os.environ:
            if env_var == "CLASSPATH":
                classpath = os.environ["CLASSPATH"]
                for cp in classpath.split(os.path.pathsep):
                    if os.path.isfile(cp):
                        filename = os.path.basename(cp)
                        if (is_regex and re.match(name_pattern, filename) or
                            (not is_regex and filename == name_pattern)):
                            if verbose:
                                print("[Found %s: %s]" % (name_pattern, cp))
                            yielded = True
                            yield cp
                    # The case where user put directory containing the jar file in the classpath
                    if os.path.isdir(cp):
                        if not is_regex:
                            if os.path.isfile(os.path.join(cp, name_pattern)):
                                if verbose:
                                    print("[Found %s: %s]" %
                                          (name_pattern, cp))
                                yielded = True
                                yield os.path.join(cp, name_pattern)
                        else:
                            # Look for file using regular expression
                            for file_name in os.listdir(cp):
                                if re.match(name_pattern, file_name):
                                    if verbose:
                                        print("[Found %s: %s]" % (
                                            name_pattern,
                                            os.path.join(cp, file_name),
                                        ))
                                    yielded = True
                                    yield os.path.join(cp, file_name)

            else:
                jar_env = os.environ[env_var]
                jar_iter = ((os.path.join(jar_env, path_to_jar)
                             for path_to_jar in os.listdir(jar_env))
                            if os.path.isdir(jar_env) else (jar_env, ))
                for path_to_jar in jar_iter:
                    if os.path.isfile(path_to_jar):
                        filename = os.path.basename(path_to_jar)
                        if (is_regex and re.match(name_pattern, filename) or
                            (not is_regex and filename == name_pattern)):
                            if verbose:
                                print("[Found %s: %s]" %
                                      (name_pattern, path_to_jar))
                            yielded = True
                            yield path_to_jar

    # Check the path list.
    for directory in searchpath:
        if is_regex:
            for filename in os.listdir(directory):
                path_to_jar = os.path.join(directory, filename)
                if os.path.isfile(path_to_jar):
                    if re.match(name_pattern, filename):
                        if verbose:
                            print("[Found %s: %s]" % (filename, path_to_jar))
                        # BUGFIX: these two lines were dedented to the
                        # `for filename` level, so every directory entry
                        # (matching or not, file or not) was yielded.
                        yielded = True
                        yield path_to_jar
        else:
            path_to_jar = os.path.join(directory, name_pattern)
            if os.path.isfile(path_to_jar):
                if verbose:
                    print("[Found %s: %s]" % (name_pattern, path_to_jar))
                yielded = True
                yield path_to_jar

    if not yielded:
        # If nothing was found, raise an error
        msg = "NLTK was unable to find %s!" % name_pattern
        if env_vars:
            msg += " Set the %s environment variable" % env_vars[0]
        msg = textwrap.fill(msg + ".",
                            initial_indent="  ",
                            subsequent_indent="  ")
        if searchpath:
            msg += "\n\n  Searched in:"
            msg += "".join("\n    - %s" % d for d in searchpath)
        if url:
            msg += "\n\n  For more information, on %s, see:\n    <%s>" % (
                name_pattern,
                url,
            )
        div = "=" * 75
        raise LookupError("\n\n%s\n%s\n%s" % (div, msg, div))
Beispiel #50
0
def slim_makescript(script_file, trees_file, demographic_model, contig,
                    samples, mutation_types, extended_events, scaling_factor,
                    burn_in):
    """Write a SLiM (Eidos) script to ``script_file`` implementing the model.

    Translates an msprime-style backwards-time ``demographic_model`` into the
    forwards-time constants a SLiM template expects (epoch times, per-epoch
    population sizes/growth rates/migration matrices, subpopulation splits,
    admixture pulses, drawn mutations, fitness callbacks, allele-frequency
    conditions, and sampling episodes), then prints the templated script.

    Returns the oldest epoch (the first epoch in forwards time).
    """

    pop_names = [
        pc.metadata["id"] for pc in demographic_model.population_configurations
    ]
    # Use copies of these so that the time frobbing below doesn't have
    # side-effects in the caller's model.
    demographic_events = copy.deepcopy(demographic_model.demographic_events)
    if extended_events is None:
        extended_events = []
    else:
        extended_events = copy.deepcopy(extended_events)

    # Reassign event times according to integral SLiM generations.
    # This collapses the time deltas used in HomSap/AmericanAdmixture_4B11,
    # and calculates times for GenerationAfter objects.
    def fix_time(event):
        for attr in ("time", "start_time", "end_time"):
            if not hasattr(event, attr):
                continue
            t = getattr(event, attr)
            t_rounded = round(float(t) / scaling_factor) * scaling_factor
            if isinstance(t, stdpopsim.ext.GenerationAfter):
                t_rounded -= scaling_factor
            if t_rounded < 0:
                raise ValueError(f"Bad {attr}: {getattr(event, attr)}")
            setattr(event, attr, t_rounded)

    for event in demographic_events:
        fix_time(event)
    for event in extended_events:
        fix_time(event)

    # The demography debugger constructs event epochs, which we use
    # to define the forwards-time events.
    dd = msprime.DemographyDebugger(
        population_configurations=demographic_model.population_configurations,
        migration_matrix=demographic_model.migration_matrix,
        demographic_events=demographic_events)

    epochs = sorted(dd.epochs, key=lambda e: e.start_time, reverse=True)
    T = [
        round(e.start_time * demographic_model.generation_time) for e in epochs
    ]
    migration_matrices = [e.migration_matrix for e in epochs]

    N = np.empty(shape=(dd.num_populations, len(epochs)), dtype=int)
    growth_rates = np.empty(shape=(dd.num_populations, len(epochs)),
                            dtype=float)
    for j, epoch in enumerate(epochs):
        for i, pop in enumerate(epoch.populations):
            N[i, j] = int(pop.end_size)
            growth_rates[i, j] = pop.growth_rate

    admixture_pulses = []
    subpopulation_splits = []
    for i, epoch in enumerate(epochs):
        for de in epoch.demographic_events:
            if isinstance(de, msprime.MassMigration):

                if de.proportion < 1:
                    # Calculate remainder of population after previous
                    # MassMigration events in this epoch.
                    rem = 1 - np.sum([
                        ap[3] for ap in admixture_pulses
                        if ap[0] == i and ap[1] == de.source
                    ])
                    admixture_pulses.append((
                        i,
                        de.source,  # forwards-time dest
                        de.dest,  # forwards-time source
                        rem * de.proportion))
                    continue

                # Backwards: de.source is being merged into de.dest.
                # Forwards: de.source is being created, taking individuals
                #           from de.dest.
                #
                # If the proportion==1, we can use SLiM function:
                #       sim.addSubpopSplit(newpop, size, oldpop),
                # which we trigger by adding a row to subpopulation_splits.
                # This SLiM function creates newpop (=de.source), under the
                # assumption that it doesn't already exist.

                subpopulation_splits.append(
                    (f"_T[{i}]", de.source, f"_N[{i+1},{de.source}]", de.dest))

                # Zero out the population size for generations before this
                # epoch, to avoid simulating individuals that contribute no
                # genealogy.
                N[de.source, 0:(i + 1)] = 0
                growth_rates[de.source, 0:(i + 1)] = 0

                # Ensure there are no migrations to or from de.source before
                # this epoch.
                for j in range(i + 1):
                    for k in range(dd.num_populations):
                        migration_matrices[j][k][de.source] = 0
                        migration_matrices[j][de.source][k] = 0

    drawn_mutations = []
    fitness_callbacks = []
    condition_on_allele_frequency = []
    op_id = stdpopsim.ext.ConditionOnAlleleFrequency.op_id
    for ee in extended_events:
        if hasattr(ee, "mutation_type_id"):
            mt_id = getattr(ee, "mutation_type_id")
            cls_name = ee.__class__.__name__
            if mutation_types is None:
                raise ValueError(
                    f"Invalid {cls_name} event. No mutation types defined.")
            if not (0 < ee.mutation_type_id <= len(mutation_types)):
                # FIXME: use zero-based indexes
                raise ValueError(
                    f"Invalid {cls_name} event with mutation type id {mt_id}.")
        if hasattr(ee, "start_time") and hasattr(ee, "end_time"):
            # Now that GenerationAfter times have been accounted for, we can
            # properly catch invalid start/end times.
            start_time = getattr(ee, "start_time")
            end_time = getattr(ee, "end_time")
            stdpopsim.ext.validate_time_range(start_time, end_time)

        if isinstance(ee, stdpopsim.ext.DrawMutation):
            time = ee.time * demographic_model.generation_time
            save = 1 if ee.save else 0
            drawn_mutations.append((time, ee.mutation_type_id,
                                    ee.population_id, ee.coordinate, save))
        elif isinstance(ee, stdpopsim.ext.ChangeMutationFitness):
            start_time = ee.start_time * demographic_model.generation_time
            end_time = ee.end_time * demographic_model.generation_time
            fitness_callbacks.append(
                (start_time, end_time, ee.mutation_type_id, ee.population_id,
                 ee.selection_coeff, ee.dominance_coeff))
        elif isinstance(ee, stdpopsim.ext.ConditionOnAlleleFrequency):
            start_time = ee.start_time * demographic_model.generation_time
            end_time = ee.end_time * demographic_model.generation_time
            save = 1 if ee.save else 0
            condition_on_allele_frequency.append(
                (start_time, end_time, ee.mutation_type_id, ee.population_id,
                 op_id(ee.op), ee.allele_frequency, save))
        else:
            raise ValueError(f"Unknown extended event type {type(ee)}")

    # Check that drawn mutations exist for extended events that need them.
    drawn_mut_type_ids = {mt_id for _, mt_id, _, _, _ in drawn_mutations}
    for ee in extended_events:
        if (isinstance(ee, stdpopsim.ext.ChangeMutationFitness)
                or isinstance(ee, stdpopsim.ext.ConditionOnAlleleFrequency)):
            if ee.mutation_type_id not in drawn_mut_type_ids:
                cls_name = ee.__class__.__name__
                # BUGFIX: the second string lacked the f prefix, so the
                # mutation type id was printed literally, not interpolated.
                raise ValueError(
                    f"Invalid {cls_name} event. No drawn mutation for "
                    f"mutation type id {ee.mutation_type_id}")

    printsc = functools.partial(print, file=script_file)

    # Header
    printsc('/*')
    printsc(' * stdpopsim ' + stdpopsim.__version__)
    printsc(' *')
    printsc(' * Demographic model: ' + demographic_model.id)
    printsc(' * ' + "\n * ".join(
        [line.strip() for line in demographic_model.description.split('\n')]))
    for citation in demographic_model.citations:
        printsc(' * ' + str(citation))
    printsc(' */')

    recomb_rates, recomb_ends = msprime_rm_to_slim_rm(contig.recombination_map)
    indent = 8 * " "
    recomb_rates_str = ("c(\n" +
                        textwrap.fill(", ".join(map(str, recomb_rates)),
                                      width=80,
                                      initial_indent=indent,
                                      subsequent_indent=indent) + ")")
    recomb_ends_str = ("c(\n" + textwrap.fill(", ".join(map(str, recomb_ends)),
                                              width=80,
                                              initial_indent=indent,
                                              subsequent_indent=indent) + ")")

    pop_names_str = ', '.join(map(lambda x: f'"{x}"', pop_names))

    printsc(
        string.Template(_slim_upper).substitute(
            scaling_factor=scaling_factor,
            burn_in=float(burn_in),
            chromosome_length=int(contig.recombination_map.get_length()),
            recombination_rates=recomb_rates_str,
            recombination_ends=recomb_ends_str,
            mutation_rate=contig.mutation_rate,
            generation_time=demographic_model.generation_time,
            trees_file=trees_file,
            pop_names=f"c({pop_names_str})"))

    def matrix2str(matrix,
                   row_comments=None,
                   col_comment=None,
                   indent=2,
                   fmt="",
                   dim=(None, None)):
        """
        Return an Eidos representation of the matrix as a string.
        """
        if row_comments is not None:
            assert len(matrix) == len(row_comments)

        if len(matrix) == 0:
            return "c()"

        s = ["array(c(\n"]
        if col_comment is not None:
            s.append(indent * 4 * ' ' + '// ' + col_comment + '\n')

        for i in range(len(matrix)):
            s.append(indent * 4 * " ")
            s.append('c({})'.format(", ".join(
                [format(x, fmt) for x in matrix[i]])))
            if i != len(matrix) - 1:
                s.append(",")
            if row_comments is not None:
                s.append(" // " + row_comments[i])
            s.append("\n")

        s.append((indent - 1) * 4 * " ")

        if dim[0] is None:
            dim = (len(matrix[0]), dim[1])
        if dim[1] is None:
            dim = (dim[0], len(matrix))
        s.append(f'), c({dim[0]}, {dim[1]}))')

        return "".join(s)

    if mutation_types is None:
        mutation_types = [stdpopsim.ext.MutationType()]

    # Mutation type; genomic elements.
    # FIXME: Change this to use zero-based indices---one-based indices are
    #        inconsistent with everything else, e.g. population IDs.
    for i, m in enumerate(mutation_types, 1):
        distrib_args = [str(arg) for arg in m.distribution_args]
        distrib_args[
            m.Q_scaled_index] = "Q * " + distrib_args[m.Q_scaled_index]
        distrib_args = ", ".join(distrib_args)
        printsc(f'    initializeMutationType("m{i}", {m.dominance_coeff}, ' +
                f'"{m.distribution_type}", {distrib_args});')
        if not m.convert_to_substitution:
            # T is the default for WF simulations.
            printsc(f'    m{i}.convertToSubstitution = F;')
    mut_weights = ", ".join(str(m.weight) for m in mutation_types)
    printsc('    initializeGenomicElementType("g1", ' +
            f'seq(1, {len(mutation_types)}), c({mut_weights}));')
    printsc()

    # Epoch times.
    printsc('    // Time of epoch boundaries, in years before present.')
    printsc('    // The first epoch spans from INF to _T[0].')
    printsc('    defineConstant("_T", c({}));'.format(", ".join(map(str, T))))
    printsc()

    # Population sizes.
    printsc('    // Population sizes in each epoch.')
    printsc('    _N = ' + matrix2str(
        N, row_comments=pop_names, col_comment="INF:_T[0], _T[0]:_T[1], etc.")
            + ';')
    printsc()

    printsc('    defineConstant("num_epochs", length(_T));')
    printsc('    defineConstant("num_populations", ncol(_N));')
    printsc()

    # Growth rates.
    printsc('    // Population growth rates for each epoch.')
    printsc('    defineConstant("growth_rates", ' +
            matrix2str(growth_rates,
                       row_comments=pop_names,
                       col_comment="INF:_T[0], _T[0]:_T[1], etc.",
                       dim=("num_epochs", "num_populations")) + ');')
    printsc()

    printsc('    no_migration = rep(0, num_populations*num_populations);')
    printsc()

    # Migration rates.
    printsc('    // Migration rates for each epoch.')
    printsc(
        '    // Migrations involving a population with size=0 are ignored.')
    printsc('    // XXX: document what the rows & cols correspond to.')
    printsc('    defineConstant("migration_matrices", array(c(')
    for i in range(len(migration_matrices)):
        epoch_str = f"INF:_T[{i}]" if i == 0 else f"_T[{i}]:_T[{i+1}]"
        printsc()
        printsc(2 * 4 * ' ' + '// ' + epoch_str)

        end = ",\n" if i != len(migration_matrices) - 1 else "\n"
        if np.all(np.array(migration_matrices[i]) == 0):
            printsc(2 * 4 * ' ' + 'no_migration', end=end)
        else:
            printsc(2 * 4 * ' ' +
                    matrix2str(migration_matrices[i],
                               indent=3,
                               fmt="g",
                               dim=("num_populations", "num_populations")),
                    end=end)
    printsc()
    printsc(4 * ' ' + '), c(num_populations, num_populations, num_epochs)));')
    printsc()

    # Population splits.
    printsc('    // Population splits, one row for each event.')
    printsc('    defineConstant("subpopulation_splits", ' + matrix2str(
        subpopulation_splits, col_comment="time, newpop, size, oldpop") + ');')
    printsc()

    # Admixture pulses.
    # Output _T[...] variable rather than an index.
    admixture_pulses = [(f"_T[{ap[0]}]", *ap[1:]) for ap in admixture_pulses]
    printsc('    // Admixture pulses, one row for each pulse.')
    printsc(
        '    defineConstant("admixture_pulses", ' +
        matrix2str(admixture_pulses, col_comment="time, dest, source, rate") +
        ');')
    printsc()

    # Drawn mutations.
    printsc('    // Drawn mutations, one row for each mutation.')
    printsc('    defineConstant("drawn_mutations", ' + matrix2str(
        drawn_mutations,
        col_comment="time, mut_type, pop_id, genomic_coordinate, save") + ');')
    printsc()

    # Fitness callbacks.
    printsc('    // Fitness callbacks, one row for each callback.')
    printsc('    defineConstant("fitness_callbacks", ' +
            matrix2str(fitness_callbacks,
                       col_comment="start_time, end_time, mut_type, pop_id, "
                       "selection_coeff, dominance_coeff") + ');')
    printsc()

    # Allele frequency conditioning
    op_types = ", ".join(
        f"\"{op}\""
        for op in stdpopsim.ext.ConditionOnAlleleFrequency.op_types)
    printsc(f'    defineConstant("op_types", c({op_types}));')
    printsc('    // Allele frequency conditioning, one row for each.')
    printsc('    defineConstant("condition_on_allele_frequency", ' +
            matrix2str(condition_on_allele_frequency,
                       col_comment="start_time, end_time, mut_type, pop_id, "
                       "op, allele_frequency, save") + ');')
    printsc()

    # Sampling episodes.
    sample_counts = collections.Counter([
        (sample.population,
         round(sample.time * demographic_model.generation_time))
        for sample in samples
    ])
    sampling_episodes = []
    for (pop, time), count in sample_counts.items():
        # SLiM can only sample individuals, which we assume are diploid.
        n_inds = (count + 1) // 2
        if count % 2 != 0:
            pop_id = pop_names[pop]
            gen = time / demographic_model.generation_time
            warnings.warn(
                stdpopsim.SLiMOddSampleWarning(
                    f"SLiM simulates diploid individuals, so {n_inds} "
                    f"individuals will be sampled for the {count} haploids "
                    f"requested from population {pop_id} at time {gen}. "
                    "See #464."))
        sampling_episodes.append((pop, n_inds, time))

    printsc('    // One row for each sampling episode.')
    printsc('    defineConstant("sampling_episodes", ' +
            matrix2str(sampling_episodes, col_comment='pop, n_inds, time') +
            ');')

    printsc(_slim_lower)

    return epochs[0]
Beispiel #51
0
 def paragraph(self, text):
     """Emit *text* as a wrapped paragraph on self.file.

     A blank line separates this paragraph from any previously written one.
     """
     write = self.file.write
     if self.previous:
         write("\n")
     write(textwrap.fill(text, self.width))
     write("\n")
     self.previous = True
Beispiel #52
0
def add_node(lbl, G):
    """Add a node with a fresh UUID id to graph ``G``.

    The node's label is the escaped text wrapped to 20 columns; the new
    node id is returned.
    """
    node_id = str(uuid.uuid4())
    G.add_node(node_id)
    node = G.get_node(node_id)
    wrapped_label = textwrap.fill(escape_str(lbl), width=20)
    node.attr['label'] = wrapped_label
    return node_id
Beispiel #53
0
            exit()
        else:
            while (float(rw) < 1):
                print(Fore.RED + Style.BRIGHT +
                      'Invalid entry. Let\'s do that room again.')
                oneRoom(floortotal, htotal)
            #print('rw = ' + str(rw) + ' in')
            rw = numParse(rw)
            rtotal = int(rl) * int(rw)
            #print ('rtotal is %g sq in' %rtotal)
            print('\t\t\tRoom total: ' + inchesToFeet(rtotal))
            floortotal += rtotal
            oneRoom(floortotal, htotal)


# Banner, then a random quote (index qnum into the quotes list) wrapped to 65
# columns for terminal display.
print('\nJUMPVISUAL SQ FOOTAGE CALCULATOR v2.1')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
#print('      created by john gallino')
print('\n' + textwrap.fill(quotes.quotes[qnum], 65) + '\n')
#choice = 'x'
#choice = input('Are you including inches in your measurements? (y/n): ')
#
#
#while (choice.lower() != 'y' and choice.lower() != 'n'):
#    choice = int(input('\nPlease enter y or n :'))

# Print usage instructions and start the interactive room-entry loop,
# seeding it with the running floor/house totals defined earlier.
print(
    '\nINSTRUCTIONS\n1. Starting with the first floor, enter the length and width of each room.\n2. If you are including inches, separate with a space. (e.g. \'8 3\' for 8ft 3in)\n3. Decimals work too. (e.g. \'10.5\' for 10ft 6in)\n4. When finished with a floor, enter \'-1\'\n'
)
oneRoom(floortotal, htotal)
Beispiel #54
0
    def test_candidateLogUpToDate(self):
        """
        Is the candidate up to date? The term and the index together
        have to be at least as advanced as that of the current machine.

        """
        # Build a 10-entry log, every entry at the same (current) term,
        # persist the term, then append the entries starting at index -1.
        currentTerm = 100
        log = []
        for x in xrange(10):
            log.append(rpc_objects.LogEntry(term=currentTerm, command=x))
        results = self.state.persister.setCurrentTerm(currentTerm)
        results.addCallback(
            dropResult(self.state.persister.matchAndAppendNewLogEntries, -1,
                       log))

        # Case 1: candidate's index and term exactly match ours -> True.
        results.addCallback(
            dropResult(self.state.candidateLogUpToDate,
                       lastLogIndex=len(log) - 1,
                       lastLogTerm=currentTerm))

        indexAndTermOKFailureMessage = fill(
            '''candidateLogUpToDate should return True when lastLogIndex and
            lastLogTerm are equal to the state's last log index and the
            term of the last log entry.''')
        results.addCallback(self.assertTrue, msg=indexAndTermOKFailureMessage)

        # Case 2: same term but a higher index than ours -> True.
        results.addCallback(
            dropResult(self.state.candidateLogUpToDate,
                       lastLogIndex=len(log),
                       lastLogTerm=currentTerm))

        indexIsHigherFailureMessage = fill(
            '''candidateLogUpToDate should return True when lastLogIndex is
            greater than state's last log index and the lastLogTerm is
            equal to the term of the last log entry.''')
        results.addCallback(self.assertTrue, msg=indexIsHigherFailureMessage)

        # Case 3: same index but a higher term than ours -> True.
        results.addCallback(
            dropResult(self.state.candidateLogUpToDate,
                       lastLogIndex=len(log) - 1,
                       lastLogTerm=currentTerm + 1))

        termIsHigherFailureMessage = fill(
            '''candidateLogUpToDate should return True when lastLogIndex is
            equal to the state's last log index and the lastLogTerm is
            greater than the term of the last log entry.''')
        results.addCallback(self.assertTrue, msg=termIsHigherFailureMessage)

        # Case 4: same term but a lower index than ours -> False.
        results.addCallback(
            dropResult(self.state.candidateLogUpToDate,
                       lastLogIndex=len(log) - 2,
                       lastLogTerm=currentTerm))

        indexIsLowerFailureMessage = fill(
            '''candidateLogUpToDate should return False when lastLogIndex is
            less than the state's last log index''')
        results.addCallback(self.assertFalse, msg=indexIsLowerFailureMessage)

        # Case 5: higher index but a lower term than ours -> False.
        results.addCallback(
            dropResult(self.state.candidateLogUpToDate,
                       lastLogIndex=len(log),
                       lastLogTerm=currentTerm - 1))

        termIsLowerFailureMessage = fill(
            '''candidateLogUpToDate should return False when lastLogTerm is
            less than the term of the last log entry.''')
        results.addCallback(self.assertFalse, msg=termIsLowerFailureMessage)
        # Return the deferred chain so the test framework waits on it.
        return results
Beispiel #55
0
def wrap_text(
    text,
    indent,
    width=79,
    initial_indent=4,
    multi=False,
    preserve_line_breaks=True,
    quote=True,
):
    """Re-wrap *text* to *width* columns and render it as quoted source.

    Parameters
    ----------
    text : str
        Text to reflow; leading/trailing whitespace is stripped first.
    indent : int
        Spaces used to indent continuation lines.
    width : int
        Target line width (reduced by 3 in multi-string mode to leave
        room for the per-line quote characters).
    initial_indent : int
        Columns already consumed on the first output line by the caller.
    multi : bool
        If True, emit one quoted string per wrapped line; otherwise emit
        a single string wrapped in '"', '\"\"\"', or no quotes at all.
    preserve_line_breaks : bool
        In multi mode, embed ``\\n`` escapes between the pieces;
        otherwise leave trailing spaces for implicit concatenation.
    quote : bool
        If falsy (single-string mode only), add no quote characters.

    Returns
    -------
    str
        The wrapped, quoted text.
    """
    text = text.strip()
    # Bug fix: the single-string path below indexes text[0]/text[-1],
    # which raised IndexError on empty input.  (Multi mode already
    # yields "" for empty input, so only guard the non-multi path.)
    if not text and not multi:
        return '""' if quote else ""
    if not multi:
        if not quote:
            quote = ""
        elif len(text
                 ) + initial_indent + 8 > width or "\n" in text or '"' in text:
            # Too long, multi-line, or contains quotes: triple-quote it.
            quote = '"""'
        else:
            quote = '"'
        # Pad with a space when the text itself starts/ends with a double
        # quote so the delimiter stays unambiguous.
        if text[0] == '"':
            text = "{} {}".format(quote, text)
        else:
            text = "{}{}".format(quote, text)
        if text[-1] == '"':
            text = "{} {}".format(text, quote)
        else:
            text = "{}{}".format(text, quote)
    else:
        # Reserve room for the quote characters added per line below.
        width -= 3

    # Re-wrap each input line with normalised internal whitespace.  The
    # first line is narrower because the caller has already emitted
    # `initial_indent` columns.
    wrapped_lines = []
    first = True
    for l in text.splitlines():
        if first:
            w = width - initial_indent
            first = False
        else:
            if multi:
                w = width - indent
            else:
                w = width
        lines = textwrap.fill(
            " ".join(l.strip().split()),
            width=w,
            subsequent_indent=" " * indent,
            replace_whitespace=False,
        ).splitlines()
        wrapped_lines += [i.strip() for i in lines]
    text = ("\n" + " " * indent).join(wrapped_lines)

    if not multi:
        return text

    # Multi mode: quote each wrapped line individually; inner double
    # quotes are downgraded to single quotes to keep the literals valid.
    lines = [
        '"{}"'.format(i.strip().replace('"', "'"))
        for i in text.splitlines()
    ]
    lines2 = []
    c = 0
    for l in lines:
        if preserve_line_breaks:
            if c != 0:
                # Prefix every continuation piece with an escaped newline.
                l = '"\\n{}'.format(l[1:100])
        elif c != len(lines) - 1:
            # No explicit breaks: trailing space enables implicit
            # string-literal concatenation.
            l = '%s "' % (l[0:-1])
        c += 1
        lines2.append(l)
    # (Removed a dead `text = "{}".format(text)` no-op from the original.)
    return ("\n{}".format(" " * indent)).join(lines2)
 # NOTE(review): these one-space-indented one-liners reference `typings`,
 # `types`, and `add` from an enclosing scope that is not visible here --
 # presumably nested inside a TypeScript-declaration generator; confirm.
 # comment(): emit *s* to `typings` as a 100-column-wrapped `// ` comment.
 def comment(s): print(textwrap.indent(textwrap.fill(s, width=100, initial_indent='', subsequent_indent='  '), '    // '), file=typings)
 # addfield(): emit `name: type`, taking the type from `types` (default `any`).
 def addfield(name, tpe='any'): add(f'{name}: {types.pop(name, tpe)}')
Beispiel #57
0
def output(text1,
           col1=None,
           text2=None,
           col2=None,
           wrap=True,
           beg=None,
           end='\n',
           sep=' '):
    """Print one or two colored, optionally wrapped text fragments.

    Renders via prompt-toolkit when enabled in settings, otherwise via
    raw ANSI escape codes.  `col1`/`col2` are color names looked up in
    the module-level `colors`/`ptcolors` tables.  Returns the number of
    newlines printed (counting `end` and `sep`), so callers can track
    screen usage.
    """
    # NOTE(review): when beg is None, print(..., end=None) uses the default
    # '\n', so a leading blank line is emitted -- confirm this is intended.
    print('', end=beg)
    ptoolkit = use_ptoolkit() and ptcolors['displaymethod'] == "prompt-toolkit"

    if not ptoolkit:
        # ANSI path: resolve color names to numeric SGR codes; entries that
        # don't start with a digit are treated as "no color".
        col1 = colors[col1] if col1 and colors[col1] and colors[col1][
            0].isdigit() else None
        col2 = colors[col2] if col2 and colors[col2] and colors[col2][
            0].isdigit() else None

        # Build begin/end escape sequences (empty when no color applies).
        clb1 = "\x1B[{}m".format(col1) if col1 else ""
        clb2 = "\x1B[{}m".format(col2) if col2 else ""
        cle1 = "\x1B[0m" if col1 else ""
        cle2 = "\x1B[0m" if col2 else ""
        text1 = clb1 + text1 + cle1
        if text2 is not None:
            text2 = clb2 + text2 + cle2
    if wrap:
        # Wrap to the configured width, capped by the terminal width; a
        # width below 2 means "effectively unlimited".
        width = settings.getint("text-wrap-width")
        width = 999999999 if width < 2 else width
        width = min(width, termWidth)
        text1 = textwrap.fill(text1,
                              width,
                              replace_whitespace=False,
                              drop_whitespace=False)
        if text2:
            # If both fragments can't share a line, force the separator to
            # end with a newline (unless it is already pure newlines).
            if len(text1) + 1 + len(text2) >= width:
                if not re.match("^\n+$", sep):
                    sep += '\n'
            text2 = textwrap.fill(text2,
                                  width,
                                  replace_whitespace=False,
                                  drop_whitespace=False)

    if ptoolkit:
        # prompt-toolkit path: styles come from ptcolors, not ANSI codes.
        col1 = ptcolors[col1] if col1 and ptcolors[col1] else ""
        col2 = ptcolors[col2] if col2 and ptcolors[col2] else ""
        print_formatted_text(to_formatted_text(text1, col1), end='')
        if text2:
            print_formatted_text(to_formatted_text(sep), end='')
            print_formatted_text(to_formatted_text(text2, col2), end='')
        print('', end=end)
    else:
        if not text2:
            print(text1, end=end)
        else:
            print(text1, end='')
            print(sep, end='')
            print(text2, end=end)

    # Count every newline that reached the screen (starting at 1 for the
    # line the text itself occupies).
    linecount = 1
    if text1:
        linecount += text1.count('\n')
    if end:
        linecount += end.count('\n')
    if sep:
        linecount += sep.count('\n')
    if text2:
        linecount += text2.count('\n')
    return linecount
Beispiel #58
0
def main(language: str) -> None:
    """The hypermodern Python project."""
    # Fetch one random article from the requested language edition.
    random_page = wikipedia.random_page(language=language)

    # Title in green, then the extract wrapped to the default width.
    click.secho(random_page.title, fg="green")
    click.echo(textwrap.fill(random_page.extract))
import textwrap
from textwrap_example import sample_text


def should_indent(line):
    """Predicate for textwrap.indent: True when the stripped line length
    is even (blank lines strip to length 0, which counts as even)."""
    print('Indent {!r}?'.format(line))
    stripped = line.strip()
    return stripped.__len__() % 2 == 0


# Demo: dedent the sample text, wrap it to 50 columns, then prefix 'EVEN '
# only on the lines selected by the should_indent predicate.
# NOTE(review): `sample_text` comes from the project-local textwrap_example
# module -- its contents are not visible here.
dedented_text = textwrap.dedent(sample_text)
wrapped = textwrap.fill(dedented_text, width=50)
final = textwrap.indent(wrapped, 'EVEN ',
                        predicate=should_indent)
print('\nQuoted block:\n')
print(final)
Beispiel #60
0
def main(argv=None):
    """Scan a binary file or directory for open source libraries with CVEs.

    Parses command-line options, refreshes the NVD-derived CVE database
    according to the --update policy, then scans either single-threaded
    or (with -m) multi-threaded.  In single-thread mode the return value
    is the number of files with known CVEs (so automation can use it as
    an exit code); multi-thread mode returns a list of per-worker counts
    or the result of scan_files() for a single target.
    NOTE(review): the differing return types between the two modes look
    unintentional -- confirm before relying on the return value.
    """
    if argv is None:
        argv = sys.argv

    # Reset logger level to info
    LOGGER.setLevel(logging.INFO)

    # Build the CLI.  RawDescriptionHelpFormatter preserves the manual
    # formatting of the dedented description and wrapped epilog.
    parser = argparse.ArgumentParser(
        prog="cve-bin-tool",
        description=textwrap.dedent("""
            The CVE Binary Tool scans for a number of common, vulnerable open source
            components (openssl, libpng, libxml2, expat and a few others) to let you know
            if a given directory or binary file includes common libraries with known
            vulnerabilities.
            """),
        epilog=textwrap.fill(
            f'Available checkers: {", ".join(Scanner.available_checkers())}') +
        "\n\nPlease disclose issues responsibly!",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("directory", help="directory to scan")

    # Output-related options (verbosity, log level, destination, format).
    output_group = parser.add_argument_group("Output")
    output_group.add_argument("-q",
                              "--quiet",
                              action="store_true",
                              help="suppress output")
    output_group.add_argument(
        "-l",
        "--log",
        help="log level (default: info)",
        dest="log_level",
        action=LogAction,
        choices=["debug", "info", "warning", "error", "critical"],
    )
    output_group.add_argument(
        "-o",
        "--output-file",
        action="store",
        default=sys.stdout,
        help="provide output filename (default: output to stdout)",
    )
    output_group.add_argument(
        "-f",
        "--format",
        action="store",
        choices=["csv", "json", "console", "html"],
        default="console",
        help="update output format (default: console)",
    )
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version=VERSION,
    )
    parser.add_argument(
        "-u",
        "--update",
        action="store",
        choices=["now", "daily", "never", "latest"],
        default="daily",
        help="update schedule for NVD database (default: daily)",
    )
    parser.add_argument("-m",
                        "--multithread",
                        action="store_true",
                        help="enable multithread")
    parser.add_argument("-x",
                        "--extract",
                        action="store_true",
                        help="autoextract compressed files")

    # Checker selection: -s disables listed checkers, -r enables only them.
    checker_group = parser.add_argument_group("Checkers")
    checker_group.add_argument(
        "-s",
        "--skips",
        dest="skips",
        action="store",
        type=str,
        help="comma-separated list of checkers to disable",
    )
    checker_group.add_argument(
        "-r",
        "--runs",
        dest="checkers",
        action="store",
        type=str,
        help="comma-separated list of checkers to enable",
    )

    # No arguments at all: show help and exit successfully.
    if len(argv) <= 1:
        parser.print_help()
        return 0

    try:
        args = parser.parse_args(argv[1:])
    except SystemExit:
        # override default argparse exit(2) behaviour so positive numbers can indicate
        sys.exit(-2)

    if args.log_level:
        LOGGER.setLevel(args.log_level)

    if args.quiet:
        LOGGER.setLevel(logging.CRITICAL)

    # The checkers rely on Linux binary-inspection tooling; warn elsewhere.
    if platform.system() != "Linux":
        warning_nolinux = """
                          **********************************************
                          Warning: this utility was developed for Linux.
                          You may need to install additional utilities
                          to use it on other operating systems.
                          **********************************************
                          """
        LOGGER.warning(warning_nolinux)

    # The positional "directory" may also be a single file; reject anything
    # that is neither.
    if not os.path.isfile(args.directory) and not os.path.isdir(
            args.directory):
        LOGGER.error("Error: directory/file invalid")
        parser.print_usage()
        return -1

    # Directory walker that skips version-control metadata.
    exclude_folders = [".git"]
    walker = DirWalk(
        # pattern='*.*',
        # folder_include_pattern='*',
        folder_exclude_pattern=";".join(exclude_folders),
        # file_exclude_pattern=';'.join(exclude_files)
    ).walk

    # Connect to the database
    cvedb_orig = CVEDB()

    # if OLD_CACHE_DIR (from cvedb.py) exists, print warning
    if os.path.exists(OLD_CACHE_DIR):
        LOGGER.warning(
            f"Obsolete cache dir {OLD_CACHE_DIR} is no longer needed and can be removed."
        )

    # Clear data if -u now is set
    if args.update == "now":
        cvedb_orig.clear_cached_data()

    if args.update == "latest":
        cvedb_orig.refresh_cache_and_update_db()

    # update db if needed
    if args.update != "never":
        cvedb_orig.get_cvelist_if_stale()

    skips = ""
    if args.skips:
        skips = args.skips

    # -r/--runs inverts into a skip list: skip every registered checker
    # that was NOT explicitly enabled.
    if args.checkers:
        checkers = args.checkers.split(",")
        skips = ",".join(
            map(
                lambda checker: checker.name,
                filter(
                    lambda checker: checker.name not in checkers,
                    pkg_resources.iter_entry_points("cve_bin_tool.checker"),
                ),
            ))

    # Single-thread mode
    if not args.multithread:
        # Close database when done
        cvedb = CVEDB()
        cvedb.open()
        with cvedb:
            extractor = Extractor()
            scanner = Scanner(cvedb)
            scanner.remove_skiplist(skips)
            LOGGER.info(scanner.print_checkers())

            # Walk the target(s), extracting archives on the fly when -x.
            with extractor() as ectx:
                if os.path.isdir(args.directory):
                    for filepath in walker([args.directory]):
                        scan_and_or_extract_file(scanner, ectx, walker,
                                                 args.extract, filepath)
                elif os.path.isfile(args.directory):
                    scan_and_or_extract_file(scanner, ectx, walker,
                                             args.extract, args.directory)

            LOGGER.info("")
            LOGGER.info("Overall CVE summary: ")
            LOGGER.info(
                f"There are {scanner.files_with_cve} files with known CVEs detected"
            )
            if scanner.files_with_cve > 0:
                affected_string = ", ".join(
                    map(
                        lambda module_version: "".join(str(module_version)),
                        scanner.affected(),
                    ))
                LOGGER.info(f"Known CVEs in {affected_string}:")

                # Creates a Object for OutputEngine
                output = OutputEngine(modules=scanner.all_cves,
                                      filename=args.output_file)

                if not args.quiet or args.output_file != sys.stdout:
                    output.output_file(args.format)

            # Use the number of files with known cves as error code
            # as requested by folk planning to automate use of this script.
            # If no files found, then the program exits cleanly.
            return scanner.files_with_cve

    # Enable multithread
    else:

        def worker():
            # Each worker thread gets its own DB connection and Scanner,
            # pulls file paths off `q` until it sees the None sentinel,
            # then reports its per-thread CVE count on the `cves` queue.
            cvedb = CVEDB()
            cvedb.open()
            with cvedb:
                scanner = Scanner(cvedb)
                scanner.remove_skiplist(skips)
                while True:
                    scan_target = q.get()
                    if not scan_target:
                        q.task_done()
                        break
                    scanner.scan_file(scan_target)
                    q.task_done()
                cves.put(scanner.files_with_cve)

        # using queue
        q = queue.Queue()
        cves = queue.Queue()
        # Extract all files first, save the path to a list
        extractor = Extractor()
        file_list = []
        with extractor() as ectx:
            if os.path.isdir(args.directory):
                for filepath in walker([args.directory]):
                    extract_file(ectx, walker, args.extract, filepath,
                                 file_list)
            elif os.path.isfile(args.directory):
                extract_file(ectx, walker, args.extract, args.directory,
                             file_list)
            binary_list = []

            # Classify files as binary in parallel with a process pool
            # (4x CPU count workers, same as the thread count below).
            pool = multiprocessing.Pool(multiprocessing.cpu_count() * 4)
            try:
                for i, file_is_binary in enumerate(
                        pool.map(is_binary, file_list)):
                    if file_is_binary:
                        binary_list.append(file_list[i])

                # Get all of the binary files
                scanning_list = binary_list if binary_list else file_list
                if len(scanning_list) == 1:
                    return scan_files(scanning_list[0], vars(args))

                # create threads
                threads = []
                for i in range(multiprocessing.cpu_count() * 4):
                    t = threading.Thread(target=worker)
                    t.start()
                    threads.append(t)
                for scanning_target in scanning_list:
                    q.put(scanning_target)

                # One None sentinel per thread shuts the workers down.
                for i in range(multiprocessing.cpu_count() * 4):
                    q.put(None)

                # wait until all works done
                q.join()

                for t in threads:
                    t.join()

                # Collect each worker's files-with-CVE count.
                cve_list = []

                while not cves.empty():
                    cve_list.append(cves.get())

                return cve_list

            finally:
                # Always reclaim the process pool, even on early return.
                pool.terminate()
                pool.join()