Example no. 1
	def getFilesToBackup(self):
		filesToBackup = []
		for hash, files in self.currentBackupSet.items():
			if hash not in self.previousBackupSet:
				filesToBackup.append((hash, files))
			else:
				previousFiles = self.previousBackupSet[hash]
				found = False
				equals = []
				for previousEntry in previousFiles:
					for file in files:
						base = file[0][len(path.commonprefix([file[0], self.options.source])):].lstrip("/")
						if base in previousEntry[0]:
							found = True
						else:
							equals.append(previousEntry[0])
				if not found:
					newFiles = []
					for f in files:
						newFiles.append(f[0][len(path.commonprefix([f[0], self.options.source])):].lstrip("/"))
					if self.options.verbose:
						print warn("Duplicate:"), "image already backed up under different name: %s == %s" % (newFiles, equals)
					# 'file' here is the last entry left over from the inner loop above
					if not self.options.only_hash:
						filesToBackup.append((hash, [[file[0]]]))
		return filesToBackup
Example no. 2
def longest_common_prefix_suffix(s1, s2):
    """Return the longest common prefix of ``s1`` and reversed ``s2``,
    and the longest common prefix of reversed ``s1`` and ``s2``.

    :param s1: first string
    :param s2: second string
    :return: a 2-tuple of the two common prefixes
    """
    return commonprefix([s1, s2[::-1]]), commonprefix([s1[::-1], s2])
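A quick sanity check of the two return values (a sketch; `commonprefix` is assumed to be `os.path.commonprefix`, as in the surrounding snippets):

from os.path import commonprefix

s1, s2 = 'abcZ', 'Wcba'
# 'abc' is a prefix of s1 and, reversed, a suffix of s2; nothing matches the other way.
assert commonprefix([s1, s2[::-1]]) == 'abc'   # s2[::-1] == 'abcW'
assert commonprefix([s1[::-1], s2]) == ''      # 'Zcba' vs 'Wcba'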
Example no. 3
def get_output_file(infiles):
    prefix = path.commonprefix(infiles)
    suffix = path.commonprefix([f[::-1] for f in infiles])[::-1]
    num_suffix = ''.join([c for c in itertools.takewhile(lambda x: x.isdigit(), suffix)])
    num_prefix = ''.join([c for c in itertools.takewhile(lambda x: x.isdigit(), prefix[::-1])])[::-1]
    prefix = prefix[:len(prefix)-len(num_prefix)]
    suffix = suffix[len(num_suffix):]
    diffs = [s[len(prefix):len(s)-len(suffix)] for s in infiles]
    output_file = prefix + min(diffs, key=alphanum_key) + '-' + max(diffs, key=alphanum_key) + suffix
    return output_file
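Traced on a concrete input, the trimming steps look like this (a sketch: `alphanum_key` is not part of the snippet, so a minimal natural-sort key is assumed here):

import re
from os import path

def alphanum_key(s):
    # assumed stand-in: split out digit runs so '2' sorts before '10'
    return [int(t) if t.isdigit() else t for t in re.split(r'(\d+)', s)]

infiles = ['img001.png', 'img042.png']
assert path.commonprefix(infiles) == 'img0'                           # trimmed to 'img'
assert path.commonprefix([f[::-1] for f in infiles])[::-1] == '.png'
# The digit run bordering the varying part is handed back to the diffs,
# so get_output_file(infiles) would return 'img001-042.png'.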
Example no. 4
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = ')' if open_paren else ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return make_charset(oneletter)
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren
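`escape`, `make_charset`, `commonprefix`, and `groupby` are module-level helpers of the snippet's module; a hypothetical driver (the wrapper name and signature are assumptions, not the module's actual API) could look like:

import re
from itertools import groupby
from os.path import commonprefix
from re import escape

def regex_opt(strings):
    # regex_opt_inner expects sorted input so shared prefixes sit adjacent
    return regex_opt_inner(sorted(strings), '(')

token = re.compile(regex_opt(['else', 'elif', 'if']) + r'\b')
assert token.match('elif')   # compiles to (el(?:if|se)|if)\b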
Example no. 5
    def test_that_a_valid_license_notice_exists_in_every_source_file_and_that_global_licensing_information_is_correct(self):
        license_notice = compile(r"""(?P<comment_start>#|--|//) This Source Code Form is subject to the terms of the Mozilla Public
(?P=comment_start) License, v\. 2\.0\. If a copy of the MPL was not distributed with this file,
(?P=comment_start) You can obtain one at http://mozilla\.org/MPL/2\.0/\.
(?P=comment_start)
(?P=comment_start) Copyright \(c\) (?P<first_year>20\d\d)(-(?P<last_year>20\d\d))?, Lars Asplund lars\.anders\.asplund@gmail\.com""")
        log_date = compile(r'Date:\s*(?P<year>20\d\d)-\d\d-\d\d')
        licensed_files = []
        repo_root = abspath(join(dirname(__file__), '..'))
        for root, dirs, files in walk(repo_root):
            for f in files:
                if 'preprocessed' in root:
                    continue
                osvvm_directory = abspath(join(repo_root, 'vhdl', 'osvvm'))
                if commonprefix([osvvm_directory, abspath(join(root, f))]) == osvvm_directory:
                    continue
                osvvm_integration_example_directory = abspath(join(repo_root, 'examples', 'osvvm_integration', 'src'))
                if commonprefix([osvvm_integration_example_directory, abspath(join(root, f))]) == osvvm_integration_example_directory:
                    continue
                if splitext(f)[1] in ['.vhd', '.vhdl', '.py', '.v', '.sv']:
                    licensed_files.append(join(root, f))
        i = 0
        min_first_year = None
        max_last_year = None
        for f in licensed_files:
            stdout.write('\r%d/%d' % (i + 1, len(licensed_files)))
            stdout.flush()
            i += 1
            proc = Popen(['git', 'log', '--follow', '--date=short', f],
                         bufsize=0, stdout=PIPE, stdin=PIPE, stderr=STDOUT,
                         universal_newlines=True)
            out, _ = proc.communicate()
            first_year = None
            last_year = None
            for date in log_date.finditer(out):
                first_year = int(date.group('year')) if first_year is None else min(int(date.group('year')), first_year)
                last_year = int(date.group('year')) if last_year is None else max(int(date.group('year')), last_year)
            min_first_year = first_year if min_first_year is None else min(min_first_year, first_year)
            max_last_year = last_year if max_last_year is None else max(max_last_year, last_year)

            with open(f) as fp:
                code = fp.read()
                match = license_notice.search(code)
                self.assertIsNotNone(match, "Failed to find license notice in %s" % f)
                if first_year == last_year:
                    self.assertEqual(int(match.group('first_year')), first_year, 'Expected copyright year to be %d in %s' % (first_year, f))
                    self.assertIsNone(match.group('last_year'), 'Expected no copyright years range in %s' % f)
                else:
                    self.assertIsNotNone(match.group('last_year'), 'Expected copyright year range %d-%d in %s' % (first_year, last_year, f))
                    self.assertEqual(int(match.group('first_year')), first_year, 'Expected copyright year range to start with %d in %s' % (first_year, f))
                    self.assertEqual(int(match.group('last_year')), last_year, 'Expected copyright year range to end with %d in %s' % (last_year, f))
        print('\n')
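The `(?P=comment_start)` backreference in the notice pattern is what forces every line to reuse whichever comment leader matched on the first line; a minimal check of that mechanism:

import re

notice = re.compile(r'(?P<comment_start>#|--|//) MPL\n(?P=comment_start) MPL')
assert notice.search('-- MPL\n-- MPL')        # same leader on both lines
assert not notice.search('-- MPL\n// MPL')    # mixed leaders are rejected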
Example no. 6
    def _check_lookahead(self, inp):
        """
        Check a counterexample for lookahead transitions using prefix-closed
        queries. If an unknown lookahead is found it is added on the observation
        table.

        Args:
            inp (list): Counterexample input.
        """
        # Make a prefix closed membership query and gather the result
        prefix = []
        prefix_set = [[]]
        prefix_set_input = [[]]
        for c in inp:
            prefix.append(c)
            prefix_set_input.append(list(prefix))  # copy: prefix keeps mutating
            prefix_set.append(self.membership_query(prefix))

        for i in range(1, len(prefix_set)):
            if commonprefix([prefix_set[i], prefix_set[i-1]]) != prefix_set[i-1]:
                logging.debug('Lookahead detected at position %s : %s, %s',
                              i, prefix_set[i-1], prefix_set[i])

                j = None
                for j in reversed(range(i)):
                    if commonprefix([prefix_set[i], prefix_set[j]]) == prefix_set[j]:
                        la_inp = inp[j:i]
                        break

                la_out = _remove_common_prefix(prefix_set[i], prefix_set[j])
                access_string = self._run_in_hypothesis(inp, j)
                out_as = self.membership_query(access_string)
                out_complete = self.membership_query(list(access_string)+la_inp)

                # If The access string for the lookahead state is wrong, we will
                # add the lookahead path once this is fixed in a next iteration.
                if _remove_common_prefix(out_complete, out_as) != la_out:
                    logging.debug('Lookahead detected but access string is '+ \
                                  'wrong, skipping.')
                    continue
                if self.ot.add_lookahead_transition(access_string,
                                                    tuple(la_inp),
                                                    tuple(la_out)):
                    # Fill all table entries for the lookahead transition
                    for col in self.ot.dist_strings:
                        self._fill_ot_entry(access_string + tuple(la_inp), col)
                    # New lookahead added, no need for further processing.
                    break
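Worth noting: `os.path.commonprefix` is not string-specific. It compares element-wise on any sequences, which is exactly what the prefix-closed check above relies on, since each `prefix_set` entry is a list of outputs:

from os.path import commonprefix

assert commonprefix([[1, 0, 1], [1, 0, 0]]) == [1, 0]
assert commonprefix([['a', 'b'], ['a', 'b', 'c']]) == ['a', 'b']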
Example no. 7
def _normalize_path(base_dir, path):
    """Helper to check paths passed to methods of this class.

    Checks whether `path` is beneath `base_dir` and normalizes it.
    Additionally paths are converted into relative paths with respect to
    `base_dir`, considering PWD in case of relative paths. This
    is intended to be used in repository classes, which means that
    `base_dir` usually will be the repository's base directory.

    Parameters
    ----------
    path: str
        path to be normalized
    base_dir: str
        directory to serve as base to normalized, relative paths

    Returns
    -------
    str:
        path, that is a relative path with respect to `base_dir`
    """
    if not path:
        return path

    base_dir = realpath(base_dir)
    # path = normpath(path)
    # Note: disabled normpath, because it may break paths containing symlinks;
    # But we don't want to realpath relative paths, in case cwd isn't the correct base.

    if isabs(path):
        # path might already be a symlink pointing to annex etc,
        # so realpath only its directory, to get "inline" with realpath(base_dir)
        # above
        path = opj(realpath(dirname(path)), basename(path))
        if commonprefix([path, base_dir]) != base_dir:
            raise FileNotInRepositoryError(msg="Path outside repository: %s"
                                               % path, filename=path)

    elif commonprefix([realpath(getpwd()), base_dir]) == base_dir:
        # If we are inside the repository, rebuild relative paths.
        path = opj(realpath(getpwd()), path)
    else:
        # We were called from outside the repo. Therefore relative paths
        # are interpreted as being relative to self.path already.
        return path

    return relpath(path, start=base_dir)
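One caveat with the `commonprefix([path, base_dir]) != base_dir` style of containment check used here: `commonprefix` is purely character-wise, so a sibling directory that shares a textual prefix slips through:

from os.path import commonprefix

base = '/data/repo'
assert commonprefix(['/data/repo2/f', base]) == base   # yet repo2 is outside base
# Later examples compare against base + sep to close this hole.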
Example no. 8
def _apply_meta_attach(meta, ctx):
    action = ctx.new_action()
    begin = meta.startswith(META_ATTACH_FLAG)
    end = meta.endswith(META_ATTACH_FLAG)
    if begin:
        meta = meta[len(META_ATTACH_FLAG):]
        action.prev_attach = True
    if end:
        meta = meta[:-len(META_ATTACH_FLAG)]
        action.next_attach = True
    last_word = ctx.last_action.word or ''
    if not meta:
        # We use an empty connection to indicate a "break" in the
        # application of orthography rules. This allows the
        # stenographer to tell Plover not to auto-correct a word.
        action.orthography = False
    elif (
        last_word and
        not meta.isspace() and
        ctx.last_action.orthography and
        begin and (not end or _has_word_boundary(meta))
    ):
        new_word = add_suffix(last_word, meta)
        common_len = len(commonprefix([last_word, new_word]))
        replaced = last_word[common_len:]
        action.prev_replace = ctx.last_text(len(replaced))
        assert replaced.lower() == action.prev_replace.lower()
        last_word = last_word[:common_len]
        meta = new_word[common_len:]
    action.text = meta
    if action.prev_attach:
        action.word = _rightmost_word(last_word + meta)
    return action
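The `commonprefix` diff above turns an orthography rewrite into a minimal replace. For instance, attaching 'ed' to 'carry' (assuming `add_suffix('carry', 'ed')` yields 'carried', as English orthography rules would):

from os.path import commonprefix

last_word, new_word = 'carry', 'carried'
common_len = len(commonprefix([last_word, new_word]))   # 4, i.e. 'carr'
assert last_word[common_len:] == 'y'     # what gets replaced
assert new_word[common_len:] == 'ied'    # what gets typed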
Example no. 9
 def flush(self):
     # FIXME:
     # - what about things like emoji zwj sequences?
     # - normalize strings to better handle combining characters?
     #
     # >>> u"C\u0327"
     # 'Ç'
     # >>> len(u"C\u0327")
     # 2
     # >>> len(unicodedata.normalize('NFC', u"C\u0327"))
     # 1
     if len(self.before.replaced_text) > len(self.after.replaced_text):
         assert self.before.replaced_text.endswith(self.after.replaced_text)
         replaced_text = self.before.replaced_text
     else:
         assert self.after.replaced_text.endswith(self.before.replaced_text)
         replaced_text = self.after.replaced_text
     before = replaced_text[:len(replaced_text)-len(self.before.replaced_text)] + self.before.appended_text
     after = replaced_text[:len(replaced_text)-len(self.after.replaced_text)] + self.after.appended_text
     common_length = len(commonprefix([before, after]))
     erased = len(before) - common_length
     if erased:
         self.output.send_backspaces(erased)
     appended = after[common_length:]
     if appended:
         self.output.send_string(appended)
     self.before.reset(self.after.trailing_space)
     self.after.reset(self.after.trailing_space)
Example no. 10
    def _dump_names_from_pathnames(self, pathnames):
        """Given a list of pathnames of this form:

        (uuid[.name].dump)+

        This function will return a list of just the name part of each path.
        In the case where there is no name, it will use the default dump
        name from the configuration.

        example:

        ['6611a662-e70f-4ba5-a397-69a3a2121129.dump',
         '6611a662-e70f-4ba5-a397-69a3a2121129.flash1.dump',
         '6611a662-e70f-4ba5-a397-69a3a2121129.flash2.dump',
        ]

        returns

        ['upload_file_minidump', 'flash1', 'flash2']
        """
        prefix = path.commonprefix([path.basename(x) for x in pathnames])
        prefix_length = len(prefix)
        dump_names = []
        for a_pathname in pathnames:
            base_name = path.basename(a_pathname)
            dump_name = base_name[prefix_length:-len(self.config.dump_suffix)]
            if not dump_name:
                dump_name = self.config.dump_field
            dump_names.append(dump_name)
        return dump_names
Example no. 11
    def __init__(self, files, output_dir, name, format, reference, excludes):
        logger.debug('Incoming files: %s', files)

        basedir = commonprefix(files)
        logger.debug('Prefix is "%s"', basedir)

        if isdir(basedir):
            self.basedir = basedir
        else:
            self.basedir = dirname(basedir)

        logger.debug('Basedir is "%s"', self.basedir)

        self.output_dir = output_dir
        logger.debug('Output dir is "%s"', self.output_dir)

        globfiles = list(chain.from_iterable(list(map(glob, files))))

        logger.debug("Globfiles: %s", globfiles)

        # Note: globfiles grows while being iterated; walk() yields nothing
        # for plain files, so the appended entries terminate the loop.
        for file_or_dir in globfiles:
            for walkroot, _, walkfiles in walk(file_or_dir):
                for walkfile in walkfiles:
                    globfiles.append(join(walkroot, walkfile))

        logger.debug('Resolved globfiles: %s', globfiles)

        for exclude in (excludes or []):

            if exclude[-1] != '*':
                exclude += '*'

            evicts = fnmatch.filter(globfiles, exclude)
            logger.debug("exclude '%s' evicts => %s", exclude, evicts)

            globfiles = [
                globfile for globfile in globfiles if globfile not in evicts
            ]

        relative_files = [
            r for r in
            [relpath(globfile, self.basedir) for globfile in globfiles]
            if r != '.'
        ]

        logger.debug('Resolved relative files: %s', relative_files)

        self._files = OrderedDict.fromkeys(relative_files)

        logger.debug("Initialized map, is now %s", self._files)

        self.name = name
        self.format = format

        if not reference:
            self.refdir = self.output_dir
        elif not isdir(reference):
            self.refdir = dirname(reference)
        else:
            self.refdir = reference
Example no. 12
 def parse_type(not_parent):
     abs_other = abspath(other)
     abs_not_parent = abspath(not_parent)
     if abs_not_parent == commonprefix([abs_not_parent, abs_other]):
         raise argparse.ArgumentTypeError("{0} may not be a parent directory of {1}".format(not_parent, other))
     else:
         return not_parent
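`parse_type` closes over an `other` path defined in its enclosing scope (not shown). A hypothetical hookup as an argparse type, with the names here being assumptions:

import argparse
from os.path import abspath, commonprefix

other = '/tmp/output'   # assumed to come from the enclosing scope
parser = argparse.ArgumentParser()
parser.add_argument('--workdir', type=parse_type)
# parse_type('/tmp') raises ArgumentTypeError: /tmp is a parent of /tmp/output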
Example no. 13
def _extract_archive(archive_fpath, archive_file, archive_namelist, output_dir,
                     force_commonprefix=True, prefix=None,
                     dryrun=False, verbose=not QUIET, overwrite=None):
    """
    archive_fpath = zip_fpath
    archive_file = zip_file
    """
    # force extracted components into a subdirectory if force_commonprefix is
    # on; return_path = output_dir
    # FIXME: doesn't work right
    if prefix is not None:
        output_dir = join(output_dir, prefix)
        util_path.ensurepath(output_dir)

    archive_basename, ext = split_archive_ext(basename(archive_fpath))
    if force_commonprefix and commonprefix(archive_namelist) == '':
        # use the archivename as the default common prefix
        output_dir = join(output_dir, archive_basename)
        util_path.ensurepath(output_dir)

    for member in archive_namelist:
        (dname, fname) = split(member)
        dpath = join(output_dir, dname)
        util_path.ensurepath(dpath)
        if verbose:
            print('[utool] Unarchive ' + fname + ' in ' + dpath)

        if not dryrun:
            if overwrite is False:
                if exists(join(output_dir, member)):
                    continue
            archive_file.extract(member, path=output_dir)
    return output_dir
Example no. 14
File: core.py Project: geerk/mynt
 def _update_config(self):
     self.config = deepcopy(self.defaults)
     
     logger.debug('>> Searching for config')
     
     for ext in ('.yml', '.yaml'):
         f = File(normpath(self.src.path, 'config' + ext))
         
         if f.exists:
             logger.debug('..  found: %s', f.path)
             
             try:
                 self.config.update(Config(f.content))
             except ConfigException as e:
                 raise ConfigException(e.message, 'src: {0}'.format(f.path))
             
             self.config['locale'] = self.opts.get('locale', self.config['locale'])
             
             self.config['assets_url'] = absurl(self.config['assets_url'], '')
             self.config['base_url'] = absurl(self.opts.get('base_url', self.config['base_url']), '')
             
             for setting in ('archives_url', 'posts_url', 'tags_url'):
                 self.config[setting] = absurl(self.config[setting])
             
             for setting in ('archives_url', 'assets_url', 'base_url', 'posts_url', 'tags_url'):
                 if re.search(r'(?:^\.{2}/|/\.{2}$|/\.{2}/)', self.config[setting]):
                     raise ConfigException('Invalid config setting.', 'setting: {0}'.format(setting), 'path traversal is not allowed')
             
             for pattern in self.config['include']:
                 if op.commonprefix((self.src.path, normpath(self.src.path, pattern))) != self.src.path:
                     raise ConfigException('Invalid include path.', 'path: {0}'.format(pattern), 'path traversal is not allowed')
             
             break
     else:
         logger.debug('..  no config file found')
Example no. 15
 def commit(self, filename, basedir="/"):
     skip = len(path.commonprefix([filename, basedir]))
     sink = self.open(filename[skip:])
     source = open(filename, "r")
     shutil.copyfileobj(source, sink)
     source.close()
     sink.close()
Example no. 16
    def score_model(self, model_txn, txn):
        """Score an existing transaction for its ability to provide a model
        for an incomplete transaction.

        Args:
          model_txn: The transaction to be scored.
          txn: The incomplete transaction.
        Returns:
          A float number representing the score, normalized in [0,1].
        """
        def get_description(txn):
            return ('{} {}'.format(txn.payee or '', txn.narration or '')).strip()

        # If the target transaction does not have a description, there is
        # nothing we can do
        txn_description = get_description(txn)
        n_max = len(txn_description)
        if n_max > 1:
            # Only consider model transactions whose posting to the target
            # account has the same sign as the transaction to be completed
            posting = [p for p in model_txn.postings if p.account == self.account][0]
            if number.same_sign(posting.units.number, txn.postings[0].units.number):
                model_txn_description = get_description(model_txn)
                n_match = len(path.commonprefix(
                    [model_txn_description, txn_description]))
                score = float(n_match) / float(n_max)
                return score
        return 0
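Concretely, the score is the length of the shared description prefix, normalized by the length of the incomplete transaction's description:

from os import path

model_desc = 'EDEKA Groceries'
txn_desc = 'EDEKA'
n_match = len(path.commonprefix([model_desc, txn_desc]))   # 5
score = float(n_match) / float(len(txn_desc))              # 1.0, a full match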
Example no. 17
def discover(directories, index, filterfunc=lambda filename: True):
    """Import and initialize modules from `directories` list.

    :param directories: list of directories
    :param index: index function"""

    def find(directories, filterfunc):
        """Discover and yield python modules (aka files that endswith .py) if
        `filterfunc` returns True for that filename."""

        for directory in directories:
            for root, dirs, files in os.walk(directory):
                for fname in files:
                    if fname.endswith('.py') and filterfunc(join(root, fname)):
                        yield join(root, fname)

    for filename in find(directories, filterfunc):
        modname, ext = os.path.splitext(os.path.basename(rchop(filename, os.sep + '__init__.py')))
        fp, path, descr = imp.find_module(modname, directories)

        prefix = commonprefix((PATH, filename))
        if prefix:
            modname = 'acrylamid.'
            modname += rchop(filename[len(prefix):].replace(os.sep, '.'), '.py')

        try:
            mod = sys.modules[modname]
        except KeyError:
            try:
                mod = imp.load_module(modname, fp, path, descr)
            except (ImportError, SyntaxError, ValueError) as e:
                log.exception('%r %s: %s', modname, e.__class__.__name__, e)
                continue

        index(mod)
Example no. 18
def get_articles(request):
  query = request.GET.get('query', '')
  tq = parse_tq(request.GET.get('tq', ''))
  tqx = parse_tqx(request.GET.get('tqx', ''))

  select = Article.select()
  select = select.limit(tq.get('limit', 1))
  select = select.offset(tq.get('offset', 0))
  if query:
    select = select.where(Article.subject % ('*%s*' % query))

  subjects = [ a.subject for a in select ]
  LOG.debug(lcs(subjects))
  LOG.debug(commonprefix(subjects))

  dt = gviz.DataTable({
      'posted': ('datetime', 'Posted'),
      'poster': ('string', 'Poster'),
      'subject': ('string', 'Subject'),
      'message_id': ('string', 'ID')
      })
  dt.LoadData( a._data for a in select )
  dt_order = ['subject', 'posted', 'poster', 'message_id']
  gviz_json = dt.ToJSonResponse(req_id=tqx.get('reqId', 0),
                                columns_order=dt_order)

  return itty.Response(gviz_json, content_type='application/json')
Example no. 19
 def _derive_module_name(self, path):
     common = commonprefix([self.working, path])
     slice_module_name = slice(len(common) + 1, len(path))
     return path[slice_module_name]\
         .replace('.py', '')\
         .replace('\\', '.')\
         .replace('/', '.')
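Traced on a concrete path (POSIX-style separators assumed):

from os.path import commonprefix

working = '/proj/src'
p = '/proj/src/pkg/mod.py'
common = commonprefix([working, p])    # '/proj/src'
assert p[len(common) + 1:].replace('.py', '').replace('/', '.') == 'pkg.mod'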
Example no. 20
 def __contains__(self, path):
     p1 = self.path
     p2 = path
     if self.IS_WINDOWS:
         p1 = p1.lower()
         p2 = p2.lower()
     return commonprefix((p1 + sep, p2)) == p1 + sep
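Appending `sep` before the comparison is what makes this containment test immune to the sibling-name pitfall noted earlier ('/' stands in for `os.sep` here):

from os.path import commonprefix

p1 = '/data/repo'
assert commonprefix((p1 + '/', '/data/repo/file')) == p1 + '/'    # inside
assert commonprefix((p1 + '/', '/data/repo2/file')) != p1 + '/'   # sibling rejected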
Example no. 21
def lca(scores1, scores2, tax):
    classdict = dict()
    for query, hit in scores1.iteritems():
        scr1 = set(hit.keys())
        scr2 = set(scores2[query].keys())
        # find the common hits of both dictionaries
        common = scr1.intersection(scr2)
        commonscores = dict()
        topscore = 0
        for goodhit in common:
            score = hit[goodhit] + scores2[query][goodhit]
            commonscores[goodhit] = score
            if score > topscore:
                topscore = score
        # remove from common all the scores that aren't at least 95% of topscore
        minscore = 0.95 * topscore
        topscores = commonscores.copy()
        for goodhit in commonscores:
            if commonscores[goodhit] < minscore:
                del topscores[goodhit]
        # get the LCA for these
        classify = ""
        for tophit in topscores:
            if classify == "" and tophit in tax:
                classify = str(tax[tophit])
            else:
                # print "And the common pref is " + commonprefix([classify, str(tax[tophit])])
                classify = commonprefix([classify, str(tax[tophit])])
        if classify == "" or classify == "[]":
            classify = "Unclassified;"
        # take the longest substring ending in ;
        meaningful = re.match(".+;", classify)
        classify = meaningful.group()
        classdict[query] = classify
    return classdict
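The final regex is doing real work here: `commonprefix` can cut a lineage string in the middle of a rank name, and the greedy `re.match(".+;", ...)` snaps it back to the last complete, ';'-terminated rank:

import re
from os.path import commonprefix

common = commonprefix(['Bacteria;Firmicutes;', 'Bacteria;Fusobacteria;'])
assert common == 'Bacteria;F'                         # cut inside a rank name
assert re.match('.+;', common).group() == 'Bacteria;'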
Example no. 22
    def lookup(self, index, word_array):
        """ Get closest match to word (accepts imperfect matches)

        :param list[str] word_array: str
        :param int index: index of word in word_array to check
        :return: closest match or None if none found
        :rtype: str
        """
        word = word_array[index]
        logging.debug("looking up in indicator dictionary: " + word)
        i = bisect_left(self.dict, word)
        nearest_matches = self.dict[i - 1: i + 1]

        # todo: return length of match as well
        for i in range(0, len(nearest_matches)):
            split = nearest_matches[i].split()
            # require multi-word indicators to match exactly
            # todo: after this, it's exact so don't use get_closest_matches
            if len(split) > 1 and \
                    not self.match_multiple_words(split, word_array[index:]):
                nearest_matches[i] = ""

        match = get_close_matches(word, nearest_matches, n=1,
                                  cutoff=min_indicator_distance)
        if not match:
            return None

        match = match[0]
        # todo: arbitrary, essentially checking stem of word
        if word != match and len(commonprefix([word, match])) < 3:
            return None

        logging.debug("Closest match to " + word + " is " + match)
        return match
Example no. 23
def printStatus(makeLog, makeAllLog, textTestTmp, smtpServer, out):
    failed = ""
    build = commonprefix([basename(makeLog), basename(makeAllLog)])
    print >> out, build,
    print >> out, datetime.now().ctime()
    print >> out, "--"
    print >> out, basename(makeLog)
    warnings = 0
    errors = 0
    svnLocked = False
    for l in file(makeLog):
        if ("svn: Working copy" in l and "locked" in l) or "svn: Failed" in l:
            svnLocked = True
            failed += l
        if re.search("[Ww]arn[ui]ng[: ]", l):
            warnings += 1
        if re.search("[Ee]rror[: ]", l) or re.search("[Ff]ehler[: ]", l):
            errors += 1
            failed += l
    if svnLocked:
        failed += "svn up failed\n\n"
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make failed\n\n"
    print >> out, "--"
    for root, dirs, files in os.walk(textTestTmp):
        for f in files:
            if f.startswith("batchreport"):
                b = open(join(root, f))
                l = b.readline()
                if l.startswith("FAILED") or l.startswith("succeeded") or l.startswith("killed") or l.startswith("known bugs"):
                    print >> out, f, l,
                b.close()
    print >> out, "--"
    print >> out, basename(makeAllLog)
    warnings = 0
    errors = 0
    for l in file(makeAllLog):
        if re.search("[Ww]arn[ui]ng[: ]", l):
            warnings += 1
        if "error " in l.lower():
            errors += 1
            failed += l
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make debug failed\n\n"
    print >> out, "--"
    if failed:
        fromAddr = "*****@*****.**"
        toAddr = "*****@*****.**"
        message = """From: "%s" <%s>
To: %s
Subject: Error occurred while building

%s""" % (build, fromAddr, toAddr, failed)
        server = smtplib.SMTP(smtpServer)
        server.sendmail(fromAddr, toAddr, message)
        server.quit()
Example no. 24
def lca(scores1, scores2, percent, tax):
	classdict = dict()
	for query, hit in scores1.iteritems():
		scr1 = set(hit.keys())
		scr2 = set(scores2[query].keys())
		#find the common hits of both dictionaries
		common = scr1.intersection(scr2)
		commonscores=dict()
		for goodhit in common:
			score = hit[goodhit] + scores2[query][goodhit]
			commonscores[goodhit] = score
		#get the top percent scores of this intersection
		topcommon = toppercent(commonscores, percent)
		#get the LCA for these
		classify = ''
		for tophit, score in topcommon.iteritems():
			if classify == '':
				classify = tax[tophit]
			else:
				classify = commonprefix([classify, tax[tophit]])
		if classify == '':
			classify = 'Unclassified;'
		#print classify
		#take longest substr ending in ;
		meaningful = re.match(".+;", classify)
		classify = meaningful.group()
		classdict[query] = classify
		#print query + "\t" + classify
	return classdict
Example no. 25
 def test_check_existing_dirpaths(self):
     # Check that returns a list with the paths, in the same order as the
     # input comma separated list
     tmp_dirpath1 = mkdtemp(prefix='pyqi_tmp_testd_')
     tmp_dirpath2 = mkdtemp(prefix='pyqi_tmp_testd_')
     self._dirs_to_clean_up = [tmp_dirpath1, tmp_dirpath2]
     option = PyqiOption('-d', '--dirs_test', type='existing_dirpaths')
     exp = [tmp_dirpath1, tmp_dirpath2]
     value = ",".join(exp)
     obs = check_existing_dirpaths(option, '-d', value)
     self.assertEqual(obs, exp)
     # Check that returns a list with the paths when using wildcards
     # note that the order is not important now
     value = commonprefix(exp) + '*'
     obs = check_existing_dirpaths(option, '-d', value)
     self.assertEqual(set(obs), set(exp))
     # Check that raises an error when the wildcard does not match any path
     self.assertRaises(OptionValueError, check_existing_dirpaths, option,
         '-f', '/hopefully/a/non/existing/path*')
     # Check that raises an error when one of the directories does not exist
     value = ",".join([tmp_dirpath1, tmp_dirpath2,
         '/hopefully/a/non/existing/path*'])
     self.assertRaises(OptionValueError, check_existing_dirpaths, option,
         '-f', value)
     # Check that raises an error when one of the paths is a file
     tmp_f, tmp_path = mkstemp()
     self._paths_to_clean_up = [tmp_path]
     value = ",".join([tmp_dirpath1, tmp_dirpath2, tmp_path])
     self.assertRaises(OptionValueError, check_existing_dirpaths, option,
         '-f', value)
Example no. 26
    def _dialog(self, message="Select Folder", new_directory=True):
        """Creates a directory dialog box for working with

        Keyword Arguments:
            message (string): Message to display in dialog
            new_directory (bool): True if allowed to create new directory

        Returns:
            A directory to be used for the file operation.
        """
        # Wildcard pattern to be used in file dialogs.
        if not self.multifile:
            mode = "directory"
        else:
            mode = "files"
        dlg = get_filedialog(what=mode, title=message, mustexist=not new_directory)
        if len(dlg) != 0:
            if not self.multifile:
                self.directory = dlg
                ret = self.directory
            else:
                self.pattern = [path.basename(name) for name in dlg]
                self.directory = path.commonprefix(dlg)
                ret = self.directory
        else:
            ret = None
        return ret
Example no. 27
def create_graph_df(vtask_paths, graphs_dir_out):
    """
    Creates a frame that maps sourcefiles to networkx digraphs in terms of DOT files
    :param source_path_list:
    :param dest_dir_path:
    :param relabel:
    :return:
    """
    if not isdir(graphs_dir_out):
        raise ValueError('Invalid destination directory.')
    data = []
    graphgen_times = []

    print('Writing graph representations of verification tasks to {}'.format(graphs_dir_out), flush=True)

    common_prefix = commonprefix(vtask_paths)
    for vtask in tqdm(vtask_paths):
        short_prefix = dirname(common_prefix)
        path = join(graphs_dir_out, vtask[len(short_prefix):][1:])

        if not os.path.exists(dirname(path)):
            os.makedirs(dirname(path))

        ret_path = path + '.pickle'

        # DEBUG
        if isfile(ret_path):
            data.append(ret_path)
            continue

        start_time = time.time()

        graph_path, node_labels_path, edge_types_path, edge_truth_path, node_depths_path \
            = _run_cpachecker(abspath(vtask))
        nx_digraph = nx.read_graphml(graph_path)

        node_labels = _read_node_labeling(node_labels_path)
        nx.set_node_attributes(nx_digraph, 'label', node_labels)

        edge_types = _read_edge_labeling(edge_types_path)
        parsed_edge_types = _parse_edge(edge_types)
        nx.set_edge_attributes(nx_digraph, 'type', parsed_edge_types)

        edge_truth = _read_edge_labeling(edge_truth_path)
        parsed_edge_truth = _parse_edge(edge_truth)
        nx.set_edge_attributes(nx_digraph, 'truth', parsed_edge_truth)

        node_depths = _read_node_labeling(node_depths_path)
        parsed_node_depths = _parse_node_depth(node_depths)
        nx.set_node_attributes(nx_digraph, 'depth', parsed_node_depths)

        assert not isfile(ret_path)
        assert node_labels and parsed_edge_types and parsed_edge_truth and parsed_node_depths
        nx.write_gpickle(nx_digraph, ret_path)
        data.append(ret_path)

        gg_time = time.time() - start_time
        graphgen_times.append(gg_time)

    return pd.DataFrame({'graph_representation': data}, index=vtask_paths), graphgen_times
Example no. 28
def safejoin(root, subpath):
    if not SAFENAME.match(subpath):
        raise BadName(u"unsafe path name: %r" % subpath)
    path = join(root, subpath)
    if commonprefix([root + sep, path]) != root + sep:
        raise BadName(u"invalid relative path: %r" % subpath)
    return path
Example no. 29
 def __contains__(self, path):
     p1 = self.path
     p2 = path
     if system() == "Windows":
         p1 = p1.lower()
         p2 = p2.lower()
     return commonprefix((p1 + sep, p2)) == p1 + sep
Example no. 30
    def validate_absolute_path(self, root, absolute_path):
        """Overrides StaticFileHandler's method to include authentication
        """
        # Get the filename (or the base directory) of the result
        len_prefix = len(commonprefix([root, absolute_path]))
        base_requested_fp = absolute_path[len_prefix:].split(sep, 1)[0]

        current_user = self.current_user

        # If the user is an admin, then allow access
        if current_user.level == 'admin':
            return super(ResultsHandler, self).validate_absolute_path(
                root, absolute_path)

        # otherwise, we have to check if they have access to the requested
        # resource
        user_id = current_user.id
        accessible_filepaths = check_access_to_analysis_result(
            user_id, base_requested_fp)

        # Turn these filepath IDs into absolute paths
        db_files_base_dir = get_db_files_base_dir()
        relpaths = filepath_ids_to_rel_paths(accessible_filepaths)

        accessible_filepaths = {join(db_files_base_dir, relpath)
                                for relpath in relpaths.values()}

        # check if the requested resource is a file (or is in a directory) that
        # the user has access to
        if join(root, base_requested_fp) in accessible_filepaths:
            return super(ResultsHandler, self).validate_absolute_path(
                root, absolute_path)
        else:
            raise QiitaPetAuthorizationError(user_id, absolute_path)
Example no. 31
def _atom_to_action_spaces_after(atom, last_action):
    """Convert an atom into an action.

    Arguments:

    atom -- A string holding an atom. An atom is an irreducible string that is
    either entirely a single meta command or entirely text containing no meta
    commands.

    last_action -- The context in which the new action takes place.

    Returns: An action for the atom.

    """
    
    action = _Action()
    last_word = last_action.word
    last_glue = last_action.glue
    last_attach = last_action.attach
    last_capitalize = last_action.capitalize
    last_lower = last_action.lower
    last_upper = last_action.upper
    last_upper_carry = last_action.upper_carry
    last_orthography = last_action.orthography
    last_space = SPACE if last_action.text.endswith(SPACE) else NO_SPACE
    meta = _get_meta(atom)
    if meta is not None:
        meta = _unescape_atom(meta)
        if meta in META_COMMAS:
            action.text = meta + SPACE
            if last_action.text != '':
                action.replace = SPACE
            if last_attach:
                action.replace = NO_SPACE
        elif meta in META_STOPS:
            action.text = meta + SPACE
            action.capitalize = True
            action.lower = False
            if last_action.text != '':
                action.replace = SPACE
            if last_attach:
                action.replace = NO_SPACE
        elif meta == META_CAPITALIZE:
            action = last_action.copy_state()
            action.capitalize = True
            action.lower = False
        elif meta == META_LOWER:
            action = last_action.copy_state()
            action.lower = True
            action.capitalize = False
        elif meta == META_UPPER:
            action = last_action.copy_state()
            action.lower = False
            action.upper = True
            action.capitalize = False
        elif meta == META_RETRO_CAPITALIZE:
            action = last_action.copy_state()
            action.word = _capitalize(action.word)
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word + SPACE
                action.text = _capitalize(last_action.word + SPACE)
            else:
                action.replace = last_action.text
                action.text = _capitalize_nowhitespace(last_action.text)
        elif meta == META_RETRO_LOWER:
            action = last_action.copy_state()
            action.word = _lower(action.word)
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word + SPACE
                action.text = _lower(last_action.word + SPACE)
            else:
                action.replace = last_action.text
                action.text = _lower_nowhitespace(last_action.text)
        elif meta == META_RETRO_UPPER:
            action = last_action.copy_state()
            action.word = _upper(action.word)
            action.upper_carry = True
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word + SPACE
                action.text = _upper(last_action.word + SPACE)
            else:
                action.replace = last_action.text
                action.text = _upper(last_action.text)
        elif meta.startswith(META_RETRO_FORMAT):
            if (meta.startswith(META_RETRO_FORMAT) and meta.endswith(')')):
                dict_format = meta[len(META_RETRO_FORMAT):-len(')')]
                action = last_action.copy_state()
                action.replace = last_action.word + SPACE
                try:
                    float(last_action.word)
                except ValueError:
                    pass
                else:
                    format = dict_format.replace('c', '{:,.2f}')
                    cast_input = float(last_action.word)
                try:
                    int(last_action.word)
                except ValueError:
                    pass
                else:
                    format = dict_format.replace('c', '{:,}')
                    cast_input = int(last_action.word)
                action.text = format.format(cast_input) + SPACE
                action.word = format.format(cast_input)
        elif meta.startswith(META_COMMAND):
            action = last_action.copy_state()
            action.command = meta[len(META_COMMAND):]
        elif meta.startswith(META_GLUE_FLAG):
            action.glue = True
            text = meta[len(META_GLUE_FLAG):]
            if last_capitalize:
                text = _capitalize(text)
            if last_lower:
                text = _lower(text)
            action.text = text + SPACE
            action.word = _rightmost_word(text)
            if last_glue:
                action.replace = SPACE
                action.word = _rightmost_word(last_word + text)
            if last_attach:
                action.replace = NO_SPACE
                action.word = _rightmost_word(last_word + text)
        elif (meta.startswith(META_ATTACH_FLAG) or 
              meta.endswith(META_ATTACH_FLAG)):
            begin = meta.startswith(META_ATTACH_FLAG)
            end = meta.endswith(META_ATTACH_FLAG)
            if begin:
                meta = meta[len(META_ATTACH_FLAG):]
            if end and len(meta) >= len(META_ATTACH_FLAG):
                meta = meta[:-len(META_ATTACH_FLAG)]
                
            space = NO_SPACE if end else SPACE
            replace_space = NO_SPACE if last_attach else SPACE
            
            if end:
                action.attach = True
            if begin and end and meta == '':
                # We use an empty connection to indicate a "break" in the 
                # application of orthography rules. This allows the stenographer 
                # to tell plover not to auto-correct a word.
                action.orthography = False
                if last_action.text != '':
                    action.replace = replace_space
            if (((begin and not end) or (begin and end and ' ' in meta)) and 
                last_orthography):
                new = orthography.add_suffix(last_word.lower(), meta)
                common = commonprefix([last_word.lower(), new])
                if last_action.text == '':
                    replace_space = NO_SPACE
                action.replace = last_word[len(common):] + replace_space
                meta = new[len(common):]
            if begin and end:
                if last_action.text != '':
                    action.replace = replace_space
            if last_capitalize:
                meta = _capitalize(meta)
            if last_lower:
                meta = _lower(meta)
            if last_upper_carry:
                meta = _upper(meta)
                action.upper_carry = True
            action.text = meta + space
            action.word = _rightmost_word(
                last_word[:len(last_word + last_space)-len(action.replace)] + meta)
            if end and not begin and last_space == SPACE:
                action.word = _rightmost_word(meta)
        elif meta.startswith(META_KEY_COMBINATION):
            action = last_action.copy_state()
            action.combo = meta[len(META_KEY_COMBINATION):]
    else:
        text = _unescape_atom(atom)
        if last_capitalize:
            text = _capitalize(text)
        if last_lower:
            text = _lower(text)
        if last_upper:
            text = _upper(text)
            action.upper_carry = True
            
        action.text = text + SPACE
        action.word = _rightmost_word(text)
    return action
Example no. 32
def convert():
    # Check if app is in another process
    from subprocess import check_output
    from shutil import copy2
    s = check_output('tasklist', shell=True)
    if s.count(b"pymanga") > 1:
        fatal("Pymanga is already running on another process!")

    # Prompt for archive
    archives = selectzip("Open archive(s) to use:")

    # Extract and read zip file
    files, skipped = zip.read(archives)
    if len(skipped):
        confirm("The following files will not be included:", str(skipped))

    # Select output file
    from pathlib import Path
    from os.path import commonprefix
    outFile = path.basename(commonprefix(archives))
    if "." in outFile:
        outFile = outFile.split(".")[0]

    if False:  # confirm("Choose format", "Do you want to convert the .cbz to .pdf?") == "yes":

        label("Creating .pdf file...")
        command = [imagemagick]
        command.extend(files)

        # Add extension to output file
        outFile = path.join(temp, outFile + ".pdf")
        command.append(outFile)

        # Convert file using ImageMagick
        from subprocess import Popen, PIPE, STDOUT
        p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='UTF8')

        # Wait for process to finish
        from time import sleep
        while p.poll() is None:
            sleep(1)
        response = p.stdout.readline()
        if response != "":
            fatal("Can't convert to pdf!", response)

    else:

        label("Creating .cbz file...")

        # Add extension to output file
        outFile = path.join(temp, outFile + ".cbz")

        # Copy all images in order to root of temp folder
        order = 0
        pages = []
        fill = len(str(len(files))) + 1
        for file in files:
            order += 1
            page = str(order).zfill(fill) + file[file.rfind('.'):]
            copy2(file, page)
            pages.append(page)

        # Create .cbz file
        zip.create(pages, outFile)

    # Save output file to Desktop or open with default editor
    outFile2 = path.join(Path.home(), "Desktop", path.basename(outFile))
    try:
        copy2(outFile, outFile2)
    except Exception as e:
        fatal("Error copying file!", e=e)

    success("File saved to the desktop successfully!")

    from os import _exit
    _exit(1)
Example no. 33
def _best_prefix_transform(set1, target_set2):
    """
    Find a way to transform prefixes of items in set1 to match target_set2

    Example:
        >>> set1 = {'mod.f.0.w',
        >>>         'mod.f.1.b',
        >>>         'mod.f.1.n',
        >>>         'mod.f.1.rm',
        >>>         'mod.f.1.rv',}
        >>> #
        >>> target_set2 = {
        >>>      'bar.foo.extra.f.1.b',
        >>>      'bar.foo.extra.f.1.n',
        >>>      'bar.foo.extra.f.1.w',
        >>>      'bar.foo.extra.f.3.w',
        >>> }
        >>> _best_prefix_transform(set1, target_set2)
        >>> target_set2.add('JUNK')
        >>> _best_prefix_transform(set1, target_set2)
    """

    # probably an efficient way to do this with a trie

    # NOTE: In general this is a graph-isomorphism problem or a  maximum common
    # subgraph problem. However, we can look only at the special case of
    # "maximum common subtrees". Given two directory structures (as trees)
    # we find the common bits.
    # https://perso.ensta-paris.fr/~diam/ro/online/viggo_wwwcompendium/node168.html
    # We can approximate to O(log log n / log^2 n)
    # Can get algorithm from maximum independent set
    # https://arxiv.org/abs/1602.07210

    # The most efficient algorithm here would be for solving
    # "Maximum common labeled subtrees"
    # APX-hard for unordered trees, but polytime solveable for ordered trees
    # For directory structures we can induce an order, and hence obtain a
    # polytime solution
    # #
    # On the Maximum Common Embedded Subtree Problem for Ordered Trees
    # https://pdfs.semanticscholar.org/0b6e/061af02353f7d9b887f9a378be70be64d165.pdf

    from os.path import commonprefix
    prefixes1 = commonprefix(list(set1)).split('.')
    prefixes2 = commonprefix(list(target_set2)).split('.')

    # Remove the trailing prefixes that are the same
    num_same = 0
    for i in range(1, min(len(prefixes1), len(prefixes2))):
        if prefixes1[-i] == prefixes2[-i]:
            num_same = i
        else:
            break
    if num_same:  # seq[:-0] would empty the list, so only trim after a match
        prefixes1 = prefixes1[:-num_same]
        prefixes2 = prefixes2[:-num_same]

    ALLOW_FUZZY = 1
    if ALLOW_FUZZY and len(prefixes2) == 0:
        # SUPER HACK FOR CASE WHERE THERE IS JUST ONE SPOILER ELEMENT IN THE
        # TARGET SET. THE ALGORITHM NEEDS TO BE RETHOUGHT FOR THAT CASE
        possible_prefixes = [k.split('.') for k in target_set2]
        prefix_hist = ub.ddict(lambda: 0)
        for item in possible_prefixes:
            for i in range(1, len(item)):
                prefix_hist[tuple(item[0:i])] += 1
        prefixes2 = ['.'.join(ub.argmax(prefix_hist))]

    def add_prefix(items, prefix):
        return {prefix + k for k in items}

    def remove_prefix(items, prefix):
        return {k[len(prefix):] if k.startswith(prefix) else k for k in items}

    import itertools as it
    found_cand = []
    for i1, i2 in it.product(range(len(prefixes1) + 1),
                             range(len(prefixes2) + 1)):
        if i1 == 0 and i2 == 0:
            continue
        # Very inefficient, we should be able to do better
        prefix1 = '.'.join(prefixes1[:i1])
        prefix2 = '.'.join(prefixes2[:i2])
        if prefix1:
            prefix1 = prefix1 + '.'
        if prefix2:
            prefix2 = prefix2 + '.'

        # We are allowed to remove a prefix from a set, add the other
        # prefix to the set, or remove and then add.
        set1_cand1 = remove_prefix(set1, prefix1)
        set1_cand2 = add_prefix(set1, prefix2)
        set1_cand3 = add_prefix(set1_cand1, prefix2)

        common1 = set1_cand1 & target_set2
        common2 = set1_cand2 & target_set2
        common3 = set1_cand3 & target_set2
        if common1:
            found_cand.append({
                'transform': [('remove', prefix1)],
                'value': len(common1),
            })
        if common2:
            found_cand.append({
                'transform': [('add', prefix2)],
                'value': len(common2),
            })
        if common3:
            found_cand.append({
                'transform': [('remove', prefix1), ('add', prefix2)],
                'value':
                len(common3),
            })
    if len(found_cand):
        found = max(found_cand, key=lambda x: x['value'])
    else:
        found = None
    return found
Example no. 34
def find_common_path(dict_values):
    return commonprefix(dict_values).rsplit('.', 1)[0]
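The same trick in one line for dotted names; the `rsplit` drops whatever partial segment `commonprefix` stopped inside:

from os.path import commonprefix

values = ['a.b.cat', 'a.b.dog']
assert commonprefix(values) == 'a.b.'
assert commonprefix(values).rsplit('.', 1)[0] == 'a.b'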
Example no. 35
def _atom_to_action_spaces_before(atom, last_action):
    """Convert an atom into an action.

    Arguments:

    atom -- A string holding an atom. An atom is an irreducible string that is
    either entirely a single meta command or entirely text containing no meta
    commands.

    last_action -- The context in which the new action takes place.

    Returns: An action for the atom.

    """

    action = _Action(space_char=last_action.space_char, case=last_action.case)
    last_word = last_action.word
    last_glue = last_action.glue
    last_attach = last_action.attach
    last_capitalize = last_action.capitalize
    last_lower = last_action.lower
    last_upper = last_action.upper
    last_upper_carry = last_action.upper_carry
    last_orthography = last_action.orthography
    begin = False  # for meta attach
    meta = _get_meta(atom)
    if meta is not None:
        meta = _unescape_atom(meta)
        if meta in META_COMMAS:
            action.text = meta
        elif meta in META_STOPS:
            action.text = meta
            action.capitalize = True
            action.lower = False
            action.upper = False
        elif meta == META_CAPITALIZE:
            action = last_action.copy_state()
            action.capitalize = True
            action.lower = False
            action.upper = False
        elif meta == META_LOWER:
            action = last_action.copy_state()
            action.lower = True
            action.upper = False
            action.capitalize = False
        elif meta == META_UPPER:
            action = last_action.copy_state()
            action.lower = False
            action.upper = True
            action.capitalize = False
        elif meta == META_RETRO_CAPITALIZE:
            action = last_action.copy_state()
            action.word = _capitalize(action.word)
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word
                action.text = _capitalize(last_action.word)
            else:
                action.replace = last_action.text
                action.text = _capitalize_nowhitespace(last_action.text)
        elif meta == META_RETRO_LOWER:
            action = last_action.copy_state()
            action.word = _lower(action.word)
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word
                action.text = _lower(last_action.word)
            else:
                action.replace = last_action.text
                action.text = _lower_nowhitespace(last_action.text)
        elif meta == META_RETRO_UPPER:
            action = last_action.copy_state()
            action.word = _upper(action.word)
            action.upper_carry = True
            if len(last_action.text) < len(last_action.word):
                action.replace = last_action.word
                action.text = _upper(last_action.word)
            else:
                action.replace = last_action.text
                action.text = _upper(last_action.text)
        elif (meta.startswith(META_CARRY_CAPITALIZATION) or
              meta.startswith(META_ATTACH_FLAG + META_CARRY_CAPITALIZATION)):
            action = _apply_carry_capitalize(meta, last_action)
        elif meta.startswith(META_RETRO_FORMAT):
            if meta.startswith(META_RETRO_FORMAT) and meta.endswith(')'):
                action = _apply_currency(meta, last_action)
        elif meta.startswith(META_COMMAND):
            action = last_action.copy_state()
            action.command = meta[len(META_COMMAND):]
        elif meta.startswith(META_MODE):
            action = last_action.copy_state()
            action = _change_mode(meta[len(META_MODE):], action)
        elif meta.startswith(META_GLUE_FLAG):
            action.glue = True
            glue = last_glue or last_attach
            space = NO_SPACE if glue else SPACE
            text = meta[len(META_GLUE_FLAG):]
            if last_capitalize:
                text = _capitalize(text)
            if last_lower:
                text = _lower(text)
            action.text = space + text
            action.word = _rightmost_word(last_word + action.text)
        elif (meta.startswith(META_ATTACH_FLAG)
              or meta.endswith(META_ATTACH_FLAG)):
            begin = meta.startswith(META_ATTACH_FLAG)
            end = meta.endswith(META_ATTACH_FLAG)
            if begin:
                meta = meta[len(META_ATTACH_FLAG):]
            if end and len(meta) >= len(META_ATTACH_FLAG):
                meta = meta[:-len(META_ATTACH_FLAG)]
            space = NO_SPACE if begin or last_attach else SPACE
            if end:
                action.attach = True
            if begin and end and meta == '':
                # We use an empty connection to indicate a "break" in the
                # application of orthography rules. This allows the
                # stenographer to tell plover not to auto-correct a word.
                action.orthography = False
            if (((begin and not end) or (begin and end and ' ' in meta))
                    and last_orthography):
                new = orthography.add_suffix(last_word.lower(), meta)
                common = commonprefix([last_word.lower(), new])
                action.replace = last_word[len(common):]
                meta = new[len(common):]
            if last_capitalize:
                meta = _capitalize(meta)
            if last_lower:
                meta = _lower(meta)
            if last_upper_carry:
                meta = _upper(meta)
                action.upper_carry = True
            action.text = space + meta
            action.word = _rightmost_word(last_word[:len(last_word) -
                                                    len(action.replace)] +
                                          action.text)
        elif meta.startswith(META_KEY_COMBINATION):
            action = last_action.copy_state()
            action.combo = meta[len(META_KEY_COMBINATION):]
    else:
        text = _unescape_atom(atom)
        if last_capitalize:
            text = _capitalize(text)
        if last_lower:
            text = _lower(text)
        if last_upper:
            text = _upper(text)
            action.upper_carry = True
        space = NO_SPACE if last_attach else SPACE
        action.text = space + text
        action.word = _rightmost_word(text)

    action.text = _apply_mode(action.text, action.case, action.space_char,
                              begin, last_attach, last_glue, last_capitalize,
                              last_upper, last_lower)

    return action
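
The orthography branch above uses commonprefix to replace only the part of the last word that actually changes: the shared stem stays on screen, and just the differing tail is erased and retyped. A minimal standalone sketch of that idea (the helper name minimal_edit and the sample words are illustrative, not from the source):

from os.path import commonprefix

def minimal_edit(old, new):
    # Split the transition from `old` to `new` into the text to erase
    # and the text to type, reusing the shared prefix.
    common = commonprefix([old, new])
    return old[len(common):], new[len(common):]

# only the differing tail is retyped when a suffix triggers orthography
assert minimal_edit("baby", "babies") == ("y", "ies")
assert minimal_edit("run", "running") == ("", "ning")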
Example n. 36
    def addToTar(self,
                 tar,
                 pattern,
                 exclude=[],
                 base=None,
                 proc=None,
                 verbose=False):
        """The workhorse for the packCase-method"""

        if base is None:
            base = path.basename(self.name)

        if self.parallel and proc is None:
            for p in self.processorDirs():
                self.addToTar(tar,
                              path.join(path.dirname(pattern), p,
                                        path.basename(pattern)),
                              exclude=exclude,
                              base=base,
                              verbose=verbose,
                              proc=p)

        for name in glob.glob(pattern):
            excluded = False
            for e in exclude:
                if fnmatch.fnmatch(path.basename(name), e):
                    excluded = True
            if excluded:
                continue

            if path.isdir(name):
                for m in listdir(name):
                    self.addToTar(tar,
                                  path.join(name, m),
                                  exclude=exclude,
                                  verbose=verbose,
                                  proc=proc,
                                  base=base)
            else:
                arcname = path.join(base, name[len(self.name) + 1:])
                if path.islink(name):
                    # if the symbolic link points to a file in the case keep it
                    # otherwise replace with the real file
                    lPath = path.os.readlink(name)
                    if not path.isabs(lPath):
                        rPath = path.realpath(name)
                        common = path.commonprefix(
                            [path.abspath(rPath),
                             path.abspath(base)])
                        # if the path is shorter than the base it must be outside the case
                        if len(common) < len(path.abspath(base)):
                            name = path.abspath(rPath)
                    else:
                        # use the absolute path
                        name = lPath
                try:
                    tar.getmember(arcname)
                    # don't add it ... the file is already there
                except KeyError:
                    # file not in tar
                    if verbose:
                        print_("Adding", name, "to tar")
                    tar.add(name, arcname=arcname)
Example n. 37
 def _find_best_prefix(self, rkey):
     for prefix in self._subtries.keys():
         common = commonprefix([prefix, rkey])
         if common:
             return prefix, len(common)
     return None
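
Note that this helper returns the first sub-trie key sharing any prefix with rkey, which depends on dict iteration order rather than match quality. If the longest match were wanted instead, a hedged variant (illustrative, not the original API) might look like:

from os.path import commonprefix

def find_best_prefix(prefixes, rkey):
    # Pick the key with the longest shared prefix instead of the first
    # non-empty one; return None when nothing overlaps at all.
    best = max(prefixes, key=lambda p: len(commonprefix([p, rkey])), default=None)
    if best is None:
        return None
    n = len(commonprefix([best, rkey]))
    return (best, n) if n else None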
Example n. 38
 def file_endswith(end_pattern):
     # `filename` and `files` are free variables from the enclosing scope
     return splitext(split(list(
         filter(lambda x: len(commonprefix([filename, x])) > 0 and x.endswith(end_pattern), files))[0])[1])[0]
Example n. 39
 def _hosted_zone_matches_domain(self, zone, domain):
     reversed_domain = ''.join(reversed(domain))
     reversed_hosted_zone_name = ''.join(reversed(zone['Name']))
     common_name = commonprefix(
         (reversed_hosted_zone_name, reversed_domain))
     return len(common_name) == len(reversed_hosted_zone_name)
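
Reversing both strings turns a suffix test into a prefix test: the zone matches exactly when its whole reversed name is the common prefix. A self-contained sketch (function and sample names are illustrative):

from os.path import commonprefix

def zone_matches_domain(zone_name, domain):
    # True when zone_name is a suffix of domain, e.g. the hosted zone
    # "example.com." matches the domain "www.example.com.".
    reversed_zone = zone_name[::-1]
    return commonprefix((reversed_zone, domain[::-1])) == reversed_zone

assert zone_matches_domain("example.com.", "www.example.com.")
assert not zone_matches_domain("example.com.", "example.org.")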
Example n. 40
    def seg(self, seg_node, seg_data):
        """
        Generate XML for the segment data and matching map node

        @param seg_node: Map Node
        @type seg_node: L{node<map_if.x12_node>}
        @param seg_data: Segment object
        @type seg_data: L{segment<segment.Segment>}
        """
        if not seg_node.is_segment():
            raise EngineError('Node must be a segment')
        parent = pop_to_parent_loop(seg_node)  # Get enclosing loop
        # check path for new loops to be added
        cur_path = self._path_list(parent.get_path())
        #if seg_node.id == 'GS':
        #    import ipdb; ipdb.set_trace()
        if self.last_path == cur_path and seg_node.is_first_seg_in_loop():
            # loop repeat
            self.writer.pop()
            (xname, attrib) = self._get_loop_info(cur_path[-1])
            self.writer.push(xname, attrib)
        else:
            last_path = self.last_path
            match_idx = self._get_path_match_idx(last_path, cur_path)
            root_path = self._path_list(
                commonprefix(['/'.join(cur_path), '/'.join(last_path)]))
            if seg_node.is_first_seg_in_loop() and root_path == cur_path:
                match_idx -= 1
            for i in range(len(last_path) - 1, match_idx - 1, -1):
                self.writer.pop()
            for i in range(match_idx, len(cur_path)):
                (xname, attrib) = self._get_loop_info(cur_path[i])
                self.writer.push(xname, attrib)
        seg_node_id = self._get_node_id(seg_node, parent, seg_data)
        (xname, attrib) = self._get_seg_info(seg_node_id)
        self.writer.push(xname, attrib)
        for i in range(len(seg_data)):
            child_node = seg_node.get_child_node_by_idx(i)
            if child_node.usage == 'N' or seg_data.get('%02i' %
                                                       (i + 1)).is_empty():
                pass  # Do not try to output for invalid or empty elements
            elif child_node.is_composite():
                (xname, attrib) = self._get_comp_info(seg_node_id)
                self.writer.push(xname, attrib)
                comp_data = seg_data.get('%02i' % (i + 1))
                for j in range(len(comp_data)):
                    subele_node = child_node.get_child_node_by_idx(j)
                    (xname, attrib) = self._get_subele_info(subele_node.id)
                    self.writer.elem(xname, comp_data[j].get_value(), attrib)
                self.writer.pop()  # end composite
            elif child_node.is_element():
                if seg_data.get_value('%02i' % (i + 1)) == '':
                    pass
                    #self.writer.empty(u"ele", attrs={u'id': child_node.id})
                else:
                    (xname, attrib) = self._get_ele_info(child_node.id)
                    self.writer.elem(xname,
                                     seg_data.get_value('%02i' % (i + 1)),
                                     attrib)
            else:
                raise EngineError(
                    'Node must be a either an element or a composite')
        self.writer.pop()  # end segment
        self.last_path = cur_path
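
The pop/push bookkeeping above hinges on the lowest common ancestor of the previous and current loop paths. Joining the paths with '/' and taking a character-wise commonprefix can match partial loop ids ('GS1' vs 'GS2' share 'GS'), which is why the result is passed back through _path_list. A simplified sketch that sidesteps the issue by exploiting the fact that commonprefix accepts arbitrary sequences, not just strings (names illustrative):

from os.path import commonprefix

def lca_depth(last_path, cur_path):
    # Passing the path *lists* makes commonprefix compare whole loop
    # ids at a time, so 'GS1' vs 'GS2' share no component even though
    # the joined strings share 'GS'.
    return len(commonprefix([last_path, cur_path]))

assert lca_depth(['ISA', 'GS1'], ['ISA', 'GS2']) == 1
# close len(last_path) - lca_depth(...) loops, then open cur_path below the LCA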
Example n. 41
    def run(self, stdscr):
        """
        Cheat selection menu processing.
        :param stdscr: screen
        """
        # init
        Gui.init_colors()
        stdscr.clear()
        self.height, self.width = stdscr.getmaxyx()
        self.max_visible_cheats = self.height - 7
        self.cursorpos = 0

        while True:
            stdscr.refresh()
            self.cheats = self.search()
            self.draw(stdscr)
            c = stdscr.getch()
            if c == curses.KEY_ENTER or c == 10 or c == 13:
                # Process selected command (if not empty)
                if self.selected_cheat() is not None:
                    Gui.cmd = command.Command(self.selected_cheat(),
                                              Gui.arsenalGlobalVars)
                    # check if arguments are needed
                    if len(Gui.cmd.args) != 0:
                        # args needed -> ask
                        args_menu = ArgslistMenu(self)
                        curses.endwin()
                        curses.echo()
                        wrapper(args_menu.run)
                    break
            elif c == curses.KEY_F10 or c == 27:
                Gui.cmd = None
                break  # Exit the while loop
            elif c == 339 or c == curses.KEY_PPAGE:
                # Page UP
                self.move_page(-1)
            elif c == 338 or c == curses.KEY_NPAGE:
                # Page DOWN
                self.move_page(1)
            elif c == curses.KEY_UP:
                # Move UP
                self.move_position(-1)
            elif c == curses.KEY_DOWN:
                # Move DOWN
                self.move_position(1)
            elif c == curses.KEY_BACKSPACE or c == 8:
                if self.check_move_cursor(-1):
                    i = self.xcursor - self.x_init - 1
                    self.input_buffer = self.input_buffer[:i] + self.input_buffer[i + 1:]
                    self.xcursor -= 1
                    # new search -> reset position
                    self.position = 0
                    self.page_position = 0
            elif c == curses.KEY_DC or c == 127:
                if self.check_move_cursor(1):
                    i = self.xcursor - self.x_init - 1
                    self.input_buffer = self.input_buffer[:i + 1] + self.input_buffer[i + 2:]
                    # new search -> reset position
                    self.position = 0
                    self.page_position = 0
            elif c == curses.KEY_LEFT:
                # Move cursor LEFT
                if self.check_move_cursor(-1): self.xcursor -= 1
            elif c == curses.KEY_RIGHT:
                # Move cursor RIGHT
                if self.check_move_cursor(1): self.xcursor += 1
            elif c == curses.KEY_BEG or c == curses.KEY_HOME:
                # Move cursor to the BEGIN
                self.xcursor = self.x_init
            elif c == curses.KEY_END:
                # Move cursor to the END
                self.xcursor = self.x_init + len(self.input_buffer)
            elif c == 9:
                # TAB cmd auto complete
                if self.input_buffer != "":
                    predictions = []
                    for cheat in self.cheats:
                        if cheat.command.startswith(self.input_buffer):
                            predictions.append(cheat.command)
                    if len(predictions) != 0:
                        self.input_buffer = commonprefix(predictions)
                        self.xcursor = self.x_init + len(self.input_buffer)
                        self.position = 0
                        self.page_position = 0
            elif c >= 20 and c < 127:
                i = self.xcursor - self.x_init
                self.input_buffer = self.input_buffer[:i] + chr(c) + self.input_buffer[i:]
                self.xcursor += 1
                # new search -> reset position
                self.position = 0
                self.page_position = 0
        curses.endwin()
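
The TAB handler implements shell-style completion: collect every command starting with the current buffer, then extend the buffer to their longest common prefix. Reduced to its core (names illustrative):

from os.path import commonprefix

def complete(buffer, commands):
    # Extend `buffer` as far as every matching command agrees.
    matches = [c for c in commands if c.startswith(buffer)]
    return commonprefix(matches) if matches else buffer

assert complete("gi", ["git add", "git push", "grep"]) == "git "
assert complete("x", ["git add"]) == "x"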
Example n. 42
def activate(sandbox_name,
             add_sdk_to_path=False,
             new_env_vars=None,
             app_id=None,
             **overrides):
    """Context manager for command-line scripts started outside of dev_appserver.

    :param sandbox_name: str, one of 'local', 'remote' or 'test'
    :param add_sdk_to_path: bool, optionally adds the App Engine SDK to sys.path
    :param new_env_vars: dict of extra environment variables to propagate into the sandbox
    :param app_id: optionally overrides the application id from app.yaml
    :param overrides: attribute overrides applied to the dev_appserver options structure

    Available sandboxes:

      local: Adds libraries specified in app.yaml to the path and initializes local service stubs as though
             dev_appserver were running.

      remote: Adds libraries specified in app.yaml to the path and initializes remote service stubs.

      test: Adds libraries specified in app.yaml to the path and sets up no service stubs. Use this
            with `google.appengine.ext.testbed` to provide isolation for tests.

    Example usage:

        import djangae.sandbox as sandbox

        with sandbox.activate('local'):
            from django.core.management import execute_from_command_line
            execute_from_command_line(sys.argv)

    """
    if sandbox_name not in SANDBOXES:
        raise RuntimeError('Unknown sandbox "{}"'.format(sandbox_name))

    project_root = environment.get_application_root()

    # Store our original sys.path before we do anything, this must be tacked
    # onto the end of the other paths so we can access globally installed things (e.g. ipdb etc.)
    original_path = sys.path[:]

    # Setup paths as though we were running dev_appserver. This is similar to
    # what the App Engine script wrappers do.
    if add_sdk_to_path:
        try:
            import wrapper_util  # Already on sys.path
        except ImportError:
            sys.path[0:0] = [_find_sdk_from_path()]
            import wrapper_util
    else:
        try:
            import wrapper_util
        except ImportError:
            raise RuntimeError(
                "Couldn't find a recent enough Google App Engine SDK, make sure you are using at least 1.9.6"
            )

    sdk_path = _find_sdk_from_python_path()
    _PATHS = wrapper_util.Paths(sdk_path)

    project_paths = []  # Paths under the application root
    system_paths = []  # All other paths
    app_root = environment.get_application_root()

    # We need to look at the original path, and make sure that any paths
    # which are under the project root are first, then any other paths
    # are added after the SDK ones
    for path in _PATHS.scrub_path(_SCRIPT_NAME, original_path):
        if commonprefix([app_root, path]) == app_root:
            project_paths.append(path)
        else:
            system_paths.append(path)

    # We build a list of SDK paths, and add any additional ones required for
    # the oauth client
    appengine_paths = _PATHS.script_paths(_SCRIPT_NAME)
    for path in _PATHS.oauth_client_extra_paths:
        if path not in appengine_paths:
            appengine_paths.append(path)

    # Now, we make sure that paths within the project take precedence, followed
    # by the SDK, then finally any paths from the system Python (for stuff like
    # ipdb etc.)
    sys.path = (project_paths + appengine_paths + system_paths)

    # Gotta set the runtime properly otherwise it changes appengine imports, like webapp
    # when you are not running dev_appserver
    import yaml
    with open(os.path.join(project_root, 'app.yaml'), 'r') as app_yaml:
        app_yaml = yaml.safe_load(app_yaml)
        os.environ['APPENGINE_RUNTIME'] = app_yaml.get('runtime', '')

    # Initialize as though `dev_appserver.py` is about to run our app, using all the
    # configuration provided in app.yaml.
    import google.appengine.tools.devappserver2.application_configuration as application_configuration
    import google.appengine.tools.devappserver2.python.sandbox as sandbox
    import google.appengine.tools.devappserver2.devappserver2 as devappserver2
    import google.appengine.tools.devappserver2.wsgi_request_info as wsgi_request_info
    import google.appengine.ext.remote_api.remote_api_stub as remote_api_stub
    import google.appengine.api.apiproxy_stub_map as apiproxy_stub_map

    # The argparser is the easiest way to get the default options.
    options = devappserver2.PARSER.parse_args([project_root])
    options.enable_task_running = False  # Disable task running by default, it won't work without a running server
    options.skip_sdk_update_check = True

    for option in overrides:
        if not hasattr(options, option):
            raise ValueError("Unrecognized sandbox option: {}".format(option))

        setattr(options, option, overrides[option])

    if app_id:
        configuration = application_configuration.ApplicationConfiguration(
            options.config_paths, app_id=app_id)
    else:
        configuration = application_configuration.ApplicationConfiguration(
            options.config_paths)

    # Enable built-in libraries from app.yaml without enabling the full sandbox.
    module = configuration.modules[0]
    for l in sandbox._enable_libraries(module.normalized_libraries):
        sys.path.insert(1, l)

    # Propagate provided environment variables to the sandbox.
    # This is required for the runserver management command settings flag,
    # which sets an environment variable needed by Django.
    from google.appengine.api.appinfo import EnvironmentVariables
    old_env_vars = module.env_variables if module.env_variables else {}
    if new_env_vars is None:
        new_env_vars = {}
    module._app_info_external.env_variables = EnvironmentVariables.Merge(
        old_env_vars,
        new_env_vars,
    )

    try:
        global _OPTIONS
        global _CONFIG
        _CONFIG = configuration
        _OPTIONS = options  # Store the options globally so they can be accessed later
        kwargs = dict(
            devappserver2=devappserver2,
            configuration=configuration,
            options=options,
            wsgi_request_info=wsgi_request_info,
            remote_api_stub=remote_api_stub,
            apiproxy_stub_map=apiproxy_stub_map,
        )
        with SANDBOXES[sandbox_name](**kwargs):
            yield

    finally:
        sys.path = original_path
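
The path-classification loop treats a path as project-local when commonprefix([app_root, path]) == app_root. Because commonprefix is character-wise, '/app' would also "contain" '/application'; on Python 3.4+, os.path.commonpath compares whole components instead. A hedged alternative (not what the snippet above uses):

import os.path

def is_under(root, path):
    # Component-wise containment check; commonprefix alone would let
    # '/app' claim '/application' as a child.
    try:
        return os.path.commonpath([root, path]) == root
    except ValueError:
        # mixed absolute/relative paths or paths on different drives
        return False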
Example n. 43
from os import listdir, stat
from os.path import isfile, join, commonprefix, splitext

thePath = "\\\\etude\\Archiv\\Daten\\Partikel\\FARO\\"
theScript = "U:\\home\\reina\\src\\megamol-dev\\plugins\\pbs\\utils\\cpe2mmpld.lua"

allFiles = [f for f in listdir(thePath) if isfile(join(thePath, f))]
filteredFiles = []

for f in allFiles:
    found = False
    for f2 in allFiles:
        pf = commonprefix([f, f2])
        # print("common prefix of %s and %s : %s" % (f, f2, pf))
        if pf == splitext(f)[0]:
            found = True
            break

    if not splitext(f)[1] == ".raw":
        found = True

    size = stat(thePath + f)
    if not found and size.st_size > 48:
        # print("%s is a leaf" % f)
        filteredFiles.append(f)

for f in filteredFiles:
    print("mmconsole -p %s -o file %s" % (theScript, splitext(f)[0]))
Example n. 44
 def reflexive(self, n):
     if n is None:
         return False
     if commonprefix([self.path, n.path]) == n.path:  #LCA(self,n)
         return True
     return False
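
The reflexive test relies on n.path being a string prefix of self.path, which character-wise also accepts '0.31' as a descendant of '0.3'. If the materialized paths use a separator, comparing components avoids that; a sketch under that assumption (names illustrative):

from os.path import commonprefix

def is_ancestor_or_self(node_path, anc_path, sep='.'):
    # commonprefix works on any sequences, so splitting first makes the
    # prefix test component-wise.
    node, anc = node_path.split(sep), anc_path.split(sep)
    return commonprefix([node, anc]) == anc

assert is_ancestor_or_self('0.3.7', '0.3')
assert not is_ancestor_or_self('0.31', '0.3')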
Example n. 45
def df_read_list(dir_sat, filenms, add_word=None):
    out_filename = path.commonprefix(filenms)
    if add_word is not None:
        out_filename = out_filename + add_word
    return [df_read(dir_sat, fname)[0] for fname in filenms], out_filename
Example n. 46
def tied_name(name1, name2):
    common_suffix = commonprefix([name1[::-1], name2[::-1]])[::-1]
    return common_suffix.strip(':_')
Example n. 47
    def from_spec(cls, spec, ids):
        """
        Create a schema from a spec

        :param spec: This is a list of dictionaries. Each dictionary specifies
            a single attribute that's expected to appear in the message. Each
            dictionary contains these fields:

             - "name": The name of the attribute; this will typically be an
               identifier from an enum in a Linux kernel header file. This
               value must appear as a key in the ``ids`` param.

             - "python_name": The short Python identifier to be used to access
               the attribute in parsed values and identify it when building
               with kwargs. If absent, defaults to lower-case version of the
               unique suffix of "name" among sibling attributes e.g. if
               attribute names are "MY_ATTR_FOO" and "MY_ATTR_BAR", Python
               names default to
               "foo" and "bar".

             - "type": The name of the expected type of the attribute. This
               determines the way in which the attribute is built and
               parsed. Allowed values are:

               - "u8", "u16", "s16", "u32", "u64": Integer of relevant size and
                 signedness
               - "str": Null-terminated ASCII string
               - "bytes": Byte blob
               - "array": Concatenated array of fixed-size
                 sub-elements. "subelem_type" specifies type of those
                 sub-elems.  An example of this in Linux is
                 NL80211_ATTR_STA_SUPPORTED_RATES which is an array of u8
                 values. This is mapped to a Python list.
               - "list": Set of sub-attributes, using attribute IDs 1-N to
                 index sub-elements. This uses the attribute header (which has
                 a length field) allows the sub-elements to have a variable
                 size. An example of this is in Linux is
                 NL80211_ATTR_IFTYPE_EXT_CAPA; this is a nested set of
                 attributes with IDs 1-N, each of which is a _further_ nested
                 set of attributes expressing interface capability info.  This
                 is also mapped to a Python list.
               - Or, another list of attribute entries, representing a
                 directly-nested attribute set. An example of this in Linux is
                 NL80211_ATTR_KEY, where the payload contains further netlink
                 attributes.

             - "subelem_type": For attribute types that represent collections,
               this stores the type of the elements of the collection. This can
               have all the same values as "type".

             - "required": If present and True, :meth:`build` raises an error
               if no value is provided for this attribute

        :param ids: Mapping from attribute names to numerical IDs.
        """
        # In case of direct recursion from this class to itself (i.e. when
        # the spec has an attribute that directly embeds another attribute set)
        # We pass the full spec in as a dict instead of just the list of
        # subattribute specs. Need to convert to that list.
        if isinstance(spec, dict):
            spec = spec["type"]

        common_prefix = commonprefix(list(a["name"] for a in spec))
        if not common_prefix.endswith("_"):
            common_prefix += "_"

        # It would be weird for the ordering to matter semantically, but we use
        # OrderedDict to keep stable ordering so that message content can be
        # predicted byte-for-byte for testing and debugging.
        subattr_schemata = OrderedDict()
        name_mapping = {}
        required_attrs = []
        for field_spec in spec:
            schema_cls = get_schema_class(field_spec)
            subattr_schema = schema_cls.from_spec(field_spec, ids)
            subattr_schemata[field_spec["name"]] = subattr_schema

            if "python_name" in field_spec:
                name_mapping[field_spec["python_name"]] = field_spec["name"]
            else:
                python_name = field_spec["name"][len(common_prefix):].lower()
                name_mapping[python_name] = field_spec["name"]

            if field_spec.get("required", False):
                required_attrs.append(field_spec["name"])

        return cls(subattr_schemata, ids, required_attrs, name_mapping)
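
The python_name default strips the common prefix of the sibling attribute names; the underscore fix-up guards against the prefix ending mid-word. A slightly more defensive variant trims back to the last underscore, so names like MY_ATTR_FOO/MY_ATTR_FAR cannot lose their distinguishing letters (a sketch, not the library's code):

from os.path import commonprefix

def default_python_names(names):
    # Strip the shared prefix, trimmed back to a whole '_'-separated
    # chunk, and lower-case the remainder.
    prefix = commonprefix(names)
    if not prefix.endswith("_"):
        prefix = prefix[:prefix.rfind("_") + 1]
    return {n: n[len(prefix):].lower() for n in names}

assert default_python_names(["MY_ATTR_FOO", "MY_ATTR_BAR"]) == \
    {"MY_ATTR_FOO": "foo", "MY_ATTR_BAR": "bar"}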
Example n. 48
    #if it really doesn't match the previous one
    #then take everything before this and send to orphan or testdict
    if latestname == "":
        #if it's the first in the file
        thistestname = name
        thistest = [line]
        latestname = name
        latestamnt = amount
    else:
        #NORMALLY:
        #IF it matches the previous one:
        if matches(name, amount, latestname, latestamnt, thistestname,
                   thistest):
            #if they at least match B13_XXXX
            print "it's a match"
            thistestname = commonprefix([thistestname, name])
            thistest.append(line)
        else:

            #IF they're not a match
            if len(thistest) == 0:
                print "ERROR THIS SHOULD NEVER HAPPEN"
            elif len(thistest) == 1:
                #EITHER send the orphan to orphans
                orphans.append(thistest)
                thistest = [line]
                latestname = name
                latestamnt = amount
                thistestname = name
            else:
                #OR send the previous good test to testdict
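
The running test name is generalized by intersecting each new name with the accumulated one, so a family of related measurements collapses to its shared stem (sample names illustrative):

from os.path import commonprefix

# successive lines "B13_0042_a", "B13_0042_b" keep the shared stem
assert commonprefix(["B13_0042_a", "B13_0042_b"]) == "B13_0042_"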
Example n. 49
    def execute(self, mount_strings):
        '''
        The execute method runs the generated command line
        (from either generateRandomParams or readInput)
        If docker is specified, it will attempt to use it, instead
        of local execution.
        After execution, it checks for output file existence.
        '''
        command, exit_code, con = self.cmdLine[0], None, self.con or {}
        # Check for Container image
        conType, conImage = con.get('type'), con.get('image')
        conIndex = con.get("index")
        conIsPresent = (conImage is not None)
        # Export environment variables, if they are specified in the descriptor
        envVars = {}
        if 'environment-variables' in list(self.desc_dict.keys()):
            variables = [(p['name'], p['value'])
                         for p in self.desc_dict['environment-variables']]
            for (envVarName, envVarValue) in variables:
                os.environ[envVarName] = envVarValue
                envVars[envVarName] = envVarValue
        # Container script constant name
        # Note that docker/singularity cannot do a local volume
        # mount of files starting with a '.', hence this one does not
        millitime = int(time.time() * 1000)
        dsname = ('temp-' +
                  str(random.SystemRandom().randint(0, int(millitime))) + "-" +
                  str(millitime) + '.localExec.boshjob.sh')
        dsname = op.realpath(dsname)
        # If container is present, alter the command template accordingly
        container_location = ""
        container_command = ""
        if conIsPresent:
            if conType == 'docker':
                # Pull the docker image
                if self._localExecute("docker pull " + str(conImage))[1]:
                    container_location = "Local copy"
            elif conType == 'singularity':
                if not conIndex:
                    conIndex = "shub://"
                elif not conIndex.endswith("://"):
                    conIndex = conIndex + "://"
                conName = conImage.replace("/", "-").replace(":",
                                                             "-") + ".simg"

                if conName not in os.listdir('./'):
                    pull_loc = "\"{0}\" {1}{2}".format(conName, conIndex,
                                                       conImage)
                    container_location = ("Pulled from {1} ({0} not found "
                                          "in current"
                                          "working director").format(
                                              conName, pull_loc)
                    # Pull the singularity image
                    if self._localExecute("singularity pull --name " +
                                          pull_loc)[1]:
                        raise ExecutorError("Could not pull Singularity image")
                else:
                    container_location = "Local ({0})".format(conName)
                conName = op.abspath(conName)
            else:
                raise ExecutorError('Unrecognized container'
                                    ' type: \"%s\"' % conType)
            # Generate command script
            uname, uid = pwd.getpwuid(os.getuid())[0], str(os.getuid())
            # Adds the user to the container before executing
            # the templated command line
            userchange = '' if not self.changeUser else ("useradd --uid " +
                                                         uid + ' ' + uname +
                                                         "\n")
            # If --changeUser was desired, run with su so that
            # any output files are owned by the user instead of root
            # Get the supported shell by the docker or singularity
            if self.changeUser:
                command = 'su ' + uname + ' -c ' + "\"{0}\"".format(command)
            cmdString = "#!" + self.shell + " -l\n" + userchange + str(command)
            with open(dsname, "w") as scrFile:
                scrFile.write(cmdString)
            # Ensure the script is executable
            self._localExecute("chmod 755 " + dsname)
            # Prepare extra environment variables
            envString = ""
            if envVars:
                for (key, val) in list(envVars.items()):
                    envString += "SINGULARITYENV_{0}='{1}' ".format(key, val)
            # Change launch (working) directory if desired
            launchDir = self.launchDir
            if launchDir is None:
                launchDir = op.realpath('./')
            launchDir = op.realpath(launchDir)
            # Run it in docker
            mount_strings = [] if not mount_strings else mount_strings
            mount_strings = [
                op.realpath(m.split(":")[0]) + ":" + m.split(":")[1]
                for m in mount_strings
            ]
            mount_strings.append(op.realpath('./') + ':' + launchDir)
            if conType == 'docker':
                envString = " "
                if envVars:
                    for (key, val) in list(envVars.items()):
                        envString += " -e {0}='{1}' ".format(key, val)
                # export mounts to docker string
                docker_mounts = " -v ".join(m for m in mount_strings)
                container_command = ('docker run --entrypoint=' + self.shell +
                                     ' --rm' + envString + ' -v ' +
                                     docker_mounts + ' -w ' + launchDir + ' ' +
                                     str(conImage) + ' ' + dsname)
            elif conType == 'singularity':
                envString = ""
                if envVars:
                    for (key, val) in list(envVars.items()):
                        envString += "SINGULARITYENV_{0}='{1}' ".format(
                            key, val)
                # TODO: Singularity 2.4.6 default configuration binds: /proc,
                # /sys, /dev, ${HOME}, /tmp, /var/tmp, /etc/localtime, and
                # /etc/hosts. This means that any path down-stream shouldn't
                # be bound on the command-line, as this will currently raise
                # an exception. See:
                #   https://github.com/singularityware/singularity/issues/1469
                #
                # Previous bind string:
                #   singularity_mounts = " -B ".join(m for m in mount_strings)

                def_mounts = [
                    "/proc", "/sys", "/dev", "/tmp", "/var/tmp",
                    "/etc/localtime", "/etc/hosts",
                    op.realpath(op.expanduser('~')),
                    op.expanduser('~')
                ]

                # Ensures the set of paths provided has no overlap
                compaths = list()
                for idxm, m in enumerate(mount_strings):
                    for n in mount_strings[idxm:]:
                        if n != m:
                            tmp = op.dirname(op.commonprefix([n, m]))
                            if tmp != '/':
                                compaths += [tmp]
                    if not any(m.startswith(c) for c in compaths):
                        compaths += [m]
                mount_strings = set(compaths)

                # Only adds mount points for those not already included
                singularity_mounts = ""
                for m in mount_strings:
                    if not any(d in m for d in def_mounts):
                        singularity_mounts += "-B {0} ".format(m)

                container_command = (envString + 'singularity exec '
                                     '--cleanenv ' + singularity_mounts +
                                     ' -W ' + launchDir + ' ' + str(conName) +
                                     ' ' + dsname)
            else:
                raise ExecutorError('Unrecognized container type: '
                                    '\"%s\"' % conType)
            (stdout, stderr), exit_code = self._localExecute(container_command)
        # Otherwise, just run command locally
        else:
            (stdout, stderr), exit_code = self._localExecute(command)
        time.sleep(0.5)  # Give the OS a (half) second to finish writing

        # Destroy temporary docker script, if desired.
        # By default, keep the script so the dev can look at it.
        if conIsPresent and self.destroyTempScripts:
            if os.path.isfile(dsname):
                os.remove(dsname)

        # Check for output files
        missing_files = []
        output_files = []
        all_files = evaluateEngine(self, "output-files")
        required_files = evaluateEngine(self, "output-files/optional=False")
        optional_files = evaluateEngine(self, "output-files/optional=True")
        for f in all_files.keys():
            file_name = all_files[f]
            fd = FileDescription(f, file_name, False)
            if op.exists(file_name):
                output_files.append(fd)
            else:  # file does not exist
                if f in required_files.keys():
                    missing_files.append(fd)

        # Set error messages
        desc_err = ''
        if 'error-codes' in list(self.desc_dict.keys()):
            for err_elem in self.desc_dict['error-codes']:
                if err_elem['code'] == exit_code:
                    desc_err = err_elem['description']
                    break

        return ExecutorOutput(stdout, stderr, exit_code, desc_err,
                              output_files, missing_files, command,
                              container_command, container_location)
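
The overlap-removal loop above approximates a set of non-nested mount points through pairwise commonprefix calls. An alternative sketch with the same intent, keeping only paths that no already-kept ancestor covers (an illustrative helper, not Boutiques code):

import os

def collapse_mounts(paths):
    # Keep only the topmost directories: a path is dropped when an
    # already-kept path is one of its ancestors.
    result = []
    for p in sorted(set(paths)):
        if not any(p == r or p.startswith(r + os.sep) for r in result):
            result.append(p)
    return result

assert collapse_mounts(["/data/a/b", "/data/a", "/other"]) == ["/data/a", "/other"]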
Example n. 50
def printStatus(makeLog, makeAllLog, textTestTmp, smtpServer, out):
    failed = ""
    build = commonprefix([basename(makeLog), basename(makeAllLog)])
    print >> out, build,
    print >> out, datetime.now().ctime()
    print >> out, "--"
    print >> out, basename(makeLog)
    warnings = 0
    errors = 0
    svnLocked = False
    for l in file(makeLog):
        if ("svn: Working copy" in l and "locked" in l) or "svn: Failed" in l:
            svnLocked = True
            failed += l
        if "warning " in l.lower() or "warnung" in l.lower():
            warnings += 1
        if "error " in l.lower():
            errors += 1
            failed += l
    if svnLocked:
        failed += "svn up failed\n\n"
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make failed\n\n"
    print >> out, "--"
    for root, dirs, files in os.walk(textTestTmp):
        for f in files:
            if f.startswith("batchreport"):
                b = open(join(root, f))
                l = b.readline()
                if l.startswith("FAILED") or l.startswith("succeeded"):
                    print >> out, f, l,
                b.close()
    print >> out, "--"
    print >> out, basename(makeAllLog)
    warnings = 0
    errors = 0
    for l in file(makeAllLog):
        if "warning " in l.lower() or "warnung" in l.lower():
            warnings += 1
        if "error " in l.lower():
            errors += 1
            failed += l
    print >> out, warnings, "warnings"
    if errors:
        print >> out, errors, "errors"
        failed += "make debug failed\n\n"
    print >> out, "--"

    if failed:
        fromAddr = "*****@*****.**"
        toAddr = "*****@*****.**"
        message = """From: "%s" <%s>
To: %s
Subject: Error occurred while building

%s""" % (build, fromAddr, toAddr, failed)
        server = smtplib.SMTP(smtpServer)
        server.sendmail(fromAddr, toAddr, message)
        server.quit()
Example n. 51
    def run(self, edit, character='^'):
        """Available parameters:
        edit (sublime.Edit)
            The edit parameter from TextCommand.
        character (str) = '^'
            The character to insert when suggesting where the test assertions should go.
        """

        view = self.view
        view.replace(edit, view.sel()[0], character)
        insert_at = view.sel()[0].begin()

        listener = sublime_plugin.find_view_event_listener(view, SyntaxTestHighlighterListener)
        if not listener.header:
            return

        lines, line = listener.get_details_of_line_being_tested()
        end_token = listener.header.comment_end
        # don't duplicate the end token if it is on the line but not selected
        if end_token and view.sel()[0].end() == lines[0].line_region.end():
            end_token = ' ' + end_token
        else:
            end_token = ''

        scopes = []
        length = 0
        # find the following columns on the line to be tested where the scopes don't change
        test_at_start_of_comment = False
        col = view.rowcol(insert_at)[1]
        assertion_colrange = lines[0].assertion_colrange or (-1, -1)
        if assertion_colrange[0] == assertion_colrange[1]:
            col = assertion_colrange[1]
            test_at_start_of_comment = True
            lines = lines[1:]

        col_start, col_end = lines[0].assertion_colrange
        base_scope = path.commonprefix([
            view.scope_name(pos)
            for pos in range(line.begin() + col_start, line.begin() + col_end)
        ])

        for pos in range(line.begin() + col, line.end() + 1):
            scope = view.scope_name(pos)
            if len(scopes) == 0:
                scopes.append(scope)
            elif not scope.startswith(base_scope):
                break
            length += 1
            if test_at_start_of_comment:
                break

        # find the unique scopes at each existing assertion position
        if lines and not test_at_start_of_comment:
            col_start, col_end = lines[0].assertion_colrange
            for pos in range(line.begin() + col_start, line.begin() + col_end):
                scope = view.scope_name(pos)
                if scope not in scopes:
                    scopes.append(scope)

        suggest_suffix = get_setting('syntax_test.suggest_scope_suffix', True)

        scope = find_common_scopes(scopes, not suggest_suffix)

        # delete the existing selection
        if not view.sel()[0].empty():
            view.erase(edit, view.sel()[0])

        view.insert(edit, insert_at, (character * max(1, length)) + ' ' + scope + end_token)

        # move the selection to cover the added scope name,
        # so that the user can easily insert another ^ to extend the test
        view.sel().clear()
        view.sel().add(sublime.Region(
            insert_at + length,
            insert_at + length + len(' ' + scope + end_token)
        ))
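
base_scope above is the character-wise common prefix of space-separated scope selectors, so it can end mid-token (e.g. 'source.python meta.fu'). A hedged helper that trims back to a whole selector, in the spirit of the commonprefix wrapper in Example n. 54 below:

from os import path

def common_scope(scope_names):
    # Character-wise prefix trimmed back to the last complete
    # space-separated selector.
    common = path.commonprefix(scope_names)
    if common and not common.endswith(' '):
        common = common.rpartition(' ')[0]
    return common.strip()

assert common_scope(['source.python meta.function.python',
                     'source.python meta.furniture']) == 'source.python'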
Example n. 52
    def _poll(self, host):
        self._reconnect_vcenter_if_necessary(host)
        vcenter_options = self.vcenters[host]
        values = {'clusters': {}, 'datacenters': {}}
        service_instance = vcenter_options['service_instance']

        nsx_t_clusters = set()

        with filter_spec_context(service_instance,
                                 obj_type=vim.HostSystem,
                                 path_set=['name', 'parent', 'config.network.opaqueSwitch']) as filter_spec:
            for h in vcu.collect_properties(service_instance, [filter_spec]):
                if 'config.network.opaqueSwitch' not in h:
                    LOG.debug("Broken ESXi host %s detected in cluster %s",
                              h['name'], h['parent'])
                    continue
                if len(h['config.network.opaqueSwitch']) > 0:
                    LOG.debug("(Possible) NSX-T switch found on %s", h['name'])
                    nsx_t_clusters.add(h['parent'])

        with filter_spec_context(service_instance) as filter_spec:
            availability_zones = set()
            cluster_options = None

            for cluster in vcu.collect_properties(service_instance, [filter_spec]):
                cluster_name = cluster['name']
                match = self.CLUSTER_MATCH.match(cluster_name)

                if not match:
                    LOG.debug(
                        "%s: Ignoring cluster %s "
                        "not matching naming scheme", host, cluster_name)
                    continue
                bb_name_no_zeroes = f'bb{match.group(1)}'

                nsx_t_enabled = cluster['obj'] in nsx_t_clusters
                if nsx_t_enabled:
                    LOG.debug('NSX-T enabled for %s', cluster_name)

                parent = cluster['parent']
                availability_zone = parent.parent.name.lower()

                availability_zones.add(availability_zone)
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(name=bb_name_no_zeroes,
                                       cluster_name=cluster_name,
                                       availability_zone=availability_zone,
                                       nsx_t_enabled=nsx_t_enabled,
                                       vcenter_name=vcenter_options['name'])

                if cluster_options.get('pbm_enabled', 'false') != 'true':
                    datastores = cluster['datastore']
                    datastore_names = [datastore.name
                                       for datastore in datastores
                                       if self.EPH_MATCH.match(datastore.name)]
                    eph = commonprefix(datastore_names)
                    cluster_options.update(datastore_regex=f"^{eph}.*")
                    hagroups = set()
                    for name in datastore_names:
                        m = self.HAGROUP_MATCH.match(name)
                        if not m:
                            continue
                        hagroups.add(m.group('hagroup').lower())
                    if {'a', 'b'}.issubset(hagroups):
                        LOG.debug('ephemeral datastore hagroups enabled for %s', cluster_name)
                        cluster_options.update(datastore_hagroup_regex=self.HAGROUP_MATCH.pattern)

                for network in cluster['network']:
                    try:
                        match = self.BR_MATCH.match(network.name)
                        if match:
                            cluster_options['bridge'] = match.group(0).lower()
                            cluster_options['physical'] = match.group(1).lower()
                            break
                    except vim.ManagedObjectNotFound:
                        # sometimes a portgroup might be already deleted when
                        # we try to query its name here
                        continue

                if 'bridge' not in cluster_options and not nsx_t_enabled:
                    LOG.warning("%s: Skipping cluster %s, "
                                "cannot find bridge matching naming scheme",
                                host, cluster_name)
                    continue

                values['clusters'][cluster_name] = cluster_options

            for availability_zone in availability_zones:
                cluster_options = self.global_options.copy()
                cluster_options.update(vcenter_options)
                cluster_options.pop('service_instance', None)
                cluster_options.update(availability_zone=availability_zone)
                values['datacenters'][availability_zone] = cluster_options

        return values
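
datastore_regex interpolates the raw common prefix into a regular expression; if datastore names ever contained metacharacters, escaping would be needed. A defensive sketch (helper name illustrative):

import re
from os.path import commonprefix

def datastore_regex(names):
    # Match anything sharing the common prefix, with the prefix escaped
    # so '.' or '+' in a name cannot change the pattern.
    return "^" + re.escape(commonprefix(names)) + ".*"

print(datastore_regex(["eph_bb091_a", "eph_bb091_b"]))  # ^eph_bb091_.*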
Example n. 53
    def test_that_a_valid_license_notice_exists_in_every_source_file_and_that_global_licensing_information_is_correct(
            self):
        license_notice = compile(
            r"""(?P<comment_start>#|--|//) This Source Code Form is subject to the terms of the Mozilla Public
(?P=comment_start) License, v\. 2\.0\. If a copy of the MPL was not distributed with this file,
(?P=comment_start) You can obtain one at http://mozilla\.org/MPL/2\.0/\.
(?P=comment_start)
(?P=comment_start) Copyright \(c\) (?P<first_year>20\d\d)(-(?P<last_year>20\d\d))?, Lars Asplund lars\.anders\.asplund@gmail\.com"""
        )
        log_date = compile(r'Date:\s*(?P<year>20\d\d)-\d\d-\d\d')
        licensed_files = []
        repo_root = abspath(join(dirname(__file__), '..'))
        for root, dirs, files in walk(repo_root):
            for f in files:
                if 'preprocessed' in root:
                    continue
                osvvm_directory = abspath(join(repo_root, 'vhdl', 'osvvm'))
                if commonprefix([osvvm_directory,
                                 abspath(join(root, f))]) == osvvm_directory:
                    continue
                osvvm_integration_example_directory = abspath(
                    join(repo_root, 'examples', 'osvvm_integration', 'src'))
                if commonprefix([
                        osvvm_integration_example_directory,
                        abspath(join(root, f))
                ]) == osvvm_integration_example_directory:
                    continue
                if splitext(f)[1] in ['.vhd', '.vhdl', '.py', '.v', '.sv']:
                    licensed_files.append(join(root, f))
        i = 0
        min_first_year = None
        max_last_year = None
        for f in licensed_files:
            stdout.write('\r%d/%d' % (i + 1, len(licensed_files)))
            stdout.flush()
            i += 1
            proc = Popen(['git', 'log',  '--follow', '--date=short', f], \
                  bufsize=0, stdout=PIPE, stdin=PIPE, stderr=STDOUT, universal_newlines=True)
            out, _ = proc.communicate()
            first_year = None
            last_year = None
            for date in log_date.finditer(out):
                first_year = int(
                    date.group('year')) if first_year is None else min(
                        int(date.group('year')), first_year)
                last_year = int(
                    date.group('year')) if last_year is None else max(
                        int(date.group('year')), last_year)
            min_first_year = first_year if min_first_year is None else min(
                min_first_year, first_year)
            max_last_year = last_year if max_last_year is None else max(
                max_last_year, last_year)

            with open(f) as fp:
                code = fp.read()
                match = license_notice.search(code)
                self.assertIsNotNone(match,
                                     "Failed to find license notice in %s" % f)
                if first_year == last_year:
                    self.assertEqual(
                        int(match.group('first_year')), first_year,
                        'Expected copyright year to be %d in %s' %
                        (first_year, f))
                    self.assertIsNone(
                        match.group('last_year'),
                        'Expected no copyright years range in %s' %
                        join(root, f))
                else:
                    self.assertIsNotNone(
                        match.group('last_year'),
                        'Expected copyright year range %d-%d in %s' %
                        (first_year, last_year, f))
                    self.assertEqual(
                        int(match.group('first_year')), first_year,
                        'Expected copyright year range to start with %d in %s'
                        % (first_year, f))
                    self.assertEqual(
                        int(match.group('last_year')), last_year,
                        'Expected copyright year range to end with %d in %s' %
                        (last_year, f))
        print('\n')
Example n. 54
def commonprefix(args, sep='/'):
    return path.commonprefix(args).rpartition(sep)[0]
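
This wrapper addresses the classic pitfall of path.commonprefix on file paths: the character-wise result need not end on a component boundary. Trimming at the separator restores that, e.g.:

# '/usr/lib' vs '/usr/local' share '/usr/l' character-wise, which is
# not a real component; trimming at '/' yields '/usr'.
assert commonprefix(['/usr/lib', '/usr/local']) == '/usr'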
Example n. 55
def tabularize_dictionary(load_path, save_path, max_defs=0):

    with open(load_path) as f:
        dictionary = json.load(f)

    lmt = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
    words = sorted(list(dictionary.keys()))
    with open(save_path, 'w') as f:
        for word in words:
            if not dictionary[word]:
                continue

            if max_defs == 0:
                n = len(dictionary[word])
            else:
                n = max_defs

            pos2defs = defaultdict(list)
            for pos, definition in dictionary[word]:
                if pos is not None:
                    pos = pos.lower()
                if pos == 'idioms':
                    continue
                pos = POS_MAP.get(pos)
                pos2defs[pos].append(definition)

            tag = None
            for p in ['VERB', 'NOUN', 'ADJ']:
                lemma = lmt(word, p)[0]
                if lemma != word and\
                    dictionary.get(lemma) != dictionary.get(word):
                    prefix = commonprefix([lemma, word])
                    suffix = word[len(prefix):]
                    if p == 'VERB':
                        if suffix.endswith('ing'):
                            tag = 'VBG'
                        elif suffix.endswith('s'):
                            tag = 'VBZ'
                        elif suffix.endswith('d'):
                            tag = 'VBD'
                        elif suffix.endswith('n'):
                            tag = 'VBN'
                    elif p == 'NOUN':
                        tag = 'NNS'
                    elif p == 'ADJ':
                        if suffix.endswith('t'):
                            tag = 'JJS'
                        elif suffix.endswith('r'):
                            tag = 'JJR'

                    if dictionary.get(lemma):
                        for pos, definition in dictionary[lemma]:
                            if pos is not None:
                                pos = pos.lower()
                            if pos == 'idioms':
                                continue
                            pos = POS_MAP.get(pos)
                            if len(definition) == 0:
                                continue
                            if pos == p:
                                f.write("%s\t%s\t%s\t%s\n" % (word, pos, tag, definition))


            for pos in pos2defs:
                for definition in pos2defs[pos][:n]:
                    if len(definition) == 0:
                        continue
                    
                    tag = None
                    skip = 0

                    for p in ['VERB', 'NOUN', 'ADJ']:
                        lemma = lmt(word, p)[0]
                        if lemma != word and\
                           dictionary.get(lemma) == dictionary.get(word):
                            if p == pos:
                                prefix = commonprefix([lemma, word])
                                suffix = word[len(prefix):]
                                if pos == 'VERB':
                                    if suffix.endswith('ing'):
                                        tag = 'VBG'
                                    elif suffix.endswith('s'):
                                        tag = 'VBZ'
                                    elif suffix.endswith('d'):
                                        tag = 'VBD'
                                    elif suffix.endswith('n'):
                                        tag = 'VBN'
                                elif pos == 'NOUN':
                                    tag = 'NNS'
                                elif pos == 'ADJ':
                                    if suffix.endswith('t'):
                                        tag = 'JJS'
                                    elif suffix.endswith('r'):
                                        tag = 'JJR'
                                if skip == 1:
                                    f.write("%s\t%s\t%s\t%s\n" % (word, pos, tag, definition))
                                break
                            else:
                                skip = 1

                    if not skip:
                        f.write("%s\t%s\t%s\t%s\n" % (word, pos, tag, definition))
Example n. 56
    def __call__(archive,
                 annex=None,
                 add_archive_leading_dir=False,
                 strip_leading_dirs=False,
                 leading_dirs_depth=None,
                 leading_dirs_consider=None,
                 use_current_dir=False,
                 delete=False,
                 key=False,
                 exclude=None,
                 rename=None,
                 existing='fail',
                 annex_options=None,
                 copy=False,
                 commit=True,
                 allow_dirty=False,
                 stats=None,
                 drop_after=False,
                 delete_after=False):
        """
        Returns
        -------
        annex
        """
        if exclude:
            exclude = assure_tuple_or_list(exclude)
        if rename:
            rename = assure_tuple_or_list(rename)

        # TODO: actually I see possibly us asking user either he wants to convert
        # his git repo into annex
        archive_path = archive
        pwd = getpwd()
        if annex is None:
            annex = get_repo_instance(pwd, class_=AnnexRepo)
            if not isabs(archive):
                # if not absolute -- relative to wd and thus
                archive_path = normpath(opj(pwd, archive))
                # abspath(archive) is not "good" since dereferences links in the path
                # archive_path = abspath(archive)
        elif not isabs(archive):
            # if we are given an annex, then assume that given path is within annex, not
            # relative to PWD
            archive_path = opj(annex.path, archive)
        annex_path = annex.path

        # _rpath below should depict paths relative to the top of the annex
        archive_rpath = relpath(archive_path, annex_path)

        # TODO: somewhat too cruel -- may be an option or smth...
        if not allow_dirty and annex.dirty:
            # already saved me once ;)
            raise RuntimeError(
                "You better commit all the changes and untracked files first")

        if not key:
            # we were given a file which must exist
            if not exists(archive_path):
                raise ValueError("Archive {} does not exist".format(archive))
            # TODO: support adding archives content from outside the annex/repo
            origin = 'archive'
            key = annex.get_file_key(archive_rpath)
            archive_dir = dirname(archive_path)
        else:
            origin = 'key'
            key = archive
            archive_dir = None  # We must not have anything to do with the location under .git/annex

        archive_basename = file_basename(archive)

        if not key:
            # TODO: allow for it to be under git???  how to reference then?
            raise NotImplementedError(
                "Provided file %s is not under annex.  We don't yet support "
                "adding everything straight to git" % archive)

        # are we in a subdirectory of the repository?
        pwd_under_annex = commonprefix([pwd, annex_path]) == annex_path
        # if so, we should add content under that subdirectory;
        # get the path relative to the repo top
        if use_current_dir:
            # if outside -- extract to the top of repo
            extract_rpath = relpath(pwd, annex_path) \
                if pwd_under_annex \
                else None
        else:
            extract_rpath = relpath(archive_dir, annex_path)

        # relpath might return '.' as the relative path to curdir, which then normalize_paths
        # would take as instructions to really go from cwd, so we need to sanitize
        if extract_rpath == curdir:
            extract_rpath = None  # no special relpath from top of the repo

        # and from now on operate on the key, or wherever the content is available "canonically"
        try:
            key_rpath = annex.get_contentlocation(
                key)  # , relative_to_top=True)
        except Exception:
            raise RuntimeError(
                "Content of %s seems to be N/A.  Fetch it first" % key)

        # now we simply need to go through every file in that archive and
        lgr.info("Adding content of the archive %s into annex %s", archive,
                 annex)

        from datalad.customremotes.archives import ArchiveAnnexCustomRemote
        # TODO: shouldn't we be able just to pass existing AnnexRepo instance?
        # TODO: we will use persistent cache so we could just (ab)use possibly extracted archive
        annexarchive = ArchiveAnnexCustomRemote(path=annex_path,
                                                persistent_cache=True)
        # Extracted content will be moved away, so allow the cache entry to already exist
        annexarchive.cache.allow_existing = True
        earchive = annexarchive.cache[key_rpath]

        # TODO: check whether it may have already been added
        if ARCHIVES_SPECIAL_REMOTE not in annex.get_remotes():
            lgr.debug(
                "Adding new special remote {}".format(ARCHIVES_SPECIAL_REMOTE))
            annex.init_remote(ARCHIVES_SPECIAL_REMOTE, [
                'encryption=none', 'type=external',
                'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE, 'autoenable=true'
            ])
        else:
            lgr.debug("Special remote {} already exists".format(
                ARCHIVES_SPECIAL_REMOTE))

        try:
            old_always_commit = annex.always_commit
            annex.always_commit = False

            if annex_options:
                if isinstance(annex_options, string_types):
                    annex_options = shlex.split(annex_options)

            leading_dir = earchive.get_leading_directory(
                depth=leading_dirs_depth, exclude=exclude, consider=leading_dirs_consider) \
                if strip_leading_dirs else None
            leading_dir_len = len(leading_dir) + len(
                opsep) if leading_dir else 0

            # we need to create a temporary directory at the top level which would later be
            # removed
            prefix_dir = basename(tempfile.mkdtemp(prefix=".datalad", dir=annex_path)) \
                if delete_after \
                else None

            # dedicated stats which would be added to passed in (if any)
            outside_stats = stats
            stats = ActivityStats()

            for extracted_file in earchive.get_extracted_files():
                stats.files += 1
                extracted_path = opj(earchive.path, extracted_file)

                if islink(extracted_path):
                    link_path = realpath(extracted_path)
                    if not exists(
                            link_path
                    ):  # TODO: config  addarchive.symlink-broken='skip'
                        lgr.warning("Path %s points to non-existing file %s" %
                                    (extracted_path, link_path))
                        stats.skipped += 1
                        continue
                    # TODO: check if it points outside of the archive -- warn and skip

                # preliminary target name which might get modified by renames
                target_file_orig = target_file = extracted_file

                # strip leading dirs
                target_file = target_file[leading_dir_len:]

                if add_archive_leading_dir:
                    target_file = opj(archive_basename, target_file)

                if rename:
                    target_file = apply_replacement_rules(rename, target_file)

                # skip to the next file if extracted_file matches an exclude pattern
                if exclude:
                    try:  # since we need to skip outside loop from inside loop
                        for regexp in exclude:
                            if re.search(regexp, extracted_file):
                                lgr.debug(
                                    "Skipping {extracted_file} since contains {regexp} pattern"
                                    .format(**locals()))
                                stats.skipped += 1
                                raise StopIteration
                    except StopIteration:
                        continue

                if prefix_dir:
                    target_file = opj(prefix_dir, target_file)

                url = annexarchive.get_file_url(
                    archive_key=key,
                    file=extracted_file,
                    size=os.stat(extracted_path).st_size)

                # lgr.debug("mv {extracted_path} {target_file}. URL: {url}".format(**locals()))

                if lexists(target_file):
                    if md5sum(target_file) == md5sum(extracted_path):
                        # must have the same content; just add, possibly, a new extra URL
                        pass
                    elif existing == 'fail':
                        raise RuntimeError(
                            "File {} already exists, but new (?) file {} was instructed "
                            "to be placed there while overwrite=False".format(
                                target_file, extracted_file))
                    elif existing == 'overwrite':
                        stats.overwritten += 1
                        # to make sure it doesn't conflict -- might have been a tree
                        rmtree(target_file)
                    else:
                        target_file_orig_ = target_file

                        # To keep extension intact -- operate on the base of the filename
                        p, fn = os.path.split(target_file)
                        ends_with_dot = fn.endswith('.')
                        fn_base, fn_ext = file_basename(fn, return_ext=True)

                        if existing == 'archive-suffix':
                            fn_base += '-%s' % archive_basename
                        elif existing == 'numeric-suffix':
                            pass  # handled by the shared suffix-increment loop below
                        else:
                            raise ValueError(existing)
                        # keep incrementing index in the suffix until file doesn't collide
                        suf, i = '', 0
                        while True:
                            target_file_new = opj(
                                p, fn_base + suf +
                                ('.' if
                                 (fn_ext or ends_with_dot) else '') + fn_ext)
                            if not lexists(target_file_new):
                                break
                            lgr.debug("File %s already exists" %
                                      target_file_new)
                            i += 1
                            suf = '.%d' % i
                        target_file = target_file_new
                        lgr.debug("Original file %s will be saved into %s" %
                                  (target_file_orig_, target_file))
                        # TODO: should we reserve smth like
                        # stats.clobbed += 1

                if target_file != target_file_orig:
                    stats.renamed += 1

                #target_path = opj(getpwd(), target_file)
                if copy:
                    raise NotImplementedError(
                        "Not yet copying from 'persistent' cache")
                else:
                    # os.renames(extracted_path, target_path)
                    # addurl implementation relying on annex'es addurl below would actually copy
                    pass

                lgr.debug(
                    "Adding %s to annex pointing to %s and with options %r",
                    target_file, url, annex_options)

                target_file_rpath = opj(
                    extract_rpath,
                    target_file) if extract_rpath else target_file
                out_json = annex.add_url_to_file(target_file_rpath,
                                                 url,
                                                 options=annex_options,
                                                 batch=True)

                # equivalent check: annex.is_under_annex(target_file, batch=True)
                if out_json.get('key') is not None:
                    # due to http://git-annex.branchable.com/bugs/annex_drop_is_not___34__in_effect__34___for_load_which_was___34__addurl_--batch__34__ed_but_not_yet_committed/?updated
                    # we need to maintain a list of those to be dropped files
                    if drop_after:
                        annex.drop_key(out_json['key'], batch=True)
                        stats.dropped += 1
                    stats.add_annex += 1
                else:
                    lgr.debug(
                        "File {} was added to git, not adding url".format(
                            target_file))
                    stats.add_git += 1

                if delete_after:
                    # forcing since it is only staged, not yet committed
                    annex.remove(target_file_rpath, force=True)  # TODO: batch!
                    stats.removed += 1

                # # chaining 3 annex commands, 2 of which not batched -- less efficient but more bullet proof etc
                # annex.add(target_path, options=annex_options)
                # # above action might add to git or to annex
                # if annex.file_has_content(target_path):
                #     # if not --  it was added to git, if in annex, it is present and output is True
                #     annex.add_url_to_file(target_file, url, options=['--relaxed'], batch=True)
                #     stats.add_annex += 1
                # else:
                #     lgr.debug("File {} was added to git, not adding url".format(target_file))
                #     stats.add_git += 1
                # # TODO: actually check if it is anyhow different from a previous version. If not
                # # then it wasn't really added

                del target_file  # Done with target_file -- just to have clear end of the loop

            if delete and archive and origin != 'key':
                lgr.debug("Removing the original archive {}".format(archive))
                # force=True since it might still be staged and would otherwise fail
                annex.remove(archive_rpath, force=True)

            lgr.info("Finished adding %s: %s" %
                     (archive, stats.as_str(mode='line')))

            if outside_stats:
                outside_stats += stats
            if commit:
                commit_stats = outside_stats if outside_stats else stats
                annex.commit(
                    "Added content extracted from %s %s\n\n%s" %
                    (origin, archive, commit_stats.as_str(mode='full')),
                    _datalad_msg=True)
                commit_stats.reset()
        finally:
            # since we batched addurl, we should close those batched processes
            annex.precommit()

            if delete_after:
                prefix_path = opj(annex_path, prefix_dir)
                if exists(prefix_path):  # probably would always be there
                    lgr.info(
                        "Removing temporary directory under which extracted files were annexed: %s",
                        prefix_path)
                    rmtree(prefix_path)

            annex.always_commit = old_always_commit
            # remove what is left and/or everything upon failure
            earchive.clean(force=True)

        return annex
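
A note on the subdirectory check used above: commonprefix([pwd, annex_path]) == annex_path compares raw characters, not path components, so a sibling directory whose name merely extends the repo path would also pass. A minimal component-aware sketch (the helper name is_under and the sample paths are illustrative, not part of datalad):

from os.path import commonprefix, normpath, sep

def is_under(child, parent):
    # Compare whole path components instead of raw characters.
    child, parent = normpath(child), normpath(parent)
    return child == parent or child.startswith(parent + sep)

# The character-based check wrongly reports the sibling as nested:
print(commonprefix(["/data/repo2/x", "/data/repo"]) == "/data/repo")  # True
print(is_under("/data/repo2/x", "/data/repo"))                        # False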
Example n. 57
def svg_heatmap(data,
                filename,
                row_labels=None,
                box_size=4,
                index=None,
                all_indices=None,
                all_colnames=None,
                internal_datanames=None,
                cmap=ISH,
                norm_rows_by=None,
                draw_row_labels=False,
                color_row_labels=False,
                col_sep='',
                box_height=None,
                total_width=None,
                draw_box=False,
                draw_name=False,
                data_names=None,
                make_hyperlinks=False,
                progress_bar=False,
                max_width=np.inf,
                x_min=10,
                y_min=10,
                spacers=None,
                convert=False,
                squeeze_rows=None,
                cmap_by_prefix=None,
                draw_average=False,
                draw_average_only=False,
                average_scale=1,
                split_columns=False,
                vspacer=30,
                hatch_nan=True,
                hatch_size=20,
                figure_title=None,
                nan_replace=None,
                first_col='',
                last_col=''):
    """
    Draw heatmap as an SVG file stored in filename

    *data* can be either a 2D array-like type (list of lists, numpy array,
    pandas DataFrame, etc), or a tuple of 2D array-likes, in which case a
    separator will be added between each one in the output

    *cmap* is a matplotlib-like colormap (i.e. a callable that expects floats
    in the range 0.0-1.0), or an iterable of the same length as the tuple
    *data* containing colormaps

    *row_labels* can be supplied; otherwise they will be detected from the
    first item in *data*, if available, and if not they will be blank.

    If *total_width* is supplied, width of each dataset in *data* will be
    scaled to that constant. If *box_height* is supplied, the height of each
    row will be *box_height*, otherwise it will be equal to the width of each
    element. If neither are supplied, elements will be squares equal to
    *box_size*. IT IS STRONGLY RECOMMENDED that if supplying *total_width*,
    *box_height* also be specified, but this is not enforced.

    *draw_row_labels*, if True, will label the rows on the right hand side. As
    of 2013/09/03, this won't scale the SVG properly, so including the
    resulting file in an html element won't display properly.

    *spacers* is the distance between adjacent datasets.  It can either be a
    number, in which case it will apply to all datasets, or an iterable
    giving different distances. If the iterable is shorter than the number of
    datasets, the last value will be repeated.

    """
    import svgwrite as svg
    try:
        import pandas as pd
        has_pandas = True
    except ImportError:
        has_pandas = False
        assert all_indices
        assert all_colnames

    if not isinstance(data, tuple):
        data = (data, )

    if not isinstance(norm_rows_by, tuple):
        norm_rows_by = repeat(norm_rows_by)

    old_data = data
    colname_tuple = repeat(None)
    if split_columns and has_pandas:
        from Utils import sel_startswith
        data = []
        new_normers = []
        new_cmaps = []
        if isinstance(cmap, tuple):
            cmaps = cmap
        else:
            cmaps = repeat(cmap)
        for dataset, normer, c_cmap in zip(old_data, norm_rows_by, cmaps):
            if dataset is None:
                data.append(dataset)
                new_normers.append(normer)
                new_cmaps.append(c_cmap)
                continue

            if not isinstance(dataset, pd.DataFrame):
                dataset = pd.DataFrame(dataset).T
            colnames = list(
                sorted({col.split(col_sep)[0]
                        for col in dataset.columns}))
            data.extend(
                dataset.select(**sel_startswith(colname))
                for colname in colnames)
            new_normers.extend(normer for colname in colnames)
            new_cmaps.extend(c_cmap for colname in colnames)
        data = tuple(data)
        norm_rows_by = tuple(new_normers)
        cmap = tuple(new_cmaps)
    elif split_columns and all_colnames:
        colnames = list(sorted({col.split(col_sep)[0]
                                for col in all_colnames}))
        colname = colnames[0]
        data = tuple([
            data[:,
                 array([c.startswith(colname) for c in internal_datanames])]
            for colname in colnames
        ])
        colname_tuple = tuple(
            [c for c in all_colnames if c.startswith(colname)]
            for colname in colnames)
    elif not split_columns and all_colnames:
        colname_tuple = tuple(
            [c for c in all_colnames if c.startswith(dataname)]
            for dataname in internal_datanames)

    rows, cols = np.shape([ds for ds in data if ds is not None][0])
    if index is not None:
        rows = len(index)
    if box_height is None:
        box_height = box_size

    if row_labels is None:
        if index is not None:
            row_labels = list(index)
        elif hasattr(data[0], 'index'):
            row_labels = list(data[0].index)
        else:
            row_labels = ['' for row in range(rows)]

    if total_width is not None and max_width is not np.inf:
        boxes_per_row = max_width // (1.1 * total_width)
        if ((boxes_per_row + 1) * 1.1 * total_width - .1 * total_width <
                max_width):
            boxes_per_row += 1

        num_plotted_rows = np.ceil(
            len(data) / boxes_per_row + (draw_average or draw_average_only))
        if figure_title is None:
            fig_title_height = 0
        elif isinstance(figure_title, tuple):
            fig_title_height = len(figure_title)
        else:
            fig_title_height = 1
        dwg = svg.Drawing(
            filename,
            size=(max_width + 2 * x_min + 200 * draw_row_labels,
                  2 * y_min + (num_plotted_rows *
                               (rows) * box_height) + 80 * (fig_title_height) +
                  80 * draw_name + (num_plotted_rows - 1) * vspacer),
        )
    elif total_width is not None:
        width = len(data) * total_width * 1.1 - .1 * total_width
        height = rows * box_height
        max_row_label_len = max(len(str(i)) for i in row_labels)
        dwg = svg.Drawing(
            filename,
            size=(width + 2 * x_min + 20 * draw_row_labels * max_row_label_len,
                  height + 2 * y_min + 80 * draw_name +
                  (80 * (figure_title is not None))))
    else:
        dwg = svg.Drawing(filename)
    dwg.add(svg.base.Title(path.basename(filename)))

    pat = dwg.pattern(id='hatch',
                      insert=(0, 0),
                      size=(hatch_size, hatch_size),
                      patternUnits='userSpaceOnUse')
    g = pat.add(dwg.g(style="fill:none; stroke:#B0B0B0; stroke-width:1"))
    g.add(dwg.path(('M0,0', 'l{hatch},{hatch}'.format(hatch=hatch_size))))
    g.add(
        dwg.path(('M{hatch2},0 l{hatch2},{hatch2}'.format(hatch2=hatch_size /
                                                          2).split())))
    g.add(
        dwg.path(('M0,{hatch2} l{hatch2},{hatch2}'.format(hatch2=hatch_size /
                                                          2).split())))

    dwg.add(pat)

    if box_height is None:
        box_height = box_size

    if not hasattr(cmap, "__len__"):
        cmap = [cmap for frame in data]

    if data_names is None:
        data_names = ["" for frame in data]

    if len(cmap) != len(data):
        raise ValueError(
            "cmap and data should be the same length ({} vs {})".format(
                len(cmap), len(data)))

    if not hasattr(spacers, "__len__"):
        spacers = [spacers]
    else:
        spacers = list(spacers)
    while len(spacers) < len(data):
        spacers.append(spacers[-1])

    if ((isinstance(norm_rows_by, repeat)
         and isinstance(next(norm_rows_by), str)
         and next(norm_rows_by).startswith('center0all'))
            or (not isinstance(norm_rows_by, repeat)
                and isinstance(norm_rows_by[0], str)
                and np.any([i.startswith('center0all')
                            for i in norm_rows_by]))):
        all_data = pd.concat(data, axis=1)

    if squeeze_rows is not None:
        data = [
            pd.DataFrame(d.apply(squeeze_rows, axis=1),
                         columns=[path.commonprefix(list(d.columns))])
            for d in data
        ]

    x_start = x_min
    y_start = y_min
    y_diff = 0
    iterator = zip(data, cmap, data_names, norm_rows_by, spacers,
                   colname_tuple)
    if figure_title:
        if isinstance(figure_title, tuple):
            font_size = '3em'
            for title_line in figure_title:
                dwg.add(
                    dwg.text(
                        title_line, (
                            x_start,
                            y_start + 75,
                        ),
                        style="font-size:{};font-family:sans-serif".format(
                            font_size)))
                y_start += 80
                font_size = '1.5em'

        else:
            dwg.add(
                dwg.text(figure_title, (
                    x_start,
                    y_start + 75,
                ),
                         style="font-size:3em;font-family:sans-serif"))
            y_start += 80
    if progress_bar:
        from progressbar import ProgressBar
        pbar = ProgressBar(maxval=len(data) * rows).start()
        pbar_val = 0

    for frame, c_cmap, name, normer, spacer, colnames in iterator:
        if frame is None:
            dwg.add(dwg.text(normer, (x_start, y_start + box_height / 2)))
            if total_width is not None:
                if spacer is None:
                    x_start += total_width * 1.1
                else:
                    x_start += total_width + spacer
            else:
                if spacer is None:
                    x_start += box_size
                else:
                    x_start += spacer
            if x_start > max_width:
                x_start = x_min
                y_start += box_height + vspacer
            continue
        if has_pandas:
            frame = pd.DataFrame(frame)
        if index is not None:
            if has_pandas:
                frame = frame.ix[index]
            else:
                setix = set(index)
                #selector = [i for i, name in enumerate(all_indices) if name in setix]
                #frame = frame[selector, :]
        if normer is None:
            norm_data = array(frame.copy())
        elif normer == 'mean':
            if has_pandas:
                norm_data = array(
                    frame.divide(frame.dropna(axis=1, how='all').mean(axis=1) +
                                 10,
                                 axis=0))
            else:
                norm_data = frame / (
                    frame[:, isfinite(frame[0, :])].mean(axis=1) + 10).reshape(
                        (rows, 1))
        elif normer == 'max':
            if has_pandas:
                norm_data = array(
                    frame.divide(frame.dropna(axis=1, how='all').max(axis=1) +
                                 10,
                                 axis=0))
            else:
                norm_data = frame / (
                    frame[:, isfinite(frame[0, :])].max(axis=1) + 10).reshape(
                        (rows, 1))
        elif normer == 'maxall':
            if has_pandas:
                maxall = frame.max(axis=1)
                assert len(data) == len(new_normers)
                for old_frame, norm_type in zip(data, new_normers):
                    if norm_type != 'maxall': continue
                    if old_frame is not None:
                        old_frame = old_frame.max(
                            axis=1
                        ).ix[index if index is not None else old_frame.index]
                        maxall = maxall.where(maxall > old_frame, old_frame)
                norm_data = array(frame.divide(maxall + 10, axis=0))
            else:
                norm_data = frame / (old_data[:, isfinite(old_data[0, :])].max(
                    axis=1) + 10).reshape((rows, 1))
        elif normer == 'fullvar':
            norm_data = frame.subtract(
                frame.dropna(axis=1, how='all').min(axis=1) - 1e-6, axis=0)
            norm_data = array(
                norm_data.divide(norm_data.dropna(axis=1,
                                                  how='all').max(axis=1),
                                 axis=0))
        elif normer == 'center0':
            norm_data = array(
                0.5 + 0.5 *
                frame.divide(frame.dropna(axis=1).abs().max(axis=1), axis=0))
        elif isinstance(normer, str) and normer.startswith('center0min'):
            min_norm = (frame.dropna(axis=1).abs().max(axis=1).clip(
                float(normer[10:]), 1e6))
            norm_data = array(0.5 + 0.5 * frame.divide(min_norm, axis=0))

        elif isinstance(normer, str) and normer.startswith('center0allmin'):
            min_norm = (all_data.dropna(axis=1).abs().max(axis=1).clip(
                float(normer[13:]), 1e6))
            norm_data = array(0.5 + 0.5 * frame.divide(min_norm, axis=0))

        elif normer == 'center0all':
            norm_data = array(0.5 + 0.5 * frame.divide(
                all_data.dropna(how='all', axis=1).abs().max(axis=1), axis=0))
        elif normer == 'center0pre':
            norm_data = array(0.5 + 0.5 * frame)
        elif isinstance(normer, (int, float)):
            norm_data = array(frame / normer)
            normer = 'constant'
        elif index is not None and hasattr(normer, "ix"):
            norm_data = array(frame.divide(normer.ix[index], axis=0))
        elif hasattr(normer, "__len__") and len(normer) == rows:
            if has_pandas:
                norm_data = array(frame.divide(normer, axis=0))
            else:
                norm_data = array(frame / np.reshape(normer, (rows, -1)))

        elif hasattr(normer, "__len__"):
            print('\n' * 5)
            print(len(normer), normer, normer == 'max')
            print(frame.shape)
            raise TypeError("norm_rows_by should be the same shape "
                            "as the number of rows")
        else:
            norm_data = array(frame / normer)

        if not c_cmap or str(c_cmap).lower() == 'default':
            c_cmap = ISH

        new_rows, new_cols = np.shape(frame)
        if hasattr(frame, 'index'):
            col_labels = frame.columns
        elif colnames:
            col_labels = colnames
        else:
            col_labels = ['' for col in range(new_cols)]
        if new_rows != rows:
            raise ValueError("All input elements must have the same number of"
                             " rows (and same row meanings --unchecked)")

        if total_width is not None:
            box_size = total_width / float(new_cols)

        i = 0
        if not draw_average_only:
            for i in range(rows):
                if progress_bar:
                    pbar.update(pbar_val)
                    pbar_val += 1
                prefix = col_labels[0][:col_labels[0].find(col_sep)]
                if cmap_by_prefix:
                    c_cmap = cmap_by_prefix(prefix)
                for j in range(new_cols):
                    g = dwg.g()
                    val = frame.ix[i, j] if has_pandas else frame[i, j]
                    g.add(
                        svg.base.Title("{}, {}: {:.2f}".format(
                            row_labels[i], col_labels[j], val)))
                    hatch = not isfinite(norm_data[i, j])
                    if hatch and nan_replace is not None:
                        if isinstance(nan_replace, float):
                            norm_data[i, j] = nan_replace
                        else:
                            if normer.startswith('center0'):
                                norm_data[i, j] = 0.5
                            else:
                                norm_data[i, j] = 0.0
                    elif hatch:
                        n = 0
                        norm_data[i, j] = 0
                        left = j - 1
                        while left >= 0:
                            if isfinite(norm_data[i, left]):
                                norm_data[i, j] += norm_data[i, left]
                                n += 1
                                break
                            left -= 1
                        right = j + 1
                        while right < norm_data.shape[1]:
                            if isfinite(norm_data[i, right]):
                                norm_data[i, j] += norm_data[i, right]
                                n += 1
                                break
                            right += 1
                        if n == 0:
                            norm_data[i, j] = .5 if 'center' in normer else 0
                        else:
                            norm_data[i, j] /= n
                    g.add(
                        dwg.rect(
                            (x_start + box_size * j, y_start + i * box_height),
                            (box_size, box_height),
                            style="fill:#{:02x}{:02x}{:02x}".format(*[
                                int(255 * x) for x in c_cmap(norm_data[i, j])
                            ])))
                    dwg.add(g)
                    if hatch_nan and hatch:
                        g.add(
                            dwg.rect((x_start + box_size * j,
                                      y_start + i * box_height),
                                     (box_size, box_height),
                                     style="fill:url(#hatch)"))
                    col_base = col_labels[j][:col_labels[j].find(col_sep)]
                    if col_base != prefix:
                        prefix = col_base
                        if cmap_by_prefix:
                            c_cmap = cmap_by_prefix(prefix)
                        g.add(
                            dwg.line(
                                (x_start + box_size * j,
                                 y_start + i * box_height),
                                (x_start + box_size * j, y_start +
                                 (i + 1) * box_height),
                                style="stroke-width:{}; stroke:#000000".format(
                                    .1 * box_size)))
        else:
            for j in range(new_cols):
                hatch = not isfinite(norm_data[0, j])
                if hatch:
                    n = 0
                    norm_data[:, j] = 0
                    if j > 0 and isfinite(norm_data[0, j - 1]):
                        norm_data[:, j] += norm_data[:, j - 1]
                        n += 1
                    if (j + 1 < norm_data.shape[1]
                            and isfinite(norm_data[0, j + 1])):
                        norm_data[:, j] += norm_data[:, j + 1]
                        n += 1
                    norm_data[:, j] /= n
        dwg.add(dwg.text(first_col, (x_start, y_start + (i + 1) * box_height)))
        dwg.add(
            dwg.text(last_col, (x_start + (new_cols - 1) * box_size, y_start +
                                (i + 1) * box_height)))
        if draw_box and not draw_average_only:
            dwg.add(
                dwg.rect((x_start, y_start + 0),
                         (new_cols * box_size, rows * box_height),
                         style="stroke-width:1; "
                         "stroke:#000000; fill:none"))
        if draw_average or draw_average_only:
            avg_frame = norm_data.mean(axis=0)
            for j in range(new_cols):
                col_base = col_labels[j][:col_labels[j].find(col_sep)]
                prefix = col_base
                if cmap_by_prefix:
                    c_cmap = cmap_by_prefix(prefix)
                g = dwg.g()
                g.add(
                    svg.base.Title("Average, {}: {:.2f}".format(
                        col_labels[j], avg_frame[j])))
                g.add(
                    dwg.rect((x_start + box_size * j, y_start +
                              (i + (not draw_average_only)) * box_height),
                             (box_size, box_height),
                             style="fill:#{:02x}{:02x}{:02x}".format(*[
                                 int(255 * x)
                                 for x in c_cmap(average_scale * avg_frame[j])
                             ])))
                if not isfinite(norm_data[0, j]) and hatch_nan:
                    g.add(
                        dwg.rect((x_start + box_size * j, y_start +
                                  (i + (not draw_average_only)) * box_height),
                                 (box_size, box_height),
                                 style="fill:url(#hatch)"))

                dwg.add(g)
            dwg.add(
                dwg.rect((x_start, y_start +
                          (i + (not draw_average_only)) * box_height),
                         (new_cols * box_size, 1 * box_height),
                         style="stroke-width:1; stroke:#000000; fill:none"))

        if draw_name:
            if name == "" and split_columns:
                name = col_base
            xpos = x_start + box_size * new_cols / 2.0
            text = dwg.text(
                '', (xpos, y_start + box_height * (rows) *
                     (1 - draw_average_only) + box_height *
                     (draw_average or draw_average_only) + 13),
                style="text-anchor: middle;font-family:sans-serif;")
            text.add(dwg.tspan("", dy=["-1.5em"]))
            for line in name.split('_'):
                text.add(
                    dwg.tspan(
                        line,
                        dy=["1.5em"],
                        x=[xpos],
                        style="text-anchor: middle;",
                    ))
            dwg.add(text)

        if total_width is not None:
            if spacer is None:
                x_start += total_width * 1.1
            else:
                x_start += total_width + spacer
        else:
            if spacer is None:
                x_start += new_cols * box_size + box_size
            else:
                x_start += new_cols * box_size + spacer

        #y_diff = new_rows * box_height + vspacer
        if total_width is not None and x_start + total_width >= max_width:
            x_start = x_min
            y_start += new_rows * box_height * (
                not draw_average_only) + vspacer
            y_start += box_height * (draw_average_only or draw_average)

    if draw_row_labels and isinstance(row_labels[0], tuple):
        lwidths = Counter()
        for r in row_labels:
            for i, l in enumerate(r):
                lwidths[i] = max(lwidths[i], len(str(l)))
        cum_len = 0
        for i in range(len(lwidths)):
            old_width = lwidths[i]
            lwidths[i] += cum_len
            cum_len += old_width

    if draw_row_labels and not draw_average_only:
        for i in range(rows):
            if color_row_labels:
                style = "font-family:sans-serif; font-size: {size}; fill: {color};".format(
                    size=box_height,
                    color='red'
                    if row_labels[i] in color_row_labels else 'black',
                )
            else:
                style = "font-family:sans-serif; font-size: {}".format(
                    box_height)
            if isinstance(row_labels[i], tuple):
                labeltext = dwg.g()
                for lnum, ltext in enumerate(row_labels[i]):
                    labeltext.add(
                        dwg.text(
                            ltext,
                            (x_start + lwidths[lnum - 1] * 10 + lnum * 50,
                             y_start + i * box_height + box_height),
                            style=style,
                        ))
            else:
                labeltext = (dwg.text(
                    row_labels[i],
                    (x_start, y_start + i * box_height + box_height),
                    style=style,
                ))
            if make_hyperlinks:
                if make_hyperlinks is True:
                    link = dwg.a(
                        'http://insitu.fruitfly.org/cgi-bin/ex/report.pl?ftype={}&ftext={}'
                        .format(
                            2 if (isinstance(row_labels[i], str) and
                                  (row_labels[i].startswith('FBgn'))) else 1,
                            row_labels[i]),
                        target='_replace',
                    )
                else:
                    link = dwg.a(make_hyperlinks.format(frame.index[i]))
                link.add(labeltext)
                dwg.add(link)
            else:
                dwg.add(labeltext)
    if progress_bar:
        pbar.finish()
    dwg.saveas(filename)
    if convert:
        cmd = [
            'convert',
            filename,
            '-units',
            'PixelsPerInch',
            '+antialias',
            '-density',
            '600',
            '-background',
            'none',
            '-transparent',
            'white',
            filename.replace('svg', 'png'),
        ]
        subprocess.Popen(cmd)
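
A minimal usage sketch for svg_heatmap, assuming its dependencies (svgwrite, numpy, pandas) are installed and relying on the module's default ISH colormap; the data and output file name are illustrative:

import numpy as np
import pandas as pd

data = pd.DataFrame(np.random.rand(20, 8),
                    index=["gene_%02d" % i for i in range(20)],
                    columns=["sample_%d" % i for i in range(8)])
# Row labels come from data.index; each row is normalized by its max value.
svg_heatmap(data, "heatmap.svg",
            box_size=10,
            norm_rows_by='max',
            draw_row_labels=True)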
Example n. 58
def is_prefix_of(prefix, of_path):
    """
    Return True if 'prefix' is a prefix of 'of_path'
    """
    return commonprefix([prefix, of_path]) == prefix
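
Because commonprefix is character-based, is_prefix_of treats a sibling path that merely shares a name prefix as nested; for directory semantics one would compare path components instead. Illustrative calls:

print(is_prefix_of("/usr/lib", "/usr/lib/python3"))      # True, as intended
print(is_prefix_of("/usr/lib", "/usr/lib64/libfoo.so"))  # also True, since
# "/usr/lib" is a character prefix of "/usr/lib64/...", not a parent directory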
Example n. 59
def pairspf(pp):
    pf = op.basename(op.commonprefix(pp).rstrip("._-"))
    if not pf.strip():
        pf = op.basename(pp[0])
    return pf
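
A usage sketch for pairspf on paired-end file names (op is os.path; the names are illustrative): the common prefix is stripped of trailing ".", "_" and "-" characters, and if nothing usable remains, the first file's basename is used instead.

print(pairspf(["reads/sample1_1.fastq", "reads/sample1_2.fastq"]))  # sample1
print(pairspf(["reads/a.fq", "reads/b.fq"]))  # a.fq (no shared name prefix)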
Example n. 60
    def refresh(self):
        """
        Refresh the search results panel
        """
        title = "'%s' - " % self.search_text
        if self.results is None:
            text = _('Search canceled')
        else:
            nb_files = len(self.results)
            if nb_files == 0:
                text = _('String not found')
            else:
                text_matches = _('matches in')
                text_files = _('file')
                if nb_files > 1:
                    text_files += 's'
                text = "%d %s %d %s" % (self.nb, text_matches, nb_files,
                                        text_files)
        if self.error_flag:
            text += ' (' + self.error_flag + ')'
        elif self.results is not None and not self.completed:
            text += ' (' + _('interrupted') + ')'
        self.set_title(title + text)
        self.clear()
        self.data = {}

        if not self.results:  # First search interrupted *or* No result
            return

        # Directory set
        dir_set = set()
        for filename in sorted(self.results.keys()):
            dirname = osp.abspath(osp.dirname(filename))
            dir_set.add(dirname)

        # Root path
        root_path_list = None
        _common = get_common_path(list(dir_set))
        if _common is not None:
            root_path_list = [_common]
        else:
            _common = get_common_path(self.pathlist)
            if _common is not None:
                root_path_list = [_common]
            else:
                root_path_list = self.pathlist
        if not root_path_list:
            return
        for _root_path in root_path_list:
            dir_set.add(_root_path)
        # Populating tree: directories
        def create_dir_item(dirname, parent):
            if dirname not in root_path_list:
                displayed_name = osp.basename(dirname)
            else:
                displayed_name = dirname
            item = QTreeWidgetItem(parent, [displayed_name],
                                   QTreeWidgetItem.Type)
            item.setIcon(0, get_std_icon('DirClosedIcon'))
            return item

        dirs = {}
        for dirname in sorted(list(dir_set)):
            if dirname in root_path_list:
                parent = self
            else:
                parent_dirname = abspardir(dirname)
                parent = dirs.get(parent_dirname)
                if parent is None:
                    # This happens for directories which contain found
                    # results only in some of their child directories
                    if osp.commonprefix([dirname] + root_path_list):
                        # create new root path
                        pass
                    items_to_create = []
                    while dirs.get(parent_dirname) is None:
                        items_to_create.append(parent_dirname)
                        parent_dirname = abspardir(parent_dirname)
                    items_to_create.reverse()
                    for item_dir in items_to_create:
                        item_parent = dirs[abspardir(item_dir)]
                        dirs[item_dir] = create_dir_item(item_dir, item_parent)
                    parent_dirname = abspardir(dirname)
                    parent = dirs[parent_dirname]
            dirs[dirname] = create_dir_item(dirname, parent)
        self.root_items = [dirs[_root_path] for _root_path in root_path_list]
        # Populating tree: files
        for filename in sorted(self.results.keys()):
            parent_item = dirs[osp.dirname(filename)]
            file_item = QTreeWidgetItem(parent_item, [osp.basename(filename)],
                                        QTreeWidgetItem.Type)
            file_item.setIcon(0, get_filetype_icon(filename))
            colno_dict = {}
            fname_res = []
            for lineno, colno, line in self.results[filename]:
                if lineno not in colno_dict:
                    fname_res.append((lineno, colno, line))
                colno_dict[lineno] = colno_dict.get(lineno, []) + [str(colno)]
            for lineno, colno, line in fname_res:
                colno_str = ",".join(colno_dict[lineno])
                item = QTreeWidgetItem(
                    file_item,
                    ["%d (%s): %s" % (lineno, colno_str, line.rstrip())],
                    QTreeWidgetItem.Type)
                item.setIcon(0, get_icon('arrow.png'))
                self.data[id(item)] = (filename, lineno)
        # Removing empty directories
        top_level_items = [
            self.topLevelItem(index)
            for index in range(self.topLevelItemCount())
        ]
        for item in top_level_items:
            if not item.childCount():
                self.takeTopLevelItem(self.indexOfTopLevelItem(item))
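
get_common_path is imported from elsewhere in this module; a plausible sketch of such a helper built on commonprefix (the actual Spyder implementation may differ):

import os.path as osp

def get_common_path(pathlist):
    """Return the common directory of pathlist, or None if there is none."""
    common = osp.normpath(osp.commonprefix(pathlist))
    if len(common) > 1:
        if not osp.isdir(common):
            # commonprefix may stop in the middle of a path component
            common = osp.dirname(common)
        return common or None
    return None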