Example #1
    def __init__(self, language_info, settings=None):
        dictionary = {}
        self._settings = settings
        self.info = language_info

        if 'skip' in language_info:
            skip = map(methodcaller('lower'), language_info['skip'])
            dictionary.update(zip_longest(skip, [], fillvalue=None))
        if 'pertain' in language_info:
            pertain = map(methodcaller('lower'), language_info['pertain'])
            dictionary.update(zip_longest(pertain, [], fillvalue=None))
        for word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',
                     'january', 'february', 'march', 'april', 'may', 'june', 'july',
                     'august', 'september', 'october', 'november', 'december',
                     'year', 'month', 'week', 'day', 'hour', 'minute', 'second',
                     'ago']:
            translations = map(methodcaller('lower'), language_info[word])
            dictionary.update(zip_longest(translations, [], fillvalue=word))
        dictionary.update(zip_longest(ALWAYS_KEEP_TOKENS, ALWAYS_KEEP_TOKENS))
        dictionary.update(zip_longest(map(methodcaller('lower'),
                                          DATEUTIL_PARSERINFO_KNOWN_TOKENS),
                                      DATEUTIL_PARSERINFO_KNOWN_TOKENS))

        self._dictionary = dictionary
        self._no_word_spacing = language_info.get('no_word_spacing', False)
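
A minimal standalone sketch (with hypothetical tokens) of the zip_longest(words, [], fillvalue=...) idiom used above: pairing every word with the same fill value gives dict.update a word-to-translation mapping.

from itertools import zip_longest

translations = ['lundi', 'lun']          # hypothetical lowercase tokens
dictionary = {}
dictionary.update(zip_longest(translations, [], fillvalue='monday'))
print(dictionary)                        # {'lundi': 'monday', 'lun': 'monday'}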
Example #2
    def _load_data(self, languages=None, locales=None, region=None,
                   use_given_order=False, allow_conflicting_locales=False):
        locale_dict = OrderedDict()
        if locales:
            invalid_locales = []
            for locale in locales:
                lang_reg = LOCALE_SPLIT_PATTERN.split(locale)
                if len(lang_reg) == 1:
                    lang_reg.append('')
                locale_dict[locale] = tuple(lang_reg)
                if not _isvalidlocale(locale):
                    invalid_locales.append(locale)
            if invalid_locales:
                raise ValueError("Unknown locale(s): %s"
                                 % ', '.join(map(repr, invalid_locales)))

            if not allow_conflicting_locales:
                if len(set(locales)) > len(set([t[0] for t in locale_dict.values()])):
                    raise ValueError("Locales should not have same language and different region")

        else:
            if languages is None:
                languages = language_order
            unsupported_languages = set(languages) - set(language_order)
            if unsupported_languages:
                raise ValueError("Unknown language(s): %s"
                                 % ', '.join(map(repr, unsupported_languages)))
            if region is None:
                region = ''
            locales = _construct_locales(languages, region)
            locale_dict.update(zip_longest(locales,
                               tuple(zip_longest(languages, [], fillvalue=region))))

        if not use_given_order:
            locale_dict = OrderedDict(sorted(locale_dict.items(),
                                      key=lambda x: language_order.index(x[1][0])))

        for shortname, lang_reg in locale_dict.items():
            if shortname not in self._loaded_locales:
                lang, reg = lang_reg
                if lang in self._loaded_languages:
                    locale = Locale(shortname, language_info=deepcopy(self._loaded_languages[lang]))
                    self._loaded_locales[shortname] = locale
                else:
                    language_info = getattr(
                        import_module('dateparser.data.date_translation_data.' + lang), 'info')
                    language_info = convert_to_unicode(language_info)
                    locale = Locale(shortname, language_info=deepcopy(language_info))
                    self._loaded_languages[lang] = language_info
                    self._loaded_locales[shortname] = locale
            yield shortname, self._loaded_locales[shortname]
Example #3
 def _add(s, A, B):
   # A + B
   if len(A) == 1 and len(A[0]) == 1:
     A = A[0][0]
     return s.element_class(s, [[A + B[0][0]] + list(B[0][1:])] + list(B[1:]))
   elif len(B) == 1 and len(B[0]) == 1:
     B = B[0][0]
     return s.element_class(s, [[A[0][0] + B] + list(A[0][1:])] + list(A[1:]))
   ret = []
   for x, y in zip_longest(A, B, fillvalue=[0]):
     t = []
     for xs, ys in zip_longest(x, y, fillvalue=0):
       t += [xs + ys]
     ret += [t]
   return s.element_class(s, ret)
Example #4
    def reconstruct_events(self, events, detector_ids=None, offsets=NO_OFFSET,
                           progress=True, initials=None):
        """Reconstruct events

        :param events: the events table for the station from an ESD data file.
        :param detector_ids: detectors to use for the reconstructions.
        :param offsets: time offsets for each detector or a
            :class:`~sapphire.api.Station` object.
        :param progress: if True show a progress bar while reconstructing.
        :param initials: list of dictionaries with already reconstructed shower
                         parameters.
        :return: list of theta, phi, and detector ids.

        """
        if initials is None:
            initials = []
        events = pbar(events, show=progress)
        events_init = zip_longest(events, initials)
        angles = [self.reconstruct_event(event, detector_ids, offsets, initial)
                  for event, initial in events_init]
        if len(angles):
            theta, phi, ids = zip(*angles)
        else:
            theta, phi, ids = ((), (), ())
        return theta, phi, ids
Example #5
    def reconstruct_coincidences(self, coincidences, station_numbers=None,
                                 progress=True, initials=None):
        """Reconstruct all coincidences

        :param coincidences: a list of coincidences, each consisting of
                             multiple (station_number, event) tuples.
        :param station_numbers: list of station numbers, to only use
                                events from those stations.
        :param progress: if True show a progress bar while reconstructing.
        :param initials: list of dictionaries with already reconstructed shower
                         parameters.
        :return: (x, y) core positions in m.

        """
        if initials is None:
            initials = []

        coincidences = pbar(coincidences, show=progress)
        coin_init = zip_longest(coincidences, initials)
        cores = [self.reconstruct_coincidence(coincidence, station_numbers,
                                              initial)
                 for coincidence, initial in coin_init]
        if len(cores):
            core_x, core_y = list(zip(*cores))
        else:
            core_x, core_y = ((), ())
        return core_x, core_y
Example #6
 def _build_components_from_string(self, arn_string):
     if '|' in arn_string:
         arn_string, query = arn_string.split('|')
         self.query = jmespath.compile(query)
     pairs = zip_longest(
         self.ComponentClasses, arn_string.split(':', 6), fillvalue='*')
     self._components = [c(n, self) for c, n in pairs]
Example #7
  def _ComparePrereleaseStrings(cls, s1, s2):
    """Compares the two given prerelease strings.

    Args:
      s1: str, The first prerelease string.
      s2: str, The second prerelease string.

    Returns:
      1 if s1 is greater than s2, -1 if s2 is greater than s1, and 0 if equal.
    """
    s1 = s1.split('.') if s1 else []
    s2 = s2.split('.') if s2 else []

    for (this, other) in zip_longest(s1, s2):
      # They can't both be None because empty parts of the string split will
      # come through as the empty string. None indicates it ran out of parts.
      if this is None:
        return 1
      elif other is None:
        return -1

      # Both parts have a value
      if this == other:
        # This part is the same, move on to the next.
        continue
      if this.isdigit() and other.isdigit():
        # Numerical comparison if they are both numbers.
        return SemVer._CmpHelper(int(this), int(other))
      # Lexical comparison if either is a string. Numbers will always sort
      # before strings.
      return SemVer._CmpHelper(this.lower(), other.lower())

    return 0
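
An illustrative standalone loop (not part of the SemVer class) showing why the None checks above work: zip_longest pads the shorter prerelease with None once its dot-separated parts run out.

from itertools import zip_longest

for this, other in zip_longest('1.0'.split('.'), '1.0.1'.split('.')):
    print(this, other)
# 1 1
# 0 0
# None 1   <- the shorter string ran out of parts, so it compares as greater here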
Example #8
 def assertContainsSameWords(self, string1, string2):
     try:
         for w1, w2 in zip_longest(string1.split(), string2.split(), fillvalue=''):
             self.assertEqual(w1, w2)
     except AssertionError:
         raise AssertionError("%r does not contain the same words as %r" % (string1,
                                                                            string2))
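
A quick sketch of the fillvalue='' behaviour relied on above: an extra word on either side gets paired with the empty string, so the word-by-word assertEqual fails.

from itertools import zip_longest

print(list(zip_longest('a b c'.split(), 'a b'.split(), fillvalue='')))
# [('a', 'a'), ('b', 'b'), ('c', '')]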
Example #9
    def reconstruct_events(self, events, detector_ids=None, progress=True,
                           initials=None):
        """Reconstruct events

        :param events: the events table for the station from an ESD data
                       file.
        :param detector_ids: detectors to use for the reconstructions.
        :param progress: if True show a progress bar while reconstructing.
        :param initials: list of dictionaries with already reconstructed shower
                         parameters.
        :return: (x, y) core positions in m.

        """
        if initials is None:
            initials = []

        events = pbar(events, show=progress)
        events_init = zip_longest(events, initials)
        cores = [self.reconstruct_event(event, detector_ids, initial)
                 for event, initial in events_init]
        if len(cores):
            core_x, core_y = zip(*cores)
        else:
            core_x, core_y = ((), ())
        return core_x, core_y
Example #10
    def test_logging(self):
        try:
            foo()
        except:
            logging.critical("Got exception", exc_info=True)
        expected = (
r'^WARNING$',
r'^warning here$',

r'^CRITICAL$',
r'^Got exception$',
r'^Traceback \(most recent call last\):$',
r'^  File ".+test_logging\.py", line [0-9]+, in test_logging$',
r'^    foo\(\)$',
r'^  File ".+test_logging\.py", line [0-9]+, in foo$',
r'^  \(in_foo=True\)$',
r'^  \(out_of_bar=True\)$',
r'^    baz\(\)$',
r'^  File ".+test_logging\.py", line [0-9]+, in baz$',
r'^  \(in_baz=True\)$',
r'^    raise ValueError\("ohno"\)$',
r'^ValueError: ohno$')
        actual = self.output.getvalue().split('\n')
        if actual and not actual[-1]:
            actual = actual[:-1]
        for a, e in zip_longest(actual, expected):
            self.assertIsNotNone(re.search(e, a), "%r != %r" % (a, e))
Example #11
    def __iter__(self):
        """Iterate over positions in the biological sequence.

        Yields
        ------
        Sequence
            Single character subsequence, one for each position in the
            sequence.

        Examples
        --------
        >>> from skbio import Sequence
        >>> s = Sequence('GGUC')
        >>> for c in s:
        ...     c
        Sequence('G', length=1)
        Sequence('G', length=1)
        Sequence('U', length=1)
        Sequence('C', length=1)

        """
        if self._has_quality():
            qual = self.quality
        else:
            qual = []

        for c, q in zip_longest(self.values, qual, fillvalue=None):
            yield self._to(sequence=c, quality=q)
Example #12
def create_sounding_dataset(parent_group, dataset_name, in_data, shape_names=(), units=None):
    "Creates a new dataset but with the data reshaped so that the first dim is the sounding one"

    new_data = numpy.array(in_data)
    try:
        new_ds = parent_group.create_dataset(dataset_name, data=new_data.reshape(1, *new_data.shape))
    except RuntimeError as e:
        raise RuntimeError("Could not create dataset: %s with data: %s\n%s" % (dataset_name, new_data, e))

    # Adds optionally passed shape names as an attribute to the dataset
    # Missing dimensions will be called Dim%d
    if type(shape_names) is StringType:
        shape_names = [shape_names]

    all_shape_names = []
    for idx, s_name in zip_longest(list(range(len(new_ds.shape))), ["Retrieval"] + list(shape_names)):
        if s_name:
            all_shape_names.append(s_name)
        else:
            all_shape_names.append("Dim%d" % (idx+1))
    new_ds.attrs["Shape"] = [ numpy.array("_".join(all_shape_names) + "_Array") ]

    if units:
        new_ds.attrs["Units"] = [ numpy.array(units) ]

    return new_ds
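
A standalone sketch of the shape-naming loop above (hypothetical shapes, no HDF5 involved): zip_longest pads the missing names with None, which the loop replaces with Dim%d.

from itertools import zip_longest

ndim = 3                       # pretend the new dataset has 3 dimensions
shape_names = ['Level']        # only one extra name supplied
names = []
for idx, s_name in zip_longest(range(ndim), ['Retrieval'] + shape_names):
    names.append(s_name if s_name else 'Dim%d' % (idx + 1))
print('_'.join(names) + '_Array')   # Retrieval_Level_Dim3_Array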
Example #13
def test_mppovm_expectation(nr_sites, width, local_dim, rank, nopovm, rgen):
    # Verify that :func:`povm.MPPovm.expectations()` produces
    # correct results.
    pmap = nopovm.probability_map
    mpnopovm = povm.MPPovm.from_local_povm(nopovm, width)
    # Use a random MPO rho for testing (instead of a positive MPO).
    rho = factory.random_mpo(nr_sites, local_dim, rank, rgen)
    reductions = mpsmpo.reductions_mpo(rho, width)
    # Compute expectation values with mpnopovm.expectations(), which
    # uses mpnopovm.probability_map.
    expectations = list(mpnopovm.expectations(rho))
    assert len(expectations) == nr_sites - width + 1

    for evals_mp, rho_red in zip_longest(expectations, reductions):
        # Compute expectation values by constructing each tensor
        # product POVM element.
        rho_red_matrix = rho_red.to_array_global().reshape(
            (local_dim**width,) * 2)
        evals = []
        for factors in it.product(nopovm, repeat=width):
            elem = utils.mkron(*factors)
            evals.append(np.trace(np.dot(elem, rho_red_matrix)))
        evals = np.array(evals).reshape((len(nopovm),) * width)

        # Compute expectation with a different construction. In the
        # end, this is (should be, we verify it here) equivalent to
        # what `mpnopovm.expectations()` does.
        evals_ten = rho_red.ravel().to_array()
        for _ in range(width):
            evals_ten = np.tensordot(evals_ten, pmap, axes=(0, 1))

        assert_array_almost_equal(evals_ten, evals)
        assert_array_almost_equal(evals_mp.to_array(), evals)
Example #14
    def _print_status(self, name, ids_in_couch, ids_in_sql, diff_count, num_docs_with_diffs, short, diffs_only):
        n_couch = len(ids_in_couch)
        n_sql = len(ids_in_sql)
        has_diff = n_couch != n_sql or diff_count

        if diffs_only and not has_diff:
            return False

        def _highlight(text):
            return shell_red(text) if has_diff else text

        row = "{:^40} | {:^40}"
        doc_count_row = row.format(n_couch, n_sql)
        header = ((82 - len(name)) // 2) * '_'

        print('\n{} {} {}'.format(header, name, header))
        print(row.format('Couch', 'SQL'))
        print(_highlight(doc_count_row))
        if diff_count:
            print(_highlight("{:^83}".format('{} diffs ({} docs)'.format(diff_count, num_docs_with_diffs))))

        if not short:
            if ids_in_couch ^ ids_in_sql:
                couch_only = list(ids_in_couch - ids_in_sql)
                sql_only = list(ids_in_sql - ids_in_couch)
                for couch, sql in zip_longest(couch_only, sql_only):
                    print(row.format(couch or '', sql or ''))

        return True
Example #15
 def __bind_commands(self):
     if not self.parallel:
         for attr in ['complete_kill', 'do_kill', 'do_status']:
             delattr(FrameworkConsole, attr)
     for name, func in get_commands():
         longname = 'do_{}'.format(name)
         # set the behavior of the console command (multi-processed or not)
         # setattr(Console, longname, MethodType(FrameworkConsole.start_process_template(func) \
         #                                   if self.parallel and func.behavior.is_multiprocessed else func, self))
         setattr(Console, longname, MethodType(func, self))
         # retrieve parts of function's docstring to make console command's docstring
         parts = func.__doc__.split(':param ')
         description = parts[0].strip()
         arguments = [" ".join([l.strip() for l in x.split(":")[-1].split('\n')]) for x in parts[1:]]
         docstring = COMMAND_DOCSTRING["description"].format(description)
         if len(arguments) > 0:
             arg_descrs = [' - {}:\t{}'.format(n, d or "[no description]") \
                           for n, d in list(zip_longest(signature(func).parameters.keys(), arguments or []))]
             docstring += COMMAND_DOCSTRING["arguments"].format('\n'.join(arg_descrs))
         if hasattr(func, 'examples') and isinstance(func.examples, list):
             args_examples = [' >>> {} {}'.format(name, e) for e in func.examples]
             docstring += COMMAND_DOCSTRING["examples"].format('\n'.join(args_examples))
         setattr(getattr(getattr(Console, longname), '__func__'), '__doc__', docstring)
         # set the autocomplete list of values (can be lazy by using lambda) if relevant
         if hasattr(func, 'autocomplete'):
             setattr(Console, 'complete_{}'.format(name),
                     MethodType(FrameworkConsole.complete_template(func.autocomplete), self))
         if hasattr(func, 'reexec_on_emptyline') and func.reexec_on_emptyline:
             self.reexec.append(name)
Example #16
def _check_bam_contigs(in_bam, ref_file, config):
    """Ensure a pre-aligned BAM file matches the expected reference genome.
    """
    # GATK allows chromosome M to be in multiple locations, skip checking it
    allowed_outoforder = ["chrM", "MT"]
    ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)]
    with pysam.Samfile(in_bam, "rb") as bamfile:
        bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
    extra_bcs = [x for x in bam_contigs if x not in ref_contigs]
    extra_rcs = [x for x in ref_contigs if x not in bam_contigs]
    problems = []
    warnings = []
    for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and
                                                          x not in allowed_outoforder)],
                              [x for x in ref_contigs if (x not in extra_rcs and
                                                          x not in allowed_outoforder)]):
        if bc != rc:
            if bc and rc:
                problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc))
            elif bc:
                warnings.append("Extra BAM chromosomes: %s" % bc)
            elif rc:
                warnings.append("Extra reference chromosomes: %s" % rc)
    for bc in extra_bcs:
        warnings.append("Extra BAM chromosomes: %s" % bc)
    for rc in extra_rcs:
        warnings.append("Extra reference chromosomes: %s" % rc)
    if problems:
        raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n"
                         "Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue."
                         % "\n".join(problems))
    if warnings:
        print("*** Potential problems in input BAM compared to reference:\n%s\n" %
              "\n".join(warnings))
Example #17
    def __init__(self, rules=None, flags=0, branch_size=MAXGROUPS - 1):
        """Initialize index structures.

        :param rules: list of tuples (regular expression, data)
        :param flags: additional flags passed to SRE parser
        :param branch_size: number of groups in a branch (max. 99)
        """
        self._patterns = []
        self.flags = flags
        self.rules = rules or []
        self.branch_size = min(branch_size, len(self.rules))

        def make_pattern(rules, flags=0):
            """Compile a rules to single branch with groups."""
            return re.compile('|'.join('(?P<I{name}>{regex})'.format(
                name=name, regex=regex
            ) for regex, (name, creator) in rules), flags=flags)

        for rules in zip_longest(*[iter(self.rules)] * self.branch_size):
            self._patterns.append(make_pattern([
                rule for rule in rules if rule is not None
            ]))

        self._lookup = {
            name: (name, creator) for _, (name, creator) in self.rules
        }
Example #18
def histogram(bidx, infile, bins, sampling, rgb):

    """
    Histogram of raster values.
    """

    with rio.open(infile) as src:

        if bidx is not None:
            indexes = [bidx]
        else:
            indexes = src.indexes

        if rgb and len(indexes) < 3:
            raise click.ClickException("Not enough bands to display a RGB histogram.")
        elif rgb and len(indexes) >= 3:
            colors = ('red', 'green', 'blue', 'violet')
        else:
            colors = ()

        for b, color in zip_longest(indexes, colors, fillvalue=None):

            data = np.zeros(
                (src.height // sampling, src.width // sampling),
                dtype=src.dtypes[src.indexes.index(b)])
            data = src.read(indexes=b, out=data, masked=False)

            plt.hist(data.flatten(), bins=bins, alpha=0.5, color=color)

        plt.show()
Example #19
def cmp_like_py2(dict1, dict2):  # type: (Dict[Text, Any], Dict[Text, Any]) -> int
    """
    Comparison function to be used in sorting, as Python 3 doesn't allow sorting
    of mixed types like str() and int().
    This function re-creates Python 2's sorting behaviour for a heterogeneous
    list of `int` and `str`.
    """
    # extract lists from both dicts
    first, second = dict1["position"], dict2["position"]
    # iterate through both list till max of their size
    for i, j in zip_longest(first, second):
        if i == j:
            continue
        # in case 1st list is smaller
        # should come first in sorting
        if i is None:
            return -1
        # if 1st list is longer,
        # it should come later in sort
        elif j is None:
            return 1

        # if either of the list contains str element
        # at any index, both should be str before comparing
        if isinstance(i, str) or isinstance(j, str):
            return 1 if str(i) > str(j) else -1
        # int comparison otherwise
        return 1 if i > j else -1
    # if both lists are equal
    return 0
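
A small usage sketch, assuming the dicts only need a "position" list: wrapped with functools.cmp_to_key, shorter lists sort first when their prefixes match.

from functools import cmp_to_key

records = [{"position": [2, "b"]}, {"position": [2]}, {"position": [1, 10]}]
records.sort(key=cmp_to_key(cmp_like_py2))
print([r["position"] for r in records])   # [[1, 10], [2], [2, 'b']]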
Example #20
    def reconstruct_coincidences(self, coincidences, station_numbers=None,
                                 offsets=None, progress=True, initials=None):
        """Reconstruct all coincidences

        :param coincidences: a list of coincidence events, each consisting
                             of three or more (station_number, event) tuples.
        :param station_numbers: list of station numbers, to only use
                                events from those stations.
        :param offsets: dictionary with detector offsets for each station.
                        These detector offsets should be relative to one
                        detector from a specific station.
        :param progress: if True show a progress bar while reconstructing.
        :param initials: list of dictionaries with already reconstructed shower
                         parameters.
        :return: list of theta, phi, and station numbers.

        """
        if offsets is None:
            offsets = {}
        if initials is None:
            initials = []
        coincidences = pbar(coincidences, show=progress)
        coin_init = zip_longest(coincidences, initials)
        angles = [self.reconstruct_coincidence(coincidence, station_numbers,
                                               offsets, initial)
                  for coincidence, initial in coin_init]
        if len(angles):
            theta, phi, nums = zip(*angles)
        else:
            theta, phi, nums = ((), (), ())
        return theta, phi, nums
Example #21
    def _check_apt_preferences(self, data, sections, priority):
        pins = data.split('\n\n')

        # in non-flat repo we have one pin per section
        if sections:
            self.assertEqual(len(pins), len(sections))

        # we should have one pin per section
        for pin, section in zip_longest(pins, sections):
            conditions = self._re_pin.search(pin).group(1).split(',')

            # check general template
            self.assertRegexpMatches(
                data, (
                    'Package: \*\n'
                    'Pin: release .*\n'
                    'Pin-Priority: {0}'.format(priority)
                ))

            # check pin
            expected_conditions = [
                'a=test-archive',
                'l=TestLabel',
                'n=testcodename',
                'o=TestOrigin',
            ]
            if section:
                expected_conditions.append('c={0}'.format(section))
            self.assertItemsEqual(conditions, expected_conditions)
Example #22
def make_date_range_tuples(start, end, gap):
    """Make an iterable of date tuples for use in iterating forms

    For example, a form might allow start and end dates and you want to iterate
    it one week at a time starting on Jan 1 and ending on Feb 3:

    >>> make_date_range_tuples(date(2017, 1, 1), date(2017, 2, 3), 7)
    [(Jan 1, Jan 7), (Jan 8, Jan 14), (Jan 15, Jan 21), (Jan 22, Jan 28),
     (Jan 29, Feb 3)]

    :param start: date when the query should start.
    :param end: date when the query should end.
    :param gap: the number of days, inclusive, that a query should span at a
    time.

    :rtype: list(tuple)
    :returns: list of start, end tuples
    """
    # We create a list of start dates and a list of end dates, then zip them
    # together. If end_dates is shorter than start_dates, fill the last value
    # with the original end date.
    start_dates = [d.date() for d in rrule(DAILY, interval=gap, dtstart=start,
                                           until=end)]
    end_start = start + datetime.timedelta(days=gap - 1)
    end_dates = [d.date() for d in rrule(DAILY, interval=gap, dtstart=end_start,
                                         until=end)]
    return list(zip_longest(start_dates, end_dates, fillvalue=end))
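
A hedged usage sketch, assuming the datetime and dateutil.rrule imports used by the function are in scope; the final tuple is padded with the original end date via fillvalue=end.

import datetime

weeks = make_date_range_tuples(datetime.date(2017, 1, 1),
                               datetime.date(2017, 2, 3), gap=7)
print(weeks[0])    # (datetime.date(2017, 1, 1), datetime.date(2017, 1, 7))
print(weeks[-1])   # (datetime.date(2017, 1, 29), datetime.date(2017, 2, 3))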
Example #23
def tileswrap(ihtORsize, numtilings, floats, wrapwidths, ints=[],
              readonly=False):
    '''
    returns num-tilings tile indices corresponding to the floats and ints,
    wrapping some floats

    :param ihtORsize: integer or IHT object. An index hash table or a positive
        integer specifying the upper range of returned indices
    :param numtilings: integer. the number of tilings desired. For best
        results, the second argument, numtilings, should be a power of two
        greater than or equal to four times the number of floats
    :param memory-size: integer. the number of possible tile indices
    :param floats: list. a list of real values making up the input vector
    :param wrapwidths:
    :param ints*: list. optional list of integers to get different hashings
    :param readonly*: boolean.
    '''
    qfloats = [floor(f*numtilings) for f in floats]
    Tiles = []
    for tiling in range(numtilings):
        tilingX2 = tiling*2
        coords = [tiling]
        b = tiling
        for q, width in zip_longest(qfloats, wrapwidths):
            c = (q + b % numtilings) // numtilings
            coords.append(c % width if width else c)
            b += tilingX2
        coords.extend(ints)
        Tiles.append(hashcoords(coords, ihtORsize, readonly))
    return Tiles
Example #24
def merge_lists(base, mine, other):
    if mine == other:
        return mine
    if other == base:
        return mine
    if mine == base:
        return other
    result = []
    last_conflict = False
    for i, (m, o, b) in enumerate(zip_longest(mine, other, base,
                                              fillvalue=_BLANK)):
        if (m == o and _BLANK not in (m, o) or
                isinstance(m, dict) and isinstance(o, dict)):
            result.append(m)
        else:  # Conflict
            if last_conflict:
                c = result[-1]
                c.update(m, o, b)
            else:
                c = Conflict(m, o, b)
                result.append(c)
            last_conflict = True
            continue
        last_conflict = False
    offset = 0
    for i, r in enumerate(result[:]):
        if isinstance(r, Conflict):
            c = r.resolve_conflict()
            result = result[:i + offset] + c + result[i + offset + 1:]
            offset += len(c) - 1
    return result
Example #25
    def test_parse_stream(
        self, structure_and_messages1, structure_and_messages2, structure_and_messages3
    ):
        """
        L{Parser.parse_stream} returns an iterable of completed and then
        incompleted tasks.
        """
        _, messages1 = structure_and_messages1
        _, messages2 = structure_and_messages2
        _, messages3 = structure_and_messages3
        # Need at least one non-dropped message in partial tree:
        assume(len(messages3) > 1)
        # Need unique UUIDs per task:
        assume(
            len(set(m[0][TASK_UUID_FIELD] for m in (messages1, messages2, messages3)))
            == 3
        )

        # Two complete tasks, one incomplete task:
        all_messages = (messages1, messages2, messages3[:-1])

        all_tasks = list(
            Parser.parse_stream(
                [m for m in chain(*zip_longest(*all_messages)) if m is not None]
            )
        )
        assertCountEqual(
            self, all_tasks, [parse_to_task(msgs) for msgs in all_messages]
        )
Example #26
def combine_slices(slice1, slice2):
    """Return two tuples of slices combined sequentially.

    These two should be equal:

        x[ combine_slices(s1, s2) ] == x[s1][s2]

    """
    out = []
    for exp1, exp2 in zip_longest(
            slice1, slice2, fillvalue=slice(None)):
        if isinstance(exp1, int):
            exp1 = slice(exp1, exp1+1)
        if isinstance(exp2, int):
            exp2 = slice(exp2, exp2+1)

        start = (exp1.start or 0) + (exp2.start or 0)
        step = (exp1.step or 1) * (exp2.step or 1)

        if exp1.stop is None and exp2.stop is None:
            stop = None
        elif exp1.stop is None:
            stop = (exp1.start or 0) + exp2.stop
        elif exp2.stop is None:
            stop = exp1.stop
        else:
            stop = min(exp1.stop, (exp1.start or 0) + exp2.stop)

        out.append(slice(start, stop, step))
    return tuple(out)
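
A quick check of the documented property for step-free slices, on hypothetical data:

x = list(range(20))
s1, s2 = (slice(2, 15),), (slice(1, 5),)
combined = combine_slices(s1, s2)
print(x[combined[0]] == x[s1[0]][s2[0]])   # True, both give [3, 4, 5, 6]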
Example #27
    def compute_chunk(self, ordered_terms, extra_row_counts, base_mask):
        """
        Compute the FFC terms in the graph based on the assets and dates
        defined by base_mask.

        Returns a dictionary mapping terms to computed arrays.
        """
        loader = self._loader
        max_extra_rows = max(extra_row_counts.values())
        workspace = {term: None for term in ordered_terms}

        for term in ordered_terms:
            base_mask_for_term = base_mask.iloc[
                max_extra_rows - extra_row_counts[term]:
            ]
            if term.atomic:
                # FUTURE OPTIMIZATION: Scan the resolution order for terms in
                # the same dataset and load them here as well.
                to_load = [term]
                loaded = loader.load_adjusted_array(
                    to_load,
                    base_mask_for_term,
                )
                for loaded_term, adj_array in zip_longest(to_load, loaded):
                    workspace[loaded_term] = adj_array
            else:
                if term.windowed:
                    compute = term.compute_from_windows
                else:
                    compute = term.compute_from_arrays
                workspace[term] = compute(
                    self._inputs_for_term(term, workspace, extra_row_counts),
                    base_mask_for_term,
                )
        return workspace
Example #28
def get_language_list_for_templates(default_language):
    # type: (Text) -> List[Dict[str, Dict[str, str]]]
    language_list = [l for l in get_language_list()
                     if 'percent_translated' not in l or
                        l['percent_translated'] >= 5.]

    formatted_list = []
    lang_len = len(language_list)
    firsts_end = (lang_len // 2) + operator.mod(lang_len, 2)
    firsts = list(range(0, firsts_end))
    seconds = list(range(firsts_end, lang_len))
    assert len(firsts) + len(seconds) == lang_len
    for row in zip_longest(firsts, seconds):
        item = {}
        for position, ind in zip(['first', 'second'], row):
            if ind is None:
                continue

            lang = language_list[ind]
            percent = name = lang['name']
            if 'percent_translated' in lang:
                percent = u"{} ({}%)".format(name, lang['percent_translated'])

            item[position] = {
                'name': name,
                'code': lang['code'],
                'percent': percent,
                'selected': True if default_language == lang['code'] else False
            }

        formatted_list.append(item)

    return formatted_list
Example #29
def _apply_funcs(return_vals, funcs):
    """
    Apply the given funcs to the return values of the decorated function
    """
    # Ensure the return values and funcs are iterable
    if not isinstance(return_vals, tuple):
        return_vals = (return_vals,)
    try:
        iter(funcs)
    except TypeError:
        funcs = (funcs,) if funcs else ()

    # Check the functions
    if not funcs:
        return return_vals

    # Functions and return values do not line up
    if 1 < len(return_vals) < len(funcs):
        raise TypeError(
            "In _apply_funcs(), len(funcs) == {} more than len(return_vals) == {}".format(
                len(funcs), len(return_vals)
            )
        )
    # More functions than return values
    if 1 == len(return_vals) < len(funcs):
        raise TypeError(
            "In _apply_funcs(), only 1 return value with len(processors) == {}".format(len(funcs),
                                                                                       len(return_vals))
        )

    # Apply each function to the corresponding return value
    return tuple([f(v) if f else v for v, f in zip_longest(return_vals, funcs)])
Example #30
  def _get_minibatch_feed_dict(self, target_q_values, 
                               non_terminal_minibatch, terminal_minibatch):
    """
    Helper to construct the feed_dict for train_op. Takes the non-terminal and 
    terminal minibatches as well as the max q-values computed from the target
    network for non-terminal states. Computes the expected q-values based on
    discounted future reward.

    @return: feed_dict to be used for train_op
    """
    assert len(target_q_values) == len(non_terminal_minibatch)

    states = []
    expected_q = []
    actions = []

    # Compute expected q-values to plug into the loss function
    minibatch = itertools.chain(non_terminal_minibatch, terminal_minibatch)
    for item, target_q in zip_longest(minibatch, target_q_values, fillvalue=0):
      state, action, reward, _, _ = item
      states.append(state)
      # target_q will be 0 for terminal states due to fillvalue in zip_longest
      expected_q.append(reward + self.config.reward_discount * target_q)
      actions.append(utils.one_hot(action, self.env.action_space.n))

    return {
      self.network.x_placeholder: states, 
      self.network.q_placeholder: expected_q,
      self.network.action_placeholder: actions,
    }
Example #31
def _compute_scores(target_filenames, prediction_filenames, scorer, delimiter):
  """Computes aggregates scores across the given target and prediction files.

  Args:
    target_filenames: List of filenames from which to read target lines.
    prediction_filenames: List of filenames from which to read prediction lines.
    scorer: A BaseScorer object to compute scores.
    delimiter: string delimiter between each record in input files
  Returns:
    A list of dicts mapping score_type to Score objects.
  Raises:
    ValueError: If invalid targets or predictions are provided.
  """

  if (len(target_filenames) < 1 or
      len(target_filenames) != len(prediction_filenames)):
    raise ValueError("Must have equal and positive number of target and "
                     "prediction files. Found: %d target files, %d prediction "
                     "files." % (len(target_filenames),
                                 len(prediction_filenames)))

  scores = []
  for target_filename, prediction_filename in zip(
      sorted(target_filenames), sorted(prediction_filenames)):
    logging.info("Reading targets from %s.", target_filename)
    logging.info("Reading predictions from %s.", prediction_filename)
    targets = _record_gen(target_filename, delimiter)
    preds = _record_gen(prediction_filename, delimiter)
    for target_rec, prediction_rec in zip_longest(targets, preds):
      if target_rec is None or prediction_rec is None:
        raise ValueError("Must have equal number of lines across target and "
                         "prediction files. Mismatch between files: %s, %s." %
                         (target_filename, prediction_filename))
      scores.append(scorer.score(target_rec, prediction_rec))

  return scores
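
A standalone sketch of the length-check idiom above: once the shorter stream is exhausted, zip_longest yields None for it, which flags a line-count mismatch.

from itertools import zip_longest

targets, preds = iter(['t1', 't2']), iter(['p1'])
for target_rec, prediction_rec in zip_longest(targets, preds):
    if target_rec is None or prediction_rec is None:
        print('line-count mismatch')   # triggered by the pair ('t2', None)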
Example #32
def _build_tsv(rect_list,
               side_by_side=False,
               transpose=False,
               format_cell=False):
    """
    Build a single two-dimensional array from Excel named ranges (multiple cell ranges)
    rect_list:    list of the cell ranges themselves
    side_by_side: arrange multiple cell ranges side by side; otherwise stack them vertically
    transpose:    transpose the result (applied after the ranges have been combined)
    """
    result = []
    for rect_index, rect in enumerate(rect_list):
        for row_index, row in enumerate(rect):
            line = []
            for cell in row:
                value = cell
                if not cell:
                    value = None
                elif hasattr(cell, 'value'):
                    value = _format_cell_value(
                        cell) if format_cell else cell.value
                else:
                    raise ValueError("Unknown type %s for %s" %
                                     (type(cell), cell))
                line.append(value)
            if side_by_side and rect_index > 0:
                result[row_index].extend(line)
            else:
                result.append(line)

    if transpose:
        result = [
            list(row) for row in moves.zip_longest(*result, fillvalue=None)
        ]  # idiom for transpose

    return result
Example #33
def compareVersions(v1, v2):
    """helper function: compare arrays or strings of version numbers.
    E.g., compareVersions((1,3,25), (1,4,1))
    returns -1, 0, 1 if v1 is <, ==, > v2
    """
    def make_version_list(v):
        if isinstance(v, (list, tuple)):
            return v
        elif isinstance(v, string_types):
            return list(
                map(lambda x: int(re.match(r'\d+', x).group()), v.split('.')))
        else:
            raise TypeError()

    v1 = make_version_list(v1)
    v2 = make_version_list(v2)

    # Compare corresponding elements of lists
    # The shorter list is filled with 0 till the lists have the same length
    for n1, n2 in zip_longest(v1, v2, fillvalue=0):
        if n1 < n2: return -1
        if n1 > n2: return 1

    return 0
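
A usage sketch (assuming the helpers the function relies on, e.g. string_types, are importable): missing trailing components are padded with 0 by zip_longest, so '1.3' and '1.3.0' compare equal.

print(compareVersions('1.3.25', '1.4.1'))   # -1
print(compareVersions((1, 3), (1, 3, 0)))   # 0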
Example #34
    def __call__(self, inputs, labels, loss_weights):
        # loss weight: [cross entropy, jmmd]
        inputs = tf.concat(inputs, axis=0)
        features = self.base_model(inputs)
        features = self.fcb(features)
        logits = self.fc(features)
        source_feature, target_feature = tf.split(features, 2)
        source_logits, target_logits = tf.split(logits, 2)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels[0], logits=source_logits, name='xentropy')
        cross_entropy_loss = tf.reduce_mean(cross_entropy,
                                            name='xentropy_mean')
        jmmd_losses = [
            L.jmmd_loss([source_feature, source_logits],
                        [target_feature, target_logits]),
        ]
        loss = sum([
            w * l if w is not None else l
            for w, l in zip_longest(loss_weights, jmmd_losses)
        ]) + cross_entropy_loss
        correct = tf.nn.in_top_k(target_logits, labels[1], 1)
        accuracy = tf.reduce_sum(tf.cast(correct, tf.int32))
        return loss, accuracy
Example #35
    def execute(self, line):
        _args = self.arg_parser.parse_args(line)

        if _args.limit is not None and _args.limit <= 0:
            raise InvalidCommandException('--limit must be >= 0')

        all_problems = judgeenv.get_supported_problems()

        if _args.filter:
            r = re.compile(_args.filter)
            all_problems = filter(lambda x: r.match(x[0]) is not None,
                                  all_problems)

        if _args.limit:
            all_problems = all_problems[:_args.limit]

        if len(all_problems):
            problems = iter(map(itemgetter(0), all_problems))
            max_len = max(len(p[0]) for p in all_problems)
            for row in zip_longest(*[problems] * 4, fillvalue=''):
                print(' '.join(('%*s' % (-max_len, row[i])) for i in range(4)))
            print()
        else:
            raise InvalidCommandException('No problems matching filter found.')
Example #36
def chunked(iterable, n):
    """Break an iterable into lists of a given length::

        >>> list(chunked([1, 2, 3, 4, 5, 6, 7], 3))
        [[1, 2, 3], [4, 5, 6], [7]]

    If the length of ``iterable`` is not evenly divisible by ``n``, the last
    returned list will be shorter.

    This is useful for splitting up a computation on a large number of keys
    into batches, to be pickled and sent off to worker processes. One example
    is operations on rows in MySQL, which does not implement server-side
    cursors properly and would otherwise load the entire dataset into RAM on
    the client.

    """
    # Doesn't seem to run into any number-of-args limits.
    for group in (
            list(g)
            for g in zip_longest(*[iter(iterable)] * n, fillvalue=_marker)):
        if group[-1] is _marker:
            # If this is the last group, shuck off the padding:
            del group[group.index(_marker):]
        yield group
Example #37
    def _inner_walk(self, path, left, right):
        # type: (PathType, typing.Any, typing.Any) -> typing.Iterable[T]
        """
        Fully traverse the left and right objects.

        The traversal will short-circuit in case:
         * a given path should not be traversed
         * the path has already been traversed (recursive definition)
        """
        if not self.should_path_be_walked_through(
                path) or self._is_recursive_call(path, left, right):
            return ()

        if isinstance(left, dict) and isinstance(right, dict):
            return chain(
                self.dict_check(path, left, right),
                (value for key in set(chain(iterkeys(left), iterkeys(right)))
                 for value in self._inner_walk(
                     path=tuple(chain(path, [key])),
                     left=left.get(key, NO_VALUE),
                     right=right.get(key, NO_VALUE),
                 )),
            )
        elif isinstance(left, list) and isinstance(right, list):
            return chain(
                self.list_check(path, left, right),
                (value for index, (left_item, right_item) in enumerate(
                    zip_longest(left, right, fillvalue=NO_VALUE))
                 for value in self._inner_walk(
                     path=tuple(chain(path, [index])),
                     left=left_item,
                     right=right_item,
                 )),
            )
        else:
            return self.value_check(path, left, right)
Example #38
 def addfiles(self, handler):
     filemetas = []
     uploads = [
         upload for key in self.keys.get('file', [])
         for upload in handler.request.files.get(key, [])
     ]
     filenames = [
         name for key in self.keys.get('save', [])
         for name in handler.args.get(key, [])
     ]
     if_exists = getattr(handler, 'if_exists', 'unique')
     for upload, filename in zip_longest(uploads, filenames,
                                         fillvalue=None):
         filemeta = self.save_file(upload, filename, if_exists)
         key = filemeta['file']
         filemeta.update(
             key=key,
             user=handler.get_current_user(),
             data=handler.args,
         )
         filemeta = handler.transforms(filemeta)
         self.store.dump(key, filemeta)
         filemetas.append(filemeta)
     return filemetas
Example #39
def map2x(func, *iterables):
    """map() function for Python 2/3 compatability"""
    zipped = zip_longest(*iterables)
    if func is None:
        return zipped
    return starmap(func, zipped)
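
An illustrative call with hypothetical inputs: unlike Python 3's built-in map, map2x pads the shorter iterable with None, matching Python 2's behaviour.

print(list(map2x(lambda a, b: (a, b), [1, 2, 3], 'xy')))
# [(1, 'x'), (2, 'y'), (3, None)]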
Example #40
 def grouper(n, iterable, fillvalue=None):
     " grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
     args = [iter(iterable)] * n
     return zip_longest(fillvalue=fillvalue, *args)
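
The *[iter(iterable)] * n trick repeats the same iterator object n times, so each output tuple consumes n fresh items; a quick sketch:

print(list(grouper(3, 'ABCDEFG', 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]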
Example #41
def compare_strings(version1, version2):
    """
    Compare two upstream version strings or Debian revision strings.

    :param version1: An upstream version string or Debian revision string.
    :param version2: An upstream version string or Debian revision string.
    :returns: One of the following integer numbers:

              - -1 means version1 sorts before version2
              - 0 means version1 and version2 are equal
              - 1 means version1 sorts after version2

    This function is used by :func:`compare_version_objects()` to perform the
    comparison of Debian version strings.
    """
    logger.debug("Comparing Debian version number substrings %r and %r ..", version1, version2)
    mapping = get_order_mapping()
    v1 = list(version1)
    v2 = list(version2)
    while v1 or v2:
        # Quoting from the 'deb-version' manual page: First the initial part of each
        # string consisting entirely of non-digit characters is determined. These two
        # parts (one of which may be empty) are compared lexically. If a difference is
        # found it is returned. The lexical comparison is a comparison of ASCII values
        # modified so that all the letters sort earlier than all the non-letters and so
        # that a tilde sorts before anything, even the end of a part. For example, the
        # following parts are in sorted order: '~~', '~~a', '~', the empty part, 'a'.
        p1 = get_non_digit_prefix(v1)
        p2 = get_non_digit_prefix(v2)
        if p1 != p2:
            logger.debug("Comparing non-digit prefixes %r and %r ..", p1, p2)
            for c1, c2 in zip_longest(p1, p2, fillvalue=""):
                logger.debug("Performing lexical comparison between characters %r and %r ..", c1, c2)
                o1 = mapping.get(c1)
                o2 = mapping.get(c2)
                if o1 < o2:
                    logger.debug("Determined that %r sorts before %r (based on lexical comparison).", version1, version2)
                    return -1
                elif o1 > o2:
                    logger.debug("Determined that %r sorts after %r (based on lexical comparison).", version1, version2)
                    return 1
        elif p1:
            logger.debug("Skipping matching non-digit prefix %r ..", p1)
        # Quoting from the 'deb-version' manual page: Then the initial part of the
        # remainder of each string which consists entirely of digit characters is
        # determined. The numerical values of these two parts are compared, and any
        # difference found is returned as the result of the comparison. For these purposes
        # an empty string (which can only occur at the end of one or both version strings
        # being compared) counts as zero.
        d1 = get_digit_prefix(v1)
        d2 = get_digit_prefix(v2)
        logger.debug("Comparing numeric prefixes %i and %i ..", d1, d2)
        if d1 < d2:
            logger.debug("Determined that %r sorts before %r (based on numeric comparison).", version1, version2)
            return -1
        elif d1 > d2:
            logger.debug("Determined that %r sorts after %r (based on numeric comparison).", version1, version2)
            return 1
        else:
            logger.debug("Determined that numeric prefixes match.")
    logger.debug("Determined that version numbers are equal.")
    return 0
Example #42
def assert_arrays_equal(arrays1, arrays2):
    """ Check two iterables yield the same sequence of arrays. """
    for arr1, arr2 in zip_longest(arrays1, arrays2, fillvalue=None):
        assert_false(arr1 is None or arr2 is None)
        assert_array_equal(arr1, arr2)
Example #43
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks."""
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
Example #44
def model_summary(model, threshold=1E-8, fva=None, digits=2, **solver_args):
    """Print a summary of the input and output fluxes of the model.

    threshold: float
        tolerance for determining if a flux is zero (not printed)

    fva: int or None
        Whether or not to calculate and report flux variability in the
        output summary

    digits: int
        number of digits after the decimal place to print

    """
    obj_fluxes = pd.Series({
        '{:<15}'.format(r.id): '{:.3f}'.format(r.x)
        for r in iterkeys(model.objective)
    })

    if not fva:

        out_rxns = model.reactions.query(lambda rxn: rxn.x > threshold,
                                         None).query(lambda x: x, 'boundary')

        in_rxns = model.reactions.query(lambda rxn: rxn.x < -threshold,
                                        None).query(lambda x: x, 'boundary')

        out_fluxes = pd.Series({r.reactants[0]: r.x for r in out_rxns})
        in_fluxes = pd.Series({r.reactants[0]: r.x for r in in_rxns})

        # sort and round
        out_fluxes.sort_values(ascending=False, inplace=True)
        out_fluxes = out_fluxes.round(digits)
        in_fluxes.sort_values(inplace=True)
        in_fluxes = in_fluxes.round(digits)

        table = pd.np.array([
            ((a if a else ''), (b if b else ''), (c if c else '')) for a, b, c
            in zip_longest(['IN FLUXES'] +
                           in_fluxes.to_string().split('\n'), ['OUT FLUXES'] +
                           out_fluxes.to_string().split('\n'), ['OBJECTIVES'] +
                           obj_fluxes.to_string().split('\n'))
        ])

    else:
        boundary_reactions = model.reactions.query(lambda x: x, 'boundary')

        fva_results = pd.DataFrame(
            flux_variability_analysis(model,
                                      reaction_list=boundary_reactions,
                                      fraction_of_optimum=fva,
                                      **solver_args)).T

        half_span = (fva_results.maximum - fva_results.minimum) / 2
        median = fva_results.minimum + half_span
        rxn_data = pd.concat([median, half_span], 1)
        rxn_data.columns = ['x', 'err']

        for r in rxn_data.index:
            rxn_data.loc[r, 'met'] = model.reactions.get_by_id(r).reactants[0]

        rxn_data.set_index('met', drop=True, inplace=True)

        out_fluxes = rxn_data[rxn_data.x > threshold]
        in_fluxes = rxn_data[rxn_data.x < -threshold]

        out_fluxes = out_fluxes.sort_values(by='x', ascending=False)
        out_fluxes = out_fluxes.round(digits)
        in_fluxes = in_fluxes.sort_values(by='x')
        in_fluxes = in_fluxes.round(digits)

        in_fluxes_s = in_fluxes.apply(
            lambda x: u'{0:0.2f} \u00B1 {1:0.2f}'.format(x.x, x.err), axis=1)
        out_fluxes_s = out_fluxes.apply(
            lambda x: u'{0:0.2f} \u00B1 {1:0.2f}'.format(x.x, x.err), axis=1)
        out_fluxes_s = out_fluxes.apply(
            lambda x: text_type(x.x) + u" \u00B1 " + text_type(x.err), axis=1)

        table = pd.np.array([
            ((a if a else ''), (b if b else ''), (c if c else ''))
            for a, b, c in
            zip_longest(['IN FLUXES'] +
                        in_fluxes_s.to_string().split('\n'), ['OUT FLUXES'] +
                        out_fluxes_s.to_string().split('\n'), ['OBJECTIVES'] +
                        obj_fluxes.to_string().split('\n'))
        ])

    print_(u'\n'.join(
        [u"{a:<30}{b:<30}{c:<20}".format(a=a, b=b, c=c) for a, b, c in table]))
Example #45
def _chunker(iterable, n, fillvalue=None):
    "Collect items from an iterable into fixed-length chunks or blocks"
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
Example #46
    def _init_axes(self, data, method='plot',
                   xscale=None, sharex=False, sharey=False,
                   geometry=None, separate=None, **kwargs):
        """Populate this figure with data, creating `Axes` as necessary
        """
        if isinstance(sharex, bool):
            sharex = "all" if sharex else "none"
        if isinstance(sharey, bool):
            sharey = "all" if sharey else "none"

        # parse keywords
        axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
                   key in kwargs}

        # handle geometry and group axes
        if geometry is not None and geometry[0] * geometry[1] == len(data):
            separate = True
        axes_groups = _group_axes_data(data, separate=separate)
        if geometry is None:
            geometry = (len(axes_groups), 1)
        nrows, ncols = geometry
        if axes_groups and nrows * ncols != len(axes_groups):
            # mismatching data and geometry
            raise ValueError("cannot group data into {0} axes with a "
                             "{1}x{2} grid".format(len(axes_groups), nrows,
                                                   ncols))

        # create grid spec
        gs = GridSpec(nrows, ncols)
        axarr = numpy.empty((nrows, ncols), dtype=object)

        # set default labels
        defxlabel = 'xlabel' not in axes_kw
        defylabel = 'ylabel' not in axes_kw
        flatdata = [s for group in axes_groups for s in group]
        for axis in ('x', 'y'):
            unit = _common_axis_unit(flatdata, axis=axis)
            if unit:
                axes_kw.setdefault('{}label'.format(axis),
                                   unit.to_string('latex_inline_dimensional'))

        # create axes for each group and draw each data object
        for group, (row, col) in zip_longest(
                axes_groups, itertools.product(range(nrows), range(ncols)),
                fillvalue=[]):
            # create Axes
            shared_with = {"none": None, "all": axarr[0, 0],
                           "row": axarr[row, 0], "col": axarr[0, col]}
            axes_kw["sharex"] = shared_with[sharex]
            axes_kw["sharey"] = shared_with[sharey]
            axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
            ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)

            # plot data
            plot_func = getattr(ax, method)
            if method in ('imshow', 'pcolormesh'):
                for obj in group:
                    plot_func(obj, **kwargs)
            elif group:
                plot_func(*group, **kwargs)

            # set default axis labels
            for axis, share, pos, n, def_ in (
                    (ax.xaxis, sharex, row, nrows, defxlabel),
                    (ax.yaxis, sharey, col, ncols, defylabel),
            ):
                # hide label if shared axis and not bottom left panel
                if share == 'all' and pos < n - 1:
                    axis.set_label_text('')
                # otherwise set default status
                else:
                    axis.isDefault_label = def_

        return self.axes
Example #47
    def test_filters(self):
        filters = (
            {'protocol': 'all', 'pref': 168, 'kind': 'basic',
             'parent': '1389:', 'basic': {}},
            {'protocol': 'all', 'pref': 168, 'kind': 'basic',
             'parent': '1389:',
             'basic': {'flowid': '1389:a8', 'handle': '0x1',
                       'mask': 0, 'module': 'meta', 'object': 'vlan',
                       'relation': 'eq', 'value': 168}},
            {'protocol': 'all', 'pref': 168, 'kind': 'basic',
             'parent': '1389:',
             'basic': {'flowid': '1389:a8', 'handle': '0x1',
                       'mask': 0, 'module': 'meta', 'object': 'vlan'}},
            {'protocol': 'all', 'pref': 168, 'kind': 'basic',
             'parent': '1389:',
             'basic': {'module': 'meta', 'flowid': '1389:a8',
                       'handle': '0x1'}},
            {'protocol': 'all', 'pref': 49149, 'kind': 'u32', 'u32': {}},
            {'protocol': 'all', 'pref': 49149, 'kind': 'u32', 'u32': {
                'fh': '803:', 'ht_divisor': 1}},
            {'protocol': 'all', 'pref': 49149, 'kind': 'u32', 'u32': {
                'fh': '803::800', 'order': 2048, 'key_ht': 0x803,
                'key_bkt': 0x0, 'terminal': True, 'match': {
                    'value': 0x0, 'mask': 0x0, 'offset': 0x0},
                'actions': [
                    {'order': 1, 'kind': 'mirred', 'action': 'egress_mirror',
                     'target': 'tap1', 'op': 'pipe', 'index': 18, 'ref': 1,
                     'bind': 1}]}},

            {'protocol': 'all', 'pref': 49150, 'kind': 'u32', 'u32': {}},
            {'protocol': 'all', 'pref': 49150, 'kind': 'u32', 'u32': {
                'fh': '802:', 'ht_divisor': 1}},
            {'protocol': 'all', 'pref': 49150, 'kind': 'u32', 'u32': {
                'fh': '802::800', 'order': 2048, 'key_ht': 0x802,
                'key_bkt': 0x0, 'terminal': True, 'match': {
                    'value': 0x0, 'mask': 0x0, 'offset': 0x0},
                'actions': [
                    {'order': 33, 'kind': 'mirred', 'action': 'egress_mirror',
                     'target': 'tap2', 'op': 'pipe', 'index': 17, 'ref': 1,
                     'bind': 1}]}},

            {'protocol': 'all', 'pref': 49152, 'kind': 'u32', 'u32': {}},
            {'protocol': 'all', 'pref': 49152, 'kind': 'u32', 'u32': {
                'fh': '800:', 'ht_divisor': 1}},
            {'protocol': 'all', 'pref': 49152, 'kind': 'u32', 'u32': {
                'fh': '800::800', 'order': 2048, 'key_ht': 0x800,
                'key_bkt': 0x0, 'terminal': True, 'match': {
                    'value': 0x0, 'mask': 0x0, 'offset': 0x0},
                'actions': [
                    {'order': 1, 'kind': 'mirred', 'action': 'egress_mirror',
                     'target': 'target', 'op': 'pipe', 'index': 60, 'ref': 1,
                     'bind': 1},
                    {'order': 2, 'kind': 'mirred', 'action': 'egress_mirror',
                     'target': 'target2', 'op': 'pipe', 'index': 61, 'ref': 1,
                     'bind': 1},
                ]}},
        )
        dirName = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(dirName, "tc_filter_show.out")
        with open(path) as tc_filter_show:
            data = tc_filter_show.read()

        for parsed, correct in zip_longest(tc._filters(None, out=data),
                                           filters):
            self.assertEqual(parsed, correct)
Example #48
0
 def test_qdiscs(self):
     data = '\n'.join((
         'qdisc hfsc 1: root refcnt 2 default 5000',
         'qdisc sfq 10: parent 1:10 limit 127p quantum 1514b',
         'qdisc sfq 20: parent 1:20 limit 127p quantum 1514b',
         'qdisc sfq 30: parent 1:30 limit 127p quantum 30Kb perturb 3sec',
         'qdisc sfq 40: parent 1:40 limit 127p quantum 20Mb perturb 5sec',
         'qdisc ingress ffff: parent ffff:fff1 ----------------',
         'qdisc mq 0: dev wlp3s0 root',
         'qdisc ingress ffff: dev vdsmtest-Z2TMO parent ffff:fff1 '
         '----------------',  # end of previous line
         'qdisc pfifo_fast 0: dev em1 root refcnt 2 bands 3 priomap  '
         '1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1',  # end of previous line
         'qdisc pfifo_fast 0: dev wlp3s0 parent :1 bands 3 priomap  '
         '1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1',  # end of previous line
         'qdisc fq_codel 801e: root refcnt 2 limit 132p flows 15 quantum '
         '400 target 5.0ms interval 150.0ms ecn',  # end of previous line
     ))
     qdiscs = (
         {
             'kind': 'hfsc',
             'root': True,
             'handle': '1:',
             'refcnt': 2,
             'hfsc': {
                 'default': 0x5000
             }
         },
         {
             'kind': 'sfq',
             'handle': '10:',
             'parent': '1:10',
             'sfq': {
                 'limit': 127,
                 'quantum': 1514
             }
         },
         {
             'kind': 'sfq',
             'handle': '20:',
             'parent': '1:20',
             'sfq': {
                 'limit': 127,
                 'quantum': 1514
             }
         },
         {
             'kind': 'sfq',
             'handle': '30:',
             'parent': '1:30',
             'sfq': {
                 'limit': 127,
                 'quantum': 30 * 1024,
                 'perturb': 3
             }
         },
         {
             'kind': 'sfq',
             'handle': '40:',
             'parent': '1:40',
             'sfq': {
                 'limit': 127,
                 'quantum': 20 * 1024**2,
                 'perturb': 5
             }
         },
         {
             'kind': 'ingress',
             'handle': 'ffff:',
             'parent': 'ffff:fff1'
         },
         {
             'kind': 'mq',
             'handle': '0:',
             'dev': 'wlp3s0',
             'root': True
         },
         {
             'kind': 'ingress',
             'handle': 'ffff:',
             'dev': 'vdsmtest-Z2TMO',
             'parent': 'ffff:fff1'
         },
         {
             'kind': 'pfifo_fast',
             'handle': '0:',
             'dev': 'em1',
             'root': True,
             'refcnt': 2,
             'pfifo_fast': {
                 'bands': 3,
                 'priomap': [1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1]
             }
         },
         {
             'kind': 'pfifo_fast',
             'handle': '0:',
             'dev': 'wlp3s0',
             'parent': ':1',
             'pfifo_fast': {
                 'bands': 3,
                 'priomap': [1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1]
             }
         },
         {
             'kind': 'fq_codel',
             'handle': '801e:',
             'root': True,
             'refcnt': 2,
             'fq_codel': {
                 'limit': 132,
                 'flows': 15,
                 'quantum': 400,
                 'target': 5000.0,
                 'interval': 150000.0,
                 'ecn': True
             }
         },
     )
     for parsed, correct in zip_longest(tc.qdiscs(None, out=data), qdiscs):
         self.assertEqual(parsed, correct)
Example #49
0
 def test_classes(self):
     cmd_line_ls_10 = 3200
     cmd_line_ls_m1_20 = 6400
     cmd_line_ls_d_20 = 152
     cmd_line_ls_m2_20 = 3200
     cmd_line_ls_30 = 3500
     cmd_line_ls_m2_5000 = 40000
     data = '\n'.join((
         'class hfsc 1: root',
         'class hfsc 1:10 parent 1: leaf 10: sc m1 0bit d 0us '
         'm2 {0}Kbit'.format(cmd_line_ls_10),  # end of previous line
         'class hfsc 1:20 parent 1: leaf 20: ls m1 {0}Kibit d {1}us '
         'm2 {2}Kbit ul m1 0bit d 0us m2 30000Kbit'.format(
             cmd_line_ls_m1_20, cmd_line_ls_d_20, cmd_line_ls_m2_20),
         'class hfsc 1:30 parent 1: leaf 40: sc m1 0bit d 0us '
         'm2 {0}bit'.format(cmd_line_ls_30),  # end of previous line
         'class hfsc 1:5000 parent 1: leaf 5000: ls m1 0bit d 0us '
         'm2 {0}Kbit'.format(cmd_line_ls_m2_5000),  # end of previous line
     ))
     reported_ls_10 = cmd_line_ls_10 * 1000 // 8
     reported_ls_m1_20 = cmd_line_ls_m1_20 * 1024 // 8
     reported_ls_d_20 = cmd_line_ls_d_20 // 8
     reported_ls_m2_20 = cmd_line_ls_m2_20 * 1000 // 8
     reported_ls_30 = cmd_line_ls_30 // 8
     reported_ls_5000 = cmd_line_ls_m2_5000 * 1000 // 8
     classes = (
         {
             'kind': 'hfsc',
             'root': True,
             'handle': '1:'
         },
         {
             'kind': 'hfsc',
             'handle': '1:10',
             'parent': '1:',
             'leaf': '10:',
             'hfsc': {
                 'ls': {
                     'm1': 0,
                     'd': 0,
                     'm2': reported_ls_10
                 },
                 'rt': {
                     'm1': 0,
                     'd': 0,
                     'm2': reported_ls_10
                 }
             }
         },
         {
             'kind': 'hfsc',
             'handle': '1:20',
             'parent': '1:',
             'leaf': '20:',
             'hfsc': {
                 'ls': {
                     'm1': reported_ls_m1_20,
                     'd': reported_ls_d_20,
                     'm2': reported_ls_m2_20
                 },
                 'ul': {
                     'm1': 0,
                     'd': 0,
                     'm2': 30000 * 1000
                 }
             }
         },
         {
             'kind': 'hfsc',
             'handle': '1:30',
             'parent': '1:',
             'leaf': '40:',
             'hfsc': {
                 'ls': {
                     'm1': 0,
                     'd': 0,
                     'm2': reported_ls_30
                 },
                 'rt': {
                     'm1': 0,
                     'd': 0,
                     'm2': reported_ls_30
                 }
             }
         },
         {
             'kind': 'hfsc',
             'handle': '1:5000',
             'parent': '1:',
             'leaf': '5000:',
             'hfsc': {
                 'ls': {
                     'm1': 0,
                     'd': 0,
                     'm2': reported_ls_5000
                 }
             }
         },
     )
     for parsed, correct in zip_longest(tc.classes(None, out=data),
                                        classes):
         self.assertEqual(parsed, correct)
Example #50
0
def assert_api_compatible_list(old, new):
    for x0, x1 in zip_longest(old, new):
        assert_api_compatible(x0, x1)
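Using zip_longest rather than zip means a length mismatch between old and new cannot be silently dropped: the shorter side is padded with None, which the element-wise check should then reject. A hedged sketch with a stand-in assert_api_compatible (the real helper is defined elsewhere in the source):

from itertools import zip_longest

def assert_api_compatible(x0, x1):
    # hypothetical stand-in: a padded None means an entry was added or removed
    assert x0 is not None and x1 is not None, "API entry added or removed"

def assert_api_compatible_list(old, new):
    for x0, x1 in zip_longest(old, new):
        assert_api_compatible(x0, x1)

assert_api_compatible_list(['f(a)', 'g(b)'], ['f(a)', 'g(b)'])   # passes
# assert_api_compatible_list(['f(a)'], ['f(a)', 'g(b)'])         # would raise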
Example #51
0
def PLEC(ligand, protein, depth_ligand=2, depth_protein=4, distance_cutoff=4.5,
         size=16384, count_bits=True, sparse=True, ignore_hoh=True, bits_info=None):
    """Protein ligand extended connectivity fingerprint. For every pair of
    atoms in contact, compute ECFP and then hash every single, corresponding
    depth.

    Parameters
    ----------
    ligand, protein : oddt.toolkit.Molecule object
            Molecules, which are analysed in order to find interactions.

    depth_ligand, depth_protein : int (default = (2, 4))
        The depth of the fingerprint, i.e. the number of bonds in Morgan
        algorithm. Note: For ECFP2: depth = 1, ECFP4: depth = 2, etc.

    size: int (default = 16384)
        The final fingerprint is folded to this size.

    distance_cutoff: float (default=4.5)
        Cutoff distance for close contacts.

    sparse: bool (default = True)
        Should fingerprints be dense (contain all bits) or sparse (just the on
        bits).

    count_bits: bool (default = True)
        Should bits be counted or kept only as unique values. In the dense
        representation this yields an integer array (count_bits=True) or a
        boolean array (count_bits=False).

    ignore_hoh: bool (default = True)
        Should the water molecules be ignored. This is based on the name of the
        residue ('HOH').

    bits_info: dict or None (default = None)
        If a dictionary is provided, it is filled with information about bit
        contents: the root atom index and depth are recorded for both ligand
        and protein. The dictionary is modified in-place.

    Returns
    -------
    PLEC: numpy array
        fp (size = atoms in contacts * max(depth_protein, depth_ligand))

    """
    result = []
    bit_info_content = []

    # removing h
    protein_mask = protein_no_h = (protein.atom_dict['atomicnum'] != 1)
    if ignore_hoh:
        # a copy is needed, so we do not modify in place
        protein_mask = protein_mask & (protein.atom_dict['resname'] != 'HOH')
    protein_dict = protein.atom_dict[protein_mask]
    ligand_dict = ligand.atom_dict[ligand.atom_dict['atomicnum'] != 1]

    # atoms in contact
    protein_atoms, ligand_atoms = close_contacts(
        protein_dict, ligand_dict, cutoff=distance_cutoff)

    lig_atom_repr = {aidx: _ECFP_atom_repr(ligand, aidx)
                     for aidx in ligand_dict['id'].tolist()}
    # HOH residues might be connected to metal atoms
    prot_atom_repr = {aidx: _ECFP_atom_repr(protein, aidx)
                      for aidx in protein.atom_dict[protein_no_h]['id'].tolist()}

    for ligand_atom, protein_atom in zip(ligand_atoms['id'].tolist(),
                                         protein_atoms['id'].tolist()):
        ligand_ecfp = _ECFP_atom_hash(ligand,
                                      ligand_atom,
                                      depth=depth_ligand,
                                      atom_repr_dict=lig_atom_repr)
        protein_ecfp = _ECFP_atom_hash(protein,
                                       protein_atom,
                                       depth=depth_protein,
                                       atom_repr_dict=prot_atom_repr)
        assert len(ligand_ecfp) == depth_ligand + 1
        assert len(protein_ecfp) == depth_protein + 1
        # fillvalue is the zip_longest parameter used when ligand_ecfp and
        # protein_ecfp differ in length: the shorter side keeps repeating
        # its last (deepest) ECFP hash
        if depth_ligand < depth_protein:
            fillvalue = depth_ligand, ligand_ecfp[-1]
        else:
            fillvalue = depth_protein, protein_ecfp[-1]
        for (ligand_depth, ligand_bit), (protein_depth, protein_bit) in zip_longest(
                enumerate(ligand_ecfp), enumerate(protein_ecfp), fillvalue=fillvalue):
            result.append(hash32((ligand_bit, protein_bit)))
            if bits_info is not None:
                bit_info_content.append(PLEC_bit_info_record(
                    ligand_root_atom_idx=ligand_atom,
                    ligand_depth=ligand_depth,
                    protein_root_atom_idx=protein_atom,
                    protein_depth=protein_depth
                ))

    # folding and sorting
    plec = fold(np.array(result), size=size)

    # add bits info after folding
    if bits_info is not None:
        sort_indexes = np.argsort(plec)
        plec = plec[sort_indexes].astype(np.min_scalar_type(size))
        # sort bit info according to folded PLEC
        for bit_number, bit_info_idx in zip(plec, sort_indexes):
            if bit_number not in bits_info:
                bits_info[bit_number] = set()
            bits_info[bit_number].add(bit_info_content[bit_info_idx])
    else:
        plec = np.sort(plec).astype(np.min_scalar_type(size))

    # count_bits
    if not count_bits:
        plec = np.unique(plec)

    # sparse or dense FP
    if not sparse:
        plec = sparse_to_dense(plec, size=size)
    return plec
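The zip_longest call in the contact loop pairs ligand and protein ECFP hashes depth by depth; since depth_ligand and depth_protein usually differ, the shorter list is padded by repeating its deepest (depth, hash) pair via fillvalue. A minimal sketch with made-up hash values:

from itertools import zip_longest

ligand_ecfp = [101, 102, 103]             # depths 0..2 (depth_ligand = 2)
protein_ecfp = [201, 202, 203, 204, 205]  # depths 0..4 (depth_protein = 4)

fillvalue = (2, ligand_ecfp[-1])          # ligand side is shorter here
pairs = list(zip_longest(enumerate(ligand_ecfp), enumerate(protein_ecfp),
                         fillvalue=fillvalue))
print(pairs[-2:])
# [((2, 103), (3, 204)), ((2, 103), (4, 205))]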
Example #52
0
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
                warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
    """Typecheck the size, shape and dtype of a numpy array, with optional
    casting.

    Parameters
    ----------
    val : {np.ndarray, None}
        The array to check
    dtype : {np.dtype, str}
        The dtype you'd like the array to have
    ndim : int
        The number of dimensions you'd like the array to have
    name : str
        name of the array. This is used when throwing exceptions, so that
        we can describe to the user which array is messed up.
    length : int, optional
        How long should the array be?
    can_be_none : bool
        Is ``val == None`` acceptable?
    shape : tuple, optional
        What should the shape of the array be? If the provided tuple has
        Nones in it, those will be semantically interpreted as matching
        any length in that dimension. So, for example, using the shape
        spec ``(None, None, 3)`` will ensure that the last dimension is of
        length three without constraining the first two dimensions
    warn_on_cast : bool, default=True
        Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
        dimensions is deficient by one compared to your specification. For
        instance, if you're trying to get out an array of ``ndim == 3``,
        but the user provides an array of ``shape == (10, 10)``, a new axis will
        be created with length 1 in front, so that the return value is of
        shape ``(1, 10, 10)``.

    Notes
    -----
    The returned value will always be C-contiguous.

    Returns
    -------
    typechecked_val : np.ndarray, None
        If `val=None` and `can_be_none=True`, then this will return None.
        Otherwise, it will return val (or a copy of val). If the dtype wasn't
        right, it will be cast to the right dtype. If the array was not
        C-contiguous, it will be copied as well.

    """
    if can_be_none and val is None:
        return None

    if not isinstance(val, np.ndarray):
        # special case: if the user is looking for a 1d array, and
        # they request newaxis upconversion, and provided a scalar
        # then we should reshape the scalar to be a 1d length-1 array
        if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
            val = np.array([val])
        else:
            raise TypeError(("%s must be numpy array. "
                             " You supplied type %s" % (name, type(val))))

    if warn_on_cast and val.dtype != dtype:
        warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
                      TypeCastPerformanceWarning)

    if not val.ndim == ndim:
        if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
            val = val[np.newaxis, ...]
        else:
            raise ValueError(("%s must be ndim %s. "
                              "You supplied %s" % (name, ndim, val.ndim)))

    val = np.ascontiguousarray(val, dtype=dtype)

    if length is not None and len(val) != length:
        raise ValueError(("%s must be length %s. "
                          "You supplied %s" % (name, length, len(val))))

    if shape is not None:
        # the shape spec given by the user can look like (None, None, 3)
        # which indicates that ANY length is accepted in dimension 0 or
        # dimension 1
        sentinel = object()
        error = ValueError(("%s must be shape %s. You supplied "
                            "%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
        for a, b in zip_longest(val.shape, shape, fillvalue=sentinel):
            if a is sentinel or b is sentinel:
                # if the sentinel was reached, the number of dimensions didn't
                # match the shape spec; this really shouldn't happen
                raise error
            if b is None:
                # if the user's shape spec has a None in it, it matches anything
                continue
            if a != b:
                # check for equality
                raise error

    return val
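The shape check above pairs the actual shape with the user's spec via zip_longest and a private sentinel object as fillvalue, so a difference in the number of dimensions surfaces as the sentinel rather than being silently truncated (as plain zip would do). A self-contained sketch of the same pattern:

from itertools import zip_longest

def shape_matches(actual, spec):
    """Return True if `actual` matches `spec`, where None in spec means 'any length'."""
    sentinel = object()
    for a, b in zip_longest(actual, spec, fillvalue=sentinel):
        if a is sentinel or b is sentinel:   # differing number of dimensions
            return False
        if b is not None and a != b:
            return False
    return True

print(shape_matches((10, 10, 3), (None, None, 3)))  # True
print(shape_matches((10, 3), (None, None, 3)))      # False (ndim mismatch)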
Example #53
0
def wrap_sequence(n, sequence, fillvalue=''):
    args = [iter(sequence)] * n
    for line in zip_longest(*args, fillvalue=fillvalue):
        yield ''.join(line + ("\n",))
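A usage sketch (assuming the definition above with zip_longest imported from itertools): the same iterator is repeated n times, so each zip_longest tuple is one chunk of n characters, and the final short chunk is padded with the empty-string fillvalue:

for line in wrap_sequence(10, 'ACGTACGTACGTACGTACG'):
    print(repr(line))
# 'ACGTACGTAC\n'
# 'GTACGTACG\n'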
Example #54
0
    def add_parent_datums(self, datums, module):

        def update_refs(datum_meta, changed_ids_):
            """
            Update references in the nodeset of the given datum, if necessary

            e.g. "instance('casedb')/casedb/case[@case_type='guppy']
                                                [@status='open']
                                                [index/parent=instance('commcaresession')/session/data/parent_id]"
            is updated to
                 "instance('casedb')/casedb/case[@case_type='guppy']
                                                [@status='open']
                                                [index/parent=instance('commcaresession')/session/data/case_id]"
                                                                                                       ^^^^^^^
            because the case referred to by "parent_id" in the child module has the ID "case_id" in the parent
            module.
            """
            def _apply_change_to_datum_attr(datum, attr, change):
                xpath = getattr(datum, attr, None)
                if xpath:
                    old = session_var(change['old_id'])
                    new = session_var(change['new_id'])
                    setattr(datum, attr, xpath.replace(old, new))

            datum = datum_meta.datum
            action = datum_meta.action
            if action:
                if hasattr(action, 'case_indices'):
                    # This is an advanced module
                    for case_index in action.case_indices:
                        if case_index.tag in changed_ids_:
                            # update any reference to previously changed datums
                            for change in changed_ids_[case_index.tag]:
                                _apply_change_to_datum_attr(datum, 'nodeset', change)
                                _apply_change_to_datum_attr(datum, 'function', change)
                else:
                    if 'basic' in changed_ids_:
                        for change in changed_ids_['basic']:
                            _apply_change_to_datum_attr(datum, 'nodeset', change)
                            _apply_change_to_datum_attr(datum, 'function', change)

        def rename_other_id(this_datum_meta_, parent_datum_meta_, datum_ids_):
            """
            If the ID of parent datum matches the ID of another datum in this
            form, rename the ID of the other datum in this form

            e.g. if parent datum ID == "case_id" and there is a datum in this
            form with the ID of "case_id" too, then rename the ID of the datum
            in this form to "case_id_<case_type>" (where <case_type> is the
            case type of the datum in this form).
            """
            changed_id = {}
            parent_datum = parent_datum_meta_.datum
            action = this_datum_meta_.action
            if action:
                if parent_datum.id in datum_ids_:
                    datum = datum_ids_[parent_datum.id]
                    new_id = '_'.join((datum.datum.id, datum.case_type))
                    # Only advanced module actions have a case_tag attribute.
                    case_tag = getattr(action, 'case_tag', 'basic')
                    changed_id = {
                        case_tag: {
                            'old_id': datum.datum.id,
                            'new_id': new_id,
                        }
                    }
                    datum.datum.id = new_id
            return changed_id

        def get_changed_id(this_datum_meta_, parent_datum_meta_):
            """
            Maps IDs in the child module to IDs in the parent module

            e.g. The case with the ID "parent_id" in the child module has the
            ID "case_id" in the parent module.
            """
            changed_id = {}
            action = this_datum_meta_.action
            if action:
                case_tag = getattr(action, 'case_tag', 'basic')
                changed_id = {
                    case_tag: {
                        "old_id": this_datum_meta_.datum.id,
                        "new_id": parent_datum_meta_.datum.id
                    }
                }
            return changed_id

        def get_datums(module_):
            """
            Return the datums of the first form in the given module
            """
            datums_ = []
            if module_:
                try:
                    # assume that all forms in the module have the same case management
                    form = module_.get_form(0)
                except FormNotFoundException:
                    pass
                else:
                    datums_.extend(self.get_datums_meta_for_form_generic(form))

            return datums_

        def append_update(dict_, new_dict):
            for key in new_dict:
                dict_[key].append(new_dict[key])

        parent_datums = get_datums(module.root_module)
        if parent_datums:
            # we need to try and match the datums to the root module so that
            # the navigation on the phone works correctly
            # 1. Add in any datums that don't require user selection e.g. new case IDs
            # 2. Match the datum ID for datums that appear in the same position and
            #    will be loading the same case type
            # see advanced_app_features#child-modules in docs
            datum_ids = {d.datum.id: d for d in datums}
            index = 0
            changed_ids_by_case_tag = defaultdict(list)
            for this_datum_meta, parent_datum_meta in list(zip_longest(datums, parent_datums)):
                if this_datum_meta:
                    update_refs(this_datum_meta, changed_ids_by_case_tag)
                if not parent_datum_meta:
                    continue
                if not this_datum_meta or this_datum_meta.datum.id != parent_datum_meta.datum.id:
                    if not parent_datum_meta.requires_selection:
                        # Add parent datums of opened subcases and automatically-selected cases
                        datums.insert(index, parent_datum_meta._replace(from_parent=True))
                    elif this_datum_meta and this_datum_meta.case_type == parent_datum_meta.case_type:
                        append_update(changed_ids_by_case_tag,
                                      rename_other_id(this_datum_meta, parent_datum_meta, datum_ids))
                        append_update(changed_ids_by_case_tag,
                                      get_changed_id(this_datum_meta, parent_datum_meta))
                        this_datum_meta.datum.id = parent_datum_meta.datum.id
                index += 1
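The zip_longest over datums and parent_datums walks the two datum lists position by position; whichever list is shorter is padded with None, which the loop treats as "no counterpart at this position". A stripped-down sketch of that alignment (the IDs are illustrative, not taken from the application):

from itertools import zip_longest

child_datums = ['case_id_guppy', 'case_id']
parent_datums = ['case_id']

for this_datum, parent_datum in zip_longest(child_datums, parent_datums):
    if parent_datum is None:
        # extra datum in the child module; nothing to match against
        print(this_datum, '-> no parent counterpart')
    elif this_datum != parent_datum:
        print(this_datum, '-> would be renamed to', parent_datum)
    else:
        print(this_datum, '-> matches parent')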
Example #55
0
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)
Example #56
0
def merge_doctrees(old, new, condition):
    # type: (nodes.Node, nodes.Node, Any) -> Iterator[nodes.Node]
    """Merge the `old` doctree with the `new` one while looking at nodes
    matching the `condition`.

    Each node which replaces another one or has been added to the `new` doctree
    will be yielded.

    :param condition:
        A callable which returns either ``True`` or ``False`` for a given node.
    """
    old_iter = old.traverse(condition)
    new_iter = new.traverse(condition)
    old_nodes = []
    new_nodes = []
    ratios = {}
    seen = set()
    # compare the nodes of each doctree in order
    for old_node, new_node in zip_longest(old_iter, new_iter):
        if old_node is None:
            new_nodes.append(new_node)
            continue
        if not getattr(old_node, 'uid', None):
            # maybe config.gettext_uuid has been changed.
            old_node.uid = uuid4().hex
        if new_node is None:
            old_nodes.append(old_node)
            continue
        ratio = get_ratio(old_node.rawsource, new_node.rawsource)
        if ratio == 0:
            new_node.uid = old_node.uid
            seen.add(new_node)
        else:
            ratios[old_node, new_node] = ratio
            old_nodes.append(old_node)
            new_nodes.append(new_node)
    # calculate the ratios for each unequal pair of nodes; should we stumble
    # on a pair which is equal, we set the uid and add it to the seen ones
    for old_node, new_node in product(old_nodes, new_nodes):
        if new_node in seen or (old_node, new_node) in ratios:
            continue
        ratio = get_ratio(old_node.rawsource, new_node.rawsource)
        if ratio == 0:
            new_node.uid = old_node.uid
            seen.add(new_node)
        else:
            ratios[old_node, new_node] = ratio
    # choose the old node with the best ratio for each new node and reuse its
    # uid as long as the ratio is under a certain value; above that value we
    # consider the nodes not merely changed but different
    ratios = sorted(ratios.items(), key=itemgetter(1))  # type: ignore
    for (old_node, new_node), ratio in ratios:
        if new_node in seen:
            continue
        else:
            seen.add(new_node)
        if ratio < VERSIONING_RATIO:
            new_node.uid = old_node.uid
        else:
            new_node.uid = uuid4().hex
            yield new_node
    # create new uuids for any new node we left out earlier; this happens
    # if one or more nodes are simply added.
    for new_node in set(new_nodes) - seen:
        new_node.uid = uuid4().hex
        yield new_node
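The first loop above relies on zip_longest to classify nodes: a None on the old side marks a node that only exists in the new doctree, a None on the new side marks one that disappeared, and any other pair is compared by ratio. A generic sketch of that classification (get_ratio and the node objects are stubbed here):

from itertools import zip_longest

def get_ratio(old, new):
    # stub: 0 means "identical", larger means "more different"
    return 0 if old == new else 50

old_nodes_src = ['para-1', 'para-2', 'para-3']
new_nodes_src = ['para-1', 'para-2b']

for old_node, new_node in zip_longest(old_nodes_src, new_nodes_src):
    if old_node is None:
        print('added:', new_node)
    elif new_node is None:
        print('removed:', old_node)
    elif get_ratio(old_node, new_node) == 0:
        print('unchanged:', new_node)
    else:
        print('candidate pair:', old_node, '->', new_node)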
Example #57
0
def test_zip_longest():
    from six.moves import zip_longest
    it = zip_longest(range(2), range(1))

    assert six.advance_iterator(it) == (0, 0)
    assert six.advance_iterator(it) == (1, None)
Example #58
0
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks"""
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
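A usage sketch of the recipe above; the final block is padded with the fillvalue:

print(list(grouper('ABCDEFG', 3, 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]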
Example #59
0
 def stagger_group(list_):
     return ut.filter_Nones(ut.iflatten(zip_longest(*list_)))
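stagger_group interleaves the sub-lists round-robin and drops the None padding that zip_longest inserts when the sub-lists differ in length; roughly the following, using only the standard library in place of the ut helpers:

from itertools import chain, zip_longest

def stagger_group(list_):
    return [x for x in chain.from_iterable(zip_longest(*list_)) if x is not None]

print(stagger_group([[1, 2, 3], ['a', 'b']]))
# [1, 'a', 2, 'b', 3]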
Example #60
0
def deleteCenters(meshFaces, uvFaces, centerDel, pBar=None):
    """ Delete the given vertices and connected edges from a face representation
        to give a new representation.

    Parameters
    ----------
    meshFaces : [[int, ...], ...]
        The list of lists of vertex indices making up faces
    uvFaces : [[int, ...], ...]
        The list of lists of uv indices making up uvFaces
    centerDel : set(int)
        A set of vertices to delete. These were the vertices added
        to the centers of the faces when subdividing.
    pBar : QProgressDialog or None
        An optional progress bar

    Returns
    -------
    : [[int, ...], ...]
        The new list of faces of the mesh
    : [[int, ...], ...]
        The new list of uvfaces of the mesh
    : {int: (int, int)}
        A dict of a deleted edge-midpoint to its two existing neighbor verts
    : {int: (int, int)}
        A dict of a deleted uv edge-midpoint to its two existing neighbor uvs
    """
    # For each deleted index, grab the neighboring faces,
    # and twist the faces so the deleted index is first
    cds = set(centerDel)
    faceDelDict = {}
    uvDelDict = {}
    uvFaces = uvFaces or []
    for face, uvFace in zip_longest(meshFaces, uvFaces):
        fi = cds.intersection(face)
        # If we are a subdivided mesh, then each face will have exactly one
        # vertex that is part of the deletion set
        if len(fi) != 1:
            raise ValueError("Found a face with an unrecognized connectivity")
        # Get that one vert
        idx = fi.pop()
        # Each face is a cycle. Rotate the cycle
        # so that idx is first in the list
        rv = face.index(idx)
        rFace = face[rv:] + face[:rv]
        faceDelDict.setdefault(idx, []).append(rFace)

        if uvFace is not None:
            rUVFace = uvFace[rv:] + uvFace[:rv]
            uvDelDict.setdefault(idx, []).append(rUVFace)

    newFaces = []
    nUVFaces = []
    wings = {}
    uvWings = {}

    if pBar is not None:
        pBar.setValue(0)
        pBar.setMaximum(len(faceDelDict))

    chk = -1
    for idx, rFaces in six.iteritems(faceDelDict):
        chk += 1
        if pBar is not None:
            pBar.setValue(chk)
            QApplication.processEvents()

        ruvFaces = uvDelDict.get(idx, [])
        # The faces are guaranteed to be in a single loop cycle
        # so I don't have to handle any annoying edge cases! Yay!
        faceEnds = {
            f[1]: (f[2], f[3], uvf) for f, uvf in zip_longest(rFaces, ruvFaces)
        }  # face ends

        end = rFaces[-1][-1]  # get an arbitrary face to start with
        newFace = []
        nUVFace = []
        while faceEnds:
            try:
                diag, nxt, uvf = faceEnds.pop(end)
            except KeyError:
                print("rFaces", rFaces)
                print("fe", faceEnds)
                raise
            if uvf is not None:
                try:
                    nUVFace.append(uvf[2])
                    uvWings.setdefault(uvf[1], []).append(uvf[2])
                    uvWings.setdefault(uvf[3], []).append(uvf[2])
                except IndexError:
                    print("UVF", uvf, chk)
                    raise

            newFace.append(diag)
            wings.setdefault(end, []).append(diag)
            wings.setdefault(nxt, []).append(diag)

            end = nxt
        newFaces.append(newFace)
        if nUVFace:
            nUVFaces.append(nUVFace)
    nUVFaces = nUVFaces or None

    return newFaces, nUVFaces, wings, uvWings
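Because uvFaces defaults to an empty list, the zip_longest at the top of the function pads every mesh face with None when no UVs are supplied, and the UV bookkeeping is simply skipped. A minimal illustration (the faces here are hypothetical):

from itertools import zip_longest

meshFaces = [[0, 1, 8, 3], [1, 2, 4, 8]]
uvFaces = []  # no UV data for this mesh

for face, uvFace in zip_longest(meshFaces, uvFaces or []):
    print(face, uvFace)   # every uvFace is None, so the UV branches are skipped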