예제 #1
0
 def test_sorter(self):
     """Verify caching_iter applies its sorter callable to the wrapped iterable."""
     get_inst = self._py3k_protection
     expected = tuple(range(1, 101))
     assert get_inst(range(100, 0, -1), sorted) == expected
     ci = caching_iter(range(100, 0, -1), sorted)
     assert ci
     assert tuple(ci) == expected
     ci = caching_iter(range(50, 0, -1), sorted)
     assert ci[10] == 11
     assert tuple(ci) == tuple(range(1, 51))
예제 #2
0
 def test_sorter(self):
     """A caching_iter built with a sorter must yield the sorted sequence."""
     expected = tuple(range(1, 101))
     assert tuple(caching_iter(range(100, 0, -1), sorted)) == expected
     ci = caching_iter(range(100, 0, -1), sorted)
     assert ci
     assert tuple(iter(ci)) == expected
     ci = caching_iter(range(50, 0, -1), sorted)
     assert ci[10] == 11
     assert tuple(iter(ci)) == tuple(range(1, 51))
예제 #3
0
 def test_sorter(self):
     """Sorter callable must be honoured both via the helper and directly."""
     get_inst = self._py3k_protection
     sorted_run = tuple(range(1, 101))
     assert get_inst(range(100, 0, -1), sorted) == sorted_run
     ci = caching_iter(range(100, 0, -1), sorted)
     assert ci
     assert tuple(ci) == sorted_run
     ci = caching_iter(range(50, 0, -1), sorted)
     assert ci[10] == 11
     assert tuple(ci) == tuple(range(1, 51))
예제 #4
0
 def test_sorter(self):
     """Py2/py3-compatible check that the sorter callable is honoured."""
     get_inst = self._py3k_protection
     expected = tuple(xrange(1, 101))
     self.assertEqual(get_inst(xrange(100, 0, -1), sorted), expected)
     ci = caching_iter(xrange(100, 0, -1), sorted)
     self.assertTrue(ci)
     if compatibility.is_py3k:
         # py3k comparison requires materializing first
         ci = tuple(ci)
     self.assertEqual(ci, expected)
     ci = caching_iter(xrange(50, 0, -1), sorted)
     self.assertEqual(ci[10], 11)
     if compatibility.is_py3k:
         ci = tuple(ci)
     self.assertEqual(tuple(xrange(1, 51)), ci)
예제 #5
0
 def test_sorter(self):
     """Py2/py3-compatible check that the sorter callable is honoured."""
     get_inst = self._py3k_protection
     want = tuple(xrange(1, 101))
     self.assertEqual(get_inst(xrange(100, 0, -1), sorted), want)
     ci = caching_iter(xrange(100, 0, -1), sorted)
     self.assertTrue(ci)
     if compatibility.is_py3k:
         # py3k comparison requires materializing first
         ci = tuple(ci)
     self.assertEqual(ci, want)
     ci = caching_iter(xrange(50, 0, -1), sorted)
     self.assertEqual(ci[10], 11)
     if compatibility.is_py3k:
         ci = tuple(ci)
     self.assertEqual(tuple(xrange(1, 51)), ci)
예제 #6
0
 def test_getitem(self):
     """Negative indexing works; out-of-range indices raise IndexError."""
     ci = caching_iter(range(20))
     assert ci[-1] == 19
     for bad_index in (-21, 21):
         with pytest.raises(IndexError):
             operator.getitem(ci, bad_index)
예제 #7
0
 def test_edgecase(self):
     """Out-of-order indexed access must not corrupt the cache."""
     ci = caching_iter(xrange(5))
     self.assertEqual(ci[0], 0)
     # off-by-one access pattern that has regressed before
     self.assertEqual(ci[2], 2)
     self.assertEqual(ci[1], 1)
     self.assertEqual(list(ci), list(xrange(5)))
예제 #8
0
파일: misc.py 프로젝트: houseofsuns/pkgcore
 def match(self, restrict):
     """Return the matches for *restrict*, memoizing them as a caching_iter."""
     cached = self.__cache__.get(restrict)
     if cached is None:
         # lazily pull from the db; caching_iter keeps results in memory
         matches = self.__db__.itermatch(restrict, sorter=self.__strategy__)
         cached = self.__cache__[restrict] = caching_iter(matches)
     return cached
예제 #9
0
파일: glsa.py 프로젝트: radhermit/pkgcore
def find_vulnerable_repo_pkgs(glsa_src, repo, grouped=False, arch=None):
    """generator yielding GLSA restrictions, and vulnerable pkgs from a repo.

    :param glsa_src: GLSA pkgset to pull vulnerabilities from
    :param repo: repo to scan for vulnerable packages
    :param grouped: if grouped, combine glsa restrictions into one restriction
        (thus yielding a pkg only once)
    :param arch: arch to scan for, x86 for example
    """

    restricts = glsa_src.pkg_grouped_iter() if grouped else iter(glsa_src)
    if arch is None:
        wrapper = lambda pkg: pkg
    else:
        # normalize arch to a tuple of keywords to force onto matches
        arch = (arch,) if isinstance(arch, str) else tuple(arch)
        wrapper = lambda pkg: mutated.MutatedPkg(pkg, {"keywords": arch})
    for restrict in restricts:
        matches = caching_iter(
            wrapper(pkg) for pkg in repo.itermatch(restrict, sorter=sorted))
        if matches:
            yield restrict, matches
예제 #10
0
 def test_getitem(self):
     """Negative indexing works; out-of-range indices raise IndexError."""
     ci = caching_iter(range(20))
     assert ci[-1] == 19
     for oob in (-21, 21):
         with pytest.raises(IndexError):
             operator.getitem(ci, oob)
예제 #11
0
 def test_edgecase(self):
     """Out-of-order indexed access must not corrupt the cache."""
     ci = caching_iter(range(5))
     assert ci[0] == 0
     # off-by-one access pattern that has regressed before
     assert ci[2] == 2
     assert ci[1] == 1
     assert list(ci) == list(range(5))
예제 #12
0
def find_vulnerable_repo_pkgs(glsa_src, repo, grouped=False, arch=None):
    """generator yielding GLSA restrictions, and vulnerable pkgs from a repo.

    :param glsa_src: GLSA pkgset to pull vulnerabilities from
    :param repo: repo to scan for vulnerable packages
    :param grouped: if grouped, combine glsa restrictions into one restriction
        (thus yielding a pkg only once)
    :param arch: arch to scan for, x86 for example
    """

    restricts = glsa_src.pkg_grouped_iter() if grouped else iter(glsa_src)
    if arch is None:
        wrapper = lambda pkg: pkg
    else:
        # normalize arch to a tuple of keywords to force onto matches
        arch = (arch,) if isinstance(arch, basestring) else tuple(arch)
        wrapper = lambda pkg: mutated.MutatedPkg(pkg, {"keywords": arch})
    for restrict in restricts:
        matches = caching_iter(
            wrapper(pkg) for pkg in repo.itermatch(restrict, sorter=sorted))
        if matches:
            yield restrict, matches
예제 #13
0
 def test_edgecase(self):
     """Interleaved indexed access must leave the cache consistent."""
     ci = caching_iter(range(5))
     assert ci[0] == 0
     # off-by-one access pattern that has regressed before
     assert ci[2] == 2
     assert ci[1] == 1
     assert list(ci) == list(range(5))
예제 #14
0
 def test_cmp(self):
     """Rich comparisons of materialized caching_iter results against tuples."""
     big, small = tuple(range(100)), tuple(range(90))
     assert tuple(caching_iter(range(100))) == big
     assert tuple(caching_iter(range(90))) != big
     assert tuple(caching_iter(range(100))) > small
     assert not tuple(caching_iter(range(90))) > big
     assert tuple(caching_iter(range(100))) >= big
     assert tuple(caching_iter(range(90))) < big
     assert not tuple(caching_iter(range(100))) < small
     assert tuple(caching_iter(range(90))) <= big
예제 #15
0
    def feed(self, pkg, reporter):
        """Scan *pkg*'s dep attrs for nonexistent deps, then run profile checks.

        :param pkg: package being fed through the check
        :param reporter: reporter that result objects are added to
        """
        # query_cache gets caching_iter partial repo searches shoved into it-
        # reason is simple, it's likely that versions of this pkg probably
        # use similar deps- so we're forcing those packages that were
        # accessed for atom matching to remain in memory.
        # end result is less going to disk

        if vcs_eclasses.intersection(pkg.inherited):
            # vcs ebuild that better not be visible
            self.check_visibility_vcs(pkg, reporter)

        suppressed_depsets = []
        for attr, depset in (("depends", pkg.depends),
                             ("rdepends", pkg.rdepends), ("post_rdepends",
                                                          pkg.post_rdepends)):
            nonexistent = set()
            try:
                for orig_node in visit_atoms(pkg, depset):

                    # the use-dep-stripped atom is the cache key
                    node = strip_atom_use(orig_node)
                    if node not in self.query_cache:
                        if node in self.profiles.global_insoluble:
                            nonexistent.add(node)
                            # insert an empty tuple, so that tight loops further
                            # on don't have to use the slower get method
                            self.query_cache[node] = ()

                        else:
                            matches = caching_iter(
                                self.options.search_repo.itermatch(node))
                            if matches:
                                self.query_cache[node] = matches
                                if orig_node is not node:
                                    self.query_cache[str(orig_node)] = matches
                            elif not node.blocks:
                                # no matches and it's not a blocker: record as
                                # globally unresolvable for future packages
                                nonexistent.add(node)
                                self.query_cache[node] = ()
                                self.profiles.global_insoluble.add(node)
                    elif not self.query_cache[node]:
                        nonexistent.add(node)

            except _BlockMemoryExhaustion as e:
                # too many matches to hold; flag this depset as uncheckable
                reporter.add_report(UncheckableDep(pkg, attr))
                suppressed_depsets.append(attr)
            if nonexistent:
                reporter.add_report(NonExistentDeps(pkg, attr, nonexistent))

        del nonexistent

        # second pass: evaluate remaining depsets against profiles
        for attr, depset in (("depends", pkg.depends),
                             ("rdepends", pkg.rdepends), ("post_rdepends",
                                                          pkg.post_rdepends)):
            if attr in suppressed_depsets:
                continue
            for edepset, profiles in self.depset_cache.collapse_evaluate_depset(
                    pkg, attr, depset):
                self.process_depset(pkg, attr, edepset, profiles, reporter)
예제 #16
0
    def feed(self, pkg, reporter):
        """Scan *pkg*'s dep attrs for nonexistent deps, then run profile checks.

        :param pkg: package being fed through the check
        :param reporter: reporter that result objects are added to
        """
        # query_cache gets caching_iter partial repo searches shoved into it-
        # reason is simple, it's likely that versions of this pkg probably
        # use similar deps- so we're forcing those packages that were
        # accessed for atom matching to remain in memory.
        # end result is less going to disk

        if vcs_eclasses.intersection(pkg.inherited):
            # vcs ebuild that better not be visible
            self.check_visibility_vcs(pkg, reporter)

        suppressed_depsets = []
        for attr, depset in (("depends", pkg.depends),
                             ("rdepends", pkg.rdepends),
                             ("post_rdepends", pkg.post_rdepends)):
            nonexistent = set()
            try:
                for orig_node in visit_atoms(pkg, depset):

                    # the use-dep-stripped atom is the cache key
                    node = strip_atom_use(orig_node)
                    if node not in self.query_cache:
                        if node in self.profiles.global_insoluble:
                            nonexistent.add(node)
                            # insert an empty tuple, so that tight loops further
                            # on don't have to use the slower get method
                            self.query_cache[node] = ()

                        else:
                            matches = caching_iter(
                                self.options.search_repo.itermatch(node))
                            if matches:
                                self.query_cache[node] = matches
                                if orig_node is not node:
                                    self.query_cache[str(orig_node)] = matches
                            elif not node.blocks:
                                # no matches and it's not a blocker: record as
                                # globally unresolvable for future packages
                                nonexistent.add(node)
                                self.query_cache[node] = ()
                                self.profiles.global_insoluble.add(node)
                    elif not self.query_cache[node]:
                        nonexistent.add(node)

            except _BlockMemoryExhaustion as e:
                # too many matches to hold; flag this depset as uncheckable
                reporter.add_report(UncheckableDep(pkg, attr))
                suppressed_depsets.append(attr)
            if nonexistent:
                reporter.add_report(NonExistentDeps(pkg, attr, nonexistent))

        del nonexistent

        # second pass: evaluate remaining depsets against profiles
        for attr, depset in (("depends", pkg.depends),
                             ("rdepends", pkg.rdepends),
                             ("post_rdepends", pkg.post_rdepends)):
            if attr in suppressed_depsets:
                continue
            for edepset, profiles in self.depset_cache.collapse_evaluate_depset(pkg, attr, depset):
                self.process_depset(pkg, attr, edepset, profiles, reporter)
예제 #17
0
파일: plan.py 프로젝트: filmor/pkgcore
    def _viable(self, stack, mode, atom, dbs, drop_cycles, limit_to_vdb):
        """
        internal function to discern if an atom is viable, returning
        the choicepoint/matches iterator if viable.

        :param stack: current stack
        :type stack: :obj:`resolver_stack`
        :param mode: type of dependency (depend/rdepend)
        :type mode: str
        :param atom: atom for the current package
        :type atom: :obj:`pkgcore.ebuild.atom.atom`
        :param dbs: db list to walk
        :param drop_cycles: boolean controlling whether to drop dep cycles
        :param limit_to_vdb: boolean controlling considering pkgs only from the vdb
        :return: 3 possible; None (not viable), True (presolved),
          :obj:`caching_iter` (not solved, but viable), :obj:`choice_point`
        """
        choices = ret = None
        if atom in self.insoluble:
            # already proven unsolvable; skip querying the dbs entirely.
            ret = ((False, "globally insoluble"), {})
            matches = ()
        else:
            matches = self.state.match_atom(atom)
            if matches:
                ret = ((True,), {"pre_solved": True})
            else:
                # not in the plan thus far; lazily scan the dbs.
                matches = caching_iter(dbs.itermatch(atom))
                if matches:
                    choices = choice_point(atom, matches)
                    # ignore what dropped out, at this juncture we don't care.
                    choices.reduce_atoms(self.insoluble)
                    if not choices:
                        # and was intractable because it has a hard dep on an
                        # unsolvable atom.
                        ret = ((False, "pruning of insoluble deps "
                            "left no choices"), {})
                else:
                    ret = ((False, "no matches"), {})

        if choices is None:
            choices = choice_point(atom, matches)

        stack.add_frame(mode, atom, choices, dbs,
            self.state.current_state, drop_cycles, vdb_limited=limit_to_vdb)

        if not limit_to_vdb and not matches:
            # record insolubility so future lookups short-circuit above.
            self.insoluble.add(atom)
        if ret is not None:
            self.notify_viable(stack, atom, *ret[0], **ret[1])
            # ret[0][0] is always a literal bool; direct truth test instead of
            # the non-idiomatic `== True` comparison (PEP 8 E712).
            if ret[0][0]:
                state.add_backref_op(choices, choices.current_pkg).apply(self.state)
                return True
            return None
        return choices, matches
예제 #18
0
파일: plan.py 프로젝트: vapier/pkgcore
    def _viable(self, stack, mode, atom, dbs, drop_cycles, limit_to_vdb):
        """
        internal function to discern if an atom is viable, returning
        the choicepoint/matches iterator if viable.

        :param stack: current stack
        :type stack: :obj:`resolver_stack`
        :param mode: type of dependency (depends/rdepends)
        :type mode: str
        :param atom: atom for the current package
        :type atom: :obj:`pkgcore.ebuild.atom.atom`
        :param dbs: db list to walk
        :param drop_cycles: boolean controlling whether to drop dep cycles
        :param limit_to_vdb: boolean controlling considering pkgs only from the vdb
        :return: 3 possible; None (not viable), True (presolved),
          :obj:`caching_iter` (not solved, but viable), :obj:`choice_point`
        """
        choices = ret = None
        if atom in self.insoluble:
            # already proven unsolvable; skip querying the dbs entirely.
            ret = ((False, "globally insoluble"), {})
            matches = ()
        else:
            matches = self.state.match_atom(atom)
            if matches:
                ret = ((True,), {"pre_solved": True})
            else:
                # not in the plan thus far; lazily scan the dbs.
                matches = caching_iter(dbs.itermatch(atom))
                if matches:
                    choices = choice_point(atom, matches)
                    # ignore what dropped out, at this juncture we don't care.
                    choices.reduce_atoms(self.insoluble)
                    if not choices:
                        # and was intractable because it has a hard dep on an
                        # unsolvable atom.
                        ret = ((False, "pruning of insoluble deps "
                            "left no choices"), {})
                else:
                    ret = ((False, "no matches"), {})

        if choices is None:
            choices = choice_point(atom, matches)

        stack.add_frame(mode, atom, choices, dbs,
            self.state.current_state, drop_cycles, vdb_limited=limit_to_vdb)

        if not limit_to_vdb and not matches:
            # record insolubility so future lookups short-circuit above.
            self.insoluble.add(atom)
        if ret is not None:
            self.notify_viable(stack, atom, *ret[0], **ret[1])
            # ret[0][0] is always a literal bool; direct truth test instead of
            # the non-idiomatic `== True` comparison (PEP 8 E712).
            if ret[0][0]:
                state.add_backref_op(choices, choices.current_pkg).apply(self.state)
                return True
            return None
        return choices, matches
예제 #19
0
 def test_iter_consumption(self):
     """Partially consuming via one iterator advances the shared source."""
     src = iter(range(100))
     ci = caching_iter(src)
     it = iter(ci)
     for _ in range(20):
         next(it)
     # the wrapped source has been advanced past everything cached so far
     assert next(src) == 20
     # note we consumed one ourselves
     assert ci[20] == 21
     list(ci)
     pytest.raises(StopIteration, src.__next__)
     assert list(ci) == list(range(20)) + list(range(21, 100))
예제 #20
0
 def test_iter_consumption(self):
     """Partially consuming via one iterator advances the shared source."""
     src = iter(xrange(100))
     ci = caching_iter(src)
     it = iter(ci)
     for _ in xrange(20):
         it.next()
     # the wrapped source has been advanced past everything cached so far
     self.assertEqual(src.next(), 20)
     # note we consumed one ourselves
     self.assertEqual(ci[20], 21)
     list(ci)
     self.assertRaises(StopIteration, src.next)
     self.assertEqual(list(ci), range(20) + range(21, 100))
예제 #21
0
 def test_iter_consumption(self):
     """Partial consumption through a caching_iter advances its source."""
     source = iter(range(100))
     ci = caching_iter(source)
     consumer = iter(ci)
     for _ in range(20):
         next(consumer)
     # the wrapped source has been advanced past everything cached so far
     assert next(source) == 20
     # note we consumed one ourselves
     assert ci[20] == 21
     list(ci)
     pytest.raises(StopIteration, source.__next__)
     assert list(ci) == list(range(20)) + list(range(21, 100))
예제 #22
0
파일: plan.py 프로젝트: veelai/pkgcore
    def _viable(self, stack, mode, atom, dbs, drop_cycles, limit_to_vdb):
        """
        internal function to discern if an atom is viable, returning
        the choicepoint/matches iter if viable.

        :return: 3 possible; None (not viable), True (presolved),
          :obj:`caching_iter` (not solved, but viable), :obj:`choice_point`
        """
        choices = ret = None
        if atom in self.insoluble:
            # already proven unsolvable; skip querying the dbs entirely.
            ret = ((False, "globally insoluble"), {})
            matches = ()
        else:
            matches = self.state.match_atom(atom)
            if matches:
                ret = ((True,), {"pre_solved": True})
            else:
                # not in the plan thus far; lazily scan the dbs.
                matches = caching_iter(dbs.itermatch(atom))
                if matches:
                    choices = choice_point(atom, matches)
                    # ignore what dropped out, at this juncture we don't care.
                    choices.reduce_atoms(self.insoluble)
                    if not choices:
                        # and was intractable because it has a hard dep on an
                        # unsolvable atom.
                        ret = ((False, "pruning of insoluble deps "
                            "left no choices"), {})
                else:
                    ret = ((False, "no matches"), {})

        if choices is None:
            choices = choice_point(atom, matches)

        stack.add_frame(mode, atom, choices, dbs,
            self.state.current_state, drop_cycles, vdb_limited=limit_to_vdb)

        if not limit_to_vdb and not matches:
            # record insolubility so future lookups short-circuit above.
            self.insoluble.add(atom)
        if ret is not None:
            self.notify_viable(stack, atom, *ret[0], **ret[1])
            # ret[0][0] is always a literal bool; direct truth test instead of
            # the non-idiomatic `== True` comparison (PEP 8 E712).
            if ret[0][0]:
                state.add_backref_op(choices, choices.current_pkg).apply(self.state)
                return True
            return None
        return choices, matches
예제 #23
0
 def _py3k_protection(*args, **kwds):
     """Build a caching_iter and materialize it into a tuple for comparison."""
     ci = caching_iter(*args, **kwds)
     return tuple(ci)
예제 #24
0
 def _py3k_protection(*args, **kwds):
     """Return caching_iter(...), materialized to a tuple under py3k."""
     result = caching_iter(*args, **kwds)
     if compatibility.is_py3k:
         # py3k removed implicit sequence comparisons; force a tuple
         result = tuple(result)
     return result
예제 #25
0
    def feed(self, pkg):
        """Yield results for nonexistent/uncheckable deps and profile failures.

        :param pkg: package being fed through the check
        """
        super().feed(pkg)

        # query_cache gets caching_iter partial repo searches shoved into it-
        # reason is simple, it's likely that versions of this pkg probably
        # use similar deps- so we're forcing those packages that were
        # accessed for atom matching to remain in memory.
        # end result is less going to disk

        if pkg.live:
            # vcs ebuild that better not be visible
            yield from self.check_visibility_vcs(pkg)

        suppressed_depsets = []
        for attr in (x.lower() for x in pkg.eapi.dep_keys):
            nonexistent = set()
            try:
                for orig_node in visit_atoms(pkg, getattr(pkg, attr)):
                    # the use-dep-stripped atom is the cache key
                    node = orig_node.no_usedeps
                    if node not in self.query_cache:
                        if node in self.profiles.global_insoluble:
                            nonexistent.add(node)
                            # insert an empty tuple, so that tight loops further
                            # on don't have to use the slower get method
                            self.query_cache[node] = ()
                        else:
                            matches = caching_iter(
                                self.options.search_repo.itermatch(node))
                            if matches:
                                self.query_cache[node] = matches
                                if orig_node is not node:
                                    self.query_cache[str(orig_node)] = matches
                            elif not node.blocks:
                                # no matches and it's not a blocker: record as
                                # globally unresolvable for future packages
                                nonexistent.add(node)
                                self.query_cache[node] = ()
                                self.profiles.global_insoluble.add(node)
                    elif not self.query_cache[node]:
                        nonexistent.add(node)

            except _BlockMemoryExhaustion as e:
                # too many matches to hold; flag this depset as uncheckable
                yield UncheckableDep(attr, pkg=pkg)
                suppressed_depsets.append(attr)
            if nonexistent:
                nonexistent = map(str, sorted(nonexistent))
                yield NonexistentDeps(attr.upper(), nonexistent, pkg=pkg)

        del nonexistent

        # second pass: evaluate remaining depsets against profiles, grouping
        # identical failure sets so reporting can be deduplicated
        for attr in (x.lower() for x in pkg.eapi.dep_keys):
            if attr in suppressed_depsets:
                continue
            depset = getattr(pkg, attr)
            profile_failures = defaultdict(lambda: defaultdict(set))
            for edepset, profiles in self.collapse_evaluate_depset(
                    pkg, attr, depset):
                for profile, failures in self.process_depset(
                        pkg, attr, depset, edepset, profiles):
                    failures = tuple(map(str, stable_unique(failures)))
                    profile_failures[failures][profile.status].add(profile)

            if profile_failures:
                if self.options.verbosity > 0:
                    # report all failures across all profiles in verbose mode
                    for failures, profiles in profile_failures.items():
                        for profile_status, cls in self.report_cls_map.items():
                            for profile in sorted(
                                    profiles.get(profile_status, ()),
                                    key=attrgetter('key', 'name')):
                                yield cls(attr,
                                          profile.key,
                                          profile.name,
                                          failures,
                                          profile_status,
                                          profile.deprecated,
                                          pkg=pkg)
                else:
                    # only report one failure per depset per profile type in regular mode
                    for failures, profiles in profile_failures.items():
                        for profile_status, cls in self.report_cls_map.items():
                            status_profiles = sorted(
                                profiles.get(profile_status, ()),
                                key=attrgetter('key', 'name'))
                            if status_profiles:
                                profile = status_profiles[0]
                                yield cls(attr,
                                          profile.key,
                                          profile.name,
                                          failures,
                                          profile_status,
                                          profile.deprecated,
                                          len(status_profiles),
                                          pkg=pkg)
예제 #26
0
 def test_getitem(self):
     """Negative indexing works; out-of-range indices raise IndexError."""
     ci = caching_iter(xrange(20))
     self.assertEqual(19, ci[-1])
     for bad_index in (-21, 21):
         self.assertRaises(IndexError, operator.getitem, ci, bad_index)
예제 #27
0
 def test_hash(self):
     """caching_iter must hash identically to the equivalent tuple."""
     expected = hash(tuple(range(100)))
     self.assertEqual(hash(caching_iter(xrange(100))), expected)
예제 #28
0
 def test_setitem(self):
     """caching_iter does not support item assignment."""
     ci = caching_iter(xrange(10))
     self.assertRaises(TypeError, operator.setitem, ci, 3, 4)
예제 #29
0
 def test_full_consumption(self):
     """Results are cached: a second full iteration returns the same list."""
     ci = caching_iter(iter(range(100)))
     expected = list(range(100))
     assert list(ci) == expected
     # second pass must be served from the cache
     assert list(ci) == expected
예제 #30
0
 def test_hash(self):
     """caching_iter must hash identically to the equivalent tuple."""
     expected = hash(tuple(range(100)))
     assert hash(caching_iter(range(100))) == expected
예제 #31
0
 def test_setitem(self):
     """caching_iter does not support item assignment."""
     ci = caching_iter(range(10))
     with pytest.raises(TypeError):
         operator.setitem(ci, 3, 4)
예제 #32
0
 def test_full_consumption(self):
     """Results are cached: a second full iteration returns the same list."""
     ci = caching_iter(iter(xrange(100)))
     self.assertEqual(list(ci), range(100))
     # second pass must be served from the cache
     self.assertEqual(list(ci), range(100))
예제 #33
0
 def test_nonzero(self):
     """Truth value: non-empty is truthy (also when cached); empty is falsy."""
     ci = caching_iter(xrange(100))
     self.assertTrue(bool(ci))
     # repeat to exercise the cached path
     self.assertTrue(bool(ci))
     self.assertFalse(bool(caching_iter(iter([]))))
예제 #34
0
 def test_str(self):
     """str() of a caching_iter must produce a non-empty string."""
     ci = caching_iter(range(10))
     assert str(ci)
예제 #35
0
 def test_setitem(self):
     """Item assignment on a caching_iter must raise TypeError."""
     target = caching_iter(range(10))
     with pytest.raises(TypeError):
         operator.setitem(target, 3, 4)
예제 #36
0
 def test_str(self):
     """str() of a caching_iter must produce a non-empty string."""
     ci = caching_iter(xrange(10))
     self.assertTrue(str(ci))
예제 #37
0
 def test_init(self):
     """Seeding with a pre-built list still allows indexed access."""
     ci = caching_iter(list(xrange(100)))
     self.assertEqual(ci[0], 0)
예제 #38
0
 def test_init(self):
     """Seeding with a pre-built list still allows indexed access."""
     ci = caching_iter(list(range(100)))
     assert ci[0] == 0
예제 #39
0
 def test_full_consumption(self):
     """A fully drained caching_iter replays its cached results."""
     source = iter(range(100))
     ci = caching_iter(source)
     want = list(range(100))
     assert list(ci) == want
     # do it twice, to verify it returns properly
     assert list(ci) == want
예제 #40
0
 def test_str(self):
     """Smoke test: str() on a caching_iter works and is non-empty."""
     assert str(caching_iter(range(10))) != ""
예제 #41
0
 def test_len(self):
     """len() forces consumption and reports the element count."""
     ci = caching_iter(range(100))
     assert len(ci) == 100
예제 #42
0
 def test_init(self):
     """Constructing from a list supports immediate indexing."""
     first = caching_iter(list(range(100)))[0]
     assert first == 0
예제 #43
0
 def test_hash(self):
     """Hash of a caching_iter equals the hash of its tuple form."""
     ci_hash = hash(caching_iter(range(100)))
     assert ci_hash == hash(tuple(range(100)))
예제 #44
0
 def test_len(self):
     """len() of a 100-element caching_iter is 100."""
     ci = caching_iter(range(100))
     assert 100 == len(ci)
예제 #45
0
 def test_bool(self):
     """Truth value: non-empty is truthy (also when cached); empty is falsy."""
     ci = caching_iter(range(100))
     assert ci
     # repeat to exercise the cached path
     assert ci
     assert not caching_iter(iter([]))
예제 #46
0
 def test_bool(self):
     """Truth value of caching_iter: non-empty is True, empty is False.

     Fixed: the original compared bool results to True/False with `==`
     (PEP 8 E712); direct truth tests are the idiomatic, equivalent form.
     """
     c = caching_iter(range(100))
     assert bool(c)
     # repeat to check if it works when cached.
     assert bool(c)
     assert not bool(caching_iter(iter([])))
예제 #47
0
 def test_len(self):
     """len() of a 100-element caching_iter is 100."""
     ci = caching_iter(xrange(100))
     self.assertEqual(len(ci), 100)