Example #1
 def test_iter_read_bash(self):
     self.assertEqual(
         list(iter_read_bash(StringIO("\n" "# hi I am a comment\n" "I am not \n" " asdf # inline comment\n"))),
         ["I am not", "asdf"],
     )
     self.assertEqual(
         list(iter_read_bash(StringIO("inline # comment "), allow_inline_comments=False)), ["inline # comment"]
     )
Example #2
    def test_iter_read_bash(self):
        output = iter_read_bash(
            StringIO('\n'
                     '# hi I am a comment\n'
                     'I am not \n'
                     ' asdf # inline comment\n'))
        assert list(output) == ['I am not', 'asdf']

        output = iter_read_bash(StringIO('inline # comment '),
                                allow_inline_comments=False)
        assert list(output) == ['inline # comment']
Example #3
    def test_iter_read_bash(self):
        output = iter_read_bash(StringIO(
            '\n'
            '# hi I am a comment\n'
            'I am not \n'
            ' asdf # inline comment\n'))
        assert list(output) == ['I am not', 'asdf']

        output = iter_read_bash(StringIO(
            'inline # comment '), allow_inline_comments=False)
        assert list(output) == ['inline # comment']
Example #4
File: test_bash.py Project: rhn/snakeoil
 def test_iter_read_bash(self):
     self.assertEqual(
         list(iter_read_bash(StringIO(
             '\n'
             '# hi I am a comment\n'
             'I am not \n'
             ' asdf # inline comment\n'))),
         ['I am not', 'asdf'])
     self.assertEqual(
         list(iter_read_bash(StringIO(
             'inline # comment '), allow_inline_comments=False)),
         ['inline # comment'])
Example #5
 def test_iter_read_bash(self):
     self.assertEqual(
         list(
             iter_read_bash(
                 StringIO('\n'
                          '# hi I am a comment\n'
                          'I am not \n'
                          ' asdf # inline comment\n'))),
         ['I am not', 'asdf'])
     self.assertEqual(
         list(
             iter_read_bash(StringIO('inline # comment '),
                            allow_inline_comments=False)),
         ['inline # comment'])
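
The five variants above exercise the same contract: blank lines and full-line # comments are skipped, leading and trailing whitespace is stripped, and inline comments are cut off unless allow_inline_comments=False is passed. A minimal standalone sketch of that behaviour (the snakeoil.fileutils import path is assumed from these projects, not shown in the snippets):

from io import StringIO
from snakeoil.fileutils import iter_read_bash  # assumed import path

lines = StringIO(
    '# full-line comment\n'
    '\n'
    'KEYWORDS="amd64"  # inline comment\n')
print(list(iter_read_bash(lines)))
# expected (per the tests above): ['KEYWORDS="amd64"']

print(list(iter_read_bash(StringIO('a # b'), allow_inline_comments=False)))
# expected: ['a # b']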
Example #6
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if self.config.profile_formats.intersection(['portage-1', 'portage-2']):
             paths = sorted_scan(path)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'),
                         'package.mask', "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except IOError as i:
         if i.errno != errno.ENOENT:
             raise
     except ebuild_errors.MalformedAtom as ma:
         raise_from(profiles.ProfileError(
             pjoin(self.base, 'profiles'),
             'package.mask', ma))
     return [neg, pos]
Example #7
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if (self.config.eapi.options['has_profile_data_dirs']
                 or self.config.profile_formats.intersection(
                     ['portage-1', 'portage-2'])):
             paths = sorted_scan(path)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'), 'package.mask',
                         "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except FileNotFoundError:
         pass
     except ebuild_errors.MalformedAtom as e:
         raise profiles.ProfileError(pjoin(self.base, 'profiles'),
                                     'package.mask', e) from e
     return tuple(neg), tuple(pos)
Example #8
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if (self.config.eapi.options['has_profile_data_dirs'] or
                 self.config.profile_formats.intersection(['portage-1', 'portage-2'])):
             paths = sorted_scan(path)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'),
                         'package.mask', "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except FileNotFoundError:
         pass
     except ebuild_errors.MalformedAtom as e:
         raise profiles.ProfileError(
             pjoin(self.base, 'profiles'), 'package.mask', e) from e
     return tuple(neg), tuple(pos)
Example #9
 def parse(profiles_base, repo_id, known_status=None, known_arch=None):
     """Return the mapping of arches to profiles for a repo."""
     l = []
     fp = pjoin(profiles_base, 'profiles.desc')
     try:
         for lineno, line in iter_read_bash(fp, enum_line=True):
             try:
                 arch, profile, status = line.split()
             except ValueError:
                 logger.error(
                     f"{repo_id}::profiles/profiles.desc, "
                     f"line {lineno}: invalid profile line format: "
                     "should be 'arch profile status'")
                 continue
             if known_status is not None and status not in known_status:
                 logger.warning(
                     f"{repo_id}::profiles/profiles.desc, "
                     f"line {lineno}: unknown profile status: {status!r}")
             if known_arch is not None and arch not in known_arch:
                 logger.warning(f"{repo_id}::profiles/profiles.desc, "
                                f"line {lineno}: unknown arch: {arch!r}")
             # Normalize the profile name on the offchance someone slipped an extra /
             # into it.
             path = '/'.join(filter(None, profile.split('/')))
             deprecated = os.path.exists(
                 os.path.join(profiles_base, path, 'deprecated'))
             l.append(
                 _KnownProfile(profiles_base, arch, path, status,
                               deprecated))
     except FileNotFoundError:
         logger.debug(
             f"No profile descriptions found at {repo_id}::profiles/profiles.desc"
         )
     return frozenset(l)
Example #10
 def known_arches(self):
     """All valid KEYWORDS for the repo."""
     try:
         return frozenset(
             iter_read_bash(pjoin(self.profiles_base, 'arch.list')))
     except FileNotFoundError:
         return frozenset()
Example #11
    def arches_desc(self):
        """Arch stability status (GLEP 72).

        See https://www.gentoo.org/glep/glep-0072.html for more details.
        """
        fp = pjoin(self.profiles_base, 'arches.desc')
        d = {'stable': set(), 'transitional': set(), 'testing': set()}
        try:
            for lineno, line in iter_read_bash(fp, enum_line=True):
                try:
                    arch, status = line.split()
                except ValueError:
                    logger.error(f"{self.repo_id}::profiles/arches.desc, "
                                 f"line {lineno}: invalid line format: "
                                 "should be '<arch> <status>'")
                    continue
                if arch not in self.known_arches:
                    logger.warning(f"{self.repo_id}::profiles/arches.desc, "
                                   f"line {lineno}: unknown arch: {arch!r}")
                    continue
                if status not in d:
                    logger.warning(
                        f"{self.repo_id}::profiles/arches.desc, "
                        f"line {lineno}: unknown status: {status!r}")
                    continue
                d[status].add(arch)
        except FileNotFoundError:
            pass
        return mappings.ImmutableDict(d)
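
Examples #9 and #11 both pass enum_line=True so that malformed entries can be reported with their line number; the iterator then yields (lineno, line) pairs instead of bare lines. A hedged sketch of that pattern in isolation (file contents and field names here are illustrative only):

from io import StringIO
from snakeoil.fileutils import iter_read_bash  # assumed import path

desc = StringIO(
    '# arch status\n'
    'amd64 stable\n'
    'this line is malformed\n')
for lineno, line in iter_read_bash(desc, enum_line=True):
    try:
        arch, status = line.split()
    except ValueError:
        print(f'line {lineno}: expected two fields, got {line!r}')
        continue
    print(lineno, arch, status)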
Example #12
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if self.config.profile_format not in ['pms', 'portage-2']:
             paths = sorted(x.location for x in iter_scan(path) if x.is_reg)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(
                         pjoin(self.base, 'profiles'), 'package.mask',
                         "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except IOError as i:
         if i.errno != errno.ENOENT:
             raise
     except ebuild_errors.MalformedAtom as ma:
         raise_from(
             profiles.ProfileError(pjoin(self.base, 'profiles'),
                                   'package.mask', ma))
     return [neg, pos]
Example #13
    def parse(self):
        """Parse the given file into Mask objects."""
        with open(self.path) as f:
            lines = f.readlines()

        # determine mask groups by line number
        mask_map = dict(iter_read_bash(self.path, enum_line=True))
        for mask_lines in map(list, consecutive_groups(mask_map)):
            # use profile's EAPI setting to coerce supported masks
            atoms = [self.profile.eapi_atom(mask_map[x]) for x in mask_lines]

            # pull comment lines above initial mask entry line
            comment = []
            i = mask_lines[0] - 2
            while i >= 0 and (line := lines[i].rstrip()):
                if not line.startswith('# '):
                    mask.error(
                        f'invalid mask entry header, lineno {i + 1}: {line!r}')
                comment.append(line[2:])
                i -= 1
            if not self.header:
                self.header = lines[:i + 1]
            comment = list(reversed(comment))

            # pull attribution data from first comment line
            if mo := self.attribution_re.match(comment[0]):
                author, email, date = mo.group('author'), mo.group(
                    'email'), mo.group('date')
Example #14
    def read_ld_so_conf(self, offset):
        fp = self.ld_so_path(offset)

        try:
            l = [x.lstrip(os.path.sep) for x in iter_read_bash(fp)]
        except FileNotFoundError:
            self._mk_ld_so_conf(fp)
            # fall back to an educated guess.
            l = self.default_ld_path
        return [pjoin(offset, x) for x in l]
Example #15
File: domain.py Project: ulm/pkgcore
def _read_config_file(path):
    """Read all the data files under a given path."""
    try:
        for fs_obj in iter_scan(path, follow_symlinks=True):
            if not fs_obj.is_reg or '/.' in fs_obj.location:
                continue
            for lineno, line, in iter_read_bash(
                    fs_obj.location, allow_line_cont=True, enum_line=True):
                yield line, lineno, fs_obj.location
    except FileNotFoundError:
        pass
    except EnvironmentError as e:
        raise Failure(f"failed reading {filename!r}: {e}") from e
Example #16
def _read_profile_files(files, allow_line_cont=False):
    """Read all the given data files."""
    for path in files:
        # determine file path relative to the profiles dir
        try:
            relpath = path.split('/profiles/')[1]
        except IndexError:
            # profiles base path
            relpath = os.path.basename(path)

        for lineno, line in iter_read_bash(
                path, allow_line_cont=allow_line_cont, enum_line=True):
            yield line, lineno, relpath
Example #17
    def test_iter_read_bash_line_cont(self):
        output = iter_read_bash(StringIO(
            '\n'
            '# hi I am a comment\\\n'
            'I am not \\\n'
            'a comment \n'
            ' asdf # inline comment\\\n'),
            allow_line_cont=True)
        assert list(output) == ['I am not a comment', 'asdf']

        # continuation into inline comment
        output = iter_read_bash(StringIO(
            '\n'
            '# hi I am a comment\n'
            'I am \\\n'
            'not a \\\n'
            'comment # inline comment\n'),
            allow_line_cont=True)
        assert list(output) == ['I am not a comment']

        # ends with continuation
        output = iter_read_bash(StringIO(
            '\n'
            '# hi I am a comment\n'
            'I am \\\n'
            '\\\n'
            'not a \\\n'
            'comment\\\n'
            '\\\n'),
            allow_line_cont=True)
        assert list(output) == ['I am not a comment']

        # embedded comment prefix via continued lines
        output = iter_read_bash(StringIO(
            '\\\n'
            '# comment\\\n'
            ' not a comment\n'
            '\\\n'
            ' # inner comment\n'
            'also not\\\n'
            '#\\\n'
            'a comment\n'),
            allow_line_cont=True)
        assert list(output) == ['not a comment', 'also not#a comment']

        # Line continuations have to end with \<newline> without any backslash
        # before the pattern.
        output = iter_read_bash(StringIO(
            'I am \\ \n'
            'not a comment'),
            allow_line_cont=True)
        assert list(output) == ['I am \\', 'not a comment']
        output = iter_read_bash(StringIO(
            '\\\n'
            'I am \\\\\n'
            'not a comment'),
            allow_line_cont=True)
        assert list(output) == ['I am \\\\', 'not a comment']
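
Condensing the continuation cases: a backslash directly before the newline joins the physical lines into one logical line (no separator is inserted), and inline-comment stripping is then applied to the joined result; a backslash followed by anything else, including a space, is left untouched. A minimal sketch under those assumptions (same assumed import path as above):

from io import StringIO
from snakeoil.fileutils import iter_read_bash  # assumed import path

data = StringIO(
    'dev-libs/foo \\\n'
    'dev-libs/bar  # one logical line\n')
print(list(iter_read_bash(data, allow_line_cont=True)))
# expected (per the tests above): ['dev-libs/foo dev-libs/bar']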
Example #19
def parse_moves(location):
    pjoin = os.path.join

    # schwartzian comparison, convert it into YYYY-QQ
    def get_key(fname):
        return tuple(reversed(fname.split('-')))

    moves = {}
    for update_file in sorted(listdir_files(location), key=get_key):
        for line in iter_read_bash(pjoin(location, update_file)):
            line = line.split()
            if line[0] != 'move':
                continue
            moves[atom(line[1])] = atom(line[2])
    return moves
Example #20
 def _split_use_desc_file(self, name, converter, matcher=True):
     line = None
     fp = pjoin(self.profiles_base, name)
     try:
         for line in iter_read_bash(fp):
             try:
                 key, val = line.split(None, 1)
                 key = converter(key)
                 if matcher:
                     yield key[0], (key[1], val.split('-', 1)[1].strip())
                 else:
                     yield key, val.split('-', 1)[1].strip()
             except ValueError as e:
                 logger.error(f'failed parsing {fp!r}, line {line!r}: {e}')
     except FileNotFoundError:
         pass
     except ValueError as e:
         logger.error(f'failed parsing {fp!r}: {e}')
Example #21
 def _visibility_limiters(self):
     path = pjoin(self.base, 'profiles', 'package.mask')
     pos, neg = [], []
     try:
         if self.config.profile_format != 'pms':
             paths = sorted(x.location for x in iter_scan(path)
                 if x.is_reg)
         else:
             paths = [path]
         for path in paths:
             for line in iter_read_bash(path):
                 line = line.strip()
                 if line in ('-', ''):
                     raise profiles.ProfileError(pjoin(self.base, 'profiles'),
                         'package.mask', "encountered empty negation: -")
                 if line.startswith('-'):
                     neg.append(atom.atom(line[1:]))
                 else:
                     pos.append(atom.atom(line))
     except IOError as i:
         if i.errno != errno.ENOENT:
             raise
Example #22
    def test_iter_read_bash_line_cont(self):
        self.assertEqual(
            list(
                iter_read_bash(
                    StringIO(
                        "\n" "# hi I am a comment\\\n" "I am not \\\n" "a comment \n" " asdf # inline comment\\\n"
                    ),
                    allow_line_cont=True,
                )
            ),
            ["I am not a comment", "asdf"],
        )

        # continuation into inline comment
        self.assertEqual(
            list(
                iter_read_bash(
                    StringIO("\n" "# hi I am a comment\n" "I am \\\n" "not a \\\n" "comment # inline comment\n"),
                    allow_line_cont=True,
                )
            ),
            ["I am not a comment"],
        )

        # ends with continuation
        self.assertEqual(
            list(
                iter_read_bash(
                    StringIO("\n" "# hi I am a comment\n" "I am \\\n" "\\\n" "not a \\\n" "comment\\\n" "\\\n"),
                    allow_line_cont=True,
                )
            ),
            ["I am not a comment"],
        )

        # embedded comment prefix via continued lines
        self.assertEqual(
            list(
                iter_read_bash(
                    StringIO(
                        "\\\n"
                        "# comment\\\n"
                        " not a comment\n"
                        "\\\n"
                        " # inner comment\n"
                        "also not\\\n"
                        "#\\\n"
                        "a comment\n"
                    ),
                    allow_line_cont=True,
                )
            ),
            ["not a comment", "also not#a comment"],
        )

        # Line continuations have to end with \<newline> without any backslash
        # before the pattern.
        self.assertEqual(
            list(iter_read_bash(StringIO("I am \\ \n" "not a comment"), allow_line_cont=True)),
            ["I am \\", "not a comment"],
        )
        self.assertEqual(
            list(iter_read_bash(StringIO("\\\n" "I am \\\\\n" "not a comment"), allow_line_cont=True)),
            ["I am \\\\", "not a comment"],
        )
Example #23
    def _parse_config(self):
        """Load data from the repo's metadata/layout.conf file."""
        path = pjoin(self.location, self.layout_offset)
        data = read_dict(iter_read_bash(
            readlines(path, strip_whitespace=True, swallow_missing=True)),
                         source_isiter=True,
                         strip=True,
                         filename=path,
                         ignore_errors=True)

        sf = object.__setattr__
        sf(self, 'repo_name', data.get('repo-name', None))

        hashes = data.get('manifest-hashes', '').lower().split()
        if hashes:
            hashes = ['size'] + hashes
            hashes = tuple(iter_stable_unique(hashes))
        else:
            hashes = self.default_hashes

        required_hashes = data.get('manifest-required-hashes',
                                   '').lower().split()
        if required_hashes:
            required_hashes = ['size'] + required_hashes
            required_hashes = tuple(iter_stable_unique(required_hashes))
        else:
            required_hashes = self.default_required_hashes

        manifest_policy = data.get('use-manifests', 'strict').lower()
        d = {
            'disabled': (manifest_policy == 'false'),
            'strict': (manifest_policy == 'strict'),
            'thin': (data.get('thin-manifests', '').lower() == 'true'),
            'signed': (data.get('sign-manifests', 'true').lower() == 'true'),
            'hashes': hashes,
            'required_hashes': required_hashes,
        }

        sf(self, 'manifests', _immutable_attr_dict(d))
        masters = data.get('masters')
        _missing_masters = False
        if masters is None:
            if not self.is_empty:
                logger.warning(
                    f"{self.repo_id} repo at {self.location!r}, doesn't "
                    "specify masters in metadata/layout.conf. Please explicitly "
                    "set masters (use \"masters =\" if the repo is standalone)."
                )
            _missing_masters = True
            masters = ()
        else:
            masters = tuple(iter_stable_unique(masters.split()))
        sf(self, '_missing_masters', _missing_masters)
        sf(self, 'masters', masters)
        aliases = data.get('aliases', '').split() + [
            self.config_name, self.repo_name, self.pms_repo_name, self.location
        ]
        sf(self, 'aliases', tuple(filter(None, iter_stable_unique(aliases))))
        sf(self, 'eapis_deprecated',
           tuple(iter_stable_unique(data.get('eapis-deprecated', '').split())))
        sf(self, 'eapis_banned',
           tuple(iter_stable_unique(data.get('eapis-banned', '').split())))
        sf(
            self, 'properties_allowed',
            tuple(
                iter_stable_unique(data.get('properties-allowed',
                                            '').split())))
        sf(self, 'restrict_allowed',
           tuple(iter_stable_unique(data.get('restrict-allowed', '').split())))

        v = set(data.get('cache-formats', 'md5-dict').lower().split())
        if not v:
            v = [None]
        else:
            # sort into favored order
            v = [f for f in self.supported_cache_formats if f in v]
            if not v:
                logger.warning(
                    f'unknown cache format: falling back to md5-dict format')
                v = ['md5-dict']
        sf(self, 'cache_format', list(v)[0])

        profile_formats = set(
            data.get('profile-formats', 'pms').lower().split())
        if not profile_formats:
            logger.info(
                f"{self.repo_id!r} repo at {self.location!r} has explicitly "
                "unset profile-formats, defaulting to pms")
            profile_formats = {'pms'}
        unknown = profile_formats.difference(self.supported_profile_formats)
        if unknown:
            logger.info("%r repo at %r has unsupported profile format%s: %s",
                        self.repo_id, self.location, pluralism(unknown),
                        ', '.join(sorted(unknown)))
            profile_formats.difference_update(unknown)
            profile_formats.add('pms')
        sf(self, 'profile_formats', profile_formats)
Example #24
File: domain.py Project: chutz/pkgcore
    def __init__(self,
                 profile,
                 repositories,
                 vdb,
                 name=None,
                 root='/',
                 prefix='/',
                 incrementals=const.incrementals,
                 triggers=(),
                 **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers

        # prevent critical variables from being changed by the user in make.conf
        for k in set(profile.profile_only_variables).intersection(
                settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo
            for (collapsed, repo) in izip(repositories_collapsed, repositories)
        }
        self.repos_raw.update(
            (collapsed.name, repo)
            for (collapsed, repo) in izip(vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_maskers, pkg_unmaskers, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_maskers, parse_match),
            ("package.unmask", pkg_unmaskers, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords,
             package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
        ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in iter_read_bash(fs_obj.location))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        self.name = name
        settings.setdefault("PKGCORE_DOMAIN", name)
        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in (
                    "USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(s, settings[incremental],
                                  'While expanding %s ' % (incremental, ))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(
            optimize_incrementals(settings.get("USE", ())))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        self.use_expand = frozenset(profile.use_expand)
        self.use_expand_hidden = frozenset(profile.use_expand_hidden)
        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower() + "_"
            use.update(u2 + x for x in v.split())

        if not 'ACCEPT_KEYWORDS' in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(s, settings['ACCEPT_KEYWORDS'],
                              'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [
            self.make_keywords_filter(self.arch,
                                      default_keywords,
                                      accept_keywords,
                                      profile.keywords,
                                      incremental="package.keywords"
                                      in incrementals)
        ]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(
                self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure('user-specified bashrc %r does not exist' %
                              (data, ))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch, ))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {
            r.repo_id: r._visibility_limiters()
            for r in repositories
        }

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(
                            Failure("failed configuring repo '%s': "
                                    "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    masks = [
                        repo_masks.get(master, [(), ()]) for master in masters
                    ]
                    masks.append(repo_masks[repo.repo_id])
                    masks.extend(profile_masks)
                    mask_atoms = set()
                    for neg, pos in masks:
                        mask_atoms.difference_update(neg)
                        mask_atoms.update(pos)
                    mask_atoms.update(pkg_maskers)
                    unmask_atoms = set(chain(pkg_unmaskers, *profile_unmasks))
                    filtered = self.generate_filter(
                        generate_masking_restrict(mask_atoms),
                        generate_unmasking_restrict(unmask_atoms), *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(
                        wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        if profile.virtuals:
            l = [
                x for x in (getattr(v, 'old_style_virtuals', None)
                            for v in self.vdb) if x is not None
            ]
            profile_repo = profile.make_virtuals_repo(
                multiplex.tree(*repositories), *l)
            self.repos_raw["profile virtuals"] = profile_repo
            self.repos_configured_filtered["profile virtuals"] = profile_repo
            self.repos_configured["profile virtuals"] = profile_repo
            self.repos = [profile_repo] + self.repos

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(self.use_expand, reverse=True)))
Example #25
File: domain.py Project: den4ix/pkgcore
    def __init__(self, profile, repositories, vdb, name=None,
                 root='/', prefix='/', incrementals=const.incrementals,
                 triggers=(), **settings):
        # voodoo, unfortunately (so it goes)
        # break this up into chunks once it's stabilized (most of code
        # here has already, but still more to add)
        self._triggers = triggers
        self.name = name

        # prevent critical variables from being changed in make.conf
        for k in profile.profile_only_variables.intersection(settings.keys()):
            del settings[k]

        if 'CHOST' in settings and 'CBUILD' not in settings:
            settings['CBUILD'] = settings['CHOST']

        # if unset, MAKEOPTS defaults to CPU thread count
        if 'MAKEOPTS' not in settings:
            settings['MAKEOPTS'] = '-j%i' % cpu_count()

        # map out sectionname -> config manager immediately.
        repositories_collapsed = [r.collapse() for r in repositories]
        repositories = [r.instantiate() for r in repositories_collapsed]

        self.fetcher = settings.pop("fetcher")

        self.default_licenses_manager = OverlayedLicenses(*repositories)
        vdb_collapsed = [r.collapse() for r in vdb]
        vdb = [r.instantiate() for r in vdb_collapsed]
        self.repos_raw = {
            collapsed.name: repo for (collapsed, repo) in izip(
                repositories_collapsed, repositories)}
        self.repos_raw.update(
            (collapsed.name, repo) for (collapsed, repo) in izip(
                vdb_collapsed, vdb))
        self.repos_raw.pop(None, None)
        if profile.provides_repo is not None:
            self.repos_raw['package.provided'] = profile.provides_repo
            vdb.append(profile.provides_repo)

        self.profile = profile
        pkg_masks, pkg_unmasks, pkg_keywords, pkg_licenses = [], [], [], []
        pkg_use, self.bashrcs = [], []

        self.ebuild_hook_dir = settings.pop("ebuild_hook_dir", None)

        for key, val, action in (
            ("package.mask", pkg_masks, parse_match),
            ("package.unmask", pkg_unmasks, parse_match),
            ("package.keywords", pkg_keywords, package_keywords_splitter),
            ("package.accept_keywords", pkg_keywords, package_keywords_splitter),
            ("package.license", pkg_licenses, package_keywords_splitter),
            ("package.use", pkg_use, package_keywords_splitter),
            ("package.env", self.bashrcs, package_env_splitter),
            ):

            for fp in settings.pop(key, ()):
                try:
                    if key == "package.env":
                        base = self.ebuild_hook_dir
                        if base is None:
                            base = os.path.dirname(fp)
                        action = partial(action, base)
                    for fs_obj in iter_scan(fp, follow_symlinks=True):
                        if not fs_obj.is_reg or '/.' in fs_obj.location:
                            continue
                        val.extend(
                            action(x) for x in
                            iter_read_bash(fs_obj.location, allow_line_cont=True))
                except EnvironmentError as e:
                    if e.errno == errno.ENOENT:
                        raise MissingFile(fp, key)
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))
                except ValueError as e:
                    raise_from(Failure("failed reading '%s': %s" % (fp, e)))

        for x in incrementals:
            if isinstance(settings.get(x), basestring):
                settings[x] = tuple(settings[x].split())

        # roughly... all incremental stacks should be interpreted left -> right
        # as such we start with the profile settings, and append ours onto it.
        for k, v in profile.default_env.iteritems():
            if k not in settings:
                settings[k] = v
                continue
            if k in incrementals:
                settings[k] = v + tuple(settings[k])

        # next we finalize incrementals.
        for incremental in incrementals:
            # Skip USE/ACCEPT_LICENSE for the time being; hack; we need the
            # negations currently so that pkg iuse induced enablings can be
            # disabled by negations. For example, think of the profile doing
            # USE=-cdr for brasero w/ IUSE=+cdr. Similarly, ACCEPT_LICENSE is
            # skipped because negations are required for license filtering.
            if incremental not in settings or incremental in ("USE", "ACCEPT_LICENSE"):
                continue
            s = set()
            incremental_expansion(
                s, settings[incremental],
                'While expanding %s ' % (incremental,))
            settings[incremental] = tuple(s)

        # use is collapsed; now stack use_expand.
        use = settings['USE'] = set(optimize_incrementals(
            list(settings.get('USE', ())) + os.environ.get('USE', '').split()))

        self._extend_use_for_features(use, settings.get("FEATURES", ()))

        for u in profile.use_expand:
            v = settings.get(u)
            if v is None:
                continue
            u2 = u.lower()+"_"
            use.update(u2 + x for x in v.split())

        if 'ACCEPT_KEYWORDS' not in settings:
            raise Failure("No ACCEPT_KEYWORDS setting detected from profile, "
                          "or user config")
        s = set()
        default_keywords = []
        incremental_expansion(
            s, settings['ACCEPT_KEYWORDS'],
            'while expanding ACCEPT_KEYWORDS')
        default_keywords.extend(s)
        settings['ACCEPT_KEYWORDS'] = set(default_keywords)

        self.use = use

        if "ARCH" not in settings:
            raise Failure(
                "No ARCH setting detected from profile, or user config")

        self.arch = self.stable_arch = settings["ARCH"]
        self.unstable_arch = "~%s" % self.arch

        # ~amd64 -> [amd64, ~amd64]
        for x in default_keywords[:]:
            if x.startswith("~"):
                default_keywords.append(x.lstrip("~"))
        default_keywords = unstable_unique(default_keywords + [self.arch])

        accept_keywords = pkg_keywords + list(profile.accept_keywords)
        vfilters = [self.make_keywords_filter(
            self.arch, default_keywords, accept_keywords, profile.keywords,
            incremental="package.keywords" in incrementals)]

        del default_keywords, accept_keywords

        # we can finally close that fricking
        # "DISALLOW NON FOSS LICENSES" bug via this >:)
        master_license = []
        master_license.extend(settings.get('ACCEPT_LICENSE', ()))
        if master_license or pkg_licenses:
            vfilters.append(self.make_license_filter(master_license, pkg_licenses))

        del master_license

        # if it's made it this far...

        self.root = settings["ROOT"] = root
        self.prefix = prefix
        self.settings = ProtectedDict(settings)

        for data in self.settings.get('bashrc', ()):
            source = local_source(data)
            # this is currently local-only so a path check is ok
            # TODO make this more general
            if not os.path.exists(source.path):
                raise Failure(
                    'user-specified bashrc %r does not exist' % (data,))
            self.bashrcs.append((packages.AlwaysTrue, source))

        # stack use stuff first, then profile.
        self.enabled_use = ChunkedDataDict()
        self.enabled_use.add_bare_global(*split_negations(self.use))
        self.enabled_use.merge(profile.pkg_use)
        self.enabled_use.update_from_stream(
            chunked_data(k, *split_negations(v)) for k, v in pkg_use)

        for attr in ('', 'stable_'):
            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'forced_use'))
            c.add_bare_global((), (self.arch,))
            setattr(self, attr + 'forced_use', c)

            c = ChunkedDataDict()
            c.merge(getattr(profile, attr + 'masked_use'))
            setattr(self, attr + 'disabled_use', c)

        self.repos = []
        self.vdb = []
        self.repos_configured = {}
        self.repos_configured_filtered = {}

        rev_names = {repo: name for name, repo in self.repos_raw.iteritems()}

        profile_masks = profile._incremental_masks()
        profile_unmasks = profile._incremental_unmasks()
        repo_masks = {r.repo_id: r._visibility_limiters() for r in repositories}

        for l, repos, filtered in ((self.repos, repositories, True),
                                   (self.vdb, vdb, False)):
            for repo in repos:
                if not repo.configured:
                    pargs = [repo]
                    try:
                        for x in repo.configurables:
                            if x == "domain":
                                pargs.append(self)
                            elif x == "settings":
                                pargs.append(settings)
                            elif x == "profile":
                                pargs.append(profile)
                            else:
                                pargs.append(getattr(self, x))
                    except AttributeError as ae:
                        raise_from(Failure("failed configuring repo '%s': "
                                           "configurable missing: %s" % (repo, ae)))
                    wrapped_repo = repo.configure(*pargs)
                else:
                    wrapped_repo = repo
                key = rev_names.get(repo)
                self.repos_configured[key] = wrapped_repo
                if filtered:
                    config = getattr(repo, 'config', None)
                    masters = getattr(config, 'masters', ())
                    if masters is None:
                        # tough cookies.  If a user has an overlay, no masters
                        # defined, we're not applying the portdir masks.
                        # we do this both since that's annoying, and since
                        # frankly there isn't any good course of action.
                        masters = ()
                    global_masks = [repo_masks.get(master, [(), ()]) for master in masters]
                    global_masks.append(repo_masks[repo.repo_id])
                    global_masks.extend(profile_masks)
                    masks = set()
                    for neg, pos in global_masks:
                        masks.difference_update(neg)
                        masks.update(pos)
                    masks.update(pkg_masks)
                    unmasks = set(chain(pkg_unmasks, *profile_unmasks))
                    filtered = generate_filter(masks, unmasks, *vfilters)
                if filtered:
                    wrapped_repo = visibility.filterTree(wrapped_repo, filtered, True)
                self.repos_configured_filtered[key] = wrapped_repo
                l.append(wrapped_repo)

        self.use_expand_re = re.compile(
            "^(?:[+-])?(%s)_(.*)$" %
            "|".join(x.lower() for x in sorted(profile.use_expand, reverse=True)))
Example #26
def _read_profile_files(files, allow_line_cont=False):
    """Read all the given data files."""
    for path in files:
        for lineno, line in iter_read_bash(
                path, allow_line_cont=allow_line_cont, enum_line=True):
            yield line, lineno, path