Ejemplo n.º 1
0
	def _create_binpkgs(self, binpkgs):
		"""Create fake xpak-style binary packages under self.pkgdir.

		:param binpkgs: a dict mapping cpv -> metadata, or an iterable
			of (cpv, metadata) pairs (with BUILD_ID there can be several
			instances of one cpv, which a dict cannot hold)
		"""
		# When using BUILD_ID, there can be multiple instances for the
		# same cpv. Therefore, binpkgs may be an iterable instead of
		# a dict.
		items = getattr(binpkgs, 'items', None)
		items = items() if items is not None else binpkgs
		for cpv, metadata in items:
			a = Atom("=" + cpv, allow_repo=True)
			repo = a.repo
			if repo is None:
				# Default repository name used by the test framework.
				repo = "test_repo"

			pn = catsplit(a.cp)[1]
			cat, pf = catsplit(a.cpv)
			# Copy so the caller's metadata dict is not mutated.
			metadata = metadata.copy()
			metadata.setdefault("SLOT", "0")
			metadata.setdefault("KEYWORDS", "x86")
			metadata.setdefault("BUILD_TIME", "0")
			metadata["repository"] = repo
			metadata["CATEGORY"] = cat
			metadata["PF"] = pf

			repo_dir = self.pkgdir
			category_dir = os.path.join(repo_dir, cat)
			if "BUILD_ID" in metadata:
				# Multi-instance layout: <cat>/<pn>/<pf>-<build_id>.xpak
				binpkg_path = os.path.join(category_dir, pn,
					"%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
			else:
				# Single-instance layout: <cat>/<pf>.tbz2
				binpkg_path = os.path.join(category_dir, pf + ".tbz2")

			ensure_dirs(os.path.dirname(binpkg_path))
			t = portage.xpak.tbz2(binpkg_path)
			t.recompose_mem(portage.xpak.xpak_mem(metadata))
Ejemplo n.º 2
0
    def _create_binpkgs(self, binpkgs):
        """Create fake xpak-style binary packages under self.pkgdir.

        binpkgs may be a dict mapping cpv -> metadata, or an iterable of
        (cpv, metadata) pairs, because with BUILD_ID there can be several
        instances of the same cpv.
        """
        entries = binpkgs.items() if getattr(binpkgs, "items", None) else binpkgs
        for cpv, pkg_meta in entries:
            atom = Atom("=" + cpv, allow_repo=True)
            repo_name = atom.repo
            if repo_name is None:
                repo_name = "test_repo"

            package_name = catsplit(atom.cp)[1]
            category, package_file = catsplit(atom.cpv)

            # Work on a copy so the caller's metadata dict is untouched.
            pkg_meta = pkg_meta.copy()
            for key, default in (("SLOT", "0"), ("KEYWORDS", "x86"),
                                 ("BUILD_TIME", "0")):
                pkg_meta.setdefault(key, default)
            pkg_meta["repository"] = repo_name
            pkg_meta["CATEGORY"] = category
            pkg_meta["PF"] = package_file

            category_dir = os.path.join(self.pkgdir, category)
            if "BUILD_ID" in pkg_meta:
                # Multi-instance layout: <cat>/<pn>/<pf>-<build_id>.xpak
                binpkg_path = os.path.join(
                    category_dir, package_name,
                    "%s-%s.xpak" % (package_file, pkg_meta["BUILD_ID"]))
            else:
                # Single-instance layout: <cat>/<pf>.tbz2
                binpkg_path = os.path.join(category_dir,
                                           package_file + ".tbz2")

            ensure_dirs(os.path.dirname(binpkg_path))
            binpkg = portage.xpak.tbz2(binpkg_path)
            binpkg.recompose_mem(portage.xpak.xpak_mem(pkg_meta))
Ejemplo n.º 3
0
    def _create_binpkgs(self, binpkgs):
        """Create fake binary packages in the configured BINPKG_FORMAT.

        Handles both the legacy "xpak" format and the "gpkg" format; for
        gpkg a GPG wrapper is created lazily and unlocked before any
        packages are compressed.  Raises InvalidBinaryPackageFormat for
        any other format value.
        """
        # When using BUILD_ID, there can be multiple instances for the
        # same cpv. Therefore, binpkgs may be an iterable instead of
        # a dict.
        items = getattr(binpkgs, "items", None)
        items = items() if items is not None else binpkgs
        binpkg_format = self.settings.get("BINPKG_FORMAT",
                                          SUPPORTED_GENTOO_BINPKG_FORMATS[0])
        if binpkg_format == "gpkg":
            if self.gpg is None:
                self.gpg = GPG(self.settings)
                self.gpg.unlock()
        for cpv, metadata in items:
            a = Atom("=" + cpv, allow_repo=True)
            repo = a.repo
            if repo is None:
                # Default repository name used by the test framework.
                repo = "test_repo"

            pn = catsplit(a.cp)[1]
            cat, pf = catsplit(a.cpv)
            # Copy so the caller's metadata dict is not mutated.
            metadata = metadata.copy()
            metadata.setdefault("SLOT", "0")
            metadata.setdefault("KEYWORDS", "x86")
            metadata.setdefault("BUILD_TIME", "0")
            metadata["repository"] = repo
            metadata["CATEGORY"] = cat
            metadata["PF"] = pf
            metadata["BINPKG_FORMAT"] = binpkg_format

            repo_dir = self.pkgdir
            category_dir = os.path.join(repo_dir, cat)
            if "BUILD_ID" in metadata:
                # Multi-instance layout: <cat>/<pn>/<pf>-<build_id>.<ext>
                if binpkg_format == "xpak":
                    binpkg_path = os.path.join(
                        category_dir, pn,
                        "%s-%s.xpak" % (pf, metadata["BUILD_ID"]))
                elif binpkg_format == "gpkg":
                    binpkg_path = os.path.join(
                        category_dir, pn,
                        "%s-%s.gpkg.tar" % (pf, metadata["BUILD_ID"]))
                else:
                    raise InvalidBinaryPackageFormat(binpkg_format)
            else:
                # Single-instance layout: <cat>/<pf>.<ext>
                if binpkg_format == "xpak":
                    binpkg_path = os.path.join(category_dir, pf + ".tbz2")
                elif binpkg_format == "gpkg":
                    binpkg_path = os.path.join(category_dir, pf + ".gpkg.tar")
                else:
                    raise InvalidBinaryPackageFormat(binpkg_format)

            ensure_dirs(os.path.dirname(binpkg_path))
            if binpkg_format == "xpak":
                t = portage.xpak.tbz2(binpkg_path)
                t.recompose_mem(portage.xpak.xpak_mem(metadata))
            elif binpkg_format == "gpkg":
                t = portage.gpkg.gpkg(self.settings, a.cpv, binpkg_path)
                t.compress(os.path.dirname(binpkg_path), metadata)
            else:
                raise InvalidBinaryPackageFormat(binpkg_format)
Ejemplo n.º 4
0
def similar_name_search(dbs, atom):
	"""Return known category/package names that closely resemble *atom*.

	When the atom carries a real category, whole "cat/pkg" strings are
	compared; with the "null" placeholder category only package names
	are compared.  Matching is case-insensitive and the returned names
	use the databases' original spellings.
	"""

	cp_lower = atom.cp.lower()
	cat, pkg = catsplit(cp_lower)
	if cat == "null":
		cat = None

	all_cp = set()
	for db in dbs:
		all_cp.update(db.cp_all())

	# discard dir containing no ebuilds
	all_cp.discard(atom.cp)

	# Map each lower-cased cp back to its original spelling(s).
	orig_cp_map = {}
	for cp_orig in all_cp:
		orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
	all_cp = set(orig_cp_map)

	if cat:
		matches = difflib.get_close_matches(cp_lower, all_cp)
	else:
		# No category: index candidates by bare package name.
		pkg_to_cp = {}
		for other_cp in list(all_cp):
			other_pkg = catsplit(other_cp)[1]
			if other_pkg == pkg:
				# Check for non-identical package that
				# differs only by upper/lower case.
				identical = True
				for cp_orig in orig_cp_map[other_cp]:
					if catsplit(cp_orig)[1] != \
						catsplit(atom.cp)[1]:
						identical = False
						break
				if identical:
					# discard dir containing no ebuilds
					all_cp.discard(other_cp)
					continue
			pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)

		pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
		matches = []
		for pkg_match in pkg_matches:
			matches.extend(pkg_to_cp[pkg_match])

	# Translate the lower-cased matches back to their original case.
	matches_orig_case = []
	for cp in matches:
		matches_orig_case.extend(orig_cp_map[cp])

	return matches_orig_case
def similar_name_search(dbs, atom):
    """Return known category/package names that closely resemble *atom*.

    With a real category the whole "cat/pkg" string is compared; with
    the "null" placeholder category only package names are compared.
    Matching is case-insensitive and results keep the databases'
    original spellings.
    """
    cp_lower = atom.cp.lower()
    cat, pkg = catsplit(cp_lower)
    if cat == "null":
        cat = None

    # Gather every known cp across all databases, minus the atom itself.
    all_cp = set()
    for db in dbs:
        all_cp.update(db.cp_all())
    # discard dir containing no ebuilds
    all_cp.discard(atom.cp)

    # Map each lower-cased cp back to its original spelling(s).
    orig_cp_map = {}
    for cp_orig in all_cp:
        orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
    all_cp = set(orig_cp_map)

    if cat:
        matches = difflib.get_close_matches(cp_lower, all_cp)
    else:
        # No category: index candidates by bare package name.
        pkg_to_cp = {}
        for candidate in list(all_cp):
            candidate_pkg = catsplit(candidate)[1]
            if candidate_pkg == pkg:
                # Skip candidates that are identical to the atom apart
                # from upper/lower case of the category part.
                same_pn = all(
                    catsplit(orig)[1] == catsplit(atom.cp)[1]
                    for orig in orig_cp_map[candidate])
                if same_pn:
                    # discard dir containing no ebuilds
                    all_cp.discard(candidate)
                    continue
            pkg_to_cp.setdefault(candidate_pkg, set()).add(candidate)

        matches = []
        for pkg_match in difflib.get_close_matches(pkg, pkg_to_cp):
            matches.extend(pkg_to_cp[pkg_match])

    # Translate the lower-cased matches back to their original case.
    matches_orig_case = []
    for cp in matches:
        matches_orig_case.extend(orig_cp_map[cp])

    return matches_orig_case
Ejemplo n.º 6
0
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	'''
	Expand a possibly category-less dependency into a full Atom,
	resolving the category through cpv_expand.

	@param mydep: an Atom or atom-like string; a missing category and
		(for backward compatibility) a missing '=' prefix are tolerated
	@param mydb: dbapi consulted by cpv_expand to resolve the category
	@param use_cache: forwarded to cpv_expand
	@param settings: config instance forwarded to cpv_expand
	@rtype: Atom
	'''
	if not len(mydep):
		return mydep
	if mydep[0]=="*":
		# Tolerate a leading '*' wildcard for backward compatibility.
		mydep=mydep[1:]
	orig_dep = mydep
	if isinstance(orig_dep, Atom):
		mydep = orig_dep.cp
	else:
		mydep = orig_dep
		# Only look for '/' before any slot separator, so sub-slot
		# syntax like "foo:2/2.30" is not mistaken for a category.
		has_cat = '/' in orig_dep.split(':')[0]
		if not has_cat:
			alphanum = re.search(r'\w', orig_dep)
			if alphanum:
				# Insert the "null" placeholder category so the string
				# can be parsed as a valid atom.
				mydep = orig_dep[:alphanum.start()] + "null/" + \
					orig_dep[alphanum.start():]
		try:
			# allow_repo keeps repo-qualified atoms (pkg::repo) working.
			mydep = Atom(mydep, allow_repo=True)
		except InvalidAtom:
			# Missing '=' prefix is allowed for backward compatibility.
			if not isvalidatom("=" + mydep, allow_repo=True):
				raise
			mydep = Atom('=' + mydep, allow_repo=True)
			orig_dep = '=' + orig_dep
		if not has_cat:
			null_cat, pn = catsplit(mydep.cp)
			mydep = pn
		else:
			mydep = mydep.cp
	expanded = cpv_expand(mydep, mydb=mydb,
		use_cache=use_cache, settings=settings)
	return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
Ejemplo n.º 7
0
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	'''
	Expand a possibly category-less dependency into a full Atom,
	resolving the category through cpv_expand.

	NOTE(review): this variant does not pass allow_repo=True to Atom()
	and detects a category with a plain '/' test, unlike newer versions
	of this function elsewhere in this file -- confirm which behavior
	is wanted before reuse.

	@rtype: Atom
	'''
	if not len(mydep):
		return mydep
	if mydep[0]=="*":
		# Tolerate a leading '*' wildcard for backward compatibility.
		mydep=mydep[1:]
	orig_dep = mydep
	if isinstance(orig_dep, Atom):
		mydep = orig_dep.cp
	else:
		mydep = orig_dep
		has_cat = '/' in orig_dep
		if not has_cat:
			alphanum = re.search(r'\w', orig_dep)
			if alphanum:
				# Insert the "null" placeholder category so the string
				# can be parsed as a valid atom.
				mydep = orig_dep[:alphanum.start()] + "null/" + \
					orig_dep[alphanum.start():]
		try:
			mydep = Atom(mydep)
		except InvalidAtom:
			# Missing '=' prefix is allowed for backward compatibility.
			if not isvalidatom("=" + mydep):
				raise
			mydep = Atom('=' + mydep)
			orig_dep = '=' + orig_dep
		if not has_cat:
			null_cat, pn = catsplit(mydep.cp)
			mydep = pn
		else:
			mydep = mydep.cp
	expanded = cpv_expand(mydep, mydb=mydb,
		use_cache=use_cache, settings=settings)
	return Atom(orig_dep.replace(mydep, expanded, 1))
Ejemplo n.º 8
0
	def load(self):
		"""Populate this set with every cp in the configured category.

		When self._check is true, a cp is only included if the db
		reports at least one matching version for it.
		"""
		selected = []
		for cp in self._db.cp_all():
			if catsplit(cp)[0] != self._category:
				continue
			if self._check and not self._db.match(cp):
				continue
			selected.append(cp)
		self._setAtoms(selected)
Ejemplo n.º 9
0
	def load(self):
		"""Populate this set with every cp in the configured category.

		When self._check is true, a cp is only included if the db
		reports at least one matching version for it.
		"""
		myatoms = []
		for cp in self._db.cp_all():
			if catsplit(cp)[0] == self._category:
				if (not self._check) or len(self._db.match(cp)) > 0:
					myatoms.append(cp)
		self._setAtoms(myatoms)
Ejemplo n.º 10
0
 def __init__(self, repo, pkgcand):
     """Wrap *repo*, substituting pkgcand's real category for 'null'.

     The wrapped atom is rebuilt with the category taken from
     *pkgcand*; all other attributes are copied from *repo* unchanged.
     """
     cat = catsplit(pkgcand)[0]
     # Replace the "null/" placeholder with the candidate's category.
     self._atom = PortageHackedAtom(
         str(repo._atom).replace('null/', '%s/' % cat), repo._atom.repo)
     self._dbapi = repo._dbapi
     self._path = repo._path
     self._pkg_class = repo._pkg_class
     self._prio = repo._prio
     self._name = repo._name
Ejemplo n.º 11
0
def is_private_package_atom(atom, installed_from=None, debug=False):
    """Decide whether *atom* refers to a private package.

    Also normalizes installed_from[0] in place: the recorded source
    tree name is cleared when it is misleading or private.

    @param atom: package atom to classify
    @param installed_from: optional single-element list holding the
        name of the overlay the package was installed from; may be
        mutated in place
    @param debug: when True, print the reason for each decision
    @return: True if the atom is considered private, False otherwise
    """
    # Fix: parenthesized print works on both Python 2 and Python 3;
    # the original used Python-2-only print statements.
    cat = catsplit(get_cp(atom))[0]
    if GeneratedPackages().is_generated_cat(cat):
        if GeneratedPackages().is_private_cat(cat):
            if debug:
                print('  skipping "%s" (%s, %s)' %
                      (atom, 'was generated', 'is currently blacklisted'))
            return True
        else:
            if installed_from and \
                    installed_from[0] != '' and \
                    not GeneratedPackages().is_dedicated_repo_name(
                        installed_from[0]):
                if debug:
                    print('  removing %s source tree "%s" for atom "%s"' %
                          ('misleading', installed_from[0], atom))
                installed_from[0] = ''

    if installed_from is not None:
        # -   We collect private packages iff they come from a non-private
        #     overlay, because that means they were in there before and are
        #     actually not private.  An example would be media-sound/xmms.
        #
        # -   We collect packages from private overlays iff the package also
        #     exists in a non-private overlay.  An example would be that
        #     you posted your ebuild to bugs.gentoo.org and somebody added
        #     it to the tree in the meantime.
        if not Overlays().is_private_overlay_name(installed_from[0]):
            return False
        if not Overlays().is_private_package_atom(atom):
            if installed_from and installed_from[0] != '':
                if debug:
                    print('  removing %s source tree "%s" for atom "%s"' %
                          ('private', installed_from[0], atom))
                installed_from[0] = ''
            return False
        if debug:
            print('  skipping "%s" (%s, %s)' %
                  (atom, 'was installed from private tree',
                   'is currently not in non-private tree'))
        return True
    else:
        not_in_public_trees = Overlays().is_private_package_atom(atom)
        if debug and not_in_public_trees:
            print('  skipping "%s" (not in public trees)' % atom)
        return not_in_public_trees
Ejemplo n.º 12
0
def is_private_package_atom(atom, installed_from=None, debug=False):
    """Decide whether *atom* refers to a private package.

    Also normalizes installed_from[0] in place: the recorded source
    tree name is cleared when it is misleading or private.

    NOTE(review): uses Python 2 print statements; this function cannot
    run under Python 3 as written.
    """
    cat = catsplit(get_cp(atom))[0]
    if GeneratedPackages().is_generated_cat(cat):
        if GeneratedPackages().is_private_cat(cat):
            if debug:
                print '  skipping "%s" (%s, %s)' % \
                    (atom, 'was generated', 'is currently blacklisted')
            return True
        else:
            # Generated but not private: clear a misleading source tree.
            if installed_from and \
                    installed_from[0] != '' and \
                    not GeneratedPackages().is_dedicated_repo_name(
                        installed_from[0]):
                if debug:
                    print '  removing %s source tree "%s" for atom "%s"' % \
                    ('misleading', installed_from[0], atom)
                installed_from[0] = ''

    if installed_from != None:
        """
        -   We collect private packages iff they come from a non-private
            overlay, because that means they were in there before and are
            actually not private.  An example would be media-sound/xmms.

        -   We collect packages from private overlays iff the package also
            exists in a non-private overlay.  An example would be that
            you posted your ebuild to bugs.gentoo.org and somebody added
            it to the tree in the meantime.
        """
        if not Overlays().is_private_overlay_name(installed_from[0]):
            return False
        if not Overlays().is_private_package_atom(atom):
            if installed_from and installed_from[0] != '':
                if debug:
                    print '  removing %s source tree "%s" for atom "%s"' % \
                    ('private', installed_from[0], atom)
                installed_from[0] = ''
            return False
        if debug:
            print '  skipping "%s" (%s, %s)' % \
                (atom, 'was installed from private tree',
                'is currently not in non-private tree')
        return True
    else:
        not_in_public_trees = Overlays().is_private_package_atom(atom)
        if debug and not_in_public_trees:
            print '  skipping "%s" (not in public trees)' % atom
        return not_in_public_trees
Ejemplo n.º 13
0
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
    '''
    Expand *mydep* into a fully-categorized Atom, consulting *mydb*
    through cpv_expand only when necessary.

    @param mydep: an Atom or atom-like string; a missing category and
        (for backward compatibility) a missing '=' prefix are tolerated
    @param mydb: dbapi consulted to resolve the category
    @param use_cache: forwarded to cpv_expand
    @param settings: config instance forwarded to cpv_expand
    @rtype: Atom
    '''
    orig_dep = mydep
    if isinstance(orig_dep, Atom):
        # Atom instances always carry a category.
        has_cat = True
    else:
        if not mydep:
            return mydep
        if mydep[0] == "*":
            # Tolerate a leading '*' wildcard for backward compatibility.
            mydep = mydep[1:]
            orig_dep = mydep
        # Only look for '/' before any slot separator, so sub-slot
        # syntax like "foo:2/2.30" is not mistaken for a category.
        has_cat = '/' in orig_dep.split(':')[0]
        if not has_cat:
            alphanum = re.search(r'\w', orig_dep)
            if alphanum:
                # Insert the "null" placeholder category so the string
                # can be parsed as a valid atom.
                mydep = orig_dep[:alphanum.start()] + "null/" + \
                 orig_dep[alphanum.start():]
        try:
            mydep = Atom(mydep, allow_repo=True)
        except InvalidAtom:
            # Missing '=' prefix is allowed for backward compatibility.
            if not isvalidatom("=" + mydep, allow_repo=True):
                raise
            mydep = Atom('=' + mydep, allow_repo=True)
            orig_dep = '=' + orig_dep
        if not has_cat:
            null_cat, pn = catsplit(mydep.cp)
            mydep = pn

    if has_cat:
        # Optimize most common cases to avoid calling cpv_expand.
        if not mydep.cp.startswith("virtual/"):
            return mydep
        if not hasattr(mydb, "cp_list") or \
         mydb.cp_list(mydep.cp):
            return mydep
        # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
        mydep = mydep.cp

    expanded = cpv_expand(mydep,
                          mydb=mydb,
                          use_cache=use_cache,
                          settings=settings)
    return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
Ejemplo n.º 14
0
def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
	'''
	Expand *mydep* into a fully-categorized Atom, consulting *mydb*
	through cpv_expand only when necessary.

	@rtype: Atom
	'''
	orig_dep = mydep
	if isinstance(orig_dep, Atom):
		# Atom instances always carry a category.
		has_cat = True
	else:
		if not mydep:
			return mydep
		# A leading '*' wildcard is tolerated for backward compatibility.
		if mydep.startswith("*"):
			mydep = mydep[1:]
			orig_dep = mydep
		# Ignore anything after ':' so sub-slots (e.g. "foo:2/2.30")
		# are not mistaken for a category.
		has_cat = "/" in orig_dep.split(":")[0]
		if not has_cat:
			first_word = re.search(r'\w', orig_dep)
			if first_word is not None:
				# Insert the "null" placeholder category so the string
				# parses as a valid atom.
				pos = first_word.start()
				mydep = orig_dep[:pos] + "null/" + orig_dep[pos:]
		try:
			mydep = Atom(mydep, allow_repo=True)
		except InvalidAtom:
			# Missing '=' prefix is allowed for backward compatibility.
			if not isvalidatom("=" + mydep, allow_repo=True):
				raise
			mydep = Atom('=' + mydep, allow_repo=True)
			orig_dep = '=' + orig_dep
		if not has_cat:
			mydep = catsplit(mydep.cp)[1]

	if has_cat:
		# Common case: a real, non-virtual category needs no expansion.
		if not mydep.cp.startswith("virtual/"):
			return mydep
		if not hasattr(mydb, "cp_list") or mydb.cp_list(mydep.cp):
			return mydep
		# Fall back to legacy cpv_expand for old-style PROVIDE virtuals.
		mydep = mydep.cp

	expanded = cpv_expand(mydep, mydb=mydb, use_cache=use_cache,
		settings=settings)
	return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
Ejemplo n.º 15
0
    def _create_binpkgs(self, binpkgs):
        """Create a fake <cat>/<pf>.tbz2 binary package for each cpv."""
        for cpv, pkg_meta in binpkgs.items():
            atom = Atom("=" + cpv, allow_repo=True)
            repo_name = atom.repo
            if repo_name is None:
                repo_name = "test_repo"

            category, package_file = catsplit(atom.cpv)

            # Work on a copy so the caller's metadata dict is untouched.
            pkg_meta = dict(pkg_meta)
            for key, default in (("SLOT", "0"), ("KEYWORDS", "x86"),
                                 ("BUILD_TIME", "0")):
                pkg_meta.setdefault(key, default)
            pkg_meta["repository"] = repo_name
            pkg_meta["CATEGORY"] = category
            pkg_meta["PF"] = package_file

            category_dir = os.path.join(self.pkgdir, category)
            ensure_dirs(category_dir)
            binpkg_path = os.path.join(category_dir, package_file + ".tbz2")
            binpkg = portage.xpak.tbz2(binpkg_path)
            binpkg.recompose_mem(portage.xpak.xpak_mem(pkg_meta))
Ejemplo n.º 16
0
	def _create_binpkgs(self, binpkgs):
		"""Create a fake <cat>/<pf>.tbz2 binary package for each cpv."""
		for cpv, metadata in binpkgs.items():
			a = Atom("=" + cpv, allow_repo=True)
			repo = a.repo
			if repo is None:
				# Default repository name used by the test framework.
				repo = "test_repo"

			cat, pf = catsplit(a.cpv)
			# Copy so the caller's metadata dict is not mutated.
			metadata = metadata.copy()
			metadata.setdefault("SLOT", "0")
			metadata.setdefault("KEYWORDS", "x86")
			metadata.setdefault("BUILD_TIME", "0")
			metadata["repository"] = repo
			metadata["CATEGORY"] = cat
			metadata["PF"] = pf

			repo_dir = self.pkgdir
			category_dir = os.path.join(repo_dir, cat)
			binpkg_path = os.path.join(category_dir, pf + ".tbz2")
			ensure_dirs(category_dir)
			t = portage.xpak.tbz2(binpkg_path)
			t.recompose_mem(portage.xpak.xpak_mem(metadata))
Ejemplo n.º 17
0
def digestgen(myarchives=None, mysettings=None,
	overwrite=None, manifestonly=None, myportdb=None):
	"""
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param overwrite: deprecated and ignored; emits a warning if given
	@param manifestonly: deprecated and ignored; emits a warning if given
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if mysettings is None:
		raise TypeError("portage.digestgen(): missing" + \
			" required 'mysettings' parameter")
	if myportdb is None:
		warnings.warn("portage.digestgen() called without 'myportdb' parameter",
			DeprecationWarning, stacklevel=2)
		myportdb = portage.portdb
	if overwrite is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'overwrite' parameter",
			DeprecationWarning, stacklevel=2)
	if manifestonly is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'manifestonly' parameter",
			DeprecationWarning, stacklevel=2)

	try:
		# NOTE(review): presumably exempts manifest checks while the
		# manifest itself is regenerated -- confirm semantics.
		portage._doebuild_manifest_exempt_depend += 1
		# Map each distfile to the list of cpvs that reference it.
		distfiles_map = {}
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		for cpv in fetchlist_dict:
			try:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except InvalidDependString as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e
				return 0
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		manifest1_compat = False
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist.  To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})

		# To avoid accidental regeneration of digests with the incorrect
		# files (such as partially downloaded files), trigger the fetch
		# code if the file exists and it's size doesn't match the current
		# manifest entry. If there really is a legitimate reason for the
		# digest to change, `ebuild --force digest` can be used to avoid
		# triggering this code (or else the old digests can be manually
		# removed from the Manifest).
		missing_files = []
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
			if not myhashes:
				# No manifest entry: fetch unless a non-empty local
				# copy already exists.
				try:
					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				except OSError:
					st = None
				if st is None or st.st_size == 0:
					missing_files.append(myfile)
				continue
			size = myhashes.get("size")

			try:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				if size == 0:
					missing_files.append(myfile)
					continue
				if required_hash_types.difference(myhashes):
					missing_files.append(myfile)
					continue
			else:
				# Local file exists: re-fetch if empty or size mismatch.
				if st.st_size == 0 or size is not None and size != st.st_size:
					missing_files.append(myfile)
					continue

		if missing_files:
				mytree = os.path.realpath(os.path.dirname(
					os.path.dirname(mysettings["O"])))
				fetch_settings = config(clone=mysettings)
				debug = mysettings.get("PORTAGE_DEBUG") == "1"
				for myfile in missing_files:
					uris = set()
					for cpv in distfiles_map[myfile]:
						myebuild = os.path.join(mysettings["O"],
							catsplit(cpv)[1] + ".ebuild")
						# for RESTRICT=fetch, mirror, etc...
						doebuild_environment(myebuild, "fetch",
							mysettings["ROOT"], fetch_settings,
							debug, 1, myportdb)
						uris.update(myportdb.getFetchMap(
							cpv, mytree=mytree)[myfile])

					fetch_settings["A"] = myfile # for use by pkg_nofetch()

					try:
						st = os.stat(os.path.join(
							mysettings["DISTDIR"],myfile))
					except OSError:
						st = None

					if not fetch({myfile : uris}, fetch_settings):
						writemsg(_("!!! Fetch failed for %s, can't update "
							"Manifest\n") % myfile, noiselevel=-1)
						if myfile in dist_hashes and \
							st is not None and st.st_size > 0:
							# stat result is obtained before calling fetch(),
							# since fetch may rename the existing file if the
							# digest does not match.
							writemsg(_("!!! If you would like to "
								"forcefully replace the existing "
								"Manifest entry\n!!! for %s, use "
								"the following command:\n") % myfile + \
								"!!!    " + colorize("INFORM",
								"ebuild --force %s manifest" % \
								os.path.basename(myebuild)) + "\n",
								noiselevel=-1)
						return 0
		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
		try:
			mf.create(assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
				"assume-digests" in mysettings.features))
		except FileNotFound as e:
			writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 0
		except PortagePackageException as e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 0
		try:
			mf.write(sign=False)
		except PermissionDenied as e:
			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
			return 0
		if "assume-digests" not in mysettings.features:
			# Report digests that were assumed without a local distfile.
			distlist = list(mf.fhashdict.get("DIST", {}))
			distlist.sort()
			auto_assumed = []
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
			if auto_assumed:
				mytree = os.path.realpath(
					os.path.dirname(os.path.dirname(mysettings["O"])))
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				pkgs.sort()
				writemsg_stdout("  digest.assumed" + colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
							writemsg_stdout(
								"   %s::%s\n" % (pv, filename))
		return 1
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
Ejemplo n.º 18
0
	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
		"""Create profile, repository and user configuration scaffolding.

		Writes profiles/categories and license_groups for every test
		repository, applies per-repo config files, builds a minimal
		profile for "test_repo", then writes repos.conf, user config
		files, make.globals and set configuration under self.eroot.
		NOTE(review): the 'installed' parameter is unused here, and
		user_config files appear to be written twice (once under
		USER_CONFIG_PATH and once under etc/portage) -- confirm intent.
		"""

		user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

		try:
			os.makedirs(user_config_dir)
		except os.error:
			# Directory may already exist.
			pass

		for repo in self.repo_dirs:
			repo_dir = self._get_repo_dir(repo)
			profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
			metadata_dir = os.path.join(repo_dir, "metadata")
			os.makedirs(metadata_dir)

			#Create $REPO/profiles/categories
			categories = set()
			for cpv in ebuilds:
				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
				if ebuilds_repo is None:
					ebuilds_repo = "test_repo"
				if ebuilds_repo == repo:
					categories.add(catsplit(cpv)[0])

			categories_file = os.path.join(profile_dir, "categories")
			f = open(categories_file, "w")
			for cat in categories:
				f.write(cat + "\n")
			f.close()

			#Create $REPO/profiles/license_groups
			license_file = os.path.join(profile_dir, "license_groups")
			f = open(license_file, "w")
			f.write("EULA TEST\n")
			f.close()

			repo_config = repo_configs.get(repo)
			if repo_config:
				for config_file, lines in repo_config.items():
					if config_file not in self.config_files:
						raise ValueError("Unknown config file: '%s'" % config_file)

					# layout.conf lives in metadata/, everything else in
					# the profiles/ directory.
					if config_file in ("layout.conf",):
						file_name = os.path.join(repo_dir, "metadata", config_file)
					else:
						file_name = os.path.join(profile_dir, config_file)
					f = open(file_name, "w")
					for line in lines:
						f.write("%s\n" % line)
					f.close()

			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
			os.makedirs(os.path.join(repo_dir, "eclass"))

			if repo == "test_repo":
				#Create a minimal profile in /usr/portage
				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
				os.makedirs(sub_profile_dir)

				eapi_file = os.path.join(sub_profile_dir, "eapi")
				f = open(eapi_file, "w")
				f.write("0\n")
				f.close()

				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
				f = open(make_defaults_file, "w")
				f.write("ARCH=\"x86\"\n")
				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
				f.close()

				use_force_file = os.path.join(sub_profile_dir, "use.force")
				f = open(use_force_file, "w")
				f.write("x86\n")
				f.close()

				parent_file = os.path.join(sub_profile_dir, "parent")
				f = open(parent_file, "w")
				f.write("..\n")
				f.close()

				if profile:
					for config_file, lines in profile.items():
						if config_file not in self.config_files:
							raise ValueError("Unknown config file: '%s'" % config_file)

						file_name = os.path.join(sub_profile_dir, config_file)
						f = open(file_name, "w")
						for line in lines:
							f.write("%s\n" % line)
						f.close()

				#Create profile symlink
				os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))

				#Create minimal herds.xml
				herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
				with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
					f.write(herds_xml)

		# Write empty entries for each repository, in order to exercise
		# RepoConfigLoader's repos.conf processing.
		repos_conf_file = os.path.join(user_config_dir, "repos.conf")
		f = open(repos_conf_file, "w")
		for repo in sorted(self.repo_dirs.keys()):
			f.write("[%s]\n" % repo)
			f.write("\n")
		f.close()

		for config_file, lines in user_config.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		#Create /usr/share/portage/config/make.globals
		make_globals_path = os.path.join(self.eroot,
			GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
		ensure_dirs(os.path.dirname(make_globals_path))
		os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
			make_globals_path)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")

		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = \
			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			file_name = os.path.join(set_config_dir, sets_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		user_config_dir = os.path.join(self.eroot, "etc", "portage")

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for config_file, lines in user_config.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()
Ejemplo n.º 19
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
    Generate a Manifest (digest) file if missing, fetching distfiles first
    when necessary.

    NOTE: myarchives and mysettings used to be positional arguments,
    so their order must be preserved for backward compatibility.

    @param myarchives: unused; retained only for backward compatibility
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None or myportdb is None:
        raise TypeError(
            "portage.digestgen(): 'mysettings' and 'myportdb' parameter are required."
        )

    try:
        # Temporarily exempt the depend phase from manifest checks while
        # we are (re)generating the Manifest; restored in the finally block.
        portage._doebuild_manifest_exempt_depend += 1
        # Maps each distfile name to the list of cpvs that reference it.
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        # The repository tree is two directories above the ebuild dir
        # (category/package layout).
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        repo_required_hashes = mf.manifest_required_hashes
        if repo_required_hashes is None:
            repo_required_hashes = MANIFEST2_HASH_DEFAULTS
        mf = mf.load_manifest(mysettings["O"],
                              mysettings["DISTDIR"],
                              fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; "
                  "repository is configured to not use them\n") %
                mysettings["O"])
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.update(repo_required_hashes)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and it's size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                # No manifest entry yet: the file must be fetched unless a
                # non-empty copy already exists in DISTDIR.
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                # File is absent: refetch if the recorded size is zero or
                # any required hash type is missing from the entry.
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                # File exists: refetch if empty or if its size disagrees
                # with the Manifest entry (likely a partial download).
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        for myfile in missing_files:
            uris = set()
            all_restrict = set()
            for cpv in distfiles_map[myfile]:
                uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                restrict = myportdb.aux_get(cpv, ['RESTRICT'],
                                            mytree=mytree)[0]
                # Here we ignore conditional parts of RESTRICT since
                # they don't apply unconditionally. Assume such
                # conditionals only apply on the client side where
                # digestgen() does not need to be called.
                all_restrict.update(
                    use_reduce(restrict, flat=True, matchnone=True))

                # fetch() uses CATEGORY and PF to display a message
                # when fetch restriction is triggered.
                cat, pf = catsplit(cpv)
                mysettings["CATEGORY"] = cat
                mysettings["PF"] = pf

            # fetch() uses PORTAGE_RESTRICT to control fetch
            # restriction, which is only applied to files that
            # are not fetchable via a mirror:// URI.
            mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError:
                st = None

            if not fetch({myfile: uris}, mysettings):
                # NOTE(review): 'cpv' here is the last value left over from
                # the loop above — presumably any cpv that provides this
                # file works for locating the nofetch ebuild; confirm.
                myebuild = os.path.join(mysettings["O"],
                                        catsplit(cpv)[1] + ".ebuild")
                spawn_nofetch(myportdb, myebuild)
                writemsg(
                    _("!!! Fetch failed for %s, can't update Manifest\n") %
                    myfile,
                    noiselevel=-1)
                if myfile in dist_hashes and \
                 st is not None and st.st_size > 0:
                    # stat result is obtained before calling fetch(),
                    # since fetch may rename the existing file if the
                    # digest does not match.
                    cmd = colorize(
                        "INFORM", "ebuild --force %s manifest" %
                        os.path.basename(myebuild))
                    writemsg((_(
                        "!!! If you would like to forcefully replace the existing Manifest entry\n"
                        "!!! for %s, use the following command:\n") % myfile) +
                             "!!!    %s\n" % cmd,
                             noiselevel=-1)
                return 0

        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update Manifest\n") %
                     e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        # Without assume-digests, report any Manifest entries whose
        # distfiles are not actually present in DISTDIR.
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                # cp is derived from the last two path components of the
                # ebuild directory (category/package).
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Ejemplo n.º 20
0
 def category(self):
     """Return the category component (the part before '/') of this string."""
     parts = catsplit(self)
     return parts[0]
Ejemplo n.º 21
0
    def _create_profile(self, ebuilds, installed, profile, repo_configs,
                        user_config, sets):

        user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for repo in self.repo_dirs:
            repo_dir = self._get_repo_dir(repo)
            profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
            metadata_dir = os.path.join(repo_dir, "metadata")
            os.makedirs(metadata_dir)

            #Create $REPO/profiles/categories
            categories = set()
            for cpv in ebuilds:
                ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
                if ebuilds_repo is None:
                    ebuilds_repo = "test_repo"
                if ebuilds_repo == repo:
                    categories.add(catsplit(cpv)[0])

            categories_file = os.path.join(profile_dir, "categories")
            f = open(categories_file, "w")
            for cat in categories:
                f.write(cat + "\n")
            f.close()

            #Create $REPO/profiles/license_groups
            license_file = os.path.join(profile_dir, "license_groups")
            f = open(license_file, "w")
            f.write("EULA TEST\n")
            f.close()

            repo_config = repo_configs.get(repo)
            if repo_config:
                for config_file, lines in repo_config.items():
                    if config_file not in self.config_files:
                        raise ValueError("Unknown config file: '%s'" %
                                         config_file)

                    if config_file in ("layout.conf", ):
                        file_name = os.path.join(repo_dir, "metadata",
                                                 config_file)
                    else:
                        file_name = os.path.join(profile_dir, config_file)
                    f = open(file_name, "w")
                    for line in lines:
                        f.write("%s\n" % line)
                    f.close()

            #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
            os.makedirs(os.path.join(repo_dir, "eclass"))

            if repo == "test_repo":
                #Create a minimal profile in /usr/portage
                sub_profile_dir = os.path.join(profile_dir, "default", "linux",
                                               "x86", "test_profile")
                os.makedirs(sub_profile_dir)

                eapi_file = os.path.join(sub_profile_dir, "eapi")
                f = open(eapi_file, "w")
                f.write("0\n")
                f.close()

                make_defaults_file = os.path.join(sub_profile_dir,
                                                  "make.defaults")
                f = open(make_defaults_file, "w")
                f.write("ARCH=\"x86\"\n")
                f.write("ACCEPT_KEYWORDS=\"x86\"\n")
                f.close()

                use_force_file = os.path.join(sub_profile_dir, "use.force")
                f = open(use_force_file, "w")
                f.write("x86\n")
                f.close()

                parent_file = os.path.join(sub_profile_dir, "parent")
                f = open(parent_file, "w")
                f.write("..\n")
                f.close()

                if profile:
                    for config_file, lines in profile.items():
                        if config_file not in self.config_files:
                            raise ValueError("Unknown config file: '%s'" %
                                             config_file)

                        file_name = os.path.join(sub_profile_dir, config_file)
                        f = open(file_name, "w")
                        for line in lines:
                            f.write("%s\n" % line)
                        f.close()

                #Create profile symlink
                os.symlink(sub_profile_dir,
                           os.path.join(user_config_dir, "make.profile"))

                #Create minimal herds.xml
                herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
                with open(os.path.join(metadata_dir, "metadata.xml"),
                          'w') as f:
                    f.write(herds_xml)

        # Write empty entries for each repository, in order to exercise
        # RepoConfigLoader's repos.conf processing.
        repos_conf_file = os.path.join(user_config_dir, "repos.conf")
        f = open(repos_conf_file, "w")
        for repo in sorted(self.repo_dirs.keys()):
            f.write("[%s]\n" % repo)
            f.write("\n")
        f.close()

        for config_file, lines in user_config.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)

            file_name = os.path.join(user_config_dir, config_file)
            f = open(file_name, "w")
            for line in lines:
                f.write("%s\n" % line)
            f.close()

        #Create /usr/share/portage/config/make.globals
        make_globals_path = os.path.join(self.eroot,
                                         GLOBAL_CONFIG_PATH.lstrip(os.sep),
                                         "make.globals")
        ensure_dirs(os.path.dirname(make_globals_path))
        os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
                   make_globals_path)

        #Create /usr/share/portage/config/sets/portage.conf
        default_sets_conf_dir = os.path.join(self.eroot,
                                             "usr/share/portage/config/sets")

        try:
            os.makedirs(default_sets_conf_dir)
        except os.error:
            pass

        provided_sets_portage_conf = \
         os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
        os.symlink(provided_sets_portage_conf,
                   os.path.join(default_sets_conf_dir, "portage.conf"))

        set_config_dir = os.path.join(user_config_dir, "sets")

        try:
            os.makedirs(set_config_dir)
        except os.error:
            pass

        for sets_file, lines in sets.items():
            file_name = os.path.join(set_config_dir, sets_file)
            f = open(file_name, "w")
            for line in lines:
                f.write("%s\n" % line)
            f.close()

        user_config_dir = os.path.join(self.eroot, "etc", "portage")

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for config_file, lines in user_config.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)

            file_name = os.path.join(user_config_dir, config_file)
            f = open(file_name, "w")
            for line in lines:
                f.write("%s\n" % line)
            f.close()
Ejemplo n.º 22
0
 def __init__(self, repo, pkgcand):
     """Copy the wrapped repo's state, expanding its 'null/' category
     placeholder to the real category taken from *pkgcand*."""
     category = catsplit(pkgcand)[0]
     expanded_atom = str(repo._atom).replace('null/', '%s/' % category)
     self._atom = expanded_atom
     self._dbapi = repo._dbapi
     self._pkg_class = repo._pkg_class
Ejemplo n.º 23
0
    def _create_profile(self, ebuilds, installed, profile, user_config, sets):
        """Create a minimal profile and user configuration for the playground.

        Writes $PORTDIR/profiles/categories and license_groups, creates
        the eclass directory and a minimal test sub-profile, symlinks
        /etc/make.profile to it, and writes user config and sets files
        below self.eroot.

        @param ebuilds: cpv -> metadata mapping for repository ebuilds
        @param installed: cpv -> metadata mapping for installed packages
        @param profile: must be falsy; custom profiles are unsupported here
        @param user_config: config-file name -> lines for /etc/portage
        @param sets: sets-file name -> lines
        @raises NotImplementedError: if a custom profile is requested
        @raises ValueError: for a config file name not in self.config_files
        """

        def _write_lines(path, lines):
            # Write each entry on its own line; the context manager
            # guarantees the file is closed even if a write fails.
            with open(path, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        #Create $PORTDIR/profiles/categories
        categories = set()
        for cpv in chain(ebuilds.keys(), installed.keys()):
            categories.add(catsplit(cpv)[0])

        profile_dir = os.path.join(self.portdir, "profiles")
        try:
            os.makedirs(profile_dir)
        except os.error:
            pass

        # sorted() makes the file content deterministic (set iteration
        # order is not).
        _write_lines(os.path.join(profile_dir, "categories"),
                     sorted(categories))

        #Create $REPO/profiles/license_groups
        _write_lines(os.path.join(profile_dir, "license_groups"),
                     ["EULA TEST"])

        #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
        os.makedirs(os.path.join(self.portdir, "eclass"))

        sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86",
                                       "test_profile")
        os.makedirs(sub_profile_dir)

        _write_lines(os.path.join(sub_profile_dir, "eapi"), ["0"])
        _write_lines(os.path.join(sub_profile_dir, "make.defaults"),
                     ["ARCH=\"x86\"", "ACCEPT_KEYWORDS=\"x86\""])
        _write_lines(os.path.join(sub_profile_dir, "use.force"), ["x86"])

        if profile:
            #This is meant to allow the consumer to set up his own profile,
            #with package.mask and what not.
            raise NotImplementedError()

        #Create profile symlink
        os.makedirs(os.path.join(self.eroot, "etc"))
        os.symlink(sub_profile_dir,
                   os.path.join(self.eroot, "etc", "make.profile"))

        user_config_dir = os.path.join(self.eroot, "etc", "portage")

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for config_file, lines in user_config.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)
            _write_lines(os.path.join(user_config_dir, config_file), lines)

        #Create /usr/share/portage/config/sets/portage.conf
        default_sets_conf_dir = os.path.join(self.eroot,
                                             "usr/share/portage/config/sets")

        try:
            os.makedirs(default_sets_conf_dir)
        except os.error:
            pass

        provided_sets_portage_conf = \
            os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
        os.symlink(provided_sets_portage_conf,
                   os.path.join(default_sets_conf_dir, "portage.conf"))

        set_config_dir = os.path.join(user_config_dir, "sets")

        try:
            os.makedirs(set_config_dir)
        except os.error:
            pass

        for sets_file, lines in sets.items():
            _write_lines(os.path.join(set_config_dir, sets_file), lines)
Ejemplo n.º 24
0
    def _create_profile(self, ebuilds, eclasses, installed, profile,
                        repo_configs, user_config, sets):
        """Create profiles, repository metadata, eclasses and user
        configuration (including a GPG-signing make.conf) for the test
        playground.

        @param ebuilds: cpv -> metadata mapping, used to derive categories
        @param eclasses: eclass name -> content (string or list of lines)
        @param installed: unused here (kept for interface compatibility)
        @param profile: config-file name -> lines for the test sub-profile
        @param repo_configs: repo name -> {config-file name: lines}
        @param user_config: config-file name -> lines (make.conf entries
            are merged with generated defaults)
        @param sets: sets-file name -> lines
        @raises ValueError: for a config file name not in self.config_files
        """

        user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for repo in self._repositories:
            # "DEFAULT" is skipped -- presumably it holds global
            # repos.conf settings rather than an actual repository.
            if repo == "DEFAULT":
                continue

            repo_dir = self._get_repo_dir(repo)
            profile_dir = os.path.join(repo_dir, "profiles")
            metadata_dir = os.path.join(repo_dir, "metadata")
            os.makedirs(metadata_dir)

            # Create $REPO/profiles/categories
            categories = set()
            for cpv in ebuilds:
                ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
                if ebuilds_repo is None:
                    ebuilds_repo = "test_repo"
                if ebuilds_repo == repo:
                    categories.add(catsplit(cpv)[0])

            categories_file = os.path.join(profile_dir, "categories")
            with open(categories_file, "w") as f:
                for cat in categories:
                    f.write(cat + "\n")

            # Create $REPO/profiles/license_groups
            license_file = os.path.join(profile_dir, "license_groups")
            with open(license_file, "w") as f:
                f.write("EULA TEST\n")

            repo_config = repo_configs.get(repo)
            if repo_config:
                for config_file, lines in repo_config.items():
                    # Allow either an exact known name or a path under a
                    # known config directory (fnmatch against "<name>/*").
                    if config_file not in self.config_files and not any(
                            fnmatch.fnmatch(config_file, os.path.join(x, "*"))
                            for x in self.config_files):
                        raise ValueError("Unknown config file: '%s'" %
                                         config_file)

                    # layout.conf lives under metadata/, everything else
                    # under profiles/.
                    if config_file in ("layout.conf", ):
                        file_name = os.path.join(repo_dir, "metadata",
                                                 config_file)
                    else:
                        file_name = os.path.join(profile_dir, config_file)
                        if "/" in config_file and not os.path.isdir(
                                os.path.dirname(file_name)):
                            os.makedirs(os.path.dirname(file_name))
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)
                        # Temporarily write empty value of masters until it becomes default.
                        # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
                        if config_file == "layout.conf" and not any(
                                line.startswith(("masters =",
                                                 "# use implicit masters"))
                                for line in lines):
                            f.write("masters =\n")

            # Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
            eclass_dir = os.path.join(repo_dir, "eclass")
            os.makedirs(eclass_dir)

            for eclass_name, eclass_content in eclasses.items():
                with open(
                        os.path.join(eclass_dir,
                                     "{}.eclass".format(eclass_name)),
                        "wt") as f:
                    # Accept either a single string or a list of lines.
                    if isinstance(eclass_content, str):
                        eclass_content = [eclass_content]
                    for line in eclass_content:
                        f.write("{}\n".format(line))

            # Temporarily write empty value of masters until it becomes default.
            if not repo_config or "layout.conf" not in repo_config:
                layout_conf_path = os.path.join(repo_dir, "metadata",
                                                "layout.conf")
                with open(layout_conf_path, "w") as f:
                    f.write("masters =\n")

            if repo == "test_repo":
                # Create a minimal profile in /var/db/repos/gentoo
                sub_profile_dir = os.path.join(profile_dir, "default", "linux",
                                               "x86", "test_profile")
                os.makedirs(sub_profile_dir)

                # Only write a default EAPI when the caller's profile does
                # not supply its own "eapi" file.
                if not (profile and "eapi" in profile):
                    eapi_file = os.path.join(sub_profile_dir, "eapi")
                    with open(eapi_file, "w") as f:
                        f.write("0\n")

                make_defaults_file = os.path.join(sub_profile_dir,
                                                  "make.defaults")
                with open(make_defaults_file, "w") as f:
                    f.write('ARCH="x86"\n')
                    f.write('ACCEPT_KEYWORDS="x86"\n')

                use_force_file = os.path.join(sub_profile_dir, "use.force")
                with open(use_force_file, "w") as f:
                    f.write("x86\n")

                parent_file = os.path.join(sub_profile_dir, "parent")
                with open(parent_file, "w") as f:
                    f.write("..\n")

                if profile:
                    for config_file, lines in profile.items():
                        if config_file not in self.config_files:
                            raise ValueError("Unknown config file: '%s'" %
                                             config_file)

                        file_name = os.path.join(sub_profile_dir, config_file)
                        with open(file_name, "w") as f:
                            for line in lines:
                                f.write("%s\n" % line)

                # Create profile symlink
                os.symlink(sub_profile_dir,
                           os.path.join(user_config_dir, "make.profile"))

        # GPG home used for binpkg signing/verification; the environment
        # variable must be set by the test harness.
        gpg_test_path = os.environ["PORTAGE_GNUPGHOME"]

        # Baseline make.conf for the playground; user_config["make.conf"]
        # entries are appended below and may override these.
        make_conf = {
            "ACCEPT_KEYWORDS": "x86",
            "BINPKG_GPG_SIGNING_BASE_COMMAND":
            f"flock {gpg_test_path}/portage-binpkg-gpg.lock /usr/bin/gpg --sign --armor --yes --pinentry-mode loopback --passphrase GentooTest [PORTAGE_CONFIG]",
            "BINPKG_GPG_SIGNING_GPG_HOME": gpg_test_path,
            "BINPKG_GPG_SIGNING_KEY": "0x5D90EA06352177F6",
            "BINPKG_GPG_VERIFY_GPG_HOME": gpg_test_path,
            "CLEAN_DELAY": "0",
            "DISTDIR": self.distdir,
            "EMERGE_WARNING_DELAY": "0",
            "FEATURES": "${FEATURES} binpkg-signing binpkg-request-signature "
            "gpg-keepalive",
            "PKGDIR": self.pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_TMPDIR": os.path.join(self.eroot, "var/tmp"),
        }

        if os.environ.get("NOCOLOR"):
            make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if "PORTAGE_USERNAME" in os.environ:
            make_conf["PORTAGE_USERNAME"] = os.environ["PORTAGE_USERNAME"]
        if "PORTAGE_GRPNAME" in os.environ:
            make_conf["PORTAGE_GRPNAME"] = os.environ["PORTAGE_GRPNAME"]

        make_conf_lines = []
        for k_v in make_conf.items():
            make_conf_lines.append('%s="%s"' % k_v)

        if "make.conf" in user_config:
            make_conf_lines.extend(user_config["make.conf"])

        if not portage.process.sandbox_capable or os.environ.get(
                "SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            make_conf_lines.append(
                'FEATURES="${FEATURES} -sandbox -usersandbox"')

        configs = user_config.copy()
        configs["make.conf"] = make_conf_lines

        for config_file, lines in configs.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)

            file_name = os.path.join(user_config_dir, config_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        # Create /usr/share/portage/config/make.globals
        # NOTE(review): cnf_path / cnf_path_repoman appear to be defined at
        # module scope -- confirm they point at the portage cnf directory.
        make_globals_path = os.path.join(self.eroot,
                                         GLOBAL_CONFIG_PATH.lstrip(os.sep),
                                         "make.globals")
        ensure_dirs(os.path.dirname(make_globals_path))
        os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path)

        # Create /usr/share/portage/config/sets/portage.conf
        default_sets_conf_dir = os.path.join(self.eroot,
                                             "usr/share/portage/config/sets")

        try:
            os.makedirs(default_sets_conf_dir)
        except os.error:
            pass

        provided_sets_portage_conf = os.path.join(cnf_path, "sets",
                                                  "portage.conf")
        os.symlink(
            provided_sets_portage_conf,
            os.path.join(default_sets_conf_dir, "portage.conf"),
        )

        set_config_dir = os.path.join(user_config_dir, "sets")

        try:
            os.makedirs(set_config_dir)
        except os.error:
            pass

        for sets_file, lines in sets.items():
            file_name = os.path.join(set_config_dir, sets_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        if cnf_path_repoman is not None:
            # Create /usr/share/repoman
            repoman_share_dir = os.path.join(self.eroot, "usr", "share",
                                             "repoman")
            os.symlink(cnf_path_repoman, repoman_share_dir)
Ejemplo n.º 25
0
 def _cpv(self):
     """Return the wrapped package's cpv with its category replaced
     by the 'null' placeholder."""
     package_part = catsplit(self._pkg._cpv)[1]
     return 'null/%s' % package_part
Ejemplo n.º 26
0
	def __get_path(self,cpv):
		"""Return the filesystem path of the ebuild for the given cpv
		under self.portdir (category/pn/pv.ebuild layout)."""
		category, package_name = catsplit(cpv_getkey(cpv))
		ebuild_name = os.path.basename(cpv) + ".ebuild"
		return os.path.join(self.portdir, category, package_name, ebuild_name)
Ejemplo n.º 27
0
 def __new__(self, s):
     """Parse *s* into an atom and dispatch: atoms in the 'null'
     placeholder category become UnexpandedPortageAtom, all others
     CompletePortageAtom."""
     atom = _get_atom(s)
     is_unexpanded = catsplit(atom.cp)[0] == 'null'
     if is_unexpanded:
         return UnexpandedPortageAtom(atom)
     return CompletePortageAtom(atom)
Ejemplo n.º 28
0
 def __get_path(self, cpv):
     """Return the filesystem path of the ebuild for the given cpv
     under self.portdir (category/pn/pv.ebuild layout)."""
     category, package_name = catsplit(cpv_getkey(cpv))
     ebuild_name = os.path.basename(cpv) + ".ebuild"
     return os.path.join(self.portdir, category, package_name, ebuild_name)
Ejemplo n.º 29
0
def match_from_list(mydep, candidate_list):
	"""
	Searches list for entries that matches the package.

	@param mydep: The package atom to match
	@type mydep: String
	@param candidate_list: The list of package atoms to compare against
	@param candidate_list: List
	@rtype: List
	@return: A list of package atoms that match the given package atom
	"""

	if not candidate_list:
		return []

	# NOTE(review): imported here rather than at module level, presumably
	# to avoid a circular import -- confirm against the surrounding file.
	from portage.util import writemsg
	# A leading "!" (blocker marker) does not affect matching; strip it.
	if "!" == mydep[:1]:
		mydep = mydep[1:]
	if not isinstance(mydep, Atom):
		mydep = Atom(mydep)

	mycpv     = mydep.cpv
	mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
	slot      = mydep.slot

	if not mycpv_cps:
		# No version component: match on category/package only.
		cat, pkg = catsplit(mycpv)
		ver      = None
		rev      = None
	else:
		cat, pkg, ver, rev = mycpv_cps
		if mydep == mycpv:
			raise KeyError(_("Specific key requires an operator"
				" (%s) (try adding an '=')") % (mydep))

	if ver and rev:
		operator = mydep.operator
		if not operator:
			writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
			return []
	else:
		operator = None

	mylist = []

	if operator is None:
		# Versionless atom: keep candidates whose category/package equals
		# the atom's cp.
		for x in candidate_list:
			cp = getattr(x, "cp", None)
			if cp is None:
				mysplit = catpkgsplit(remove_slot(x))
				if mysplit is not None:
					cp = mysplit[0] + '/' + mysplit[1]
			if cp != mycpv:
				continue
			mylist.append(x)

	elif operator == "=": # Exact match
		for x in candidate_list:
			xcpv = getattr(x, "cpv", None)
			if xcpv is None:
				xcpv = remove_slot(x)
			if not cpvequal(xcpv, mycpv):
				continue
			mylist.append(x)

	elif operator == "=*": # glob match
		# XXX: Nasty special casing for leading zeros
		# Required as =* is a literal prefix match, so can't
		# use vercmp
		mysplit = catpkgsplit(mycpv)
		myver = mysplit[2].lstrip("0")
		if not myver or not myver[0].isdigit():
			myver = "0"+myver
		mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
		for x in candidate_list:
			xs = getattr(x, "cpv_split", None)
			if xs is None:
				xs = catpkgsplit(remove_slot(x))
			# Normalize the candidate's version the same way so the
			# prefix comparison below is consistent.
			myver = xs[2].lstrip("0")
			if not myver or not myver[0].isdigit():
				myver = "0"+myver
			xcpv = xs[0]+"/"+xs[1]+"-"+myver
			if xcpv.startswith(mycpv):
				mylist.append(x)

	elif operator == "~": # version, any revision, match
		for x in candidate_list:
			xs = getattr(x, "cpv_split", None)
			if xs is None:
				xs = catpkgsplit(remove_slot(x))
			if xs is None:
				raise InvalidData(x)
			# Compare category/package-version while ignoring the revision
			# component (xs[3] / rev are never consulted here).
			if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
				continue
			if xs[2] != ver:
				continue
			mylist.append(x)

	elif operator in [">", ">=", "<", "<="]:
		mysplit = ["%s/%s" % (cat, pkg), ver, rev]
		for x in candidate_list:
			xs = getattr(x, "cpv_split", None)
			if xs is None:
				xs = catpkgsplit(remove_slot(x))
			xcat, xpkg, xver, xrev = xs
			xs = ["%s/%s" % (xcat, xpkg), xver, xrev]
			try:
				# result compares the candidate (xs) against the atom
				# (mysplit); None means the cp parts differ.
				result = pkgcmp(xs, mysplit)
			except ValueError: # pkgcmp may return ValueError during int() conversion
				writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
				raise
			if result is None:
				continue
			elif operator == ">":
				if result > 0:
					mylist.append(x)
			elif operator == ">=":
				if result >= 0:
					mylist.append(x)
			elif operator == "<":
				if result < 0:
					mylist.append(x)
			elif operator == "<=":
				if result <= 0:
					mylist.append(x)
			else:
				# NOTE(review): unreachable -- the four operators checked
				# above are exhaustive for this branch.
				raise KeyError(_("Unknown operator: %s") % mydep)
	else:
		raise KeyError(_("Unknown operator: %s") % mydep)

	# Post-filter by slot when the atom specifies one.
	if slot is not None:
		candidate_list = mylist
		mylist = []
		for x in candidate_list:
			xslot = getattr(x, "slot", False)
			if xslot is False:
				xslot = dep_getslot(x)
			if xslot is not None and xslot != slot:
				continue
			mylist.append(x)

	# Post-filter by USE dependencies for candidates that expose USE state;
	# candidates without a "use" attribute pass through unfiltered.
	if mydep.use:
		candidate_list = mylist
		mylist = []
		for x in candidate_list:
			use = getattr(x, "use", None)
			if use is not None:
				is_valid_flag = x.iuse.is_valid_flag
				missing_iuse = False
				for y in mydep.use.required:
					if not is_valid_flag(y):
						missing_iuse = True
						break
				if missing_iuse:
					continue
				if mydep.use.enabled.difference(use.enabled):
					continue
				if mydep.use.disabled.intersection(use.enabled):
					continue
			mylist.append(x)

	return mylist
Ejemplo n.º 30
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@return: 1 on success and 0 on failure
	"""
    if mysettings is None or myportdb is None:
        raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")

    try:
        # NOTE(review): this counter presumably exempts manifest-related
        # ebuild operations from dependency checks while we regenerate
        # digests; it is restored in the finally block below.
        portage._doebuild_manifest_exempt_depend += 1
        # Map each distfile name to the list of cpvs that reference it.
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; " "repository is configured to not use them\n")
                % mysettings["O"]
            )
            return 1

            # Don't require all hashes since that can trigger excessive
            # fetches when sufficient digests already exist.  To ease transition
            # while Manifest 1 is being removed, only require hashes that will
            # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and it's size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                # No manifest entry yet: the file is "missing" unless a
                # non-empty copy already sits in DISTDIR.
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                # File exists: re-fetch if it is empty or its size
                # disagrees with the recorded manifest size.
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            for myfile in missing_files:
                uris = set()
                all_restrict = set()
                for cpv in distfiles_map[myfile]:
                    uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                    restrict = myportdb.aux_get(cpv, ["RESTRICT"], mytree=mytree)[0]
                    # Here we ignore conditional parts of RESTRICT since
                    # they don't apply unconditionally. Assume such
                    # conditionals only apply on the client side where
                    # digestgen() does not need to be called.
                    all_restrict.update(use_reduce(restrict, flat=True, matchnone=True))

                    # fetch() uses CATEGORY and PF to display a message
                    # when fetch restriction is triggered.
                    cat, pf = catsplit(cpv)
                    mysettings["CATEGORY"] = cat
                    mysettings["PF"] = pf

                    # fetch() uses PORTAGE_RESTRICT to control fetch
                    # restriction, which is only applied to files that
                    # are not fetchable via a mirror:// URI.
                mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, mysettings):
                    # NOTE(review): cpv here is the last value left over
                    # from the inner loop above -- confirm that is the
                    # intended ebuild for the nofetch phase.
                    myebuild = os.path.join(mysettings["O"], catsplit(cpv)[1] + ".ebuild")
                    spawn_nofetch(myportdb, myebuild)
                    writemsg(_("!!! Fetch failed for %s, can't update " "Manifest\n") % myfile, noiselevel=-1)
                    if myfile in dist_hashes and st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(
                            _(
                                "!!! If you would like to "
                                "forcefully replace the existing "
                                "Manifest entry\n!!! for %s, use "
                                "the following command:\n"
                            )
                            % myfile
                            + "!!!    "
                            + colorize("INFORM", "ebuild --force %s manifest" % os.path.basename(myebuild))
                            + "\n",
                            noiselevel=-1,
                        )
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True, assumeDistHashesAlways=("assume-digests" in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update " "Manifest\n") % e, noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e,), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            # Report any DIST entries whose files are absent locally and
            # whose hashes were therefore assumed rather than computed.
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" + colorize("WARN", str(len(auto_assumed)).rjust(18)) + "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Ejemplo n.º 31
0
	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
		"""Build the on-disk profile/config layout for the temporary test
		environment rooted at self.eroot: per-repo profiles and metadata,
		a minimal test_repo profile, user config files (make.conf etc.)
		and package set files.

		NOTE(review): ``cnf_path`` is read from enclosing scope and
		presumably points at portage's bundled cnf directory -- confirm
		against the surrounding file.
		"""

		user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for repo in self._repositories:
			if repo == "DEFAULT":
				continue

			repo_dir = self._get_repo_dir(repo)
			profile_dir = os.path.join(repo_dir, "profiles")
			metadata_dir = os.path.join(repo_dir, "metadata")
			os.makedirs(metadata_dir)

			#Create $REPO/profiles/categories
			categories = set()
			for cpv in ebuilds:
				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
				if ebuilds_repo is None:
					ebuilds_repo = "test_repo"
				if ebuilds_repo == repo:
					categories.add(catsplit(cpv)[0])

			categories_file = os.path.join(profile_dir, "categories")
			with open(categories_file, "w") as f:
				for cat in categories:
					f.write(cat + "\n")

			#Create $REPO/profiles/license_groups
			license_file = os.path.join(profile_dir, "license_groups")
			with open(license_file, "w") as f:
				f.write("EULA TEST\n")

			repo_config = repo_configs.get(repo)
			if repo_config:
				for config_file, lines in repo_config.items():
					# Accept either an exact known config file name or a
					# glob under one of the known config directories.
					if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
						raise ValueError("Unknown config file: '%s'" % config_file)

					if config_file in ("layout.conf",):
						file_name = os.path.join(repo_dir, "metadata", config_file)
					else:
						file_name = os.path.join(profile_dir, config_file)
						if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
							os.makedirs(os.path.dirname(file_name))
					with open(file_name, "w") as f:
						for line in lines:
							f.write("%s\n" % line)
						# Temporarily write empty value of masters until it becomes default.
						# TODO: Delete all references to "# use implicit masters" when empty value becomes default.
						if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
							f.write("masters =\n")

			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
			os.makedirs(os.path.join(repo_dir, "eclass"))

			# Temporarily write empty value of masters until it becomes default.
			if not repo_config or "layout.conf" not in repo_config:
				layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
				with open(layout_conf_path, "w") as f:
					f.write("masters =\n")

			if repo == "test_repo":
				#Create a minimal profile in /usr/portage
				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
				os.makedirs(sub_profile_dir)

				if not (profile and "eapi" in profile):
					eapi_file = os.path.join(sub_profile_dir, "eapi")
					with open(eapi_file, "w") as f:
						f.write("0\n")

				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
				with open(make_defaults_file, "w") as f:
					f.write("ARCH=\"x86\"\n")
					f.write("ACCEPT_KEYWORDS=\"x86\"\n")

				use_force_file = os.path.join(sub_profile_dir, "use.force")
				with open(use_force_file, "w") as f:
					f.write("x86\n")

				parent_file = os.path.join(sub_profile_dir, "parent")
				with open(parent_file, "w") as f:
					f.write("..\n")

				if profile:
					for config_file, lines in profile.items():
						if config_file not in self.config_files:
							raise ValueError("Unknown config file: '%s'" % config_file)

						file_name = os.path.join(sub_profile_dir, config_file)
						with open(file_name, "w") as f:
							for line in lines:
								f.write("%s\n" % line)

				#Create profile symlink
				os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))

		# Base make.conf values for the temporary environment; user_config
		# may extend them below.
		make_conf = {
			"ACCEPT_KEYWORDS": "x86",
			"CLEAN_DELAY": "0",
			"DISTDIR" : self.distdir,
			"EMERGE_WARNING_DELAY": "0",
			"PKGDIR": self.pkgdir,
			"PORTAGE_INST_GID": str(portage.data.portage_gid),
			"PORTAGE_INST_UID": str(portage.data.portage_uid),
			"PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
		}

		if os.environ.get("NOCOLOR"):
			make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
		# need to be inherited by ebuild subprocesses.
		if 'PORTAGE_USERNAME' in os.environ:
			make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
		if 'PORTAGE_GRPNAME' in os.environ:
			make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

		make_conf_lines = []
		for k_v in make_conf.items():
			make_conf_lines.append('%s="%s"' % k_v)

		if "make.conf" in user_config:
			make_conf_lines.extend(user_config["make.conf"])

		if not portage.process.sandbox_capable or \
			os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')

		configs = user_config.copy()
		configs["make.conf"] = make_conf_lines

		for config_file, lines in configs.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			with open(file_name, "w") as f:
				for line in lines:
					f.write("%s\n" % line)

		#Create /usr/share/portage/config/make.globals
		make_globals_path = os.path.join(self.eroot,
			GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
		ensure_dirs(os.path.dirname(make_globals_path))
		os.symlink(os.path.join(cnf_path, "make.globals"),
			make_globals_path)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")

		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = (
			os.path.join(cnf_path, "sets", "portage.conf"))
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			file_name = os.path.join(set_config_dir, sets_file)
			with open(file_name, "w") as f:
				for line in lines:
					f.write("%s\n" % line)
Ejemplo n.º 32
0
	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):

		user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for repo in self._repositories:
			if repo == "DEFAULT":
				continue

			repo_dir = self._get_repo_dir(repo)
			profile_dir = os.path.join(repo_dir, "profiles")
			metadata_dir = os.path.join(repo_dir, "metadata")
			os.makedirs(metadata_dir)

			#Create $REPO/profiles/categories
			categories = set()
			for cpv in ebuilds:
				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
				if ebuilds_repo is None:
					ebuilds_repo = "test_repo"
				if ebuilds_repo == repo:
					categories.add(catsplit(cpv)[0])

			categories_file = os.path.join(profile_dir, "categories")
			f = open(categories_file, "w")
			for cat in categories:
				f.write(cat + "\n")
			f.close()

			#Create $REPO/profiles/license_groups
			license_file = os.path.join(profile_dir, "license_groups")
			f = open(license_file, "w")
			f.write("EULA TEST\n")
			f.close()

			repo_config = repo_configs.get(repo)
			if repo_config:
				for config_file, lines in repo_config.items():
					if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
						raise ValueError("Unknown config file: '%s'" % config_file)

					if config_file in ("layout.conf",):
						file_name = os.path.join(repo_dir, "metadata", config_file)
					else:
						file_name = os.path.join(profile_dir, config_file)
						if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
							os.makedirs(os.path.dirname(file_name))
					f = open(file_name, "w")
					for line in lines:
						f.write("%s\n" % line)
					f.close()

			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
			os.makedirs(os.path.join(repo_dir, "eclass"))

			if repo == "test_repo":
				#Create a minimal profile in /usr/portage
				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
				os.makedirs(sub_profile_dir)

				if not (profile and "eapi" in profile):
					eapi_file = os.path.join(sub_profile_dir, "eapi")
					f = open(eapi_file, "w")
					f.write("0\n")
					f.close()

				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
				f = open(make_defaults_file, "w")
				f.write("ARCH=\"x86\"\n")
				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
				f.close()

				use_force_file = os.path.join(sub_profile_dir, "use.force")
				f = open(use_force_file, "w")
				f.write("x86\n")
				f.close()

				parent_file = os.path.join(sub_profile_dir, "parent")
				f = open(parent_file, "w")
				f.write("..\n")
				f.close()

				if profile:
					for config_file, lines in profile.items():
						if config_file not in self.config_files:
							raise ValueError("Unknown config file: '%s'" % config_file)

						file_name = os.path.join(sub_profile_dir, config_file)
						f = open(file_name, "w")
						for line in lines:
							f.write("%s\n" % line)
						f.close()

				#Create profile symlink
				os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))

				#Create minimal herds.xml
				herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
				with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
					f.write(herds_xml)

		make_conf = {
			"ACCEPT_KEYWORDS": "x86",
			"CLEAN_DELAY": "0",
			"DISTDIR" : self.distdir,
			"EMERGE_WARNING_DELAY": "0",
			"PKGDIR": self.pkgdir,
			"PORTAGE_INST_GID": str(portage.data.portage_gid),
			"PORTAGE_INST_UID": str(portage.data.portage_uid),
			"PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
		}

		if os.environ.get("NOCOLOR"):
			make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
		# need to be inherited by ebuild subprocesses.
		if 'PORTAGE_USERNAME' in os.environ:
			make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
		if 'PORTAGE_GRPNAME' in os.environ:
			make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

		make_conf_lines = []
		for k_v in make_conf.items():
			make_conf_lines.append('%s="%s"' % k_v)

		if "make.conf" in user_config:
			make_conf_lines.extend(user_config["make.conf"])

		if not portage.process.sandbox_capable or \
			os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')

		configs = user_config.copy()
		configs["make.conf"] = make_conf_lines

		for config_file, lines in configs.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		#Create /usr/share/portage/config/make.globals
		make_globals_path = os.path.join(self.eroot,
			GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
		ensure_dirs(os.path.dirname(make_globals_path))
		os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
			make_globals_path)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")

		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = \
			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			file_name = os.path.join(set_config_dir, sets_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()
Ejemplo n.º 33
0
 def __new__(self, key):
     """Construct the incomplete key from only the package part of *key*."""
     package_part = catsplit(key)[1]
     return PMIncompletePackageKey.__new__(self, package_part)
Ejemplo n.º 34
0
 def package(self):
     """Return the package-name component of this category/package key."""
     split_key = catsplit(self)
     return split_key[1]
Ejemplo n.º 35
0
    def _create_profile(self, ebuilds, installed, profile, repo_configs,
                        user_config, sets):
        """Build the on-disk profile/config layout for the temporary test
        environment rooted at self.eroot: per-repo profiles and metadata
        (including a minimal herds metadata.xml for test_repo), user
        config files (make.conf etc.) and package set files.

        NOTE(review): ``cnf_path`` is read from enclosing scope and
        presumably points at portage's bundled cnf directory -- confirm
        against the surrounding file.
        """

        user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for repo in self._repositories:
            if repo == "DEFAULT":
                continue

            repo_dir = self._get_repo_dir(repo)
            profile_dir = os.path.join(repo_dir, "profiles")
            metadata_dir = os.path.join(repo_dir, "metadata")
            os.makedirs(metadata_dir)

            #Create $REPO/profiles/categories
            categories = set()
            for cpv in ebuilds:
                ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
                if ebuilds_repo is None:
                    ebuilds_repo = "test_repo"
                if ebuilds_repo == repo:
                    categories.add(catsplit(cpv)[0])

            categories_file = os.path.join(profile_dir, "categories")
            with open(categories_file, "w") as f:
                for cat in categories:
                    f.write(cat + "\n")

            #Create $REPO/profiles/license_groups
            license_file = os.path.join(profile_dir, "license_groups")
            with open(license_file, "w") as f:
                f.write("EULA TEST\n")

            repo_config = repo_configs.get(repo)
            if repo_config:
                for config_file, lines in repo_config.items():
                    # Accept either an exact known config file name or a
                    # glob under one of the known config directories.
                    if config_file not in self.config_files and not any(
                            fnmatch.fnmatch(config_file, os.path.join(x, "*"))
                            for x in self.config_files):
                        raise ValueError("Unknown config file: '%s'" %
                                         config_file)

                    if config_file in ("layout.conf", ):
                        file_name = os.path.join(repo_dir, "metadata",
                                                 config_file)
                    else:
                        file_name = os.path.join(profile_dir, config_file)
                        if "/" in config_file and not os.path.isdir(
                                os.path.dirname(file_name)):
                            os.makedirs(os.path.dirname(file_name))
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)
                        # Temporarily write empty value of masters until it becomes default.
                        # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
                        if config_file == "layout.conf" and not any(
                                line.startswith(("masters =",
                                                 "# use implicit masters"))
                                for line in lines):
                            f.write("masters =\n")

            #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
            os.makedirs(os.path.join(repo_dir, "eclass"))

            # Temporarily write empty value of masters until it becomes default.
            if not repo_config or "layout.conf" not in repo_config:
                layout_conf_path = os.path.join(repo_dir, "metadata",
                                                "layout.conf")
                with open(layout_conf_path, "w") as f:
                    f.write("masters =\n")

            if repo == "test_repo":
                #Create a minimal profile in /usr/portage
                sub_profile_dir = os.path.join(profile_dir, "default", "linux",
                                               "x86", "test_profile")
                os.makedirs(sub_profile_dir)

                if not (profile and "eapi" in profile):
                    eapi_file = os.path.join(sub_profile_dir, "eapi")
                    with open(eapi_file, "w") as f:
                        f.write("0\n")

                make_defaults_file = os.path.join(sub_profile_dir,
                                                  "make.defaults")
                with open(make_defaults_file, "w") as f:
                    f.write("ARCH=\"x86\"\n")
                    f.write("ACCEPT_KEYWORDS=\"x86\"\n")

                use_force_file = os.path.join(sub_profile_dir, "use.force")
                with open(use_force_file, "w") as f:
                    f.write("x86\n")

                parent_file = os.path.join(sub_profile_dir, "parent")
                with open(parent_file, "w") as f:
                    f.write("..\n")

                if profile:
                    for config_file, lines in profile.items():
                        if config_file not in self.config_files:
                            raise ValueError("Unknown config file: '%s'" %
                                             config_file)

                        file_name = os.path.join(sub_profile_dir, config_file)
                        with open(file_name, "w") as f:
                            for line in lines:
                                f.write("%s\n" % line)

                #Create profile symlink
                os.symlink(sub_profile_dir,
                           os.path.join(user_config_dir, "make.profile"))

                #Create minimal herds.xml
                herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
                with open(os.path.join(metadata_dir, "metadata.xml"),
                          'w') as f:
                    f.write(herds_xml)

        # Base make.conf values for the temporary environment; user_config
        # may extend them below.
        make_conf = {
            "ACCEPT_KEYWORDS": "x86",
            "CLEAN_DELAY": "0",
            "DISTDIR": self.distdir,
            "EMERGE_WARNING_DELAY": "0",
            "PKGDIR": self.pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
        }

        if os.environ.get("NOCOLOR"):
            make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        make_conf_lines = []
        for k_v in make_conf.items():
            make_conf_lines.append('%s="%s"' % k_v)

        if "make.conf" in user_config:
            make_conf_lines.extend(user_config["make.conf"])

        if not portage.process.sandbox_capable or \
         os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            make_conf_lines.append(
                'FEATURES="${FEATURES} -sandbox -usersandbox"')

        configs = user_config.copy()
        configs["make.conf"] = make_conf_lines

        for config_file, lines in configs.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)

            file_name = os.path.join(user_config_dir, config_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        #Create /usr/share/portage/config/make.globals
        make_globals_path = os.path.join(self.eroot,
                                         GLOBAL_CONFIG_PATH.lstrip(os.sep),
                                         "make.globals")
        ensure_dirs(os.path.dirname(make_globals_path))
        os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path)

        #Create /usr/share/portage/config/sets/portage.conf
        default_sets_conf_dir = os.path.join(self.eroot,
                                             "usr/share/portage/config/sets")

        try:
            os.makedirs(default_sets_conf_dir)
        except os.error:
            pass

        provided_sets_portage_conf = (os.path.join(cnf_path, "sets",
                                                   "portage.conf"))
        os.symlink(provided_sets_portage_conf,
                   os.path.join(default_sets_conf_dir, "portage.conf"))

        set_config_dir = os.path.join(user_config_dir, "sets")

        try:
            os.makedirs(set_config_dir)
        except os.error:
            pass

        for sets_file, lines in sets.items():
            file_name = os.path.join(set_config_dir, sets_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)
Ejemplo n.º 36
0
def digestgen(myarchives=None,
              mysettings=None,
              overwrite=None,
              manifestonly=None,
              myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.
    @param myarchives: unused; retained only for positional
        backward compatibility
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param overwrite: deprecated; passing a non-None value only emits a
        DeprecationWarning
    @param manifestonly: deprecated; passing a non-None value only emits a
        DeprecationWarning
    @param myportdb: a portdbapi instance (deprecated to omit; falls back
        to the global portage.portdb)
    @type myportdb: portdbapi
    @rtype: int
    @returns: 1 on success and 0 on failure
    """
    if mysettings is None:
        raise TypeError("portage.digestgen(): missing" + \
         " required 'mysettings' parameter")
    if myportdb is None:
        warnings.warn(
            "portage.digestgen() called without 'myportdb' parameter",
            DeprecationWarning,
            stacklevel=2)
        myportdb = portage.portdb
    if overwrite is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'overwrite' parameter",
         DeprecationWarning, stacklevel=2)
    if manifestonly is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'manifestonly' parameter",
         DeprecationWarning, stacklevel=2)

    try:
        # Bump the exemption counter so nested doebuild calls below do not
        # themselves demand an up-to-date manifest (restored in finally).
        portage._doebuild_manifest_exempt_depend += 1
        # Invert the per-cpv fetchlists into distfile -> [cpv, ...] so we
        # can later tell which ebuilds reference a missing distfile.
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        manifest1_compat = False
        mf = Manifest(mysettings["O"],
                      mysettings["DISTDIR"],
                      fetchlist_dict=fetchlist_dict,
                      manifest1_compat=manifest1_compat)
        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and it's size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                # No manifest entry: fetch unless a non-empty copy of the
                # file already exists in DISTDIR.
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                # File absent: a recorded size of 0 or any missing required
                # hash type forces a (re)fetch.
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                # File present: re-fetch if it is empty or its size
                # disagrees with the manifest entry (partial download).
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            mytree = os.path.realpath(
                os.path.dirname(os.path.dirname(mysettings["O"])))
            # Clone the config so doebuild_environment() side effects don't
            # leak into the caller's mysettings.
            fetch_settings = config(clone=mysettings)
            debug = mysettings.get("PORTAGE_DEBUG") == "1"
            for myfile in missing_files:
                # Collect the SRC_URI candidates from every ebuild that
                # references this distfile.
                uris = set()
                for cpv in distfiles_map[myfile]:
                    myebuild = os.path.join(mysettings["O"],
                                            catsplit(cpv)[1] + ".ebuild")
                    # for RESTRICT=fetch, mirror, etc...
                    doebuild_environment(myebuild, "fetch", mysettings["ROOT"],
                                         fetch_settings, debug, 1, myportdb)
                    uris.update(
                        myportdb.getFetchMap(cpv, mytree=mytree)[myfile])

                fetch_settings["A"] = myfile  # for use by pkg_nofetch()

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, fetch_settings):
                    writemsg(_("!!! Fetch failed for %s, can't update "
                               "Manifest\n") % myfile,
                             noiselevel=-1)
                    if myfile in dist_hashes and \
                     st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(_("!!! If you would like to "
                         "forcefully replace the existing "
                         "Manifest entry\n!!! for %s, use "
                         "the following command:\n") % myfile + \
                         "!!!    " + colorize("INFORM",
                         "ebuild --force %s manifest" % \
                         os.path.basename(myebuild)) + "\n",
                         noiselevel=-1)
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update "
                       "Manifest\n") % e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        # Without assume-digests, report which digests were assumed for
        # distfiles that are absent from DISTDIR, and which packages
        # reference them.
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                mytree = os.path.realpath(
                    os.path.dirname(os.path.dirname(mysettings["O"])))
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        # Always restore the exemption counter, even on early return.
        portage._doebuild_manifest_exempt_depend -= 1
Ejemplo n.º 37
0
	def _create_profile(self, ebuilds, installed, profile, user_config, sets):
		"""
		Create a minimal portage profile tree and user configuration
		under self.portdir / self.eroot for use by tests.

		@param ebuilds: dict of cpv -> metadata for available ebuilds
		@param installed: dict of cpv -> metadata for installed packages
		@param profile: custom profile spec; non-empty values are not
			supported and raise NotImplementedError
		@param user_config: dict of config file name -> list of lines,
			validated against self.config_files
		@param sets: dict of sets file name -> list of lines
		@raises NotImplementedError: if a custom profile is requested
		@raises ValueError: for an unknown user_config file name
		"""
		#Create $PORTDIR/profiles/categories
		categories = set()
		for cpv in chain(ebuilds.keys(), installed.keys()):
			categories.add(catsplit(cpv)[0])

		profile_dir = os.path.join(self.portdir, "profiles")
		try:
			os.makedirs(profile_dir)
		except os.error:
			pass

		# Sort for deterministic file content (set iteration order varies).
		categories_file = os.path.join(profile_dir, "categories")
		with open(categories_file, "w") as f:
			for cat in sorted(categories):
				f.write(cat + "\n")

		#Create $REPO/profiles/license_groups
		license_file = os.path.join(profile_dir, "license_groups")
		with open(license_file, "w") as f:
			f.write("EULA TEST\n")

		#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
		os.makedirs(os.path.join(self.portdir, "eclass"))

		sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
		os.makedirs(sub_profile_dir)

		with open(os.path.join(sub_profile_dir, "eapi"), "w") as f:
			f.write("0\n")

		with open(os.path.join(sub_profile_dir, "make.defaults"), "w") as f:
			f.write("ARCH=\"x86\"\n")
			f.write("ACCEPT_KEYWORDS=\"x86\"\n")

		with open(os.path.join(sub_profile_dir, "use.force"), "w") as f:
			f.write("x86\n")

		if profile:
			#This is meant to allow the consumer to set up his own profile,
			#with package.mask and what not.
			raise NotImplementedError()

		#Create profile symlink
		os.makedirs(os.path.join(self.eroot, "etc"))
		os.symlink(sub_profile_dir, os.path.join(self.eroot, "etc", "make.profile"))

		user_config_dir = os.path.join(self.eroot, "etc", "portage")

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for config_file, lines in user_config.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			# with-statement guarantees the handle is closed even if a
			# write raises (the original leaked the handle on error).
			with open(os.path.join(user_config_dir, config_file), "w") as f:
				for line in lines:
					f.write("%s\n" % line)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")

		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = \
			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			with open(os.path.join(set_config_dir, sets_file), "w") as f:
				for line in lines:
					f.write("%s\n" % line)