Example #1
0
    def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
     target_root=None):
        """
        Record the filesystem locations (EPREFIX, PORTAGE_CONFIGROOT,
        profile path) used by later configuration loading.

        @param config_root: PORTAGE_CONFIGROOT; defaults to EPREFIX + os.sep
        @param eprefix: offset prefix; defaults to portage.const.EPREFIX
        @param config_profile_path: explicit profile path, or None
        @param local_config: when False, user configuration is ignored
        @param target_root: ROOT; stored as-is, finalized elsewhere
        """
        # Filled in later by profile/user-config discovery.
        self.user_profile_dir = None
        self._local_repo_conf_path = None
        self.eprefix = eprefix
        self.config_root = config_root
        self.target_root = target_root
        self._user_config = local_config

        if self.eprefix is None:
            self.eprefix = portage.const.EPREFIX
        elif self.eprefix:
            self.eprefix = normalize_path(self.eprefix)
            # A bare "/" prefix is equivalent to no prefix at all.
            if self.eprefix == os.sep:
                self.eprefix = ""

        if self.config_root is None:
            self.config_root = portage.const.EPREFIX + os.sep

        # Absolute, normalized, with exactly one trailing separator.
        self.config_root = normalize_path(os.path.abspath(
            self.config_root)).rstrip(os.path.sep) + os.path.sep

        self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
        self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
        self.config_profile_path = config_profile_path
Example #2
0
	def _expand_parent_colon(self, parentsFile, parentPath,
		repo_loc, repositories):
		"""
		Expand a ``repo:path`` (or ``:path``) entry from a profile's
		parent file into an absolute profile directory.  Entries that
		contain no colon are returned unchanged.

		@param parentsFile: path of the parent file (for error messages)
		@param parentPath: raw entry read from the parent file
		@param repo_loc: location of the current repository, or None
		@param repositories: repository configuration used to resolve
			named repositories
		@raises ParseError: when the referenced repository is unknown
		"""
		colon = parentPath.find(":")
		if colon == -1:
			# Plain relative path; nothing to expand.
			return parentPath

		if colon == 0:
			# ":path" refers to the current repository.
			if repo_loc is None:
				raise ParseError(
					_("Parent '%s' not found: '%s'") % (parentPath, parentsFile))
			base = repo_loc
		else:
			# "repo:path" names an explicit repository.
			try:
				base = repositories.get_location_for_name(parentPath[:colon])
			except KeyError:
				raise ParseError(
					_("Parent '%s' not found: '%s'") % (parentPath, parentsFile))

		return normalize_path(
			os.path.join(base, 'profiles', parentPath[colon + 1:]))
Example #3
0
	def __init__(self):
		"""
		Capture the ebuild environment (PF, ED, _E_DOCDESTTREE_) and set
		up the defaults used while installing documentation files.
		"""
		self.PF = ""
		self.ED = ""
		self.DOCDESTTREE = ""

		self.PF = os.environ.get("PF", "")
		if self.PF:
			self.PF = normalize_path(self.PF)

		features = os.environ.get("FEATURES", "").split()
		eapi = os.environ.get("EAPI", "0")
		if "force-prefix" in features or eapi not in ("0", "1", "2"):
			self.ED = os.environ.get("ED", "")
		else:
			# Old EAPIs have no ED variable; fall back to D.
			self.ED = os.environ.get("D", "")
		if self.ED:
			self.ED = normalize_path(self.ED)

		self.DOCDESTTREE = os.environ.get("_E_DOCDESTTREE_", "")
		if self.DOCDESTTREE:
			self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)

		# File extensions eligible for installation as documentation.
		self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
		if eapi in ("4-python", "5-progress"):
			self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
		self.allowed_files = []
		self.disallowed_dirs = ['CVS']
		self.recurse = False
		self.verbose = False
		self.doc_prefix = ""
Example #4
0
    def __init__(self):
        """
        Snapshot the ebuild environment (PF, ED, _E_DOCDESTTREE_) and
        initialize the defaults used while installing documentation.
        """
        self.PF = ""
        self.ED = ""
        self.DOCDESTTREE = ""

        if "PF" in os.environ:
            self.PF = os.environ["PF"]
            if self.PF:
                self.PF = normalize_path(self.PF)
        # EAPIs 0-2 define no ED variable unless FEATURES=force-prefix
        # is set, so fall back to D for them.
        if "force-prefix" not in os.environ.get(
            "FEATURES", ""
        ).split() and os.environ.get("EAPI", "0") in ("0", "1", "2"):
            self.ED = os.environ.get("D", "")
        else:
            self.ED = os.environ.get("ED", "")
        if self.ED:
            self.ED = normalize_path(self.ED)
        if "_E_DOCDESTTREE_" in os.environ:
            self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
            if self.DOCDESTTREE:
                self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)

        # File extensions eligible for installation as documentation.
        self.allowed_exts = ["css", "gif", "htm", "html", "jpeg", "jpg", "js", "png"]
        if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
            self.allowed_exts += ["ico", "svg", "xhtml", "xml"]
        self.allowed_files = []
        self.disallowed_dirs = ["CVS"]
        self.recurse = False
        self.verbose = False
        self.doc_prefix = ""
Example #5
0
	def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
		target_root=None):
		"""
		Record filesystem locations (EPREFIX, config root, profile path)
		for later configuration loading.

		@param local_config: when False, user configuration is ignored
		"""
		self.user_profile_dir = None
		self._local_repo_conf_path = None
		self.eprefix = eprefix
		self.config_root = config_root
		self.target_root = target_root
		self._user_config = local_config

		if self.eprefix is None:
			self.eprefix = portage.const.EPREFIX
		elif self.eprefix:
			prefix = normalize_path(self.eprefix)
			# Treat a bare "/" the same as an empty prefix.
			self.eprefix = "" if prefix == os.sep else prefix

		if self.config_root is None:
			self.config_root = portage.const.EPREFIX + os.sep

		# Absolute, normalized, with exactly one trailing separator.
		root = normalize_path(os.path.abspath(self.config_root))
		self.config_root = root.rstrip(os.path.sep) + os.path.sep

		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
		self.config_profile_path = config_profile_path
Example #6
0
    def __init__(self):
        """
        Read PF, ED and _E_DOCDESTTREE_ from the environment and set up
        documentation-install defaults.
        """
        self.PF = ""
        self.ED = ""
        self.DOCDESTTREE = ""

        if "PF" in os.environ:
            self.PF = os.environ["PF"]
            if self.PF:
                self.PF = normalize_path(self.PF)
        # Old EAPIs (0-2) provide only D unless force-prefix is enabled.
        if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
         os.environ.get("EAPI", "0") in ("0", "1", "2"):
            self.ED = os.environ.get("D", "")
        else:
            self.ED = os.environ.get("ED", "")
        if self.ED:
            self.ED = normalize_path(self.ED)
        if "_E_DOCDESTTREE_" in os.environ:
            self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
            if self.DOCDESTTREE:
                self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)

        # File extensions eligible for installation as documentation.
        self.allowed_exts = [
            'css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png'
        ]
        if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
            self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
        self.allowed_files = []
        self.disallowed_dirs = ['CVS']
        self.recurse = False
        self.verbose = False
        self.doc_prefix = ""
Example #7
0
    def __init__(self, ebuilds=None, binpkgs=None, installed=None, profile=None,
                 repo_configs=None, user_config=None, sets=None, world=None,
                 world_sets=None, distfiles=None, eprefix=None, targetroot=False,
                 debug=False):
        """
        ebuilds: cpv -> metadata mapping simulating available ebuilds.
        installed: cpv -> metadata mapping simulating installed packages.
            If a metadata key is missing, it gets a default value.
        profile: settings defined by the profile.
        """
        # Fix: the previous signature used mutable default arguments
        # ({} and []), which are shared across every instantiation.
        # None sentinels preserve the old call-site behavior safely.
        ebuilds = {} if ebuilds is None else ebuilds
        binpkgs = {} if binpkgs is None else binpkgs
        installed = {} if installed is None else installed
        profile = {} if profile is None else profile
        repo_configs = {} if repo_configs is None else repo_configs
        user_config = {} if user_config is None else user_config
        sets = {} if sets is None else sets
        world = [] if world is None else world
        world_sets = [] if world_sets is None else world_sets
        distfiles = {} if distfiles is None else distfiles

        self.debug = debug
        if eprefix is None:
            # Each playground gets its own scratch prefix directory.
            self.eprefix = normalize_path(tempfile.mkdtemp())
        else:
            self.eprefix = normalize_path(eprefix)

        # Tests may override portage.const.EPREFIX in order to
        # simulate a prefix installation. It's reasonable to do
        # this because tests should be self-contained such that
        # the "real" value of portage.const.EPREFIX is entirely
        # irrelevant (see bug #492932).
        portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

        self.eroot = self.eprefix + os.sep
        if targetroot:
            self.target_root = os.path.join(self.eroot, 'target_root')
        else:
            self.target_root = os.sep
        self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
        self.pkgdir = os.path.join(self.eprefix, "pkgdir")
        self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
        os.makedirs(self.vdbdir)

        if not debug:
            # Silence portage output while the playground is built.
            portage.util.noiselimit = -2

        self._repositories = {}
        #Make sure the main repo is always created
        self._get_repo_dir("test_repo")

        self._create_distfiles(distfiles)
        self._create_ebuilds(ebuilds)
        self._create_binpkgs(binpkgs)
        self._create_installed(installed)
        self._create_profile(ebuilds, installed, profile, repo_configs,
                             user_config, sets)
        self._create_world(world, world_sets)

        self.settings, self.trees = self._load_config()

        self._create_ebuild_manifests(ebuilds)

        portage.util.noiselimit = 0
Example #8
0
	def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
		user_config={}, sets={}, world=[], world_sets=[], distfiles={},
		eprefix=None, targetroot=False, debug=False):
		"""
		ebuilds: cpv -> metadata mapping simulating available ebuilds.
		installed: cpv -> metadata mapping simulating installed packages.
			If a metadata key is missing, it gets a default value.
		profile: settings defined by the profile.

		NOTE(review): the mutable default arguments ({} and []) are
		shared across calls; safe only while callers never mutate them.
		"""

		self.debug = debug
		if eprefix is None:
			# Each playground gets its own scratch prefix directory.
			self.eprefix = normalize_path(tempfile.mkdtemp())
		else:
			self.eprefix = normalize_path(eprefix)

		# Tests may override portage.const.EPREFIX in order to
		# simulate a prefix installation. It's reasonable to do
		# this because tests should be self-contained such that
		# the "real" value of portage.const.EPREFIX is entirely
		# irrelevant (see bug #492932).
		portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

		self.eroot = self.eprefix + os.sep
		if targetroot:
			self.target_root = os.path.join(self.eroot, 'target_root')
		else:
			self.target_root = os.sep
		self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
		self.pkgdir = os.path.join(self.eprefix, "pkgdir")
		self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
		os.makedirs(self.vdbdir)

		if not debug:
			# Silence portage output while the playground is built.
			portage.util.noiselimit = -2

		self._repositories = {}
		#Make sure the main repo is always created
		self._get_repo_dir("test_repo")

		self._create_distfiles(distfiles)
		self._create_ebuilds(ebuilds)
		self._create_binpkgs(binpkgs)
		self._create_installed(installed)
		self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
		self._create_world(world, world_sets)

		self.settings, self.trees = self._load_config()

		self._create_ebuild_manifests(ebuilds)

		portage.util.noiselimit = 0
Example #9
0
	def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
		"""
		@param portdb: ebuild repository dbapi
		@param vardb: installed-package dbapi; its settings are used here
		@param news_path: path fragment for news items
		@param unread_path: path fragment for unread-news tracking files
		@param language_id: preferred news language (default 'en')
		"""
		self.news_path = news_path
		self.unread_path = unread_path
		self.language_id = language_id
		self.config = vardb.settings
		self.vdb = vardb
		self.portdb = portdb

		# GLEP 42 says:
		#   All news item related files should be root owned and in the
		#   portage group with the group write (and, for directories,
		#   execute) bits set. News files should be world readable.
		self._uid = int(self.config["PORTAGE_INST_UID"])
		self._gid = portage_gid
		self._file_mode = 0o0064
		self._dir_mode  = 0o0074
		self._mode_mask = 0o0000

		portdir = portdb.repositories.mainRepoLocation()
		profiles_base = None
		if portdir is not None:
			profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
		profile_path = None
		if profiles_base is not None and portdb.settings.profile_path:
			# Store the profile path relative to the main repository's
			# profiles/ directory when it lives inside that tree.
			profile_path = normalize_path(
				os.path.realpath(portdb.settings.profile_path))
			if profile_path.startswith(profiles_base):
				profile_path = profile_path[len(profiles_base):]
		self._profile_path = profile_path
Example #10
0
	def set_root_override(self, root_overwrite=None):
		"""
		Finalize ROOT (target_root) and derive EROOT and the global
		config path from it.

		@param root_overwrite: ROOT value from make.conf, used only when
			no value was supplied to the constructor
		@raises InvalidLocation: if SYSROOT is neither "/" nor ROOT
		"""
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		self.target_root = self.target_root or os.sep

		# Absolute, normalized, with exactly one trailing separator.
		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		if self.sysroot != "/" and self.sysroot != self.target_root:
			writemsg(_("!!! Error: SYSROOT (currently %s) must "
				"equal / or ROOT (currently %s).\n") %
				(self.sysroot, self.target_root),
				noiselevel=-1)
			raise InvalidLocation(self.sysroot)

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		self.global_config_path = GLOBAL_CONFIG_PATH
		if portage.const.EPREFIX:
			self.global_config_path = os.path.join(portage.const.EPREFIX,
				GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #11
0
	def _addProfile(self, currentPath):
		"""
		Recursively append currentPath and its parent profiles (listed
		in the profile's "parent" file) to self.profiles, validating
		each profile's EAPI along the way.

		@raises ParseError: on an unsupported EAPI, an empty parent
			file, or a parent path that does not exist
		"""
		parentsFile = os.path.join(currentPath, "parent")
		eapi_file = os.path.join(currentPath, "eapi")
		try:
			eapi = codecs.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace'
				).readline().strip()
		except IOError:
			# A missing/unreadable eapi file is not an error here.
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				# Parent entries are resolved relative to this profile.
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))
				if os.path.exists(parentPath):
					self._addProfile(parentPath)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))
		# Parents are appended first, so ancestors precede currentPath.
		self.profiles.append(currentPath)
Example #12
0
def cacheddir(my_original_path,
              ignorecvs,
              ignorelist,
              EmptyOnError,
              followSymlinks=True):
    """
    List a directory and classify each entry by type.

    @param my_original_path: directory to list
    @param ignorecvs: if true, skip ".#" lock files and VCS directories
    @param ignorelist: entry names to exclude from the result
    @param EmptyOnError: accepted for interface compatibility; not used
        in the visible code path
    @param followSymlinks: stat() through symlinks instead of lstat()
    @return: (names, types) lists, where each type is 0=regular file,
        1=directory, 2=symlink, 3=other/unreadable; ([], []) when the
        path cannot be accessed
    @raises DirectoryNotFound: if the path is not a directory
    @raises PermissionDenied: if the path cannot be read
    """
    mypath = normalize_path(my_original_path)
    try:
        pathstat = os.stat(mypath)
        if not stat.S_ISDIR(pathstat.st_mode):
            raise DirectoryNotFound(mypath)
    except EnvironmentError as e:
        if e.errno == PermissionDenied.errno:
            raise PermissionDenied(mypath)
        del e
        return [], []
    except PortageException:
        return [], []
    else:
        try:
            fpaths = os.listdir(mypath)
        except EnvironmentError as e:
            if e.errno != errno.EACCES:
                raise
            del e
            raise PermissionDenied(mypath)
        ftype = []
        for x in fpaths:
            try:
                if followSymlinks:
                    pathstat = os.stat(mypath + "/" + x)
                else:
                    pathstat = os.lstat(mypath + "/" + x)

                if stat.S_ISREG(pathstat[stat.ST_MODE]):
                    ftype.append(0)
                elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
                    ftype.append(1)
                elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
                    ftype.append(2)
                else:
                    ftype.append(3)
            except (IOError, OSError):
                # Entry vanished or is unreadable; classify as "other".
                ftype.append(3)

    if ignorelist or ignorecvs:
        ret_list = []
        ret_ftype = []
        for file_path, file_type in zip(fpaths, ftype):
            if file_path in ignorelist:
                continue
            if ignorecvs and (file_path[:2] == ".#" or
                              (file_type == 1 and file_path in VCS_DIRS)):
                continue
            # Bug fix: the previous version only appended entries inside
            # the "elif ignorecvs" branch, so a non-empty ignorelist with
            # ignorecvs=False silently dropped every entry.
            ret_list.append(file_path)
            ret_ftype.append(file_type)
    else:
        ret_list = fpaths
        ret_ftype = ftype

    return ret_list, ret_ftype
Example #13
0
	def testSetCpv(self):
		"""
		Test the clone via constructor.

		Uses a package.env entry to enable USE=static-libs for
		dev-libs/A and verifies via setcpv() that the setting applies
		to A but does not leak into a later setcpv() call for B.
		"""

		ebuilds = {
			"dev-libs/A-1": {"IUSE": "static-libs"},
			"dev-libs/B-1": {"IUSE": "static-libs"},
		}

		# Env file "A" enables static-libs ...
		env_files = {
			"A" : ("USE=\"static-libs\"",)
		}

		# ... and package.env applies it to dev-libs/A only.
		package_env = (
			"dev-libs/A A",
		)

		eprefix = normalize_path(tempfile.mkdtemp())
		playground = None
		try:
			user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
			os.makedirs(user_config_dir)

			with io.open(os.path.join(user_config_dir, "package.env"),
				mode='w', encoding=_encodings['content']) as f:
				for line in package_env:
					f.write(line + "\n")

			env_dir = os.path.join(user_config_dir, "env")
			os.makedirs(env_dir)
			for k, v in env_files.items():
				with io.open(os.path.join(env_dir, k), mode='w',
					encoding=_encodings['content']) as f:
					for line in v:
						f.write(line + "\n")

			playground = ResolverPlayground(eprefix=eprefix, ebuilds=ebuilds)
			settings = config(clone=playground.settings)

			result = playground.run(["=dev-libs/A-1"])
			pkg, existing_node = result.depgraph._select_package(
				playground.eroot, Atom("=dev-libs/A-1"))
			settings.setcpv(pkg)
			self.assertTrue("static-libs" in
				settings["PORTAGE_USE"].split())

			# Test bug #522362, where a USE=static-libs package.env
			# setting leaked from one setcpv call to the next.
			pkg, existing_node = result.depgraph._select_package(
				playground.eroot, Atom("=dev-libs/B-1"))
			settings.setcpv(pkg)
			self.assertTrue("static-libs" not in
				settings["PORTAGE_USE"].split())

		finally:
			# Clean up the temp dir even if playground setup failed.
			if playground is None:
				shutil.rmtree(eprefix)
			else:
				playground.cleanup()
Example #14
0
	def testSetCpv(self):
		"""
		Test the clone via constructor.

		Verifies that a package.env USE setting is applied by setcpv()
		for the matching package and does not persist into the next
		setcpv() call (regression test for bug #522362).
		"""

		ebuilds = {
			"dev-libs/A-1": {"IUSE": "static-libs"},
			"dev-libs/B-1": {"IUSE": "static-libs"},
		}

		# Env file "A" enables static-libs ...
		env_files = {
			"A" : ("USE=\"static-libs\"",)
		}

		# ... and package.env applies it to dev-libs/A only.
		package_env = (
			"dev-libs/A A",
		)

		eprefix = normalize_path(tempfile.mkdtemp())
		playground = None
		try:
			user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
			os.makedirs(user_config_dir)

			with io.open(os.path.join(user_config_dir, "package.env"),
				mode='w', encoding=_encodings['content']) as f:
				for line in package_env:
					f.write(line + "\n")

			env_dir = os.path.join(user_config_dir, "env")
			os.makedirs(env_dir)
			for k, v in env_files.items():
				with io.open(os.path.join(env_dir, k), mode='w',
					encoding=_encodings['content']) as f:
					for line in v:
						f.write(line + "\n")

			playground = ResolverPlayground(eprefix=eprefix, ebuilds=ebuilds)
			settings = config(clone=playground.settings)

			result = playground.run(["=dev-libs/A-1"])
			pkg, existing_node = result.depgraph._select_package(
				playground.eroot, Atom("=dev-libs/A-1"))
			settings.setcpv(pkg)
			self.assertTrue("static-libs" in
				settings["PORTAGE_USE"].split())

			# Test bug #522362, where a USE=static-libs package.env
			# setting leaked from one setcpv call to the next.
			pkg, existing_node = result.depgraph._select_package(
				playground.eroot, Atom("=dev-libs/B-1"))
			settings.setcpv(pkg)
			self.assertTrue("static-libs" not in
				settings["PORTAGE_USE"].split())

		finally:
			# Clean up the temp dir even if playground setup failed.
			if playground is None:
				shutil.rmtree(eprefix)
			else:
				playground.cleanup()
Example #15
0
    def __init__(self,
                 portdb,
                 vardb,
                 news_path,
                 unread_path,
                 language_id='en'):
        """
        @param portdb: ebuild repository dbapi
        @param vardb: installed-package dbapi; its settings are used here
        @param news_path: path fragment for news items
        @param unread_path: path fragment for unread-news tracking files
        @param language_id: preferred news language (default 'en')
        """
        self.news_path = news_path
        self.unread_path = unread_path
        self.language_id = language_id
        self.config = vardb.settings
        self.vdb = vardb
        self.portdb = portdb

        # GLEP 42 says:
        #   All news item related files should be root owned and in the
        #   portage group with the group write (and, for directories,
        #   execute) bits set. News files should be world readable.
        self._uid = int(self.config["PORTAGE_INST_UID"])
        self._gid = portage_gid
        self._file_mode = 0o0064
        self._dir_mode = 0o0074
        self._mode_mask = 0o0000

        portdir = portdb.repositories.mainRepoLocation()
        profiles_base = None
        if portdir is not None:
            profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
        profile_path = None
        if profiles_base is not None and portdb.settings.profile_path:
            # Store the profile path relative to the main repository's
            # profiles/ directory when it lives inside that tree.
            profile_path = normalize_path(
                os.path.realpath(portdb.settings.profile_path))
            if profile_path.startswith(profiles_base):
                profile_path = profile_path[len(profiles_base):]
        self._profile_path = profile_path
Example #16
0
	def set_port_dirs(self, portdir, portdir_overlay):
		"""
		Record the main tree and overlay locations and compute the
		derived profile and package.mask search paths.

		@param portdir: location of the main tree
		@param portdir_overlay: shell-quoted, whitespace-separated list
			of overlay locations, or None
		"""
		self.portdir = portdir
		self.portdir_overlay = portdir_overlay
		if self.portdir_overlay is None:
			self.portdir_overlay = ""

		self.overlay_profiles = []
		for ov in shlex_split(self.portdir_overlay):
			ov = normalize_path(ov)
			profiles_dir = os.path.join(ov, "profiles")
			# Only overlays that actually ship a profiles/ dir count.
			if os.path.isdir(profiles_dir):
				self.overlay_profiles.append(profiles_dir)

		self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
		self.profile_and_user_locations = self.profile_locations[:]
		if self._user_config:
			self.profile_and_user_locations.append(self.abs_user_config)

		# Freeze both lists once fully built.
		self.profile_locations = tuple(self.profile_locations)
		self.profile_and_user_locations = tuple(self.profile_and_user_locations)

		self.pmask_locations = [os.path.join(portdir, "profiles")]
		self.pmask_locations.extend(self.profiles)
		self.pmask_locations.extend(self.overlay_profiles)
		self.pmask_locations = tuple(self.pmask_locations)
Example #17
0
    def set_root_override(self, root_overwrite=None):
        """
        Finalize ROOT (target_root) and derive EROOT and the global
        config path from it.

        @param root_overwrite: ROOT value from make.conf, used only when
            no value was supplied to the constructor
        @raises InvalidLocation: if SYSROOT is neither "/" nor ROOT
        """
        # Allow ROOT setting to come from make.conf if it's not overridden
        # by the constructor argument (from the calling environment).
        if self.target_root is None and root_overwrite is not None:
            self.target_root = root_overwrite
            if not self.target_root.strip():
                self.target_root = None
        self.target_root = self.target_root or os.sep

        # Absolute, normalized, with exactly one trailing separator.
        self.target_root = normalize_path(os.path.abspath(
            self.target_root)).rstrip(os.path.sep) + os.path.sep

        if self.sysroot != "/" and self.sysroot != self.target_root:
            writemsg(_("!!! Error: SYSROOT (currently %s) must "
                       "equal / or ROOT (currently %s).\n") %
                     (self.sysroot, self.target_root),
                     noiselevel=-1)
            raise InvalidLocation(self.sysroot)

        ensure_dirs(self.target_root)
        self._check_var_directory("ROOT", self.target_root)

        self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

        self.global_config_path = GLOBAL_CONFIG_PATH
        if portage.const.EPREFIX:
            self.global_config_path = os.path.join(
                portage.const.EPREFIX, GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #18
0
	def __init__(self, ebuilds={}, installed={}, profile={}, repo_configs={}, \
		user_config={}, sets={}, world=[], world_sets=[], distfiles={}, debug=False):
		"""
		ebuilds: cpv -> metadata mapping simulating available ebuilds.
		installed: cpv -> metadata mapping simulating installed packages.
			If a metadata key is missing, it gets a default value.
		profile: settings defined by the profile.

		NOTE(review): the mutable default arguments ({} and []) are
		shared across calls; safe only while callers never mutate them.
		"""
		self.debug = debug
		# Each playground gets its own scratch prefix directory.
		self.eprefix = normalize_path(tempfile.mkdtemp())
		self.eroot = self.eprefix + os.sep
		self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
		self.portdir = os.path.join(self.eroot, "usr/portage")
		self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
		os.makedirs(self.portdir)
		os.makedirs(self.vdbdir)

		if not debug:
			# Silence portage output while the playground is built.
			portage.util.noiselimit = -2

		self.repo_dirs = {}
		#Make sure the main repo is always created
		self._get_repo_dir("test_repo")

		self._create_distfiles(distfiles)
		self._create_ebuilds(ebuilds)
		self._create_installed(installed)
		self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
		self._create_world(world, world_sets)

		self.settings, self.trees = self._load_config()

		self._create_ebuild_manifests(ebuilds)

		portage.util.noiselimit = 0
Example #19
0
	def multiBuilder(self, options, settings, trees):
		"""
		Build a name -> StaticFileSet mapping from every regular file
		found under the configured "directory" option (default:
		PORTAGE_CONFIGROOT/USER_CONFIG_PATH/sets).

		@raises SetConfigError: on a name_pattern without a ${name}
			placeholder, an unknown repository reference, or a
			directory path that cannot be encoded
		"""
		rValue = {}
		directory = options.get("directory",
			os.path.join(settings["PORTAGE_CONFIGROOT"],
			USER_CONFIG_PATH, "sets"))
		name_pattern = options.get("name_pattern", "${name}")
		if not "$name" in name_pattern and not "${name}" in name_pattern:
			raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
		greedy = get_boolean(options, "greedy", False)
		# look for repository path variables
		match = self._repopath_match.match(directory)
		if match:
			try:
				directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
			except KeyError:
				raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])

		try:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='strict')
			# Now verify that we can also encode it.
			_unicode_encode(directory,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeError:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='replace')
			raise SetConfigError(
				_("Directory path contains invalid character(s) for encoding '%s': '%s'") \
				% (_encodings['fs'], directory))

		if os.path.isdir(directory):
			directory = normalize_path(directory)

			for parent, dirs, files in os.walk(directory):
				try:
					parent = _unicode_decode(parent,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					continue
				# Skip hidden directories and files, and *.metadata files.
				for d in dirs[:]:
					if d[:1] == '.':
						dirs.remove(d)
				for filename in files:
					try:
						filename = _unicode_decode(filename,
							encoding=_encodings['fs'], errors='strict')
					except UnicodeDecodeError:
						continue
					if filename[:1] == '.':
						continue
					if filename.endswith(".metadata"):
						continue
					# Set name comes from the path relative to directory.
					filename = os.path.join(parent,
						filename)[1 + len(directory):]
					myname = name_pattern.replace("$name", filename)
					myname = myname.replace("${name}", filename)
					rValue[myname] = StaticFileSet(
						os.path.join(directory, filename),
						greedy=greedy, dbapi=trees["vartree"].dbapi)
		return rValue
Example #20
0
    def testNormalizePath(self):
        """Check that normalize_path collapses duplicate leading slashes."""

        from portage.util import normalize_path

        raw = "///foo/bar/baz"
        expected = "/foo/bar/baz"
        self.assertEqual(normalize_path(raw), expected)
Example #21
0
	def multiBuilder(self, options, settings, trees):
		"""
		Create one StaticFileSet per regular file found under the
		configured "directory" option (default:
		PORTAGE_CONFIGROOT/USER_CONFIG_PATH/sets) and return them as a
		name -> set mapping.

		@raises SetConfigError: on a name_pattern without a ${name}
			placeholder, an unknown repository reference, or a
			directory path that cannot be encoded
		"""
		rValue = {}
		directory = options.get("directory",
			os.path.join(settings["PORTAGE_CONFIGROOT"],
			USER_CONFIG_PATH, "sets"))
		name_pattern = options.get("name_pattern", "${name}")
		if not "$name" in name_pattern and not "${name}" in name_pattern:
			raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
		greedy = get_boolean(options, "greedy", False)
		# look for repository path variables
		match = self._repopath_match.match(directory)
		if match:
			try:
				directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
			except KeyError:
				raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])

		try:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='strict')
			# Now verify that we can also encode it.
			_unicode_encode(directory,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeError:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='replace')
			raise SetConfigError(
				_("Directory path contains invalid character(s) for encoding '%s': '%s'") \
				% (_encodings['fs'], directory))

		if os.path.isdir(directory):
			directory = normalize_path(directory)

			for parent, dirs, files in os.walk(directory):
				try:
					parent = _unicode_decode(parent,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					continue
				# Skip hidden directories and files, and *.metadata files.
				for d in dirs[:]:
					if d[:1] == '.':
						dirs.remove(d)
				for filename in files:
					try:
						filename = _unicode_decode(filename,
							encoding=_encodings['fs'], errors='strict')
					except UnicodeDecodeError:
						continue
					if filename[:1] == '.':
						continue
					if filename.endswith(".metadata"):
						continue
					# Set name comes from the path relative to directory.
					filename = os.path.join(parent,
						filename)[1 + len(directory):]
					myname = name_pattern.replace("$name", filename)
					myname = myname.replace("${name}", filename)
					rValue[myname] = StaticFileSet(
						os.path.join(directory, filename),
						greedy=greedy, dbapi=trees["vartree"].dbapi)
		return rValue
Example #22
0
    def __init__(
        self,
        config_root=None,
        eprefix=None,
        config_profile_path=None,
        local_config=True,
        target_root=None,
        sysroot=None,
    ):
        """
        Record filesystem locations (EPREFIX, config root, SYSROOT,
        profile path) used by later configuration loading.

        @param config_root: PORTAGE_CONFIGROOT; defaults to
            EPREFIX + os.sep
        @param eprefix: offset prefix; defaults to portage.const.EPREFIX
        @param config_profile_path: explicit profile path, or None
        @param local_config: when False, user configuration is ignored
        @param target_root: ROOT; stored as-is, finalized elsewhere
        @param sysroot: SYSROOT; defaults to "/"
        """
        # Filled in later by profile/user-config discovery.
        self.user_profile_dir = None
        self._local_repo_conf_path = None
        self.eprefix = eprefix
        self.config_root = config_root
        self.target_root = target_root
        self.sysroot = sysroot
        self._user_config = local_config

        if self.eprefix is None:
            self.eprefix = portage.const.EPREFIX
        elif self.eprefix:
            self.eprefix = normalize_path(self.eprefix)
            # A bare "/" prefix is equivalent to no prefix at all.
            if self.eprefix == os.sep:
                self.eprefix = ""

        if self.config_root is None:
            self.config_root = portage.const.EPREFIX + os.sep

        # Absolute, normalized, with exactly one trailing separator.
        self.config_root = (normalize_path(
            os.path.abspath(self.config_root or os.sep)).rstrip(os.sep) +
                            os.sep)

        self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
        self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
        self.config_profile_path = config_profile_path

        if self.sysroot is None:
            self.sysroot = "/"
        else:
            self.sysroot = (normalize_path(
                os.path.abspath(self.sysroot or os.sep)).rstrip(os.sep) +
                            os.sep)

        self.esysroot = self.sysroot.rstrip(os.sep) + self.eprefix + os.sep

        # TODO: Set this via the constructor using
        # PORTAGE_OVERRIDE_EPREFIX.
        self.broot = portage.const.EPREFIX
Example #23
0
def process(mysettings, key, logentries, fulltext):
	"""
	Write the elog text for the current package to a per-package log
	file under PORT_LOGDIR (or EPREFIX/var/log/portage when PORT_LOGDIR
	is unset) and return the path of the file written.

	@param mysettings: portage config; CATEGORY, PF, EPREFIX and
		features are read from it
	@param key: unused in the visible code path
	@param logentries: unused in the visible code path
	@param fulltext: complete elog text to write
	@return: path of the log file that was written
	"""

	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		uid = -1
		if portage.data.secpass >= 2:
			uid = portage_uid
		ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)

	cat = mysettings['CATEGORY']
	pf = mysettings['PF']

	# Timestamped file name, e.g. "foo-1.0:20240101-120000.log".
	elogfilename = pf + ":" + _unicode_decode(
		time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
		encoding=_encodings['content'], errors='replace') + ".log"

	if "split-elog" in mysettings.features:
		log_subdir = os.path.join(logdir, "elog", cat)
		elogfilename = os.path.join(log_subdir, elogfilename)
	else:
		log_subdir = os.path.join(logdir, "elog")
		elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
	_ensure_log_subdirs(logdir, log_subdir)

	elogfile = io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['content'], errors='backslashreplace')
	elogfile.write(_unicode_decode(fulltext))
	elogfile.close()

	# Copy group permission bits from parent directory.
	elogdir_st = os.stat(log_subdir)
	elogdir_gid = elogdir_st.st_gid
	elogdir_grp_mode = 0o060 & elogdir_st.st_mode

	# Copy the uid from the parent directory if we have privileges
	# to do so, for compatibility with our default logrotate
	# config (see bug 378451). With the "su portage portage"
	# directive and logrotate-3.8.0, logrotate's chown call during
	# the compression phase will only succeed if the log file's uid
	# is portage_uid.
	logfile_uid = -1
	if portage.data.secpass >= 2:
		logfile_uid = elogdir_st.st_uid
	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
		mode=elogdir_grp_mode, mask=0)

	return elogfilename
def process(mysettings, key, logentries, fulltext):
	"""
	Append the elog messages for one package to
	$PORT_LOGDIR/elog/summary.log.

	@param mysettings: portage config providing PORT_LOGDIR and EPREFIX
	@param key: package identifier (cpv) named in the written header
	@param logentries: structured log entries (unused here; the
		preformatted fulltext is written instead)
	@param fulltext: complete, preformatted message text to append
	@return: path of the summary.log file that was written
	"""
	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		# Fall back to the standard log location under EPREFIX.
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		logdir_uid = -1
		if portage.data.secpass >= 2:
			logdir_uid = portage_uid
		ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)

	elogdir = os.path.join(logdir, "elog")
	_ensure_log_subdirs(logdir, elogdir)

	# TODO: Locking
	elogfilename = elogdir+"/summary.log"
	# Open in append mode; backslashreplace keeps undecodable message
	# bytes from raising during the write.
	elogfile = io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='a', encoding=_encodings['content'], errors='backslashreplace')

	# Copy group permission bits from parent directory.
	elogdir_st = os.stat(elogdir)
	elogdir_gid = elogdir_st.st_gid
	elogdir_grp_mode = 0o060 & elogdir_st.st_mode

	# Copy the uid from the parent directory if we have privileges
	# to do so, for compatibility with our default logrotate
	# config (see bug 378451). With the "su portage portage"
	# directive and logrotate-3.8.0, logrotate's chown call during
	# the compression phase will only succeed if the log file's uid
	# is portage_uid.
	logfile_uid = -1
	if portage.data.secpass >= 2:
		logfile_uid = elogdir_st.st_uid
	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
		mode=elogdir_grp_mode, mask=0)

	time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
		time.localtime(time.time()))
	# Avoid potential UnicodeDecodeError later.
	time_str = _unicode_decode(time_str,
		encoding=_encodings['content'], errors='replace')
	elogfile.write(_unicode_decode(
		_(">>> Messages generated by process " +
		"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
		{"pid": os.getpid(), "time": time_str, "pkg": key}))
	elogfile.write(_unicode_decode(fulltext))
	elogfile.write(_unicode_decode("\n"))
	elogfile.close()

	return elogfilename
Example #25
0
	def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
		target_root=None):
		"""
		Locate the profile and user configuration directories.

		@param config_root: PORTAGE_CONFIGROOT; None defaults to
			eprefix + "/"
		@param eprefix: offset prefix; None is treated as ""
		@param config_profile_path: explicit profile path override; when
			not given, fall back to PROFILE_PATH under config_root, then
			to <abs_user_config>/make.profile
		@param local_config: whether user configuration (including the
			custom profile) should be considered
		@param target_root: ROOT override, stored for later use
		"""
		self.user_profile_dir = None
		self._local_repo_conf_path = None
		self.eprefix = eprefix
		self.config_root = config_root
		self.target_root = target_root
		self._user_config = local_config

		if self.eprefix is None:
			self.eprefix = ""

		if self.config_root is None:
			self.config_root = self.eprefix + os.sep

		# Normalize to an absolute path with exactly one trailing slash.
		self.config_root = normalize_path(os.path.abspath(
			self.config_root)).rstrip(os.path.sep) + os.path.sep

		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)

		if not config_profile_path:
			# Prefer the traditional PROFILE_PATH location under
			# config_root, then the user's make.profile.
			config_profile_path = \
				os.path.join(self.config_root, PROFILE_PATH)
			if os.path.isdir(config_profile_path):
				self.profile_path = config_profile_path
			else:
				config_profile_path = \
					os.path.join(self.abs_user_config, 'make.profile')
				if os.path.isdir(config_profile_path):
					self.profile_path = config_profile_path
				else:
					self.profile_path = None
		else:
			self.profile_path = config_profile_path


		# The symlink might not exist or might not be a symlink.
		self.profiles = []
		if self.profile_path is not None:
			try:
				self._addProfile(os.path.realpath(self.profile_path))
			except ParseError as e:
				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
					self.profile_path, noiselevel=-1)
				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
				self.profiles = []

		if self._user_config and self.profiles:
			# The custom profile is appended last so it can override
			# settings inherited from the regular profile stack.
			custom_prof = os.path.join(
				self.config_root, CUSTOM_PROFILE_PATH)
			if os.path.exists(custom_prof):
				self.user_profile_dir = custom_prof
				self.profiles.append(custom_prof)
			del custom_prof

		self.profiles = tuple(self.profiles)
Example #26
0
def parse_args():
    """
    Parse dohtml's command line into an (options, args) tuple.

    Flag options: -h (help), -r (recurse), -V (verbose).
    Value options: -A/-a/-f/-x take a comma-separated list,
    -p takes a path. Everything else is a positional argument.
    """
    argv = sys.argv[:]

    if sys.hexversion >= 0x3000000:
        # We can't trust that the filesystem encoding (locale dependent)
        # correctly matches the arguments, so use surrogateescape to
        # pass through the original argv bytes for Python 3.
        fs_encoding = sys.getfilesystemencoding()
        argv = [x.encode(fs_encoding, 'surrogateescape') for x in argv]

    for i, raw_arg in enumerate(argv):
        try:
            argv[i] = _unicode_decode(raw_arg, errors='strict')
        except UnicodeDecodeError:
            writemsg('dohtml: argument is not encoded as UTF-8: %s\n' %
                     _unicode_decode(raw_arg),
                     noiselevel=-1)
            sys.exit(1)

    options = OptionsClass()
    args = []

    idx = 1
    while idx < len(argv):
        flag = argv[idx]
        if flag == "-h":
            print_help()
            sys.exit(0)
        elif flag == "-r":
            options.recurse = True
        elif flag == "-V":
            options.verbose = True
        elif flag in ("-A", "-a", "-f", "-x", "-p"):
            # These options consume the following argument as a value.
            idx += 1
            if idx == len(argv):
                print_help()
                sys.exit(0)
            elif flag == "-p":
                options.doc_prefix = argv[idx]
                if options.doc_prefix:
                    options.doc_prefix = normalize_path(options.doc_prefix)
            else:
                values = argv[idx].split(",")
                if flag == "-A":
                    options.allowed_exts.extend(values)
                elif flag == "-a":
                    options.allowed_exts = values
                elif flag == "-f":
                    options.allowed_files = values
                elif flag == "-x":
                    options.disallowed_dirs = values
        else:
            args.append(argv[idx])
        idx += 1

    return (options, args)
Example #27
0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""
	List a directory and classify each entry by type.

	@param my_original_path: directory to list (normalized internally)
	@param ignorecvs: when true, filter out ".#" files and VCS work
		directories (VCS_DIRS)
	@param ignorelist: file names to exclude from the result
	@param EmptyOnError: unused here; kept for interface compatibility
	@param followSymlinks: stat() instead of lstat() each entry
	@return: tuple (names, types) with type codes 0=regular file,
		1=directory, 2=symlink, 3=other/unreadable
	@raise DirectoryNotFound: if the path exists but is not a directory
	@raise PermissionDenied: if the path or its listing is unreadable
	"""
	mypath = normalize_path(my_original_path)
	try:
		pathstat = os.stat(mypath)
		if not stat.S_ISDIR(pathstat.st_mode):
			raise DirectoryNotFound(mypath)
	except EnvironmentError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mypath)
		del e
		return [], []
	except PortageException:
		return [], []
	else:
		try:
			fpaths = os.listdir(mypath)
		except EnvironmentError as e:
			if e.errno != errno.EACCES:
				raise
			del e
			raise PermissionDenied(mypath)
		ftype = []
		for x in fpaths:
			try:
				if followSymlinks:
					pathstat = os.stat(mypath+"/"+x)
				else:
					pathstat = os.lstat(mypath+"/"+x)

				if stat.S_ISREG(pathstat[stat.ST_MODE]):
					ftype.append(0)
				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
					ftype.append(1)
				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
					ftype.append(2)
				else:
					ftype.append(3)
			except (IOError, OSError):
				ftype.append(3)

	if ignorelist or ignorecvs:
		ret_list = []
		ret_ftype = []
		for file_path, file_type in zip(fpaths, ftype):
			if file_path in ignorelist:
				pass
			elif ignorecvs:
				if file_path[:2] != ".#" and \
					not (file_type == 1 and file_path in VCS_DIRS):
					ret_list.append(file_path)
					ret_ftype.append(file_type)
			else:
				# Bug fix: previously, when ignorelist was non-empty
				# but ignorecvs was false, entries that merely weren't
				# in ignorelist were silently dropped because this
				# else branch was missing. Keep them.
				ret_list.append(file_path)
				ret_ftype.append(file_type)
	else:
		ret_list = fpaths
		ret_ftype = ftype

	return ret_list, ret_ftype
Example #28
0
	def __call__(self, argv):
		"""
		Handle a has_version/best_version IPC query.

		@param argv: argument list of the form [cmd, root, atom, ...]
		@return: tuple of (stdout, stderr, returncode)
		"""

		# Python 3:
		# cmd, root, *args = argv
		cmd = argv[0]
		root = argv[1]
		args = argv[2:]

		warnings = []
		warnings_str = ''

		db = self.get_db()
		eapi = self.settings.get('EAPI')

		# Normalize ROOT to exactly one trailing separator, the form
		# used as a key in the db mapping.
		root = normalize_path(root or os.sep).rstrip(os.sep) + os.sep
		if root not in db:
			return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)

		portdb = db[root]["porttree"].dbapi
		vardb = db[root]["vartree"].dbapi

		if cmd in ('best_version', 'has_version'):
			# First parse with the default rules, so a truly invalid
			# atom is reported as a hard error...
			try:
				atom = Atom(args[0], allow_repo=False)
			except InvalidAtom:
				return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)

			# ...then re-parse against the ebuild's EAPI, demoting an
			# EAPI-specific violation to a QA warning only.
			try:
				atom = Atom(args[0], allow_repo=False, eapi=eapi)
			except InvalidAtom as e:
				warnings.append("QA Notice: %s: %s" % (cmd, e))

			# Prefer the USE recorded for built packages; fall back
			# to the current PORTAGE_USE.
			use = self.settings.get('PORTAGE_BUILT_USE')
			if use is None:
				use = self.settings['PORTAGE_USE']

			use = frozenset(use.split())
			atom = atom.evaluate_conditionals(use)

		if warnings:
			warnings_str = self._elog('eqawarn', warnings)

		if cmd == 'has_version':
			if vardb.match(atom):
				returncode = 0
			else:
				returncode = 1
			return ('', warnings_str, returncode)
		elif cmd == 'best_version':
			m = best(vardb.match(atom))
			return ('%s\n' % m, warnings_str, 0)
		else:
			return ('', 'Invalid command: %s\n' % cmd, 3)
Example #29
0
def parse_args():
	"""
	Parse dohtml's command line into an (options, args) tuple.

	Flag options: -h (help), -r (recurse), -V (verbose).
	Value options: -A/-a/-f/-x take a comma-separated list,
	-p takes a path. Everything else is a positional argument.
	"""
	argv = sys.argv[:]

	if sys.hexversion >= 0x3000000:
		# We can't trust that the filesystem encoding (locale dependent)
		# correctly matches the arguments, so use surrogateescape to
		# pass through the original argv bytes for Python 3.
		fs_encoding = sys.getfilesystemencoding()
		argv = [x.encode(fs_encoding, 'surrogateescape') for x in argv]

	for i, raw_arg in enumerate(argv):
		try:
			argv[i] = _unicode_decode(raw_arg, errors='strict')
		except UnicodeDecodeError:
			writemsg('dohtml: argument is not encoded as UTF-8: %s\n' %
				_unicode_decode(raw_arg), noiselevel=-1)
			sys.exit(1)

	options = OptionsClass()
	args = []

	idx = 1
	while idx < len(argv):
		flag = argv[idx]
		if flag == "-h":
			print_help()
			sys.exit(0)
		elif flag == "-r":
			options.recurse = True
		elif flag == "-V":
			options.verbose = True
		elif flag in ("-A", "-a", "-f", "-x", "-p"):
			# These options consume the following argument as a value.
			idx += 1
			if idx == len(argv):
				print_help()
				sys.exit(0)
			elif flag == "-p":
				options.doc_prefix = argv[idx]
				if options.doc_prefix:
					options.doc_prefix = normalize_path(options.doc_prefix)
			else:
				values = argv[idx].split(",")
				if flag == "-A":
					options.allowed_exts.extend(values)
				elif flag == "-a":
					options.allowed_exts = values
				elif flag == "-f":
					options.allowed_files = values
				elif flag == "-x":
					options.disallowed_dirs = values
		else:
			args.append(argv[idx])
		idx += 1

	return (options, args)
Example #30
0
	def __call__(self, argv):
		"""
		Handle a has_version/best_version query.

		@param argv: list of exactly [cmd, root, atom_str]
		@return: tuple of (stdout, stderr, returncode)
		"""

		cmd, root, atom_str = argv

		# Repository deps ("atom::repo") are only allowed for EAPIs
		# that support them.
		eapi = self.settings.get('EAPI')
		allow_repo = eapi_has_repo_deps(eapi)
		try:
			atom = Atom(atom_str, allow_repo=allow_repo)
		except InvalidAtom:
			return ('', 'invalid atom: %s\n' % atom_str, 2)

		# Re-parse against the ebuild's EAPI; an EAPI-specific
		# violation is demoted to a QA warning instead of an error.
		warnings = []
		try:
			atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi)
		except InvalidAtom as e:
			warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))

		# Prefer the USE recorded for built packages; fall back to
		# the current PORTAGE_USE.
		use = self.settings.get('PORTAGE_BUILT_USE')
		if use is None:
			use = self.settings['PORTAGE_USE']

		use = frozenset(use.split())
		atom = atom.evaluate_conditionals(use)

		db = self._db
		if db is None:
			db = portage.db

		warnings_str = ''
		if warnings:
			warnings_str = self._elog('eqawarn', warnings)

		# Normalize ROOT to the single-trailing-slash form used as a
		# key in the db mapping.
		root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
		if root not in db:
			return ('', 'invalid ROOT: %s\n' % root, 2)

		vardb = db[root]["vartree"].dbapi

		if cmd == 'has_version':
			if vardb.match(atom):
				returncode = 0
			else:
				returncode = 1
			return ('', warnings_str, returncode)
		elif cmd == 'best_version':
			m = best(vardb.match(atom))
			return ('%s\n' % m, warnings_str, 0)
		else:
			return ('', 'invalid command: %s\n' % cmd, 2)
Example #31
0
def install_mask_dir(base_dir, install_mask, onerror=None):
    """
    Remove files and directories matched by INSTALL_MASK.

    @param base_dir: directory path corresponding to ${ED}
    @type base_dir: str
    @param install_mask: INSTALL_MASK configuration
    @type install_mask: InstallMask
    @param onerror: callback invoked with the OSError when a masked
        file cannot be removed; defaults to re-raising (_raise_exc)
    """
    onerror = onerror or _raise_exc
    base_dir = normalize_path(base_dir)
    # +1 also strips the path separator that follows base_dir.
    base_dir_len = len(base_dir) + 1
    dir_stack = []

    # Remove masked files.
    todo = [base_dir]
    while todo:
        parent = todo.pop()
        try:
            parent = _unicode_decode(parent, errors="strict")
        except UnicodeDecodeError:
            # Skip paths that are not valid UTF-8.
            continue

        dir_stack.append(parent)
        for entry in os.scandir(parent):
            try:
                abs_path = _unicode_decode(entry.path, errors="strict")
            except UnicodeDecodeError:
                continue

            if entry.is_dir(follow_symlinks=False):
                todo.append(entry.path)
            elif install_mask.match(abs_path[base_dir_len:]):
                try:
                    os.unlink(entry.path)
                except OSError as e:
                    onerror(e)

    # Remove masked dirs (unless non-empty due to exclusions).
    # dir_stack pops in reverse discovery order, so child directories
    # are attempted before their parents.
    while True:
        try:
            dir_path = dir_stack.pop()
        except IndexError:
            break

        if install_mask.match(dir_path[base_dir_len:] + "/"):
            try:
                os.rmdir(dir_path)
            except OSError:
                pass
Example #32
0
	def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
		target_root=None, sysroot=None):
		"""
		Resolve and normalize the basic filesystem locations.

		@param config_root: PORTAGE_CONFIGROOT; None defaults to
			portage.const.EPREFIX + "/"
		@param eprefix: offset prefix; None selects portage.const.EPREFIX
		@param config_profile_path: explicit profile path, stored as-is
		@param local_config: whether user configuration is considered
		@param target_root: ROOT override, stored for later use
		@param sysroot: SYSROOT; None defaults to "/"
		"""
		self.user_profile_dir = None
		self._local_repo_conf_path = None
		self.eprefix = eprefix
		self.config_root = config_root
		self.target_root = target_root
		self.sysroot = sysroot
		self._user_config = local_config

		if self.eprefix is None:
			self.eprefix = portage.const.EPREFIX
		elif self.eprefix:
			self.eprefix = normalize_path(self.eprefix)
			if self.eprefix == os.sep:
				# A prefix of "/" is represented as the empty string.
				self.eprefix = ""

		if self.config_root is None:
			self.config_root = portage.const.EPREFIX + os.sep

		# Normalize to an absolute path with exactly one trailing slash.
		self.config_root = normalize_path(os.path.abspath(
			self.config_root or os.sep)).rstrip(os.sep) + os.sep

		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
		self.config_profile_path = config_profile_path

		if self.sysroot is None:
			self.sysroot = "/"
		else:
			self.sysroot = normalize_path(os.path.abspath(self.sysroot or os.sep)).rstrip(os.sep) + os.sep

		# ESYSROOT = SYSROOT + EPREFIX, with a single trailing slash.
		self.esysroot = self.sysroot.rstrip(os.sep) + self.eprefix + os.sep

		# TODO: Set this via the constructor using
		# PORTAGE_OVERRIDE_EPREFIX.
		self.broot = portage.const.EPREFIX
Example #33
0
    def add(self, entry):
        """
        Add one NEEDED.ELF.2 entry, for inclusion in the generated
        REQUIRES and PROVIDES values.

        @param entry: NEEDED.ELF.2 entry
        @type entry: NeededEntry
        """

        multilib_cat = entry.multilib_category
        if multilib_cat is None:
            # This usage is invalid. The caller must ensure that
            # the multilib category data is supplied here.
            raise AssertionError("Missing multilib category data: %s" %
                                 entry.filename)

        self._basename_map.setdefault(os.path.basename(entry.filename),
                                      []).append(entry)

        # Record REQUIRES entries unless the file is excluded by the
        # _requires_exclude pattern.
        if entry.needed and (self._requires_exclude is None
                             or self._requires_exclude.match(
                                 entry.filename.lstrip(os.sep)) is None):
            runpaths = frozenset()
            if entry.runpaths is not None:
                # Expand $ORIGIN relative to the object's own directory.
                expand = {"ORIGIN": os.path.dirname(entry.filename)}
                runpaths = frozenset(
                    normalize_path(
                        varexpand(
                            x,
                            expand,
                            error_leader=lambda: "%s: DT_RUNPATH: " % entry.
                            filename,
                        )) for x in entry.runpaths)
            for x in entry.needed:
                if (self._requires_exclude is None
                        or self._requires_exclude.match(x) is None):
                    self._requires_map[multilib_cat][x].add(runpaths)

        # Track every soname, before exclusion filtering.
        if entry.soname:
            self._provides_unfiltered.setdefault(multilib_cat,
                                                 set()).add(entry.soname)

        # PROVIDES only includes sonames that pass _provides_exclude
        # for both the file path and the soname itself.
        if entry.soname and (
                self._provides_exclude is None or
            (self._provides_exclude.match(entry.filename.lstrip(os.sep)) is
             None and self._provides_exclude.match(entry.soname) is None)):
            self._provides_map.setdefault(multilib_cat,
                                          set()).add(entry.soname)
Example #34
0
def install_mask_dir(base_dir, install_mask, onerror=None):
    """
    Remove files and directories matched by INSTALL_MASK.

    @param base_dir: directory path corresponding to ${ED}
    @type base_dir: str
    @param install_mask: INSTALL_MASK configuration
    @type install_mask: InstallMask
    @param onerror: callback invoked with the OSError when a masked
        file cannot be removed; defaults to re-raising (_raise_exc)
    """
    onerror = onerror or _raise_exc
    base_dir = normalize_path(base_dir)
    # +1 also strips the path separator that follows base_dir.
    base_dir_len = len(base_dir) + 1
    dir_stack = []

    # Remove masked files.
    for parent, dirs, files in os.walk(base_dir, onerror=onerror):
        try:
            parent = _unicode_decode(parent, errors='strict')
        except UnicodeDecodeError:
            # Skip paths that are not valid UTF-8.
            continue
        dir_stack.append(parent)
        for fname in files:
            try:
                fname = _unicode_decode(fname, errors='strict')
            except UnicodeDecodeError:
                continue
            abs_path = os.path.join(parent, fname)
            relative_path = abs_path[base_dir_len:]
            if install_mask.match(relative_path):
                try:
                    os.unlink(abs_path)
                except OSError as e:
                    onerror(e)

    # Remove masked dirs (unless non-empty due to exclusions).
    # dir_stack pops in reverse walk order, so child directories
    # are attempted before their parents.
    while True:
        try:
            dir_path = dir_stack.pop()
        except IndexError:
            break

        if install_mask.match(dir_path[base_dir_len:] + '/'):
            try:
                os.rmdir(dir_path)
            except OSError:
                pass
Example #35
0
def install_mask_dir(base_dir, install_mask, onerror=None):
	"""
	Remove files and directories matched by INSTALL_MASK.

	@param base_dir: directory path corresponding to ${ED}
	@type base_dir: str
	@param install_mask: INSTALL_MASK configuration
	@type install_mask: InstallMask
	@param onerror: callback invoked with the OSError when a masked
		file cannot be removed; defaults to re-raising (_raise_exc)
	"""
	onerror = onerror or _raise_exc
	base_dir = normalize_path(base_dir)
	# +1 also strips the path separator that follows base_dir.
	base_dir_len = len(base_dir) + 1
	dir_stack = []

	# Remove masked files.
	for parent, dirs, files in os.walk(base_dir, onerror=onerror):
		try:
			parent = _unicode_decode(parent, errors='strict')
		except UnicodeDecodeError:
			# Skip paths that are not valid UTF-8.
			continue
		dir_stack.append(parent)
		for fname in files:
			try:
				fname = _unicode_decode(fname, errors='strict')
			except UnicodeDecodeError:
				continue
			abs_path = os.path.join(parent, fname)
			relative_path = abs_path[base_dir_len:]
			if install_mask.match(relative_path):
				try:
					os.unlink(abs_path)
				except OSError as e:
					onerror(e)

	# Remove masked dirs (unless non-empty due to exclusions).
	# dir_stack pops in reverse walk order, so child directories
	# are attempted before their parents.
	while True:
		try:
			dir_path = dir_stack.pop()
		except IndexError:
			break

		if install_mask.match(dir_path[base_dir_len:] + '/'):
			try:
				os.rmdir(dir_path)
			except OSError:
				pass
Example #36
0
	def add(self, entry):
		"""
		Add one NEEDED.ELF.2 entry, for inclusion in the generated
		REQUIRES and PROVIDES values.

		@param entry: NEEDED.ELF.2 entry
		@type entry: NeededEntry
		"""

		multilib_cat = entry.multilib_category
		if multilib_cat is None:
			# This usage is invalid. The caller must ensure that
			# the multilib category data is supplied here.
			raise AssertionError(
				"Missing multilib category data: %s" % entry.filename)

		self._basename_map.setdefault(
			os.path.basename(entry.filename), []).append(entry)

		# Record REQUIRES entries unless the file is excluded by the
		# _requires_exclude pattern.
		if entry.needed and (
			self._requires_exclude is None or
			self._requires_exclude.match(
			entry.filename.lstrip(os.sep)) is None):
			runpaths = frozenset()
			if entry.runpaths is not None:
				# Expand $ORIGIN relative to the object's own directory.
				expand = {"ORIGIN": os.path.dirname(entry.filename)}
				runpaths = frozenset(normalize_path(varexpand(x, expand,
					error_leader=lambda: "%s: DT_RUNPATH: " % entry.filename))
					for x in entry.runpaths)
			for x in entry.needed:
				if (self._requires_exclude is None or
					self._requires_exclude.match(x) is None):
					self._requires_map[multilib_cat][x].add(runpaths)

		# Track every soname, before exclusion filtering.
		if entry.soname:
			self._provides_unfiltered.setdefault(
				multilib_cat, set()).add(entry.soname)

		# PROVIDES only includes sonames that pass _provides_exclude
		# for both the file path and the soname itself.
		if entry.soname and (
			self._provides_exclude is None or
			(self._provides_exclude.match(
			entry.filename.lstrip(os.sep)) is None and
			self._provides_exclude.match(entry.soname) is None)):
			self._provides_map.setdefault(
				multilib_cat, set()).add(entry.soname)
    def update_eclasses(self):
        """
        Rescan the eclass directories of all porttrees and rebuild
        self.eclasses and self._eclass_locations.

        An overlay eclass whose mtime equals the master eclass of the
        same name is assumed identical, and the master entry is kept.
        """
        self.eclasses = {}
        self._eclass_locations = {}
        master_eclasses = {}
        eclass_len = len(".eclass")
        # A missing or non-directory eclass dir is simply skipped.
        ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
        for x in [
                normalize_path(os.path.join(y, "eclass"))
                for y in self.porttrees
        ]:
            try:
                eclass_filenames = os.listdir(x)
            except OSError as e:
                if e.errno in ignored_listdir_errnos:
                    del e
                    continue
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(x)
                raise
            for y in eclass_filenames:
                if not y.endswith(".eclass"):
                    continue
                obj = hashed_path(os.path.join(x, y))
                obj.eclass_dir = x
                try:
                    mtime = obj.mtime
                except FileNotFound:
                    continue
                # Strip the ".eclass" suffix to get the eclass name.
                ys = y[:-eclass_len]
                if x == self._master_eclass_root:
                    master_eclasses[ys] = mtime
                    self.eclasses[ys] = obj
                    self._eclass_locations[ys] = x
                    continue

                master_mtime = master_eclasses.get(ys)
                if master_mtime is not None:
                    if master_mtime == mtime:
                        # It appears to be identical to the master,
                        # so prefer the master entry.
                        continue

                self.eclasses[ys] = obj
                self._eclass_locations[ys] = x
Example #38
0
    def __init__(self, ebuilds=None, binpkgs=None, installed=None, profile=None,
            repo_configs=None, user_config=None, sets=None, world=None,
            world_sets=None, distfiles=None, targetroot=False, debug=False):
        """
        Create a throwaway portage environment in a temp directory.

        ebuilds: cpv -> metadata mapping simulating available ebuilds.
        installed: cpv -> metadata mapping simulating installed packages.
            If a metadata key is missing, it gets a default value.
        profile: settings defined by the profile.
        debug: when false, portage output is silenced while the
            environment is built.
        """
        # Bug fix: the previous signature used mutable default
        # arguments ({} and []), which are shared between calls.
        # Normalize None sentinels to fresh empty containers instead;
        # behavior for callers passing explicit values is unchanged.
        ebuilds = {} if ebuilds is None else ebuilds
        binpkgs = {} if binpkgs is None else binpkgs
        installed = {} if installed is None else installed
        profile = {} if profile is None else profile
        repo_configs = {} if repo_configs is None else repo_configs
        user_config = {} if user_config is None else user_config
        sets = {} if sets is None else sets
        world = [] if world is None else world
        world_sets = [] if world_sets is None else world_sets
        distfiles = {} if distfiles is None else distfiles

        self.debug = debug
        # Each instance gets its own private EPREFIX in a temp dir.
        self.eprefix = normalize_path(tempfile.mkdtemp())
        portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

        self.eroot = self.eprefix + os.sep
        if targetroot:
            self.target_root = os.path.join(self.eroot, 'target_root')
        else:
            self.target_root = os.sep
        self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
        self.pkgdir = os.path.join(self.eprefix, "pkgdir")
        self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
        os.makedirs(self.vdbdir)

        if not debug:
            # Silence portage output while the environment is built.
            portage.util.noiselimit = -2

        self._repositories = {}
        # Make sure the main repo is always created
        self._get_repo_dir("test_repo")

        self._create_distfiles(distfiles)
        self._create_ebuilds(ebuilds)
        self._create_binpkgs(binpkgs)
        self._create_installed(installed)
        self._create_profile(ebuilds, installed, profile, repo_configs,
                             user_config, sets)
        self._create_world(world, world_sets)

        self.settings, self.trees = self._load_config()

        self._create_ebuild_manifests(ebuilds)

        # Restore normal verbosity once setup is complete.
        portage.util.noiselimit = 0
Example #39
0
	def __init__(self, porttree_root, overlays=None):
		"""
		Initialize the eclass cache.

		@param porttree_root: repository root whose eclass directory
			seeds the cache; a falsy value leaves the cache empty
		@param overlays: deprecated and ignored
		"""
		if overlays is not None:
			warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
			DeprecationWarning, stacklevel=2)

		# Maps eclass name -> hashed_path.
		self.eclasses = {}
		self._eclass_locations = {}
		self._eclass_locations_str = None

		if not porttree_root:
			# No tree: leave the cache empty and unscanned.
			self.porttree_root = None
			self.porttrees = ()
			self._master_eclass_root = None
			return

		# The porttree ordering is significant; bash's inherit must
		# match it, so don't reorder.
		self.porttree_root = porttree_root
		self.porttrees = (normalize_path(self.porttree_root),)
		self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
		self.update_eclasses()
Example #40
0
	def set_port_dirs(self, portdir, portdir_overlay):
		"""
		Record the main tree and overlay locations and collect every
		existing "profiles" directory from them.
		"""
		self.portdir = portdir
		self.portdir_overlay = \
			portdir_overlay if portdir_overlay is not None else ""

		# Keep only overlays that actually contain a profiles dir.
		self.overlay_profiles = [
			profiles_dir
			for profiles_dir in (
				os.path.join(normalize_path(ov), "profiles")
				for ov in shlex_split(self.portdir_overlay))
			if os.path.isdir(profiles_dir)
		]

		locations = [os.path.join(portdir, "profiles")]
		locations.extend(self.overlay_profiles)

		user_locations = list(locations)
		if self._user_config:
			user_locations.append(self.abs_user_config)

		self.profile_locations = tuple(locations)
		self.profile_and_user_locations = tuple(user_locations)
Example #41
0
	def __init__(self, porttree_root, overlays=None):
		"""
		Initialize the eclass cache.

		@param porttree_root: repository root whose eclass directory
			seeds the cache; a falsy value leaves the cache empty
		@param overlays: deprecated and ignored
		"""
		if overlays is not None:
			warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
			DeprecationWarning, stacklevel=2)

		# Maps eclass name -> hashed_path.
		self.eclasses = {}
		self._eclass_locations = {}
		self._eclass_locations_str = None

		if not porttree_root:
			# No tree: leave the cache empty and unscanned.
			self.porttree_root = None
			self.porttrees = ()
			self._master_eclass_root = None
			return

		# The porttree ordering is significant; bash's inherit must
		# match it, so don't reorder.
		self.porttree_root = porttree_root
		self.porttrees = (normalize_path(self.porttree_root),)
		self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
		self.update_eclasses()
Example #42
0
	def set_root_override(self, root_overwrite=None):
		"""
		Finalize target_root/eroot and choose the make.globals location.

		@param root_overwrite: ROOT value from make.conf, used only when
			the constructor did not already set target_root
		"""
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		if self.target_root is None:
			self.target_root = "/"

		# Normalize to an absolute path with one trailing separator.
		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		# EROOT = ROOT + EPREFIX, with a single trailing slash.
		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		# make.globals should not be relative to config_root
		# because it only contains constants. However, if EPREFIX
		# is set then there are two possible scenarios:
		# 1) If $ROOT == "/" then make.globals should be
		#    relative to EPREFIX.
		# 2) If $ROOT != "/" then the correct location of
		#    make.globals needs to be specified in the constructor
		#    parameters, since it's a property of the host system
		#    (and the current config represents the target system).
		self.global_config_path = GLOBAL_CONFIG_PATH
		if self.eprefix:
			if self.target_root == "/":
				# case (1) above
				self.global_config_path = os.path.join(self.eprefix,
					GLOBAL_CONFIG_PATH.lstrip(os.sep))
			else:
				# case (2) above
				# For now, just assume make.globals is relative
				# to EPREFIX.
				# TODO: Pass in more info to the constructor,
				# so we know the host system configuration.
				self.global_config_path = os.path.join(self.eprefix,
					GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #43
0
	def __call__(self, argv):
		"""
		Handle a has_version/best_version query.

		@param argv: list of exactly [cmd, root, atom, use]
		@return: tuple of (stdout, stderr, returncode)
		"""

		# Note that $USE is passed via IPC in order to ensure that
		# we have the correct value for built/installed packages,
		# since the config class doesn't currently provide a way
		# to access built/installed $USE that would work in all
		# possible scenarios.
		cmd, root, atom, use = argv

		try:
			atom = Atom(atom)
		except InvalidAtom:
			return ('', 'invalid atom: %s\n' % atom, 2)

		# Resolve USE-conditional parts of the atom against the
		# caller-supplied USE flags.
		use = frozenset(use.split())
		atom = atom.evaluate_conditionals(use)

		db = self._db
		if db is None:
			db = portage.db

		# Normalize ROOT to the single-trailing-slash form used as a
		# key in the db mapping.
		root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
		if root not in db:
			return ('', 'invalid ROOT: %s\n' % root, 2)

		vardb = db[root]["vartree"].dbapi

		if cmd == 'has_version':
			if vardb.match(atom):
				returncode = 0
			else:
				returncode = 1
			return ('', '', returncode)
		elif cmd == 'best_version':
			m = best(vardb.match(atom))
			return ('%s\n' % m, '', 0)
		else:
			return ('', 'invalid command: %s\n' % cmd, 2)
Example #44
0
	def update_eclasses(self):
		"""
		Rescan the eclass directories of all porttrees and rebuild
		self.eclasses and self._eclass_locations.

		An overlay eclass whose mtime equals the master eclass of the
		same name is assumed identical, and the master entry is kept.
		"""
		self.eclasses = {}
		self._eclass_locations = {}
		master_eclasses = {}
		eclass_len = len(".eclass")
		# A missing or non-directory eclass dir is simply skipped.
		ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
		for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
			try:
				eclass_filenames = os.listdir(x)
			except OSError as e:
				if e.errno in ignored_listdir_errnos:
					del e
					continue
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(x)
				raise
			for y in eclass_filenames:
				if not y.endswith(".eclass"):
					continue
				obj = hashed_path(os.path.join(x, y))
				obj.eclass_dir = x
				try:
					mtime = obj.mtime
				except FileNotFound:
					continue
				# Strip the ".eclass" suffix to get the eclass name.
				ys = y[:-eclass_len]
				if x == self._master_eclass_root:
					master_eclasses[ys] = mtime
					self.eclasses[ys] = obj
					self._eclass_locations[ys] = x
					continue

				master_mtime = master_eclasses.get(ys)
				if master_mtime is not None:
					if master_mtime == mtime:
						# It appears to be identical to the master,
						# so prefer the master entry.
						continue

				self.eclasses[ys] = obj
				self._eclass_locations[ys] = x
Example #45
0
	def set_root_override(self, root_overwrite=None):
		"""
		Finalize target_root/eroot and choose the make.globals location.

		@param root_overwrite: ROOT value from make.conf, used only when
			the constructor did not already set target_root
		"""
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		if self.target_root is None:
			self.target_root = "/"

		# Normalize to an absolute path with one trailing separator.
		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		# EROOT = ROOT + EPREFIX, with a single trailing slash.
		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		# make.globals only contains constants, so it is resolved
		# relative to EPREFIX rather than config_root.
		self.global_config_path = GLOBAL_CONFIG_PATH
		if portage.const.EPREFIX:
			self.global_config_path = os.path.join(portage.const.EPREFIX,
				GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #46
0
def parse_args():
	"""
	Parse dohtml's command line into an (options, args) tuple.

	Flag options: -h (help), -r (recurse), -V (verbose).
	Value options: -A/-a/-f/-x take a comma-separated list,
	-p takes a path. Everything else is a positional argument.
	"""
	options = OptionsClass()
	args = []

	idx = 1
	while idx < len(sys.argv):
		flag = sys.argv[idx]
		if flag == "-h":
			print_help()
			sys.exit(0)
		elif flag == "-r":
			options.recurse = True
		elif flag == "-V":
			options.verbose = True
		elif flag in ("-A", "-a", "-f", "-x", "-p"):
			# These options consume the following argument as a value.
			idx += 1
			if idx == len(sys.argv):
				print_help()
				sys.exit(0)
			elif flag == "-p":
				options.doc_prefix = sys.argv[idx]
				if options.doc_prefix:
					options.doc_prefix = normalize_path(options.doc_prefix)
			else:
				values = sys.argv[idx].split(",")
				if flag == "-A":
					options.allowed_exts.extend(values)
				elif flag == "-a":
					options.allowed_exts = values
				elif flag == "-f":
					options.allowed_files = values
				elif flag == "-x":
					options.disallowed_dirs = values
		else:
			args.append(sys.argv[idx])
		idx += 1

	return (options, args)
Example #47
0
def parse_args():
	"""Split sys.argv into recognized options and positional arguments.

	Returns an (options, args) tuple where options is a populated
	OptionsClass instance and args holds every non-flag argument.
	"""
	options = OptionsClass()
	positional = []

	idx = 1
	total = len(sys.argv)
	while idx < total:
		flag = sys.argv[idx]
		if flag in ("-h", "-r", "-V"):
			if flag == "-h":
				print_help()
				sys.exit(0)
			if flag == "-r":
				options.recurse = True
			else:
				options.verbose = True
		elif flag in ("-A", "-a", "-f", "-x", "-p"):
			idx += 1
			if idx == total:
				# Missing value for the flag: show usage and stop.
				print_help()
				sys.exit(0)
			elif flag == "-p":
				options.doc_prefix = sys.argv[idx]
				if options.doc_prefix:
					options.doc_prefix = normalize_path(options.doc_prefix)
			else:
				values = sys.argv[idx].split(",")
				if flag == "-A":
					options.allowed_exts.extend(values)
				elif flag == "-a":
					options.allowed_exts = values
				elif flag == "-f":
					options.allowed_files = values
				else:
					options.disallowed_dirs = values
		else:
			positional.append(flag)
		idx += 1

	return (options, positional)
Example #48
0
    def set_root_override(self, root_overwrite=None):
        """Finalize self.target_root, self.eroot and self.global_config_path.

        Allow the ROOT setting to come from make.conf if it's not
        overridden by the constructor argument (from the calling
        environment). A blank/whitespace override counts as unset.
        """
        if self.target_root is None:
            if root_overwrite is not None:
                self.target_root = root_overwrite
                if not self.target_root.strip():
                    self.target_root = None
        if self.target_root is None:
            self.target_root = "/"

        # Canonicalize to an absolute path with exactly one trailing slash.
        normalized = normalize_path(os.path.abspath(self.target_root))
        self.target_root = normalized.rstrip(os.path.sep) + os.path.sep

        ensure_dirs(self.target_root)
        self._check_var_directory("ROOT", self.target_root)

        self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

        # Relocate the global config path under EPREFIX when one is set.
        if portage.const.EPREFIX:
            self.global_config_path = os.path.join(
                portage.const.EPREFIX, GLOBAL_CONFIG_PATH.lstrip(os.sep))
        else:
            self.global_config_path = GLOBAL_CONFIG_PATH
Example #49
0
def main():
    """Entry point: install each path argument via install() and exit
    with status 0 if at least one install succeeded, 1 otherwise."""
    options, args = parse_args()

    if options.verbose:
        print("Allowed extensions:", options.allowed_exts)
        print("Document prefix : '" + options.doc_prefix + "'")
        print("Allowed files :", options.allowed_files)

    success = False
    slash_suffixes = (os.sep, os.sep + ".")

    for arg in args:
        had_trailing_slash = arg.endswith(slash_suffixes)
        path = normalize_path(arg)
        if had_trailing_slash:
            # Re-append the separator so basename/dirname treat "foo/"
            # similarly to "foo/*" (bug #425214).
            path += os.sep
        success |= install(os.path.basename(path),
                           os.path.dirname(path), options)

    for skipped in skipped_directories:
        eqawarn([
            "QA Notice: dohtml on directory '%s' without recursion option" % skipped
        ])
    for skipped in skipped_files:
        eqawarn(["dohtml: skipped file '%s'" % skipped])

    sys.exit(0 if success else 1)
Example #50
0
def main():
	"""Entry point: install each path argument via install() and exit
	with status 0 if at least one install succeeded, 1 otherwise."""
	options, args = parse_args()

	if options.verbose:
		print("Allowed extensions:", options.allowed_exts)
		print("Document prefix : '" + options.doc_prefix + "'")
		print("Allowed files :", options.allowed_files)

	success = False
	slash_suffixes = (os.sep, os.sep + ".")

	for arg in args:
		had_trailing_slash = arg.endswith(slash_suffixes)
		path = normalize_path(arg)
		if had_trailing_slash:
			# Re-append the separator so basename/dirname treat "foo/"
			# similarly to "foo/*" (bug #425214).
			path += os.sep
		success |= install(os.path.basename(path), os.path.dirname(path),
			options)

	for skipped in skipped_directories:
		eqawarn(["QA Notice: dohtml on directory '%s' without recursion option" % skipped])
	for skipped in skipped_files:
		eqawarn(["dohtml: skipped file '%s'" % skipped])

	sys.exit(0 if success else 1)
Example #51
0
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
	writemsg_level):
	"""Regenerate env.d-derived configuration under target_root.

	Reads etc/env.d, merges the variables found there, and rewrites
	etc/profile.env, etc/csh.env, etc/ld.so.conf and (when prelink is
	available) etc/prelink.conf, running ldconfig when library
	directories changed. Arguments passed as None fall back to
	portage.settings / portage.mtimedb defaults.
	"""
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = portage.settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = portage.mtimedb["ldpath"]
	if env is None:
		settings = portage.settings
	else:
		settings = env

	eprefix = settings.get("EPREFIX", "")
	eprefix_lstrip = eprefix.lstrip(os.sep)
	envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
	ensure_dirs(envd_dir, mode=0o755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	# Only process entries whose names start with two digits, skipping
	# hidden files and editor/backup leftovers.
	templist = []
	for x in fns:
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	# Cumulative variables: values from all env.d files are merged
	# (space- or colon-separated) instead of later files overwriting
	# earlier ones.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		  "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		  "PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except ParseError as e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue

		config_list.append(myconfig)
		# env.d files may declare additional cumulative variables of
		# either flavor via SPACE_SEPARATED / COLON_SEPARATED.
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	env = {}
	specials = {}
	# Merge each cumulative variable across all env.d files, keeping
	# first-seen order and dropping duplicates.
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
		env.update(myconfig)

	# Read the current ld.so.conf (minus comment lines) so it is only
	# rewritten when the merged LDPATH actually changed.
	ldsoconf_path = os.path.join(
		target_root, eprefix_lstrip, "etc", "ld.so.conf")
	try:
		myld = io.open(_unicode_encode(ldsoconf_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		myldlines=myld.readlines()
		myld.close()
		oldld=[]
		for x in myldlines:
			#each line has at least one char (a newline)
			if x[:1] == "#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError) as e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	newld = specials["LDPATH"]
	if (oldld != newld):
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x + "\n")
		myfd.close()

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		newprelink = atomic_ofstream(os.path.join(
			target_root, eprefix_lstrip, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l %s\n" % (x,));
		prelink_paths = []
		prelink_paths += specials.get("LDPATH", [])
		prelink_paths += specials.get("PATH", [])
		prelink_paths += specials.get("PRELINK_PATH", [])
		prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
		# Emit "-h" entries for paths not covered by a PRELINK_PATH_MASK
		# prefix, then "-b" (blacklist) entries for the masked paths.
		for x in prelink_paths:
			if not x:
				continue
			if x[-1:] != '/':
				x += "/"
			plmasked = 0
			for y in prelink_path_mask:
				if not y:
					continue
				if y[-1] != '/':
					y += "/"
				if y == x[0:len(y)]:
					plmasked = 1
					break
			if not plmasked:
				newprelink.write("-h %s\n" % (x,))
		for x in prelink_path_mask:
			newprelink.write("-b %s\n" % (x,))
		newprelink.close()

	# NOTE(review): 'long' is the Python 2 builtin (or a compat alias
	# defined elsewhere in this file) — confirm it exists under Python 3.
	current_time = long(time.time())
	mtime_changed = False
	lib_dirs = set()
	# Track mtimes of LDPATH plus the standard library directories so
	# ldconfig only needs to run when one of them changed.
	for lib_dir in set(specials["LDPATH"] + \
		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
		except OSError as oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			# Reset mtime to avoid the potential ambiguity of times that
			# differ by less than 1 second.
			newldpathtime -= 1
			os.utime(x, (newldpathtime, newldpathtime))
			prev_mtimes[x] = newldpathtime
			mtime_changed = True
		elif x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	# With no mtime changes, only run ldconfig if the merged contents
	# actually touched one of the tracked library directories.
	if makelinks and \
		not mtime_changed and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.items():
			if mydata[0] not in ("obj", "sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	# When cross-compiling (CHOST != CBUILD), prefer a CHOST-prefixed
	# ldconfig binary.
	ldconfig = "/sbin/ldconfig"
	if "CHOST" in settings and "CBUILD" in settings and \
		settings["CHOST"] != settings["CBUILD"]:
		ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

	# Only run ldconfig as needed
	if makelinks and ldconfig and not eprefix:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype == "Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
				(target_root,))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	del specials["LDPATH"]

	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [ x for x in env if x != "LDPATH" ]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		if v.startswith('$') and not v.startswith('${'):
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()
Example #52
0
	def rebuild(self, exclude_pkgs=None, include_file=None,
		preserve_paths=None):
		"""
		Rebuild the LinkageMap from the NEEDED entries of all installed
		packages, plus an optional include_file and preserved libraries.

		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.

		@param exclude_pkgs: A set of packages that should be excluded from
			the LinkageMap, since they are being unmerged and their NEEDED
			entries are therefore irrelevant and would only serve to corrupt
			the LinkageMap.
		@type exclude_pkgs: set
		@param include_file: The path of a file containing NEEDED entries for
			a package which does not exist in the vardbapi yet because it is
			currently being merged.
		@type include_file: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		"""

		# NOTE(review): _os_merge shadows the os module here — presumably
		# a merge-time, encoding-aware wrapper; confirm its semantics.
		os = _os_merge
		root = self._root
		# Used below to strip the root prefix from scanelf output paths
		# (assumes root ends with a path separator — TODO confirm).
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
		libs = self._libs
		obj_properties = self._obj_properties

		# Each entry is an (owner cpv, source location, NEEDED line) triple.
		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			for line in grabfile(include_file):
				lines.append((None, include_file, line))

		# Collect NEEDED lines from every installed package, holding the
		# vardb lock when the db root is writable.
		aux_keys = [self._needed_aux_key]
		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
		if can_lock:
			self._dbapi.lock()
		try:
			for cpv in self._dbapi.cpv_all():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					continue
				needed_file = self._dbapi.getpath(cpv,
					filename=self._needed_aux_key)
				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
					lines.append((cpv, needed_file, line))
		finally:
			if can_lock:
				self._dbapi.unlock()

		# have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files
		plibs = {}
		if preserve_paths is not None:
			plibs.update((x, None) for x in preserve_paths)
		if self._dbapi._plib_registry and \
			self._dbapi._plib_registry.hasEntries():
			for cpv, items in \
				self._dbapi._plib_registry.getPreservedLibs().items():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					# These preserved libs will either be unmerged,
					# rendering them irrelevant, or they will be
					# preserved in the replacement package and are
					# already represented via the preserve_paths
					# parameter.
					continue
				plibs.update((x, cpv) for x in items)
		if plibs:
			# Ask scanelf for arch, filename, soname, runpaths and needed
			# sonames of each preserved lib.
			args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
				for x in plibs)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					fields = l.split(";")
					if len(fields) < 5:
						writemsg_level(_("\nWrong number of fields " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
						continue
					# Strip the root prefix so the path matches the plibs keys.
					fields[1] = fields[1][root_len:]
					owner = plibs.pop(fields[1], None)
					lines.append((owner, "scanelf", ";".join(fields)))
				proc.wait()
				proc.stdout.close()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x, cpv in plibs.items():
				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))

		# Share identical frozenset instances when available,
		# in order to conserve memory.
		frozensets = {}

		for owner, location, l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			if '\0' in l:
				# os.stat() will raise "TypeError: must be encoded string
				# without NULL bytes, not str" in this case.
				writemsg_level(_("\nLine contains null byte(s) " \
					"in %s: %s\n\n") % (location, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			try:
				entry = NeededEntry.parse(location, l)
			except InvalidData as e:
				writemsg_level("\n%s\n\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				continue

			# If NEEDED.ELF.2 contains the new multilib category field,
			# then use that for categorization. Otherwise, if a mapping
			# exists, map e_machine (entry.arch) to an approximate
			# multilib category. If all else fails, use e_machine, just
			# as older versions of portage did.
			arch = entry.multilib_category
			if arch is None:
				arch = _approx_multilib_categories.get(
					entry.arch, entry.arch)

			obj = entry.filename
			soname = entry.soname
			# Expand $ORIGIN in runpaths relative to the object's directory.
			expand = {"ORIGIN": os.path.dirname(entry.filename)}
			path = frozenset(normalize_path(
				varexpand(x, expand, error_leader=lambda: "%s: " % location))
				for x in entry.runpaths)
			path = frozensets.setdefault(path, path)
			needed = frozenset(entry.needed)

			needed = frozensets.setdefault(needed, needed)

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = self._obj_properties_class(
					arch, needed, path, soname, [], owner)
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops.alt_paths.append(obj)

			# Don't index the same file more that once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			# Register this object as provider of its soname and consumer
			# of each soname it needs, per architecture.
			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[soname] = soname_map
				soname_map.providers.append(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[needed_soname] = soname_map
				soname_map.consumers.append(obj_key)

		# Deduplicate provider/consumer lists and freeze them as tuples.
		for arch, sonames in libs.items():
			for soname_node in sonames.values():
				soname_node.providers = tuple(set(soname_node.providers))
				soname_node.consumers = tuple(set(soname_node.consumers))
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
                writemsg_level):
    """Regenerate env.d-derived configuration under target_root.

    Reads etc/env.d, merges the variables found there, and rewrites
    etc/profile.env, etc/csh.env, etc/ld.so.conf and (when prelink is
    available) etc/prelink.conf.d/portage.conf, running ldconfig when
    library directories changed. Arguments passed as None fall back to
    portage.settings / portage.mtimedb defaults.
    """
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    # Effective root: target_root joined with EPREFIX, with exactly one
    # trailing separator.
    eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(
        os.sep) + os.sep
    envd_dir = os.path.join(eroot, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    # Only process entries whose names start with two digits, skipping
    # hidden files and editor/backup leftovers.
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    # Cumulative variables: values from all env.d files are merged
    # (space- or colon-separated) instead of later files overwriting
    # earlier ones.
    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set([
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "CLASSPATH", "INFODIR",
        "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", "PATH", "PKG_CONFIG_PATH",
        "PRELINK_PATH", "PRELINK_PATH_MASK", "PYTHONPATH", "ROOTPATH"
    ])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        # env.d files may declare additional cumulative variables of
        # either flavor via SPACE_SEPARATED / COLON_SEPARATED.
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    # Merge each cumulative variable across all env.d files, keeping
    # first-seen order and dropping duplicates.
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        """Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
        env.update(myconfig)

    # Read the current ld.so.conf (minus comment lines) so it is only
    # rewritten when the merged LDPATH actually changed.
    ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
                                       encoding=_encodings['fs'],
                                       errors='strict'),
                       mode='r',
                       encoding=_encodings['content'],
                       errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            #each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if (oldld != newld):
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write(
            "# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    # Discover lib/lib64/usr/lib-style directories under eroot (but not
    # libexec), stored relative to eroot.
    potential_lib_dirs = set()
    for lib_dir_glob in ('usr/lib*', 'lib*'):
        x = os.path.join(eroot, lib_dir_glob)
        for y in glob.glob(
                _unicode_encode(x, encoding=_encodings['fs'],
                                errors='strict')):
            try:
                y = _unicode_decode(y,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                continue
            if os.path.basename(y) != 'libexec':
                potential_lib_dirs.add(y[len(eroot):])

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
        ensure_dirs(prelink_d)
        newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
        newprelink.write(
            "# prelink.conf autogenerated by env-update; make all changes to\n"
        )
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
            newprelink.write('-l /%s\n' % (x, ))
        prelink_paths = set()
        prelink_paths |= set(specials.get('LDPATH', []))
        prelink_paths |= set(specials.get('PATH', []))
        prelink_paths |= set(specials.get('PRELINK_PATH', []))
        prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
        # Emit "-h" entries for paths not covered by a PRELINK_PATH_MASK
        # prefix, then "-b" (blacklist) entries for the masked paths.
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x, ))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x, ))
        newprelink.close()

        # Migration code path.  If /etc/prelink.conf was generated by us, then
        # point it to the new stuff until the prelink package re-installs.
        prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
        try:
            with open(
                    _unicode_encode(prelink_conf,
                                    encoding=_encodings['fs'],
                                    errors='strict'), 'rb') as f:
                if f.readline(
                ) == b'# prelink.conf autogenerated by env-update; make all changes to\n':
                    f = atomic_ofstream(prelink_conf)
                    f.write('-c /etc/prelink.conf.d/*.conf\n')
                    f.close()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise

    # NOTE(review): 'long' is the Python 2 builtin (or a compat alias
    # defined elsewhere in this file) — confirm it exists under Python 3.
    current_time = long(time.time())
    mtime_changed = False

    lib_dirs = set()
    # Track mtimes of LDPATH plus the discovered library directories so
    # ldconfig only needs to run when one of them changed.
    for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
        x = os.path.join(eroot, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    # With no mtime changes, only run ldconfig if the merged contents
    # actually touched one of the tracked library directories.
    if makelinks and \
     not mtime_changed and \
     contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    # When cross-compiling (CHOST != CBUILD), prefer a CHOST-prefixed
    # ldconfig binary.
    ldconfig = "/sbin/ldconfig"
    if "CHOST" in settings and "CBUILD" in settings and \
     settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

    # Only run ldconfig as needed
    if makelinks and ldconfig and not eprefix:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions first, as
            # an older package installed ON TOP of a newer version will cause ldconfig
            # to overwrite the symlinks we just made. -X means no links. After 'clean'
            # we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
             (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
             target_root)
            os.system(("cd / ; %s -elf -i " + \
             "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
             (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()

    #create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
Example #54
0
    def __init__(
        self,
        ebuilds=None,
        binpkgs=None,
        installed=None,
        profile=None,
        repo_configs=None,
        user_config=None,
        sets=None,
        world=None,
        world_sets=None,
        distfiles=None,
        eclasses=None,
        eprefix=None,
        targetroot=False,
        debug=False,
    ):
        """
        Build a self-contained portage test environment rooted at a
        temporary (or caller-supplied) EPREFIX.

        ebuilds: cpv -> metadata mapping simulating available ebuilds.
        installed: cpv -> metadata mapping simulating installed packages.
                If a metadata key is missing, it gets a default value.
        profile: settings defined by the profile.
        eprefix: existing prefix directory to use; when None a fresh
                temporary prefix is created and populated with symlinks
                to portage scripts and essential system binaries.
        targetroot: when True, simulate a ROOT != "/" installation.
        debug: when False, portage output noise is suppressed.
        """

        # The previous signature used mutable defaults ({} / []), which
        # Python evaluates once and shares across every instantiation;
        # any mutation by the _create_* helpers would leak between tests.
        # Normalize None sentinels instead (backward-compatible).
        ebuilds = {} if ebuilds is None else ebuilds
        binpkgs = {} if binpkgs is None else binpkgs
        installed = {} if installed is None else installed
        profile = {} if profile is None else profile
        repo_configs = {} if repo_configs is None else repo_configs
        user_config = {} if user_config is None else user_config
        sets = {} if sets is None else sets
        world = [] if world is None else world
        world_sets = [] if world_sets is None else world_sets
        distfiles = {} if distfiles is None else distfiles
        eclasses = {} if eclasses is None else eclasses

        self.debug = debug
        if eprefix is None:
            self.eprefix = normalize_path(tempfile.mkdtemp())

            # EPREFIX/bin is used by fake true_binaries. Real binaries go
            # into EPREFIX/usr/bin.
            eubin = os.path.join(self.eprefix, "usr", "bin")
            ensure_dirs(eubin)
            for x in self.portage_bin:
                os.symlink(os.path.join(PORTAGE_BIN_PATH, x),
                           os.path.join(eubin, x))

            eusbin = os.path.join(self.eprefix, "usr", "sbin")
            ensure_dirs(eusbin)
            for x in self.portage_sbin:
                os.symlink(os.path.join(PORTAGE_BIN_PATH, x),
                           os.path.join(eusbin, x))

            # System tools that ebuild phases and portage itself shell
            # out to; each is resolved from the host PATH and symlinked
            # into the prefix so tests never depend on the host layout.
            essential_binaries = (
                "awk",
                "basename",
                "bzip2",
                "cat",
                "chgrp",
                "chmod",
                "chown",
                "comm",
                "cp",
                "egrep",
                "env",
                "find",
                "flock",
                "grep",
                "head",
                "install",
                "ln",
                "mkdir",
                "mkfifo",
                "mktemp",
                "mv",
                "readlink",
                "rm",
                "sed",
                "sort",
                "tar",
                "tr",
                "uname",
                "uniq",
                "xargs",
                "zstd",
            )
            # Exclude internal wrappers from PATH lookup, so find_binary
            # resolves the real host tools rather than portage's
            # ebuild-helpers shims.
            orig_path = os.environ["PATH"]
            included_paths = []
            for path in orig_path.split(":"):
                if path and not fnmatch.fnmatch(path,
                                                "*/portage/*/ebuild-helpers*"):
                    included_paths.append(path)
            try:
                os.environ["PATH"] = ":".join(included_paths)
                for x in essential_binaries:
                    path = find_binary(x)
                    if path is None:
                        raise portage.exception.CommandNotFound(x)
                    os.symlink(path, os.path.join(eubin, x))
            finally:
                # Always restore the caller's PATH, even if a binary
                # was missing.
                os.environ["PATH"] = orig_path
        else:
            self.eprefix = normalize_path(eprefix)

        # Tests may override portage.const.EPREFIX in order to
        # simulate a prefix installation. It's reasonable to do
        # this because tests should be self-contained such that
        # the "real" value of portage.const.EPREFIX is entirely
        # irrelevant (see bug #492932). The original value is saved
        # so it can be restored on cleanup.
        self._orig_eprefix = portage.const.EPREFIX
        portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

        self.eroot = self.eprefix + os.sep
        if targetroot:
            self.target_root = os.path.join(self.eroot, "target_root")
        else:
            self.target_root = os.sep
        self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
        self.pkgdir = os.path.join(self.eprefix, "pkgdir")
        self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
        os.makedirs(self.vdbdir)

        if not debug:
            # Silence portage's informational output during tests.
            portage.util.noiselimit = -2

        self._repositories = {}
        # Make sure the main repo is always created
        self._get_repo_dir("test_repo")

        self._create_distfiles(distfiles)
        self._create_ebuilds(ebuilds)
        self._create_installed(installed)
        self._create_profile(ebuilds, eclasses, installed, profile,
                             repo_configs, user_config, sets)
        self._create_world(world, world_sets)

        self.settings, self.trees = self._load_config()

        self.gpg = None
        # Binpkgs and manifests require a loaded config, so they are
        # created after _load_config().
        self._create_binpkgs(binpkgs)
        self._create_ebuild_manifests(ebuilds)

        portage.util.noiselimit = 0
Example #55
0
	def rebuild(self, exclude_pkgs=None, include_file=None,
		preserve_paths=None):
		"""
		Rebuild the linkage index (self._libs / self._obj_properties) from
		the NEEDED entries of all installed packages, optionally augmented
		by a package that is currently being merged and by preserved
		libraries discovered via scanelf.

		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.

		@param exclude_pkgs: A set of packages that should be excluded from
			the LinkageMap, since they are being unmerged and their NEEDED
			entries are therefore irrelevant and would only serve to corrupt
			the LinkageMap.
		@type exclude_pkgs: set
		@param include_file: The path of a file containing NEEDED entries for
			a package which does not exist in the vardbapi yet because it is
			currently being merged.
		@type include_file: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		"""

		# NOTE(review): _os_merge appears to be an os wrapper used during
		# merge operations — confirm its encoding semantics before relying
		# on them here.
		os = _os_merge
		root = self._root
		# Length of root minus its trailing separator, so that
		# entry.filename[root_len:] below keeps a leading os.sep.
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._dbapi.settings['EROOT'],
			env=self._dbapi.settings))
		libs = self._libs
		obj_properties = self._obj_properties

		# Accumulates (owner_cpv_or_None, source_location, raw_line) tuples.
		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			for line in grabfile(include_file):
				lines.append((None, include_file, line))

		aux_keys = [self._needed_aux_key]
		# Only lock the vardb when we have write access; read-only callers
		# proceed without a lock.
		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
		if can_lock:
			self._dbapi.lock()
		try:
			for cpv in self._dbapi.cpv_all():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					continue
				needed_file = self._dbapi.getpath(cpv,
					filename=self._needed_aux_key)
				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
					lines.append((cpv, needed_file, line))
		finally:
			if can_lock:
				self._dbapi.unlock()

		# Have to call scanelf for preserved libs here as they aren't
		# registered in NEEDED.ELF.2 files.
		# plibs maps preserved lib path -> owning cpv (or None for
		# preserve_paths entries whose owner is still being merged).
		plibs = {}
		if preserve_paths is not None:
			plibs.update((x, None) for x in preserve_paths)
		if self._dbapi._plib_registry and \
			self._dbapi._plib_registry.hasEntries():
			for cpv, items in \
				self._dbapi._plib_registry.getPreservedLibs().items():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					# These preserved libs will either be unmerged,
					# rendering them irrelevant, or they will be
					# preserved in the replacement package and are
					# already represented via the preserve_paths
					# parameter.
					continue
				plibs.update((x, cpv) for x in items)
		if plibs:
			args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
				for x in plibs)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				# ENOENT means the scanelf binary itself is missing.
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						# Fall back to replacement characters but warn,
						# so a mangled path is visible in the logs.
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					# Strip the scanelf record prefix and trailing newline.
					l = l[3:].rstrip("\n")
					if not l:
						continue
					try:
						entry = NeededEntry.parse("scanelf", l)
					except InvalidData as e:
						writemsg_level("\n%s\n\n" % (e,),
							level=logging.ERROR, noiselevel=-1)
						continue
					try:
						with open(_unicode_encode(entry.filename,
							encoding=_encodings['fs'],
							errors='strict'), 'rb') as f:
							elf_header = ELFHeader.read(f)
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						# File removed concurrently.
						continue
					entry.multilib_category = compute_multilib_category(elf_header)
					# Strip ROOT so stored paths are root-relative
					# (keeping the leading os.sep).
					entry.filename = entry.filename[root_len:]
					owner = plibs.pop(entry.filename, None)
					lines.append((owner, "scanelf", _unicode(entry)))
				proc.wait()
				proc.stdout.close()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x, cpv in plibs.items():
				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))

		# Share identical frozenset instances when available,
		# in order to conserve memory.
		frozensets = {}

		for owner, location, l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			if '\0' in l:
				# os.stat() will raise "TypeError: must be encoded string
				# without NULL bytes, not str" in this case.
				writemsg_level(_("\nLine contains null byte(s) " \
					"in %s: %s\n\n") % (location, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			try:
				entry = NeededEntry.parse(location, l)
			except InvalidData as e:
				writemsg_level("\n%s\n\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				continue

			# If NEEDED.ELF.2 contains the new multilib category field,
			# then use that for categorization. Otherwise, if a mapping
			# exists, map e_machine (entry.arch) to an approximate
			# multilib category. If all else fails, use e_machine, just
			# as older versions of portage did.
			arch = entry.multilib_category
			if arch is None:
				arch = _approx_multilib_categories.get(
					entry.arch, entry.arch)

			obj = entry.filename
			soname = entry.soname
			# Expand $ORIGIN in RPATH/RUNPATH entries relative to the
			# object's own directory.
			expand = {"ORIGIN": os.path.dirname(entry.filename)}
			path = frozenset(normalize_path(
				varexpand(x, expand, error_leader=lambda: "%s: " % location))
				for x in entry.runpaths)
			path = frozensets.setdefault(path, path)
			needed = frozenset(entry.needed)

			needed = frozensets.setdefault(needed, needed)

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = self._obj_properties_class(
					arch, needed, path, soname, [], owner)
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops.alt_paths.append(obj)

			# Don't index the same file more that once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[soname] = soname_map
				soname_map.providers.append(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[needed_soname] = soname_map
				soname_map.consumers.append(obj_key)

		# Deduplicate provider/consumer lists and freeze them as tuples.
		for arch, sonames in libs.items():
			for soname_node in sonames.values():
				soname_node.providers = tuple(set(soname_node.providers))
				soname_node.consumers = tuple(set(soname_node.consumers))