Example 1
def deprecated_profile_check(settings=None):
    config_root = "/"
    deprecated_profile_file = None
    if settings is not None:
        config_root = settings["PORTAGE_CONFIGROOT"]
        for x in reversed(settings.profiles):
            deprecated_profile_file = os.path.join(x, "deprecated")
            if os.access(deprecated_profile_file, os.R_OK):
                break
        else:
            deprecated_profile_file = None

    if deprecated_profile_file is None:
        deprecated_profile_file = os.path.join(config_root,
                                               DEPRECATED_PROFILE_FILE)
        if not os.access(deprecated_profile_file, os.R_OK):
            deprecated_profile_file = os.path.join(config_root, 'etc',
                                                   'make.profile',
                                                   'deprecated')
            if not os.access(deprecated_profile_file, os.R_OK):
                return

    dcontent = io.open(_unicode_encode(deprecated_profile_file,
                                       encoding=_encodings['fs'],
                                       errors='strict'),
                       mode='r',
                       encoding=_encodings['content'],
                       errors='replace').readlines()
    writemsg(colorize(
        "BAD",
        _("\n!!! Your current profile is "
          "deprecated and not supported anymore.")) + "\n",
             noiselevel=-1)
    writemsg(
        colorize("BAD", _("!!! Use eselect profile to update your "
                          "profile.")) + "\n",
        noiselevel=-1)
    if not dcontent:
        writemsg(colorize(
            "BAD", _("!!! Please refer to the "
                     "Gentoo Upgrading Guide.")) + "\n",
                 noiselevel=-1)
        return True
    newprofile = dcontent[0]
    writemsg(colorize(
        "BAD", _("!!! Please upgrade to the "
                 "following profile if possible:")) + "\n",
             noiselevel=-1)
    writemsg(8 * " " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
    if len(dcontent) > 1:
        writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
        for myline in dcontent[1:]:
            writemsg(myline, noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)
    return True
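
A detail worth noting in this example is the for/else construct: the else branch runs only when the loop finishes without hitting break, which is what resets deprecated_profile_file to None when no readable candidate exists. A minimal standalone sketch of that probe pattern, using hypothetical paths:

import os

def first_readable_deprecation_file(profile_dirs):
    """Return the first readable 'deprecated' file, or None."""
    for profile_dir in profile_dirs:
        candidate = os.path.join(profile_dir, "deprecated")
        if os.access(candidate, os.R_OK):
            break
    else:
        # Loop ran to completion without break: nothing readable was found.
        candidate = None
    return candidate

# Hypothetical profile stack, most specific last (hence reversed()).
print(first_readable_deprecation_file(reversed(["/etc/a", "/etc/b"])))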
Example 2
	def sync(self, acquire_lock=1):
		"""
		Call this method to synchronize state with the real vardb
		after one or more packages may have been installed or
		uninstalled.
		"""
		locked = False
		try:
			if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
				self._real_vardb.lock()
				locked = True
			self._sync()
		finally:
			if locked:
				self._real_vardb.unlock()

		# Populate the old-style virtuals using the cached values.
		# Skip the aux_get wrapper here, to avoid unwanted
		# cache generation.
		try:
			self.dbapi.aux_get = self._aux_get
			self.settings._populate_treeVirtuals_if_needed(self)
		finally:
			if self._dynamic_deps:
				self.dbapi.aux_get = self._aux_get_wrapper
Example 3
def get_glsa_list(myconfig):
	"""
	Returns a list of all available GLSAs in the given repository
	by comparing the filelist there with the pattern described in
	the config.

	@type	myconfig: portage.config
	@param	myconfig: Portage settings instance

	@rtype:		List of Strings
	@return:	a list of GLSA IDs in this repository
	"""
	rValue = []

	if "GLSA_DIR" in myconfig:
		repository = myconfig["GLSA_DIR"]
	else:
		repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")

	if not os.access(repository, os.R_OK):
		return []
	dirlist = os.listdir(repository)
	prefix = "glsa-"
	suffix = ".xml"

	for f in dirlist:
		try:
			if f[:len(prefix)] == prefix and f[-1*len(suffix):] == suffix:
				rValue.append(f[len(prefix):-1*len(suffix)])
		except IndexError:
			pass
	return rValue
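
The function only needs a mapping that supports membership tests and item access, so in a quick sketch a plain dict can stand in for a real portage.config instance (the directory path is hypothetical):

# Hypothetical invocation; any readable directory of glsa-*.xml files works.
myconfig = {"GLSA_DIR": "/var/db/repos/gentoo/metadata/glsa"}
for glsa_id in get_glsa_list(myconfig):
    print("glsa-%s.xml" % glsa_id)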
Example 4
	def _check_world(self, onProgress):
		eroot = portage.settings['EROOT']
		self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
		self.found = os.access(self.world_file, os.R_OK)
		vardb = portage.db[eroot]["vartree"].dbapi

		from portage._sets import SETPREFIX
		sets = self._sets
		world_atoms = list(sets["selected"])
		maxval = len(world_atoms)
		if onProgress:
			onProgress(maxval, 0)
		for i, atom in enumerate(world_atoms):
			if not isinstance(atom, portage.dep.Atom):
				if atom.startswith(SETPREFIX):
					s = atom[len(SETPREFIX):]
					if s in sets:
						self.okay.append(atom)
					else:
						self.not_installed.append(atom)
				else:
					self.invalid.append(atom)
				if onProgress:
					onProgress(maxval, i+1)
				continue
			okay = True
			if not vardb.match(atom):
				self.not_installed.append(atom)
				okay = False
			if okay:
				self.okay.append(atom)
			if onProgress:
				onProgress(maxval, i+1)
Example 5
def xtermTitleReset():
	global default_xterm_title
	if default_xterm_title is None:
		prompt_command = os.environ.get('PROMPT_COMMAND')
		if prompt_command == "":
			default_xterm_title = ""
		elif prompt_command is not None:
			if dotitles and \
				'TERM' in os.environ and \
				_legal_terms_re.match(os.environ['TERM']) is not None and \
				sys.__stderr__.isatty():
				from portage.process import find_binary, spawn
				shell = os.environ.get("SHELL")
				if not shell or not os.access(shell, os.EX_OK):
					shell = find_binary("sh")
				if shell:
					spawn([shell, "-c", prompt_command], env=os.environ,
						fd_pipes={
							0: portage._get_stdin().fileno(),
							1: sys.__stderr__.fileno(),
							2: sys.__stderr__.fileno()
						})
				else:
					os.system(prompt_command)
			return
		else:
			pwd = os.environ.get('PWD','')
			home = os.environ.get('HOME', '')
			if home != '' and pwd.startswith(home):
				pwd = '~' + pwd[len(home):]
			default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
				os.environ.get('LOGNAME', ''),
				os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
	xtermTitle(default_xterm_title, raw=True)
Example 6
	def sync(self, acquire_lock=1):
		"""
		Call this method to synchronize state with the real vardb
		after one or more packages may have been installed or
		uninstalled.
		"""
		vdb_path = os.path.join(self.root, portage.VDB_PATH)
		try:
			# At least the parent needs to exist for the lock file.
			portage.util.ensure_dirs(vdb_path)
		except portage.exception.PortageException:
			pass
		vdb_lock = None
		try:
			if acquire_lock and os.access(vdb_path, os.W_OK):
				vdb_lock = portage.locks.lockdir(vdb_path)
			self._sync()
		finally:
			if vdb_lock:
				portage.locks.unlockdir(vdb_lock)

		# Populate the old-style virtuals using the cached values.
		# Skip the aux_get wrapper here, to avoid unwanted
		# cache generation.
		try:
			self.dbapi.aux_get = self._aux_get
			self.settings._populate_treeVirtuals_if_needed(self)
		finally:
			self.dbapi.aux_get = self._aux_get_wrapper
Example 7
    def _check_world(self, onProgress):
        eroot = portage.settings['EROOT']
        self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
        self.found = os.access(self.world_file, os.R_OK)
        vardb = portage.db[eroot]["vartree"].dbapi

        from portage._sets import SETPREFIX
        sets = self._sets
        world_atoms = list(sets["selected"])
        maxval = len(world_atoms)
        if onProgress:
            onProgress(maxval, 0)
        for i, atom in enumerate(world_atoms):
            if not isinstance(atom, portage.dep.Atom):
                if atom.startswith(SETPREFIX):
                    s = atom[len(SETPREFIX):]
                    if s in sets:
                        self.okay.append(atom)
                    else:
                        self.not_installed.append(atom)
                else:
                    self.invalid.append(atom)
                if onProgress:
                    onProgress(maxval, i + 1)
                continue
            okay = True
            if not vardb.match(atom):
                self.not_installed.append(atom)
                okay = False
            if okay:
                self.okay.append(atom)
            if onProgress:
                onProgress(maxval, i + 1)
Example 8
def binTestsCleanup():
    global basedir
    if basedir is None:
        return
    if os.access(basedir, os.W_OK):
        shutil.rmtree(basedir)
        basedir = None
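
Checking os.access(basedir, os.W_OK) and then calling shutil.rmtree() is a time-of-check/time-of-use race: permissions can change between the two calls. Where that matters, one alternative (a sketch, not what portage does here) is to attempt the removal and handle failure:

import shutil

def cleanup(path):
    try:
        shutil.rmtree(path)
    except (PermissionError, FileNotFoundError):
        # Removal failed or there was nothing to remove; either is fine here.
        pass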
Example 9
def deprecated_profile_check(settings=None):
	config_root = "/"
	if settings is not None:
		config_root = settings["PORTAGE_CONFIGROOT"]
	deprecated_profile_file = os.path.join(config_root,
		DEPRECATED_PROFILE_FILE)
	if not os.access(deprecated_profile_file, os.R_OK):
		return False
	dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
		encoding=_encodings['fs'], errors='strict'), 
		mode='r', encoding=_encodings['content'], errors='replace').readlines()
	writemsg(colorize("BAD", _("\n!!! Your current profile is "
		"deprecated and not supported anymore.")) + "\n", noiselevel=-1)
	writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
		"profile.")) + "\n", noiselevel=-1)
	if not dcontent:
		writemsg(colorize("BAD", _("!!! Please refer to the "
			"Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
		return True
	newprofile = dcontent[0]
	writemsg(colorize("BAD", _("!!! Please upgrade to the "
		"following profile if possible:")) + "\n", noiselevel=-1)
	writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
	if len(dcontent) > 1:
		writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
		for myline in dcontent[1:]:
			writemsg(myline, noiselevel=-1)
		writemsg("\n\n", noiselevel=-1)
	return True
Example 10
    def sync(self, acquire_lock=1):
        """
        Call this method to synchronize state with the real vardb
        after one or more packages may have been installed or
        uninstalled.
        """
        vdb_path = os.path.join(self.root, portage.VDB_PATH)
        try:
            # At least the parent needs to exist for the lock file.
            portage.util.ensure_dirs(vdb_path)
        except portage.exception.PortageException:
            pass
        vdb_lock = None
        try:
            if acquire_lock and os.access(vdb_path, os.W_OK):
                vdb_lock = portage.locks.lockdir(vdb_path)
            self._sync()
        finally:
            if vdb_lock:
                portage.locks.unlockdir(vdb_lock)

        # Populate the old-style virtuals using the cached values.
        # Skip the aux_get wrapper here, to avoid unwanted
        # cache generation.
        try:
            self.dbapi.aux_get = self._aux_get
            self.settings._populate_treeVirtuals_if_needed(self)
        finally:
            self.dbapi.aux_get = self._aux_get_wrapper
Example 11
def binTestsCleanup():
	global basedir
	if basedir is None:
		return
	if os.access(basedir, os.W_OK):
		shutil.rmtree(basedir)
		basedir = None
Example 12
    def __init__(self, settings, logger):
        self.settings = settings
        self.logger = logger
        # Similar to emerge, sync needs a default umask so that created
        # files have sane permissions.
        os.umask(0o22)

        self.module_controller = portage.sync.module_controller
        self.module_names = self.module_controller.module_names
        self.hooks = {}
        for _dir in ["repo.postsync.d", "postsync.d"]:
            postsync_dir = os.path.join(self.settings["PORTAGE_CONFIGROOT"],
                                        portage.USER_CONFIG_PATH, _dir)
            hooks = OrderedDict()
            for filepath in util._recursive_file_list(postsync_dir):
                name = filepath.split(postsync_dir)[1].lstrip(os.sep)
                if os.access(filepath, os.X_OK):
                    hooks[filepath] = name
                else:
                    writemsg_level(" %s %s hook: '%s' is not executable\n" % (
                        warn("*"),
                        _dir,
                        _unicode_decode(name),
                    ),
                                   level=logging.WARN,
                                   noiselevel=2)
            self.hooks[_dir] = hooks
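
The hook discovery above reduces to walking a directory tree, keeping files that pass os.access(..., os.X_OK), and warning about the rest. A standalone sketch of that filter over a hypothetical hook directory:

import os

def executable_hooks(hook_dir):
    """Yield (path, relative_name) for each executable file under hook_dir."""
    for root, _dirs, files in os.walk(hook_dir):
        for fname in files:
            path = os.path.join(root, fname)
            if os.access(path, os.X_OK):
                yield path, os.path.relpath(path, hook_dir)

for path, name in executable_hooks("/etc/portage/postsync.d"):
    print(name)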
Example 13
	def populate(self, getbinpkgs=False, getbinpkg_refresh=True):
		"""
		Populates the binarytree with package metadata.

		@param getbinpkgs: include remote packages
		@type getbinpkgs: bool
		@param getbinpkg_refresh: attempt to refresh the cache
			of remote package metadata if getbinpkgs is also True
		@type getbinpkg_refresh: bool
		"""

		if self._populating:
			return

		pkgindex_lock = None
		try:
			if os.access(self.pkgdir, os.W_OK):
				pkgindex_lock = lockfile(self._pkgindex_file,
					wantnewlockfile=1)
			self._populating = True
			self._populate(getbinpkgs, getbinpkg_refresh=getbinpkg_refresh)
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
			self._populating = False
Example 14
	def sync(self, acquire_lock=1):
		"""
		Call this method to synchronize state with the real vardb
		after one or more packages may have been installed or
		uninstalled.
		"""
		locked = False
		try:
			if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
				self._real_vardb.lock()
				locked = True
			self._sync()
		finally:
			if locked:
				self._real_vardb.unlock()

		# Populate the old-style virtuals using the cached values.
		# Skip the aux_get wrapper here, to avoid unwanted
		# cache generation.
		try:
			self.dbapi.aux_get = self._aux_get
			self.settings._populate_treeVirtuals_if_needed(self)
		finally:
			if self._dynamic_deps:
				self.dbapi.aux_get = self._aux_get_wrapper
Example 15
def get_glsa_list(myconfig):
    """
    Returns a list of all available GLSAs in the given repository
    by comparing the filelist there with the pattern described in
    the config.

    @type	myconfig: portage.config
    @param	myconfig: Portage settings instance

    @rtype:		List of Strings
    @return:	a list of GLSA IDs in this repository
    """
    rValue = []

    if "GLSA_DIR" in myconfig:
        repository = myconfig["GLSA_DIR"]
    else:
        repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")

    if not os.access(repository, os.R_OK):
        return []
    dirlist = os.listdir(repository)
    prefix = "glsa-"
    suffix = ".xml"

    for f in dirlist:
        try:
            if f[:len(prefix)] == prefix and f[-1 * len(suffix):] == suffix:
                rValue.append(f[len(prefix):-1 * len(suffix)])
        except IndexError:
            pass
    return rValue
Example 16
	def findLicensePath(self, license_name):
		mytrees = self.porttrees[:]
		mytrees.reverse()
		for x in mytrees:
			license_path = os.path.join(x, "licenses", license_name)
			if os.access(license_path, os.R_OK):
				return license_path
		return None
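
Because mytrees is reversed, trees listed later take precedence when the same license file exists in several of them. The same search written as a standalone function over a hypothetical overlay stack:

import os

def find_license_path(trees, license_name):
    # Later trees win: search the list back to front.
    for tree in reversed(trees):
        path = os.path.join(tree, "licenses", license_name)
        if os.access(path, os.R_OK):
            return path
    return None

print(find_license_path(["/var/db/repos/gentoo", "/var/db/repos/local"], "GPL-2"))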
Example 17
    def findLicensePath(self, license_name):
        mytrees = self.porttrees[:]
        mytrees.reverse()
        for x in mytrees:
            license_path = os.path.join(x, "licenses", license_name)
            if os.access(license_path, os.R_OK):
                return license_path
        return None
Example 18
    def writable(self):
        """
        Check if self.location is writable, or permissions are sufficient
        to create it if it does not exist yet.
        @rtype: bool
        @return: True if self.location is writable or can be created,
            False otherwise
        """
        return os.access(first_existing(self.location), os.W_OK)
Example 19
	def writable(self):
		"""
		Check if self.location is writable, or permissions are sufficient
		to create it if it does not exist yet.
		@rtype: bool
		@return: True if self.location is writable or can be created,
			False otherwise
		"""
		return os.access(first_existing(self.location), os.W_OK)
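
writable() leans on first_existing() to map self.location to its nearest existing ancestor, so the W_OK check is meaningful even before the directory has been created. A rough standalone equivalent of that helper, written here as an assumption about its behavior rather than as portage's actual implementation:

import os

def first_existing_like(path):
    """Walk up from path to the closest component that exists ('/' always does)."""
    while not os.path.exists(path):
        parent = os.path.dirname(path)
        if parent == path:
            break
        path = parent
    return path

target = "/var/cache/edb/some/new/dir"  # hypothetical location
print(os.access(first_existing_like(target), os.W_OK))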
Example 20
    def _start_lock(self):
        if self.phase in self._locked_phases and "ebuild-locks" in self.settings.features:
            eroot = self.settings["EROOT"]
            lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
            if os.access(os.path.dirname(lock_path), os.W_OK):
                self._ebuild_lock = AsynchronousLock(path=lock_path, scheduler=self.scheduler)
                self._start_task(self._ebuild_lock, self._lock_exit)
                return

        self._start_ebuild()
Example 21
    def _start(self):
        settings = self.settings
        if settings is None:
            settings = self.portdb.settings

        if "PORTAGE_PARALLEL_FETCHONLY" in settings:
            # parallel-fetch mode
            self.returncode = os.EX_OK
            self._async_wait()
            return

        # Prevent temporary config changes from interfering
        # with config instances that are reused.
        settings = self.settings = config(clone=settings)

        # We must create our private PORTAGE_TMPDIR before calling
        # doebuild_environment(), since lots of variables such
        # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
        portage_tmpdir = settings.get("PORTAGE_TMPDIR")
        if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
            portage_tmpdir = None
        private_tmpdir = self._private_tmpdir = tempfile.mkdtemp(
            dir=portage_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        # private temp dir was just created, so it's not locked yet
        settings.pop("PORTAGE_BUILDDIR_LOCKED", None)

        doebuild_environment(self.ebuild_path,
                             "nofetch",
                             settings=settings,
                             db=self.portdb)
        restrict = settings["PORTAGE_RESTRICT"].split()
        defined_phases = settings["DEFINED_PHASES"].split()
        if not defined_phases:
            # When DEFINED_PHASES is undefined, assume all
            # phases are defined.
            defined_phases = EBUILD_PHASES

        if "fetch" not in restrict and "nofetch" not in defined_phases:
            self.returncode = os.EX_OK
            self._async_wait()
            return

        prepare_build_dirs(settings=settings)

        ebuild_phase = EbuildPhase(
            background=self.background,
            phase="nofetch",
            scheduler=self.scheduler,
            fd_pipes=self.fd_pipes,
            settings=settings,
        )

        self._start_task(ebuild_phase, self._nofetch_exit)
Example 22
def find_updated_config_files(target_root, config_protect):
	"""
	Return a tuple of configuration files that need to be updated.
	The tuple contains lists organized like this:
	[ protected_dir, file_list ]
	If the protected config isn't a protected_dir but a protected_file, the list is:
	[ protected_file, None ]
	If no configuration files need to be updated, None is returned.
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
						os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
Example 23
    def isInjected(self):
        """
        Checks whether the GLSA ID is in the GLSA checkfile to decide if
        this GLSA should be marked as applied.

        @rtype:		Boolean
        @returns:	True if the GLSA is in the inject file, False if not
        """
        if not os.access(os.path.join(self.config["EROOT"], PRIVATE_PATH, "glsa_injected"), os.R_OK):
            return False
        return self.nr in get_applied_glsas(self.config)
Example 24
def find_updated_config_files(target_root, config_protect):
	"""
	Return a tuple of configuration files that need to be updated.
	The tuple contains lists organized like this:
	[ protected_dir, file_list ]
	If the protected config isn't a protected_dir but a protected_file, the list is:
	[ protected_file, None ]
	If no configuration files need to be updated, None is returned.
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
						os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
Example 25
    def _start_lock(self):
        if (self.phase in self._locked_phases
                and "ebuild-locks" in self.settings.features):
            eroot = self.settings["EROOT"]
            lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
            if os.access(os.path.dirname(lock_path), os.W_OK):
                self._ebuild_lock = AsynchronousLock(path=lock_path,
                                                     scheduler=self.scheduler)
                self._start_task(self._ebuild_lock, self._lock_exit)
                return

        self._start_ebuild()
Example 26
	def isInjected(self):
		"""
		Checks whether the GLSA ID is in the GLSA checkfile to decide if
		this GLSA should be marked as applied.

		@rtype:		Boolean
		@returns:	True if the GLSA is in the inject file, False if not
		"""
		if not os.access(os.path.join(self.config["EROOT"],
			PRIVATE_PATH, "glsa_injected"), os.R_OK):
			return False
		return (self.nr in get_applied_glsas(self.config))
Example 27
def gpgsign(filename, repoman_settings, options):
	gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
	if gpgcmd in [None, '']:
		raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!"
			" Is make.globals missing?")
	if "${PORTAGE_GPG_KEY}" in gpgcmd and \
		"PORTAGE_GPG_KEY" not in repoman_settings:
		raise MissingParameter("PORTAGE_GPG_KEY is unset!")
	if "${PORTAGE_GPG_DIR}" in gpgcmd:
		if "PORTAGE_GPG_DIR" not in repoman_settings:
			repoman_settings["PORTAGE_GPG_DIR"] = \
				os.path.expanduser("~/.gnupg")
			logging.info(
				"Automatically setting PORTAGE_GPG_DIR to '%s'" %
				repoman_settings["PORTAGE_GPG_DIR"])
		else:
			repoman_settings["PORTAGE_GPG_DIR"] = \
				os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
		if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
			raise portage.exception.InvalidLocation(
				"Unable to access directory: PORTAGE_GPG_DIR='%s'" %
				repoman_settings["PORTAGE_GPG_DIR"])
	gpgvars = {"FILE": filename}
	for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
		v = repoman_settings.get(k)
		if v is not None:
			gpgvars[k] = v
	gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
	if options.pretend:
		print("(" + gpgcmd + ")")
	else:
		# Encode unicode manually for bug #310789.
		gpgcmd = portage.util.shlex_split(gpgcmd)

		if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
			not os.path.isabs(gpgcmd[0]):
			# Python 3.1 _execvp throws TypeError for non-absolute executable
			# path passed as bytes (see http://bugs.python.org/issue8513).
			fullname = find_binary(gpgcmd[0])
			if fullname is None:
				raise portage.exception.CommandNotFound(gpgcmd[0])
			gpgcmd[0] = fullname

		gpgcmd = [
			_unicode_encode(arg, encoding=_encodings['fs'], errors='strict')
			for arg in gpgcmd]
		rValue = subprocess.call(gpgcmd)
		if rValue == os.EX_OK:
			os.rename(filename + ".asc", filename)
		else:
			raise portage.exception.PortageException(
				"!!! gpg exited with '" + str(rValue) + "' status")
Example 28
def gpgsign(filename, repoman_settings, options):
    gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
    if gpgcmd in [None, '']:
        raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!"
                               " Is make.globals missing?")
    if "${PORTAGE_GPG_KEY}" in gpgcmd and \
     "PORTAGE_GPG_KEY" not in repoman_settings:
        raise MissingParameter("PORTAGE_GPG_KEY is unset!")
    if "${PORTAGE_GPG_DIR}" in gpgcmd:
        if "PORTAGE_GPG_DIR" not in repoman_settings:
            repoman_settings["PORTAGE_GPG_DIR"] = \
             os.path.expanduser("~/.gnupg")
            logging.info("Automatically setting PORTAGE_GPG_DIR to '%s'" %
                         repoman_settings["PORTAGE_GPG_DIR"])
        else:
            repoman_settings["PORTAGE_GPG_DIR"] = \
             os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
        if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
            raise portage.exception.InvalidLocation(
                "Unable to access directory: PORTAGE_GPG_DIR='%s'" %
                repoman_settings["PORTAGE_GPG_DIR"])
    gpgvars = {"FILE": filename}
    for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
        v = repoman_settings.get(k)
        if v is not None:
            gpgvars[k] = v
    gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
    if options.pretend:
        print("(" + gpgcmd + ")")
    else:
        # Encode unicode manually for bug #310789.
        gpgcmd = portage.util.shlex_split(gpgcmd)

        if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
         not os.path.isabs(gpgcmd[0]):
            # Python 3.1 _execvp throws TypeError for non-absolute executable
            # path passed as bytes (see https://bugs.python.org/issue8513).
            fullname = find_binary(gpgcmd[0])
            if fullname is None:
                raise portage.exception.CommandNotFound(gpgcmd[0])
            gpgcmd[0] = fullname

        gpgcmd = [
            _unicode_encode(arg, encoding=_encodings['fs'], errors='strict')
            for arg in gpgcmd
        ]
        rValue = subprocess.call(gpgcmd)
        if rValue == os.EX_OK:
            os.rename(filename + ".asc", filename)
        else:
            raise portage.exception.PortageException("!!! gpg exited with '" +
                                                     str(rValue) + "' status")
Example 29
	def prevent_collision(self, cpv):
		"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
		use for a given cpv.  If a collision will occur with an existing
		package from another category, the existing package will be bumped to
		${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
		if not self._all_directory:
			return

		# Copy group permissions for new directories that
		# may have been created.
		for path in ("All", catsplit(cpv)[0]):
			path = os.path.join(self.pkgdir, path)
			self._ensure_dir(path)
			if not os.access(path, os.W_OK):
				raise PermissionDenied("access('%s', W_OK)" % path)

		full_path = self.getname(cpv)
		if "All" == full_path.split(os.path.sep)[-2]:
			return
		"""Move a colliding package if it exists.  Code below this point only
		executes in rare cases."""
		mycat, mypkg = catsplit(cpv)
		myfile = mypkg + ".tbz2"
		mypath = os.path.join("All", myfile)
		dest_path = os.path.join(self.pkgdir, mypath)

		try:
			st = os.lstat(dest_path)
		except OSError:
			st = None
		else:
			if stat.S_ISLNK(st.st_mode):
				st = None
				try:
					os.unlink(dest_path)
				except OSError:
					if os.path.exists(dest_path):
						raise

		if st is not None:
			# For invalid packages, other_cat could be None.
			other_cat = portage.xpak.tbz2(dest_path).getfile(
				_unicode_encode("CATEGORY",
				encoding=_encodings['repo.content']))
			if other_cat:
				other_cat = _unicode_decode(other_cat,
					encoding=_encodings['repo.content'], errors='replace')
				other_cat = other_cat.strip()
				other_cpv = other_cat + "/" + mypkg
				self._move_from_all(other_cpv)
				self.inject(other_cpv)
		self._move_to_all(cpv)
Example 30
	def prevent_collision(self, cpv):
		"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
		use for a given cpv.  If a collision will occur with an existing
		package from another category, the existing package will be bumped to
		${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
		if not self._all_directory:
			return

		# Copy group permissions for new directories that
		# may have been created.
		for path in ("All", catsplit(cpv)[0]):
			path = os.path.join(self.pkgdir, path)
			self._ensure_dir(path)
			if not os.access(path, os.W_OK):
				raise PermissionDenied("access('%s', W_OK)" % path)

		full_path = self.getname(cpv)
		if "All" == full_path.split(os.path.sep)[-2]:
			return
		"""Move a colliding package if it exists.  Code below this point only
		executes in rare cases."""
		mycat, mypkg = catsplit(cpv)
		myfile = mypkg + ".tbz2"
		mypath = os.path.join("All", myfile)
		dest_path = os.path.join(self.pkgdir, mypath)

		try:
			st = os.lstat(dest_path)
		except OSError:
			st = None
		else:
			if stat.S_ISLNK(st.st_mode):
				st = None
				try:
					os.unlink(dest_path)
				except OSError:
					if os.path.exists(dest_path):
						raise

		if st is not None:
			# For invalid packages, other_cat could be None.
			other_cat = portage.xpak.tbz2(dest_path).getfile(
				_unicode_encode("CATEGORY",
				encoding=_encodings['repo.content']))
			if other_cat:
				other_cat = _unicode_decode(other_cat,
					encoding=_encodings['repo.content'], errors='replace')
				other_cat = other_cat.strip()
				other_cpv = other_cat + "/" + mypkg
				self._move_from_all(other_cpv)
				self.inject(other_cpv)
		self._move_to_all(cpv)
Example 31
	def _start(self):
		settings = self.settings
		if settings is None:
			settings = self.portdb.settings

		if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
			# parallel-fetch mode
			self.returncode = os.EX_OK
			self._async_wait()
			return

		# Prevent temporary config changes from interfering
		# with config instances that are reused.
		settings = self.settings = config(clone=settings)

		# We must create our private PORTAGE_TMPDIR before calling
		# doebuild_environment(), since lots of variables such
		# as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
		portage_tmpdir = settings.get('PORTAGE_TMPDIR')
		if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
			portage_tmpdir = None
		private_tmpdir = self._private_tmpdir = tempfile.mkdtemp(
			dir=portage_tmpdir)
		settings['PORTAGE_TMPDIR'] = private_tmpdir
		settings.backup_changes('PORTAGE_TMPDIR')
		# private temp dir was just created, so it's not locked yet
		settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

		doebuild_environment(self.ebuild_path, 'nofetch',
			settings=settings, db=self.portdb)
		restrict = settings['PORTAGE_RESTRICT'].split()
		defined_phases = settings['DEFINED_PHASES'].split()
		if not defined_phases:
			# When DEFINED_PHASES is undefined, assume all
			# phases are defined.
			defined_phases = EBUILD_PHASES

		if 'fetch' not in restrict and \
			'nofetch' not in defined_phases:
			self.returncode = os.EX_OK
			self._async_wait()
			return

		prepare_build_dirs(settings=settings)

		ebuild_phase = EbuildPhase(background=self.background,
			phase='nofetch',
			scheduler=self.scheduler,
			fd_pipes=self.fd_pipes, settings=settings)

		self._start_task(ebuild_phase, self._nofetch_exit)
Example 32
def find_binary(binary):
	"""
	Given a binary name, find the binary in PATH
	
	@param binary: Name of the binary to find
	@type string
	@rtype: None or string
	@return: full path to binary or None if the binary could not be located.
	"""
	for path in os.environ.get("PATH", "").split(":"):
		filename = "%s/%s" % (path, binary)
		if os.access(filename, os.X_OK) and os.path.isfile(filename):
			return filename
	return None
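
A quick sanity check of find_binary; note that it splits PATH on ':' directly rather than on os.pathsep, so it is POSIX-specific:

print(find_binary("sh"))            # e.g. '/bin/sh' on most POSIX systems
print(find_binary("no-such-tool"))  # None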
Example 33
def validate_cmd_var(v):
	"""
	Validate an environment variable value to see if it
	contains an executable command as the first token.
	returns (valid, token_list) where 'valid' is boolean and 'token_list'
	is the (possibly empty) list of tokens split by shlex.
	"""
	invalid = False
	v_split = shlex_split(v)
	if not v_split:
		invalid = True
	elif os.path.isabs(v_split[0]):
		invalid = not os.access(v_split[0], os.EX_OK)
	elif find_binary(v_split[0]) is None:
		invalid = True
	return (not invalid, v_split)
Example 34
def validate_cmd_var(v):
    """
    Validate an environment variable value to see if it
    contains an executable command as the first token.
    returns (valid, token_list) where 'valid' is boolean and 'token_list'
    is the (possibly empty) list of tokens split by shlex.
    """
    invalid = False
    v_split = shlex_split(v)
    if not v_split:
        invalid = True
    elif os.path.isabs(v_split[0]):
        invalid = not os.access(v_split[0], os.EX_OK)
    elif find_binary(v_split[0]) is None:
        invalid = True
    return (not invalid, v_split)
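
Illustrative calls, assuming the shlex_split and find_binary helpers it relies on are in scope; results depend on what is installed and on PATH:

print(validate_cmd_var("sh -c 'echo hi'"))  # (True, ['sh', '-c', 'echo hi']) if sh is found
print(validate_cmd_var(""))                 # (False, [])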
Example 35
def gpgsign(filename, repoman_settings, options):
    gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
    if gpgcmd in [None, ""]:
        raise MissingParameter(
            "PORTAGE_GPG_SIGNING_COMMAND is unset!" " Is make.globals missing?"
        )
    if "${PORTAGE_GPG_KEY}" in gpgcmd and "PORTAGE_GPG_KEY" not in repoman_settings:
        raise MissingParameter("PORTAGE_GPG_KEY is unset!")
    if "${PORTAGE_GPG_DIR}" in gpgcmd:
        if "PORTAGE_GPG_DIR" not in repoman_settings:
            repoman_settings["PORTAGE_GPG_DIR"] = os.path.expanduser("~/.gnupg")
            logging.info(
                "Automatically setting PORTAGE_GPG_DIR to '%s'"
                % repoman_settings["PORTAGE_GPG_DIR"]
            )
        else:
            repoman_settings["PORTAGE_GPG_DIR"] = os.path.expanduser(
                repoman_settings["PORTAGE_GPG_DIR"]
            )
        if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
            raise portage.exception.InvalidLocation(
                "Unable to access directory: PORTAGE_GPG_DIR='%s'"
                % repoman_settings["PORTAGE_GPG_DIR"]
            )
    gpgvars = {"FILE": filename}
    for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
        v = repoman_settings.get(k)
        if v is not None:
            gpgvars[k] = v
    gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
    if options.pretend:
        print("(" + gpgcmd + ")")
    else:
        # Encode unicode manually for bug #310789.
        gpgcmd = portage.util.shlex_split(gpgcmd)

        gpgcmd = [
            _unicode_encode(arg, encoding=_encodings["fs"], errors="strict")
            for arg in gpgcmd
        ]
        rValue = subprocess.call(gpgcmd)
        if rValue == os.EX_OK:
            os.rename(filename + ".asc", filename)
        else:
            raise portage.exception.PortageException(
                "!!! gpg exited with '" + str(rValue) + "' status"
            )
Example 36
	def populate(self, getbinpkgs=0):
		"populates the binarytree"

		if self._populating:
			return

		pkgindex_lock = None
		try:
			if os.access(self.pkgdir, os.W_OK):
				pkgindex_lock = lockfile(self._pkgindex_file,
					wantnewlockfile=1)
			self._populating = True
			self._populate(getbinpkgs)
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
			self._populating = False
Example 37
def editor_is_executable(editor):
	"""
	Given an EDITOR string, validate that it refers to
	an executable. This uses shlex_split() to split the
	first component and do a PATH lookup if necessary.

	@param editor: An EDITOR value from the environment.
	@type: string
	@rtype: bool
	@return: True if an executable is found, False otherwise.
	"""
	editor_split = util.shlex_split(editor)
	if not editor_split:
		return False
	filename = editor_split[0]
	if not os.path.isabs(filename):
		return find_binary(filename) is not None
	return os.access(filename, os.X_OK) and os.path.isfile(filename)
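
Illustrative calls (results depend on the system; util.shlex_split and find_binary come from portage):

print(editor_is_executable("/bin/sh"))  # True if /bin/sh is an executable regular file
print(editor_is_executable("nano"))     # resolved through find_binary() on PATH
print(editor_is_executable(""))         # False: nothing to split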
Example 38
def editor_is_executable(editor):
	"""
	Given an EDITOR string, validate that it refers to
	an executable. This uses shlex_split() to split the
	first component and do a PATH lookup if necessary.

	@param editor: An EDITOR value from the environment.
	@type: string
	@rtype: bool
	@return: True if an executable is found, False otherwise.
	"""
	editor_split = util.shlex_split(editor)
	if not editor_split:
		return False
	filename = editor_split[0]
	if not os.path.isabs(filename):
		return find_binary(filename) is not None
	return os.access(filename, os.X_OK) and os.path.isfile(filename)
Example 39
def pickle_read(filename,default=None,debug=0):
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		return default
	data = None
	try:
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		mypickle = pickle.Unpickler(myf)
		data = mypickle.load()
		myf.close()
		del mypickle,myf
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
	except SystemExit as e:
		raise
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
		data = default
	return data
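
A minimal round trip, assuming the portage helpers pickle_read uses (_unicode_encode, _encodings, writemsg) are importable; the writer below is a plain-pickle sketch, not portage's own serializer:

import os, pickle, tempfile

fd, tmp = tempfile.mkstemp()
with os.fdopen(fd, "wb") as f:
    pickle.dump({"answer": 42}, f)
print(pickle_read(tmp, default={}))               # {'answer': 42}
print(pickle_read(tmp + ".missing", default={}))  # {} (path not readable)
os.unlink(tmp)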
Example 40
def pickle_read(filename,default=None,debug=0):
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		return default
	data = None
	try:
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		mypickle = pickle.Unpickler(myf)
		data = mypickle.load()
		myf.close()
		del mypickle,myf
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
	except SystemExit as e:
		raise
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
		data = default
	return data
Example 41
def xtermTitleReset():
    global default_xterm_title
    if default_xterm_title is None:
        prompt_command = os.environ.get("PROMPT_COMMAND")
        if prompt_command == "":
            default_xterm_title = ""
        elif prompt_command is not None:
            if (
                dotitles
                and "TERM" in os.environ
                and _legal_terms_re.match(os.environ["TERM"]) is not None
                and sys.__stderr__.isatty()
            ):
                from portage.process import find_binary, spawn

                shell = os.environ.get("SHELL")
                if not shell or not os.access(shell, os.EX_OK):
                    shell = find_binary("sh")
                if shell:
                    spawn(
                        [shell, "-c", prompt_command],
                        env=os.environ,
                        fd_pipes={
                            0: portage._get_stdin().fileno(),
                            1: sys.__stderr__.fileno(),
                            2: sys.__stderr__.fileno(),
                        },
                    )
                else:
                    os.system(prompt_command)
            return
        else:
            pwd = os.environ.get("PWD", "")
            home = os.environ.get("HOME", "")
            if home != "" and pwd.startswith(home):
                pwd = "~" + pwd[len(home) :]
            default_xterm_title = "\x1b]0;%s@%s:%s\x07" % (
                os.environ.get("LOGNAME", ""),
                os.environ.get("HOSTNAME", "").split(".", 1)[0],
                pwd,
            )
    xtermTitle(default_xterm_title, raw=True)
Example 42
def bin_entry_point():
    """
    Adjust sys.argv[0] to point to a script in PORTAGE_BIN_PATH, and
    then execute the script, in order to implement entry_points when
    portage has been installed by pip.
    """
    script_path = os.path.join(PORTAGE_BIN_PATH, os.path.basename(sys.argv[0]))
    if os.access(script_path, os.X_OK):
        with open(script_path, "rt") as f:
            shebang = f.readline()
        python_match = re.search(r"/python[\d\.]*\s+([^/]*)\s+$", shebang)
        if python_match:
            sys.argv = [
                os.path.join(os.path.dirname(sys.argv[0]), "python"),
                python_match.group(1),
                script_path,
            ] + sys.argv[1:]
            os.execvp(sys.argv[0], sys.argv)
        sys.argv[0] = script_path
        os.execvp(sys.argv[0], sys.argv)
    else:
        print("File not found:", script_path, file=sys.stderr)
        return 127
Example 43
	def populate(self, getbinpkgs=0, getbinpkgsonly=None):
		"populates the binarytree"

		if getbinpkgsonly is not None:
			warnings.warn(
				"portage.dbapi.bintree.binarytree.populate(): " + \
				"getbinpkgsonly parameter is deprecated",
				DeprecationWarning, stacklevel=2)

		if self._populating:
			return
		from portage.locks import lockfile, unlockfile
		pkgindex_lock = None
		try:
			if os.access(self.pkgdir, os.W_OK):
				pkgindex_lock = lockfile(self._pkgindex_file,
					wantnewlockfile=1)
			self._populating = True
			self._populate(getbinpkgs)
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
			self._populating = False
Example 44
	def __init__(self, settings, logger):
		self.settings = settings
		self.logger = logger
		# Similar to emerge, sync needs a default umask so that created
		# files have sane permissions.
		os.umask(0o22)

		self.module_controller = portage.sync.module_controller
		self.module_names = self.module_controller.module_names
		self.hooks = {}
		for _dir in ["repo.postsync.d", "postsync.d"]:
			postsync_dir = os.path.join(self.settings["PORTAGE_CONFIGROOT"],
				portage.USER_CONFIG_PATH, _dir)
			hooks = OrderedDict()
			for filepath in util._recursive_file_list(postsync_dir):
				name = filepath.split(postsync_dir)[1].lstrip(os.sep)
				if os.access(filepath, os.X_OK):
					hooks[filepath] = name
				else:
					writemsg_level(" %s %s hook: '%s' is not executable\n"
						% (warn("*"), _dir, _unicode_decode(name),),
						level=logging.WARN, noiselevel=2)
			self.hooks[_dir] = hooks
Example 45
def get_glsa_list(myconfig):
    """
    Returns a list of all available GLSAs in the given repository
    by comparing the filelist there with the pattern described in
    the config.

    @type	myconfig: portage.config
    @param	myconfig: Portage settings instance

    @rtype:		List of Strings
    @return:	a list of GLSA IDs in this repository
    """

    repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")
    if "GLSA_DIR" in myconfig:
        repository = myconfig["GLSA_DIR"]

    if not os.access(repository, os.R_OK):
        return []
    dirlist = os.listdir(repository)
    prefix = "glsa-"
    prefix_size = len(prefix)
    suffix = ".xml"
    suffix_size = len(suffix)

    def check(value):
        try:
            if value[:prefix_size] == prefix and value[-suffix_size:] == suffix:
                return value[prefix_size:-suffix_size]
        except IndexError:
            return None
        return None

    checked_dirlist = (check(f) for f in dirlist)
    rValue = [f for f in checked_dirlist if f]
    return rValue
Example 46
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
    root = trees._running_eroot
    mysettings = trees[root]["vartree"].settings
    portdb = trees[root]["porttree"].dbapi
    vardb = trees[root]["vartree"].dbapi
    bindb = trees[root]["bintree"].dbapi

    world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
    world_list = grabfile(world_file)
    world_modified = False
    world_warnings = set()
    updpath_map = {}
    # Maps repo_name to list of updates. If a given repo has no updates
    # directory, it will be omitted. If a repo has an updates directory
    # but none need to be applied (according to timestamp logic), the
    # value in the dict will be an empty list.
    repo_map = {}
    timestamps = {}

    retupd = False
    update_notice_printed = False
    for repo_name in portdb.getRepositories():
        repo = portdb.getRepositoryPath(repo_name)
        updpath = os.path.join(repo, "profiles", "updates")
        if not os.path.isdir(updpath):
            continue

        if updpath in updpath_map:
            repo_map[repo_name] = updpath_map[updpath]
            continue

        try:
            if if_mtime_changed:
                update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
            else:
                update_data = grab_updates(updpath)
        except DirectoryNotFound:
            continue
        myupd = []
        updpath_map[updpath] = myupd
        repo_map[repo_name] = myupd
        if len(update_data) > 0:
            for mykey, mystat, mycontent in update_data:
                if not update_notice_printed:
                    update_notice_printed = True
                    writemsg_stdout("\n")
                    writemsg_stdout(
                        colorize("GOOD", _("Performing Global Updates\n")))
                    writemsg_stdout(
                        _("(Could take a couple of minutes if you have a lot of binary packages.)\n"
                          ))
                    if not quiet:
                        writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
                         "%s='/var/db update'  %s='/var/db move'\n"
                         "  %s='/var/db SLOT move'  %s='binary move'  "
                         "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
                         (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
                valid_updates, errors = parse_updates(mycontent)
                myupd.extend(valid_updates)
                if not quiet:
                    writemsg_stdout(bold(mykey))
                    writemsg_stdout(len(valid_updates) * "." + "\n")
                if len(errors) == 0:
                    # Update our internal mtime since we
                    # processed all of our directives.
                    timestamps[mykey] = mystat[stat.ST_MTIME]
                else:
                    for msg in errors:
                        writemsg("%s\n" % msg, noiselevel=-1)
            if myupd:
                retupd = True

    if retupd:
        if os.access(bindb.bintree.pkgdir, os.W_OK):
            # Call binarytree.populate(), since we want to make sure it's
            # only populated with local packages here (getbinpkgs=0).
            bindb.bintree.populate()
        else:
            bindb = None

    master_repo = portdb.repositories.mainRepo()
    if master_repo is not None:
        master_repo = master_repo.name
    if master_repo in repo_map:
        repo_map['DEFAULT'] = repo_map[master_repo]

    for repo_name, myupd in repo_map.items():
        if repo_name == 'DEFAULT':
            continue
        if not myupd:
            continue

        def repo_match(repository):
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        def _world_repo_match(atoma, atomb):
            """
            Check whether to perform a world change from atoma to atomb.
            If the best vardb match for atoma comes from the same repository
            as the update file, allow that. Additionally, if portdb can
            still find a match for the old atom name, warn about that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
            if matches and \
             repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
                if portdb.match(atoma):
                    world_warnings.add((atoma, atomb))
                return True
            else:
                return False

        for update_cmd in myupd:
            for pos, atom in enumerate(world_list):
                new_atom = update_dbentry(update_cmd, atom)
                if atom != new_atom:
                    if _world_repo_match(atom, new_atom):
                        world_list[pos] = new_atom
                        world_modified = True

        for update_cmd in myupd:
            if update_cmd[0] == "move":
                moves = vardb.move_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "@")
                if bindb:
                    moves = bindb.move_ent(update_cmd, repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "%")
            elif update_cmd[0] == "slotmove":
                moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "s")
                if bindb:
                    moves = bindb.move_slot_ent(update_cmd,
                                                repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "S")

    if world_modified:
        world_list.sort()
        write_atomic(world_file, "".join("%s\n" % (x, ) for x in world_list))
        if world_warnings:
            # XXX: print warning that we've updated world entries
            # and the old name still matches something (from an overlay)?
            pass

    if retupd:

        def _config_repo_match(repo_name, atoma, atomb):
            """
            Check whether to perform a world change from atoma to atomb.
            If the best vardb match for atoma comes from the same repository
            as the update file, allow that. Additionally, if portdb can
            still find a match for the old atom name, warn about that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
                if not matches:
                    return False
            repository = vardb.aux_get(best(matches), ['repository'])[0]
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        update_config_files(root,
                            shlex_split(mysettings.get("CONFIG_PROTECT", "")),
                            shlex_split(
                                mysettings.get("CONFIG_PROTECT_MASK", "")),
                            repo_map,
                            match_callback=_config_repo_match,
                            case_insensitive="case-insensitive-fs"
                            in mysettings.features)

        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if timestamps:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.items():
                prev_mtimes[mykey] = mtime

        do_upgrade_packagesmessage = False
        # We gotta do the brute force updates for these now.
        if True:

            def onUpdate(_maxval, curval):
                if curval > 0:
                    writemsg_stdout("#")

            if quiet:
                onUpdate = None
            vardb.update_ents(repo_map, onUpdate=onUpdate)
            if bindb:

                def onUpdate(_maxval, curval):
                    if curval > 0:
                        writemsg_stdout("*")

                if quiet:
                    onUpdate = None
                bindb.update_ents(repo_map, onUpdate=onUpdate)
        else:
            do_upgrade_packagesmessage = 1

        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        # what follows.
        writemsg_stdout("\n\n")

        if do_upgrade_packagesmessage and bindb and \
         bindb.cpv_all():
            writemsg_stdout(
                _(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"
                  ))
            writemsg_stdout(bold(_("Note: This can take a very long time.")))
            writemsg_stdout("\n")

    return retupd
Example 47
def spawn_nofetch(portdb, ebuild_path, settings=None):
	"""
	This spawns pkg_nofetch if appropriate. The settings parameter
	is useful only if setcpv has already been called in order
	to cache metadata. It will be cloned internally, in order to
	prevent any changes from interfering with the calling code.
	If settings is None then a suitable config instance will be
	acquired from the given portdbapi instance.

	A private PORTAGE_BUILDDIR will be created and cleaned up, in
	order to avoid any interference with any other processes.
	If PORTAGE_TMPDIR is writable, that will be used, otherwise
	the default directory for the tempfile module will be used.

	We only call the pkg_nofetch phase if either RESTRICT=fetch
	is set or the package has explicitly overridden the default
	pkg_nofetch implementation. This allows specialized messages
	to be displayed for problematic packages even though they do
	not set RESTRICT=fetch (bug #336499).

	This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
	variable is set in the config instance.
	"""

	if settings is None:
		settings = config(clone=portdb.settings)
	else:
		settings = config(clone=settings)

	if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
		return

	# We must create our private PORTAGE_TMPDIR before calling
	# doebuild_environment(), since lots of variables such
	# as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
	portage_tmpdir = settings.get('PORTAGE_TMPDIR')
	if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
		portage_tmpdir = None
	private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
	settings['PORTAGE_TMPDIR'] = private_tmpdir
	settings.backup_changes('PORTAGE_TMPDIR')
	# private temp dir was just created, so it's not locked yet
	settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

	try:
		doebuild_environment(ebuild_path, 'nofetch',
			settings=settings, db=portdb)
		restrict = settings['PORTAGE_RESTRICT'].split()
		defined_phases = settings['DEFINED_PHASES'].split()
		if not defined_phases:
			# When DEFINED_PHASES is undefined, assume all
			# phases are defined.
			defined_phases = EBUILD_PHASES

		if 'fetch' not in restrict and \
			'nofetch' not in defined_phases:
			return

		prepare_build_dirs(settings=settings)
		ebuild_phase = EbuildPhase(background=False,
			phase='nofetch', scheduler=PollScheduler().sched_iface,
			settings=settings)
		ebuild_phase.start()
		ebuild_phase.wait()
		elog_process(settings.mycpv, settings)
	finally:
		shutil.rmtree(private_tmpdir)
Example 48
	def __init__(self, _unused_param=None, mysettings=None):
		"""
		@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
		@type _unused_param: None
		@param mysettings: an immutable config instance
		@type mysettings: portage.config
		"""
		portdbapi.portdbapi_instances.append(self)

		from portage import config
		if mysettings:
			self.settings = mysettings
		else:
			from portage import settings
			self.settings = config(clone=settings)

		porttree_root = self.settings['PORTDIR']

		# always show this warning after this parameter
		# is unused in stable portage
		if _unused_param is not None and _unused_param != porttree_root:
			warnings.warn("The first parameter of the " + \
				"portage.dbapi.porttree.portdbapi" + \
				" constructor is now unused. Use " + \
				"mysettings['PORTDIR'] instead.",
				DeprecationWarning, stacklevel=2)

		# This is strictly for use in aux_get() doebuild calls when metadata
		# is generated by the depend phase.  It's safest to use a clone for
		# this purpose because doebuild makes many changes to the config
		# instance that is passed in.
		self.doebuild_settings = config(clone=self.settings)
		self.depcachedir = os.path.realpath(self.settings.depcachedir)

		if os.environ.get("SANDBOX_ON") == "1":
			# Make api consumers exempt from sandbox violations
			# when doing metadata cache updates.
			sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
			if self.depcachedir not in sandbox_write:
				sandbox_write.append(self.depcachedir)
				os.environ["SANDBOX_WRITE"] = \
					":".join(filter(None, sandbox_write))

		porttrees = [os.path.realpath(porttree_root)]
		porttrees.extend(os.path.realpath(x) for x in \
			shlex_split(self.settings.get('PORTDIR_OVERLAY', '')))
		treemap = {}
		repository_map = {}
		self.treemap = treemap
		self._repository_map = repository_map
		identically_named_paths = {}
		for path in porttrees:
			if path in repository_map:
				continue
			repo_name_path = os.path.join(path, REPO_NAME_LOC)
			try:
				repo_name = codecs.open(
					_unicode_encode(repo_name_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace').readline().strip()
			except EnvironmentError:
				# warn about missing repo_name at some other time, since we
				# don't want to see a warning every time the portage module is
				# imported.
				pass
			else:
				identically_named_path = treemap.get(repo_name)
				if identically_named_path is not None:
					# The earlier one is discarded.
					del repository_map[identically_named_path]
					identically_named_paths[identically_named_path] = repo_name
					if identically_named_path == porttrees[0]:
						# Found another repo with the same name as
						# $PORTDIR, so update porttrees[0] to match.
						porttrees[0] = path
				treemap[repo_name] = path
				repository_map[path] = repo_name

		# Ensure that each repo_name is unique. Later paths override
		# earlier ones that correspond to the same name.
		porttrees = [x for x in porttrees if x not in identically_named_paths]
		ignored_map = {}
		for path, repo_name in identically_named_paths.items():
			ignored_map.setdefault(repo_name, []).append(path)
		self._ignored_repos = tuple((repo_name, tuple(paths)) \
			for repo_name, paths in ignored_map.items())

		self.porttrees = porttrees
		porttree_root = porttrees[0]
		self.porttree_root = porttree_root

		self.eclassdb = eclass_cache.cache(porttree_root)

		# This is used as sanity check for aux_get(). If there is no
		# root eclass dir, we assume that PORTDIR is invalid or
		# missing. This check allows aux_get() to detect a missing
		# portage tree and return early by raising a KeyError.
		self._have_root_eclass_dir = os.path.isdir(
			os.path.join(self.porttree_root, "eclass"))

		self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule")

		# If the portdbapi is "frozen", then we assume that we can
		# cache everything (that no updates to it are happening)
		self.xcache = {}
		self.frozen = 0

		self._repo_info = {}
		eclass_dbs = {porttree_root : self.eclassdb}
		local_repo_configs = self.settings._local_repo_configs
		default_loc_repo_config = None
		repo_aliases = {}
		if local_repo_configs is not None:
			default_loc_repo_config = local_repo_configs.get('DEFAULT')
			for repo_name, loc_repo_conf in local_repo_configs.items():
				if loc_repo_conf.aliases is not None:
					for alias in loc_repo_conf.aliases:
						overridden_alias = repo_aliases.get(alias)
						if overridden_alias is not None:
							writemsg_level(_("!!! Alias '%s' " \
								"created for '%s' overrides " \
								"'%s' alias in " \
								"'%s'\n") % (alias, repo_name,
								overridden_alias,
								self.settings._local_repo_conf_path),
								level=logging.WARNING, noiselevel=-1)
						repo_aliases[alias] = repo_name

		for path in self.porttrees:
			if path in self._repo_info:
				continue

			repo_name = self._repository_map.get(path)

			loc_repo_conf = None
			if local_repo_configs is not None:
				if repo_name is not None:
					loc_repo_conf = local_repo_configs.get(repo_name)
				if loc_repo_conf is None:
					loc_repo_conf = default_loc_repo_config

			layout_filename = os.path.join(path, "metadata/layout.conf")
			layout_file = KeyValuePairFileLoader(layout_filename, None, None)
			layout_data, layout_errors = layout_file.load()
			porttrees = []

			masters = None
			if loc_repo_conf is not None and \
				loc_repo_conf.masters is not None:
				masters = loc_repo_conf.masters
			else:
				masters = layout_data.get('masters', '').split()

			for master_name in masters:
				master_name = repo_aliases.get(master_name, master_name)
				master_path = self.treemap.get(master_name)
				if master_path is None:
					writemsg_level(_("Unavailable repository '%s' " \
						"referenced by masters entry in '%s'\n") % \
						(master_name, layout_filename),
						level=logging.ERROR, noiselevel=-1)
				else:
					porttrees.append(master_path)

			if not porttrees and path != porttree_root:
				# Make PORTDIR the default master, but only if our
				# heuristics suggest that it's necessary.
				profiles_desc = os.path.join(path, 'profiles', 'profiles.desc')
				eclass_dir = os.path.join(path, 'eclass')
				if not os.path.isfile(profiles_desc) or \
					not os.path.isdir(eclass_dir):
					porttrees.append(porttree_root)

			porttrees.append(path)

			if loc_repo_conf is not None and \
					loc_repo_conf.eclass_overrides is not None:
					for other_name in loc_repo_conf.eclass_overrides:
						other_path = self.treemap.get(other_name)
						if other_path is None:
							writemsg_level(_("Unavailable repository '%s' " \
								"referenced by eclass-overrides entry in " \
								"'%s'\n") % (other_name,
								self.settings._local_repo_conf_path),
								level=logging.ERROR, noiselevel=-1)
							continue
						porttrees.append(other_path)

			eclass_db = None
			for porttree in porttrees:
				tree_db = eclass_dbs.get(porttree)
				if tree_db is None:
					tree_db = eclass_cache.cache(porttree)
					eclass_dbs[porttree] = tree_db
				if eclass_db is None:
					eclass_db = tree_db.copy()
				else:
					eclass_db.append(tree_db)

			self._repo_info[path] = _repo_info(repo_name, path, eclass_db)

		self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
		self.auxdb = {}
		self._pregen_auxdb = {}
		self._init_cache_dirs()
		depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
		cache_kwargs = {
			'gid'     : portage_gid,
			'perms'   : 0o664
		}

		if secpass < 1:
			# portage_gid is irrelevant, so just obey umask
			cache_kwargs['gid']   = -1
			cache_kwargs['perms'] = -1

		# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
		# ~harring
		filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")]
		filtered_auxdbkeys.sort()
		from portage.cache import metadata_overlay, volatile
		if not depcachedir_w_ok:
			for x in self.porttrees:
				db_ro = self.auxdbmodule(self.depcachedir, x,
					filtered_auxdbkeys, gid=portage_gid, readonly=True)
				self.auxdb[x] = metadata_overlay.database(
					self.depcachedir, x, filtered_auxdbkeys,
					gid=portage_gid, db_rw=volatile.database,
					db_ro=db_ro)
		else:
			for x in self.porttrees:
				if x in self.auxdb:
					continue
				# location, label, auxdbkeys
				self.auxdb[x] = self.auxdbmodule(
					self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs)
				if self.auxdbmodule is metadata_overlay.database:
					self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db
		if "metadata-transfer" not in self.settings.features:
			for x in self.porttrees:
				if x in self._pregen_auxdb:
					continue
				if os.path.isdir(os.path.join(x, "metadata", "cache")):
					self._pregen_auxdb[x] = self.metadbmodule(
						x, "metadata/cache", filtered_auxdbkeys, readonly=True)
					try:
						self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db
					except AttributeError:
						pass
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(
			["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
			"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
			"RESTRICT", "SLOT", "DEFINED_PHASES"])

		self._aux_cache = {}
		self._broken_ebuilds = set()
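The repo_name collision handling in the constructor above reduces to a simple rule: when two trees declare the same repo_name, the later path wins and the earlier one is recorded as ignored. A simplified, hypothetical re-implementation of just that rule (it omits the porttrees[0] special case):

def resolve_repo_names(paths_with_names):
	"""paths_with_names: (path, repo_name) pairs in priority order,
	lowest priority first. Later paths override earlier ones that
	declare the same repo_name."""
	treemap = {}
	ignored = {}
	for path, repo_name in paths_with_names:
		previous = treemap.get(repo_name)
		if previous is not None:
			# The earlier path with this name is discarded.
			ignored.setdefault(repo_name, []).append(previous)
		treemap[repo_name] = path
	return treemap, ignored

# resolve_repo_names([("/usr/portage", "gentoo"),
#	("/var/overlay", "gentoo")])
# -> ({"gentoo": "/var/overlay"}, {"gentoo": ["/usr/portage"]})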
Example no. 49
	def updateItems(self, repoid):
		"""
		Figure out which news items from NEWS_PATH are both unread and relevant to
		the user (according to the GLEP 42 standards of relevancy).  Then add these
		items into the news.repoid.unread file.
		"""

		# Ensure that the unread path exists and is writable.

		try:
			ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
				mode=self._dir_mode, mask=self._mode_mask)
		except (OperationNotPermitted, PermissionDenied):
			return

		if not os.access(self.unread_path, os.W_OK):
			return

		news_dir = self._news_dir(repoid)
		try:
			news = _os.listdir(_unicode_encode(news_dir,
				encoding=_encodings['fs'], errors='strict'))
		except OSError:
			return

		skip_filename = self._skip_filename(repoid)
		unread_filename = self._unread_filename(repoid)
		unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		try:
			try:
				unread = set(grabfile(unread_filename))
				unread_orig = unread.copy()
				skip = set(grabfile(skip_filename))
				skip_orig = skip.copy()
			except PermissionDenied:
				return

			for itemid in news:
				try:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='replace')
					writemsg_level(
						_("!!! Invalid encoding in news item name: '%s'\n") % \
						itemid, level=logging.ERROR, noiselevel=-1)
					continue

				if itemid in skip:
					continue
				filename = os.path.join(news_dir, itemid,
					itemid + "." + self.language_id + ".txt")
				if not os.path.isfile(filename):
					continue
				item = NewsItem(filename, itemid)
				if not item.isValid():
					continue
				if item.isRelevant(profile=self._profile_path,
					config=self.config, vardb=self.vdb):
					unread.add(item.name)
					skip.add(item.name)

			if unread != unread_orig:
				write_atomic(unread_filename,
					"".join("%s\n" % x for x in sorted(unread)))
				apply_secpass_permissions(unread_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

			if skip != skip_orig:
				write_atomic(skip_filename,
					"".join("%s\n" % x for x in sorted(skip)))
				apply_secpass_permissions(skip_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

		finally:
			unlockfile(unread_lock)
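The unread/skip files above are rewritten with write_atomic() so that readers never see a half-written list. A rough standalone equivalent of that write pattern (a sketch, not the Portage implementation) is a temp file in the same directory followed by an atomic rename:

import os
import tempfile

def write_atomic_sketch(filename, data):
	# Write to a temp file in the target directory, then rename it
	# over the destination; on POSIX the rename is atomic, so a
	# concurrent reader sees either the old or the new contents.
	dirname = os.path.dirname(filename) or "."
	fd, tmp_path = tempfile.mkstemp(dir=dirname)
	try:
		with os.fdopen(fd, "w") as f:
			f.write(data)
		os.rename(tmp_path, filename)
	except BaseException:
		os.unlink(tmp_path)
		raise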
Example no. 50
def _prepare_workdir(mysettings):
	workdir_mode = 0o700
	try:
		mode = mysettings["PORTAGE_WORKDIR_MODE"]
		if mode.isdigit():
			parsed_mode = int(mode, 8)
		elif mode == "":
			raise KeyError()
		else:
			raise ValueError()
		if parsed_mode & 0o7777 != parsed_mode:
			raise ValueError("Invalid file mode: %s" % mode)
		else:
			workdir_mode = parsed_mode
	except KeyError as e:
		writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
	except ValueError as e:
		if len(str(e)) > 0:
			writemsg("%s\n" % e)
		writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
		(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
	try:
		apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except FileNotFound:
		pass # ebuild.sh will create it

	if mysettings.get("PORTAGE_LOGDIR", "") == "":
		while "PORTAGE_LOGDIR" in mysettings:
			del mysettings["PORTAGE_LOGDIR"]
	if "PORTAGE_LOGDIR" in mysettings:
		try:
			modified = ensure_dirs(mysettings["PORTAGE_LOGDIR"])
			if modified:
				# Only initialize group/mode if the directory doesn't
				# exist, so that we don't override permissions if they
				# were previously set by the administrator.
				# NOTE: These permissions should be compatible with our
				# default logrotate config as discussed in bug 374287.
				apply_secpass_permissions(mysettings["PORTAGE_LOGDIR"],
					uid=portage_uid, gid=portage_gid, mode=0o2770)
		except PortageException as e:
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg(_("!!! Permission issues with PORTAGE_LOGDIR='%s'\n") % \
				mysettings["PORTAGE_LOGDIR"], noiselevel=-1)
			writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
			while "PORTAGE_LOGDIR" in mysettings:
				del mysettings["PORTAGE_LOGDIR"]

	compress_log_ext = ''
	if 'compress-build-logs' in mysettings.features:
		compress_log_ext = '.gz'

	logdir_subdir_ok = False
	if "PORTAGE_LOGDIR" in mysettings and \
		os.access(mysettings["PORTAGE_LOGDIR"], os.W_OK):
		logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			open(_unicode_encode(logid_path), 'w').close()
		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime)),
			encoding=_encodings['content'], errors='replace')

		if "split-log" in mysettings.features:
			log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				log_subdir, "%s:%s.log%s" %
				(mysettings["PF"], logid_time, compress_log_ext))
		else:
			log_subdir = logdir
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				logdir, "%s:%s:%s.log%s" % \
				(mysettings["CATEGORY"], mysettings["PF"], logid_time,
				compress_log_ext))

		if log_subdir is logdir:
			logdir_subdir_ok = True
		else:
			try:
				_ensure_log_subdirs(logdir, log_subdir)
			except PortageException as e:
				writemsg("!!! %s\n" % (e,), noiselevel=-1)

			if os.access(log_subdir, os.W_OK):
				logdir_subdir_ok = True
			else:
				writemsg("!!! %s: %s\n" %
					(_("Permission Denied"), log_subdir), noiselevel=-1)

	tmpdir_log_path = os.path.join(
		mysettings["T"], "build.log%s" % compress_log_ext)
	if not logdir_subdir_ok:
		# NOTE: When sesandbox is enabled, the local SELinux security policies
		# may not allow output to be piped out of the sesandbox domain. The
		# current policy will allow it to work when a pty is available, but
		# not through a normal pipe. See bug #162404.
		mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
	else:
		# Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
		# requested in bug #412865.
		make_new_symlink = False
		try:
			target = os.readlink(tmpdir_log_path)
		except OSError:
			make_new_symlink = True
		else:
			if target != mysettings["PORTAGE_LOG_FILE"]:
				make_new_symlink = True
		if make_new_symlink:
			try:
				os.unlink(tmpdir_log_path)
			except OSError:
				pass
			os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
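The PORTAGE_WORKDIR_MODE handling above boils down to: accept only a digit string, interpret it as octal, and reject values with bits outside 0o7777. A condensed sketch with the same semantics (the helper name is hypothetical):

def parse_workdir_mode(value, default=0o700):
	if not value:
		# Unset or empty: fall back to the default.
		return default
	if not value.isdigit():
		raise ValueError("Invalid file mode: %s" % value)
	parsed = int(value, 8)
	if parsed & 0o7777 != parsed:
		# Bits outside the permission/setuid/setgid/sticky range.
		raise ValueError("Invalid file mode: %s" % value)
	return parsed

# parse_workdir_mode("2770") -> 1528 (0o2770)
# parse_workdir_mode("") -> 448 (0o700)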
Example no. 51
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]
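	# Worked example: the default "350K" yields 350 * 2**10 = 358400
	# bytes, assuming _size_suffix_map maps "K" to 10, "M" to 20,
	# and so on.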

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for location in locations:
				filedict[myfile].append(location + "/distfiles/" + myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

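			# Note: statvfs() reports f_bavail as blocks available to
			# unprivileged processes, while f_bfree also counts the
			# root-reserved blocks; the two comparisons below use this
			# to distinguish "no space at all" from "space left only
			# for the superuser".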
			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception? The file exists. Let digestcheck()
							# report size or checksum errors appropriately.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE] < 100000 and
									len(myfile) > 4 and
									not (myfile[-5:] == ".html" or
									myfile[-4:] == ".htm")):
									html404 = re.compile(
										"<title>.*(not found|404).*</title>",
										re.I | re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests!=None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
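A condensed sketch of the mirror:// expansion performed above: split off the mirror name, consult user-defined mirrors first and the official third-party list second, and build one candidate URI per mirror host. The dictionary contents in the usage note are illustrative.

def expand_mirror_uri(myuri, custommirrors, thirdpartymirrors):
	assert myuri.startswith("mirror://")
	eidx = myuri.find("/", 9)
	if eidx == -1:
		raise ValueError("Invalid mirror definition: %s" % myuri)
	mirrorname = myuri[9:eidx]
	path = myuri[eidx + 1:]
	uris = []
	# User-defined mirrors take priority over the official list.
	for base in custommirrors.get(mirrorname, []):
		uris.append(base.rstrip("/") + "/" + path)
	for base in thirdpartymirrors.get(mirrorname, []):
		uris.append(base.rstrip("/") + "/" + path)
	return uris

# expand_mirror_uri("mirror://gnu/bash/bash-5.2.tar.gz",
#	{}, {"gnu": ["https://ftpmirror.gnu.org"]})
# -> ["https://ftpmirror.gnu.org/bash/bash-5.2.tar.gz"]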
Example no. 52
def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
	"""(baseurl,conn,chunk_size,verbose) -- 
	"""

	warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
		DeprecationWarning, stacklevel=2)

	if not conn:
		keepconnection = 0
	else:
		keepconnection = 1

	cache_path = "/var/cache/edb"
	metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')

	if makepickle is None:
		makepickle = "/var/cache/edb/metadata.idx.most_recent"

	try:
		conn, protocol, address, params, headers = create_conn(baseurl, conn)
	except _all_errors as e:
		# ftplib.FTP(host) can raise errors like this:
		#   socket.error: (111, 'Connection refused')
		sys.stderr.write("!!! %s\n" % (e,))
		return {}

	out = sys.stdout
	try:
		metadatafile = open(_unicode_encode(metadatafilename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		mypickle = pickle.Unpickler(metadatafile)
		try:
			mypickle.find_global = None
		except AttributeError:
			# TODO: If py3k, override Unpickler.find_class().
			pass
		metadata = mypickle.load()
		out.write(_("Loaded metadata pickle.\n"))
		out.flush()
		metadatafile.close()
	except (SystemExit, KeyboardInterrupt):
		raise
	except Exception:
		metadata = {}
	if baseurl not in metadata:
		metadata[baseurl]={}
	if "indexname" not in metadata[baseurl]:
		metadata[baseurl]["indexname"]=""
	if "timestamp" not in metadata[baseurl]:
		metadata[baseurl]["timestamp"]=0
	if "unmodified" not in metadata[baseurl]:
		metadata[baseurl]["unmodified"]=0
	if "data" not in metadata[baseurl]:
		metadata[baseurl]["data"]={}

	if not os.access(cache_path, os.W_OK):
		sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
		sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
		return metadata[baseurl]["data"]

	import portage.exception
	try:
		filelist = dir_get_list(baseurl, conn)
	except portage.exception.PortageException as e:
		sys.stderr.write(_("!!! Error connecting to '%s'.\n") %
			_hide_url_passwd(baseurl))
		sys.stderr.write("!!! %s\n" % str(e))
		del e
		return metadata[baseurl]["data"]
	tbz2list = match_in_array(filelist, suffix=".tbz2")
	metalist = match_in_array(filelist, prefix="metadata.idx")
	del filelist
	
	# Determine if our metadata file is current.
	metalist.sort()
	metalist.reverse() # makes the order new-to-old.
	for mfile in metalist:
		if usingcache and \
		   ((metadata[baseurl]["indexname"] != mfile) or \
			  (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
			# Try to download new cache until we succeed on one.
			data=""
			for trynum in [1,2,3]:
				mytempfile = tempfile.TemporaryFile()
				try:
					file_get(baseurl+"/"+mfile, mytempfile, conn)
					if mytempfile.tell() > len(data):
						mytempfile.seek(0)
						data = mytempfile.read()
				except ValueError as e:
					sys.stderr.write("--- "+str(e)+"\n")
					if trynum < 3:
						sys.stderr.write(_("Retrying...\n"))
					sys.stderr.flush()
					mytempfile.close()
					continue
				if match_in_array([mfile],suffix=".gz"):
					out.write("gzip'd\n")
					out.flush()
					try:
						import gzip
						mytempfile.seek(0)
						gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
						data = gzindex.read()
					except SystemExit as e:
						raise
					except Exception as e:
						mytempfile.close()
						sys.stderr.write(_("!!! Failed to use gzip: ")+str(e)+"\n")
						sys.stderr.flush()
					mytempfile.close()
				try:
					metadata[baseurl]["data"] = pickle.loads(data)
					del data
					metadata[baseurl]["indexname"] = mfile
					metadata[baseurl]["timestamp"] = int(time.time())
					metadata[baseurl]["modified"]  = 0 # It's not, right after download.
					out.write(_("Pickle loaded.\n"))
					out.flush()
					break
				except SystemExit as e:
					raise
				except Exception as e:
					sys.stderr.write(_("!!! Failed to read data from index: ")+str(mfile)+"\n")
					sys.stderr.write("!!! "+str(e)+"\n")
					sys.stderr.flush()
			try:
				metadatafile = open(_unicode_encode(metadatafilename,
					encoding=_encodings['fs'], errors='strict'), 'wb')
				pickle.dump(metadata, metadatafile, protocol=2)
				metadatafile.close()
			except SystemExit as e:
				raise
			except Exception as e:
				sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
				sys.stderr.write("!!! "+str(e)+"\n")
				sys.stderr.flush()
			break
	# We may have metadata... now we run through the tbz2 list and check.

	class CacheStats(object):
		from time import time
		def __init__(self, out):
			self.misses = 0
			self.hits = 0
			self.last_update = 0
			self.out = out
			self.min_display_latency = 0.2
		def update(self):
			cur_time = self.time()
			if cur_time - self.last_update >= self.min_display_latency:
				self.last_update = cur_time
				self.display()
		def display(self):
			self.out.write("\r"+colorize("WARN",
				_("cache miss: '")+str(self.misses)+"'") + \
				" --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
			self.out.flush()

	cache_stats = CacheStats(out)
	have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
	if have_tty:
		cache_stats.display()
	binpkg_filenames = set()
	for x in tbz2list:
		x = os.path.basename(x)
		binpkg_filenames.add(x)
		if x not in metadata[baseurl]["data"]:
			cache_stats.misses += 1
			if have_tty:
				cache_stats.update()
			metadata[baseurl]["modified"] = 1
			myid = None
			for retry in range(3):
				try:
					myid = file_get_metadata(
						"/".join((baseurl.rstrip("/"), x.lstrip("/"))),
						conn, chunk_size)
					break
				except http_client_BadStatusLine:
					# Sometimes this error is thrown from conn.getresponse() in
					# make_http_request().  The docstring for this error in
					# httplib.py says "Presumably, the server closed the
					# connection before sending a valid response".
					conn, protocol, address, params, headers = create_conn(
						baseurl)
				except http_client_ResponseNotReady:
					# With some http servers this error is known to be thrown
					# from conn.getresponse() in make_http_request() when the
					# remote file does not have appropriate read permissions.
					# Maybe it's possible to recover from this exception in
					# some cases though, so retry.
					conn, protocol, address, params, headers = create_conn(
						baseurl)

			if myid and myid[0]:
				metadata[baseurl]["data"][x] = make_metadata_dict(myid)
			elif verbose:
				sys.stderr.write(colorize("BAD",
					_("!!! Failed to retrieve metadata on: "))+str(x)+"\n")
				sys.stderr.flush()
		else:
			cache_stats.hits += 1
			if have_tty:
				cache_stats.update()
	cache_stats.display()
	# Cleanse stale cache for files that don't exist on the server anymore.
	stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
	if stale_cache:
		for x in stale_cache:
			del metadata[baseurl]["data"][x]
		metadata[baseurl]["modified"] = 1
	del stale_cache
	del binpkg_filenames
	out.write("\n")
	out.flush()

	try:
		if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
			metadata[baseurl]["timestamp"] = int(time.time())
			metadatafile = open(_unicode_encode(metadatafilename,
				encoding=_encodings['fs'], errors='strict'), 'wb')
			pickle.dump(metadata, metadatafile, protocol=2)
			metadatafile.close()
		if makepickle:
			metadatafile = open(_unicode_encode(makepickle,
				encoding=_encodings['fs'], errors='strict'), 'wb')
			pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
			metadatafile.close()
	except SystemExit as e:
		raise
	except Exception as e:
		sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
		sys.stderr.write("!!! "+str(e)+"\n")
		sys.stderr.flush()

	if not keepconnection:
		conn.close()
	
	return metadata[baseurl]["data"]
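The find_global assignment above neutralizes global lookups in the Python 2 pickle module, and the TODO notes the Python 3 equivalent: overriding Unpickler.find_class(). A minimal sketch of that approach (not Portage's code) which rejects every global, so a hostile pickle cannot instantiate arbitrary objects:

import io
import pickle

class _RestrictedUnpickler(pickle.Unpickler):
	def find_class(self, module, name):
		# Refuse to resolve any global reference.
		raise pickle.UnpicklingError(
			"global '%s.%s' is forbidden" % (module, name))

def restricted_loads(data):
	return _RestrictedUnpickler(io.BytesIO(data)).load()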
Example no. 53
def emirrordist_main(args):

	# The calling environment is ignored, so the program is
	# completely controlled by commandline arguments.
	env = {}

	if not sys.stdout.isatty():
		portage.output.nocolor()
		env['NOCOLOR'] = 'true'

	parser, options, args = parse_args(args)

	if options.version:
		sys.stdout.write("Portage %s\n" % portage.VERSION)
		return os.EX_OK

	config_root = options.config_root

	if options.repositories_configuration is not None:
		env['PORTAGE_REPOSITORIES'] = options.repositories_configuration

	settings = portage.config(config_root=config_root,
		local_config=False, env=env)

	default_opts = None
	if not options.ignore_default_opts:
		default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()

	if default_opts:
		parser, options, args = parse_args(default_opts + args)

		settings = portage.config(config_root=config_root,
			local_config=False, env=env)

	if options.repo is None:
		if len(settings.repositories.prepos) == 2:
			for repo in settings.repositories:
				if repo.name != "DEFAULT":
					options.repo = repo.name
					break

		if options.repo is None:
			parser.error("--repo option is required")

	repo_path = settings.repositories.treemap.get(options.repo)
	if repo_path is None:
		parser.error("Unable to locate repository named '%s'" % (options.repo,))

	if options.jobs is not None:
		options.jobs = int(options.jobs)

	if options.load_average is not None:
		options.load_average = float(options.load_average)

	if options.failure_log is not None:
		options.failure_log = normalize_path(
			os.path.abspath(options.failure_log))

		parent_dir = os.path.dirname(options.failure_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--failure-log '%s' parent is not a "
				"writable directory") % options.failure_log)

	if options.success_log is not None:
		options.success_log = normalize_path(
			os.path.abspath(options.success_log))

		parent_dir = os.path.dirname(options.success_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--success-log '%s' parent is not a "
				"writable directory") % options.success_log)

	if options.scheduled_deletion_log is not None:
		options.scheduled_deletion_log = normalize_path(
			os.path.abspath(options.scheduled_deletion_log))

		parent_dir = os.path.dirname(options.scheduled_deletion_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--scheduled-deletion-log '%s' parent is not a "
				"writable directory") % options.scheduled_deletion_log)

		if options.deletion_db is None:
			parser.error("--scheduled-deletion-log requires --deletion-db")

	if options.deletion_delay is not None:
		options.deletion_delay = long(options.deletion_delay)
		if options.deletion_db is None:
			parser.error("--deletion-delay requires --deletion-db")

	if options.deletion_db is not None:
		if options.deletion_delay is None:
			parser.error("--deletion-db requires --deletion-delay")
		options.deletion_db = normalize_path(
			os.path.abspath(options.deletion_db))

	if options.temp_dir is not None:
		options.temp_dir = normalize_path(
			os.path.abspath(options.temp_dir))

		if not (os.path.isdir(options.temp_dir) and
			os.access(options.temp_dir, os.W_OK|os.X_OK)):
			parser.error(("--temp-dir '%s' is not a "
				"writable directory") % options.temp_dir)

	if options.distfiles is not None:
		options.distfiles = normalize_path(
			os.path.abspath(options.distfiles))

		if not (os.path.isdir(options.distfiles) and
			os.access(options.distfiles, os.W_OK|os.X_OK)):
			parser.error(("--distfiles '%s' is not a "
				"writable directory") % options.distfiles)
	else:
		parser.error("missing required --distfiles parameter")

	if options.mirror_overrides is not None:
		options.mirror_overrides = normalize_path(
			os.path.abspath(options.mirror_overrides))

		if not (os.access(options.mirror_overrides, os.R_OK) and
			os.path.isfile(options.mirror_overrides)):
			parser.error(
				"--mirror-overrides '%s' is not a readable file" %
				options.mirror_overrides)

	if options.distfiles_local is not None:
		options.distfiles_local = normalize_path(
			os.path.abspath(options.distfiles_local))

		if not (os.path.isdir(options.distfiles_local) and
			os.access(options.distfiles_local, os.W_OK|os.X_OK)):
			parser.error(("--distfiles-local '%s' is not a "
				"writable directory") % options.distfiles_local)

	if options.distfiles_db is not None:
		options.distfiles_db = normalize_path(
			os.path.abspath(options.distfiles_db))

	if options.tries is not None:
		options.tries = int(options.tries)

	if options.recycle_dir is not None:
		options.recycle_dir = normalize_path(
			os.path.abspath(options.recycle_dir))
		if not (os.path.isdir(options.recycle_dir) and
			os.access(options.recycle_dir, os.W_OK|os.X_OK)):
			parser.error(("--recycle-dir '%s' is not a "
				"writable directory") % options.recycle_dir)

	if options.recycle_db is not None:
		if options.recycle_dir is None:
			parser.error("--recycle-db requires "
				"--recycle-dir to be specified")
		options.recycle_db = normalize_path(
			os.path.abspath(options.recycle_db))

	if options.recycle_deletion_delay is not None:
		options.recycle_deletion_delay = \
			int(options.recycle_deletion_delay)

	if options.fetch_log_dir is not None:
		options.fetch_log_dir = normalize_path(
			os.path.abspath(options.fetch_log_dir))

		if not (os.path.isdir(options.fetch_log_dir) and
			os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
			parser.error(("--fetch-log-dir '%s' is not a "
				"writable directory") % options.fetch_log_dir)

	if options.whitelist_from:
		normalized_paths = []
		for x in options.whitelist_from:
			path = normalize_path(os.path.abspath(x))
			if not os.access(path, os.R_OK):
				parser.error("--whitelist-from '%s' is not readable" % x)
			if os.path.isfile(path):
				normalized_paths.append(path)
			elif os.path.isdir(path):
				for file in _recursive_file_list(path):
					if not os.access(file, os.R_OK):
						parser.error("--whitelist-from '%s' directory contains unreadable file '%s'" % (x, file))
					normalized_paths.append(file)
			else:
				parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
		options.whitelist_from = normalized_paths

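	# --strict-manifests=y/n is mapped onto the FEATURES=strict flag,
	# which is consulted when Manifest entries are validated.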
	if options.strict_manifests is not None:
		if options.strict_manifests == "y":
			settings.features.add("strict")
		else:
			settings.features.discard("strict")

	settings.lock()

	portdb = portage.portdbapi(mysettings=settings)

	# Limit ebuilds to the specified repo.
	portdb.porttrees = [repo_path]

	portage.util.initialize_logger()

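	# Each -v lowers the root logger threshold by 10, i.e. by one
	# standard logging level per occurrence.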
	if options.verbose > 0:
		logger = logging.getLogger()
		logger.setLevel(logger.getEffectiveLevel() - 10 * options.verbose)

	with Config(options, portdb,
		SchedulerInterface(global_event_loop())) as config:

		if not options.mirror:
			parser.error('No action specified')

		returncode = os.EX_OK

		if options.mirror:
			signum = run_main_scheduler(MirrorDistTask(config))
			if signum is not None:
				sys.exit(128 + signum)

	return returncode
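
The four parent-directory checks above all repeat one pattern; a minimal refactoring sketch is shown below (the helper name _check_writable_dir is hypothetical, not part of emirrordist):

import os

def _check_writable_dir(parser, opt_name, path):
	# Hypothetical helper collapsing the repeated checks above: the
	# directory must exist and be both writable and searchable.
	if not (os.path.isdir(path) and os.access(path, os.W_OK | os.X_OK)):
		parser.error("%s '%s' is not a writable directory" % (opt_name, path))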
Example no. 54
def _prepare_workdir(mysettings):
    workdir_mode = 0o700
    try:
        mode = mysettings["PORTAGE_WORKDIR_MODE"]
        if mode.isdigit():
            parsed_mode = int(mode, 8)
        elif mode == "":
            raise KeyError()
        else:
            raise ValueError()
        if parsed_mode & 0o7777 != parsed_mode:
            raise ValueError("Invalid file mode: %s" % mode)
        else:
            workdir_mode = parsed_mode
    except KeyError:
        writemsg(
            _("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") %
            oct(workdir_mode))
    except ValueError as e:
        if len(str(e)) > 0:
            writemsg("%s\n" % e)
        writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n")
                 % (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
    mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
    try:
        apply_secpass_permissions(mysettings["WORKDIR"],
                                  uid=portage_uid,
                                  gid=portage_gid,
                                  mode=workdir_mode)
    except FileNotFound:
        pass  # ebuild.sh will create it

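    # An empty PORT_LOGDIR disables logging; the while loop is needed
    # because the config object stacks values across several layers.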
    if mysettings.get("PORT_LOGDIR", "") == "":
        while "PORT_LOGDIR" in mysettings:
            del mysettings["PORT_LOGDIR"]
    if "PORT_LOGDIR" in mysettings:
        try:
            modified = ensure_dirs(mysettings["PORT_LOGDIR"])
            if modified:
                apply_secpass_permissions(mysettings["PORT_LOGDIR"],
                                          uid=portage_uid,
                                          gid=portage_gid,
                                          mode=0o2770)
        except PortageException as e:
            writemsg("!!! %s\n" % str(e), noiselevel=-1)
            writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
             mysettings["PORT_LOGDIR"], noiselevel=-1)
            writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
            while "PORT_LOGDIR" in mysettings:
                del mysettings["PORT_LOGDIR"]
    if "PORT_LOGDIR" in mysettings and \
     os.access(mysettings["PORT_LOGDIR"], os.W_OK):
        logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
        if not os.path.exists(logid_path):
            # Use a context manager so the descriptor is not leaked.
            with open(_unicode_encode(logid_path), 'w'):
                pass
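        # The marker file's mtime supplies a stable per-build timestamp
        # that every log file for this build directory reuses.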
        logid_time = _unicode_decode(time.strftime(
            "%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)),
                                     encoding=_encodings['content'],
                                     errors='replace')

        if "split-log" in mysettings.features:
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
             mysettings["PORT_LOGDIR"], "build", "%s/%s:%s.log" % \
             (mysettings["CATEGORY"], mysettings["PF"], logid_time))
        else:
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
             mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
             (mysettings["CATEGORY"], mysettings["PF"], logid_time))

        ensure_dirs(os.path.dirname(mysettings["PORTAGE_LOG_FILE"]))

    else:
        # NOTE: When sesandbox is enabled, the local SELinux security policies
        # may not allow output to be piped out of the sesandbox domain. The
        # current policy will allow it to work when a pty is available, but
        # not through a normal pipe. See bug #162404.
        mysettings["PORTAGE_LOG_FILE"] = os.path.join(mysettings["T"],
                                                      "build.log")
Example no. 55
def post_emerge(myaction, myopts, myfiles,
	target_root, trees, mtimedb, retval):
	"""
	Misc. things to run at the end of a merge session.

	Update Info Files
	Update Config Files
	Update News Items
	Commit mtimeDB
	Display preserved libs warnings

	@param myaction: The action returned from parse_opts()
	@type myaction: String
	@param myopts: emerge options
	@type myopts: dict
	@param myfiles: emerge arguments
	@type myfiles: list
	@param target_root: The target EROOT for myaction
	@type target_root: String
	@param trees: A dictionary mapping each ROOT to its package databases
	@type trees: dict
	@param mtimedb: The mtimeDB to store data needed across merge invocations
	@type mtimedb: MtimeDB class instance
	@param retval: Emerge's return value
	@type retval: Int
	"""

	root_config = trees[target_root]["root_config"]
	vardbapi = trees[target_root]['vartree'].dbapi
	settings = vardbapi.settings
	info_mtimes = mtimedb["info"]

	# Load the most current variables from ${ROOT}/etc/profile.env
	settings.unlock()
	settings.reload()
	settings.regenerate()
	settings.lock()

	config_protect = portage.util.shlex_split(
		settings.get("CONFIG_PROTECT", ""))
	infodirs = settings.get("INFOPATH","").split(":") + \
		settings.get("INFODIR","").split(":")

	os.chdir("/")

	if retval == os.EX_OK:
		exit_msg = " *** exiting successfully."
	else:
		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
	emergelog("notitles" not in settings.features, exit_msg)

	_flush_elog_mod_echo()

	if not vardbapi._pkgs_changed:
		# GLEP 42 says to display news *after* an emerge --pretend
		if "--pretend" in myopts:
			display_news_notification(root_config, myopts)
		# If vdb state has not changed then there's nothing else to do.
		return

	vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
	portage.util.ensure_dirs(vdb_path)
	vdb_lock = None
	if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
		vardbapi.lock()
		vdb_lock = True

	if vdb_lock:
		try:
			if "noinfo" not in settings.features:
				chk_updated_info_files(target_root,
					infodirs, info_mtimes)
			mtimedb.commit()
		finally:
			vardbapi.unlock()

	# Explicitly load and prune the PreservedLibsRegistry in order
	# to ensure that we do not display stale data.
	vardbapi._plib_registry.load()

	if vardbapi._plib_registry.hasEntries():
		if "--quiet" in myopts:
			print()
			print(colorize("WARN", "!!!") + " existing preserved libs found")
		else:
			print()
			print(colorize("WARN", "!!!") + " existing preserved libs:")
			display_preserved_libs(vardbapi)
			print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
				" to rebuild packages using these libraries")

	chk_updated_cfg_files(settings['EROOT'], config_protect)

	display_news_notification(root_config, myopts)

	postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
		portage.USER_CONFIG_PATH, "bin", "post_emerge")
	if os.access(postemerge, os.X_OK):
		hook_retval = portage.process.spawn(
						[postemerge], env=settings.environ())
		if hook_retval != os.EX_OK:
			portage.util.writemsg_level(
				" %s spawn of %s failed\n" %
				(colorize("BAD", "*"), postemerge),
				level=logging.ERROR, noiselevel=-1)

	clean_logs(settings)

	if "--quiet" not in myopts and \
		myaction is None and "@world" in myfiles:
		show_depclean_suggestion()
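
post_emerge runs an optional user hook from ${PORTAGE_CONFIGROOT}/etc/portage/bin/post_emerge. A minimal standalone sketch of the same gate-and-spawn pattern, using subprocess instead of portage.process.spawn (run_hook is a hypothetical name):

import os
import subprocess

def run_hook(hook_path, env=None):
	# Run the hook only when it exists and is executable, mirroring
	# the os.access(..., os.X_OK) gate in post_emerge above.
	if not os.access(hook_path, os.X_OK):
		return os.EX_OK
	return subprocess.call([hook_path], env=env)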