def process(mysettings, key, logentries, fulltext):
	if "PORTAGE_ELOG_MAILURI" in mysettings:
		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
	else:
		myrecipient = "root@localhost"
	
	myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
	mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
	mysubject = mysubject.replace("${PACKAGE}", key)
	mysubject = mysubject.replace("${HOST}", socket.getfqdn())

	# look at the phases listed in our logentries to figure out what action was performed
	action = _("merged")
	for phase in logentries:
		# if we found a *rm phase assume that the package was unmerged
		if phase in ["postrm", "prerm"]:
			action = _("unmerged")
	# if we think that the package was unmerged, make sure there was no unexpected
	# phase recorded to avoid misinformation
	if action == _("unmerged"):
		for phase in logentries:
			if phase not in ["postrm", "prerm", "other"]:
				action = _("unknown")

	mysubject = mysubject.replace("${ACTION}", action)

	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
	try:
		portage.mail.send_mail(mysettings, mymessage)
	except PortageException as e:
		writemsg("%s\n" % str(e), noiselevel=-1)

	return
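
A standalone sketch of the subject-template expansion used above; the settings value and package name here are hypothetical, not taken from portage:

import socket

# Hypothetical template; portage normally reads PORTAGE_ELOG_MAILSUBJECT from its config.
mysettings = {"PORTAGE_ELOG_MAILSUBJECT": "[portage] ebuild log for ${PACKAGE} on ${HOST}"}
mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
mysubject = mysubject.replace("${PACKAGE}", "app-misc/foo-1.0")  # hypothetical package key
mysubject = mysubject.replace("${HOST}", socket.getfqdn())
mysubject = mysubject.replace("${ACTION}", "merged")
print(mysubject)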
Example #2
	def _start(self):

		pkg = self.pkg
		settings = self.settings

		if not self.opts.fetchonly:
			rval = _check_temp_dir(settings)
			if rval != os.EX_OK:
				self.returncode = rval
				self._current_task = None
				self._async_wait()
				return

		root_config = pkg.root_config
		tree = "porttree"
		self._tree = tree
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
		if self.opts.buildpkgonly:
			settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
		else:
			settings.configdict["pkg"]["MERGE_TYPE"] = "source"
		ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
		if ebuild_path is None:
			raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
		self._ebuild_path = ebuild_path
		portage.doebuild_environment(ebuild_path, 'setup',
			settings=self.settings, db=portdb)

		# Check the manifest here since with --keep-going mode it's
		# currently possible to get this far with a broken manifest.
		if not self._check_manifest():
			self.returncode = 1
			self._current_task = None
			self._async_wait()
			return

		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif prefetcher.isAlive() and \
			prefetcher.poll() is None:

			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f %s` in another terminal." \
				% (_emerge.emergelog._emerge_log_dir)
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)
Example #3
	def __init__(self, depgraph, mylist, favorites, verbosity):
		frozen_config = depgraph._frozen_config
		dynamic_config = depgraph._dynamic_config

		self.mylist = mylist
		self.favorites = InternalPackageSet(favorites, allow_repo=True)
		self.verbosity = verbosity

		if self.verbosity is None:
			self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
				"--verbose" in frozen_config.myopts and 3 or 2)

		self.oneshot = "--oneshot" in frozen_config.myopts or \
			"--onlydeps" in frozen_config.myopts
		self.columns = "--columns" in frozen_config.myopts
		self.tree_display = "--tree" in frozen_config.myopts
		self.alphabetical = "--alphabetical" in frozen_config.myopts
		self.quiet = "--quiet" in frozen_config.myopts
		self.all_flags = self.verbosity == 3 or self.quiet
		self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
		self.changelog = "--changelog" in frozen_config.myopts
		self.edebug = frozen_config.edebug
		self.unordered_display = "--unordered-display" in frozen_config.myopts

		mywidth = 130
		if "COLUMNWIDTH" in frozen_config.settings:
			try:
				mywidth = int(frozen_config.settings["COLUMNWIDTH"])
			except ValueError as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
					frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
				del e
		self.columnwidth = mywidth

		if "--quiet-repo-display" in frozen_config.myopts:
			self.repo_display = _RepoDisplay(frozen_config.roots)
		self.trees = frozen_config.trees
		self.pkgsettings = frozen_config.pkgsettings
		self.target_root = frozen_config.target_root
		self.running_root = frozen_config._running_root
		self.roots = frozen_config.roots

		# Create a single merged user set for each root
		self.user_sets = {}
		for root_name, root in self.roots.items():
			self.user_sets[root_name] = InternalPackageSet(initial_atoms= \
				chain.from_iterable(pkgset.getAtoms() \
				for pkgset in root.sets.values() \
				if pkgset.user_set))

		self.blocker_parents = dynamic_config._blocker_parents
		self.reinstall_nodes = dynamic_config._reinstall_nodes
		self.digraph = dynamic_config.digraph
		self.blocker_uninstalls = dynamic_config._blocker_uninstalls
		self.package_tracker = dynamic_config._package_tracker
		self.set_nodes = dynamic_config._set_nodes

		self.pkg_use_enabled = depgraph._pkg_use_enabled
		self.pkg = depgraph._pkg
Example #4
	def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True, eapi_filter=None):
		ret = {}
		location_dict = {}
		file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True)
		eapi = read_corresponding_eapi_file(file_name)
		if eapi_filter is not None and not eapi_filter(eapi):
			if file_dict:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ret
		useflag_re = _get_useflag_re(eapi)
		for k, v in file_dict.items():
			useflags = []
			for prefixed_useflag in v:
				if prefixed_useflag[:1] == "-":
					useflag = prefixed_useflag[1:]
				else:
					useflag = prefixed_useflag
				if useflag_re.match(useflag) is None:
					writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
						(k, file_name, prefixed_useflag), noiselevel=-1)
				else:
					useflags.append(prefixed_useflag)
			location_dict.setdefault(k, []).extend(useflags)
		for k, v in location_dict.items():
			if juststrings:
				v = " ".join(v)
			else:
				v = tuple(v)
			ret.setdefault(k.cp, {})[k] = v
		return ret
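
A rough, self-contained illustration of the prefix handling above: a leading "-" is stripped only for validation, while the stored flag keeps its prefix. The regular expression is a simplified stand-in for whatever _get_useflag_re(eapi) returns:

import re

useflag_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9+_@-]*$")  # simplified stand-in
for prefixed_useflag in ("doc", "-doc", "-bad flag"):
	if prefixed_useflag[:1] == "-":
		useflag = prefixed_useflag[1:]
	else:
		useflag = prefixed_useflag
	# Validate the bare flag, but keep the prefixed form for storage.
	print(prefixed_useflag, bool(useflag_re.match(useflag)))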
Example #5
	def _initialize(self):
		"""Initialize the plug-in module

		@rtype: boolean
		"""
		self.valid = False
		try:
			mod_name = ".".join([self._namepath, self.name])
			self._module = __import__(mod_name, [], [], ["not empty"])
			self.valid = True
		except ImportError as e:
			print("MODULE; failed import", mod_name, "  error was:", e)
			return False
		self.module_spec = self._module.module_spec
		for submodule in self.module_spec['provides']:
			kid = self.module_spec['provides'][submodule]
			kidname = kid['name']
			try:
				kid['module_name'] = '.'.join([mod_name, kid['sourcefile']])
			except KeyError:
				kid['module_name'] = '.'.join([mod_name, self.name])
				msg = ("%s module's module_spec is old, missing attribute: "
						"'sourcefile'.  Backward compatibility may be "
						"removed in the future.\nFile: %s\n")
				writemsg(_(msg) % (self.name, self._module.__file__))
			kid['is_imported'] = False
			self.kids[kidname] = kid
			self.kids_names.append(kidname)
		return True
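
For reference, the non-empty fromlist above makes __import__ return the leaf submodule instead of the top-level package; importlib.import_module gives the same result more directly. A small sketch, independent of the plugin code:

import importlib

leaf = importlib.import_module("os.path")            # returns the submodule itself
pkg = __import__("os.path", [], [], ["not empty"])   # same effect via __import__
assert leaf is pkg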
Example #6
	def load_profiles(self, repositories, known_repository_paths):
		known_repository_paths = set(os.path.realpath(x)
			for x in known_repository_paths)

		known_repos = []
		for x in known_repository_paths:
			try:
				layout_data = {"profile-formats":
					repositories.get_repo_for_location(x).profile_formats}
			except KeyError:
				layout_data = parse_layout_conf(x)[0]
			# force a trailing '/' for ease of doing startswith checks
			known_repos.append((x + '/', layout_data))
		known_repos = tuple(known_repos)

		if self.config_profile_path is None:
			self.config_profile_path = \
				os.path.join(self.config_root, PROFILE_PATH)
			if os.path.isdir(self.config_profile_path):
				self.profile_path = self.config_profile_path
			else:
				self.config_profile_path = \
					os.path.join(self.config_root, 'etc', 'make.profile')
				if os.path.isdir(self.config_profile_path):
					self.profile_path = self.config_profile_path
				else:
					self.profile_path = None
		else:
			# NOTE: repoman may pass in an empty string
			# here, in order to create an empty profile
			# for checking dependencies of packages with
			# empty KEYWORDS.
			self.profile_path = self.config_profile_path


		# The symlink might not exist or might not be a symlink.
		self.profiles = []
		self.profiles_complex = []
		if self.profile_path:
			try:
				self._addProfile(os.path.realpath(self.profile_path),
					repositories, known_repos)
			except ParseError as e:
				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
					self.profile_path, noiselevel=-1)
				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
				self.profiles = []
				self.profiles_complex = []

		if self._user_config and self.profiles:
			custom_prof = os.path.join(
				self.config_root, CUSTOM_PROFILE_PATH)
			if os.path.exists(custom_prof):
				self.user_profile_dir = custom_prof
				self.profiles.append(custom_prof)
				self.profiles_complex.append(_profile_node(custom_prof, True))
			del custom_prof

		self.profiles = tuple(self.profiles)
		self.profiles_complex = tuple(self.profiles_complex)
Example #7
	def _post_phase_exit(self, post_phase):

		self._assert_current(post_phase)

		log_path = None
		if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
			log_path = self.settings.get("PORTAGE_LOG_FILE")

		if post_phase.logfile is not None and \
			post_phase.logfile != log_path:
			# We were logging to a temp file (see above), so append
			# temp file to main log and remove temp file.
			self._append_temp_log(post_phase.logfile, log_path)

		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				noiselevel=-1)
			self._die_hooks()
			return

		if self.phase == "install":
			out = io.StringIO()
			_post_src_install_soname_symlinks(self.settings, out)
			msg = out.getvalue()
			if msg:
				self.scheduler.output(msg, log_path=log_path)

		self._current_task = None
		self.wait()
		return
Example #8
	def set_root_override(self, root_overwrite=None):
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		self.target_root = self.target_root or os.sep

		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		if self.sysroot != "/" and self.sysroot != self.target_root:
			writemsg(_("!!! Error: SYSROOT (currently %s) must "
				"equal / or ROOT (currently %s).\n") %
				(self.sysroot, self.target_root),
				noiselevel=-1)
			raise InvalidLocation(self.sysroot)

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		self.global_config_path = GLOBAL_CONFIG_PATH
		if portage.const.EPREFIX:
			self.global_config_path = os.path.join(portage.const.EPREFIX,
				GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #9
def load_unpack_dependencies_configuration(repositories):
	repo_dict = {}
	for repo in repositories.repos_with_profiles():
		for eapi in _supported_eapis:
			if eapi_has_automatic_unpack_dependencies(eapi):
				file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
				lines = grabfile(file_name, recursive=True)
				for line in lines:
					elements = line.split()
					suffix = elements[0].lower()
					if len(elements) == 1:
						writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
					depend = " ".join(elements[1:])
					try:
						use_reduce(depend, eapi=eapi)
					except InvalidDependString as e:
						writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
					else:
						repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	ret = {}
	for repo in repositories.repos_with_profiles():
		for repo_name in [x.name for x in repo.masters] + [repo.name]:
			for eapi in repo_dict.get(repo_name, {}):
				for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
					ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	return ret
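
The nested setdefault calls above build a three-level mapping: repo name -> EAPI -> suffix -> dependency string. A tiny sketch with made-up values:

ret = {}
ret.setdefault("gentoo", {}).setdefault("5", {})["zip"] = "app-arch/unzip"   # hypothetical entry
ret.setdefault("gentoo", {}).setdefault("5", {})["rar"] = "app-arch/unrar"   # hypothetical entry
print(ret)  # {'gentoo': {'5': {'zip': 'app-arch/unzip', 'rar': 'app-arch/unrar'}}}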
Example #10
	def trace_event(self, frame, event, arg):
		writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
		(self.trim_filename(frame.f_code.co_filename),
		frame.f_lineno,
		frame.f_code.co_name,
		event,
		self.arg_repr(frame, event, arg),
		self.locals_repr(frame, event, arg)))
Example #11
def _create_pty_or_pipe(copy_term_size=None):
	"""
	Try to create a pty, and if that fails, create a normal
	pipe instead.

	@param copy_term_size: If a tty file descriptor is given
		then the term size will be copied to the pty.
	@type copy_term_size: int
	@rtype: tuple
	@returns: A tuple of (is_pty, master_fd, slave_fd) where
		is_pty is True if a pty was successfully allocated, and
		False if a normal pipe was allocated.
	"""

	got_pty = False

	global _disable_openpty, _fbsd_test_pty

	if _fbsd_test_pty and not _disable_openpty:
		# Test for python openpty breakage after freebsd7 to freebsd8
		# upgrade, which results in a 'Function not implemented' error
		# and the process being killed.
		pid = os.fork()
		if pid == 0:
			pty.openpty()
			os._exit(os.EX_OK)
		pid, status = os.waitpid(pid, 0)
		if (status & 0xff) == 140:
			_disable_openpty = True
		_fbsd_test_pty = False

	if _disable_openpty:
		master_fd, slave_fd = os.pipe()
	else:
		try:
			master_fd, slave_fd = pty.openpty()
			got_pty = True
		except EnvironmentError as e:
			_disable_openpty = True
			writemsg("openpty failed: '%s'\n" % str(e),
				noiselevel=-1)
			del e
			master_fd, slave_fd = os.pipe()

	if got_pty:
		# Disable post-processing of output since otherwise weird
		# things like \n -> \r\n transformations may occur.
		mode = termios.tcgetattr(slave_fd)
		mode[1] &= ~termios.OPOST
		termios.tcsetattr(slave_fd, termios.TCSANOW, mode)

	if got_pty and \
		copy_term_size is not None and \
		os.isatty(copy_term_size):
		rows, columns = get_term_size()
		set_term_size(rows, columns, slave_fd)

	return (got_pty, master_fd, slave_fd)
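
A stripped-down sketch of the same fallback pattern, without the FreeBSD workaround or the termios tweaks (POSIX-only; the helper name is local to this sketch):

import os
import pty

def pty_or_pipe():
	try:
		master_fd, slave_fd = pty.openpty()
		return True, master_fd, slave_fd
	except OSError:
		# pty allocation failed, fall back to a plain pipe
		master_fd, slave_fd = os.pipe()
		return False, master_fd, slave_fd

is_pty, master_fd, slave_fd = pty_or_pipe()
os.close(master_fd)
os.close(slave_fd)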
Example #12
	def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
		target_root=None):
		self.user_profile_dir = None
		self._local_repo_conf_path = None
		self.eprefix = eprefix
		self.config_root = config_root
		self.target_root = target_root
		self._user_config = local_config
		
		if self.eprefix is None:
			self.eprefix = ""

		if self.config_root is None:
			self.config_root = self.eprefix + os.sep

		self.config_root = normalize_path(os.path.abspath(
			self.config_root)).rstrip(os.path.sep) + os.path.sep

		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)

		if not config_profile_path:
			config_profile_path = \
				os.path.join(self.config_root, PROFILE_PATH)
			if os.path.isdir(config_profile_path):
				self.profile_path = config_profile_path
			else:
				config_profile_path = \
					os.path.join(self.abs_user_config, 'make.profile')
				if os.path.isdir(config_profile_path):
					self.profile_path = config_profile_path
				else:
					self.profile_path = None
		else:
			self.profile_path = config_profile_path


		# The symlink might not exist or might not be a symlink.
		self.profiles = []
		if self.profile_path is not None:
			try:
				self._addProfile(os.path.realpath(self.profile_path))
			except ParseError as e:
				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
					self.profile_path, noiselevel=-1)
				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
				self.profiles = []

		if self._user_config and self.profiles:
			custom_prof = os.path.join(
				self.config_root, CUSTOM_PROFILE_PATH)
			if os.path.exists(custom_prof):
				self.user_profile_dir = custom_prof
				self.profiles.append(custom_prof)
			del custom_prof

		self.profiles = tuple(self.profiles)
Example #13
	def _post_phase_exit(self, post_phase):
		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				noiselevel=-1)
			self._die_hooks()
			return
		self._current_task = None
		self.wait()
		return
Example #14
def parse_args():
	argv = sys.argv[:]

	if sys.hexversion >= 0x3000000:
		# We can't trust that the filesystem encoding (locale dependent)
		# correctly matches the arguments, so use surrogateescape to
		# pass through the original argv bytes for Python 3.
		fs_encoding = sys.getfilesystemencoding()
		argv = [x.encode(fs_encoding, 'surrogateescape') for x in argv]

	for x, arg in enumerate(argv):
		try:
			argv[x] = _unicode_decode(arg, errors='strict')
		except UnicodeDecodeError:
			writemsg('dohtml: argument is not encoded as UTF-8: %s\n' %
				_unicode_decode(arg), noiselevel=-1)
			sys.exit(1)

	options = OptionsClass()
	args = []

	x = 1
	while x < len(argv):
		arg = argv[x]
		if arg in ["-h","-r","-V"]:
			if arg == "-h":
				print_help()
				sys.exit(0)
			elif arg == "-r":
				options.recurse = True
			elif arg == "-V":
				options.verbose = True
		elif argv[x] in ["-A","-a","-f","-x","-p"]:
			x += 1
			if x == len(argv):
				print_help()
				sys.exit(0)
			elif arg == "-p":
				options.doc_prefix = argv[x]
				if options.doc_prefix:
					options.doc_prefix = normalize_path(options.doc_prefix)
			else:
				values = argv[x].split(",")
				if arg == "-A":
					options.allowed_exts.extend(values)
				elif arg == "-a":
					options.allowed_exts = values
				elif arg == "-f":
					options.allowed_files = values
				elif arg == "-x":
					options.disallowed_dirs = values
		else:
			args.append(argv[x])
		x += 1

	return (options, args)
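
A small demonstration of the surrogateescape round-trip the comment above relies on: bytes that do not decode as UTF-8 survive a decode/encode cycle unchanged (the sample bytes are arbitrary):

raw = b"caf\xe9"  # latin-1 encoded, not valid UTF-8
text = raw.decode("utf-8", "surrogateescape")
assert text.encode("utf-8", "surrogateescape") == raw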
Example #15
	def _start(self):

		pkg = self.pkg
		settings = self.settings

		rval = _check_temp_dir(settings)
		if rval != os.EX_OK:
			self.returncode = rval
			self._current_task = None
			self.wait()
			return

		root_config = pkg.root_config
		tree = "porttree"
		self._tree = tree
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
		ebuild_path = portdb.findname(pkg.cpv)
		if ebuild_path is None:
			raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
		self._ebuild_path = ebuild_path

		# Check the manifest here since with --keep-going mode it's
		# currently possible to get this far with a broken manifest.
		if not self._check_manifest():
			self.returncode = 1
			self._current_task = None
			self.wait()
			return

		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			prefetcher.cancel()
		elif prefetcher.poll() is None:

			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal."
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)
Example #16
	def _read_dirVirtuals(self, profiles):
		"""
		Read the 'virtuals' file in all profiles.
		"""
		virtuals_list = []
		for x in profiles:
			virtuals_file = os.path.join(x, "virtuals")
			virtuals_dict = grabdict(virtuals_file)
			atoms_dict = {}
			for k, v in virtuals_dict.items():
				try:
					virt_atom = Atom(k)
				except InvalidAtom:
					virt_atom = None
				else:
					if virt_atom.blocker or \
						str(virt_atom) != str(virt_atom.cp):
						virt_atom = None
				if virt_atom is None:
					writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
						(virtuals_file, k), noiselevel=-1)
					continue
				providers = []
				for atom in v:
					atom_orig = atom
					if atom[:1] == '-':
						# allow incrementals
						atom = atom[1:]
					try:
						atom = Atom(atom)
					except InvalidAtom:
						atom = None
					else:
						if atom.blocker:
							atom = None
					if atom is None:
						writemsg(_("--- Invalid atom in %s: %s\n") % \
							(virtuals_file, atom_orig), noiselevel=-1)
					else:
						if atom_orig == str(atom):
							# normal atom, so return as Atom instance
							providers.append(atom)
						else:
							# atom has special prefix, so return as string
							providers.append(atom_orig)
				if providers:
					atoms_dict[virt_atom] = providers
			if atoms_dict:
				virtuals_list.append(atoms_dict)

		self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)

		for virt in self._dirVirtuals:
			# Preference for virtuals decreases from left to right.
			self._dirVirtuals[virt].reverse()
Example #17
	def _start(self):

		pkg = self.pkg
		settings = self.settings
		settings.setcpv(pkg)
		self._tree = "bintree"
		self._bintree = self.pkg.root_config.trees[self._tree]
		self._verify = not self.opts.pretend

		dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
			"portage", pkg.category, pkg.pf)
		self._build_dir = EbuildBuildDir(dir_path=dir_path,
			pkg=pkg, settings=settings)
		self._image_dir = os.path.join(dir_path, "image")
		self._infloc = os.path.join(dir_path, "build-info")
		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
		settings["EBUILD"] = self._ebuild_path
		debug = settings.get("PORTAGE_DEBUG") == "1"
		portage.doebuild_environment(self._ebuild_path, "setup",
			settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name

		# The prefetcher has already completed or it
		# could be running now. If it's running now,
		# wait for it to complete since it holds
		# a lock on the file being fetched. The
		# portage.locks functions are only designed
		# to work between separate processes. Since
		# the lock is held by the current process,
		# use the scheduler and fetcher methods to
		# synchronize with the fetcher.
		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			prefetcher.cancel()
		elif prefetcher.poll() is None:

			waiting_msg = ("Fetching '%s' " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal.") % prefetcher.pkg_path
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)
Example #18
	def _check_locations(self):
		"""Check if repositories location are correct and show a warning message if not"""
		for (name, r) in self.prepos.items():
			if name != 'DEFAULT':
				if r.location is None:
					writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
				else:
					if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
						self.prepos_order.remove(name)
						writemsg(_("!!! Invalid Repository Location"
							" (not a dir): '%s'\n") % r.location, noiselevel=-1)
Example #19
 def _parse_repository_packageusealiases(self, repositories):
     ret = {}
     for repo in repositories.repos_with_profiles():
         file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
         eapi = read_corresponding_eapi_file(file_name)
         useflag_re = _get_useflag_re(eapi)
         lines = grabfile(file_name, recursive=True)
         file_dict = {}
         for line in lines:
             elements = line.split()
             atom = elements[0]
             try:
                 atom = Atom(atom, eapi=eapi)
             except InvalidAtom:
                 writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
                 continue
             if len(elements) == 1:
                 writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
                 continue
             real_flag = elements[1]
             if useflag_re.match(real_flag) is None:
                 writemsg(
                     _("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag),
                     noiselevel=-1,
                 )
             else:
                 for alias in elements[2:]:
                     if useflag_re.match(alias) is None:
                         writemsg(
                             _("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n")
                             % (real_flag, atom, file_name, alias),
                             noiselevel=-1,
                         )
                     else:
                         # Duplicated USE flag aliases in entries for different atoms
                         # matching the same package version are detected in getUseAliases().
                         if any(
                             alias in v
                             for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items()
                             if k != real_flag
                         ):
                             writemsg(
                                 _("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n")
                                 % (atom, file_name, alias),
                                 noiselevel=-1,
                             )
                         else:
                             file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(
                                 alias
                             )
         ret[repo.name] = file_dict
     return ret
Example #20
	def __init__(self, depgraph, mylist, favorites, verbosity):
		frozen_config = depgraph._frozen_config
		dynamic_config = depgraph._dynamic_config

		self.mylist = mylist
		self.favorites = InternalPackageSet(favorites, allow_repo=True)
		self.verbosity = verbosity

		if self.verbosity is None:
			self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
				"--verbose" in frozen_config.myopts and 3 or 2)

		self.oneshot = "--oneshot" in frozen_config.myopts or \
			"--onlydeps" in frozen_config.myopts
		self.columns = "--columns" in frozen_config.myopts
		self.tree_display = "--tree" in frozen_config.myopts
		self.alphabetical = "--alphabetical" in frozen_config.myopts
		self.quiet = "--quiet" in frozen_config.myopts
		self.all_flags = self.verbosity == 3 or self.quiet
		self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
		self.changelog = "--changelog" in frozen_config.myopts
		self.edebug = frozen_config.edebug
		self.no_restart = frozen_config._opts_no_restart.intersection(frozen_config.myopts)
		self.unordered_display = "--unordered-display" in frozen_config.myopts

		mywidth = 130
		if "COLUMNWIDTH" in frozen_config.settings:
			try:
				mywidth = int(frozen_config.settings["COLUMNWIDTH"])
			except ValueError as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
					frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
				del e
		self.columnwidth = mywidth

		self.repo_display = _RepoDisplay(frozen_config.roots)
		self.trees = frozen_config.trees
		self.pkgsettings = frozen_config.pkgsettings
		self.target_root = frozen_config.target_root
		self.running_root = frozen_config._running_root
		self.roots = frozen_config.roots

		self.blocker_parents = dynamic_config._blocker_parents
		self.reinstall_nodes = dynamic_config._reinstall_nodes
		self.digraph = dynamic_config.digraph
		self.blocker_uninstalls = dynamic_config._blocker_uninstalls
		self.slot_pkg_map = dynamic_config._slot_pkg_map
		self.set_nodes = dynamic_config._set_nodes

		self.pkg_use_enabled = depgraph._pkg_use_enabled
		self.pkg = depgraph._pkg
Example #21
    def getUseAliases(self, pkg):
        if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
            return {}

        cp = getattr(pkg, "cp", None)
        if cp is None:
            slot = dep_getslot(pkg)
            repo = dep_getrepo(pkg)
            pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
            cp = pkg.cp

        usealiases = {}

        if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
            repos = []
            try:
                if self.repositories[pkg.repo].use_aliases_masters is not None:
                    masters = self.repositories[pkg.repo].use_aliases_masters
                else:
                    masters = self.repositories[pkg.repo].masters
                repos.extend(repo.name for repo in masters)
            except KeyError:
                pass
            repos.append(pkg.repo)
            for repo in repos:
                usealiases_dict = self._repo_usealiases_dict.get(repo, {})
                for real_flag, aliases in usealiases_dict.items():
                    for alias in aliases:
                        if any(alias in v for k, v in usealiases.items() if k != real_flag):
                            writemsg(
                                _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n")
                                % (pkg.cpv, _repo_separator, pkg.repo, alias),
                                noiselevel=-1,
                            )
                        else:
                            usealiases.setdefault(real_flag, []).append(alias)
                cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
                if cp_usealiases_dict:
                    usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
                    for usealiases_dict in usealiases_dict_list:
                        for real_flag, aliases in usealiases_dict.items():
                            for alias in aliases:
                                if any(alias in v for k, v in usealiases.items() if k != real_flag):
                                    writemsg(
                                        _("--- Duplicated USE flag alias for '%s%s%s': '%s'\n")
                                        % (pkg.cpv, _repo_separator, pkg.repo, alias),
                                        noiselevel=-1,
                                    )
                                else:
                                    usealiases.setdefault(real_flag, []).append(alias)

        return usealiases
Example #22
def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
	waiting_msg=None, flags=0):
	"""
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""
	lock = None
	while lock is None:
		lock = _lockfile_iteration(mypath, wantnewlockfile=wantnewlockfile,
			unlinkfile=unlinkfile, waiting_msg=waiting_msg, flags=flags)
		if lock is None:
			writemsg(_("lockfile removed by previous lock holder, retrying\n"), 1)
	return lock
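
The lockfile naming described in the docstring can be shown standalone; the actual locking and retry logic lives in _lockfile_iteration. A sketch with a hypothetical path:

import os

def lockfile_name(mypath):
	# '.' + basename + '.portage_lockfile' in the parent directory
	parent, base = os.path.split(mypath)
	return os.path.join(parent, "." + base + ".portage_lockfile")

print(lockfile_name("/var/tmp/portage/some-dir"))  # /var/tmp/portage/.some-dir.portage_lockfile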
Example #23
 def makedirs(dir_path):
     try:
         os.makedirs(dir_path)
     except OSError as oe:
         if errno.EEXIST == oe.errno:
             pass
         elif errno.EPERM == oe.errno:
             writemsg("%s\n" % oe, noiselevel=-1)
             writemsg(_("Operation Not Permitted: makedirs('%s')\n") % dir_path, noiselevel=-1)
             return False
         else:
             raise
     return True
Example #24
	def _parse(paths, prepos, ignored_map, ignored_location_map):
		"""Parse files in paths to load config"""
		parser = SafeConfigParser()

		# use read_file/readfp in order to control decoding of unicode
		try:
			# Python >=3.2
			read_file = parser.read_file
		except AttributeError:
			read_file = parser.readfp

		for p in paths:
			f = None
			try:
				f = io.open(_unicode_encode(p,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
			except EnvironmentError:
				pass
			else:
				try:
					read_file(f)
				except ParsingError as e:
					writemsg(_unicode_decode(
						_("!!! Error while reading repo config file: %s\n")
						) % e, noiselevel=-1)
			finally:
				if f is not None:
					f.close()

		prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
		for sname in parser.sections():
			optdict = {}
			for oname in parser.options(sname):
				optdict[oname] = parser.get(sname, oname)

			repo = RepoConfig(sname, optdict)
			if repo.location and not os.path.exists(repo.location):
				writemsg(_("!!! Invalid repos.conf entry '%s'"
					" (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
				continue

			if repo.name in prepos:
				old_location = prepos[repo.name].location
				if old_location is not None and repo.location is not None and old_location != repo.location:
					ignored_map.setdefault(repo.name, []).append(old_location)
					ignored_location_map[old_location] = repo.name
				prepos[repo.name].update(repo)
			else:
				prepos[repo.name] = repo
Example #25
	def _parse_file_to_tuple(self, file_name, recursive=True):
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		eapi = read_corresponding_eapi_file(file_name)
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
Example #26
def install(basename, dirname, options, prefix=""):
	fullpath = basename
	if prefix:
		fullpath = os.path.join(prefix, fullpath)
	if dirname:
		fullpath = os.path.join(dirname, fullpath)

	if options.DOCDESTTREE:
		desttree = options.DOCDESTTREE
	else:
		desttree = "html"

	destdir = os.path.join(options.ED, "usr", "share", "doc",
		options.PF.lstrip(os.sep), desttree.lstrip(os.sep),
		options.doc_prefix.lstrip(os.sep), prefix).rstrip(os.sep)

	if not os.path.exists(fullpath):
		sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
		return False
	elif os.path.isfile(fullpath):
		ext = os.path.splitext(basename)[1][1:]
		if ext in options.allowed_exts or basename in options.allowed_files:
			dodir(destdir)
			dofile(fullpath, os.path.join(destdir, basename))
		elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
			skipped_files.append(fullpath)
	elif options.recurse and os.path.isdir(fullpath) and \
	     basename not in options.disallowed_dirs:
		for i in _os.listdir(_unicode_encode(fullpath)):
			try:
				i = _unicode_decode(i, errors='strict')
			except UnicodeDecodeError:
				writemsg('dohtml: argument is not encoded as UTF-8: %s\n' %
					_unicode_decode(i), noiselevel=-1)
				sys.exit(1)
			pfx = basename
			if prefix:
				pfx = os.path.join(prefix, pfx)
			install(i, dirname, options, pfx)
	elif not options.recurse and os.path.isdir(fullpath):
		global skipped_directories
		skipped_directories.append(fullpath)
		return False
	else:
		return False
	return True
Example #27
	def _start_with_fetchlist(self, fetchlist_task):
		if self._default_exit(fetchlist_task) != os.EX_OK:
			if not self.fetchlist_dict.cancelled():
				try:
					self.fetchlist_dict.result()
				except InvalidDependString as e:
					writemsg(
						_("!!! %s%s%s: SRC_URI: %s\n") %
						(self.cp, _repo_separator, self.repo_config.name, e),
						noiselevel=-1)
			self._async_wait()
			return
		self.fetchlist_dict = self.fetchlist_dict.result()
		manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
			fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
			scheduler=self.scheduler)
		self._start_task(manifest_proc, self._manifest_proc_exit)
Example #28
	def _parse_file_to_tuple(self, file_name, recursive=True,
		eapi_filter=None, eapi=None, eapi_default="0"):
		"""
		@param file_name: input file name
		@type file_name: str
		@param recursive: triggers recursion if the input file is a
			directory
		@type recursive: bool
		@param eapi_filter: a function that accepts a single eapi
			argument, and returns True if the current file type
			is supported by the given EAPI
		@type eapi_filter: callable
		@param eapi: the EAPI of the current profile node, which allows
			a call to read_corresponding_eapi_file to be skipped
		@type eapi: str
		@param eapi_default: the default EAPI which applies if the
			current profile node does not define a local EAPI
		@type eapi_default: str
		@rtype: tuple
		@return: collection of USE flags
		"""
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		if eapi is None:
			eapi = read_corresponding_eapi_file(
				file_name, default=eapi_default)
		if eapi_filter is not None and not eapi_filter(eapi):
			if lines:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ()
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
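
A hypothetical eapi_filter of the kind the docstring describes: any callable taking an EAPI string and returning whether this file type applies. The supported set below is made up for illustration:

def eapi_filter(eapi):
	# made-up support set, for illustration only
	return eapi in ("5", "6", "7", "8")

print(eapi_filter("6"), eapi_filter("0"))  # True False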
Example #29
	def _post_phase_exit(self, post_phase):

		self._assert_current(post_phase)

		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if post_phase.logfile is not None and \
			post_phase.logfile != log_path:
			# We were logging to a temp file (see above), so append
			# temp file to main log and remove temp file.
			self._append_temp_log(post_phase.logfile, log_path)

		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				noiselevel=-1)
			self._die_hooks()
			return
		self._current_task = None
		self.wait()
		return
Example #30
def _finalize(mysettings, items):
	if len(items) == 0:
		return
	elif len(items) == 1:
		count = _("one package")
	else:
		count = _("multiple packages")
	if "PORTAGE_ELOG_MAILURI" in mysettings:
		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
	else:
		myrecipient = "root@localhost"
	
	myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
	mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
	mysubject = mysubject.replace("${PACKAGE}", count)
	mysubject = mysubject.replace("${HOST}", socket.getfqdn())

	mybody = _("elog messages for the following packages generated by "
		"process %(pid)d on host %(host)s:\n") % {"pid": os.getpid(), "host": socket.getfqdn()}
	for key in items:
		 mybody += "- %s\n" % key

	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
		mybody, attachments=list(items.values()))

	def timeout_handler(signum, frame):
		raise PortageException("Timeout in finalize() for elog system 'mail_summary'")
	import signal
	signal.signal(signal.SIGALRM, timeout_handler)
	# Timeout after one minute in case send_mail() blocks indefinitely.
	signal.alarm(60)

	try:
		try:
			portage.mail.send_mail(mysettings, mymessage)
		finally:
			signal.alarm(0)
	except PortageException as e:
		writemsg("%s\n" % str(e), noiselevel=-1)

	return
Example #31
	def trace_line(self, frame, _event, _arg):
		writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
Example #32
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
	root = trees._running_eroot
	mysettings = trees[root]["vartree"].settings
	portdb = trees[root]["porttree"].dbapi
	vardb = trees[root]["vartree"].dbapi
	bindb = trees[root]["bintree"].dbapi

	world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
	world_list = grabfile(world_file)
	world_modified = False
	world_warnings = set()
	updpath_map = {}
	# Maps repo_name to list of updates. If a given repo has no updates
	# directory, it will be omitted. If a repo has an updates directory
	# but none need to be applied (according to timestamp logic), the
	# value in the dict will be an empty list.
	repo_map = {}
	timestamps = {}

	retupd = False
	update_notice_printed = False
	for repo_name in portdb.getRepositories():
		repo = portdb.getRepositoryPath(repo_name)
		updpath = os.path.join(repo, "profiles", "updates")
		if not os.path.isdir(updpath):
			continue

		if updpath in updpath_map:
			repo_map[repo_name] = updpath_map[updpath]
			continue

		try:
			if if_mtime_changed:
				update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
			else:
				update_data = grab_updates(updpath)
		except DirectoryNotFound:
			continue
		myupd = []
		updpath_map[updpath] = myupd
		repo_map[repo_name] = myupd
		if len(update_data) > 0:
			for mykey, mystat, mycontent in update_data:
				if not update_notice_printed:
					update_notice_printed = True
					writemsg_stdout("\n")
					writemsg_stdout(colorize("GOOD",
						_("Performing Global Updates\n")))
					writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
					if not quiet:
						writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
							"%s='/var/db update'  %s='/var/db move'\n"
							"  %s='/var/db SLOT move'  %s='binary move'  "
							"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
							(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
				valid_updates, errors = parse_updates(mycontent)
				myupd.extend(valid_updates)
				if not quiet:
					writemsg_stdout(bold(mykey))
					writemsg_stdout(len(valid_updates) * "." + "\n")
				if len(errors) == 0:
					# Update our internal mtime since we
					# processed all of our directives.
					timestamps[mykey] = mystat[stat.ST_MTIME]
				else:
					for msg in errors:
						writemsg("%s\n" % msg, noiselevel=-1)
			if myupd:
				retupd = True

	if retupd:
		if os.access(bindb.bintree.pkgdir, os.W_OK):
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		else:
			bindb = None

	master_repo = portdb.repositories.mainRepo()
	if master_repo is not None:
		master_repo = master_repo.name
	if master_repo in repo_map:
		repo_map['DEFAULT'] = repo_map[master_repo]

	for repo_name, myupd in repo_map.items():
		if repo_name == 'DEFAULT':
			continue
		if not myupd:
			continue

		def repo_match(repository):
			return repository == repo_name or \
				(repo_name == master_repo and repository not in repo_map)

		def _world_repo_match(atoma, atomb):
			"""
			Check whether to perform a world change from atoma to atomb.
			If best vardb match for atoma comes from the same repository
			as the update file, allow that. Additionally, if portdb still
			can find a match for old atom name, warn about that.
			"""
			matches = vardb.match(atoma)
			if not matches:
				matches = vardb.match(atomb)
			if matches and \
				repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
				if portdb.match(atoma):
					world_warnings.add((atoma, atomb))
				return True
			return False

		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					if _world_repo_match(atom, new_atom):
						world_list[pos] = new_atom
						world_modified = True

		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd, repo_match=repo_match)
				if moves:
					writemsg_stdout(moves * "@")
				if bindb:
					moves = bindb.move_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
				if moves:
					writemsg_stdout(moves * "s")
				if bindb:
					moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "S")

	if world_modified:
		world_list.sort()
		write_atomic(world_file,
			"".join("%s\n" % (x,) for x in world_list))
		if world_warnings:
			# XXX: print warning that we've updated world entries
			# and the old name still matches something (from an overlay)?
			pass

	if retupd:

		def _config_repo_match(repo_name, atoma, atomb):
			"""
			Check whether to perform a world change from atoma to atomb.
			If best vardb match for atoma comes from the same repository
			as the update file, allow that. Additionally, if portdb still
			can find a match for old atom name, warn about that.
			"""
			matches = vardb.match(atoma)
			if not matches:
				matches = vardb.match(atomb)
				if not matches:
					return False
			repository = vardb.aux_get(best(matches), ['repository'])[0]
			return repository == repo_name or \
				(repo_name == master_repo and repository not in repo_map)

		update_config_files(root,
			shlex_split(mysettings.get("CONFIG_PROTECT", "")),
			shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
			repo_map, match_callback=_config_repo_match,
			case_insensitive="case-insensitive-fs"
			in mysettings.features)

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if timestamps:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.items():
				prev_mtimes[mykey] = mtime

		do_upgrade_packagesmessage = False
		# We gotta do the brute force updates for these now.
		if True:
			def onUpdate(_maxval, curval):
				if curval > 0:
					writemsg_stdout("#")
			if quiet:
				onUpdate = None
			vardb.update_ents(repo_map, onUpdate=onUpdate)
			if bindb:
				def onUpdate(_maxval, curval):
					if curval > 0:
						writemsg_stdout("*")
				if quiet:
					onUpdate = None
				bindb.update_ents(repo_map, onUpdate=onUpdate)
		else:
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows.
		writemsg_stdout("\n\n")

		if do_upgrade_packagesmessage and bindb and \
			bindb.cpv_all():
			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
			writemsg_stdout(bold(_("Note: This can take a very long time.")))
			writemsg_stdout("\n")

	return retupd
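
For context, the update files parsed above contain lines such as "move old/cp new/cp" and "slotmove <atom> <oldslot> <newslot>". A rough sketch of splitting a move entry (this is not portage's parse_updates, which also validates atoms):

line = "move app-misc/foo app-misc/bar"  # hypothetical update entry
update_cmd = line.split()
if update_cmd[0] == "move":
	_, old_cp, new_cp = update_cmd
	print("rename %s -> %s" % (old_cp, new_cp))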
Example #33
    def _add_repositories(portdir, portdir_overlay, prepos, ignored_map,
                          local_config, default_portdir):
        """Add overlays in PORTDIR_OVERLAY as repositories"""
        overlays = []
        portdir_orig = None
        if portdir:
            portdir = normalize_path(portdir)
            portdir_orig = portdir
            overlays.append(portdir)
        try:
            port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
        except ValueError as e:
            #File "/usr/lib/python3.2/shlex.py", line 168, in read_token
            #	raise ValueError("No closing quotation")
            writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
                       " %s: %s\n") % (e, portdir_overlay),
                     noiselevel=-1)
            port_ov = []
        overlays.extend(port_ov)
        default_repo_opts = {}
        if prepos['DEFAULT'].aliases is not None:
            default_repo_opts['aliases'] = \
             ' '.join(prepos['DEFAULT'].aliases)
        if prepos['DEFAULT'].eclass_overrides is not None:
            default_repo_opts['eclass-overrides'] = \
             ' '.join(prepos['DEFAULT'].eclass_overrides)
        if prepos['DEFAULT'].masters is not None:
            default_repo_opts['masters'] = \
             ' '.join(prepos['DEFAULT'].masters)

        if overlays:
            # We need a copy of the original repos.conf data, since we're
            # going to modify the prepos dict and some of the RepoConfig
            # objects that we put in prepos may have to be discarded if
            # they get overridden by a repository with the same name but
            # a different location. This is common with repoman, for example,
            # when temporarily overriding an rsync repo with another copy
            # of the same repo from CVS.
            repos_conf = prepos.copy()
            # overlay priority is negative because we want overlays to be looked up before any other repo
            base_priority = 0
            for ov in overlays:
                # Ignore missing directory for 'gentoo' so that
                # first sync with emerge-webrsync is possible.
                if isdir_raise_eaccess(ov) or \
                 (base_priority == 0 and ov is portdir):
                    repo_opts = default_repo_opts.copy()
                    repo_opts['location'] = ov
                    repo = RepoConfig(None,
                                      repo_opts,
                                      local_config=local_config)
                    # repos_conf_opts contains options from repos.conf
                    repos_conf_opts = repos_conf.get(repo.name)
                    if repos_conf_opts is not None:
                        # Selectively copy only the attributes which
                        # repos.conf is allowed to override.
                        for k in ('aliases', 'auto_sync', 'eclass_overrides',
                                  'force', 'masters', 'priority', 'sync_depth',
                                  'sync_hooks_only_on_change', 'sync_type',
                                  'sync_umask', 'sync_uri', 'sync_user',
                                  'module_specific_options'):
                            v = getattr(repos_conf_opts, k, None)
                            if v is not None:
                                setattr(repo, k, v)

                    if repo.name in prepos:
                        # Silently ignore when PORTDIR overrides the location
                        # setting from the default repos.conf (bug #478544).
                        old_location = prepos[repo.name].location
                        if old_location is not None and \
                         old_location != repo.location and \
                         not (base_priority == 0 and
                         old_location == default_portdir):
                            ignored_map.setdefault(repo.name,
                                                   []).append(old_location)
                            if old_location == portdir:
                                portdir = repo.location

                    if repo.priority is None:
                        if base_priority == 0 and ov == portdir_orig:
                            # If it's the original PORTDIR setting and it's not
                            # in PORTDIR_OVERLAY, then it will be assigned a
                            # special priority setting later.
                            pass
                        else:
                            repo.priority = base_priority
                            base_priority += 1

                    prepos[repo.name] = repo
                else:

                    if not portage._sync_mode:
                        writemsg(
                            _("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n"
                              ) % ov,
                            noiselevel=-1)

        return portdir
Example #34
def hardlink_lockfile(lockfilename,
                      max_wait=DeprecationWarning,
                      waiting_msg=None,
                      flags=0):
    """Does the NFS, hardlink shuffle to ensure locking on the disk.
    We create a PRIVATE hardlink to the real lockfile, that is just a
    placeholder on the disk.
    If our file has 2 references, then we have the lock. :)
    Otherwise we lather, rinse, and repeat.
    """

    if max_wait is not DeprecationWarning:
        warnings.warn(
            "The 'max_wait' parameter of "
            "portage.locks.hardlink_lockfile() is now unused. Use "
            "flags=os.O_NONBLOCK instead.",
            DeprecationWarning,
            stacklevel=2,
        )

    global _quiet
    out = None
    displayed_waiting_msg = False
    preexisting = os.path.exists(lockfilename)
    myhardlock = hardlock_name(lockfilename)

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # myhardlock must not exist prior to our link() call, and we can
    # safely unlink it since its file name is unique to our PID
    try:
        os.unlink(myhardlock)
    except OSError as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            pass
        else:
            func_call = "unlink('%s')" % myhardlock
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            else:
                raise

    while True:
        # create lockfilename if it doesn't exist yet
        try:
            myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
        except OSError as e:
            func_call = "open('%s')" % lockfilename
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            elif e.errno == ReadOnlyFileSystem.errno:
                raise ReadOnlyFileSystem(func_call)
            else:
                raise
        else:
            myfd_st = None
            try:
                myfd_st = os.fstat(myfd)
                if not preexisting:
                    # Don't chown the file if it is preexisting, since we
                    # want to preserve existing permissions in that case.
                    if portage.data.secpass >= 1 and myfd_st.st_gid != portage_gid:
                        os.fchown(myfd, -1, portage_gid)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    writemsg(
                        "%s: fchown('%s', -1, %d)\n" %
                        (e, lockfilename, portage_gid),
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Cannot chown a lockfile: '%s'\n") % lockfilename,
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Group IDs of current user: %s\n") %
                        " ".join(str(n) for n in os.getgroups()),
                        noiselevel=-1,
                    )
                else:
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
            finally:
                os.close(myfd)

            # If fstat shows more than one hardlink, then it's extremely
            # unlikely that the following link call will result in a lock,
            # so optimize away the wasteful link call and sleep or raise
            # TryAgain.
            if myfd_st is not None and myfd_st.st_nlink < 2:
                try:
                    os.link(lockfilename, myhardlock)
                except OSError as e:
                    func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
                    if e.errno == OperationNotPermitted.errno:
                        raise OperationNotPermitted(func_call)
                    elif e.errno == PermissionDenied.errno:
                        raise PermissionDenied(func_call)
                    elif e.errno in (errno.ESTALE, errno.ENOENT):
                        # another process has removed the file, so we'll have
                        # to create it again
                        continue
                    else:
                        raise
                else:
                    if hardlink_is_mine(myhardlock, lockfilename):
                        if out is not None:
                            out.eend(os.EX_OK)
                        break

                    try:
                        os.unlink(myhardlock)
                    except OSError as e:
                        # This should not happen, since the file name of
                        # myhardlock is unique to our host and PID,
                        # and the above link() call succeeded.
                        if e.errno not in (errno.ENOENT, errno.ESTALE):
                            raise
                        raise FileNotFound(myhardlock)

        if flags & os.O_NONBLOCK:
            raise TryAgain(lockfilename)

        if out is None and not _quiet:
            out = portage.output.EOutput()
        if out is not None and not displayed_waiting_msg:
            displayed_waiting_msg = True
            if waiting_msg is None:
                waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)

        time.sleep(_HARDLINK_POLL_LATENCY)

    return True
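
The link()-based protocol above can be exercised in isolation: a lock is held exactly when the per-PID name and the shared lock file are two links to the same inode. A minimal sketch of that check (try_hardlink_lock is an illustrative name, not a portage API; real callers retry and sleep as the loop above does):

import os
import socket

def try_hardlink_lock(lockfilename):
    # Name unique to this host and PID, mirroring myhardlock above.
    myhardlock = "%s.%s-%d" % (lockfilename, socket.gethostname(), os.getpid())
    fd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
    os.close(fd)
    try:
        # link() is atomic even on NFS, which is why the code above
        # prefers it when fcntl locks are unavailable.
        os.link(lockfilename, myhardlock)
    except OSError:
        return None
    if os.stat(myhardlock).st_nlink == 2:
        return myhardlock  # lock held; unlink both names to release
    os.unlink(myhardlock)  # lost the race
    return None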
Beispiel #35
def _prepare_features_dirs(mysettings):

    # Use default ABI libdir in accordance with bug #355283.
    libdir = None
    default_abi = mysettings.get("DEFAULT_ABI")
    if default_abi:
        libdir = mysettings.get("LIBDIR_" + default_abi)
    if not libdir:
        libdir = "lib"

    features_dirs = {
        "ccache": {
            "basedir_var": "CCACHE_DIR",
            "default_dir": os.path.join(mysettings["PORTAGE_TMPDIR"],
                                        "ccache"),
            "always_recurse": False
        },
        "distcc": {
            "basedir_var": "DISTCC_DIR",
            "default_dir": os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
            "subdirs": ("lock", "state"),
            "always_recurse": True
        }
    }
    dirmode = 0o2070
    filemode = 0o60
    modemask = 0o2
    restrict = mysettings.get("PORTAGE_RESTRICT", "").split()
    droppriv = secpass >= 2 and \
     "userpriv" in mysettings.features and \
     "userpriv" not in restrict
    for myfeature, kwargs in features_dirs.items():
        if myfeature in mysettings.features:
            failure = False
            basedir = mysettings.get(kwargs["basedir_var"])
            if basedir is None or not basedir.strip():
                basedir = kwargs["default_dir"]
                mysettings[kwargs["basedir_var"]] = basedir
            try:
                mydirs = [mysettings[kwargs["basedir_var"]]]
                if "subdirs" in kwargs:
                    for subdir in kwargs["subdirs"]:
                        mydirs.append(os.path.join(basedir, subdir))
                for mydir in mydirs:
                    modified = ensure_dirs(mydir)
                    # Generally, we only want to apply permissions for
                    # initial creation.  Otherwise, we don't know exactly what
                    # permissions the user wants, so should leave them as-is.
                    droppriv_fix = False
                    if droppriv:
                        st = os.stat(mydir)
                        if st.st_gid != portage_gid or \
                         not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
                            droppriv_fix = True
                        if not droppriv_fix:
                            # Check permissions of files in the directory.
                            for filename in os.listdir(mydir):
                                try:
                                    subdir_st = os.lstat(
                                        os.path.join(mydir, filename))
                                except OSError:
                                    continue
                                if subdir_st.st_gid != portage_gid or \
                                 ((stat.S_ISDIR(subdir_st.st_mode) and \
                                 not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
                                    droppriv_fix = True
                                    break

                    if droppriv_fix:
                        _adjust_perms_msg(mysettings,
                         colorize("WARN", " * ") + \
                         _("Adjusting permissions "
                         "for FEATURES=userpriv: '%s'\n") % mydir)
                    elif modified:
                        _adjust_perms_msg(mysettings,
                         colorize("WARN", " * ") + \
                         _("Adjusting permissions "
                         "for FEATURES=%s: '%s'\n") % (myfeature, mydir))

                    if modified or kwargs["always_recurse"] or droppriv_fix:

                        def onerror(e):
                            # The feature is disabled if a single error
                            # occurs during permissions adjustment.
                            raise

                        if not apply_recursive_permissions(mydir,
                                                           gid=portage_gid,
                                                           dirmode=dirmode,
                                                           dirmask=modemask,
                                                           filemode=filemode,
                                                           filemask=modemask,
                                                           onerror=onerror):
                            raise OperationNotPermitted(
                                _("Failed to apply recursive permissions for the portage group."
                                  ))

            except DirectoryNotFound as e:
                failure = True
                writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
                 (e,), noiselevel=-1)
                writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
                         noiselevel=-1)

            except PortageException as e:
                failure = True
                writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
                writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
                 (kwargs["basedir_var"], basedir), noiselevel=-1)
                writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
                         noiselevel=-1)

            if failure:
                mysettings.features.remove(myfeature)
                time.sleep(5)
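
apply_recursive_permissions() is portage's own helper; its effect here, forcing the portage group and the dirmode/filemode bits onto an existing tree, can be approximated with os.walk(). A rough sketch under that assumption (walk_apply_perms is illustrative; the real helper also covers the top directory and routes failures through onerror):

import os
import stat

def walk_apply_perms(top, gid, dirmode=0o2070, filemode=0o60, modemask=0o2):
    # Add the wanted mode bits, clear the masked ones, and chown to gid.
    for parent, dirs, files in os.walk(top):
        for name in dirs + files:
            path = os.path.join(parent, name)
            st = os.lstat(path)
            if stat.S_ISLNK(st.st_mode):
                continue  # never chmod/chown through symlinks
            if st.st_gid != gid:
                os.chown(path, -1, gid)
            bits = dirmode if stat.S_ISDIR(st.st_mode) else filemode
            mode = (stat.S_IMODE(st.st_mode) | bits) & ~modemask
            if mode != stat.S_IMODE(st.st_mode):
                os.chmod(path, mode)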
Beispiel #36
def validate_desktop_entry(path):
    args = ["desktop-file-validate", path]

    if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
        # Python 3.1 does not support bytes in Popen args.
        args = [_unicode_encode(x, errors='strict') for x in args]
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
    proc.wait()

    if output_lines:
        filtered_output = []
        for line in output_lines:
            if line[len(path) + 2:] in _ignored_errors:
                continue
            filtered_output.append(line)
        output_lines = filtered_output

    if output_lines:
        output_lines = [
            line for line in output_lines
            if _trivial_warnings.search(line) is None
        ]

    return output_lines


if __name__ == "__main__":
    for arg in sys.argv[1:]:
        for line in validate_desktop_entry(arg):
            writemsg(line + "\n", noiselevel=-1)
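
The run-and-filter pattern above is straightforward to reproduce with subprocess.run(). A standalone sketch (the noise regex is purely illustrative; the real module keeps its patterns in _ignored_errors and _trivial_warnings):

import re
import subprocess

_noise = re.compile(r"hint:")  # illustrative placeholder pattern

def filtered_validate(path):
    # Run the validator and drop lines matching known-noise patterns.
    proc = subprocess.run(["desktop-file-validate", path],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                          text=True)
    return [line for line in proc.stdout.splitlines()
            if _noise.search(line) is None]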
Beispiel #37
def _lockfile_iteration(mypath,
                        wantnewlockfile=False,
                        unlinkfile=False,
                        waiting_msg=None,
                        flags=0):
    """
    Acquire a lock on mypath, without retry. Return None if the lockfile
    was removed by the previous lock holder (caller must retry).

    @param mypath: lock file path
    @type mypath: str
    @param wantnewlockfile: use a separate new lock file
    @type wantnewlockfile: bool
    @param unlinkfile: remove lock file prior to unlock
    @type unlinkfile: bool
    @param waiting_msg: message to show before blocking
    @type waiting_msg: str
    @param flags: lock flags (only supports os.O_NONBLOCK)
    @type flags: int
    @rtype: tuple
    @return: unlockfile tuple on success, None if retry is needed
    """
    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, str) and mypath[-1] == "/":
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, "fileno"):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        lockfilename_path = getattr(mypath, "name", None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        lockfilename = mypath
        wantnewlockfile = False
        unlinkfile = False
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = True
    else:
        lockfilename = mypath

    if isinstance(mypath, str):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            while True:
                try:
                    myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
                except OSError as e:
                    if e.errno in (errno.ENOENT,
                                   errno.ESTALE) and os.path.isdir(
                                       os.path.dirname(lockfilename)):
                        # Retry required for NFS (see bug 636798).
                        continue
                    else:
                        _raise_exc(e)
                else:
                    break

            if not preexisting:
                try:
                    if (portage.data.secpass >= 1
                            and os.stat(lockfilename).st_gid != portage_gid):
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        os.close(myfd)
                        return None
                    writemsg(
                        "%s: chown('%s', -1, %d)\n" %
                        (e, lockfilename, portage_gid),
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Cannot chown a lockfile: '%s'\n") % lockfilename,
                        noiselevel=-1,
                    )
                    writemsg(
                        _("Group IDs of current user: %s\n") %
                        " ".join(str(n) for n in os.getgroups()),
                        noiselevel=-1,
                    )
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(
            _("Unknown type passed in '%s': '%s'") % (type(mypath), mypath))

    # try for a non-blocking lock, if it's held, throw a message
    # we're waiting on lockfile and use a blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = (_("Error while waiting "
                                                  "to lock fd %i") % myfd)
                            else:
                                context_desc = (_("Error while waiting "
                                                  "to lock '%s'") %
                                                lockfilename)
                            writemsg("\n!!! %s: %s\n" % (context_desc, e),
                                     noiselevel=-1)

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS, ):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(lockfilename_path,
                                                encoding=_encodings["fs"],
                                                errors="strict")
            if not isinstance(lockfilename_path, str):
                raise
            link_success = hardlink_lockfile(lockfilename_path,
                                             waiting_msg=waiting_msg,
                                             flags=flags)
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise

    fstat_result = None
    if isinstance(lockfilename, str) and myfd != HARDLINK_FD and unlinkfile:
        try:
            (removed, fstat_result) = _lockfile_was_removed(myfd, lockfilename)
        except Exception:
            # Do not leak the file descriptor here.
            os.close(myfd)
            raise
        else:
            if removed:
                # Removed by previous lock holder... Caller will retry...
                os.close(myfd)
                return None

    if myfd != HARDLINK_FD:
        _lock_manager(myfd,
                      os.fstat(myfd) if fstat_result is None else fstat_result,
                      mypath)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
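
The try-nonblocking-then-block dance at the heart of _lockfile_iteration() can be reduced to a few lines. A minimal POSIX sketch (lock_path is an illustrative name; portage additionally wraps the call against EINTR and falls back to hardlink locks on ENOSYS):

import errno
import fcntl
import os

def lock_path(path, nonblock=False):
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        # First try without blocking, so we can report contention.
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            os.close(fd)
            raise
        if nonblock:
            os.close(fd)
            raise
        fcntl.lockf(fd, fcntl.LOCK_EX)  # block until the holder releases
    return fd  # closing fd releases the lock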
Beispiel #38
def output(s):
    writemsg(s, noiselevel=-1)
Beispiel #39
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
                writemsg_level):
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(
        os.sep) + os.sep
    envd_dir = os.path.join(eroot, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set([
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "CLASSPATH", "INFODIR",
        "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", "PATH", "PKG_CONFIG_PATH",
        "PRELINK_PATH", "PRELINK_PATH_MASK", "PYTHONPATH", "ROOTPATH"
    ])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and item not in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and item not in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        """Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
        env.update(myconfig)

    ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
                                       encoding=_encodings['fs'],
                                       errors='strict'),
                       mode='r',
                       encoding=_encodings['content'],
                       errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            #each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if oldld != newld:
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write(
            "# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    potential_lib_dirs = set()
    for lib_dir_glob in ('usr/lib*', 'lib*'):
        x = os.path.join(eroot, lib_dir_glob)
        for y in glob.glob(
                _unicode_encode(x, encoding=_encodings['fs'],
                                errors='strict')):
            try:
                y = _unicode_decode(y,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                continue
            if os.path.basename(y) != 'libexec':
                potential_lib_dirs.add(y[len(eroot):])

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
        ensure_dirs(prelink_d)
        newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
        newprelink.write(
            "# prelink.conf autogenerated by env-update; make all changes to\n"
        )
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
            newprelink.write('-l /%s\n' % (x, ))
        prelink_paths = set()
        prelink_paths |= set(specials.get('LDPATH', []))
        prelink_paths |= set(specials.get('PATH', []))
        prelink_paths |= set(specials.get('PRELINK_PATH', []))
        prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x, ))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x, ))
        newprelink.close()

        # Migration code path.  If /etc/prelink.conf was generated by us, then
        # point it to the new stuff until the prelink package re-installs.
        prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
        try:
            with open(
                    _unicode_encode(prelink_conf,
                                    encoding=_encodings['fs'],
                                    errors='strict'), 'rb') as f:
                if f.readline(
                ) == b'# prelink.conf autogenerated by env-update; make all changes to\n':
                    f = atomic_ofstream(prelink_conf)
                    f.write('-c /etc/prelink.conf.d/*.conf\n')
                    f.close()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise

    current_time = int(time.time())
    mtime_changed = False

    lib_dirs = set()
    for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
        x = os.path.join(eroot, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if makelinks and \
     not mtime_changed and \
     contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    if "CHOST" in settings and "CBUILD" in settings and \
     settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
    else:
        ldconfig = os.path.join(eroot, "sbin", "ldconfig")

    if ldconfig is None:
        pass
    elif not (os.access(ldconfig, os.X_OK) and os.path.isfile(ldconfig)):
        ldconfig = None

    # Only run ldconfig as needed
    if makelinks and ldconfig:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions first, as
            # an older package installed ON TOP of a newer version will cause ldconfig
            # to overwrite the symlinks we just made. -X means no links. After 'clean'
            # we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
             (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
             target_root)
            os.system(("cd / ; %s -elf -i " + \
             "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
             (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    notice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    notice += "# DO NOT EDIT THIS FILE."
    penvnotice = notice + " CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    profile_env_path = os.path.join(eroot, "etc", "profile.env")
    with atomic_ofstream(profile_env_path) as outfile:
        outfile.write(penvnotice)

        env_keys = [x for x in env if x != "LDPATH"]
        env_keys.sort()
        for k in env_keys:
            v = env[k]
            if v.startswith('$') and not v.startswith('${'):
                outfile.write("export %s=$'%s'\n" % (k, v[1:]))
            else:
                outfile.write("export %s='%s'\n" % (k, v))

    # Create the systemd user environment configuration file
    # /etc/environment.d/10-gentoo-env.conf with the
    # environment configuration from /etc/env.d.
    systemd_environment_dir = os.path.join(eroot, "etc", "environment.d")
    os.makedirs(systemd_environment_dir, exist_ok=True)

    systemd_gentoo_env_path = os.path.join(systemd_environment_dir,
                                           "10-gentoo-env.conf")
    with atomic_ofstream(systemd_gentoo_env_path) as systemd_gentoo_env:
        senvnotice = notice + "\n\n"
        systemd_gentoo_env.write(senvnotice)

        for env_key in env_keys:
            env_key_value = env[env_key]

            # Skip variables with the empty string
            # as value. Those sometimes appear in
            # profile.env (e.g. "export GCC_SPECS=''"),
            # but are invalid in systemd's syntax.
            if not env_key_value:
                continue

            # Transform into systemd environment.d
            # conf syntax, basically shell variable
            # assignment (without "export ").
            line = f"{env_key}={env_key_value}\n"

            systemd_gentoo_env.write(line)

    #create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
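
The core of _env_update() is merging env.d snippets into single PATH-like variables. A reduced sketch of just that merge, assuming plain KEY=value lines (portage's getconfig() handles quoting, expansion, and error reporting):

import glob
import os

def merge_envd(envd_dir, colon_separated=("PATH", "LDPATH", "MANPATH")):
    merged = {}
    # env.d files are processed in sorted (numeric-prefix) order.
    for path in sorted(glob.glob(os.path.join(envd_dir, "[0-9][0-9]*"))):
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#") or "=" not in line:
                    continue
                key, _, value = line.partition("=")
                value = value.strip("'\"")
                if key in colon_separated:
                    items = merged.setdefault(key, [])
                    for v in value.split(":"):
                        if v and v not in items:
                            items.append(v)
                else:
                    merged[key] = value  # later files win for scalar values
    return {k: ":".join(v) if isinstance(v, list) else v
            for k, v in merged.items()}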
Beispiel #40
def deprecated_profile_check(settings=None):
    config_root = None
    eprefix = None
    deprecated_profile_file = None
    if settings is not None:
        config_root = settings["PORTAGE_CONFIGROOT"]
        eprefix = settings["EPREFIX"]
        for x in reversed(settings.profiles):
            deprecated_profile_file = os.path.join(x, "deprecated")
            if os.access(deprecated_profile_file, os.R_OK):
                break
        else:
            deprecated_profile_file = None

    if deprecated_profile_file is None:
        deprecated_profile_file = os.path.join(config_root or "/",
                                               DEPRECATED_PROFILE_FILE)
        if not os.access(deprecated_profile_file, os.R_OK):
            deprecated_profile_file = os.path.join(config_root or "/", 'etc',
                                                   'make.profile',
                                                   'deprecated')
            if not os.access(deprecated_profile_file, os.R_OK):
                return

    with io.open(_unicode_encode(deprecated_profile_file,
                                 encoding=_encodings['fs'],
                                 errors='strict'),
                 mode='r',
                 encoding=_encodings['content'],
                 errors='replace') as f:
        dcontent = f.readlines()
    writemsg(colorize(
        "BAD",
        _("\n!!! Your current profile is "
          "deprecated and not supported anymore.")) + "\n",
             noiselevel=-1)
    writemsg(
        colorize("BAD", _("!!! Use eselect profile to update your "
                          "profile.")) + "\n",
        noiselevel=-1)
    if not dcontent:
        writemsg(colorize(
            "BAD", _("!!! Please refer to the "
                     "Gentoo Upgrading Guide.")) + "\n",
                 noiselevel=-1)
        return True
    newprofile = dcontent[0].rstrip("\n")
    writemsg(colorize(
        "BAD", _("!!! Please upgrade to the "
                 "following profile if possible:")) + "\n\n",
             noiselevel=-1)
    writemsg(8 * " " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1)
    if len(dcontent) > 1:
        writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
        for myline in dcontent[1:]:
            writemsg(myline, noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)
    else:
        writemsg(_("You may use the following command to upgrade:\n\n"),
                 noiselevel=-1)
        writemsg(8 * " " +
                 colorize("INFORM", 'eselect profile set ' + newprofile) +
                 "\n\n",
                 noiselevel=-1)

    if settings is not None:
        main_repo_loc = settings.repositories.mainRepoLocation()
        new_profile_path = os.path.join(main_repo_loc, "profiles",
                                        newprofile.rstrip("\n"))

        if os.path.isdir(new_profile_path):
            new_config = portage.config(config_root=config_root,
                                        config_profile_path=new_profile_path,
                                        eprefix=eprefix)

            if not new_config.profiles:
                writemsg("\n %s %s\n" %
                         (colorize("WARN", "*"),
                          _("You must update portage before you "
                            "can migrate to the above profile.")),
                         noiselevel=-1)
                writemsg(" %s %s\n\n" %
                         (colorize("WARN", "*"),
                          _("In order to update portage, "
                            "run 'emerge --oneshot sys-apps/portage'.")),
                         noiselevel=-1)

    return True
Beispiel #41
def digestcheck(myfiles, mysettings, strict=False, justmanifest=None):
    """
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""

    if justmanifest is not None:
        warnings.warn("The justmanifest parameter of the " + \
         "portage.package.ebuild.digestcheck.digestcheck()" + \
         " function is now unused.",
         DeprecationWarning, stacklevel=2)
        justmanifest = None

    if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
        return 1
    pkgdir = mysettings["O"]
    manifest_path = os.path.join(pkgdir, "Manifest")
    if not os.path.exists(manifest_path):
        writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
                 noiselevel=-1)
        if strict:
            return 0
        else:
            return 1
    mf = Manifest(pkgdir, mysettings["DISTDIR"])
    manifest_empty = True
    for d in mf.fhashdict.values():
        if d:
            manifest_empty = False
            break
    if manifest_empty:
        writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
                 noiselevel=-1)
        if strict:
            return 0
        else:
            return 1
    eout = EOutput()
    eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
    try:
        if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
            eout.ebegin(_("checking ebuild checksums ;-)"))
            mf.checkTypeHashes("EBUILD")
            eout.eend(0)
            eout.ebegin(_("checking auxfile checksums ;-)"))
            mf.checkTypeHashes("AUX")
            eout.eend(0)
            eout.ebegin(_("checking miscfile checksums ;-)"))
            mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
            eout.eend(0)
        for f in myfiles:
            eout.ebegin(_("checking %s ;-)") % f)
            ftype = mf.findFile(f)
            if ftype is None:
                raise KeyError(f)
            mf.checkFileHashes(ftype, f)
            eout.eend(0)
    except KeyError as e:
        eout.eend(1)
        writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
        return 0
    except FileNotFound as e:
        eout.eend(1)
        writemsg(
            _("\n!!! A file listed in the Manifest could not be found: %s\n") %
            str(e),
            noiselevel=-1)
        return 0
    except DigestException as e:
        eout.eend(1)
        writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
        writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
        writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
        writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
        writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
        return 0
    # Make sure that all of the ebuilds are actually listed in the Manifest.
    for f in os.listdir(pkgdir):
        pf = None
        if f[-7:] == '.ebuild':
            pf = f[:-7]
        if pf is not None and not mf.hasFile("EBUILD", f):
            writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
             os.path.join(pkgdir, f), noiselevel=-1)
            if strict:
                return 0
    """ epatch will just grab all the patches out of a directory, so we have to
	make sure there aren't any foreign files that it might grab."""
    filesdir = os.path.join(pkgdir, "files")

    for parent, dirs, files in os.walk(filesdir):
        try:
            parent = _unicode_decode(parent,
                                     encoding=_encodings['fs'],
                                     errors='strict')
        except UnicodeDecodeError:
            parent = _unicode_decode(parent,
                                     encoding=_encodings['fs'],
                                     errors='replace')
            writemsg(_("!!! Path contains invalid "
             "character(s) for encoding '%s': '%s'") \
             % (_encodings['fs'], parent), noiselevel=-1)
            if strict:
                return 0
            continue
        for d in dirs:
            d_bytes = d
            try:
                d = _unicode_decode(d,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                d = _unicode_decode(d,
                                    encoding=_encodings['fs'],
                                    errors='replace')
                writemsg(_("!!! Path contains invalid "
                 "character(s) for encoding '%s': '%s'") \
                 % (_encodings['fs'], os.path.join(parent, d)),
                 noiselevel=-1)
                if strict:
                    return 0
                dirs.remove(d_bytes)
                continue
            if d.startswith(".") or d == "CVS":
                dirs.remove(d_bytes)
        for f in files:
            try:
                f = _unicode_decode(f,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                f = _unicode_decode(f,
                                    encoding=_encodings['fs'],
                                    errors='replace')
                if f.startswith("."):
                    continue
                f = os.path.join(parent, f)[len(filesdir) + 1:]
                writemsg(_("!!! File name contains invalid "
                 "character(s) for encoding '%s': '%s'") \
                 % (_encodings['fs'], f), noiselevel=-1)
                if strict:
                    return 0
                continue
            if f.startswith("."):
                continue
            f = os.path.join(parent, f)[len(filesdir) + 1:]
            file_type = mf.findFile(f)
            if file_type != "AUX" and not f.startswith("digest-"):
                writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
                 os.path.join(filesdir, f), noiselevel=-1)
                if strict:
                    return 0
    return 1
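
The closing loop's rule, that everything under files/ must appear in the Manifest, can be stated compactly. A sketch assuming the Manifest is reduced to a set of relative AUX paths (unlisted_aux_files is illustrative, not a portage function):

import os

def unlisted_aux_files(filesdir, manifest_aux_paths):
    # Yield paths under files/ that the manifest set does not cover.
    for parent, dirs, files in os.walk(filesdir):
        # Prune VCS and hidden directories, as digestcheck() does.
        dirs[:] = [d for d in dirs if not d.startswith(".") and d != "CVS"]
        for name in files:
            if name.startswith(".") or name.startswith("digest-"):
                continue
            rel = os.path.relpath(os.path.join(parent, name), filesdir)
            if rel not in manifest_aux_paths:
                yield os.path.join(filesdir, rel)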
Beispiel #42
	def __init__(self, name, repo_opts, local_config=True):
		"""Build a RepoConfig with options in repo_opts
		   Try to read repo_name in repository location, but if
		   it is not found use variable name as repository name"""

		force = repo_opts.get('force')
		if force is not None:
			force = tuple(force.split())
		self.force = force
		if force is None:
			force = ()

		self.local_config = local_config

		if local_config or 'aliases' in force:
			aliases = repo_opts.get('aliases')
			if aliases is not None:
				aliases = tuple(aliases.split())
		else:
			aliases = None

		self.aliases = aliases

		if local_config or 'eclass-overrides' in force:
			eclass_overrides = repo_opts.get('eclass-overrides')
			if eclass_overrides is not None:
				eclass_overrides = tuple(eclass_overrides.split())
		else:
			eclass_overrides = None

		self.eclass_overrides = eclass_overrides
		# Eclass databases and locations are computed later.
		self.eclass_db = None
		self.eclass_locations = None

		if local_config or 'masters' in force:
			# Masters from repos.conf override layout.conf.
			masters = repo_opts.get('masters')
			if masters is not None:
				masters = tuple(masters.split())
		else:
			masters = None

		self.masters = masters

		# The main-repo key only makes sense for the 'DEFAULT' section.
		self.main_repo = repo_opts.get('main-repo')

		priority = repo_opts.get('priority')
		if priority is not None:
			try:
				priority = int(priority)
			except ValueError:
				priority = None
		self.priority = priority

		sync_type = repo_opts.get('sync-type')
		if sync_type is not None:
			sync_type = sync_type.strip()
		self.sync_type = sync_type or None

		sync_umask = repo_opts.get('sync-umask')
		if sync_umask is not None:
			sync_umask = sync_umask.strip()
		self.sync_umask = sync_umask or None

		sync_uri = repo_opts.get('sync-uri')
		if sync_uri is not None:
			sync_uri = sync_uri.strip()
		self.sync_uri = sync_uri or None

		sync_user = repo_opts.get('sync-user')
		if sync_user is not None:
			sync_user = sync_user.strip()
		self.sync_user = sync_user or None

		auto_sync = repo_opts.get('auto-sync', 'yes')
		if auto_sync is not None:
			auto_sync = auto_sync.strip().lower()
		self.auto_sync = auto_sync

		self.clone_depth = repo_opts.get('clone-depth')
		self.sync_depth = repo_opts.get('sync-depth')

		self.sync_hooks_only_on_change = repo_opts.get(
			'sync-hooks-only-on-change', 'false').lower() in ('true', 'yes')

		self.strict_misc_digests = repo_opts.get(
			'strict-misc-digests', 'true').lower() in ('true', 'yes')

		self.sync_allow_hardlinks = repo_opts.get(
			'sync-allow-hardlinks', 'true').lower() in ('true', 'yes')

		self.sync_openpgp_keyserver = repo_opts.get(
			'sync-openpgp-keyserver', '').strip().lower() or None

		self.sync_openpgp_key_path = repo_opts.get(
			'sync-openpgp-key-path', None)

		self.sync_openpgp_key_refresh = repo_opts.get(
			'sync-openpgp-key-refresh', 'true').lower() in ('true', 'yes')

		for k in ('sync_openpgp_key_refresh_retry_count',
			'sync_openpgp_key_refresh_retry_delay_exp_base',
			'sync_openpgp_key_refresh_retry_delay_max',
			'sync_openpgp_key_refresh_retry_delay_mult',
			'sync_openpgp_key_refresh_retry_overall_timeout'):
			setattr(self, k, repo_opts.get(k.replace('_', '-'), None))

		self.sync_rcu = repo_opts.get(
			'sync-rcu', 'false').lower() in ('true', 'yes')

		self.sync_rcu_store_dir = repo_opts.get('sync-rcu-store-dir')

		for k in ('sync-rcu-spare-snapshots', 'sync-rcu-ttl-days'):
			v = repo_opts.get(k, '').strip() or None
			if v:
				try:
					v = int(v)
				except (OverflowError, ValueError):
					writemsg(_("!!! Invalid %s setting for repo"
						" %s: %s\n") % (k, name, v), noiselevel=-1)
					v = None
			setattr(self, k.replace('-', '_'), v)

		self.module_specific_options = {}

		# Not implemented.
		repo_format = repo_opts.get('format')
		if repo_format is not None:
			repo_format = repo_format.strip()
		self.format = repo_format

		self.user_location = None
		location = repo_opts.get('location')
		if location is not None and location.strip():
			if os.path.isdir(location) or portage._sync_mode:
				# The user_location is required for sync-rcu support,
				# since it manages a symlink which resides at that
				# location (and realpath is irreversible).
				self.user_location = location
				location = os.path.realpath(location)
		else:
			location = None
		self.location = location

		missing = True
		self.name = name
		if self.location is not None:
			self.name, missing = self._read_valid_repo_name(self.location)
			if missing:
				# The name from repos.conf has to be used here for
				# things like emerge-webrsync to work when the repo
				# is empty (bug #484950).
				if name is not None:
					self.name = name
				if portage._sync_mode:
					missing = False

		elif name == "DEFAULT":
			missing = False

		self.eapi = None
		self.missing_repo_name = missing
		# sign_commit is disabled by default, since it requires Git >=1.7.9,
		# and key_id configured by `git config user.signingkey key_id`
		self.sign_commit = False
		self.sign_manifest = True
		self.thin_manifest = False
		self.allow_missing_manifest = False
		self.allow_provide_virtual = False
		self.create_manifest = True
		self.disable_manifest = False
		self.manifest_hashes = None
		self.manifest_required_hashes = None
		self.update_changelog = False
		self.cache_formats = None
		self.portage1_profiles = True
		self.portage1_profiles_compat = False
		self.find_invalid_path_char = _find_invalid_path_char
		self._masters_orig = None

		# Parse layout.conf.
		if self.location:
			layout_data = parse_layout_conf(self.location, self.name)[0]
			self._masters_orig = layout_data['masters']

			# layout.conf masters may be overridden here if we have a masters
			# setting from the user's repos.conf
			if self.masters is None:
				self.masters = layout_data['masters']

			if (local_config or 'aliases' in force) and layout_data['aliases']:
				aliases = self.aliases
				if aliases is None:
					aliases = ()
				# repos.conf aliases come after layout.conf aliases, giving
				# them the ability to do incremental overrides
				self.aliases = layout_data['aliases'] + tuple(aliases)

			if layout_data['repo-name']:
				# allow layout.conf to override repository name
				# useful when having two copies of the same repo enabled
				# to avoid modifying profiles/repo_name in one of them
				self.name = layout_data['repo-name']
				self.missing_repo_name = False

			for value in ('allow-missing-manifest',
				'cache-formats',
				'create-manifest', 'disable-manifest', 'manifest-hashes',
				'manifest-required-hashes', 'profile-formats', 'properties-allowed', 'restrict-allowed',
				'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
				setattr(self, value.lower().replace("-", "_"), layout_data[value])

			# If profile-formats specifies a default EAPI, then set
			# self.eapi to that, otherwise set it to "0" as specified
			# by PMS.
			self.eapi = layout_data.get(
				'profile_eapi_when_unspecified', '0')

			eapi = read_corresponding_eapi_file(
				os.path.join(self.location, REPO_NAME_LOC),
				default=self.eapi)

			self.portage1_profiles = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
				any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
			self.portage1_profiles_compat = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
				layout_data['profile-formats'] == ('portage-1-compat',)

			self._eapis_banned = frozenset(layout_data['eapis-banned'])
			self._eapis_deprecated = frozenset(layout_data['eapis-deprecated'])
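
RepoConfig instances are normally built from repos.conf sections. A hedged sketch of that plumbing with configparser (the file path and loop are illustrative; the real loader also merges PORTAGE_REPOSITORIES and per-section defaults):

import configparser

def load_repo_opts(path):
    parser = configparser.ConfigParser()
    parser.read(path)
    # Each section name is a repository name; its key/value pairs
    # become the repo_opts dict that RepoConfig.__init__ expects.
    return {name: dict(parser.items(name)) for name in parser.sections()}

# Usage sketch:
# for name, opts in load_repo_opts("/etc/portage/repos.conf/gentoo.conf").items():
#     repo = RepoConfig(name, opts)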
Beispiel #43
def deprecated_profile_check(settings=None):
    config_root = "/"
    if settings is not None:
        config_root = settings["PORTAGE_CONFIGROOT"]
    deprecated_profile_file = os.path.join(config_root,
                                           DEPRECATED_PROFILE_FILE)
    if not os.access(deprecated_profile_file, os.R_OK):
        return False
    with io.open(_unicode_encode(deprecated_profile_file,
                                 encoding=_encodings['fs'],
                                 errors='strict'),
                 mode='r',
                 encoding=_encodings['content'],
                 errors='replace') as f:
        dcontent = f.readlines()
    writemsg(colorize(
        "BAD",
        _("\n!!! Your current profile is "
          "deprecated and not supported anymore.")) + "\n",
             noiselevel=-1)
    writemsg(
        colorize("BAD", _("!!! Use eselect profile to update your "
                          "profile.")) + "\n",
        noiselevel=-1)
    if not dcontent:
        writemsg(colorize(
            "BAD", _("!!! Please refer to the "
                     "Gentoo Upgrading Guide.")) + "\n",
                 noiselevel=-1)
        return True
    newprofile = dcontent[0].rstrip("\n")
    writemsg(colorize(
        "BAD", _("!!! Please upgrade to the "
                 "following profile if possible:")) + "\n",
             noiselevel=-1)
    writemsg(8 * " " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
    if len(dcontent) > 1:
        writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
        for myline in dcontent[1:]:
            writemsg(myline, noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)
    return True
Beispiel #44
    def __init__(self, paths, settings):
        """Load config from files in paths"""

        prepos = {}
        location_map = {}
        treemap = {}
        ignored_map = {}
        default_opts = {
            "EPREFIX": settings["EPREFIX"],
            "EROOT": settings["EROOT"],
            "PORTAGE_CONFIGROOT": settings["PORTAGE_CONFIGROOT"],
            "ROOT": settings["ROOT"],
        }

        if "PORTAGE_REPOSITORIES" in settings:
            portdir = ""
            portdir_overlay = ""
            # deprecated portdir_sync
            portdir_sync = ""
        else:
            portdir = settings.get("PORTDIR", "")
            portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
            # deprecated portdir_sync
            portdir_sync = settings.get("SYNC", "")

        default_opts['sync-rsync-extra-opts'] = \
         settings.get("PORTAGE_RSYNC_EXTRA_OPTS", None)

        try:
            self._parse(paths, prepos, settings.local_config, default_opts)
        except ConfigParserError as e:
            writemsg(_("!!! Error while reading repo config file: %s\n") % e,
                     noiselevel=-1)
            # The configparser state is unreliable (prone to quirky
            # exceptions) after it has thrown an error, so use empty
            # config and try to fall back to PORTDIR{,_OVERLAY}.
            prepos.clear()
            prepos['DEFAULT'] = RepoConfig('DEFAULT', {},
                                           local_config=settings.local_config)
            location_map.clear()
            treemap.clear()

        default_portdir = os.path.join(os.sep,
                                       settings['EPREFIX'].lstrip(os.sep),
                                       'usr', 'portage')

        # If PORTDIR_OVERLAY contains a repo with the same repo_name as
        # PORTDIR, then PORTDIR is overridden.
        portdir = self._add_repositories(portdir, portdir_overlay, prepos,
                                         ignored_map, settings.local_config,
                                         default_portdir)
        if portdir and portdir.strip():
            portdir = os.path.realpath(portdir)

        ignored_repos = tuple((repo_name, tuple(paths)) \
         for repo_name, paths in ignored_map.items())

        self.missing_repo_names = frozenset(
            repo.location for repo in prepos.values()
            if repo.location is not None and repo.missing_repo_name)

        # Do this before expanding aliases, so that location_map and
        # treemap consistently map unaliased names whenever available.
        for repo_name, repo in list(prepos.items()):
            if repo.location is None:
                if repo_name != 'DEFAULT':
                    # Skip this warning for repoman (bug #474578).
                    if settings.local_config and paths:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf is missing location attribute"
                        ) % repo.name,
                                       level=logging.ERROR,
                                       noiselevel=-1)
                    del prepos[repo_name]
                    continue
            else:
                if not portage._sync_mode:
                    if not isdir_raise_eaccess(repo.location):
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf has location attribute set "
                            "to nonexistent directory: '%s'") %
                                       (repo_name, repo.location),
                                       level=logging.ERROR,
                                       noiselevel=-1)

                        # Ignore missing directory for 'gentoo' so that
                        # first sync with emerge-webrsync is possible.
                        if repo.name != 'gentoo':
                            del prepos[repo_name]
                            continue

                    # After removing support for PORTDIR_OVERLAY, the following check can be:
                    # if repo.missing_repo_name:
                    if repo.missing_repo_name and repo.name != repo_name:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf refers to repository "
                            "without repository name set in '%s'") %
                                       (repo_name,
                                        os.path.join(repo.location,
                                                     REPO_NAME_LOC)),
                                       level=logging.ERROR,
                                       noiselevel=-1)
                        del prepos[repo_name]
                        continue

                    if repo.name != repo_name:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf has name different "
                            "from repository name '%s' set inside repository")
                                       % (repo_name, repo.name),
                                       level=logging.ERROR,
                                       noiselevel=-1)
                        del prepos[repo_name]
                        continue

                location_map[repo.location] = repo_name
                treemap[repo_name] = repo.location

        # Add alias mappings, but never replace unaliased mappings.
        for repo_name, repo in list(prepos.items()):
            names = set()
            names.add(repo_name)
            if repo.aliases:
                aliases = stack_lists([repo.aliases], incremental=True)
                names.update(aliases)

            for name in names:
                if name in prepos and prepos[name].location is not None:
                    if name == repo_name:
                        # unaliased names already handled earlier
                        continue
                    writemsg_level(_("!!! Repository name or alias '%s', " + \
                     "defined for repository '%s', overrides " + \
                     "existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
                    # Never replace an unaliased mapping with
                    # an aliased mapping.
                    continue
                prepos[name] = repo
                if repo.location is not None:
                    if repo.location not in location_map:
                        # Never replace an unaliased mapping with
                        # an aliased mapping.
                        location_map[repo.location] = name
                    treemap[name] = repo.location

        main_repo = prepos['DEFAULT'].main_repo
        if main_repo is None or main_repo not in prepos:
            # Set main_repo if it was not set in repos.conf.
            main_repo = location_map.get(portdir)
            if main_repo is not None:
                prepos['DEFAULT'].main_repo = main_repo
            else:
                prepos['DEFAULT'].main_repo = None
                if portdir and not portage._sync_mode:
                    writemsg(_(
                        "!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"
                    ),
                             noiselevel=-1)

        if main_repo is not None and prepos[main_repo].priority is None:
            # This happens if main-repo has been set in repos.conf.
            prepos[main_repo].priority = -1000

        # DEPRECATED Backward compatible SYNC support for old mirrorselect.
        # Feb. 2, 2015.  Version 2.2.16
        if portdir_sync and main_repo is not None:
            writemsg(_(
                "!!! SYNC setting found in make.conf.\n    "
                "This setting is Deprecated and no longer used.  "
                "Please ensure your 'sync-type' and 'sync-uri' are set correctly"
                " in /etc/portage/repos.conf/gentoo.conf\n"),
                     noiselevel=-1)

        # Include repo.name in sort key, for predictable sorting
        # even when priorities are equal.
        prepos_order = sorted(prepos.items(),
                              key=lambda r: (r[1].priority or 0, r[1].name))

        # filter duplicates from aliases, by only including
        # items where repo.name == key
        prepos_order = [
            repo.name for (key, repo) in prepos_order if repo.name == key
            and key != 'DEFAULT' and repo.location is not None
        ]

        self.prepos = prepos
        self.prepos_order = prepos_order
        self.ignored_repos = ignored_repos
        self.location_map = location_map
        self.treemap = treemap
        self._prepos_changed = True
        self._repo_location_list = []

        # The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue
            if repo.masters is None:
                if self.mainRepo() and repo_name != self.mainRepo().name:
                    repo.masters = (self.mainRepo(),)
                else:
                    repo.masters = ()
            else:
                if repo.masters and isinstance(repo.masters[0], RepoConfig):
                    # This one has already been processed
                    # because it has an alias.
                    continue
                master_repos = []
                for master_name in repo.masters:
                    if master_name not in prepos:
                        layout_filename = os.path.join(repo.location,
                                                       "metadata",
                                                       "layout.conf")
                        writemsg_level(_("Unavailable repository '%s' " \
                         "referenced by masters entry in '%s'\n") % \
                         (master_name, layout_filename),
                         level=logging.ERROR, noiselevel=-1)
                    else:
                        master_repos.append(prepos[master_name])
                repo.masters = tuple(master_repos)

        # The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            eclass_locations = []
            eclass_locations.extend(master_repo.location
                                    for master_repo in repo.masters)
            # Only append the current repo to eclass_locations if it's not
            # there already. This allows masters to have more control over
            # eclass override order, which may be useful for scenarios in
            # which there is a plan to migrate eclasses to a master repo.
            if repo.location not in eclass_locations:
                eclass_locations.append(repo.location)

            if repo.eclass_overrides:
                for other_repo_name in repo.eclass_overrides:
                    if other_repo_name in self.treemap:
                        eclass_locations.append(
                            self.get_location_for_name(other_repo_name))
                    else:
                        writemsg_level(_("Unavailable repository '%s' " \
                         "referenced by eclass-overrides entry for " \
                         "'%s'\n") % (other_repo_name, repo_name), \
                         level=logging.ERROR, noiselevel=-1)
            repo.eclass_locations = tuple(eclass_locations)

        eclass_dbs = {}
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            eclass_db = None
            for eclass_location in repo.eclass_locations:
                tree_db = eclass_dbs.get(eclass_location)
                if tree_db is None:
                    tree_db = eclass_cache.cache(eclass_location)
                    eclass_dbs[eclass_location] = tree_db
                if eclass_db is None:
                    eclass_db = tree_db.copy()
                else:
                    eclass_db.append(tree_db)
            repo.eclass_db = eclass_db

        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            if repo._masters_orig is None and self.mainRepo() and \
             repo.name != self.mainRepo().name and not portage._sync_mode:
                # TODO: Delete masters code in pym/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
                writemsg_level(
                    "!!! %s\n" %
                    _("Repository '%s' is missing masters attribute in '%s'") %
                    (repo.name,
                     os.path.join(repo.location, "metadata", "layout.conf")) +
                    "!!! %s\n" %
                    _("Set 'masters = %s' in this file for future compatibility"
                      ) % self.mainRepo().name,
                    level=logging.WARNING,
                    noiselevel=-1)

        self._prepos_changed = True
        self._repo_location_list = []

        self._check_locations()
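
# --- Hedged usage sketch (not from portage): the sort key above orders
# repositories by (priority, name), treating an unset priority as 0 so the
# ordering stays predictable when priorities are equal. RepoStub is a
# hypothetical stand-in for RepoConfig, used for illustration only.
from collections import namedtuple

RepoStub = namedtuple("RepoStub", "name priority")
prepos_demo = {
    "gentoo": RepoStub("gentoo", -1000),
    "local": RepoStub("local", None),
    "overlay": RepoStub("overlay", 50),
}
order = sorted(prepos_demo.items(),
               key=lambda r: (r[1].priority or 0, r[1].name))
print([name for name, _ in order])  # ['gentoo', 'local', 'overlay']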
Example #45
def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
	"""
	The myroot parameter is ignored.
	"""
	myroot = None

	if settings is None:
		raise TypeError("settings argument is required")

	mysettings = settings
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
	if cleanup and 'keeptemp' not in mysettings.features:
		clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
		try:
			shutil.rmtree(clean_dir)
		except OSError as oe:
			if errno.ENOENT == oe.errno:
				pass
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
					clean_dir, noiselevel=-1)
				return 1
			else:
				raise

	def makedirs(dir_path):
		try:
			os.makedirs(dir_path)
		except OSError as oe:
			if errno.EEXIST == oe.errno:
				pass
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
					dir_path, noiselevel=-1)
				return False
			else:
				raise
		return True

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

	try:
		for mydir in mydirs:
			ensure_dirs(mydir)
			try:
				apply_secpass_permissions(mydir,
					gid=portage_gid, uid=portage_uid, mode=0o700, mask=0)
			except PortageException:
				if not os.path.isdir(mydir):
					raise
		for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
			ensure_dirs(mysettings[dir_key], mode=0o755)
			apply_secpass_permissions(mysettings[dir_key],
				uid=portage_uid, gid=portage_gid)
	except PermissionDenied as e:
		writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
		return 1
	except OperationNotPermitted as e:
		writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
		return 1
	except FileNotFound as e:
		writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
		return 1

	# Reset state for things like noauto and keepwork in FEATURES.
	for x in ('.die_hooks',):
		try:
			os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
		except OSError:
			pass

	_prepare_workdir(mysettings)
	if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
		# Avoid spurious permissions adjustments when fetching with
		# a temporary PORTAGE_TMPDIR setting (for fetchonly).
		_prepare_features_dirs(mysettings)
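
# --- Hedged sketch (illustration only, not portage code): the EEXIST/EPERM
# handling in makedirs() above can be exercised with a throwaway path;
# demo_dir is a hypothetical location chosen for the demo.
import errno
import os
import tempfile

demo_dir = os.path.join(tempfile.mkdtemp(), "portage", "logging")
try:
	os.makedirs(demo_dir)
except OSError as oe:
	if oe.errno not in (errno.EEXIST, errno.EPERM):
		raise
print(os.path.isdir(demo_dir))  # True unless permissions blocked creation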
Example #46
    def _start(self):

        pkg = self.pkg
        settings = self.settings

        if not self.opts.fetchonly:
            rval = _check_temp_dir(settings)
            if rval != os.EX_OK:
                self.returncode = rval
                self._current_task = None
                self._async_wait()
                return

        root_config = pkg.root_config
        tree = "porttree"
        self._tree = tree
        portdb = root_config.trees[tree].dbapi
        settings.setcpv(pkg)
        settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
        if self.opts.buildpkgonly:
            settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
        else:
            settings.configdict["pkg"]["MERGE_TYPE"] = "source"
        ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
        self._ebuild_path = ebuild_path
        portage.doebuild_environment(ebuild_path,
                                     'setup',
                                     settings=self.settings,
                                     db=portdb)

        # Check the manifest here since with --keep-going mode it's
        # currently possible to get this far with a broken manifest.
        if not self._check_manifest():
            self.returncode = 1
            self._current_task = None
            self._async_wait()
            return

        prefetcher = self.prefetcher
        if prefetcher is None:
            pass
        elif prefetcher.isAlive() and \
         prefetcher.poll() is None:

            waiting_msg = "Fetching files " + \
             "in the background. " + \
             "To view fetch progress, run `tail -f " + \
             "/var/log/emerge-fetch.log` in another " + \
             "terminal."
            msg_prefix = colorize("GOOD", " * ")
            from textwrap import wrap
            waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
             for line in wrap(waiting_msg, 65))
            if not self.background:
                writemsg(waiting_msg, noiselevel=-1)

            self._current_task = prefetcher
            prefetcher.addExitListener(self._prefetch_exit)
            return

        self._prefetch_exit(prefetcher)
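
# --- Hedged sketch: the waiting message above is wrapped to 65 columns and
# each line gets a status prefix; colorize() is omitted here so the snippet
# runs without portage.
from textwrap import wrap

msg_prefix = " * "
waiting_msg = ("Fetching files in the background. To view fetch progress, "
               "run `tail -f /var/log/emerge-fetch.log` in another terminal.")
print("".join("%s%s\n" % (msg_prefix, line) for line in wrap(waiting_msg, 65)))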
Example #47
def _prepare_workdir(mysettings):
	workdir_mode = 0o700
	try:
		mode = mysettings["PORTAGE_WORKDIR_MODE"]
		if mode.isdigit():
			parsed_mode = int(mode, 8)
		elif mode == "":
			raise KeyError()
		else:
			raise ValueError()
		if parsed_mode & 0o7777 != parsed_mode:
			raise ValueError("Invalid file mode: %s" % mode)
		else:
			workdir_mode = parsed_mode
	except KeyError:
		writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
	except ValueError as e:
		if len(str(e)) > 0:
			writemsg("%s\n" % e)
		writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
		(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
	try:
		apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except FileNotFound:
		pass # ebuild.sh will create it

	if mysettings.get("PORT_LOGDIR", "") == "":
		while "PORT_LOGDIR" in mysettings:
			del mysettings["PORT_LOGDIR"]
	if "PORT_LOGDIR" in mysettings:
		try:
			modified = ensure_dirs(mysettings["PORT_LOGDIR"])
			if modified:
				# Only initialize group/mode if the directory doesn't
				# exist, so that we don't override permissions if they
				# were previously set by the administrator.
				# NOTE: These permissions should be compatible with our
				# default logrotate config as discussed in bug 374287.
				apply_secpass_permissions(mysettings["PORT_LOGDIR"],
					uid=portage_uid, gid=portage_gid, mode=0o2770)
		except PortageException as e:
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
				mysettings["PORT_LOGDIR"], noiselevel=-1)
			writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
			while "PORT_LOGDIR" in mysettings:
				del mysettings["PORT_LOGDIR"]

	compress_log_ext = ''
	if 'compress-build-logs' in mysettings.features:
		compress_log_ext = '.gz'

	logdir_subdir_ok = False
	if "PORT_LOGDIR" in mysettings and \
		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			open(_unicode_encode(logid_path), 'w').close()
		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime)),
			encoding=_encodings['content'], errors='replace')

		if "split-log" in mysettings.features:
			log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				log_subdir, "%s:%s.log%s" %
				(mysettings["PF"], logid_time, compress_log_ext))
		else:
			log_subdir = logdir
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				logdir, "%s:%s:%s.log%s" % \
				(mysettings["CATEGORY"], mysettings["PF"], logid_time,
				compress_log_ext))

		if log_subdir is logdir:
			logdir_subdir_ok = True
		else:
			try:
				_ensure_log_subdirs(logdir, log_subdir)
			except PortageException as e:
				writemsg("!!! %s\n" % (e,), noiselevel=-1)

			if os.access(log_subdir, os.W_OK):
				logdir_subdir_ok = True
			else:
				writemsg("!!! %s: %s\n" %
					(_("Permission Denied"), log_subdir), noiselevel=-1)

	tmpdir_log_path = os.path.join(
		mysettings["T"], "build.log%s" % compress_log_ext)
	if not logdir_subdir_ok:
		# NOTE: When sesandbox is enabled, the local SELinux security policies
		# may not allow output to be piped out of the sesandbox domain. The
		# current policy will allow it to work when a pty is available, but
		# not through a normal pipe. See bug #162404.
		mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
	else:
		# Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
		# requested in bug #412865.
		make_new_symlink = False
		try:
			target = os.readlink(tmpdir_log_path)
		except OSError:
			make_new_symlink = True
		else:
			if target != mysettings["PORTAGE_LOG_FILE"]:
				make_new_symlink = True
		if make_new_symlink:
			try:
				os.unlink(tmpdir_log_path)
			except OSError:
				pass
			os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
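
# --- Hedged sketch (standalone, not portage code): the PORTAGE_WORKDIR_MODE
# parsing above accepts a string of octal digits, rejects values outside the
# 0o7777 permission bits, and falls back to a default otherwise.
def parse_workdir_mode(mode, default=0o700):
	try:
		if not mode.isdigit():
			raise ValueError(mode)
		parsed = int(mode, 8)
		if parsed & 0o7777 != parsed:
			raise ValueError(mode)
		return parsed
	except ValueError:
		return default

print(oct(parse_workdir_mode("0770")))   # 0o770
print(oct(parse_workdir_mode("bogus")))  # falls back to 0o700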
Example #48
    def _check_var_directory(self, varname, var):
        if not isdir_raise_eaccess(var):
            writemsg(_("!!! Error: %s='%s' is not a directory. "
                       "Please correct this.\n") % (varname, var),
                     noiselevel=-1)
            raise DirectoryNotFound(var)
Example #49
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
	env=None, writemsg_level=None):
	"""
	Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
	ld.so.conf, and prelink.conf. Finally, run ldconfig.
	"""
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = portage.settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = portage.mtimedb["ldpath"]
	if env is None:
		env = os.environ
	envd_dir = os.path.join(target_root, "etc", "env.d")
	ensure_dirs(envd_dir, mode=0o755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	templist = []
	for x in fns:
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		  "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		  "PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except ParseError as e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue

		config_list.append(myconfig)
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	env = {}
	specials = {}
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and item not in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and item not in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		# Cumulative variables have already been deleted from myconfig so
		# that they won't be overwritten by this dict.update call.
		env.update(myconfig)

	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
	try:
		myld = codecs.open(_unicode_encode(ldsoconf_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		myldlines = myld.readlines()
		myld.close()
		oldld = []
		for x in myldlines:
			# Each line has at least one char (a newline).
			if x[:1] == "#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError) as e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	ld_cache_update = False

	newld = specials["LDPATH"]
	if oldld != newld:
		# ld.so.conf needs updating and ldconfig needs to be run.
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x + "\n")
		myfd.close()
		ld_cache_update = True

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		newprelink = atomic_ofstream(
			os.path.join(target_root, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l %s\n" % (x,));
		prelink_paths = []
		prelink_paths += specials.get("LDPATH", [])
		prelink_paths += specials.get("PATH", [])
		prelink_paths += specials.get("PRELINK_PATH", [])
		prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
		for x in prelink_paths:
			if not x:
				continue
			if x[-1:] != '/':
				x += "/"
			plmasked = 0
			for y in prelink_path_mask:
				if not y:
					continue
				if y[-1] != '/':
					y += "/"
				if y == x[0:len(y)]:
					plmasked = 1
					break
			if not plmasked:
				newprelink.write("-h %s\n" % (x,))
		for x in prelink_path_mask:
			newprelink.write("-b %s\n" % (x,))
		newprelink.close()

	# Portage stores mtimes with 1 second granularity, but finer granularity
	# is possible with newer Python versions.  In order to avoid the potential
	# ambiguity of mtimes that differ by less than 1 second, sleep here if any
	# of the directories have been modified during the current second.
	sleep_for_mtime_granularity = False
	current_time = int(time.time())
	mtime_changed = False
	lib_dirs = set()
	for lib_dir in set(specials["LDPATH"] + \
		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
		except OSError as oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			sleep_for_mtime_granularity = True
		if x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	if mtime_changed:
		ld_cache_update = True

	if makelinks and \
		not ld_cache_update and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.items():
			if mydata[0] not in ("obj", "sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	ldconfig = "/sbin/ldconfig"
	if "CHOST" in env and "CBUILD" in env and \
		env["CHOST"] != env["CBUILD"]:
		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])

	# Only run ldconfig as needed
	if (ld_cache_update or makelinks) and ldconfig:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype == "Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
				(target_root,))
			if makelinks:
				os.system("cd / ; %s -r '%s'" % (ldconfig, target_root))
			else:
				os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	del specials["LDPATH"]

	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [ x for x in env if x != "LDPATH" ]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		if v.startswith('$') and not v.startswith('${'):
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()

	if sleep_for_mtime_granularity:
		while current_time == int(time.time()):
			time.sleep(1)
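
# --- Hedged sketch (illustration only): cumulative variables such as PATH
# are merged across env.d files in file order with duplicates dropped,
# mirroring the COLON_SEPARATED handling above.
config_list_demo = [
	{"PATH": "/usr/bin:/bin"},
	{"PATH": "/usr/local/bin:/usr/bin"},
]
merged = []
for cfg in config_list_demo:
	for item in cfg.get("PATH", "").split(":"):
		if item and item not in merged:
			merged.append(item)
print(":".join(merged))  # /usr/bin:/bin:/usr/local/bin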
Example #50
    def load_profiles(self, repositories, known_repository_paths):
        known_repository_paths = set(
            os.path.realpath(x) for x in known_repository_paths)

        known_repos = []
        for x in known_repository_paths:
            try:
                repo = repositories.get_repo_for_location(x)
            except KeyError:
                layout_data = parse_layout_conf(x)[0]
            else:
                layout_data = {
                    "profile-formats": repo.profile_formats,
                    "profile_eapi_when_unspecified": repo.eapi
                }
            # force a trailing '/' for ease of doing startswith checks
            known_repos.append((x + '/', layout_data))
        known_repos = tuple(known_repos)

        if self.config_profile_path is None:
            deprecated_profile_path = os.path.join(self.config_root, 'etc',
                                                   'make.profile')
            self.config_profile_path = \
             os.path.join(self.config_root, PROFILE_PATH)
            if isdir_raise_eaccess(self.config_profile_path):
                self.profile_path = self.config_profile_path
                if isdir_raise_eaccess(deprecated_profile_path) and not \
                 os.path.samefile(self.profile_path,
                 deprecated_profile_path):
                    # Don't warn if they refer to the same path, since
                    # that can be used for backward compatibility with
                    # old software.
                    writemsg("!!! %s\n" % _("Found 2 make.profile dirs: "
                                            "using '%s', ignoring '%s'") %
                             (self.profile_path, deprecated_profile_path),
                             noiselevel=-1)
            else:
                self.config_profile_path = deprecated_profile_path
                if isdir_raise_eaccess(self.config_profile_path):
                    self.profile_path = self.config_profile_path
                else:
                    self.profile_path = None
        else:
            # NOTE: repoman may pass in an empty string
            # here, in order to create an empty profile
            # for checking dependencies of packages with
            # empty KEYWORDS.
            self.profile_path = self.config_profile_path

        # The symlink might not exist or might not be a symlink.
        self.profiles = []
        self.profiles_complex = []
        if self.profile_path:
            try:
                self._addProfile(os.path.realpath(self.profile_path),
                                 repositories, known_repos)
            except ParseError as e:
                if not portage._sync_mode:
                    writemsg(_("!!! Unable to parse profile: '%s'\n") %
                             self.profile_path,
                             noiselevel=-1)
                    writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
                self.profiles = []
                self.profiles_complex = []

        if self._user_config and self.profiles:
            custom_prof = os.path.join(self.config_root, CUSTOM_PROFILE_PATH)
            if os.path.exists(custom_prof):
                # For read_corresponding_eapi_file, specify default=None
                # in order to allow things like wildcard atoms when there
                # is no explicit EAPI setting.
                self.user_profile_dir = custom_prof
                self.profiles.append(custom_prof)
                self.profiles_complex.append(
                    _profile_node(
                        custom_prof, True, True,
                        ('profile-bashrcs', 'profile-set'),
                        read_corresponding_eapi_file(custom_prof + os.sep,
                                                     default=None), True))
            del custom_prof

        self.profiles = tuple(self.profiles)
        self.profiles_complex = tuple(self.profiles_complex)
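
# --- Hedged sketch (hypothetical paths, not portage code): the fallback
# above prefers the configured profile symlink and only uses the deprecated
# etc/make.profile location when the preferred one is missing.
import os

def pick_profile_path(config_root, preferred, deprecated):
    for rel in (preferred, deprecated):
        candidate = os.path.join(config_root, rel)
        if os.path.isdir(candidate):
            return candidate
    return None

print(pick_profile_path("/", "etc/portage/make.profile", "etc/make.profile"))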
Example #51
def unlockfile(mytuple):

    # XXX: Compatibility hack.
    if len(mytuple) == 3:
        lockfilename, myfd, unlinkfile = mytuple
        locking_method = fcntl.flock
    elif len(mytuple) == 4:
        lockfilename, myfd, unlinkfile, locking_method = mytuple
    else:
        raise InvalidData

    if myfd == HARDLINK_FD:
        unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile)
        return True

    # myfd may be None here due to myfd = mypath in lockfile()
    if isinstance(lockfilename, str) and not os.path.exists(lockfilename):
        writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
        if myfd is not None:
            _open_fds[myfd].close()
        return False

    try:
        if myfd is None:
            myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
            unlinkfile = 1
        locking_method(myfd, fcntl.LOCK_UN)
    except OSError:
        if isinstance(lockfilename, str):
            _open_fds[myfd].close()
        raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)

    try:
        # This sleep call was added to allow other processes that are
        # waiting for a lock to be able to grab it before it is deleted.
        # lockfile() already accounts for this situation, however, and
        # the sleep here adds more time than is saved overall, so it
        # stays commented out until proven necessary.
        # time.sleep(0.0001)
        if unlinkfile:
            locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            # We won the lock, so there isn't competition for it.
            # We can safely delete the file.
            writemsg(_("Got the lockfile...\n"), 1)
            if _fstat_nlink(myfd) == 1:
                os.unlink(lockfilename)
                writemsg(_("Unlinked lockfile...\n"), 1)
                locking_method(myfd, fcntl.LOCK_UN)
            else:
                writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
                _open_fds[myfd].close()
                return False
    except SystemExit:
        raise
    except Exception as e:
        writemsg(_("Failed to get lock... someone took it.\n"), 1)
        writemsg(str(e) + "\n", 1)

    # Why test lockfilename? Because we may have been handed an fd
    # originally, and the caller might not want their open fd closed
    # automatically on them.
    if isinstance(lockfilename, str):
        _open_fds[myfd].close()

    return True
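
# --- Hedged sketch (POSIX only, not portage code): unlockfile() consumes the
# tuple produced by lockfile(); the underlying flock round trip looks like
# this on a throwaway file.
import fcntl
import os
import tempfile

fd, path = tempfile.mkstemp()
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # acquire, as lockfile() does
fcntl.flock(fd, fcntl.LOCK_UN)                  # release, as unlockfile() does
os.close(fd)
os.unlink(path)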
Example #52
	def fix(self, **kwargs):
		onProgress = kwargs.get('onProgress', None)
		bintree = self._bintree
		# Force reindex in case pkgdir-index-trusted is enabled.
		bintree._populate_local(reindex=True)
		bintree.populated = True
		_instance_key = bintree.dbapi._instance_key
		cpv_all = self._bintree.dbapi.cpv_all()
		cpv_all.sort()
		maxval = 0
		if onProgress:
			onProgress(maxval, 0)
		pkgindex = self._pkgindex
		missing = []
		stale = []
		metadata = {}
		for d in pkgindex.packages:
			cpv = _pkg_str(d["CPV"], metadata=d,
				settings=bintree.settings)
			d["CPV"] = cpv
			metadata[_instance_key(cpv)] = d
			if not bintree.dbapi.cpv_exists(cpv):
				stale.append(cpv)

		for cpv in cpv_all:
			d = metadata.get(_instance_key(cpv))
			if not d or self._need_update(cpv, d):
				missing.append(cpv)

		if missing or stale:
			from portage import locks
			pkgindex_lock = locks.lockfile(
				self._pkgindex_file, wantnewlockfile=1)
			try:
				# Repopulate with lock held. If _populate_local returns
				# data then use that, since _load_pkgindex would return
				# stale data in this case.
				self._pkgindex = pkgindex = (bintree._populate_local() or
					bintree._load_pkgindex())
				cpv_all = self._bintree.dbapi.cpv_all()
				cpv_all.sort()

				# Recount stale/missing packages, with lock held.
				missing = []
				stale = []
				metadata = {}
				for d in pkgindex.packages:
					cpv = _pkg_str(d["CPV"], metadata=d,
						settings=bintree.settings)
					d["CPV"] = cpv
					metadata[_instance_key(cpv)] = d
					if not bintree.dbapi.cpv_exists(cpv):
						stale.append(cpv)

				for cpv in cpv_all:
					d = metadata.get(_instance_key(cpv))
					if not d or self._need_update(cpv, d):
						missing.append(cpv)

				maxval = len(missing)
				for i, cpv in enumerate(missing):
					d = bintree._pkgindex_entry(cpv)
					try:
						bintree._eval_use_flags(cpv, d)
					except portage.exception.InvalidDependString:
						writemsg("!!! Invalid binary package: '%s'\n" % \
							bintree.getname(cpv), noiselevel=-1)
					else:
						metadata[_instance_key(cpv)] = d

					if onProgress:
						onProgress(maxval, i+1)

				for cpv in stale:
					del metadata[_instance_key(cpv)]

				# We've updated the pkgindex, so set it to
				# repopulate when necessary.
				bintree.populated = False

				del pkgindex.packages[:]
				pkgindex.packages.extend(metadata.values())
				bintree._update_pkgindex_header(self._pkgindex.header)
				bintree._pkgindex_write(self._pkgindex)

			finally:
				locks.unlockfile(pkgindex_lock)

		if onProgress:
			if maxval == 0:
				maxval = 1
			onProgress(maxval, maxval)
		return (True, None)
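
# --- Hedged sketch (simplified keys, not portage code): the stale/missing
# split above reconciles index entries against the packages that actually
# exist, here keyed by a plain cpv string instead of _instance_key.
index_entries = {"app-misc/foo-1.0": {"CPV": "app-misc/foo-1.0"}}
on_disk = ["app-misc/foo-1.0", "app-misc/bar-2.0"]
stale = [cpv for cpv in index_entries if cpv not in on_disk]
missing = [cpv for cpv in on_disk if cpv not in index_entries]
print(stale, missing)  # [] ['app-misc/bar-2.0']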
Example #53
    def write(msg):
        writemsg(msg, noiselevel=-1)
Example #54
def dep_check(depstring,
              mydbapi,
              mysettings,
              use="yes",
              mode=None,
              myuse=None,
              use_cache=1,
              use_binaries=0,
              myroot=None,
              trees=None):
    """
	Takes a depend string, parses it, and selects atoms.
	The myroot parameter is unused (use mysettings['EROOT'] instead).
	"""
    myroot = mysettings['EROOT']
    edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
    #check_config_instance(mysettings)
    if trees is None:
        trees = globals()["db"]
    if use == "yes":
        if myuse is None:
            #default behavior
            myusesplit = mysettings["PORTAGE_USE"].split()
        else:
            myusesplit = myuse
            # We've been given useflags to use.
            #print "USE FLAGS PASSED IN."
            #print myuse
            #if "bindist" in myusesplit:
            #	print "BINDIST is set!"
            #else:
            #	print "BINDIST NOT set."
    else:
        #we are being run by autouse(), don't consult USE vars yet.
        # WE ALSO CANNOT USE SETTINGS
        myusesplit = []

    mymasks = set()
    useforce = set()
    if use == "all":
        # This is only for repoman, in order to constrain the use_reduce
        # matchall behavior to account for profile use.mask/force. The
        # ARCH/archlist code here may be redundant, since the profile
        # really should be handling ARCH masking/forcing itself.
        arch = mysettings.get("ARCH")
        mymasks.update(mysettings.usemask)
        mymasks.update(mysettings.archlist())
        if arch:
            mymasks.discard(arch)
            useforce.add(arch)
        useforce.update(mysettings.useforce)
        useforce.difference_update(mymasks)

    # eapi code borrowed from _expand_new_virtuals()
    mytrees = trees[myroot]
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    current_parent = None
    eapi = None
    if parent is not None:
        if virt_parent is not None:
            current_parent = virt_parent
        else:
            current_parent = parent

    if current_parent is not None:
        # Don't pass the eapi argument to use_reduce() for installed packages
        # since previous validation will have already marked them as invalid
        # when necessary and now we're more interested in evaluating
        # dependencies so that things like --depclean work as well as possible
        # in spite of partial invalidity.
        if not current_parent.installed:
            eapi = current_parent.eapi

    if isinstance(depstring, list):
        mysplit = depstring
    else:
        try:
            mysplit = use_reduce(depstring,
                                 uselist=myusesplit,
                                 masklist=mymasks,
                                 matchall=(use == "all"),
                                 excludeall=useforce,
                                 opconvert=True,
                                 token_class=Atom,
                                 eapi=eapi)
        except InvalidDependString as e:
            return [0, "%s" % (e, )]

    if mysplit == []:
        #dependencies were reduced to nothing
        return [1, []]

    # Recursively expand new-style virtuals so as to
    # collapse one or more levels of indirection.
    try:
        mysplit = _expand_new_virtuals(mysplit,
                                       edebug,
                                       mydbapi,
                                       mysettings,
                                       use=use,
                                       mode=mode,
                                       myuse=myuse,
                                       use_force=useforce,
                                       use_mask=mymasks,
                                       use_cache=use_cache,
                                       use_binaries=use_binaries,
                                       myroot=myroot,
                                       trees=trees)
    except ParseError as e:
        return [0, "%s" % (e, )]

    dnf = False
    if mysettings.local_config:  # if not repoman
        orig_split = mysplit
        mysplit = _overlap_dnf(mysplit)
        dnf = mysplit is not orig_split

    mysplit2 = dep_wordreduce(mysplit,
                              mysettings,
                              mydbapi,
                              mode,
                              use_cache=use_cache)
    if mysplit2 is None:
        return [0, _("Invalid token")]

    writemsg("\n\n\n", 1)
    writemsg("mysplit:  %s\n" % (mysplit), 1)
    writemsg("mysplit2: %s\n" % (mysplit2), 1)

    selected_atoms = dep_zapdeps(mysplit,
                                 mysplit2,
                                 myroot,
                                 use_binaries=use_binaries,
                                 trees=trees,
                                 minimize_slots=dnf)

    return [1, selected_atoms]
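
# --- Hedged sketch: dep_check() returns a [flag, payload] pair; payload is
# an error string when the flag is 0 and a list of selected atoms when it
# is 1. A caller can unpack it like this.
def handle_dep_check_result(result):
    ok, payload = result
    if not ok:
        print("dep_check failed: %s" % payload)
        return []
    return payload

print(handle_dep_check_result([0, "Invalid token"]))   # []
print(handle_dep_check_result([1, ["app-misc/foo"]]))  # ['app-misc/foo']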
Example #55
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() in ('true',
                                                                      'yes'):
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        exitcode = 0
        verify_failure = False

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no').lower() in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    self._refresh_keys(openpgp_env)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                if exitcode == 0:
                    if unchanged:
                        self.repo_storage.abort_update()
                    else:
                        self.repo_storage.commit_update()
                        self.repo_storage.garbage_collection()
                return (exitcode, updatecache_flg)

            retries = 0
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), str(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)
            elif len(uris) == 1:
                # Use the original hostname if it resolves to a single IP,
                # since DNS lookup must occur in the rsync process for
                # compatibility with things like proxychains that allocate
                # a surrogate IP which is only valid within the current
                # process.
                uris = [syncuri]

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while True:
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if retries == 0:
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your ebuild repository " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break

            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            if local_state_unchanged:
                # The quarantine download_dir is not intended to exist
                # in this case, so refer gemato to the normal repository
                # location.
                download_dir = self.repo.location
            else:
                download_dir = self.download_dir

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(download_dir, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (download_dir, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1
                        verify_failure = True

            if exitcode == 0 and not local_state_unchanged:
                self.repo_storage.commit_update()
                self.repo_storage.garbage_collection()

            return (exitcode, updatecache_flg)
        finally:
            # Don't delete the update if verification failed, in case
            # the cause needs to be investigated.
            if not verify_failure:
                self.repo_storage.abort_update()
            if openpgp_env is not None:
                openpgp_env.close()
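
# --- Hedged sketch (simplified, not portage code): the retry loop above pops
# mirrors from a reversed list and refills it from uris_orig once exhausted,
# bounded by the retry budget.
uris = ["rsync://a.example/gentoo", "rsync://b.example/gentoo"]
uris.reverse()
uris_orig = uris[:]
maxretries = 3
for attempt in range(maxretries + 1):
    if not uris:
        uris.extend(uris_orig)
    dosyncuri = uris.pop()
    print("attempt %d: %s" % (attempt + 1, dosyncuri))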
Example #56
def dep_zapdeps(unreduced,
                reduced,
                myroot,
                use_binaries=0,
                trees=None,
                minimize_slots=False):
    """
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.
	"""
    if trees is None:
        trees = portage.db
    writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
    if not reduced or unreduced == ["||"] or dep_eval(reduced):
        return []

    if unreduced[0] != "||":
        unresolved = []
        for x, satisfied in zip(unreduced, reduced):
            if isinstance(x, list):
                unresolved += dep_zapdeps(x,
                                          satisfied,
                                          myroot,
                                          use_binaries=use_binaries,
                                          trees=trees,
                                          minimize_slots=minimize_slots)
            elif not satisfied:
                unresolved.append(x)
        return unresolved

    # We're at a ( || atom ... ) type level and need to make a choice
    deps = unreduced[1:]
    satisfieds = reduced[1:]

    # Our preference order is for the first item that:
    # a) contains all unmasked packages with the same key as installed packages
    # b) contains all unmasked packages
    # c) contains masked installed packages
    # d) is the first item

    preferred_in_graph = []
    preferred_installed = preferred_in_graph
    preferred_any_slot = preferred_in_graph
    preferred_non_installed = []
    unsat_use_in_graph = []
    unsat_use_installed = []
    unsat_use_non_installed = []
    other_installed = []
    other_installed_some = []
    other_installed_any_slot = []
    other = []

    # unsat_use_* must come after preferred_non_installed
    # for correct ordering in cases like || ( foo[a] foo[b] ).
    choice_bins = (
        preferred_in_graph,
        preferred_non_installed,
        unsat_use_in_graph,
        unsat_use_installed,
        unsat_use_non_installed,
        other_installed,
        other_installed_some,
        other_installed_any_slot,
        other,
    )

    # Alias the trees we'll be checking availability against
    parent = trees[myroot].get("parent")
    virt_parent = trees[myroot].get("virt_parent")
    priority = trees[myroot].get("priority")
    graph_db = trees[myroot].get("graph_db")
    graph = trees[myroot].get("graph")
    pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
    graph_interface = trees[myroot].get("graph_interface")
    downgrade_probe = trees[myroot].get("downgrade_probe")
    circular_dependency = trees[myroot].get("circular_dependency")
    vardb = None
    if "vartree" in trees[myroot]:
        vardb = trees[myroot]["vartree"].dbapi
    if use_binaries:
        mydbapi = trees[myroot]["bintree"].dbapi
    else:
        mydbapi = trees[myroot]["porttree"].dbapi

    try:
        mydbapi_match_pkgs = mydbapi.match_pkgs
    except AttributeError:

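        # Fallback for dbapi implementations that lack match_pkgs(): wrap
        # plain match() results in _pkg_str objects instead.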
        def mydbapi_match_pkgs(atom):
            return [
                mydbapi._pkg_str(cpv, atom.repo) for cpv in mydbapi.match(atom)
            ]

    # Sort the deps into bins: installed; not installed but already in the
    # graph; and neither installed nor in the graph. Each entry carries
    # [[required_atom], availability].
    for x, satisfied in zip(deps, satisfieds):
        if isinstance(x, list):
            atoms = dep_zapdeps(x,
                                satisfied,
                                myroot,
                                use_binaries=use_binaries,
                                trees=trees,
                                minimize_slots=minimize_slots)
        else:
            atoms = [x]
        if vardb is None:
            # When called by repoman, we can simply return the first choice
            # because dep_eval() handles preference selection.
            return atoms

        all_available = True
        all_use_satisfied = True
        all_use_unmasked = True
        conflict_downgrade = False
        installed_downgrade = False
        slot_atoms = collections.defaultdict(list)
        slot_map = {}
        cp_map = {}
        for atom in atoms:
            if atom.blocker:
                continue

            # It's not a downgrade if parent is replacing child.
            replacing = (parent and graph_interface
                         and graph_interface.will_replace_child(
                             parent, myroot, atom))
            # Ignore USE dependencies here since we don't want USE
            # settings to adversely affect || preference evaluation.
            avail_pkg = mydbapi_match_pkgs(atom.without_use)
            if not avail_pkg and replacing:
                avail_pkg = [replacing]
            if avail_pkg:
                avail_pkg = avail_pkg[-1]  # highest (ascending order)
                avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
            if not avail_pkg:
                all_available = False
                all_use_satisfied = False
                break

            if not replacing and graph_db is not None and downgrade_probe is not None:
                slot_matches = graph_db.match_pkgs(avail_slot)
                if (len(slot_matches) > 1 and avail_pkg < slot_matches[-1]
                        and not downgrade_probe(avail_pkg)):
                    # If a downgrade is not desirable, then avoid a
                    # choice that pulls in a lower version involved
                    # in a slot conflict (bug #531656).
                    conflict_downgrade = True

            if atom.use:
                avail_pkg_use = mydbapi_match_pkgs(atom)
                if not avail_pkg_use:
                    all_use_satisfied = False

                    if pkg_use_enabled is not None:
                        # Check which USE flags cause the match to fail,
                        # so we can prioritize choices that do not
                        # require changes to use.mask or use.force
                        # (see bug #515584).
                        violated_atom = atom.violated_conditionals(
                            pkg_use_enabled(avail_pkg),
                            avail_pkg.iuse.is_valid_flag)

                        # Note that violated_atom.use can be None here,
                        # since evaluation can collapse conditional USE
                        # deps that cause the match to fail due to
                        # missing IUSE (match uses atom.unevaluated_atom
                        # to detect such missing IUSE).
                        if violated_atom.use is not None:
                            for flag in violated_atom.use.enabled:
                                if flag in avail_pkg.use.mask:
                                    all_use_unmasked = False
                                    break
                            else:
                                for flag in violated_atom.use.disabled:
                                    if flag in avail_pkg.use.force and \
                                            flag not in avail_pkg.use.mask:
                                        all_use_unmasked = False
                                        break
                else:
                    # highest (ascending order)
                    avail_pkg_use = avail_pkg_use[-1]
                    if avail_pkg_use != avail_pkg:
                        avail_pkg = avail_pkg_use
                    avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))

            if not replacing and downgrade_probe is not None and graph is not None:
                highest_in_slot = mydbapi_match_pkgs(avail_slot)
                highest_in_slot = (highest_in_slot[-1]
                                   if highest_in_slot else None)
                if (avail_pkg and highest_in_slot
                        and avail_pkg < highest_in_slot
                        and not downgrade_probe(avail_pkg) and
                    (highest_in_slot.installed or highest_in_slot in graph)):
                    installed_downgrade = True

            slot_map[avail_slot] = avail_pkg
            slot_atoms[avail_slot].append(atom)
            highest_cpv = cp_map.get(avail_pkg.cp)
            all_match_current = None
            all_match_previous = None
            if (highest_cpv is not None
                    and highest_cpv.slot == avail_pkg.slot):
                # If possible, make the package selection internally
                # consistent by choosing a package that satisfies all
                # atoms which match a package in the same slot. Later on,
                # the package version chosen here is used in the
                # has_upgrade/has_downgrade logic to prefer choices with
                # upgrades, and a package choice that is not internally
                # consistent will lead the has_upgrade/has_downgrade logic
                # to produce invalid results (see bug 600346).
                all_match_current = all(
                    a.match(avail_pkg) for a in slot_atoms[avail_slot])
                all_match_previous = all(
                    a.match(highest_cpv) for a in slot_atoms[avail_slot])
                if all_match_previous and not all_match_current:
                    continue

            current_higher = (
                highest_cpv is None
                or vercmp(avail_pkg.version, highest_cpv.version) > 0)

            if current_higher or (all_match_current
                                  and not all_match_previous):
                cp_map[avail_pkg.cp] = avail_pkg

        want_update = False
        if graph_interface is None or graph_interface.removal_action:
            new_slot_count = len(slot_map)
        else:
            new_slot_count = 0
            for slot_atom, avail_pkg in slot_map.items():
                if parent is not None and graph_interface.want_update_pkg(
                        parent, avail_pkg):
                    want_update = True
                if (not slot_atom.cp.startswith("virtual/")
                        and not graph_db.match_pkgs(slot_atom)):
                    new_slot_count += 1

        this_choice = _dep_choice(atoms=atoms,
                                  slot_map=slot_map,
                                  cp_map=cp_map,
                                  all_available=all_available,
                                  all_installed_slots=False,
                                  new_slot_count=new_slot_count,
                                  all_in_graph=False,
                                  want_update=want_update)
        if all_available:
            # The "all installed" criterion is not version or slot specific.
            # If any version of a package is already in the graph then we
            # assume that it is preferred over other possible packages choices.
            all_installed = True
            for atom in set(Atom(atom.cp) for atom in atoms
                            if not atom.blocker):
                # New-style virtuals have zero cost to install.
                if not vardb.match(atom) and not atom.startswith("virtual/"):
                    all_installed = False
                    break
            all_installed_slots = False
            if all_installed:
                all_installed_slots = True
                for slot_atom in slot_map:
                    # New-style virtuals have zero cost to install.
                    if not vardb.match(slot_atom) and \
                            not slot_atom.startswith("virtual/"):
                        all_installed_slots = False
                        break
            this_choice.all_installed_slots = all_installed_slots
            if graph_db is None:
                if all_use_satisfied:
                    if all_installed:
                        if all_installed_slots:
                            preferred_installed.append(this_choice)
                        else:
                            preferred_any_slot.append(this_choice)
                    else:
                        preferred_non_installed.append(this_choice)
                else:
                    if not all_use_unmasked:
                        other.append(this_choice)
                    elif all_installed_slots:
                        unsat_use_installed.append(this_choice)
                    else:
                        unsat_use_non_installed.append(this_choice)
            elif conflict_downgrade or installed_downgrade:
                other.append(this_choice)
            else:
                all_in_graph = True
                for atom in atoms:
                    # New-style virtuals have zero cost to install.
                    if atom.blocker or atom.cp.startswith("virtual/"):
                        continue
                    # We check if the matched package has actually been
                    # added to the digraph, in order to distinguish between
                    # those packages and installed packages that may need
                    # to be uninstalled in order to resolve blockers.
                    if not any(pkg in graph
                               for pkg in graph_db.match_pkgs(atom)):
                        all_in_graph = False
                        break
                this_choice.all_in_graph = all_in_graph

                circular_atom = None
                if parent and parent.onlydeps:
                    # Check if the atom would result in a direct circular
                    # dependency and avoid that for --onlydeps arguments
                    # since it can defeat the purpose of --onlydeps.
                    # This check should only be used for --onlydeps
                    # arguments, since it can interfere with circular
                    # dependency backtracking choices, causing the test
                    # case for bug 756961 to fail.
                    cpv_slot_list = [parent]
                    for atom in atoms:
                        if atom.blocker:
                            continue
                        if vardb.match(atom):
                            # If the atom is satisfied by an installed
                            # version then it's not a circular dep.
                            continue
                        if atom.cp != parent.cp:
                            continue
                        if match_from_list(atom, cpv_slot_list):
                            circular_atom = atom
                            break
                if circular_atom is None and circular_dependency is not None:
                    for circular_child in itertools.chain(
                            circular_dependency.get(parent, []),
                            circular_dependency.get(virt_parent, [])):
                        for atom in atoms:
                            if not atom.blocker and atom.match(circular_child):
                                circular_atom = atom
                                break
                        if circular_atom is not None:
                            break

                if circular_atom is not None:
                    other.append(this_choice)
                else:
                    if all_use_satisfied:
                        if all_in_graph:
                            preferred_in_graph.append(this_choice)
                        elif all_installed:
                            if all_installed_slots:
                                preferred_installed.append(this_choice)
                            else:
                                preferred_any_slot.append(this_choice)
                        else:
                            preferred_non_installed.append(this_choice)
                    else:
                        if not all_use_unmasked:
                            other.append(this_choice)
                        elif all_in_graph:
                            unsat_use_in_graph.append(this_choice)
                        elif all_installed_slots:
                            unsat_use_installed.append(this_choice)
                        else:
                            unsat_use_non_installed.append(this_choice)
        else:
            all_installed = True
            some_installed = False
            for atom in atoms:
                if not atom.blocker:
                    if vardb.match(atom):
                        some_installed = True
                    else:
                        all_installed = False

            if all_installed:
                this_choice.all_installed_slots = True
                other_installed.append(this_choice)
            elif some_installed:
                other_installed_some.append(this_choice)

            # Use Atom(atom.cp) for a somewhat "fuzzy" match, since
            # the whole atom may be too specific. For example, see
            # bug #522652, where using the whole atom leads to an
            # unsatisfiable choice.
            elif any(
                    vardb.match(Atom(atom.cp)) for atom in atoms
                    if not atom.blocker):
                other_installed_any_slot.append(this_choice)
            else:
                other.append(this_choice)

    # Prefer choices which contain upgrades to higher slots. This helps
    # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
    # atom which matches the higher version rather than the atom furthest
    # to the left. Sorting is done separately for each of choice_bins, so
    # as not to interfere with the ordering of the bins. Because of the
    # bin separation, the main function of this code is to allow
    # --depclean to remove old slots (rather than to pull in new slots).
    for choices in choice_bins:
        if len(choices) < 2:
            continue

        if minimize_slots:
            # Prefer choices having fewer new slots. When used with DNF form,
            # this can eliminate unnecessary packages that depclean would
            # ultimately eliminate (see bug 632026). Only use this behavior
            # when deemed necessary by the caller, since this will discard the
            # order specified in the ebuild, and the preferences specified
            # there can serve as a crucial source of guidance (see bug 645002).

            # NOTE: Under some conditions, new_slot_count value may have some
            # variance from one calculation to the next because it depends on
            # the order that packages are added to the graph. This variance can
            # contribute to outcomes that appear to be random. Meanwhile,
            # the order specified in the ebuild is without variance, so it
            # does not have this problem.
            choices.sort(key=operator.attrgetter('new_slot_count'))

        for choice_1 in choices[1:]:
            cps = set(choice_1.cp_map)
            for choice_2 in choices:
                if choice_1 is choice_2:
                    # choice_1 will not be promoted, so move on
                    break
                if (
                        # Prefer choices where all_installed_slots is True, except
                        # in cases where we want to upgrade to a new slot as in
                        # bug 706278. Don't compare new_slot_count here since that
                        # would aggressively override the preference order defined
                        # in the ebuild, breaking the test case for bug 645002.
                        choice_1.all_installed_slots
                        and not choice_2.all_installed_slots
                        and not choice_2.want_update):
                    # promote choice_1 in front of choice_2
                    choices.remove(choice_1)
                    index_2 = choices.index(choice_2)
                    choices.insert(index_2, choice_1)
                    break

                intersecting_cps = cps.intersection(choice_2.cp_map)
                has_upgrade = False
                has_downgrade = False
                for cp in intersecting_cps:
                    version_1 = choice_1.cp_map[cp]
                    version_2 = choice_2.cp_map[cp]
                    difference = vercmp(version_1.version, version_2.version)
                    if difference != 0:
                        if difference > 0:
                            has_upgrade = True
                        else:
                            has_downgrade = True

                if (
                        # Prefer upgrades.
                    (has_upgrade and not has_downgrade)

                        # Prefer choices where all packages have been pulled into
                        # the graph, except for choices that eliminate upgrades.
                        or (choice_1.all_in_graph and not choice_2.all_in_graph
                            and not (has_downgrade and not has_upgrade))):
                    # promote choice_1 in front of choice_2
                    choices.remove(choice_1)
                    index_2 = choices.index(choice_2)
                    choices.insert(index_2, choice_1)
                    break

    for allow_masked in (False, True):
        for choices in choice_bins:
            for choice in choices:
                if choice.all_available or allow_masked:
                    return choice.atoms

    assert False  # This point should not be reachable
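
# --- Illustrative sketch (not part of the scraped example above) ---
# A minimal, self-contained model of the bin-local promotion pass used
# above: within one choice bin, a later choice whose overlapping packages
# are strictly upgrades (never downgrades) relative to an earlier choice is
# promoted in front of it. The "cp_map" dicts with tuple versions are
# hypothetical stand-ins for the real choice objects, which compare
# versions with vercmp().
def promote_upgrades(choices):
    for choice_1 in choices[1:]:
        cps = set(choice_1["cp_map"])
        for choice_2 in choices:
            if choice_1 is choice_2:
                # choice_1 will not be promoted, so move on
                break
            shared = cps.intersection(choice_2["cp_map"])
            has_upgrade = any(
                choice_1["cp_map"][cp] > choice_2["cp_map"][cp] for cp in shared)
            has_downgrade = any(
                choice_1["cp_map"][cp] < choice_2["cp_map"][cp] for cp in shared)
            if has_upgrade and not has_downgrade:
                # promote choice_1 in front of choice_2
                choices.remove(choice_1)
                choices.insert(choices.index(choice_2), choice_1)
                break
    return choices

# || ( foo:1 foo:2 ) analogue: the second choice matches the higher
# version, so it ends up at the front of the bin.
assert promote_upgrades([
    {"cp_map": {"dev-libs/foo": (1, 0)}},
    {"cp_map": {"dev-libs/foo": (2, 0)}},
])[0]["cp_map"]["dev-libs/foo"] == (2, 0)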
Beispiel #57
0
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
    """
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.
	"""
    if trees is None:
        trees = portage.db
    writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
    if not reduced or unreduced == ["||"] or dep_eval(reduced):
        return []

    if unreduced[0] != "||":
        unresolved = []
        for x, satisfied in zip(unreduced, reduced):
            if isinstance(x, list):
                unresolved += dep_zapdeps(x,
                                          satisfied,
                                          myroot,
                                          use_binaries=use_binaries,
                                          trees=trees)
            elif not satisfied:
                unresolved.append(x)
        return unresolved

    # We're at a ( || atom ... ) type level and need to make a choice
    deps = unreduced[1:]
    satisfieds = reduced[1:]

    # Our preference order is for the first item that:
    # a) contains all unmasked packages with the same key as installed packages
    # b) contains all unmasked packages
    # c) contains masked installed packages
    # d) is the first item

    preferred_installed = []
    preferred_in_graph = []
    preferred_any_slot = []
    preferred_non_installed = []
    unsat_use_in_graph = []
    unsat_use_installed = []
    unsat_use_non_installed = []
    other = []

    # unsat_use_* must come after preferred_non_installed
    # for correct ordering in cases like || ( foo[a] foo[b] ).
    choice_bins = (
        preferred_in_graph,
        preferred_installed,
        preferred_any_slot,
        preferred_non_installed,
        unsat_use_in_graph,
        unsat_use_installed,
        unsat_use_non_installed,
        other,
    )

    # Alias the trees we'll be checking availability against
    parent = trees[myroot].get("parent")
    priority = trees[myroot].get("priority")
    graph_db = trees[myroot].get("graph_db")
    vardb = None
    if "vartree" in trees[myroot]:
        vardb = trees[myroot]["vartree"].dbapi
    if use_binaries:
        mydbapi = trees[myroot]["bintree"].dbapi
    else:
        mydbapi = trees[myroot]["porttree"].dbapi

    # Sort the deps into bins: installed; not installed but already in the
    # graph; and neither installed nor in the graph. Each entry carries
    # [[required_atom], availability].
    for x, satisfied in zip(deps, satisfieds):
        if isinstance(x, list):
            atoms = dep_zapdeps(x,
                                satisfied,
                                myroot,
                                use_binaries=use_binaries,
                                trees=trees)
        else:
            atoms = [x]
        if vardb is None:
            # When called by repoman, we can simply return the first choice
            # because dep_eval() handles preference selection.
            return atoms

        all_available = True
        all_use_satisfied = True
        slot_map = {}
        cp_map = {}
        for atom in atoms:
            if atom.blocker:
                continue
            # Ignore USE dependencies here since we don't want USE
            # settings to adversely affect || preference evaluation.
            avail_pkg = mydbapi.match(atom.without_use)
            if avail_pkg:
                avail_pkg = avail_pkg[-1]  # highest (ascending order)
                avail_slot = Atom(
                    "%s:%s" %
                    (atom.cp, mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
            if not avail_pkg:
                all_available = False
                all_use_satisfied = False
                break

            if atom.use:
                avail_pkg_use = mydbapi.match(atom)
                if not avail_pkg_use:
                    all_use_satisfied = False
                else:
                    # highest (ascending order)
                    avail_pkg_use = avail_pkg_use[-1]
                    if avail_pkg_use != avail_pkg:
                        avail_pkg = avail_pkg_use
                        avail_slot = Atom(
                            "%s:%s" %
                            (atom.cp, mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))

            slot_map[avail_slot] = avail_pkg
            pkg_cp = cpv_getkey(avail_pkg)
            highest_cpv = cp_map.get(pkg_cp)
            if highest_cpv is None or \
                    pkgcmp(catpkgsplit(avail_pkg)[1:],
                           catpkgsplit(highest_cpv)[1:]) > 0:
                cp_map[pkg_cp] = avail_pkg

        this_choice = (atoms, slot_map, cp_map, all_available)
        if all_available:
            # The "all installed" criterion is not version or slot specific.
            # If any version of a package is already in the graph then we
            # assume that it is preferred over other possible packages choices.
            all_installed = True
            for atom in set(Atom(atom.cp) for atom in atoms
                            if not atom.blocker):
                # New-style virtuals have zero cost to install.
                if not vardb.match(atom) and not atom.startswith("virtual/"):
                    all_installed = False
                    break
            all_installed_slots = False
            if all_installed:
                all_installed_slots = True
                for slot_atom in slot_map:
                    # New-style virtuals have zero cost to install.
                    if not vardb.match(slot_atom) and \
                            not slot_atom.startswith("virtual/"):
                        all_installed_slots = False
                        break
            if graph_db is None:
                if all_use_satisfied:
                    if all_installed:
                        if all_installed_slots:
                            preferred_installed.append(this_choice)
                        else:
                            preferred_any_slot.append(this_choice)
                    else:
                        preferred_non_installed.append(this_choice)
                else:
                    if all_installed_slots:
                        unsat_use_installed.append(this_choice)
                    else:
                        unsat_use_non_installed.append(this_choice)
            else:
                all_in_graph = True
                for slot_atom in slot_map:
                    # New-style virtuals have zero cost to install.
                    if not graph_db.match(slot_atom) and \
                            not slot_atom.startswith("virtual/"):
                        all_in_graph = False
                        break
                circular_atom = None
                if all_in_graph:
                    if parent is None or priority is None:
                        pass
                    elif priority.buildtime:
                        # Check if the atom would result in a direct circular
                        # dependency and try to avoid that if it seems likely
                        # to be unresolvable. This is only relevant for
                        # buildtime deps that aren't already satisfied by an
                        # installed package.
                        cpv_slot_list = [parent]
                        for atom in atoms:
                            if atom.blocker:
                                continue
                            if vardb.match(atom):
                                # If the atom is satisfied by an installed
                                # version then it's not a circular dep.
                                continue
                            if atom.cp != parent.cp:
                                continue
                            if match_from_list(atom, cpv_slot_list):
                                circular_atom = atom
                                break
                if circular_atom is not None:
                    other.append(this_choice)
                else:
                    if all_use_satisfied:
                        if all_in_graph:
                            preferred_in_graph.append(this_choice)
                        elif all_installed:
                            if all_installed_slots:
                                preferred_installed.append(this_choice)
                            else:
                                preferred_any_slot.append(this_choice)
                        else:
                            preferred_non_installed.append(this_choice)
                    else:
                        if all_in_graph:
                            unsat_use_in_graph.append(this_choice)
                        elif all_installed_slots:
                            unsat_use_installed.append(this_choice)
                        else:
                            unsat_use_non_installed.append(this_choice)
        else:
            other.append(this_choice)

    # Prefer choices which contain upgrades to higher slots. This helps
    # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
    # atom which matches the higher version rather than the atom furthest
    # to the left. Sorting is done separately for each of choice_bins, so
    # as not to interfere with the ordering of the bins. Because of the
    # bin separation, the main function of this code is to allow
    # --depclean to remove old slots (rather than to pull in new slots).
    for choices in choice_bins:
        if len(choices) < 2:
            continue
        for choice_1 in choices[1:]:
            atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
            cps = set(cp_map_1)
            for choice_2 in choices:
                if choice_1 is choice_2:
                    # choice_1 will not be promoted, so move on
                    break
                atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
                intersecting_cps = cps.intersection(cp_map_2)
                if not intersecting_cps:
                    continue
                has_upgrade = False
                has_downgrade = False
                for cp in intersecting_cps:
                    version_1 = cp_map_1[cp]
                    version_2 = cp_map_2[cp]
                    difference = pkgcmp(
                        catpkgsplit(version_1)[1:],
                        catpkgsplit(version_2)[1:])
                    if difference != 0:
                        if difference > 0:
                            has_upgrade = True
                        else:
                            has_downgrade = True
                            break
                if has_upgrade and not has_downgrade:
                    # promote choice_1 in front of choice_2
                    choices.remove(choice_1)
                    index_2 = choices.index(choice_2)
                    choices.insert(index_2, choice_1)
                    break

    for allow_masked in (False, True):
        for choices in choice_bins:
            for atoms, slot_map, cp_map, all_available in choices:
                if all_available or allow_masked:
                    return atoms

    assert False  # This point should not be reachable
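
# --- Illustrative sketch (not part of the scraped example above) ---
# This older variant compares versions via pkgcmp(catpkgsplit(cpv)[1:], ...),
# while the newer one calls vercmp() on package objects. Both helpers live
# in portage.versions; the cpv strings below are hypothetical and this
# assumes portage is importable.
from portage.versions import catpkgsplit, pkgcmp, vercmp

older_cpv = "dev-libs/foo-1.2.3"
newer_cpv = "dev-libs/foo-1.10"
# catpkgsplit() -> (category, package, version, revision), revision "r0"
# when the cpv carries none.
assert catpkgsplit(older_cpv) == ("dev-libs", "foo", "1.2.3", "r0")
# pkgcmp() compares (package, version, revision) tuples; > 0 means the
# first argument is the newer package.
assert pkgcmp(catpkgsplit(newer_cpv)[1:], catpkgsplit(older_cpv)[1:]) > 0
# vercmp() compares bare version strings with the same convention.
assert vercmp("1.10", "1.2.3") > 0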
Beispiel #58
0
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        out = portage.output.EOutput()
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Real local timestamp file.
        self.servertimestampfile = os.path.join(self.repo.location, "metadata",
                                                "timestamp.chk")

        content = portage.util.grabfile(self.servertimestampfile)
        timestamp = 0
        if content:
            try:
                timestamp = time.mktime(
                    time.strptime(content[0], TIMESTAMP_FORMAT))
            except (OverflowError, ValueError):
                pass
        del content

        try:
            self.rsync_initial_timeout = int(
                self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        except ValueError:
            self.rsync_initial_timeout = 15

        try:
            maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit:
            raise  # re-raise so explicit exits still propagate
        except (KeyError, ValueError):
            maxretries = -1  # default number of retries

        if syncuri.startswith("file://"):
            self.proto = "file"
            dosyncuri = syncuri[7:]
            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            self._process_exitcode(exitcode, dosyncuri, out, 1)
            return (exitcode, updatecache_flg)

        retries = 0
        try:
            self.proto, user_name, hostname, port = re.split(
                r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                syncuri,
                maxsplit=4)[1:5]
        except ValueError:
            writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                           noiselevel=-1,
                           level=logging.ERROR)
            return (1, False)

        self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

        if port is None:
            port = ""
        if user_name is None:
            user_name = ""
        if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
            getaddrinfo_host = hostname
        else:
            # getaddrinfo needs the brackets stripped
            getaddrinfo_host = hostname[1:-1]
        updatecache_flg = False
        all_rsync_opts = set(self.rsync_opts)
        all_rsync_opts.update(self.extra_rsync_opts)

        family = socket.AF_UNSPEC
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
                ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6
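        # e.g. a -4/--ipv4 flag arriving via PORTAGE_RSYNC_OPTS or
        # sync-rsync-extra-opts pins the address lookup below to AF_INET.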

        addrinfos = None
        uris = []

        try:
            addrinfos = getaddrinfo_validate(
                socket.getaddrinfo(getaddrinfo_host, None, family,
                                   socket.SOCK_STREAM))
        except socket.error as e:
            writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                           (_unicode_decode(hostname), _unicode(e)),
                           noiselevel=-1,
                           level=logging.ERROR)

        if addrinfos:

            AF_INET = socket.AF_INET
            AF_INET6 = None
            if socket.has_ipv6:
                AF_INET6 = socket.AF_INET6

            ips_v4 = []
            ips_v6 = []

            for addrinfo in addrinfos:
                if addrinfo[0] == AF_INET:
                    ips_v4.append("%s" % addrinfo[4][0])
                elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                    # IPv6 addresses need to be enclosed in square brackets
                    ips_v6.append("[%s]" % addrinfo[4][0])

            random.shuffle(ips_v4)
            random.shuffle(ips_v6)

            # Give priority to the address family that
            # getaddrinfo() returned first.
            if AF_INET6 is not None and addrinfos and \
             addrinfos[0][0] == AF_INET6:
                ips = ips_v6 + ips_v4
            else:
                ips = ips_v4 + ips_v6

            for ip in ips:
                uris.append(
                    syncuri.replace("//" + user_name + hostname + port + "/",
                                    "//" + user_name + ip + port + "/", 1))

        if not uris:
            # With some configurations we need to use the plain hostname
            # rather than try to resolve the ip addresses (bug #340817).
            uris.append(syncuri)

        # reverse, for use with pop()
        uris.reverse()
        uris_orig = uris[:]

        effective_maxretries = maxretries
        if effective_maxretries < 0:
            effective_maxretries = len(uris) - 1

        while True:
            if uris:
                dosyncuri = uris.pop()
            elif maxretries < 0 or retries > maxretries:
                writemsg("!!! Exhausted addresses for %s\n" %
                         _unicode_decode(hostname),
                         noiselevel=-1)
                return (1, False)
            else:
                uris.extend(uris_orig)
                dosyncuri = uris.pop()

            if retries == 0:
                if "--ask" in opts:
                    uq = UserQuery(opts)
                    if uq.query("Do you want to sync your Portage tree " + \
                     "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                     enter_invalid) == "No":
                        print()
                        print("Quitting.")
                        print()
                        sys.exit(128 + signal.SIGINT)
                self.logger(self.xterm_titles,
                            ">>> Starting rsync with " + dosyncuri)
                if "--quiet" not in opts:
                    print(">>> Starting rsync with " + dosyncuri + "...")
            else:
                self.logger(self.xterm_titles,
                            ">>> Starting retry %d of %d with %s" %
                            (retries, effective_maxretries, dosyncuri))
                writemsg_stdout(
                    "\n\n>>> Starting retry %d of %d with %s\n" %
                    (retries, effective_maxretries, dosyncuri), noiselevel=-1)

            if dosyncuri.startswith('ssh://'):
                dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            if is_synced:
                break

            retries = retries + 1

            if maxretries < 0 or retries <= maxretries:
                print(">>> Retrying...")
            else:
                # over retries
                # exit loop
                exitcode = EXCEEDED_MAX_RETRIES
                break
        self._process_exitcode(exitcode, dosyncuri, out, maxretries)
        return (exitcode, updatecache_flg)
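
# --- Illustrative sketch (not part of the scraped example above) ---
# The sync-uri parse from update(), applied to a hypothetical URI: one
# regex match yields (proto, user_name, hostname, port), with IPv6 hosts
# kept in brackets and the port kept with its leading colon.
import re

syncuri = "rsync://anonymous@[2001:db8::1]:873/gentoo-portage"
proto, user_name, hostname, port = re.split(
    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
    syncuri,
    maxsplit=4)[1:5]
assert (proto, user_name, hostname, port) == (
    "rsync", "anonymous@", "[2001:db8::1]", ":873")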
Beispiel #59
0
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True, force=False):
	"""
	Fetch files to DISTDIR and also verify digests if they are available.

	@param myuris: Maps each file name to a tuple of available fetch URIs.
	@type myuris: dict
	@param mysettings: Portage config instance.
	@type mysettings: portage.config
	@param listonly: Only print URIs and do not actually fetch them.
	@type listonly: bool
	@param fetchonly: Do not block for files that are locked by a
		concurrent fetcher process. This means that the function can
		return successfully *before* all files have been successfully
		fetched!
	@type fetchonly: bool
	@param use_locks: Enable locks. This parameter is ineffective if
		FEATURES=distlocks is disabled in the portage config!
	@type use_locks: bool
	@param digests: Maps each file name to a dict of digest types and values.
	@type digests: dict
	@param allow_missing_digests: Enable fetch even if there are no digests
		available for verification.
	@type allow_missing_digests: bool
	@param force: Force download, even when a file already exists in
		DISTDIR. This is most useful when there are no digests available,
		since otherwise download will be automatically forced if the
		existing file does not match the available digests. Also, this
		avoids the need to remove the existing file in advance, which
		makes it possible to atomically replace the file and avoid
		interference with concurrent processes.
	@type force: bool
	@rtype: int
	@return: 1 if successful, 0 otherwise.
	"""

	if force and digests:
		# Since the force parameter can trigger unnecessary fetch when the
		# digests match, do not allow force=True when digests are provided.
		raise PortageException(_('fetch: force=True is not allowed when digests are provided'))

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	userfetch = portage.data.secpass >= 2 and "userfetch" in features

	# 'nomirror' is bad/negative logic: you restrict mirroring, not "no-mirroring".
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]
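	# Worked example (illustrative): with the default "350K" the regex
	# captures ("350", "K"); assuming the usual suffix map ("K" -> 10,
	# "M" -> 20, "G" -> 30), the threshold is 350 * 2**10 == 358400 bytes.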

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probablility
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK)
	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not distdir_writable and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if try_mirrors and "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for an 'items' attribute: myuris may be a dict-like mapping
	# rather than a plain dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			if distdir_writable:
				mirror_cache = os.path.join(mysettings["DISTDIR"],
						".mirror-cache.json")
			else:
				mirror_cache = None
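			# Each location is wrapped in a functools.partial so the concrete
			# mirror URL (consulting the mirror cache when one exists) is only
			# resolved when the entry is popped for a fetch attempt.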
			for l in locations:
				filedict[myfile].append(functools.partial(
					get_mirror_url, l, myfile, mysettings, mirror_cache))
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]
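				# e.g. (illustrative) "mirror://gnu/foo-1.0.tar.gz" splits into
				# mirrorname "gnu" and path "foo-1.0.tar.gz"; each mirror root
				# below is joined as root.rstrip("/") + "/" + path.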

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetch from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		try:
			_ensure_distdir(mysettings, mysettings["DISTDIR"])
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False
	valid_hashes = set(get_valid_checksum_keys())
	valid_hashes.discard("size")

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
			if not verifiable_hash_types:
				expected = " ".join(sorted(valid_hashes))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif portage.data.secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match and not force:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				# Remove broken symlinks or symlinks to files which
				# _check_distfile did not match above.
				if (distdir_writable and mystat is None) or os.path.islink(myfile_path):
					try:
						os.unlink(myfile_path)
					except OSError as e:
						if e.errno not in (errno.ENOENT, errno.ESTALE):
							raise
					mystat = None

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if distdir_writable and not force:
						# Since _check_distfile did not match above, the file
						# is either corrupt or its identity has changed since
						# the last time it was fetched, so rename it.
						temp_filename = _checksum_failure_temp_file(
							mysettings, mysettings["DISTDIR"], myfile)
						writemsg_stdout(_("Refetching... "
							"File renamed to '%s'\n\n") % \
							temp_filename, noiselevel=-1)

				# Stat the temporary download file for comparison with
				# fetch_resume_size.
				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					mystat = None

				if mystat is not None:
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, download_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(download_path):
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except EnvironmentError:
								pass
					elif not orig_digests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						if not force:
							continue
					else:
						if (mydigests[myfile].get("size") is not None
								and mystat.st_size < mydigests[myfile]["size"]
								and not restrict_fetch):
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin("%s size ;-)" % (myfile,))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
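				# Some entries are lazily evaluated callables; resolve
				# them to a concrete URI string before using them.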
				if isinstance(loc, functools.partial):
					loc = loc()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]
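				# For example, "https://host/file.tar.gz" yields "https",
				# selecting FETCHCOMMAND_HTTPS below when it is defined.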

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
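					# If the local name matches the URI basename the command
					# may still save the file correctly, so only treat a
					# missing ${FILE} as fatal when the names differ.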
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(download_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s is not fetched, and we are unable to fetch it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s is not fully fetched, and we are unable to complete it.\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s has an incorrect size, "
								"and we are unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					continue

				if fetched != 2 and has_space:
					# We either need to resume or start the download.
					if fetched == 1:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if distdir_writable and mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(download_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						# resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch = resumecommand
						command_var = resumecommand_var
					else:
						# normal mode:
						locfetch = fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    os.path.basename(download_path)
					}
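					# These values are substituted for ${URI}, ${FILE}, etc.
					# in the (FETCH|RESUME)COMMAND template via varexpand().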

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						v = mysettings.get(k)
						if v is not None:
							variables[k] = v

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						mystat = os.lstat(download_path)
						if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
							os.unlink(download_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately for size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile). In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE] < 100000
										and len(myfile) > 4
										and not (myfile.endswith(".html")
										or myfile.endswith(".htm"))):
									html404 = re.compile(
										"<title>.*(not found|404).*</title>",
										re.I | re.M)
									with io.open(
										_unicode_encode(download_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(download_path)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							# The file is the correct size. Check the checksums
							# for the fetched file NOW, for those users who don't
							# have a stable/continuous net connection. This way we
							# have a chance to try to download from another mirror...
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
									noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
								fetched = 0
								checksum_failure_count += 1
								if checksum_failure_count == \
									checksum_failure_primaryuri:
									# Switch to "primaryuri" mode in order
									# to increase the probability of success.
									primaryuris = \
										primaryuri_dict.get(myfile)
									if primaryuris:
										uri_list.extend(
											reversed(primaryuris))
								if checksum_failure_count >= \
									checksum_failure_max_tries:
									break
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									eout.ebegin("%s %s ;-)" % \
										(myfile, " ".join(sorted(digests))))
									eout.eend(0)
								fetched = 2
								break
					else: # no digests available
						if not myret:
							if not fetch_to_ro:
								_movefile(download_path, myfile_path, mysettings=mysettings)
							fetched = 2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly or fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
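
# A minimal sketch of the template-expansion step used above, assuming a
# portage installation; the template, URI, and file name are hypothetical:

# This mirrors the shlex_split()/varexpand() step that fetch() performs
# before spawning the fetcher.
from portage.util import shlex_split, varexpand

template = 'wget -c -O "${DISTDIR}/${FILE}" "${URI}"'
variables = {
	"URI": "https://dist.example.org/foo-1.0.tar.gz",  # hypothetical
	"FILE": "foo-1.0.tar.gz",                          # hypothetical
	"DISTDIR": "/var/cache/distfiles",
}
argv = [varexpand(x, mydict=variables) for x in shlex_split(template)]
print(argv)
# ['wget', '-c', '-O', '/var/cache/distfiles/foo-1.0.tar.gz',
#  'https://dist.example.org/foo-1.0.tar.gz']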
Example #60
0
def dep_check(depstring,
              mydbapi,
              mysettings,
              use="yes",
              mode=None,
              myuse=None,
              use_cache=1,
              use_binaries=0,
              myroot="/",
              trees=None):
    """Takes a depend string and parses the condition."""
    edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
    #check_config_instance(mysettings)
    if trees is None:
        trees = globals()["db"]
    if use == "yes":
        if myuse is None:
            #default behavior
            myusesplit = mysettings["PORTAGE_USE"].split()
        else:
            myusesplit = myuse
            # We've been given useflags to use.
            #print "USE FLAGS PASSED IN."
            #print myuse
            #if "bindist" in myusesplit:
            #	print "BINDIST is set!"
            #else:
            #	print "BINDIST NOT set."
    else:
        # We are being run by autouse(); don't consult USE vars yet.
        # We also cannot use settings here.
        myusesplit = []

    mymasks = set()
    useforce = set()
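    # Treat the ARCH flag itself as always forced.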
    useforce.add(mysettings["ARCH"])
    if use == "all":
        # This masking/forcing is only for repoman.  In other cases, relevant
        # masking/forcing should have already been applied via
        # config.regenerate().  Also, binary or installed packages may have
        # been built with flags that are now masked, and it would be
        # inconsistent to mask them now.  Additionally, myuse may consist of
        # flags from a parent package that is being merged to a $ROOT that is
        # different from the one that mysettings represents.
        mymasks.update(mysettings.usemask)
        mymasks.update(mysettings.archlist())
        mymasks.discard(mysettings["ARCH"])
        useforce.update(mysettings.useforce)
        useforce.difference_update(mymasks)

    # eapi code borrowed from _expand_new_virtuals()
    mytrees = trees[myroot]
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    current_parent = None
    eapi = None
    if parent is not None:
        if virt_parent is not None:
            current_parent = virt_parent[0]
        else:
            current_parent = parent

    if current_parent is not None:
        # Don't pass the eapi argument to use_reduce() for installed packages
        # since previous validation will have already marked them as invalid
        # when necessary and now we're more interested in evaluating
        # dependencies so that things like --depclean work as well as possible
        # in spite of partial invalidity.
        if not current_parent.installed:
            eapi = current_parent.metadata['EAPI']

    try:
        mysplit = use_reduce(depstring,
                             uselist=myusesplit,
                             masklist=mymasks,
                             matchall=(use == "all"),
                             excludeall=useforce,
                             opconvert=True,
                             token_class=Atom,
                             eapi=eapi)
    except InvalidDependString as e:
        return [0, str(e)]

    if mysplit == []:
        #dependencies were reduced to nothing
        return [1, []]

    # Recursively expand new-style virtuals so as to
    # collapse one or more levels of indirection.
    try:
        mysplit = _expand_new_virtuals(mysplit,
                                       edebug,
                                       mydbapi,
                                       mysettings,
                                       use=use,
                                       mode=mode,
                                       myuse=myuse,
                                       use_force=useforce,
                                       use_mask=mymasks,
                                       use_cache=use_cache,
                                       use_binaries=use_binaries,
                                       myroot=myroot,
                                       trees=trees)
    except ParseError as e:
        return [0, str(e)]

    mysplit2 = mysplit[:]
    mysplit2 = dep_wordreduce(mysplit2,
                              mysettings,
                              mydbapi,
                              mode,
                              use_cache=use_cache)
    if mysplit2 is None:
        return [0, _("Invalid token")]

    writemsg("\n\n\n", 1)
    writemsg("mysplit:  %s\n" % (mysplit), 1)
    writemsg("mysplit2: %s\n" % (mysplit2), 1)

    selected_atoms = dep_zapdeps(mysplit,
                                 mysplit2,
                                 myroot,
                                 use_binaries=use_binaries,
                                 trees=trees)

    return [1, selected_atoms]
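
# A minimal illustration of the use_reduce() step above, assuming a
# portage installation; the dependency string is a made-up example:

# USE-conditional groups are collapsed according to the enabled flags
# before any virtual expansion or dep_zapdeps() selection happens.
from portage.dep import use_reduce

depstring = "ssl? ( dev-libs/openssl ) dev-lang/python"
print(use_reduce(depstring, uselist=["ssl"], opconvert=True))
# -> ['dev-libs/openssl', 'dev-lang/python']
print(use_reduce(depstring, uselist=[], opconvert=True))
# -> ['dev-lang/python']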