Example #1
0
def _parse_uri_map(cpv, metadata, use=None):

    myuris = use_reduce(metadata.get('SRC_URI', ''),
                        uselist=use,
                        matchall=(use is None),
                        is_src_uri=True,
                        eapi=metadata['EAPI'])

    uri_map = OrderedDict()

    myuris.reverse()
    while myuris:
        uri = myuris.pop()
        if myuris and myuris[-1] == "->":
            myuris.pop()
            distfile = myuris.pop()
        else:
            distfile = os.path.basename(uri)
            if not distfile:
                raise portage.exception.InvalidDependString(
                    ("getFetchMap(): '%s' SRC_URI has no file "
                     "name: '%s'") % (cpv, uri))

        uri_set = uri_map.get(distfile)
        if uri_set is None:
            uri_set = set()
            uri_map[distfile] = uri_set
        uri_set.add(uri)
        uri = None

    return uri_map
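The loop above consumes a flattened SRC_URI token stream and treats "->" as a rename operator: the token after the arrow is the local distfile name for the preceding URI. A minimal, self-contained sketch of the same token-walking pattern, with no portage imports; the helper name and sample tokens are illustrative only:

import os
from collections import OrderedDict

def arrow_map(tokens):
    # Map distfile names to sets of URIs from a flat token list (illustrative helper).
    uri_map = OrderedDict()
    tokens = list(reversed(tokens))    # reversed copy, so pop() walks left to right
    while tokens:
        uri = tokens.pop()
        if tokens and tokens[-1] == "->":
            tokens.pop()               # discard the arrow
            distfile = tokens.pop()    # explicit local file name
        else:
            distfile = os.path.basename(uri)
        uri_map.setdefault(distfile, set()).add(uri)
    return uri_map

print(arrow_map(["http://example.org/a-1.0.tgz",
                 "http://example.org/b.tgz", "->", "pkg-b-1.0.tgz"]))
# OrderedDict([('a-1.0.tgz', {'http://example.org/a-1.0.tgz'}),
#              ('pkg-b-1.0.tgz', {'http://example.org/b.tgz'})])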
Example #2
0
def _parse_uri_map(cpv, metadata, use=None):

	myuris = use_reduce(metadata.get('SRC_URI', ''),
		uselist=use, matchall=(use is None),
		is_src_uri=True,
		eapi=metadata['EAPI'])

	uri_map = OrderedDict()

	myuris.reverse()
	while myuris:
		uri = myuris.pop()
		if myuris and myuris[-1] == "->":
			myuris.pop()
			distfile = myuris.pop()
		else:
			distfile = os.path.basename(uri)
			if not distfile:
				raise portage.exception.InvalidDependString(
					("getFetchMap(): '%s' SRC_URI has no file " + \
					"name: '%s'") % (cpv, uri))

		uri_set = uri_map.get(distfile)
		if uri_set is None:
			uri_set = set()
			uri_map[distfile] = uri_set
		uri_set.add(uri)
		uri = None

	return uri_map
Example #3
0
def _parse_uri_map(cpv, metadata, use=None):

	myuris = use_reduce(metadata.get('SRC_URI', ''),
		uselist=use, matchall=(use is None),
		is_src_uri=True,
		eapi=metadata['EAPI'])

	uri_map = OrderedDict()

	myuris.reverse()
	while myuris:
		uri = myuris.pop()
		if myuris and myuris[-1] == "->":
			myuris.pop()
			distfile = myuris.pop()
		else:
			distfile = os.path.basename(uri)
			if not distfile:
				raise portage.exception.InvalidDependString(
					("getFetchMap(): '%s' SRC_URI has no file " + \
					"name: '%s'") % (cpv, uri))

		uri_set = uri_map.get(distfile)
		if uri_set is None:
			# Use OrderedDict to preserve order from SRC_URI
			# while ensuring uniqueness.
			uri_set = OrderedDict()
			uri_map[distfile] = uri_set
		uri_set[uri] = True

	# Convert OrderedDicts to tuples.
	for k, v in uri_map.items():
		uri_map[k] = tuple(v)

	return uri_map
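Example #3 replaces the plain set with an OrderedDict whose keys act as an insertion-ordered set, then freezes each value into a tuple. A small standalone sketch of that dedup-while-preserving-order idiom; the sample URIs are made up:

from collections import OrderedDict

uris = [
    "http://a.example/foo.tgz",
    "http://b.example/foo.tgz",
    "http://a.example/foo.tgz",   # duplicate; must be dropped
]

ordered_unique = OrderedDict()
for uri in uris:
    ordered_unique[uri] = True    # keys act as an ordered set

print(tuple(ordered_unique))
# ('http://a.example/foo.tgz', 'http://b.example/foo.tgz')

On Python 3.7+ a plain dict would preserve insertion order as well, but OrderedDict keeps the intent explicit and also works on older interpreters.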
Example #4
0
    def __init__(self, settings, logger):
        self.settings = settings
        self.logger = logger
        # Similar to emerge, sync needs a default umask so that created
        # files have sane permissions.
        os.umask(0o22)

        self.module_controller = portage.sync.module_controller
        self.module_names = self.module_controller.module_names
        self.hooks = {}
        for _dir in ["repo.postsync.d", "postsync.d"]:
            postsync_dir = os.path.join(self.settings["PORTAGE_CONFIGROOT"],
                                        portage.USER_CONFIG_PATH, _dir)
            hooks = OrderedDict()
            for filepath in util._recursive_file_list(postsync_dir):
                name = filepath.split(postsync_dir)[1].lstrip(os.sep)
                if os.access(filepath, os.X_OK):
                    hooks[filepath] = name
                else:
                    writemsg_level(" %s %s hook: '%s' is not executable\n" % (
                        warn("*"),
                        _dir,
                        _unicode_decode(name),
                    ),
                                   level=logging.WARN,
                                   noiselevel=2)
            self.hooks[_dir] = hooks
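Example #4 collects postsync hooks by walking a directory tree and keeping only executable files, recording each hook's path relative to the hook directory. A rough standalone equivalent without portage's util._recursive_file_list, using os.walk; the directory path in the usage comment is hypothetical:

import os
from collections import OrderedDict

def find_hooks(postsync_dir):
    # Return an OrderedDict mapping absolute path -> path relative to
    # postsync_dir, for every executable file under postsync_dir.
    hooks = OrderedDict()
    for root, _dirs, files in os.walk(postsync_dir):
        for fname in sorted(files):
            filepath = os.path.join(root, fname)
            if os.access(filepath, os.X_OK):
                hooks[filepath] = os.path.relpath(filepath, postsync_dir)
    return hooks

# e.g. find_hooks("/etc/portage/postsync.d")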
Example #5
0
def _parse_uri_map(cpv, metadata, use=None):

    myuris = use_reduce(metadata.get('SRC_URI', ''),
                        uselist=use,
                        matchall=(use is None),
                        is_src_uri=True,
                        eapi=metadata['EAPI'])

    uri_map = OrderedDict()

    myuris.reverse()
    while myuris:
        uri = myuris.pop()
        if myuris and myuris[-1] == "->":
            myuris.pop()
            distfile = myuris.pop()
        else:
            distfile = os.path.basename(uri)
            if not distfile:
                raise portage.exception.InvalidDependString(
                    ("getFetchMap(): '%s' SRC_URI has no file "
                     "name: '%s'") % (cpv, uri))

        uri_set = uri_map.get(distfile)
        if uri_set is None:
            # Use OrderedDict to preserve order from SRC_URI
            # while ensuring uniqueness.
            uri_set = OrderedDict()
            uri_map[distfile] = uri_set

        # SRC_URI may contain a file name with no scheme, and in
        # this case it does not belong in uri_set.
        if urlparse(uri).scheme:
            uri_set[uri] = True

    # Convert OrderedDicts to tuples.
    for k, v in uri_map.items():
        uri_map[k] = tuple(v)

    return uri_map
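Example #5 additionally drops SRC_URI entries that are bare file names by checking for a URL scheme before adding them to uri_set. The check in isolation, using only the standard library:

try:
    from urllib.parse import urlparse      # Python 3
except ImportError:
    from urlparse import urlparse          # Python 2

for token in ("http://example.org/foo-1.0.tar.gz", "foo-1.0.tar.gz"):
    print(token, "->", bool(urlparse(token).scheme))
# http://example.org/foo-1.0.tar.gz -> True
# foo-1.0.tar.gz -> False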
Example #6
0
def count_unread_news(portdb, vardb, repos=None, update=True):
    """
	Returns a dictionary mapping repos to integer counts of unread news items.
	By default, this will scan all repos and check for new items that have
	appeared since the last scan.

	@param portdb: a portage tree database
	@type portdb: portdbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@param repos: names of repos to scan (None means to scan all available repos)
	@type repos: list or None
	@param update: check for new items (default is True)
	@type update: boolean
	@rtype: dict
	@return: dictionary mapping repos to integer counts of unread news items
	"""

    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
    news_counts = OrderedDict()
    if repos is None:
        repos = portdb.getRepositories()

    permission_msgs = set()
    for repo in repos:
        try:
            manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
            count = manager.getUnreadItems(repo, update=update)
        except PermissionDenied as e:
            # NOTE: The NewsManager typically handles permission errors by
            # returning silently, so PermissionDenied won't necessarily be
            # raised even if we do trigger a permission error above.
            msg = "Permission denied: '%s'\n" % (e, )
            if msg not in permission_msgs:
                permission_msgs.add(msg)
                writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
            news_counts[repo] = 0
        else:
            news_counts[repo] = count

    return news_counts
Example #7
0
	def getFetchMap(self, mypkg, useflags=None, mytree=None):
		"""
		Get the SRC_URI metadata as a dict which maps each file name to a
		set of alternative URIs.

		@param mypkg: cpv for an ebuild
		@type mypkg: String
		@param useflags: a collection of enabled USE flags, for evaluation of
			conditionals
		@type useflags: set, or None to enable all conditionals
		@param mytree: The canonical path of the tree in which the ebuild
			is located, or None for automatic lookup
		@type mytree: String
		@return: A dict which maps each file name to a set of alternative
			URIs.
		@rtype: dict
		"""

		try:
			eapi, myuris = self.aux_get(mypkg,
				["EAPI", "SRC_URI"], mytree=mytree)
		except KeyError:
			# Convert this to an InvalidDependString exception since callers
			# already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")

		if not eapi_is_supported(eapi):
			# Convert this to an InvalidDependString exception
			# since callers already handle it.
			raise portage.exception.InvalidDependString(
				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
				(mypkg, eapi.lstrip("-")))

		myuris = paren_reduce(myuris)
		_src_uri_validate(mypkg, eapi, myuris)
		myuris = use_reduce(myuris, uselist=useflags,
			matchall=(useflags is None))
		myuris = flatten(myuris)

		uri_map = OrderedDict()

		myuris.reverse()
		while myuris:
			uri = myuris.pop()
			if myuris and myuris[-1] == "->":
				operator = myuris.pop()
				distfile = myuris.pop()
			else:
				distfile = os.path.basename(uri)
				if not distfile:
					raise portage.exception.InvalidDependString(
						("getFetchMap(): '%s' SRC_URI has no file " + \
						"name: '%s'") % (mypkg, uri))

			uri_set = uri_map.get(distfile)
			if uri_set is None:
				uri_set = set()
				uri_map[distfile] = uri_set
			uri_set.add(uri)
			uri = None
			operator = None

		return uri_map
Example #8
0
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception? file exists. let digestcheck() report
							# appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
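The PORTAGE_FETCH_RESUME_MIN_SIZE handling near the top of Example #8's fetch() turns a human-readable size such as "350K" into bytes with a regex and a suffix-to-exponent map. A self-contained sketch of that conversion; the regex and map below are local stand-ins for portage's _fetch_resume_size_re and _size_suffix_map, and the fallback logic is simplified:

import re

# Stand-ins for portage's module-level _fetch_resume_size_re / _size_suffix_map.
size_re = re.compile(r'^(\d+)([KMGT]?)$')
suffix_exp = {'': 0, 'K': 10, 'M': 20, 'G': 30, 'T': 40}   # powers of two

def resume_min_size(value, default="350K"):
    value = "".join(value.split()) or default   # strip whitespace, fall back if empty
    match = size_re.match(value.upper())
    if match is None or match.group(2) not in suffix_exp:
        match = size_re.match(default)          # unrecognized format: use the default
    return int(match.group(1)) * 2 ** suffix_exp[match.group(2)]

print(resume_min_size("350K"))   # 358400
print(resume_min_size("2M"))     # 2097152
print(resume_min_size("junk"))   # falls back to 358400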
Example #9
0
    def __init__(self, main=True):
        """
		@param main: If True then this is a singleton instance for use
			in the main thread, otherwise it is a local instance which
			can safely be used in a non-main thread (default is True, so
			that global_event_loop does not need constructor arguments)
		@type main: bool
		"""
        self._use_signal = main and fcntl is not None
        self._thread_rlock = threading.RLock()
        self._thread_condition = threading.Condition(self._thread_rlock)
        self._poll_event_queue = []
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        # New call_soon callbacks must have an opportunity to
        # execute before it's safe to wait on self._thread_condition
        # without a timeout, since delaying its execution indefinitely
        # could lead to a deadlock. The following attribute stores the
        # event handler id of the most recently added call_soon callback.
        # If this attribute has changed since the last time that the
        # call_soon callbacks have been called, then it's not safe to
        # wait on self._thread_condition without a timeout.
        self._call_soon_id = 0
        # Use OrderedDict in order to emulate the FIFO queue behavior
        # of the AbstractEventLoop.call_soon method.
        self._idle_callbacks = OrderedDict()
        self._timeout_handlers = {}
        self._timeout_interval = None
        self._default_executor = None

        self._poll_obj = None
        try:
            select.epoll
        except AttributeError:
            pass
        else:
            try:
                epoll_obj = select.epoll()
            except IOError:
                # This happens with Linux 2.4 kernels:
                # IOError: [Errno 38] Function not implemented
                pass
            else:

                # FD_CLOEXEC is enabled by default in Python >=3.4.
                if sys.hexversion < 0x3040000 and fcntl is not None:
                    try:
                        fcntl.FD_CLOEXEC
                    except AttributeError:
                        pass
                    else:
                        fcntl.fcntl(
                            epoll_obj.fileno(), fcntl.F_SETFD,
                            fcntl.fcntl(epoll_obj.fileno(), fcntl.F_GETFD)
                            | fcntl.FD_CLOEXEC)

                self._poll_obj = _epoll_adapter(epoll_obj)
                self.IO_ERR = select.EPOLLERR
                self.IO_HUP = select.EPOLLHUP
                self.IO_IN = select.EPOLLIN
                self.IO_NVAL = 0
                self.IO_OUT = select.EPOLLOUT
                self.IO_PRI = select.EPOLLPRI

        if self._poll_obj is None:
            self._poll_obj = create_poll_instance()
            self.IO_ERR = PollConstants.POLLERR
            self.IO_HUP = PollConstants.POLLHUP
            self.IO_IN = PollConstants.POLLIN
            self.IO_NVAL = PollConstants.POLLNVAL
            self.IO_OUT = PollConstants.POLLOUT
            self.IO_PRI = PollConstants.POLLPRI

        self._child_handlers = {}
        self._sigchld_read = None
        self._sigchld_write = None
        self._sigchld_src_id = None
        self._pid = os.getpid()
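The constructor above sets FD_CLOEXEC on the epoll descriptor when running under Python < 3.4, where descriptors are not close-on-exec by default. The fcntl dance in isolation, applied to an arbitrary descriptor (POSIX only; opening os.devnull is just for demonstration):

import fcntl
import os

fd = os.open(os.devnull, os.O_RDONLY)

# Read the current descriptor flags, OR in FD_CLOEXEC, and write them back,
# so the descriptor is closed automatically across exec().
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

assert fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC
os.close(fd)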
Example #10
0
class EventLoop(object):
    """
	An event loop, intended to be compatible with the GLib event loop.
	Call the iteration method in order to execute one iteration of the
	loop. The idle_add and timeout_add methods serve as thread-safe
	means to interact with the loop's thread.
	"""

    supports_multiprocessing = True

    # TODO: Find out why SIGCHLD signals aren't delivered during poll
    # calls, forcing us to wake up in order to receive them.
    _sigchld_interval = 250

    class _child_callback_class(SlotObject):
        __slots__ = ("callback", "data", "pid", "source_id")

    class _idle_callback_class(SlotObject):
        __slots__ = ("args", "callback", "calling", "source_id")

    class _io_handler_class(SlotObject):
        __slots__ = ("args", "callback", "f", "source_id")

    class _timeout_handler_class(SlotObject):
        __slots__ = ("args", "function", "calling", "interval", "source_id",
                     "timestamp")

    class _handle(object):
        """
		A callback wrapper object, compatible with asyncio.Handle.
		"""
        __slots__ = ("_callback_id", "_loop")

        def __init__(self, callback_id, loop):
            self._callback_id = callback_id
            self._loop = loop

        def cancel(self):
            """
			Cancel the call. If the callback is already canceled or executed,
			this method has no effect.
			"""
            self._loop.source_remove(self._callback_id)

    class _call_soon_callback(object):
        """
		Wraps a call_soon callback, and always returns False, since these
		callbacks are only supposed to run once.
		"""
        __slots__ = ("_args", "_callback")

        def __init__(self, callback, args):
            self._callback = callback
            self._args = args

        def __call__(self):
            self._callback(*self._args)
            return False

    def __init__(self, main=True):
        """
		@param main: If True then this is a singleton instance for use
			in the main thread, otherwise it is a local instance which
			can safely be used in a non-main thread (default is True, so
			that global_event_loop does not need constructor arguments)
		@type main: bool
		"""
        self._use_signal = main and fcntl is not None
        self._thread_rlock = threading.RLock()
        self._thread_condition = threading.Condition(self._thread_rlock)
        self._poll_event_queue = []
        self._poll_event_handlers = {}
        self._poll_event_handler_ids = {}
        # Increment id for each new handler.
        self._event_handler_id = 0
        # New call_soon callbacks must have an opportunity to
        # execute before it's safe to wait on self._thread_condition
        # without a timeout, since delaying its execution indefinitely
        # could lead to a deadlock. The following attribute stores the
        # event handler id of the most recently added call_soon callback.
        # If this attribute has changed since the last time that the
        # call_soon callbacks have been called, then it's not safe to
        # wait on self._thread_condition without a timeout.
        self._call_soon_id = 0
        # Use OrderedDict in order to emulate the FIFO queue behavior
        # of the AbstractEventLoop.call_soon method.
        self._idle_callbacks = OrderedDict()
        self._timeout_handlers = {}
        self._timeout_interval = None
        self._default_executor = None

        self._poll_obj = None
        try:
            select.epoll
        except AttributeError:
            pass
        else:
            try:
                epoll_obj = select.epoll()
            except IOError:
                # This happens with Linux 2.4 kernels:
                # IOError: [Errno 38] Function not implemented
                pass
            else:

                # FD_CLOEXEC is enabled by default in Python >=3.4.
                if sys.hexversion < 0x3040000 and fcntl is not None:
                    try:
                        fcntl.FD_CLOEXEC
                    except AttributeError:
                        pass
                    else:
                        fcntl.fcntl(
                            epoll_obj.fileno(), fcntl.F_SETFD,
                            fcntl.fcntl(epoll_obj.fileno(), fcntl.F_GETFD)
                            | fcntl.FD_CLOEXEC)

                self._poll_obj = _epoll_adapter(epoll_obj)
                self.IO_ERR = select.EPOLLERR
                self.IO_HUP = select.EPOLLHUP
                self.IO_IN = select.EPOLLIN
                self.IO_NVAL = 0
                self.IO_OUT = select.EPOLLOUT
                self.IO_PRI = select.EPOLLPRI

        if self._poll_obj is None:
            self._poll_obj = create_poll_instance()
            self.IO_ERR = PollConstants.POLLERR
            self.IO_HUP = PollConstants.POLLHUP
            self.IO_IN = PollConstants.POLLIN
            self.IO_NVAL = PollConstants.POLLNVAL
            self.IO_OUT = PollConstants.POLLOUT
            self.IO_PRI = PollConstants.POLLPRI

        self._child_handlers = {}
        self._sigchld_read = None
        self._sigchld_write = None
        self._sigchld_src_id = None
        self._pid = os.getpid()

    def create_future(self):
        """
		Create a Future object attached to the loop. This returns
		an instance of _EventLoopFuture, because EventLoop is currently
		missing some of the asyncio.AbstractEventLoop methods that
		asyncio.Future requires.
		"""
        return _EventLoopFuture(loop=self)

    def _new_source_id(self):
        """
		Generate a new source id. This method is thread-safe.
		"""
        with self._thread_rlock:
            self._event_handler_id += 1
            return self._event_handler_id

    def _poll(self, timeout=None):
        """
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""

        if timeout is None and \
         not self._poll_event_handlers:
            raise StopIteration(
                "timeout is None and there are no poll() event handlers")

        while True:
            try:
                self._poll_event_queue.extend(self._poll_obj.poll(timeout))
                break
            except (IOError, select.error) as e:
                # Silently handle EINTR, which is normal when we have
                # received a signal such as SIGINT (epoll objects may
                # raise IOError rather than select.error, at least in
                # Python 3.2).
                if not (e.args and e.args[0] == errno.EINTR):
                    writemsg_level("\n!!! select error: %s\n" % (e, ),
                                   level=logging.ERROR,
                                   noiselevel=-1)
                del e

                # This typically means that we've received a SIGINT, so
                # raise StopIteration in order to break out of our current
                # iteration and respond appropriately to the signal as soon
                # as possible.
                raise StopIteration("interrupted")

    def iteration(self, *args):
        """
		Like glib.MainContext.iteration(), runs a single iteration. In order
		to avoid blocking forever when may_block is True (the default),
		callers must be careful to ensure that at least one of the following
		conditions is met:
			1) An event source or timeout is registered which is guaranteed
				to trigger at least one event (a call to an idle function
				only counts as an event if it returns a False value which
				causes it to stop being called)
			2) Another thread is guaranteed to call one of the thread-safe
				methods which notify iteration to stop waiting (such as
				idle_add or timeout_add).
		These rules ensure that iteration is able to block until an event
		arrives, without doing any busy waiting that would waste CPU time.
		@type may_block: bool
		@param may_block: if True the call may block waiting for an event
			(default is True).
		@rtype: bool
		@return: True if events were dispatched.
		"""

        may_block = True

        if args:
            if len(args) > 1:
                raise TypeError("expected at most 1 argument (%s given)" %
                                len(args))
            may_block = args[0]

        event_queue = self._poll_event_queue
        event_handlers = self._poll_event_handlers
        events_handled = 0
        timeouts_checked = False

        if not event_handlers:
            with self._thread_condition:
                prev_call_soon_id = self._call_soon_id
                if self._run_timeouts():
                    events_handled += 1
                timeouts_checked = True

                call_soon = prev_call_soon_id != self._call_soon_id

                if (not call_soon and not event_handlers and not events_handled
                        and may_block):
                    # Block so that we don't waste cpu time by looping too
                    # quickly. This makes EventLoop useful for code that needs
                    # to wait for timeout callbacks regardless of whether or
                    # not any IO handlers are currently registered.
                    timeout = self._get_poll_timeout()
                    if timeout is None:
                        wait_timeout = None
                    else:
                        wait_timeout = timeout / 1000
                    # NOTE: In order to avoid a possible infinite wait when
                    # wait_timeout is None, the previous _run_timeouts()
                    # call must have returned False *with* _thread_condition
                    # acquired. Otherwise, we would risk going to sleep after
                    # our only notify event has already passed.
                    self._thread_condition.wait(wait_timeout)
                    if self._run_timeouts():
                        events_handled += 1
                    timeouts_checked = True

            # If any timeouts have executed, then return immediately,
            # in order to minimize latency in termination of iteration
            # loops that they may control.
            if events_handled or not event_handlers:
                return bool(events_handled)

        if not event_queue:

            if may_block:
                timeout = self._get_poll_timeout()

                # Avoid blocking for IO if there are any timeout
                # or idle callbacks available to process.
                if timeout != 0 and not timeouts_checked:
                    if self._run_timeouts():
                        events_handled += 1
                    timeouts_checked = True
                    if events_handled:
                        # Minimize latency for loops controlled
                        # by timeout or idle callback events.
                        timeout = 0
            else:
                timeout = 0

            try:
                self._poll(timeout=timeout)
            except StopIteration:
                # This can be triggered by EINTR which is caused by signals.
                pass

        # NOTE: IO event handlers may be re-entrant, in case something
        # like AbstractPollTask._wait_loop() needs to be called inside
        # a handler for some reason.
        while event_queue:
            events_handled += 1
            f, event = event_queue.pop()
            try:
                x = event_handlers[f]
            except KeyError:
                # This is known to be triggered by the epoll
                # implementation in qemu-user-1.2.2, and appears
                # to be harmless (see bug #451326).
                continue
            if not x.callback(f, event, *x.args):
                self.source_remove(x.source_id)

        if not timeouts_checked:
            if self._run_timeouts():
                events_handled += 1
            timeouts_checked = True

        return bool(events_handled)

    def _get_poll_timeout(self):

        with self._thread_rlock:
            if self._child_handlers:
                if self._timeout_interval is None:
                    timeout = self._sigchld_interval
                else:
                    timeout = min(self._sigchld_interval,
                                  self._timeout_interval)
            else:
                timeout = self._timeout_interval

        return timeout

    def child_watch_add(self, pid, callback, data=None):
        """
		Like glib.child_watch_add(), sets callback to be called with the
		user data specified by data when the child indicated by pid exits.
		The signature for the callback is:

			def callback(pid, condition, user_data)

		where pid is the child process id, condition is the status
		information about the child process and user_data is data.

		@type pid: int
		@param pid: process id of a child process to watch
		@type callback: callable
		@param callback: a function to call
		@type data: object
		@param data: the optional data to pass to function
		@rtype: int
		@return: an integer ID
		"""
        source_id = self._new_source_id()
        self._child_handlers[source_id] = self._child_callback_class(
            callback=callback, data=data, pid=pid, source_id=source_id)

        if self._use_signal:
            if self._sigchld_read is None:
                self._sigchld_read, self._sigchld_write = os.pipe()

                fcntl.fcntl(
                    self._sigchld_read, fcntl.F_SETFL,
                    fcntl.fcntl(self._sigchld_read, fcntl.F_GETFL)
                    | os.O_NONBLOCK)

                # FD_CLOEXEC is enabled by default in Python >=3.4.
                if sys.hexversion < 0x3040000:
                    try:
                        fcntl.FD_CLOEXEC
                    except AttributeError:
                        pass
                    else:
                        fcntl.fcntl(
                            self._sigchld_read, fcntl.F_SETFD,
                            fcntl.fcntl(self._sigchld_read, fcntl.F_GETFD)
                            | fcntl.FD_CLOEXEC)

            # The IO watch is dynamically registered and unregistered as
            # needed, since we don't want to consider it as a valid source
            # of events when there are no child listeners. It's important
            # to distinguish when there are no valid sources of IO events,
            # in order to avoid an endless poll call if there's no timeout.
            if self._sigchld_src_id is None:
                self._sigchld_src_id = self.io_add_watch(
                    self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
                signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)

        # poll now, in case the SIGCHLD has already arrived
        self._poll_child_processes()
        return source_id

    def _sigchld_sig_cb(self, signum, frame):
        # If this signal handler was not installed by the
        # current process then the signal doesn't belong to
        # this EventLoop instance.
        if os.getpid() == self._pid:
            os.write(self._sigchld_write, b'\0')

    def _sigchld_io_cb(self, fd, events):
        try:
            while True:
                os.read(self._sigchld_read, 4096)
        except OSError:
            # read until EAGAIN
            pass
        self._poll_child_processes()
        return True

    def _poll_child_processes(self):
        if not self._child_handlers:
            return False

        calls = 0

        for x in list(self._child_handlers.values()):
            if x.source_id not in self._child_handlers:
                # it's already been called via re-entrance
                continue
            try:
                wait_retval = os.waitpid(x.pid, os.WNOHANG)
            except OSError as e:
                if e.errno != errno.ECHILD:
                    raise
                del e
                self.source_remove(x.source_id)
            else:
                # With waitpid and WNOHANG, only check the
                # first element of the tuple since the second
                # element may vary (bug #337465).
                if wait_retval[0] != 0:
                    calls += 1
                    self.source_remove(x.source_id)
                    x.callback(x.pid, wait_retval[1], x.data)

        return bool(calls)

    def idle_add(self, callback, *args):
        """
		Like glib.idle_add(), if callback returns False it is
		automatically removed from the list of event sources and will
		not be called again. This method is thread-safe.

		The idle_add method is deprecated. Use the call_soon and
		call_soon_threadsafe methods instead.

		@type callback: callable
		@param callback: a function to call
		@rtype: int
		@return: an integer ID
		"""
        with self._thread_condition:
            source_id = self._call_soon_id = self._new_source_id()
            self._idle_callbacks[source_id] = self._idle_callback_class(
                args=args, callback=callback, source_id=source_id)
            self._thread_condition.notify()
        return source_id

    def _run_idle_callbacks(self):
        # assumes caller has acquired self._thread_rlock
        if not self._idle_callbacks:
            return False
        state_change = 0
        # Iterate over our local list, since self._idle_callbacks can be
        # modified during the execution of these callbacks.
        for x in list(self._idle_callbacks.values()):
            if x.source_id not in self._idle_callbacks:
                # it got cancelled while executing another callback
                continue
            if x.calling:
                # don't call it recursively
                continue
            x.calling = True
            try:
                if not x.callback(*x.args):
                    state_change += 1
                    self.source_remove(x.source_id)
            finally:
                x.calling = False

        return bool(state_change)

    def timeout_add(self, interval, function, *args):
        """
		Like glib.timeout_add(), interval argument is the number of
		milliseconds between calls to your function, and your function
		should return False to stop being called, or True to continue
		being called. Any additional positional arguments given here
		are passed to your function when it's called. This method is
		thread-safe.
		"""
        with self._thread_condition:
            source_id = self._new_source_id()
            self._timeout_handlers[source_id] = \
             self._timeout_handler_class(
              interval=interval, function=function, args=args,
              source_id=source_id, timestamp=self.time())
            if self._timeout_interval is None or \
             self._timeout_interval > interval:
                self._timeout_interval = interval
            self._thread_condition.notify()
        return source_id

    def _run_timeouts(self):

        calls = 0
        if not self._use_signal:
            if self._poll_child_processes():
                calls += 1

        with self._thread_rlock:

            if self._run_idle_callbacks():
                calls += 1

            if not self._timeout_handlers:
                return bool(calls)

            ready_timeouts = []
            current_time = self.time()
            for x in self._timeout_handlers.values():
                elapsed_seconds = current_time - x.timestamp
                # elapsed_seconds < 0 means the system clock has been adjusted
                if elapsed_seconds < 0 or \
                 (x.interval - 1000 * elapsed_seconds) <= 0:
                    ready_timeouts.append(x)

            # Iterate over our local list, since self._timeout_handlers can be
            # modified during the execution of these callbacks.
            for x in ready_timeouts:
                if x.source_id not in self._timeout_handlers:
                    # it got cancelled while executing another timeout
                    continue
                if x.calling:
                    # don't call it recursively
                    continue
                calls += 1
                x.calling = True
                try:
                    x.timestamp = self.time()
                    if not x.function(*x.args):
                        self.source_remove(x.source_id)
                finally:
                    x.calling = False

        return bool(calls)

    def io_add_watch(self, f, condition, callback, *args):
        """
		Like glib.io_add_watch(), your function should return False to
		stop being called, or True to continue being called. Any
		additional positional arguments given here are passed to your
		function when it's called.

		@type f: int or object with fileno() method
		@param f: a file descriptor to monitor
		@type condition: int
		@param condition: a condition mask
		@type callback: callable
		@param callback: a function to call
		@rtype: int
		@return: an integer ID of the event source
		"""
        if f in self._poll_event_handlers:
            raise AssertionError("fd %d is already registered" % f)
        source_id = self._new_source_id()
        self._poll_event_handler_ids[source_id] = f
        self._poll_event_handlers[f] = self._io_handler_class(
            args=args, callback=callback, f=f, source_id=source_id)
        self._poll_obj.register(f, condition)
        return source_id

    def source_remove(self, reg_id):
        """
		Like glib.source_remove(), this returns True if the given reg_id
		is found and removed, and False if the reg_id is invalid or has
		already been removed.
		"""
        x = self._child_handlers.pop(reg_id, None)
        if x is not None:
            if not self._child_handlers and self._use_signal:
                signal.signal(signal.SIGCHLD, signal.SIG_DFL)
                self.source_remove(self._sigchld_src_id)
                self._sigchld_src_id = None
            return True

        with self._thread_rlock:
            idle_callback = self._idle_callbacks.pop(reg_id, None)
            if idle_callback is not None:
                return True
            timeout_handler = self._timeout_handlers.pop(reg_id, None)
            if timeout_handler is not None:
                if timeout_handler.interval == self._timeout_interval:
                    if self._timeout_handlers:
                        self._timeout_interval = min(
                            x.interval
                            for x in self._timeout_handlers.values())
                    else:
                        self._timeout_interval = None
                return True

        f = self._poll_event_handler_ids.pop(reg_id, None)
        if f is None:
            return False
        self._poll_obj.unregister(f)
        if self._poll_event_queue:
            # Discard any unhandled events that belong to this file,
            # in order to prevent these events from being erroneously
            # delivered to a future handler that is using a reallocated
            # file descriptor of the same numeric value (causing
            # extremely confusing bugs).
            remaining_events = []
            discarded_events = False
            for event in self._poll_event_queue:
                if event[0] == f:
                    discarded_events = True
                else:
                    remaining_events.append(event)

            if discarded_events:
                self._poll_event_queue[:] = remaining_events

        del self._poll_event_handlers[f]
        return True

    def run_until_complete(self, future):
        """
		Run until the Future is done.

		@type future: asyncio.Future
		@param future: a Future to wait for
		@rtype: object
		@return: the Future's result
		@raise: the Future's exception
		"""
        while not future.done():
            self.iteration()

        return future.result()

    def call_soon(self, callback, *args):
        """
		Arrange for a callback to be called as soon as possible. The callback
		is called after call_soon() returns, when control returns to the event
		loop.

		This operates as a FIFO queue, callbacks are called in the order in
		which they are registered. Each callback will be called exactly once.

		Any positional arguments after the callback will be passed to the
		callback when it is called.

		An object compatible with asyncio.Handle is returned, which can
		be used to cancel the callback.

		@type callback: callable
		@param callback: a function to call
		@return: a handle which can be used to cancel the callback
		@rtype: asyncio.Handle (or compatible)
		"""
        return self._handle(
            self.idle_add(self._call_soon_callback(callback, args)), self)

    # The call_soon method inherits thread safety from the idle_add method.
    call_soon_threadsafe = call_soon

    def time(self):
        """Return the time according to the event loop's clock.

		This is a float expressed in seconds since an epoch, but the
		epoch, precision, accuracy and drift are unspecified and may
		differ per event loop.
		"""
        return monotonic()

    def call_later(self, delay, callback, *args):
        """
		Arrange for the callback to be called after the given delay seconds
		(either an int or float).

		An instance of asyncio.Handle is returned, which can be used to cancel
		the callback.

		callback will be called exactly once per call to call_later(). If two
		callbacks are scheduled for exactly the same time, it is undefined
		which will be called first.

		The optional positional args will be passed to the callback when
		it is called. If you want the callback to be called with some named
		arguments, use a closure or functools.partial().

		Use functools.partial to pass keywords to the callback.

		@type delay: int or float
		@param delay: delay seconds
		@type callback: callable
		@param callback: a function to call
		@return: a handle which can be used to cancel the callback
		@rtype: asyncio.Handle (or compatible)
		"""
        return self._handle(
            self.timeout_add(delay * 1000,
                             self._call_soon_callback(callback, args)), self)

    def run_in_executor(self, executor, func, *args):
        """
		Arrange for a func to be called in the specified executor.

		The executor argument should be an Executor instance. The default
		executor is used if executor is None.

		Use functools.partial to pass keywords to the *func*.

		@param executor: executor
		@type executor: concurrent.futures.Executor or None
		@param func: a function to call
		@type func: callable
		@return: a Future
		@rtype: asyncio.Future (or compatible)
		"""
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = ForkExecutor(loop=self)
                self._default_executor = executor
        return executor.submit(func, *args)

    def close(self):
        """Close the event loop.

		This clears the queues and shuts down the executor,
		and waits for it to finish.
		"""
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            executor.shutdown(wait=True)

        if self._poll_obj is not None:
            close = getattr(self._poll_obj, 'close', None)
            if close is not None:
                close()
            self._poll_obj = None
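
For orientation, here is a minimal usage sketch of the scheduling API documented above (timeout_add, call_soon, call_later, create_future, run_until_complete). It assumes the EventLoop class these methods belong to is importable as-is; the callback names, intervals, and the set_result() call on the returned future are illustrative assumptions rather than code taken from the project.

loop = EventLoop(main=True)

def tick():
    # Returning False removes this timeout source after its first call.
    print("tick")
    return False

loop.timeout_add(100, tick)               # call tick() after roughly 100 ms
loop.call_soon(print, "queued callback")  # FIFO-queued, runs exactly once

future = loop.create_future()
loop.call_later(0.5, future.set_result, "done")  # assumes an asyncio-style set_result()
print(loop.run_until_complete(future))    # iterate the loop until the future resolves
loop.close()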
Example #11
0
File: fetch.py Project: ezc/portage
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    myfile
					}

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						try:
							variables[k] = mysettings[k]
						except KeyError:
							pass

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception?  file exists. let digestcheck() report
							# appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
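
For context, a hedged sketch of how the fetch() function above is typically driven: the caller passes a mapping of distfile name to candidate URIs (the OrderedDict branch handled near the top of the function) together with a portage config object. The import path, distfile name, and URI below are illustrative assumptions, not taken from a real ebuild.

import portage
from portage import OrderedDict
from portage.package.ebuild.fetch import fetch  # assumed location of the function shown above

mysettings = portage.config(clone=portage.settings)
uri_map = OrderedDict()
uri_map["foo-1.0.tar.gz"] = ["https://example.org/distfiles/foo-1.0.tar.gz"]

# fetch() returns 1 on success and 0 on failure, as seen in the code above.
if not fetch(uri_map, mysettings, fetchonly=1):
    print("fetch failed")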
Example #12
0
# Copyright 2014-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

import os

from portage import OrderedDict
from portage.module import Modules
from portage.sync.controller import SyncManager
from portage.sync.config_checks import check_type

_SUBMODULE_PATH_MAP = OrderedDict([
    ('glsa', ('metadata/glsa', )),
    ('news', ('metadata/news', )),
    ('profiles', ('metadata/layout.conf', 'profiles')),
])

path = os.path.join(os.path.dirname(__file__), "modules")
# initial development debug info
#print("module path:", path)

module_controller = Modules(path=path, namepath="portage.sync.modules")

# initial development debug info
#print(module_controller.module_names)
module_names = module_controller.module_names[:]


def module_specific_options(repo):
    '''Get the authorized module specific options set for
	the repos.conf settings for the repo'''
    global module_controller
Example #13
0
# Copyright 2014-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

import os

from portage import OrderedDict
from portage.module import Modules
from portage.sync.controller import SyncManager
from portage.sync.config_checks import check_type

_SUBMODULE_PATH_MAP = OrderedDict([
	('glsa', 'metadata/glsa'),
	('news', 'metadata/news'),
	('profiles', 'profiles'),
])

path = os.path.join(os.path.dirname(__file__), "modules")
# initial development debug info
#print("module path:", path)

module_controller = Modules(path=path, namepath="portage.sync.modules")

# initial development debug info
#print(module_controller.module_names)
module_names = module_controller.module_names[:]


def module_specific_options(repo):
	'''Get the authorized module specific options set for
	the repos.conf settings for the repo'''
	global module_controller
Example #14
0
	def __init__(self, main=True):
		"""
		@param main: If True then this is a singleton instance for use
			in the main thread, otherwise it is a local instance which
			can safely be used in a non-main thread (default is True, so
			that global_event_loop does not need constructor arguments)
		@type main: bool
		"""
		self._use_signal = main and fcntl is not None
		self._thread_rlock = threading.RLock()
		self._thread_condition = threading.Condition(self._thread_rlock)
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		# New call_soon callbacks must have an opportunity to
		# execute before it's safe to wait on self._thread_condition
		# without a timeout, since delaying its execution indefinitely
		# could lead to a deadlock. The following attribute stores the
		# event handler id of the most recently added call_soon callback.
		# If this attribute has changed since the last time that the
		# call_soon callbacks have been called, then it's not safe to
		# wait on self._thread_condition without a timeout.
		self._call_soon_id = 0
		# Use OrderedDict in order to emulate the FIFO queue behavior
		# of the AbstractEventLoop.call_soon method.
		self._idle_callbacks = OrderedDict()
		self._timeout_handlers = {}
		self._timeout_interval = None
		self._default_executor = None

		self._poll_obj = None
		try:
			select.epoll
		except AttributeError:
			pass
		else:
			try:
				epoll_obj = select.epoll()
			except IOError:
				# This happens with Linux 2.4 kernels:
				# IOError: [Errno 38] Function not implemented
				pass
			else:

				# FD_CLOEXEC is enabled by default in Python >=3.4.
				if sys.hexversion < 0x3040000 and fcntl is not None:
					try:
						fcntl.FD_CLOEXEC
					except AttributeError:
						pass
					else:
						fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
							fcntl.fcntl(epoll_obj.fileno(),
								fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

				self._poll_obj = _epoll_adapter(epoll_obj)
				self.IO_ERR = select.EPOLLERR
				self.IO_HUP = select.EPOLLHUP
				self.IO_IN = select.EPOLLIN
				self.IO_NVAL = 0
				self.IO_OUT = select.EPOLLOUT
				self.IO_PRI = select.EPOLLPRI

		if self._poll_obj is None:
			self._poll_obj = create_poll_instance()
			self.IO_ERR = PollConstants.POLLERR
			self.IO_HUP = PollConstants.POLLHUP
			self.IO_IN = PollConstants.POLLIN
			self.IO_NVAL = PollConstants.POLLNVAL
			self.IO_OUT = PollConstants.POLLOUT
			self.IO_PRI = PollConstants.POLLPRI

		self._child_handlers = {}
		self._sigchld_read = None
		self._sigchld_write = None
		self._sigchld_src_id = None
		self._pid = os.getpid()
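
The constructor above prefers select.epoll() and silently falls back to a plain poll-style object when epoll is unavailable (non-Linux platforms, or old kernels where epoll() raises "Function not implemented"). Below is a condensed, standalone sketch of that selection pattern, independent of EventLoop; the helper name is an illustrative assumption.

import select

def _make_poller():
    # Prefer epoll when the platform exposes it; otherwise fall back to
    # poll(), mirroring the selection logic in the constructor above.
    if hasattr(select, "epoll"):
        try:
            return select.epoll()
        except IOError:
            pass  # e.g. kernel without epoll support
    return select.poll()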
Example #15
0
class EventLoop(object):
	"""
	An event loop, intended to be compatible with the GLib event loop.
	Call the iteration method in order to execute one iteration of the
	loop. The idle_add and timeout_add methods serve as thread-safe
	means to interact with the loop's thread.
	"""

	supports_multiprocessing = True

	# TODO: Find out why SIGCHLD signals aren't delivered during poll
	# calls, forcing us to wake up in order to receive them.
	_sigchld_interval = 250

	class _child_callback_class(SlotObject):
		__slots__ = ("callback", "data", "pid", "source_id")

	class _idle_callback_class(SlotObject):
		__slots__ = ("args", "callback", "calling", "source_id")

	class _io_handler_class(SlotObject):
		__slots__ = ("args", "callback", "f", "source_id")

	class _timeout_handler_class(SlotObject):
		__slots__ = ("args", "function", "calling", "interval", "source_id",
			"timestamp")

	class _handle(object):
		"""
		A callback wrapper object, compatible with asyncio.Handle.
		"""
		__slots__ = ("_callback_id", "_loop")

		def __init__(self, callback_id, loop):
			self._callback_id = callback_id
			self._loop = loop

		def cancel(self):
			"""
			Cancel the call. If the callback is already canceled or executed,
			this method has no effect.
			"""
			self._loop.source_remove(self._callback_id)

	class _call_soon_callback(object):
		"""
		Wraps a call_soon callback, and always returns False, since these
		callbacks are only supposed to run once.
		"""
		__slots__ = ("_args", "_callback")

		def __init__(self, callback, args):
			self._callback = callback
			self._args = args

		def __call__(self):
			self._callback(*self._args)
			return False

	def __init__(self, main=True):
		"""
		@param main: If True then this is a singleton instance for use
			in the main thread, otherwise it is a local instance which
			can safely be used in a non-main thread (default is True, so
			that global_event_loop does not need constructor arguments)
		@type main: bool
		"""
		self._use_signal = main and fcntl is not None
		self._thread_rlock = threading.RLock()
		self._thread_condition = threading.Condition(self._thread_rlock)
		self._poll_event_queue = []
		self._poll_event_handlers = {}
		self._poll_event_handler_ids = {}
		# Increment id for each new handler.
		self._event_handler_id = 0
		# New call_soon callbacks must have an opportunity to
		# execute before it's safe to wait on self._thread_condition
		# without a timeout, since delaying its execution indefinitely
		# could lead to a deadlock. The following attribute stores the
		# event handler id of the most recently added call_soon callback.
		# If this attribute has changed since the last time that the
		# call_soon callbacks have been called, then it's not safe to
		# wait on self._thread_condition without a timeout.
		self._call_soon_id = 0
		# Use OrderedDict in order to emulate the FIFO queue behavior
		# of the AbstractEventLoop.call_soon method.
		self._idle_callbacks = OrderedDict()
		self._timeout_handlers = {}
		self._timeout_interval = None
		self._default_executor = None

		self._poll_obj = None
		try:
			select.epoll
		except AttributeError:
			pass
		else:
			try:
				epoll_obj = select.epoll()
			except IOError:
				# This happens with Linux 2.4 kernels:
				# IOError: [Errno 38] Function not implemented
				pass
			else:

				# FD_CLOEXEC is enabled by default in Python >=3.4.
				if sys.hexversion < 0x3040000 and fcntl is not None:
					try:
						fcntl.FD_CLOEXEC
					except AttributeError:
						pass
					else:
						fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
							fcntl.fcntl(epoll_obj.fileno(),
								fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

				self._poll_obj = _epoll_adapter(epoll_obj)
				self.IO_ERR = select.EPOLLERR
				self.IO_HUP = select.EPOLLHUP
				self.IO_IN = select.EPOLLIN
				self.IO_NVAL = 0
				self.IO_OUT = select.EPOLLOUT
				self.IO_PRI = select.EPOLLPRI

		if self._poll_obj is None:
			self._poll_obj = create_poll_instance()
			self.IO_ERR = PollConstants.POLLERR
			self.IO_HUP = PollConstants.POLLHUP
			self.IO_IN = PollConstants.POLLIN
			self.IO_NVAL = PollConstants.POLLNVAL
			self.IO_OUT = PollConstants.POLLOUT
			self.IO_PRI = PollConstants.POLLPRI

		self._child_handlers = {}
		self._sigchld_read = None
		self._sigchld_write = None
		self._sigchld_src_id = None
		self._pid = os.getpid()

	def create_future(self):
		"""
		Create a Future object attached to the loop. This returns
		an instance of _EventLoopFuture, because EventLoop is currently
		missing some of the asyncio.AbstractEventLoop methods that
		asyncio.Future requires.
		"""
		return _EventLoopFuture(loop=self)

	def _new_source_id(self):
		"""
		Generate a new source id. This method is thread-safe.
		"""
		with self._thread_rlock:
			self._event_handler_id += 1
			return self._event_handler_id

	def _poll(self, timeout=None):
		"""
		All poll() calls pass through here. The poll events
		are added directly to self._poll_event_queue.
		In order to avoid endless blocking, this raises
		StopIteration if timeout is None and there are
		no file descriptors to poll.
		"""

		if timeout is None and \
			not self._poll_event_handlers:
			raise StopIteration(
				"timeout is None and there are no poll() event handlers")

		while True:
			try:
				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
				break
			except (IOError, select.error) as e:
				# Silently handle EINTR, which is normal when we have
				# received a signal such as SIGINT (epoll objects may
				# raise IOError rather than select.error, at least in
				# Python 3.2).
				if not (e.args and e.args[0] == errno.EINTR):
					writemsg_level("\n!!! select error: %s\n" % (e,),
						level=logging.ERROR, noiselevel=-1)
				del e

				# This typically means that we've received a SIGINT, so
				# raise StopIteration in order to break out of our current
				# iteration and respond appropriately to the signal as soon
				# as possible.
				raise StopIteration("interrupted")

	def iteration(self, *args):
		"""
		Like glib.MainContext.iteration(), runs a single iteration. In order
		to avoid blocking forever when may_block is True (the default),
		callers must be careful to ensure that at least one of the following
		conditions is met:
			1) An event source or timeout is registered which is guaranteed
				to trigger at least one event (a call to an idle function
				only counts as an event if it returns a False value which
				causes it to stop being called)
			2) Another thread is guaranteed to call one of the thread-safe
				methods which notify iteration to stop waiting (such as
				idle_add or timeout_add).
		These rules ensure that iteration is able to block until an event
		arrives, without doing any busy waiting that would waste CPU time.
		@type may_block: bool
		@param may_block: if True the call may block waiting for an event
			(default is True).
		@rtype: bool
		@return: True if events were dispatched.
		"""

		may_block = True

		if args:
			if len(args) > 1:
				raise TypeError(
					"expected at most 1 argument (%s given)" % len(args))
			may_block = args[0]

		event_queue =  self._poll_event_queue
		event_handlers = self._poll_event_handlers
		events_handled = 0
		timeouts_checked = False

		if not event_handlers:
			with self._thread_condition:
				prev_call_soon_id = self._call_soon_id
				if self._run_timeouts():
					events_handled += 1
				timeouts_checked = True

				call_soon = prev_call_soon_id != self._call_soon_id

				if (not call_soon and not event_handlers
					and not events_handled and may_block):
					# Block so that we don't waste cpu time by looping too
					# quickly. This makes EventLoop useful for code that needs
					# to wait for timeout callbacks regardless of whether or
					# not any IO handlers are currently registered.
					timeout = self._get_poll_timeout()
					if timeout is None:
						wait_timeout = None
					else:
						wait_timeout = timeout / 1000
					# NOTE: In order to avoid a possible infinite wait when
					# wait_timeout is None, the previous _run_timeouts()
					# call must have returned False *with* _thread_condition
					# acquired. Otherwise, we would risk going to sleep after
					# our only notify event has already passed.
					self._thread_condition.wait(wait_timeout)
					if self._run_timeouts():
						events_handled += 1
					timeouts_checked = True

			# If any timeouts have executed, then return immediately,
			# in order to minimize latency in termination of iteration
			# loops that they may control.
			if events_handled or not event_handlers:
				return bool(events_handled)

		if not event_queue:

			if may_block:
				timeout = self._get_poll_timeout()

				# Avoid blocking for IO if there are any timeout
				# or idle callbacks available to process.
				if timeout != 0 and not timeouts_checked:
					if self._run_timeouts():
						events_handled += 1
					timeouts_checked = True
					if events_handled:
						# Minimize latency for loops controlled
						# by timeout or idle callback events.
						timeout = 0
			else:
				timeout = 0

			try:
				self._poll(timeout=timeout)
			except StopIteration:
				# This can be triggered by EINTR which is caused by signals.
				pass

		# NOTE: IO event handlers may be re-entrant, in case something
		# like AbstractPollTask._wait_loop() needs to be called inside
		# a handler for some reason.
		while event_queue:
			events_handled += 1
			f, event = event_queue.pop()
			try:
				x = event_handlers[f]
			except KeyError:
				# This is known to be triggered by the epoll
				# implementation in qemu-user-1.2.2, and appears
				# to be harmless (see bug #451326).
				continue
			if not x.callback(f, event, *x.args):
				self.source_remove(x.source_id)

		if not timeouts_checked:
			if self._run_timeouts():
				events_handled += 1
			timeouts_checked = True

		return bool(events_handled)
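
	# Illustrative usage sketch (not part of the original class): drive the
	# loop with iteration() until a registered timeout callback has fired,
	# assuming an already-constructed EventLoop instance named 'loop':
	#
	#     fired = []
	#     def _on_timeout():
	#         fired.append(True)
	#         return False  # run only once
	#     loop.timeout_add(100, _on_timeout)
	#     while not fired:
	#         loop.iteration()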

	def _get_poll_timeout(self):

		with self._thread_rlock:
			if self._child_handlers:
				if self._timeout_interval is None:
					timeout = self._sigchld_interval
				else:
					timeout = min(self._sigchld_interval,
						self._timeout_interval)
			else:
				timeout = self._timeout_interval

		return timeout

	def child_watch_add(self, pid, callback, data=None):
		"""
		Like glib.child_watch_add(), sets callback to be called with the
		user data specified by data when the child indicated by pid exits.
		The signature for the callback is:

			def callback(pid, condition, user_data)

		where pid is the child process id, condition is the status
		information about the child process and user_data is data.

		@type pid: int
		@param pid: process id of a child process to watch
		@type callback: callable
		@param callback: a function to call
		@type data: object
		@param data: the optional data to pass to the callback
		@rtype: int
		@return: an integer ID
		"""
		source_id = self._new_source_id()
		self._child_handlers[source_id] = self._child_callback_class(
			callback=callback, data=data, pid=pid, source_id=source_id)

		if self._use_signal:
			if self._sigchld_read is None:
				self._sigchld_read, self._sigchld_write = os.pipe()

				fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
					fcntl.fcntl(self._sigchld_read,
					fcntl.F_GETFL) | os.O_NONBLOCK)

				# FD_CLOEXEC is enabled by default in Python >=3.4.
				if sys.hexversion < 0x3040000:
					try:
						fcntl.FD_CLOEXEC
					except AttributeError:
						pass
					else:
						fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
							fcntl.fcntl(self._sigchld_read,
							fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

			# The IO watch is dynamically registered and unregistered as
			# needed, since we don't want to consider it as a valid source
			# of events when there are no child listeners. It's important
			# to distinguish when there are no valid sources of IO events,
			# in order to avoid an endless poll call if there's no timeout.
			if self._sigchld_src_id is None:
				self._sigchld_src_id = self.io_add_watch(
					self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
				signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)

		# poll now, in case the SIGCHLD has already arrived
		self._poll_child_processes()
		return source_id
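
	# Illustrative usage sketch (not part of the original class), assuming an
	# EventLoop instance named 'loop' and a short-lived child process:
	#
	#     def on_child_exit(pid, status, note):
	#         print("child %s exited with status %s (%s)" % (pid, status, note))
	#
	#     child_pid = os.spawnlp(os.P_NOWAIT, "true", "true")
	#     loop.child_watch_add(child_pid, on_child_exit, data="demo")
	#     loop.iteration()  # repeat until the callback has fired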

	def _sigchld_sig_cb(self, signum, frame):
		# If this signal handler was not installed by the
		# current process then the signal doesn't belong to
		# this EventLoop instance.
		if os.getpid() == self._pid:
			os.write(self._sigchld_write, b'\0')

	def _sigchld_io_cb(self, fd, events):
		try:
			while True:
				os.read(self._sigchld_read, 4096)
		except OSError:
			# A non-blocking read raises OSError (EAGAIN) once the pipe is drained.
			pass
		self._poll_child_processes()
		return True

	def _poll_child_processes(self):
		if not self._child_handlers:
			return False

		calls = 0

		for x in list(self._child_handlers.values()):
			if x.source_id not in self._child_handlers:
				# it's already been called via re-entrance
				continue
			try:
				wait_retval = os.waitpid(x.pid, os.WNOHANG)
			except OSError as e:
				if e.errno != errno.ECHILD:
					raise
				del e
				self.source_remove(x.source_id)
			else:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if wait_retval[0] != 0:
					calls += 1
					self.source_remove(x.source_id)
					x.callback(x.pid, wait_retval[1], x.data)

		return bool(calls)

	def idle_add(self, callback, *args):
		"""
		Like glib.idle_add(), if callback returns False it is
		automatically removed from the list of event sources and will
		not be called again. This method is thread-safe.

		The idle_add method is deprecated. Use the call_soon and
		call_soon_threadsafe methods instead.

		@type callback: callable
		@param callback: a function to call
		@rtype: int
		@return: an integer ID
		"""
		with self._thread_condition:
			source_id = self._call_soon_id = self._new_source_id()
			self._idle_callbacks[source_id] = self._idle_callback_class(
				args=args, callback=callback, source_id=source_id)
			self._thread_condition.notify()
		return source_id

	def _run_idle_callbacks(self):
		# assumes caller has acquired self._thread_rlock
		if not self._idle_callbacks:
			return False
		state_change = 0
		# Iterate over a local copy, since self._idle_callbacks can be
		# modified during the execution of these callbacks.
		for x in list(self._idle_callbacks.values()):
			if x.source_id not in self._idle_callbacks:
				# it got cancelled while executing another callback
				continue
			if x.calling:
				# don't call it recursively
				continue
			x.calling = True
			try:
				if not x.callback(*x.args):
					state_change += 1
					self.source_remove(x.source_id)
			finally:
				x.calling = False

		return bool(state_change)

	def timeout_add(self, interval, function, *args):
		"""
		Like glib.timeout_add(), interval argument is the number of
		milliseconds between calls to your function, and your function
		should return False to stop being called, or True to continue
		being called. Any additional positional arguments given here
		are passed to your function when it's called. This method is
		thread-safe.
		"""
		with self._thread_condition:
			source_id = self._new_source_id()
			self._timeout_handlers[source_id] = \
				self._timeout_handler_class(
					interval=interval, function=function, args=args,
					source_id=source_id, timestamp=self.time())
			if self._timeout_interval is None or \
				self._timeout_interval > interval:
				self._timeout_interval = interval
			self._thread_condition.notify()
		return source_id
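
	# Illustrative usage sketch (not part of the original class): a periodic
	# callback that stays registered by returning True, assuming an EventLoop
	# instance named 'loop':
	#
	#     def tick(label):
	#         print("tick", label)
	#         return True  # keep the timeout registered
	#
	#     tick_id = loop.timeout_add(500, tick, "demo")  # every 500 ms
	#     # later, when the periodic callback is no longer needed:
	#     loop.source_remove(tick_id)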

	def _run_timeouts(self):

		calls = 0
		if not self._use_signal:
			if self._poll_child_processes():
				calls += 1

		with self._thread_rlock:

			if self._run_idle_callbacks():
				calls += 1

			if not self._timeout_handlers:
				return bool(calls)

			ready_timeouts = []
			current_time = self.time()
			for x in self._timeout_handlers.values():
				elapsed_seconds = current_time - x.timestamp
				# elapsed_seconds < 0 means the system clock has been adjusted
				if elapsed_seconds < 0 or \
					(x.interval - 1000 * elapsed_seconds) <= 0:
					ready_timeouts.append(x)

			# Iterate over the local list, since self._timeout_handlers can be
			# modified during the execution of these callbacks.
			for x in ready_timeouts:
				if x.source_id not in self._timeout_handlers:
					# it got cancelled while executing another timeout
					continue
				if x.calling:
					# don't call it recursively
					continue
				calls += 1
				x.calling = True
				try:
					x.timestamp = self.time()
					if not x.function(*x.args):
						self.source_remove(x.source_id)
				finally:
					x.calling = False

		return bool(calls)

	def io_add_watch(self, f, condition, callback, *args):
		"""
		Like glib.io_add_watch(), your function should return False to
		stop being called, or True to continue being called. Any
		additional positional arguments given here are passed to your
		function when it's called.

		@type f: int or object with fileno() method
		@param f: a file descriptor to monitor
		@type condition: int
		@param condition: a condition mask
		@type callback: callable
		@param callback: a function to call
		@rtype: int
		@return: an integer ID of the event source
		"""
		if f in self._poll_event_handlers:
			raise AssertionError("fd %d is already registered" % f)
		source_id = self._new_source_id()
		self._poll_event_handler_ids[source_id] = f
		self._poll_event_handlers[f] = self._io_handler_class(
			args=args, callback=callback, f=f, source_id=source_id)
		self._poll_obj.register(f, condition)
		return source_id
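
	# Illustrative usage sketch (not part of the original class): watch the
	# read end of a pipe for readability, assuming an EventLoop instance
	# named 'loop' (IO_IN is the same readability condition used for the
	# SIGCHLD pipe above):
	#
	#     def on_readable(fd, event):
	#         print("got %d bytes" % len(os.read(fd, 4096)))
	#         return True  # keep watching this fd
	#
	#     pr, pw = os.pipe()
	#     watch_id = loop.io_add_watch(pr, loop.IO_IN, on_readable)
	#     os.write(pw, b"hello")
	#     loop.iteration()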

	def source_remove(self, reg_id):
		"""
		Like glib.source_remove(), this returns True if the given reg_id
		is found and removed, and False if the reg_id is invalid or has
		already been removed.
		"""
		x = self._child_handlers.pop(reg_id, None)
		if x is not None:
			if not self._child_handlers and self._use_signal:
				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				self.source_remove(self._sigchld_src_id)
				self._sigchld_src_id = None
			return True

		with self._thread_rlock:
			idle_callback = self._idle_callbacks.pop(reg_id, None)
			if idle_callback is not None:
				return True
			timeout_handler = self._timeout_handlers.pop(reg_id, None)
			if timeout_handler is not None:
				if timeout_handler.interval == self._timeout_interval:
					if self._timeout_handlers:
						self._timeout_interval = min(x.interval
							for x in self._timeout_handlers.values())
					else:
						self._timeout_interval = None
				return True

		f = self._poll_event_handler_ids.pop(reg_id, None)
		if f is None:
			return False
		self._poll_obj.unregister(f)
		if self._poll_event_queue:
			# Discard any unhandled events that belong to this file,
			# in order to prevent these events from being erroneously
			# delivered to a future handler that is using a reallocated
			# file descriptor of the same numeric value (causing
			# extremely confusing bugs).
			remaining_events = []
			discarded_events = False
			for event in self._poll_event_queue:
				if event[0] == f:
					discarded_events = True
				else:
					remaining_events.append(event)

			if discarded_events:
				self._poll_event_queue[:] = remaining_events

		del self._poll_event_handlers[f]
		return True
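
	# Illustrative usage sketch (not part of the original class), assuming an
	# EventLoop instance named 'loop':
	#
	#     source_id = loop.timeout_add(1000, lambda: True)
	#     loop.source_remove(source_id)   # returns True (found and removed)
	#     loop.source_remove(source_id)   # returns False (already removed)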

	def run_until_complete(self, future):
		"""
		Run until the Future is done.

		@type future: asyncio.Future
		@param future: a Future to wait for
		@rtype: object
		@return: the Future's result
		@raise: the Future's exception
		"""
		while not future.done():
			self.iteration()

		return future.result()
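
	# Illustrative usage sketch (not part of the original class), assuming an
	# EventLoop instance named 'loop' and that the future returned by
	# create_future() exposes the usual asyncio-style set_result() method:
	#
	#     future = loop.create_future()
	#     loop.call_soon(future.set_result, "finished")
	#     print(loop.run_until_complete(future))  # prints 'finished'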

	def call_soon(self, callback, *args):
		"""
		Arrange for a callback to be called as soon as possible. The callback
		is called after call_soon() returns, when control returns to the event
		loop.

		This operates as a FIFO queue, callbacks are called in the order in
		which they are registered. Each callback will be called exactly once.

		Any positional arguments after the callback will be passed to the
		callback when it is called.

		An object compatible with asyncio.Handle is returned, which can
		be used to cancel the callback.

		@type callback: callable
		@param callback: a function to call
		@return: a handle which can be used to cancel the callback
		@rtype: asyncio.Handle (or compatible)
		"""
		return self._handle(self.idle_add(
			self._call_soon_callback(callback, args)), self)

	# The call_soon method inherits thread safety from the idle_add method.
	call_soon_threadsafe = call_soon
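
	# Illustrative usage sketch (not part of the original class): schedule a
	# callback and keep the handle in case it needs to be cancelled, assuming
	# an EventLoop instance named 'loop':
	#
	#     handle = loop.call_soon(print, "runs on the next loop iteration")
	#     # handle.cancel() would prevent the callback from running
	#     loop.iteration()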

	def time(self):
		"""Return the time according to the event loop's clock.

		This is a float expressed in seconds since an epoch, but the
		epoch, precision, accuracy and drift are unspecified and may
		differ per event loop.
		"""
		return monotonic()

	def call_later(self, delay, callback, *args):
		"""
		Arrange for the callback to be called after the given delay seconds
		(either an int or float).

		An instance of asyncio.Handle is returned, which can be used to cancel
		the callback.

		callback will be called exactly once per call to call_later(). If two
		callbacks are scheduled for exactly the same time, it is undefined
		which will be called first.

		The optional positional args will be passed to the callback when
		it is called. If you want the callback to be called with keyword
		arguments, use a closure or functools.partial().

		@type delay: int or float
		@param delay: delay seconds
		@type callback: callable
		@param callback: a function to call
		@return: a handle which can be used to cancel the callback
		@rtype: asyncio.Handle (or compatible)
		"""
		return self._handle(self.timeout_add(
			delay * 1000, self._call_soon_callback(callback, args)), self)
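
	# Illustrative usage sketch (not part of the original class), assuming an
	# EventLoop instance named 'loop'; functools.partial carries the keyword
	# argument:
	#
	#     import functools
	#     greet = functools.partial(print, "hello", sep=", ")
	#     handle = loop.call_later(2.5, greet, "world")  # after 2.5 seconds
	#     loop.iteration()  # repeat until the callback has fired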

	def run_in_executor(self, executor, func, *args):
		"""
		Arrange for a func to be called in the specified executor.

		The executor argument should be an Executor instance. The default
		executor is used if executor is None.

		Use functools.partial to pass keywords to the *func*.

		@param executor: executor
		@type executor: concurrent.futures.Executor or None
		@param func: a function to call
		@type func: callable
		@return: a Future
		@rtype: asyncio.Future (or compatible)
		"""
		if executor is None:
			executor = self._default_executor
			if executor is None:
				executor = ForkExecutor(loop=self)
				self._default_executor = executor
		return executor.submit(func, *args)
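
	# Illustrative usage sketch (not part of the original class): run a
	# blocking function in the default executor and wait for its result,
	# assuming an EventLoop instance named 'loop' whose iteration() drives
	# the returned future to completion:
	#
	#     future = loop.run_in_executor(None, pow, 2, 32)
	#     print(loop.run_until_complete(future))  # 4294967296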

	def close(self):
		"""Close the event loop.

		This clears the queues and shuts down the executor,
		and waits for it to finish.
		"""
		executor = self._default_executor
		if executor is not None:
			self._default_executor = None
			executor.shutdown(wait=True)

		if self._poll_obj is not None:
			close = getattr(self._poll_obj, 'close', None)
			if close is not None:
				close()
			self._poll_obj = None