Example 1
	def unlock(self):
		if self._lock_obj is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		unlockfile(self._lock_obj)
		self._lock_obj = None
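
The unlock() methods in Examples 1 and 3 assume a matching lock step that stored the handle returned by portage.locks.lockfile() in self._lock_obj and set returncode once the lock was acquired. A minimal sketch of that pairing, assuming only the lockfile()/unlockfile() API used throughout these examples (the _FileLock class itself is hypothetical):

import os
from portage.locks import lockfile, unlockfile

class _FileLock:
	def __init__(self, path):
		self.path = path
		self._lock_obj = None
		self.returncode = None

	def lock(self):
		# lockfile() blocks until the lock is held and returns an opaque
		# handle that must later be passed back to unlockfile().
		self._lock_obj = lockfile(self.path, wantnewlockfile=1)
		self.returncode = os.EX_OK  # what unlock() checks for

	def unlock(self):
		if self._lock_obj is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		unlockfile(self._lock_obj)
		self._lock_obj = None
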
Example 2
    def getUnreadItems(self, repoid, update=False):
        """
        Determine if there are unread relevant items in news.repoid.unread.
        If there are unread items return their number.
        If update is specified, updateItems(repoid) will be called to
        check for new items.
        """

        if update:
            self.updateItems(repoid)

        unread_filename = self._unread_filename(repoid)
        unread_lock = None
        try:
            unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        except (InvalidLocation, OperationNotPermitted, PermissionDenied,
                ReadOnlyFileSystem):
            pass
        try:
            try:
                return len(grabfile(unread_filename))
            except PermissionDenied:
                return 0
        finally:
            if unread_lock:
                unlockfile(unread_lock)
Example 3
	def unlock(self):
		if self._lock_obj is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		unlockfile(self._lock_obj)
		self._lock_obj = None
Example 4
	def getUnreadItems(self, repoid, update=False):
		"""
		Determine if there are unread relevant items in news.repoid.unread.
		If there are unread items return their number.
		If update is specified, updateItems(repoid) will be called to
		check for new items.
		"""

		if update:
			self.updateItems(repoid)

		unread_filename = self._unread_filename(repoid)
		unread_lock = None
		try:
			unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		except (InvalidLocation, OperationNotPermitted, PermissionDenied):
			pass
		try:
			try:
				return len(grabfile(unread_filename))
			except PermissionDenied:
				return 0
		finally:
			if unread_lock:
				unlockfile(unread_lock)
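
A hedged usage sketch of the method above (the helper and its news_manager argument are hypothetical; news_manager stands for an already-constructed portage.news.NewsManager instance):

# Hypothetical helper: refresh and report unread news for one repo.
def report_unread(news_manager, repoid='gentoo'):
	count = news_manager.getUnreadItems(repoid, update=True)
	if count:
		print("*** %d news items need reading for repository %r." % (count, repoid))
	return count
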
Example 5
    def _input_handler(self):
        # Read the whole pickle in a single atomic read() call.
        data = self._read_buf(self._files.pipe_in)
        if data is None:
            pass  # EAGAIN
        elif data:
            try:
                obj = pickle.loads(data)
            except SystemExit:
                raise
            except Exception:
                # The pickle module can raise practically
                # any exception when given corrupt data.
                pass
            else:

                self._reopen_input()

                cmd_key = obj[0]
                cmd_handler = self.commands[cmd_key]
                reply = cmd_handler(obj)
                try:
                    self._send_reply(reply)
                except OSError as e:
                    if e.errno == errno.ENXIO:
                        # This happens if the client side has been killed.
                        pass
                    else:
                        raise

                # Allow the command to execute hooks after its reply
                # has been sent. This hook is used by the 'exit'
                # command to kill the ebuild process. For some
                # reason, the ebuild-ipc helper hangs up the
                # ebuild process if it is waiting for a reply
                # when we try to kill the ebuild process.
                reply_hook = getattr(cmd_handler, "reply_hook", None)
                if reply_hook is not None:
                    reply_hook()

        else:  # EIO/POLLHUP
            # This can be triggered due to a race condition which happens when
            # the previous _reopen_input() call occurs before the writer has
            # closed the pipe (see bug #401919). It's not safe to re-open
            # without a lock here, since it's possible that another writer will
            # write something to the pipe just before we close it, and in that
            # case the write will be lost. Therefore, try for a non-blocking
            # lock, and only re-open the pipe if the lock is acquired.
            lock_filename = os.path.join(os.path.dirname(self.input_fifo), ".ipc_lock")
            try:
                lock_obj = lockfile(lock_filename, unlinkfile=True, flags=os.O_NONBLOCK)
            except TryAgain:
                # We'll try again when another IO_HUP event arrives.
                pass
            else:
                try:
                    self._reopen_input()
                finally:
                    unlockfile(lock_obj)
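
The EIO/POLLHUP branch above depends on lockfile() raising TryAgain instead of blocking when os.O_NONBLOCK is passed in flags. A standalone sketch of that non-blocking pattern, assuming only the portage.locks and portage.exception names used above (try_with_lock and its arguments are illustrative):

import os
from portage.exception import TryAgain
from portage.locks import lockfile, unlockfile

def try_with_lock(lock_path, action):
    # Run action() while holding lock_path; skip (returning False) if
    # another process currently holds the lock.
    try:
        lock_obj = lockfile(lock_path, unlinkfile=True, flags=os.O_NONBLOCK)
    except TryAgain:
        return False
    try:
        action()
    finally:
        unlockfile(lock_obj)
    return True
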
Example 6
	def unlock(self):
		if self._imp is None:
			raise AssertionError('not locked')
		if isinstance(self._imp, (_LockProcess, _LockThread)):
			self._imp.unlock()
		else:
			unlockfile(self._imp)
		self._imp = None
Example 7
	def unlock(self):
		if self._imp is None:
			raise AssertionError('not locked')
		if isinstance(self._imp, (_LockProcess, _LockThread)):
			self._imp.unlock()
		else:
			unlockfile(self._imp)
		self._imp = None
Example 8
	def _unlock(self):
		if self._lock_obj is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		if self._unlock_future is not None:
			raise AssertionError("already unlocked")
		self._unlock_future = self.scheduler.create_future()
		unlockfile(self._lock_obj)
		self._lock_obj = None
Example 9
	def _unlock(self):
		if self._lock_obj is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		if self._unlock_future is not None:
			raise AssertionError("already unlocked")
		self._unlock_future = self.scheduler.create_future()
		unlockfile(self._lock_obj)
		self._lock_obj = None
Example 10
	def unlock(self):
		"""
		This method is deprecated in favor of async_unlock, since waiting
		for the child process to respond can trigger event loop recursion
		which is incompatible with asyncio.
		"""
		if self._imp is None:
			raise AssertionError('not locked')
		if isinstance(self._imp, (_LockProcess, _LockThread)):
			self._imp.unlock()
		else:
			unlockfile(self._imp)
		self._imp = None
Example 11
    def unlock(self):
        """
        This method is deprecated in favor of async_unlock, since waiting
        for the child process to respond can trigger event loop recursion
        which is incompatible with asyncio.
        """
        if self._imp is None:
            raise AssertionError('not locked')
        if isinstance(self._imp, (_LockProcess, _LockThread)):
            self._imp.unlock()
        else:
            unlockfile(self._imp)
        self._imp = None
Example 12
	def populate(self, getbinpkgs=0):
		"populates the binarytree"

		if self._populating:
			return
		from portage.locks import lockfile, unlockfile
		pkgindex_lock = None
		try:
			if os.access(self.pkgdir, os.W_OK):
				pkgindex_lock = lockfile(self._pkgindex_file,
					wantnewlockfile=1)
			self._populating = True
			self._populate(getbinpkgs)
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
			self._populating = False
Example 13
	def async_unlock(self):
		"""
		Release the lock asynchronously. Release notification is available
		via the add_done_callback method of the returned Future instance.

		@returns: Future, result is None
		"""
		if self._imp is None:
			raise AssertionError('not locked')
		if self._unlock_future is not None:
			raise AssertionError("already unlocked")
		if isinstance(self._imp, (_LockProcess, _LockThread)):
			unlock_future = self._imp.async_unlock()
		else:
			unlockfile(self._imp)
			unlock_future = self.scheduler.create_future()
			self.scheduler.call_soon(unlock_future.set_result, None)
		self._imp = None
		self._unlock_future = unlock_future
		return unlock_future
Example 14
    def async_unlock(self):
        """
        Release the lock asynchronously. Release notification is available
        via the add_done_callback method of the returned Future instance.

        @returns: Future, result is None
        """
        if self._imp is None:
            raise AssertionError('not locked')
        if self._unlock_future is not None:
            raise AssertionError("already unlocked")
        if isinstance(self._imp, (_LockProcess, _LockThread)):
            unlock_future = self._imp.async_unlock()
        else:
            unlockfile(self._imp)
            unlock_future = self.scheduler.create_future()
            self.scheduler.call_soon(unlock_future.set_result, None)
        self._imp = None
        self._unlock_future = unlock_future
        return unlock_future
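
A hedged sketch of how a caller might consume the Future returned by async_unlock() above (the release_later helper is hypothetical; lock_obj stands for an already-locked instance of the class):

# Hypothetical helper: release a lock asynchronously and react once the
# release notification arrives, without blocking the event loop.
def release_later(lock_obj):
    def _on_unlocked(future):
        future.result()  # re-raise if the release failed
        print('lock released')
    lock_obj.async_unlock().add_done_callback(_on_unlocked)
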
Example 15
	def populate(self, getbinpkgs=0, getbinpkgsonly=None):
		"populates the binarytree"

		if getbinpkgsonly is not None:
			warnings.warn(
				"portage.dbapi.bintree.binarytree.populate(): " + \
				"getbinpkgsonly parameter is deprecated",
				DeprecationWarning, stacklevel=2)

		if self._populating:
			return
		from portage.locks import lockfile, unlockfile
		pkgindex_lock = None
		try:
			if os.access(self.pkgdir, os.W_OK):
				pkgindex_lock = lockfile(self._pkgindex_file,
					wantnewlockfile=1)
			self._populating = True
			self._populate(getbinpkgs)
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
			self._populating = False
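
Examples 12 and 15 repeat the same lockfile()/try/finally/unlockfile() dance around _populate(). A small context-manager wrapper makes that pattern reusable; this is a sketch built on the same portage.locks API, not something portage itself provides:

import contextlib
from portage.locks import lockfile, unlockfile

@contextlib.contextmanager
def locked(path, **kwargs):
	# Pair lockfile() with unlockfile(), releasing on any exit path.
	lock_obj = lockfile(path, wantnewlockfile=1, **kwargs)
	try:
		yield lock_obj
	finally:
		unlockfile(lock_obj)

Using it, the locking in populate() above would reduce to something like `with locked(self._pkgindex_file): self._populate(getbinpkgs)` when pkgdir is writable.
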
Example 16
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True, force=False):
	"""
	Fetch files to DISTDIR and also verify digests if they are available.

	@param myuris: Maps each file name to a tuple of available fetch URIs.
	@type myuris: dict
	@param mysettings: Portage config instance.
	@type mysettings: portage.config
	@param listonly: Only print URIs and do not actually fetch them.
	@type listonly: bool
	@param fetchonly: Do not block for files that are locked by a
		concurrent fetcher process. This means that the function can
		return successfully *before* all files have been successfully
		fetched!
	@type fetchonly: bool
	@param use_locks: Enable locks. This parameter is ineffective if
		FEATURES=distlocks is disabled in the portage config!
	@type use_locks: bool
	@param digests: Maps each file name to a dict of digest types and values.
	@type digests: dict
	@param allow_missing_digests: Enable fetch even if there are no digests
		available for verification.
	@type allow_missing_digests: bool
	@param force: Force download, even when a file already exists in
		DISTDIR. This is most useful when there are no digests available,
		since otherwise download will be automatically forced if the
		existing file does not match the available digests. Also, this
		avoids the need to remove the existing file in advance, which
		makes it possible to atomically replace the file and avoid
		interference with concurrent processes.
	@type force: bool
	@rtype: int
	@return: 1 if successful, 0 otherwise.
	"""

	if force and digests:
		# Since the force parameter can trigger unnecessary fetch when the
		# digests match, do not allow force=True when digests are provided.
		raise PortageException(_('fetch: force=True is not allowed when digests are provided'))

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	userfetch = portage.data.secpass >= 2 and "userfetch" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK)
	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not distdir_writable and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if try_mirrors and "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			if distdir_writable:
				mirror_cache = os.path.join(mysettings["DISTDIR"],
						".mirror-cache.json")
			else:
				mirror_cache = None
			for l in locations:
				filedict[myfile].append(functools.partial(
					get_mirror_url, l, myfile, mysettings, mirror_cache))
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		try:
			_ensure_distdir(mysettings, mysettings["DISTDIR"])
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False
	valid_hashes = set(get_valid_checksum_keys())
	valid_hashes.discard("size")

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
			if not verifiable_hash_types:
				expected = " ".join(sorted(valid_hashes))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif portage.data.secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match and not force:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				# Remove broken symlinks or symlinks to files which
				# _check_distfile did not match above.
				if distdir_writable and (mystat is None or os.path.islink(myfile_path)):
					try:
						os.unlink(myfile_path)
					except OSError as e:
						if e.errno not in (errno.ENOENT, errno.ESTALE):
							raise
					mystat = None

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if distdir_writable and not force:
						# Since _check_distfile did not match above, the file
						# is either corrupt or its identity has changed since
						# the last time it was fetched, so rename it.
						temp_filename = _checksum_failure_temp_file(
							mysettings, mysettings["DISTDIR"], myfile)
						writemsg_stdout(_("Refetching... "
							"File renamed to '%s'\n\n") % \
							temp_filename, noiselevel=-1)

				# Stat the temporary download file for comparison with
				# fetch_resume_size.
				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					mystat = None

				if mystat is not None:
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than "
								"PORTAGE_FETCH_RESUME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, download_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(download_path):
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except EnvironmentError:
								pass
					elif not orig_digests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						if not force:
							continue
					else:
						if (mydigests[myfile].get("size") is not None
								and mystat.st_size < mydigests[myfile]["size"]
								and not restrict_fetch):
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				if isinstance(loc, functools.partial):
					loc = loc()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(download_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if distdir_writable and mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than "
									"PORTAGE_FETCH_RESUME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(download_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    os.path.basename(download_path)
					}

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						v = mysettings.get(k)
						if v is not None:
							variables[k] = v

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						mystat = os.lstat(download_path)
						if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
							os.unlink(download_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception? The file exists. Let digestcheck()
							# report appropriately on size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(download_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(download_path)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							# File is the correct size--check the checksums for the fetched
							# file NOW, for those users who don't have a stable/continuous
							# net connection. This way we have a chance to try to download
							# from another mirror...
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
									noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
								fetched = 0
								checksum_failure_count += 1
								if checksum_failure_count == \
									checksum_failure_primaryuri:
									# Switch to "primaryuri" mode in order
									# to increase the probability of success.
									primaryuris = \
										primaryuri_dict.get(myfile)
									if primaryuris:
										uri_list.extend(
											reversed(primaryuris))
								if checksum_failure_count >= \
									checksum_failure_max_tries:
									break
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									eout.ebegin("%s %s ;-)" % \
										(myfile, " ".join(sorted(digests))))
									eout.eend(0)
								fetched = 2
								break
					else: # no digests available
						if not myret:
							if not fetch_to_ro:
								_movefile(download_path, myfile_path, mysettings=mysettings)
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
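
A hedged usage sketch of fetch() above, assuming a configured portage environment (the distfile name and URI are illustrative):

# Hypothetical caller: list the URIs that would be tried for one distfile.
import portage
from portage.package.ebuild.fetch import fetch

settings = portage.config(clone=portage.settings)
myuris = {'foo-1.0.tar.gz': ('https://example.org/foo-1.0.tar.gz',)}
if not fetch(myuris, settings, listonly=1):
	print('fetch failed')
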
Example 17
    def updateItems(self, repoid):
        """
        Figure out which news items from NEWS_PATH are both unread and relevant to
        the user (according to the GLEP 42 standards of relevancy).  Then add these
        items into the news.repoid.unread file.
        """

        # Ensure that the unread path exists and is writable.

        try:
            ensure_dirs(self.unread_path,
                        uid=self._uid,
                        gid=self._gid,
                        mode=self._dir_mode,
                        mask=self._mode_mask)
        except (OperationNotPermitted, PermissionDenied):
            return

        if not os.access(self.unread_path, os.W_OK):
            return

        news_dir = self._news_dir(repoid)
        try:
            news = _os.listdir(
                _unicode_encode(news_dir,
                                encoding=_encodings['fs'],
                                errors='strict'))
        except OSError:
            return

        skip_filename = self._skip_filename(repoid)
        unread_filename = self._unread_filename(repoid)
        unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        try:
            try:
                unread = set(grabfile(unread_filename))
                unread_orig = unread.copy()
                skip = set(grabfile(skip_filename))
                skip_orig = skip.copy()
            except PermissionDenied:
                return

            for itemid in news:
                try:
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='strict')
                except UnicodeDecodeError:
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='replace')
                    writemsg_level(
                        _("!!! Invalid encoding in news item name: '%s'\n") % \
                        itemid, level=logging.ERROR, noiselevel=-1)
                    continue

                if itemid in skip:
                    continue
                filename = os.path.join(
                    news_dir, itemid, itemid + "." + self.language_id + ".txt")
                if not os.path.isfile(filename):
                    continue
                item = NewsItem(filename, itemid)
                if not item.isValid():
                    continue
                if item.isRelevant(profile=self._profile_path,
                                   config=self.config,
                                   vardb=self.vdb):
                    unread.add(item.name)
                    skip.add(item.name)

            if unread != unread_orig:
                write_atomic(unread_filename,
                             "".join("%s\n" % x for x in sorted(unread)))
                apply_secpass_permissions(unread_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

            if skip != skip_orig:
                write_atomic(skip_filename,
                             "".join("%s\n" % x for x in sorted(skip)))
                apply_secpass_permissions(skip_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

        finally:
            unlockfile(unread_lock)
Example 18
	def fix(self, **kwargs):
		onProgress = kwargs.get('onProgress', None)
		bintree = self._bintree
		# Force reindex in case pkgdir-index-trusted is enabled.
		bintree._populate_local(reindex=True)
		bintree.populated = True
		_instance_key = bintree.dbapi._instance_key
		cpv_all = self._bintree.dbapi.cpv_all()
		cpv_all.sort()
		maxval = 0
		if onProgress:
			onProgress(maxval, 0)
		pkgindex = self._pkgindex
		missing = []
		stale = []
		metadata = {}
		for d in pkgindex.packages:
			cpv = _pkg_str(d["CPV"], metadata=d,
				settings=bintree.settings)
			d["CPV"] = cpv
			metadata[_instance_key(cpv)] = d
			if not bintree.dbapi.cpv_exists(cpv):
				stale.append(cpv)

		for cpv in cpv_all:
			d = metadata.get(_instance_key(cpv))
			if not d or self._need_update(cpv, d):
				missing.append(cpv)

		if missing or stale:
			from portage import locks
			pkgindex_lock = locks.lockfile(
				self._pkgindex_file, wantnewlockfile=1)
			try:
				# Repopulate with lock held. If _populate_local returns
				# data then use that, since _load_pkgindex would return
				# stale data in this case.
				self._pkgindex = pkgindex = (bintree._populate_local() or
					bintree._load_pkgindex())
				cpv_all = self._bintree.dbapi.cpv_all()
				cpv_all.sort()

				# Recount stale/missing packages, with lock held.
				missing = []
				stale = []
				metadata = {}
				for d in pkgindex.packages:
					cpv = _pkg_str(d["CPV"], metadata=d,
						settings=bintree.settings)
					d["CPV"] = cpv
					metadata[_instance_key(cpv)] = d
					if not bintree.dbapi.cpv_exists(cpv):
						stale.append(cpv)

				for cpv in cpv_all:
					d = metadata.get(_instance_key(cpv))
					if not d or self._need_update(cpv, d):
						missing.append(cpv)

				maxval = len(missing)
				for i, cpv in enumerate(missing):
					d = bintree._pkgindex_entry(cpv)
					try:
						bintree._eval_use_flags(cpv, d)
					except portage.exception.InvalidDependString:
						writemsg("!!! Invalid binary package: '%s'\n" % \
							bintree.getname(cpv), noiselevel=-1)
					else:
						metadata[_instance_key(cpv)] = d

					if onProgress:
						onProgress(maxval, i+1)

				for cpv in stale:
					del metadata[_instance_key(cpv)]

				# We've updated the pkgindex, so set it to
				# repopulate when necessary.
				bintree.populated = False

				del pkgindex.packages[:]
				pkgindex.packages.extend(metadata.values())
				bintree._update_pkgindex_header(self._pkgindex.header)
				bintree._pkgindex_write(self._pkgindex)

			finally:
				locks.unlockfile(pkgindex_lock)

		if onProgress:
			if maxval == 0:
				maxval = 1
			onProgress(maxval, maxval)
		return (True, None)
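
fix() above scans once without the lock (a cheap pre-check) and then recounts missing/stale entries with the lock held, since another process may have changed the index in between. That check/lock/re-check pattern, sketched in isolation (fix_if_needed and its arguments are illustrative):

from portage import locks

def fix_if_needed(compute_work, do_fix, lock_path):
	# Unlocked pass: avoid taking the lock when there is nothing to do.
	if not compute_work():
		return
	lock_obj = locks.lockfile(lock_path, wantnewlockfile=1)
	try:
		# Re-check with the lock held; the state may have changed.
		work = compute_work()
		if work:
			do_fix(work)
	finally:
		locks.unlockfile(lock_obj)
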
Example 19
	def _input_handler(self):
		# Read the whole pickle in a single atomic read() call.
		data = self._read_buf(self._files.pipe_in)
		if data is None:
			pass # EAGAIN
		elif data:
			try:
				obj = pickle.loads(data)
			except SystemExit:
				raise
			except Exception:
				# The pickle module can raise practically
				# any exception when given corrupt data.
				pass
			else:

				self._reopen_input()

				cmd_key = obj[0]
				cmd_handler = self.commands[cmd_key]
				reply = cmd_handler(obj)
				try:
					self._send_reply(reply)
				except OSError as e:
					if e.errno == errno.ENXIO:
						# This happens if the client side has been killed.
						pass
					else:
						raise

				# Allow the command to execute hooks after its reply
				# has been sent. This hook is used by the 'exit'
				# command to kill the ebuild process. For some
				# reason, the ebuild-ipc helper hangs up the
				# ebuild process if it is waiting for a reply
				# when we try to kill the ebuild process.
				reply_hook = getattr(cmd_handler,
					'reply_hook', None)
				if reply_hook is not None:
					reply_hook()

		else: # EIO/POLLHUP
			# This can be triggered due to a race condition which happens when
			# the previous _reopen_input() call occurs before the writer has
			# closed the pipe (see bug #401919). It's not safe to re-open
			# without a lock here, since it's possible that another writer will
			# write something to the pipe just before we close it, and in that
			# case the write will be lost. Therefore, try for a non-blocking
			# lock, and only re-open the pipe if the lock is acquired.
			lock_filename = os.path.join(
				os.path.dirname(self.input_fifo), '.ipc_lock')
			try:
				lock_obj = lockfile(lock_filename, unlinkfile=True,
					flags=os.O_NONBLOCK)
			except TryAgain:
				# We'll try again when another IO_HUP event arrives.
				pass
			else:
				try:
					self._reopen_input()
				finally:
					unlockfile(lock_obj)
Example 20
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile] = []
			for location in locations:
				filedict[myfile].append(location + "/distfiles/" + myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]
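				# e.g. mirror://gentoo/foo.tar.gz -> mirrorname "gentoo",
				# path "foo.tar.gz"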

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch = True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
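		# Group-oriented modes: 0o070 gives the portage group rwx on
		# directories and 0o060 read/write on files; the 0o2 mask
		# presumably strips the world-write bit.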
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):
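					# statvfs semantics: f_bavail counts blocks available to
					# unprivileged users, while f_bfree (checked below) also
					# includes blocks reserved for the superuser.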

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]
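				# e.g. loc "https://host/f.tar.gz" -> protocol "https", so
				# FETCHCOMMAND_HTTPS is tried before the generic FETCHCOMMAND.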

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					# We either need to resume or start the download.
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						# Resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch = resumecommand
						command_var = resumecommand_var
					else:
						# Normal mode:
						locfetch = fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
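					# varexpand() substitutes ${DISTDIR}, ${URI} and ${FILE}
					# into the configured (typically wget-style) command.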
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately on size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
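									# Some servers return an HTML "404" page with
									# a 200 status; small non-HTML files are
									# sniffed for such a page and discarded.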
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							# File is the correct size--check the checksums for the fetched
							# file NOW, for those users who don't have a stable/continuous
							# net connection. This way we have a chance to try to download
							# from another mirror...
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
									noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								temp_filename = \
									_checksum_failure_temp_file(
									mysettings["DISTDIR"], myfile)
								writemsg_stdout(_("Refetching... "
									"File renamed to '%s'\n\n") % \
									temp_filename, noiselevel=-1)
								fetched = 0
								checksum_failure_count += 1
								if checksum_failure_count == \
									checksum_failure_primaryuri:
									# Switch to "primaryuri" mode in order
									# to increase the probability of success.
									primaryuris = \
										primaryuri_dict.get(myfile)
									if primaryuris:
										uri_list.extend(
											reversed(primaryuris))
								if checksum_failure_count >= \
									checksum_failure_max_tries:
									break
							else:
								eout = EOutput()
								eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									eout.ebegin("%s %s ;-)" % \
										(myfile, " ".join(sorted(digests))))
									eout.eend(0)
								fetched = 2
								break
					else:
						if not myret:
							fetched = 2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
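
The fetch() function above returns 1 when every requested file is present and verified (or merely listed), and 0 on failure. Below is a minimal caller sketch; the settings object, URI and file name are hypothetical, and the keyword names follow the parameters referenced in the code above:

# Sketch only: mysettings is assumed to be a configured portage
# config instance; fetch and writemsg are imported as in the module above.
myuris = {
	"foo-1.0.tar.gz": ["https://example.invalid/foo-1.0.tar.gz"],  # hypothetical
}
if not fetch(myuris, mysettings, fetchonly=1):
	writemsg("!!! Fetch failed.\n", noiselevel=-1)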
Esempio n. 21
0
	def unlock(self):
		"""Release our exclusive lock on the preserved libs registry."""
		if self._lock is None:
			raise AssertionError("not locked")
		unlockfile(self._lock)
		self._lock = None
Esempio n. 22
0
	def updateItems(self, repoid):
		"""
		Figure out which news items from NEWS_PATH are both unread and relevant to
		the user (according to the GLEP 42 standards of relevancy).  Then add these
		items into the news.repoid.unread file.
		"""

		# Ensure that the unread path exists and is writable.

		try:
			ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
				mode=self._dir_mode, mask=self._mode_mask)
		except (OperationNotPermitted, PermissionDenied):
			return

		if not os.access(self.unread_path, os.W_OK):
			return

		news_dir = self._news_dir(repoid)
		try:
			news = _os.listdir(_unicode_encode(news_dir,
				encoding=_encodings['fs'], errors='strict'))
		except OSError:
			return

		skip_filename = self._skip_filename(repoid)
		unread_filename = self._unread_filename(repoid)
		unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		try:
			try:
				unread = set(grabfile(unread_filename))
				unread_orig = unread.copy()
				skip = set(grabfile(skip_filename))
				skip_orig = skip.copy()
			except PermissionDenied:
				return

			for itemid in news:
				try:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='replace')
					writemsg_level(
						_("!!! Invalid encoding in news item name: '%s'\n") % \
						itemid, level=logging.ERROR, noiselevel=-1)
					continue

				if itemid in skip:
					continue
				filename = os.path.join(news_dir, itemid,
					itemid + "." + self.language_id + ".txt")
				if not os.path.isfile(filename):
					continue
				item = NewsItem(filename, itemid)
				if not item.isValid():
					continue
				if item.isRelevant(profile=self._profile_path,
					config=self.config, vardb=self.vdb):
					unread.add(item.name)
					skip.add(item.name)

			if unread != unread_orig:
				write_atomic(unread_filename,
					"".join("%s\n" % x for x in sorted(unread)))
				apply_secpass_permissions(unread_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

			if skip != skip_orig:
				write_atomic(skip_filename,
					"".join("%s\n" % x for x in sorted(skip)))
				apply_secpass_permissions(skip_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

		finally:
			unlockfile(unread_lock)
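
updateItems() above follows a lock, read, modify, write-atomically, unlock pattern. A stripped-down sketch of the same pattern with the portage.locks and portage.util helpers used above (the path and item id are hypothetical):

from portage.locks import lockfile, unlockfile
from portage.util import grabfile, write_atomic

path = "/var/lib/gentoo/news-gentoo.unread"  # hypothetical
lock = lockfile(path, wantnewlockfile=1)
try:
	items = set(grabfile(path))           # read current state under the lock
	items.add("2024-01-01-example-item")  # hypothetical item id
	# write_atomic() replaces the file via rename, so concurrent readers
	# never see a partially written list.
	write_atomic(path, "".join("%s\n" % x for x in sorted(items)))
finally:
	unlockfile(lock)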
Esempio n. 23
0
 def unlock(self):
     """Release our exclusive lock on the preserved libs registry."""
     if self._lock is None:
         raise AssertionError("not locked")
     unlockfile(self._lock)
     self._lock = None
Esempio n. 24
0
	def unlock(self):
		if self._lock is None:
			raise AssertionError("not locked")
		unlockfile(self._lock)
		self._lock = None
Esempio n. 25
0
	def _input_handler(self, fd, event):
		# Read the whole pickle in a single atomic read() call.
		data = None
		if event & self.scheduler.IO_IN:
			# For maximum portability, use os.read() here since
			# array.fromfile() and file.read() are both known to
			# erroneously return an empty string from this
			# non-blocking fifo stream on FreeBSD (bug #337465).
			try:
				data = os.read(fd, self._bufsize)
			except OSError as e:
				if e.errno != errno.EAGAIN:
					raise
				# Assume that another event will be generated
				# if there's any relevant data.

		if data:

			try:
				obj = pickle.loads(data)
			except SystemExit:
				raise
			except Exception:
				# The pickle module can raise practically
				# any exception when given corrupt data.
				pass
			else:

				self._reopen_input()

				cmd_key = obj[0]
				cmd_handler = self.commands[cmd_key]
				reply = cmd_handler(obj)
				try:
					self._send_reply(reply)
				except OSError as e:
					if e.errno == errno.ENXIO:
						# This happens if the client side has been killed.
						pass
					else:
						raise

				# Allow the command to execute hooks after its reply
				# has been sent. This hook is used by the 'exit'
				# command to kill the ebuild process. For some
				# reason, the ebuild-ipc helper hangs up the
				# ebuild process if it is waiting for a reply
				# when we try to kill the ebuild process.
				reply_hook = getattr(cmd_handler,
					'reply_hook', None)
				if reply_hook is not None:
					reply_hook()

		elif event & self.scheduler.IO_HUP:
			# This can be triggered due to a race condition which happens when
			# the previous _reopen_input() call occurs before the writer has
			# closed the pipe (see bug #401919). It's not safe to re-open
			# without a lock here, since it's possible that another writer will
			# write something to the pipe just before we close it, and in that
			# case the write will be lost. Therefore, try for a non-blocking
			# lock, and only re-open the pipe if the lock is acquired.
			lock_filename = os.path.join(
				os.path.dirname(self.input_fifo), '.ipc_lock')
			try:
				lock_obj = lockfile(lock_filename, unlinkfile=True,
					flags=os.O_NONBLOCK)
			except TryAgain:
				# We'll try again when another IO_HUP event arrives.
				pass
			else:
				try:
					self._reopen_input()
				finally:
					unlockfile(lock_obj)

		return True
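
The handler above depends on one non-blocking os.read() returning the whole pickle at once. A self-contained sketch of that read pattern on a fifo (the path is hypothetical):

import errno
import os

fifo = "/tmp/example_fifo"  # hypothetical path
if not os.path.exists(fifo):
	os.mkfifo(fifo)
fd = os.open(fifo, os.O_RDONLY | os.O_NONBLOCK)
try:
	data = os.read(fd, 4096)  # single atomic read, as above
except OSError as e:
	if e.errno != errno.EAGAIN:
		raise
	data = None  # nothing buffered; wait for the next IO_IN event
finally:
	os.close(fd)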
Esempio n. 26
0
	def inject(self, cpv, filename=None):
		"""Add a freshly built package to the database.  This updates
		$PKGDIR/Packages with the new package metadata (including MD5).
		@param cpv: The cpv of the new package to inject
		@type cpv: string
		@param filename: File path of the package to inject, or None if it's
			already in the location returned by getname()
		@type filename: string
		@rtype: None
		"""
		mycat, mypkg = catsplit(cpv)
		if not self.populated:
			self.populate()
		if filename is None:
			full_path = self.getname(cpv)
		else:
			full_path = filename
		try:
			s = os.stat(full_path)
		except OSError as e:
			if e.errno != errno.ENOENT:
				raise
			del e
			writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
				noiselevel=-1)
			return
		mytbz2 = portage.xpak.tbz2(full_path)
		slot = mytbz2.getfile("SLOT")
		if slot is None:
			writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
				noiselevel=-1)
			return
		slot = slot.strip()
		self.dbapi.cpv_inject(cpv)

		# Reread the Packages index (in case it's been changed by another
		# process) and then update it, all while holding a lock.
		from portage.locks import lockfile, unlockfile
		pkgindex_lock = None
		created_symlink = False
		try:
			pkgindex_lock = lockfile(self._pkgindex_file,
				wantnewlockfile=1)
			if filename is not None:
				new_filename = self.getname(cpv)
				try:
					samefile = os.path.samefile(filename, new_filename)
				except OSError:
					samefile = False
				if not samefile:
					self._ensure_dir(os.path.dirname(new_filename))
					_movefile(filename, new_filename, mysettings=self.settings)
			if self._all_directory and \
				self.getname(cpv).split(os.path.sep)[-2] == "All":
				self._create_symlink(cpv)
				created_symlink = True
			pkgindex = self._load_pkgindex()

			if not self._pkgindex_version_supported(pkgindex):
				pkgindex = self._new_pkgindex()

			# Discard remote metadata to ensure that _pkgindex_entry
			# gets the local metadata. This also updates state for future
			# isremote calls.
			if self._remotepkgs is not None:
				self._remotepkgs.pop(cpv, None)

			# Discard cached metadata to ensure that _pkgindex_entry
			# doesn't return stale metadata.
			self.dbapi._aux_cache.pop(cpv, None)

			try:
				d = self._pkgindex_entry(cpv)
			except portage.exception.InvalidDependString:
				writemsg(_("!!! Invalid binary package: '%s'\n") % \
					self.getname(cpv), noiselevel=-1)
				self.dbapi.cpv_remove(cpv)
				del self._pkg_paths[cpv]
				return

			# If found, remove package(s) with duplicate path.
			path = d.get("PATH", "")
			for i in range(len(pkgindex.packages) - 1, -1, -1):
				d2 = pkgindex.packages[i]
				if path and path == d2.get("PATH"):
					# Handle path collisions in $PKGDIR/All
					# when CPV is not identical.
					del pkgindex.packages[i]
				elif cpv == d2.get("CPV"):
					if path == d2.get("PATH", ""):
						del pkgindex.packages[i]
					elif created_symlink and not d2.get("PATH", ""):
						# Delete entry for the package that was just
						# overwritten by a symlink to this package.
						del pkgindex.packages[i]

			pkgindex.packages.append(d)

			self._update_pkgindex_header(pkgindex.header)
			f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
			pkgindex.write(f)
			f.close()
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)
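
A plausible call site for inject() above, after a package file has been produced outside PKGDIR (the cpv and path are hypothetical, and bintree is assumed to be a populated binarytree instance):

cpv = "app-misc/foo-1.0"     # hypothetical
built = "/tmp/foo-1.0.tbz2"  # hypothetical package file
# Moves the file into the location returned by getname() and
# rewrites the Packages index while holding the pkgindex lock.
bintree.inject(cpv, filename=built)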
Esempio n. 27
0
    def fix(self, **kwargs):
        onProgress = kwargs.get('onProgress', None)
        bintree = self._bintree
        cpv_all = self._bintree.dbapi.cpv_all()
        cpv_all.sort()
        missing = []
        maxval = 0
        if onProgress:
            onProgress(maxval, 0)
        pkgindex = self._pkgindex
        missing = []
        metadata = {}
        for d in pkgindex.packages:
            metadata[d["CPV"]] = d

        for i, cpv in enumerate(cpv_all):
            d = metadata.get(cpv)
            if not d or self._need_update(cpv, d):
                missing.append(cpv)

        stale = set(metadata).difference(cpv_all)
        if missing or stale:
            from portage import locks
            pkgindex_lock = locks.lockfile(self._pkgindex_file,
                                           wantnewlockfile=1)
            try:
                # Repopulate with lock held.
                bintree._populate()
                cpv_all = self._bintree.dbapi.cpv_all()
                cpv_all.sort()

                pkgindex = bintree._load_pkgindex()
                self._pkgindex = pkgindex

                metadata = {}
                for d in pkgindex.packages:
                    metadata[d["CPV"]] = d

                # Recount missing packages, with lock held.
                del missing[:]
                for i, cpv in enumerate(cpv_all):
                    d = metadata.get(cpv)
                    if not d or self._need_update(cpv, d):
                        missing.append(cpv)

                maxval = len(missing)
                for i, cpv in enumerate(missing):
                    try:
                        metadata[cpv] = bintree._pkgindex_entry(cpv)
                    except portage.exception.InvalidDependString:
                        writemsg("!!! Invalid binary package: '%s'\n" % \
                            bintree.getname(cpv), noiselevel=-1)

                    if onProgress:
                        onProgress(maxval, i + 1)

                for cpv in set(metadata).difference(
                        self._bintree.dbapi.cpv_all()):
                    del metadata[cpv]

                # We've updated the pkgindex, so set it to
                # repopulate when necessary.
                bintree.populated = False

                del pkgindex.packages[:]
                pkgindex.packages.extend(metadata.values())
                bintree._pkgindex_write(self._pkgindex)

            finally:
                locks.unlockfile(pkgindex_lock)

        if onProgress:
            if maxval == 0:
                maxval = 1
            onProgress(maxval, maxval)
        return None
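
fix() reports progress through the optional onProgress callback, which is invoked as onProgress(maxval, current), as seen above. A minimal callback sketch (checker stands for an instance of the class defining fix()):

def on_progress(maxval, curval):
	# maxval is clamped to 1 above, so this never divides by zero.
	print("pkgindex fix: %d%%" % (100 * curval // maxval))

checker.fix(onProgress=on_progress)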
Esempio n. 28
0
	def fix(self,  **kwargs):
		onProgress = kwargs.get('onProgress', None)
		bintree = self._bintree
		cpv_all = self._bintree.dbapi.cpv_all()
		cpv_all.sort()
		missing = []
		maxval = 0
		if onProgress:
			onProgress(maxval, 0)
		pkgindex = self._pkgindex
		missing = []
		metadata = {}
		for d in pkgindex.packages:
			metadata[d["CPV"]] = d

		for i, cpv in enumerate(cpv_all):
			d = metadata.get(cpv)
			if not d or self._need_update(cpv, d):
				missing.append(cpv)

		stale = set(metadata).difference(cpv_all)
		if missing or stale:
			from portage import locks
			pkgindex_lock = locks.lockfile(
				self._pkgindex_file, wantnewlockfile=1)
			try:
				# Repopulate with lock held.
				bintree._populate()
				cpv_all = self._bintree.dbapi.cpv_all()
				cpv_all.sort()

				pkgindex = bintree._load_pkgindex()
				self._pkgindex = pkgindex

				metadata = {}
				for d in pkgindex.packages:
					metadata[d["CPV"]] = d

				# Recount missing packages, with lock held.
				del missing[:]
				for i, cpv in enumerate(cpv_all):
					d = metadata.get(cpv)
					if not d or self._need_update(cpv, d):
						missing.append(cpv)

				maxval = len(missing)
				for i, cpv in enumerate(missing):
					try:
						metadata[cpv] = bintree._pkgindex_entry(cpv)
					except portage.exception.InvalidDependString:
						writemsg("!!! Invalid binary package: '%s'\n" % \
							bintree.getname(cpv), noiselevel=-1)

					if onProgress:
						onProgress(maxval, i+1)

				for cpv in set(metadata).difference(
					self._bintree.dbapi.cpv_all()):
					del metadata[cpv]

				# We've updated the pkgindex, so set it to
				# repopulate when necessary.
				bintree.populated = False

				del pkgindex.packages[:]
				pkgindex.packages.extend(metadata.values())
				bintree._pkgindex_write(self._pkgindex)

			finally:
				locks.unlockfile(pkgindex_lock)

		if onProgress:
			if maxval == 0:
				maxval = 1
			onProgress(maxval, maxval)
		return None
Esempio n. 29
0
	def unlock(self):
		unlockfile(self._lock)
		self._lock = None
Esempio n. 30
0
	def fix(self,  **kwargs):
		onProgress = kwargs.get('onProgress', None)
		bintree = self._bintree
		_instance_key = bintree.dbapi._instance_key
		cpv_all = self._bintree.dbapi.cpv_all()
		cpv_all.sort()
		missing = []
		maxval = 0
		if onProgress:
			onProgress(maxval, 0)
		pkgindex = self._pkgindex
		missing = []
		stale = []
		metadata = {}
		for d in pkgindex.packages:
			cpv = _pkg_str(d["CPV"], metadata=d,
				settings=bintree.settings)
			d["CPV"] = cpv
			metadata[_instance_key(cpv)] = d
			if not bintree.dbapi.cpv_exists(cpv):
				stale.append(cpv)

		for cpv in cpv_all:
			d = metadata.get(_instance_key(cpv))
			if not d or self._need_update(cpv, d):
				missing.append(cpv)

		if missing or stale:
			from portage import locks
			pkgindex_lock = locks.lockfile(
				self._pkgindex_file, wantnewlockfile=1)
			try:
				# Repopulate with lock held.
				bintree._populate()
				cpv_all = self._bintree.dbapi.cpv_all()
				cpv_all.sort()

				pkgindex = bintree._load_pkgindex()
				self._pkgindex = pkgindex

				# Recount stale/missing packages, with lock held.
				missing = []
				stale = []
				metadata = {}
				for d in pkgindex.packages:
					cpv = _pkg_str(d["CPV"], metadata=d,
						settings=bintree.settings)
					d["CPV"] = cpv
					metadata[_instance_key(cpv)] = d
					if not bintree.dbapi.cpv_exists(cpv):
						stale.append(cpv)

				for cpv in cpv_all:
					d = metadata.get(_instance_key(cpv))
					if not d or self._need_update(cpv, d):
						missing.append(cpv)

				maxval = len(missing)
				for i, cpv in enumerate(missing):
					d = bintree._pkgindex_entry(cpv)
					try:
						bintree._eval_use_flags(cpv, d)
					except portage.exception.InvalidDependString:
						writemsg("!!! Invalid binary package: '%s'\n" % \
							bintree.getname(cpv), noiselevel=-1)
					else:
						metadata[_instance_key(cpv)] = d

					if onProgress:
						onProgress(maxval, i+1)

				for cpv in stale:
					del metadata[_instance_key(cpv)]

				# We've updated the pkgindex, so set it to
				# repopulate when necessary.
				bintree.populated = False

				del pkgindex.packages[:]
				pkgindex.packages.extend(metadata.values())
				bintree._pkgindex_write(self._pkgindex)

			finally:
				locks.unlockfile(pkgindex_lock)

		if onProgress:
			if maxval == 0:
				maxval = 1
			onProgress(maxval, maxval)
		return (True, None)
Esempio n. 31
0
    def _input_handler(self, fd, event):
        # Read the whole pickle in a single atomic read() call.
        data = None
        if event & self.scheduler.IO_IN:
            # For maximum portability, use os.read() here since
            # array.fromfile() and file.read() are both known to
            # erroneously return an empty string from this
            # non-blocking fifo stream on FreeBSD (bug #337465).
            try:
                data = os.read(fd, self._bufsize)
            except OSError as e:
                if e.errno != errno.EAGAIN:
                    raise
                # Assume that another event will be generated
                # if there's any relevant data.

        if data:

            try:
                obj = pickle.loads(data)
            except SystemExit:
                raise
            except Exception:
                # The pickle module can raise practically
                # any exception when given corrupt data.
                pass
            else:

                self._reopen_input()

                cmd_key = obj[0]
                cmd_handler = self.commands[cmd_key]
                reply = cmd_handler(obj)
                try:
                    self._send_reply(reply)
                except OSError as e:
                    if e.errno == errno.ENXIO:
                        # This happens if the client side has been killed.
                        pass
                    else:
                        raise

                # Allow the command to execute hooks after its reply
                # has been sent. This hook is used by the 'exit'
                # command to kill the ebuild process. For some
                # reason, the ebuild-ipc helper hangs up the
                # ebuild process if it is waiting for a reply
                # when we try to kill the ebuild process.
                reply_hook = getattr(cmd_handler, 'reply_hook', None)
                if reply_hook is not None:
                    reply_hook()

        elif event & self.scheduler.IO_HUP:
            # This can be triggered due to a race condition which happens when
            # the previous _reopen_input() call occurs before the writer has
            # closed the pipe (see bug #401919). It's not safe to re-open
            # without a lock here, since it's possible that another writer will
            # write something to the pipe just before we close it, and in that
            # case the write will be lost. Therefore, try for a non-blocking
            # lock, and only re-open the pipe if the lock is acquired.
            lock_filename = os.path.join(os.path.dirname(self.input_fifo),
                                         '.ipc_lock')
            try:
                lock_obj = lockfile(lock_filename,
                                    unlinkfile=True,
                                    flags=os.O_NONBLOCK)
            except TryAgain:
                # We'll try again when another IO_HUP event arrives.
                pass
            else:
                try:
                    self._reopen_input()
                finally:
                    unlockfile(lock_obj)

        return True
Esempio n. 32
0
	def inject(self, cpv, filename=None):
		"""Add a freshly built package to the database.  This updates
		$PKGDIR/Packages with the new package metadata (including MD5).
		@param cpv: The cpv of the new package to inject
		@type cpv: string
		@param filename: File path of the package to inject, or None if it's
			already in the location returned by getname()
		@type filename: string
		@rtype: None
		"""
		mycat, mypkg = catsplit(cpv)
		if not self.populated:
			self.populate()
		if filename is None:
			full_path = self.getname(cpv)
		else:
			full_path = filename
		try:
			s = os.stat(full_path)
		except OSError as e:
			if e.errno != errno.ENOENT:
				raise
			del e
			writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
				noiselevel=-1)
			return
		mytbz2 = portage.xpak.tbz2(full_path)
		slot = mytbz2.getfile("SLOT")
		if slot is None:
			writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
				noiselevel=-1)
			return
		slot = slot.strip()
		self.dbapi.cpv_inject(cpv)

		# Reread the Packages index (in case it's been changed by another
		# process) and then update it, all while holding a lock.
		from portage.locks import lockfile, unlockfile
		pkgindex_lock = None
		created_symlink = False
		try:
			pkgindex_lock = lockfile(self._pkgindex_file,
				wantnewlockfile=1)
			if filename is not None:
				new_filename = self.getname(cpv)
				self._ensure_dir(os.path.dirname(new_filename))
				_movefile(filename, new_filename, mysettings=self.settings)
			if self._all_directory and \
				self.getname(cpv).split(os.path.sep)[-2] == "All":
				self._create_symlink(cpv)
				created_symlink = True
			pkgindex = self._load_pkgindex()

			if not self._pkgindex_version_supported(pkgindex):
				pkgindex = self._new_pkgindex()

			try:
				d = self._pkgindex_entry(cpv)
			except portage.exception.InvalidDependString:
				writemsg(_("!!! Invalid binary package: '%s'\n") % \
					self.getname(cpv), noiselevel=-1)
				self.dbapi.cpv_remove(cpv)
				del self._pkg_paths[cpv]
				return

			# If found, remove package(s) with duplicate path.
			path = d.get("PATH", "")
			for i in range(len(pkgindex.packages) - 1, -1, -1):
				d2 = pkgindex.packages[i]
				if path and path == d2.get("PATH"):
					# Handle path collisions in $PKGDIR/All
					# when CPV is not identical.
					del pkgindex.packages[i]
				elif cpv == d2.get("CPV"):
					if path == d2.get("PATH", ""):
						del pkgindex.packages[i]
					elif created_symlink and not d2.get("PATH", ""):
						# Delete entry for the package that was just
						# overwritten by a symlink to this package.
						del pkgindex.packages[i]

			pkgindex.packages.append(d)

			self._update_pkgindex_header(pkgindex.header)
			from portage.util import atomic_ofstream
			f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
			try:
				pkgindex.write(f)
			finally:
				f.close()
		finally:
			if pkgindex_lock:
				unlockfile(pkgindex_lock)

		if self._remotepkgs is not None:
			# When a remote package is downloaded and injected,
			# update state so self.isremote() returns False.
			self._remotepkgs.pop(cpv, None)
Esempio n. 33
0
 def unlock(self):
     unlockfile(self._lock)
     self._lock = None
Esempio n. 34
0
File: files.py Project: ezc/portage
 def unlock(self):
     if self._lock is None:
         raise AssertionError("not locked")
     unlockfile(self._lock)
     self._lock = None